code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from urllib.parse import quote_plus
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url, render_json
class LockClient(BaseClient):
    """Lock client class for working with rucio locks"""

    LOCKS_BASEURL = 'locks'

    def get_dataset_locks(self, scope, name):
        """
        Get the dataset locks of the specified dataset.

        :param scope: the scope of the did of the locks to list.
        :param name: the name of the did of the locks to list.
        :returns: generator of dictionaries with lock information.
        :raises: the exception mapped from the server error response.
        """
        path = '/'.join([self.LOCKS_BASEURL, quote_plus(scope), quote_plus(name)])
        url = build_url(choice(self.list_hosts), path=path, params={'did_type': 'dataset'})
        result = self._send_request(url)
        if result.status_code == codes.ok:  # pylint: disable-msg=E1101
            return self._load_json_data(result)
        exc_cls, exc_msg = self._get_exception(headers=result.headers,
                                               status_code=result.status_code)
        raise exc_cls(exc_msg)

    def get_locks_for_dids(self, dids, **filter_args):
        """
        Get list of locks for all the files found, recursively, in the listed datasets or containers.

        :param dids: list of dictionaries {"scope":..., "name":..., "type":...}
                     type can be either "dataset" or "container";
                     type is optional, but if specified, improves the query performance
        :param filter_args: optional attribute/value pairs; a lock is returned only
                            if every given attribute matches.
        :returns: list of dictionaries with lock info
        :raises ValueError: if a did dictionary carries an unsupported type
        """
        # Validate explicitly rather than with `assert`, which is silently
        # stripped when Python runs with optimizations (-O).
        for did in dids:
            if did.get("type", "dataset") not in ("dataset", "container"):
                raise ValueError("did type can be either 'container' or 'dataset'")

        path = '/'.join([self.LOCKS_BASEURL, "bulk_locks_for_dids"])
        url = build_url(choice(self.list_hosts), path=path)
        result = self._send_request(url, type_='POST', data=render_json(dids=dids))
        if result.status_code != codes.ok:  # pylint: disable-msg=E1101
            exc_cls, exc_msg = self._get_exception(headers=result.headers,
                                                   status_code=result.status_code)
            raise exc_cls(exc_msg)
        # Client-side filtering: `all()` over an empty filter_args is True,
        # so no filter means every lock is kept.
        return [lock for lock in self._load_json_data(result)
                if all(lock.get(attr) == value for attr, value in filter_args.items())]

    def get_dataset_locks_by_rse(self, rse):
        """
        Get all dataset locks of the specified rse.

        :param rse: the rse of the locks to list.
        :returns: generator of dictionaries with lock information.
        :raises: the exception mapped from the server error response.
        """
        path = '/'.join([self.LOCKS_BASEURL, rse])
        url = build_url(choice(self.list_hosts), path=path, params={'did_type': 'dataset'})
        result = self._send_request(url)
        if result.status_code == codes.ok:  # pylint: disable-msg=E1101
            return self._load_json_data(result)
        exc_cls, exc_msg = self._get_exception(headers=result.headers,
                                               status_code=result.status_code)
        raise exc_cls(exc_msg)
from json import dumps
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url
class SubscriptionClient(BaseClient):
    """SubscriptionClient class for working with subscriptions"""

    SUB_BASEURL = 'subscriptions'

    @staticmethod
    def _check_option_types(filter_, replication_rules):
        """Reject options of the wrong type before contacting the server."""
        if filter_ and not isinstance(filter_, dict):
            raise TypeError('filter should be a dict')
        if replication_rules and not isinstance(replication_rules, list):
            raise TypeError('replication_rules should be a list')

    def add_subscription(self, name, account, filter_, replication_rules, comments, lifetime, retroactive, dry_run, priority=3):
        """
        Adds a new subscription which will be verified against every new added file and dataset

        :param name: Name of the subscription
        :type: String
        :param account: Account identifier
        :type account: String
        :param filter_: Dictionary of attributes by which the input data should be filtered
                        **Example**: ``{'dsn': 'data11_hi*.express_express.*,data11_hi*physics_MinBiasOverlay*', 'account': 'tzero'}``
        :type filter_: Dict
        :param replication_rules: Replication rules to be set : Dictionary with keys copies, rse_expression, weight, rse_expression
        :type replication_rules: Dict
        :param comments: Comments for the subscription
        :type comments: String
        :param lifetime: Subscription's lifetime (days); False if subscription has no lifetime
        :type lifetime: Integer or False
        :param retroactive: Flag to know if the subscription should be applied on previous data
        :type retroactive: Boolean
        :param dry_run: Just print the subscriptions actions without actually executing them (Useful if retroactive flag is set)
        :type dry_run: Boolean
        :param priority: The priority of the subscription (3 by default)
        :type priority: Integer
        """
        path = '/'.join([self.SUB_BASEURL, account, name])
        url = build_url(choice(self.list_hosts), path=path)
        if retroactive:
            raise NotImplementedError('Retroactive mode is not implemented')
        self._check_option_types(filter_, replication_rules)
        options = {'filter': filter_, 'replication_rules': replication_rules, 'comments': comments,
                   'lifetime': lifetime, 'retroactive': retroactive, 'dry_run': dry_run, 'priority': priority}
        result = self._send_request(url, type_='POST', data=dumps({'options': options}))
        if result.status_code != codes.created:  # pylint: disable=no-member
            exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)
            raise exc_cls(exc_msg)
        return result.text

    def list_subscriptions(self, name=None, account=None):
        """
        Returns a dictionary with the subscription information :
        Examples: ``{'status': 'INACTIVE/ACTIVE/BROKEN', 'last_modified_date': ...}``

        :param name: Name of the subscription
        :type: String
        :param account: Account identifier
        :type account: String
        :returns: Dictionary containing subscription parameter
        :rtype: Dict
        :raises: exception.NotFound if subscription is not found
        """
        # The endpoint varies with which identifiers were supplied.
        if account and name:
            path = '/'.join([self.SUB_BASEURL, account, name])
        elif account:
            path = '/'.join([self.SUB_BASEURL, account])
        elif name:
            path = '/'.join([self.SUB_BASEURL, 'Name', name])
        else:
            path = self.SUB_BASEURL + '/'
        url = build_url(choice(self.list_hosts), path=path)
        result = self._send_request(url, type_='GET')
        if result.status_code != codes.ok:  # pylint: disable=no-member
            exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)
            raise exc_cls(exc_msg)
        return self._load_json_data(result)

    def update_subscription(self, name, account=None, filter_=None, replication_rules=None, comments=None, lifetime=None, retroactive=None, dry_run=None, priority=None):
        """
        Updates a subscription

        :param name: Name of the subscription
        :type: String
        :param account: Account identifier
        :type account: String
        :param filter_: Dictionary of attributes by which the input data should be filtered
                        **Example**: ``{'dsn': 'data11_hi*.express_express.*,data11_hi*physics_MinBiasOverlay*', 'account': 'tzero'}``
        :type filter_: Dict
        :param replication_rules: Replication rules to be set : Dictionary with keys copies, rse_expression, weight, rse_expression
        :type replication_rules: Dict
        :param comments: Comments for the subscription
        :type comments: String
        :param lifetime: Subscription's lifetime (days); False if subscription has no lifetime
        :type lifetime: Integer or False
        :param retroactive: Flag to know if the subscription should be applied on previous data
        :type retroactive: Boolean
        :param dry_run: Just print the subscriptions actions without actually executing them (Useful if retroactive flag is set)
        :type dry_run: Boolean
        :param priority: The priority of the subscription
        :type priority: Integer
        :raises: exception.NotFound if subscription is not found
        """
        if not account:
            # Fall back to the account this client is authenticated as.
            account = self.account
        if retroactive:
            raise NotImplementedError('Retroactive mode is not implemented')
        path = '/'.join([self.SUB_BASEURL, account, name])
        url = build_url(choice(self.list_hosts), path=path)
        self._check_option_types(filter_, replication_rules)
        options = {'filter': filter_, 'replication_rules': replication_rules, 'comments': comments,
                   'lifetime': lifetime, 'retroactive': retroactive, 'dry_run': dry_run, 'priority': priority}
        result = self._send_request(url, type_='PUT', data=dumps({'options': options}))
        if result.status_code != codes.created:  # pylint: disable=no-member
            exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)
            raise exc_cls(exc_msg)
        return True

    def list_subscription_rules(self, account, name):
        """
        List the associated rules of a subscription.

        :param account: Account of the subscription.
        :param name: Name of the subscription.
        """
        path = '/'.join([self.SUB_BASEURL, account, name, 'Rules'])
        url = build_url(choice(self.list_hosts), path=path)
        result = self._send_request(url, type_='GET')
        if result.status_code != codes.ok:  # pylint: disable=no-member
            exc_cls, exc_msg = self._get_exception(headers=result.headers, status_code=result.status_code, data=result.content)
            raise exc_cls(exc_msg)
        return self._load_json_data(result)
from json import dumps
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url
class ConfigClient(BaseClient):
    """Client class for working with the configuration"""

    CONFIG_BASEURL = 'config'

    def get_config(self, section=None, option=None):
        """
        Sends the request to get the matching configuration.

        :param section: the optional name of the section.
        :param option: the optional option within the section.
        :return: dictionary containing the configuration.
        """
        if section is None and option is not None:
            # An option only makes sense within a section.
            raise ValueError('--section not specified')
        parts = [self.CONFIG_BASEURL]
        if section is not None:
            parts.append(section)
            if option is not None:
                parts.append(option)
        url = build_url(choice(self.list_hosts), path='/'.join(parts))
        response = self._send_request(url, type_='GET')
        if response.status_code != codes.ok:
            exc_cls, exc_msg = self._get_exception(headers=response.headers, status_code=response.status_code, data=response.content)
            raise exc_cls(exc_msg)
        return response.json()

    def set_config_option(self, section, option, value, use_body_for_params=True):
        """
        Sends the request to create or set an option within a section. Missing sections will be created.

        :param section: the name of the section.
        :param option: the name of the option.
        :param value: the value to set on the config option
        :param use_body_for_params: send parameters in a json-encoded request body instead of url-encoded
                                    TODO: remove this parameter
                                    The format of the /config endpoint was recently changed. We migrated from performing a PUT on
                                    "/config/<section>/<option>/<value>" to sending the parameters using a json-encoded body.
                                    This was done to fix multiple un-wanted side effects related to how the middleware treats
                                    values encoded in a path.
                                    For a smooth transition, we allow both cases for now, but we should migrate to only passing
                                    values via the request body.
        :return: True if option was removed successfully. False otherwise.
        """
        if use_body_for_params:
            # Preferred form: value travels in a json body, avoiding
            # path-encoding side effects in the middleware.
            url = build_url(choice(self.list_hosts), path=self.CONFIG_BASEURL)
            response = self._send_request(url, type_='POST', data=dumps({section: {option: value}}))
        else:
            # Legacy form: everything encoded in the URL path.
            url = build_url(choice(self.list_hosts), path='/'.join([self.CONFIG_BASEURL, section, option, value]))
            response = self._send_request(url, type_='PUT')
        if response.status_code != codes.created:
            exc_cls, exc_msg = self._get_exception(headers=response.headers, status_code=response.status_code, data=response.content)
            raise exc_cls(exc_msg)
        return True

    def delete_config_option(self, section, option):
        """
        Sends the request to remove an option from a section

        :param section: the name of the section.
        :param option: the name of the option.
        :return: True if option was removed successfully. False otherwise.
        """
        url = build_url(choice(self.list_hosts), path='/'.join([self.CONFIG_BASEURL, section, option]))
        response = self._send_request(url, type_='DEL')
        if response.status_code != codes.ok:
            exc_cls, exc_msg = self._get_exception(headers=response.headers, status_code=response.status_code, data=response.content)
            raise exc_cls(exc_msg)
        return True
from json import dumps
from urllib.parse import quote_plus
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url
class AccountLimitClient(BaseClient):
    """Account limit client class for working with account limits"""

    ACCOUNTLIMIT_BASEURL = 'accountlimits'

    def set_account_limit(self, account, rse, bytes_, locality):
        """
        Sets an account limit for a given limit scope.

        :param account: The name of the account.
        :param rse: The rse name.
        :param bytes_: An integer with the limit in bytes.
        :param locality: The scope of the account limit. 'local' or 'global'.
        :return: True if quota was created successfully else False.
        """
        if locality == 'local':
            return self.set_local_account_limit(account, rse, bytes_)
        if locality == 'global':
            return self.set_global_account_limit(account, rse, bytes_)
        # Imported lazily to keep client start-up lean.
        from rucio.common.exception import UnsupportedOperation
        raise UnsupportedOperation('The provided scope (%s) for the account limit was invalid' % locality)

    def delete_account_limit(self, account, rse, locality):
        """
        Deletes an account limit for a given limit scope.

        :param account: The name of the account.
        :param rse: The rse name.
        :param locality: The scope of the account limit. 'local' or 'global'.
        :return: True if quota was created successfully else False.
        """
        if locality == 'local':
            return self.delete_local_account_limit(account, rse)
        if locality == 'global':
            return self.delete_global_account_limit(account, rse)
        # Imported lazily to keep client start-up lean.
        from rucio.common.exception import UnsupportedOperation
        raise UnsupportedOperation('The provided scope (%s) for the account limit was invalid' % locality)

    def set_local_account_limit(self, account, rse, bytes_):
        """
        Sends the request to set an account limit for an account.

        :param account: The name of the account.
        :param rse: The rse name.
        :param bytes_: An integer with the limit in bytes.
        :return: True if quota was created successfully else False.
        """
        path = '/'.join([self.ACCOUNTLIMIT_BASEURL, 'local', account, rse])
        url = build_url(choice(self.list_hosts), path=path)
        response = self._send_request(url, type_='POST', data=dumps({'bytes': bytes_}))
        if response.status_code != codes.created:
            exc_cls, exc_msg = self._get_exception(headers=response.headers, status_code=response.status_code, data=response.content)
            raise exc_cls(exc_msg)
        return True

    def delete_local_account_limit(self, account, rse):
        """
        Sends the request to remove an account limit.

        :param account: The name of the account.
        :param rse: The rse name.
        :return: True if quota was removed successfully. False otherwise.
        :raises AccountNotFound: if account doesn't exist.
        """
        path = '/'.join([self.ACCOUNTLIMIT_BASEURL, 'local', account, rse])
        url = build_url(choice(self.list_hosts), path=path)
        response = self._send_request(url, type_='DEL')
        if response.status_code != codes.ok:
            exc_cls, exc_msg = self._get_exception(headers=response.headers, status_code=response.status_code, data=response.content)
            raise exc_cls(exc_msg)
        return True

    def set_global_account_limit(self, account, rse_expression, bytes_):
        """
        Sends the request to set a global account limit for an account.

        :param account: The name of the account.
        :param rse_expression: The rse expression.
        :param bytes_: An integer with the limit in bytes.
        :return: True if quota was created successfully else False.
        """
        # RSE expressions may contain characters with URL meaning, so escape them.
        path = '/'.join([self.ACCOUNTLIMIT_BASEURL, 'global', account, quote_plus(rse_expression)])
        url = build_url(choice(self.list_hosts), path=path)
        response = self._send_request(url, type_='POST', data=dumps({'bytes': bytes_}))
        if response.status_code != codes.created:
            exc_cls, exc_msg = self._get_exception(headers=response.headers, status_code=response.status_code, data=response.content)
            raise exc_cls(exc_msg)
        return True

    def delete_global_account_limit(self, account, rse_expression):
        """
        Sends the request to remove a global account limit.

        :param account: The name of the account.
        :param rse_expression: The rse expression.
        :return: True if quota was removed successfully. False otherwise.
        :raises AccountNotFound: if account doesn't exist.
        """
        # RSE expressions may contain characters with URL meaning, so escape them.
        path = '/'.join([self.ACCOUNTLIMIT_BASEURL, 'global', account, quote_plus(rse_expression)])
        url = build_url(choice(self.list_hosts), path=path)
        response = self._send_request(url, type_='DEL')
        if response.status_code != codes.ok:
            exc_cls, exc_msg = self._get_exception(headers=response.headers, status_code=response.status_code, data=response.content)
            raise exc_cls(exc_msg)
        return True
from json import dumps, loads
from urllib.parse import quote_plus
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url
class MetaClient(BaseClient):
"""Meta client class for working with data identifier attributes"""
META_BASEURL = 'meta'
def add_key(self, key, key_type, value_type=None, value_regexp=None):
"""
Sends the request to add a new key.
:param key: the name for the new key.
:param key_type: the type of the key: all(container, dataset, file), collection(dataset or container), file, derived(compute from file for collection).
:param value_type: the type of the value, if defined.
:param value_regexp: the regular expression that values should match, if defined.
:return: True if key was created successfully.
:raises Duplicate: if key already exists.
"""
path = '/'.join([self.META_BASEURL, quote_plus(key)])
url = build_url(choice(self.list_hosts), path=path)
data = dumps({'value_type': value_type and str(value_type),
'value_regexp': value_regexp,
'key_type': key_type})
r = self._send_request(url, type_='POST', data=data)
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_keys(self):
"""
Sends the request to list all keys.
:return: a list containing the names of all keys.
"""
path = self.META_BASEURL + '/'
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url)
if r.status_code == codes.ok:
keys = loads(r.text)
return keys
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def list_values(self, key):
"""
Sends the request to list all values for a key.
:return: a list containing the names of all values for a key.
"""
path = '/'.join([self.META_BASEURL, quote_plus(key)]) + '/'
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url)
if r.status_code == codes.ok:
values = loads(r.text)
return values
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def add_value(self, key, value):
"""
Sends the request to add a value to a key.
:param key: the name for key.
:param value: the value.
:return: True if value was created successfully.
:raises Duplicate: if valid already exists.
"""
path = '/'.join([self.META_BASEURL, quote_plus(key)]) + '/'
data = dumps({'value': value})
url = build_url(choice(self.list_hosts), path=path)
r = self._send_request(url, type_='POST', data=data)
if r.status_code == codes.created:
return True
else:
exc_cls, exc_msg = self._get_exception(headers=r.headers, status_code=r.status_code, data=r.content)
raise exc_cls(exc_msg)
def del_value(self, key, value):
"""
Delete a value for a key.
:param key: the name for key.
:param value: the value.
"""
pass
def del_key(self, key):
"""
Delete an allowed key.
:param key: the name for key.
"""
pass
def update_key(self, key, type_=None, regexp=None):
"""
Update a key.
:param key: the name for key.
:param type_: the type of the value, if defined.
:param regexp: the regular expression that values should match, if defined.
"""
pass | /rucio-clients-32.2.0.tar.gz/rucio-clients-32.2.0/lib/rucio/client/metaclient.py | 0.608478 | 0.213316 | metaclient.py | pypi |
from json import dumps, loads
from typing import Any, Optional, Union
from urllib.parse import quote_plus
from requests.status_codes import codes
from rucio.client.baseclient import BaseClient
from rucio.client.baseclient import choice
from rucio.common.utils import build_url
class RuleClient(BaseClient):
    """RuleClient class for working with replication rules"""

    RULE_BASEURL = 'rules'

    def _raise_for_status(self, response):
        """
        Map an error response onto the matching rucio exception and raise it.

        Always forwards the response body (``data=response.content``) so that
        server-side error details are preserved; previously a few methods
        dropped it, yielding less informative exceptions than the rest of
        this class.
        """
        exc_cls, exc_msg = self._get_exception(headers=response.headers,
                                               status_code=response.status_code,
                                               data=response.content)
        raise exc_cls(exc_msg)

    def add_replication_rule(
        self,
        dids: list[dict[str, Any]],
        copies: int,
        rse_expression: str,
        priority: int = 3,
        lifetime: Optional[int] = None,
        grouping: str = 'DATASET',
        notify: str = 'N',
        source_replica_expression: Optional[str] = None,
        activity: Optional[str] = None,
        account: Optional[str] = None,
        meta: Optional[dict[str, Any]] = None,
        ignore_availability: bool = False,
        purge_replicas: bool = False,
        ask_approval: bool = False,
        asynchronous: bool = False,
        locked: bool = False,
        delay_injection=None,
        comment=None,
        weight=None,
    ):
        """
        :param dids: The data identifier set, as a list of dicts with 'scope' and 'name'.
        :param copies: The number of replicas.
        :param rse_expression: Boolean string expression to give the list of RSEs.
        :param priority: Priority of the transfers.
        :param lifetime: The lifetime of the replication rules (in seconds).
        :param grouping: ALL -  All files will be replicated to the same RSE.
                         DATASET - All files in the same dataset will be replicated to the same RSE.
                         NONE - Files will be completely spread over all allowed RSEs without any grouping considerations at all.
        :param notify: Notification setting for the rule (Y, N, C).
        :param source_replica_expression: RSE Expression for RSEs to be considered for source replicas.
        :param activity: Transfer Activity to be passed to FTS.
        :param account: The account owning the rule.
        :param meta: Metadata, as dictionary.
        :param ignore_availability: Option to ignore the availability of RSEs.
        :param purge_replicas: When the rule gets deleted purge the associated replicas immediately.
        :param ask_approval: Ask for approval of this replication rule.
        :param asynchronous: Create rule asynchronously by judge-injector.
        :param locked: If the rule is locked, it cannot be deleted.
        :param delay_injection: Delay (in seconds) before the rule is injected.
        :param comment: Comment about the rule.
        :param weight: If the weighting option of the replication rule is used, the choice of RSEs takes their weight into account.
        :returns: The ids of the created rules, decoded from the server response.
        """
        path = self.RULE_BASEURL + '/'
        url = build_url(choice(self.list_hosts), path=path)
        # TODO remove the subscription_id from the client; It will only be used by the core;
        data = dumps({'dids': dids, 'copies': copies, 'rse_expression': rse_expression,
                      'weight': weight, 'lifetime': lifetime, 'grouping': grouping,
                      'account': account, 'locked': locked, 'source_replica_expression': source_replica_expression,
                      'activity': activity, 'notify': notify, 'purge_replicas': purge_replicas,
                      'ignore_availability': ignore_availability, 'comment': comment, 'ask_approval': ask_approval,
                      'asynchronous': asynchronous, 'delay_injection': delay_injection, 'priority': priority, 'meta': meta})
        r = self._send_request(url, type_='POST', data=data)
        if r.status_code == codes.created:
            return loads(r.text)
        self._raise_for_status(r)

    def delete_replication_rule(
        self, rule_id: str, purge_replicas: Optional[bool] = None
    ):
        """
        Deletes a replication rule and all associated locks.

        :param rule_id: The id of the rule to be deleted
        :param purge_replicas: Immediately delete the replicas.
        :returns: True on success.
        :raises: RuleNotFound, AccessDenied
        """
        path = self.RULE_BASEURL + '/' + rule_id
        url = build_url(choice(self.list_hosts), path=path)
        data = dumps({'purge_replicas': purge_replicas})
        r = self._send_request(url, type_='DEL', data=data)
        if r.status_code == codes.ok:
            return True
        self._raise_for_status(r)

    def get_replication_rule(self, rule_id: str):
        """
        Get a replication rule.

        :param rule_id: The id of the rule to be retrieved.
        :returns: Dictionary describing the rule.
        :raises: RuleNotFound
        """
        path = self.RULE_BASEURL + '/' + rule_id
        url = build_url(choice(self.list_hosts), path=path)
        r = self._send_request(url, type_='GET')
        if r.status_code == codes.ok:
            return next(self._load_json_data(r))
        self._raise_for_status(r)

    def update_replication_rule(self, rule_id: str, options: dict[str, Any]):
        """
        Update a replication rule.

        :param rule_id: The id of the rule to be retrieved.
        :param options: Options dictionary.
        :returns: True on success.
        :raises: RuleNotFound
        """
        path = self.RULE_BASEURL + '/' + rule_id
        url = build_url(choice(self.list_hosts), path=path)
        data = dumps({'options': options})
        r = self._send_request(url, type_='PUT', data=data)
        if r.status_code == codes.ok:
            return True
        self._raise_for_status(r)

    def reduce_replication_rule(
        self, rule_id: str, copies: int, exclude_expression=None
    ):
        """
        Reduce the number of copies of a rule.

        :param rule_id: Rule to be reduced.
        :param copies: Number of copies of the new rule.
        :param exclude_expression: RSE Expression of RSEs to exclude.
        :returns: The id of the new rule, decoded from the server response.
        :raises: RuleReplaceFailed, RuleNotFound
        """
        path = self.RULE_BASEURL + '/' + rule_id + '/reduce'
        url = build_url(choice(self.list_hosts), path=path)
        data = dumps({'copies': copies, 'exclude_expression': exclude_expression})
        r = self._send_request(url, type_='POST', data=data)
        if r.status_code == codes.ok:
            return loads(r.text)
        self._raise_for_status(r)

    def move_replication_rule(
        self, rule_id: str, rse_expression: str, override
    ):
        """
        Move a replication rule to another RSE and, once done, delete the original one.

        :param rule_id: Rule to be moved.
        :param rse_expression: RSE expression of the new rule.
        :param override: Configurations to update for the new rule.
        :returns: The id of the new rule, decoded from the server response.
        :raises: RuleNotFound, RuleReplaceFailed
        """
        path = self.RULE_BASEURL + '/' + rule_id + '/move'
        url = build_url(choice(self.list_hosts), path=path)
        data = dumps({
            'rule_id': rule_id,
            'rse_expression': rse_expression,
            'override': override,
        })
        r = self._send_request(url, type_='POST', data=data)
        if r.status_code == codes.created:
            return loads(r.text)
        self._raise_for_status(r)

    def approve_replication_rule(self, rule_id: str):
        """
        Approve a rule that is waiting for approval.

        :param rule_id: Rule to be approved.
        :returns: True on success.
        :raises: RuleNotFound
        """
        path = self.RULE_BASEURL + '/' + rule_id
        url = build_url(choice(self.list_hosts), path=path)
        data = dumps({'options': {'approve': True}})
        r = self._send_request(url, type_='PUT', data=data)
        if r.status_code == codes.ok:
            return True
        self._raise_for_status(r)

    def deny_replication_rule(self, rule_id: str, reason: Optional[str] = None):
        """
        Deny a rule that is waiting for approval.

        :param rule_id: Rule to be denied.
        :param reason: Reason for denying the rule.
        :returns: True on success.
        :raises: RuleNotFound
        """
        path = self.RULE_BASEURL + '/' + rule_id
        url = build_url(choice(self.list_hosts), path=path)
        options: dict[str, Union[bool, str]] = {'approve': False}
        if reason:
            options['comment'] = reason
        data = dumps({'options': options})
        r = self._send_request(url, type_='PUT', data=data)
        if r.status_code == codes.ok:
            return True
        self._raise_for_status(r)

    def list_replication_rule_full_history(
        self, scope: Union[str, bytes], name: Union[str, bytes]
    ):
        """
        List the rule history of a DID.

        :param scope: The scope of the DID.
        :param name: The name of the DID.
        :returns: Generator of history entries.
        """
        path = '/'.join([self.RULE_BASEURL, quote_plus(scope), quote_plus(name), 'history'])
        url = build_url(choice(self.list_hosts), path=path)
        r = self._send_request(url, type_='GET')
        if r.status_code == codes.ok:
            return self._load_json_data(r)
        self._raise_for_status(r)

    def examine_replication_rule(self, rule_id: str):
        """
        Examine a replication rule for errors during transfer.

        :param rule_id: Rule to be analysed.
        :returns: Dictionary with the analysis result.
        :raises: RuleNotFound
        """
        path = self.RULE_BASEURL + '/' + rule_id + '/analysis'
        url = build_url(choice(self.list_hosts), path=path)
        r = self._send_request(url, type_='GET')
        if r.status_code == codes.ok:
            return next(self._load_json_data(r))
        self._raise_for_status(r)

    def list_replica_locks(self, rule_id: str):
        """
        List details of all replica locks for a rule.

        :param rule_id: Rule whose locks are listed.
        :returns: Generator of lock dictionaries.
        :raises: RuleNotFound
        """
        path = self.RULE_BASEURL + '/' + rule_id + '/locks'
        url = build_url(choice(self.list_hosts), path=path)
        r = self._send_request(url, type_='GET')
        if r.status_code == codes.ok:
            return self._load_json_data(r)
        self._raise_for_status(r)

    def list_replication_rules(self, filters=None):
        """
        List all replication rules which match a filter

        :param filters: dictionary of attributes by which the rules should be filtered
        :returns: Generator of rule dictionaries.
        """
        filters = filters or {}
        path = self.RULE_BASEURL + '/'
        url = build_url(choice(self.list_hosts), path=path)
        r = self._send_request(url, type_='GET', params=filters)
        if r.status_code == codes.ok:
            return self._load_json_data(r)
        self._raise_for_status(r)
import copy
import logging
import random
from time import sleep
from urllib.parse import urlparse
from rucio.common import exception, utils, constants
from rucio.common.config import config_get_int
from rucio.common.constraints import STRING_TYPES
from rucio.common.logging import formatted_logger
from rucio.common.utils import make_valid_did, GLOBALLY_SUPPORTED_CHECKSUMS
def get_rse_info(rse=None, vo='def', rse_id=None, session=None):
    """
    Returns all protocol related RSE attributes.

    Call with either rse and vo, or (in server mode) rse_id

    :param rse: Name of the requested RSE
    :param vo: The VO for the RSE.
    :param rse_id: The id of the rse (use in server mode to avoid db calls)
    :param session: The eventual database session.
    :returns: a dict object with the following attributes:
        id ... an internal identifier
        rse ... the name of the RSE as string
        type ... the storage type of the RSE e.g. DISK
        volatile ... boolean indicating if the RSE is volatile
        verify_checksum ... boolean indicating whether RSE supports requests for checksums
        deterministic ... boolean indicating if the naming of the files follows the defined determinism
        domain ... indicating the domain that should be assumed for transfers. Values are 'ALL', 'LAN', or 'WAN'
        protocols ... all supported protocols in form of a list of dict objects with the following structure
            - scheme ... protocol scheme e.g. http, srm, ...
            - hostname ... hostname of the site
            - prefix ... path to the folder where the files are stored
            - port ... port used for this protocol
            - impl ... naming the python class of the protocol implementation
            - extended_attributes ... additional information for the protocol
            - domains ... a dict naming each domain and the priority of the protocol for each operation (lower is better, zero is not supported)
    :raises RSENotFound: if the provided RSE could not be found in the database.
    """
    # __request_rse_info and RSE_REGION are injected when the module is loaded,
    # since they depend on the rucio environment (server or client); both are
    # defined in /rucio/rse/__init__.py.
    cache_key = 'rse_info_%s' % ('{}:{}'.format(rse, vo) if rse_id is None else str(rse_id))
    rse_info = RSE_REGION.get(cache_key)  # NOQA pylint: disable=undefined-variable
    if not rse_info:
        # Cache miss: fetch from the authoritative source and memoise for later calls.
        rse_info = __request_rse_info(str(rse), vo=vo, rse_id=rse_id, session=session)  # NOQA pylint: disable=undefined-variable
        RSE_REGION.set(cache_key, rse_info)  # NOQA pylint: disable=undefined-variable
    return rse_info
def _get_possible_protocols(rse_settings, operation, scheme=None, domain=None, impl=None):
"""
Filter the list of available protocols or provided by the supported ones.
:param rse_settings: The rse settings.
:param operation: The operation (write, read).
:param scheme: Optional filter if no specific protocol is defined in
rse_setting for the provided operation.
:param domain: Optional domain (lan/wan), if not specified, both will be returned
:returns: The list of possible protocols.
"""
operation = operation.lower()
candidates = rse_settings['protocols']
# convert scheme to list, if given as string
if scheme and not isinstance(scheme, list):
scheme = scheme.split(',')
tbr = []
for protocol in candidates:
# Check if impl given and filter if so
if impl and protocol['impl'] != impl:
tbr.append(protocol)
continue
# Check if scheme given and filter if so
if scheme and protocol['scheme'] not in scheme:
tbr.append(protocol)
continue
filtered = True
if not domain:
for d in list(protocol['domains'].keys()):
if protocol['domains'][d][operation]:
filtered = False
else:
if protocol['domains'].get(domain, {operation: None}).get(operation):
filtered = False
if filtered:
tbr.append(protocol)
if len(candidates) <= len(tbr):
raise exception.RSEProtocolNotSupported('No protocol for provided settings'
' found : %s.' % str(rse_settings))
return [c for c in candidates if c not in tbr]
def get_protocols_ordered(rse_settings, operation, scheme=None, domain='wan', impl=None):
    """
    Return all protocols of an RSE matching the given filters, ordered by
    their priority for the requested operation (best, i.e. lowest, first).

    :param rse_settings: The rse settings.
    :param operation: The operation (read, write, delete, ...).
    :param scheme: Optional protocol scheme filter.
    :param domain: The domain to consider (default 'wan').
    :param impl: Optional filter on the protocol implementation class.
    :returns: Sorted list of matching protocol descriptions.
    :raises RSEOperationNotSupported: if the operation is unknown.
    :raises RSEProtocolDomainNotSupported: if the domain is unknown.
    """
    if operation not in utils.rse_supported_protocol_operations():
        raise exception.RSEOperationNotSupported('Operation %s is not supported' % operation)

    if domain and domain not in utils.rse_supported_protocol_domains():
        raise exception.RSEProtocolDomainNotSupported('Domain %s not supported' % domain)

    matching = _get_possible_protocols(rse_settings, operation, scheme, domain, impl)
    return sorted(matching, key=lambda prot: prot['domains'][domain][operation])
def select_protocol(rse_settings, operation, scheme=None, domain='wan'):
    """
    Select a single protocol for the given operation: the one with the best
    (lowest) priority, with ties broken at random for load balancing.

    :param rse_settings: The rse settings.
    :param operation: The operation (read, write, delete, ...).
    :param scheme: Optional protocol scheme filter.
    :param domain: The domain to consider (default 'wan').
    :returns: A single protocol description dict.
    :raises RSEOperationNotSupported: if the operation is unknown.
    :raises RSEProtocolDomainNotSupported: if the domain is unknown.
    """
    if operation not in utils.rse_supported_protocol_operations():
        raise exception.RSEOperationNotSupported('Operation %s is not supported' % operation)

    if domain and domain not in utils.rse_supported_protocol_domains():
        raise exception.RSEProtocolDomainNotSupported('Domain %s not supported' % domain)

    candidates = _get_possible_protocols(rse_settings, operation, scheme, domain)

    # Shuffle before the (stable) sort so that protocols sharing the best
    # priority are picked at random, load-balancing over equal sources.
    random.shuffle(candidates)
    ranked = sorted(candidates, key=lambda prot: prot['domains'][domain][operation])
    return ranked[0]
def create_protocol(rse_settings, operation, scheme=None, domain='wan', auth_token=None, protocol_attr=None, logger=logging.log, impl=None):
    """
    Instanciates the protocol defined for the given operation.

    :param rse_settings: RSE attributes
    :param operation: Intended operation for this protocol
    :param scheme: Optional filter if no specific protocol is defined in rse_setting for the provided operation
    :param domain: Optional specification of the domain
    :param auth_token: Optionally passing JSON Web Token (OIDC) string for authentication
    :param protocol_attr: Optionally passing the full protocol availability information to correctly select WAN/LAN
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :param impl: Optional protocol implementation class name to enforce
    :returns: An instance of the requested protocol
    :raises RSEOperationNotSupported: if the operation is not in the supported set
    :raises RSEProtocolDomainNotSupported: if the domain is unknown
    :raises RSEProtocolNotSupported: if no matching protocol could be selected
    """

    # Verify feasibility of Protocol
    operation = operation.lower()
    if operation not in utils.rse_supported_protocol_operations():
        raise exception.RSEOperationNotSupported('Operation %s is not supported' % operation)

    if domain and domain not in utils.rse_supported_protocol_domains():
        raise exception.RSEProtocolDomainNotSupported('Domain %s not supported' % domain)

    # Choose the protocol description to instantiate: an explicit `impl`
    # wins, then a caller-provided `protocol_attr` (which is validated),
    # else the best-priority protocol for the operation/domain.
    if impl:
        candidate = _get_possible_protocols(rse_settings, operation, scheme, domain, impl=impl)
        if len(candidate) == 0:
            raise exception.RSEProtocolNotSupported('Protocol implementation %s operation %s on domain %s not supported' % (impl, operation, domain))
        protocol_attr = candidate[0]
    elif not protocol_attr:
        protocol_attr = select_protocol(rse_settings, operation, scheme, domain)
    else:
        candidates = _get_possible_protocols(rse_settings, operation, scheme, domain)
        if protocol_attr not in candidates:
            raise exception.RSEProtocolNotSupported('Protocol %s operation %s on domain %s not supported' % (protocol_attr, operation, domain))

    # Instantiate protocol
    comp = protocol_attr['impl'].split('.')
    # Prefix log lines with the trailing module/class name, e.g. "posix.Default: "
    prefix = '.'.join(comp[-2:]) + ': '
    logger = formatted_logger(logger, prefix + "%s")
    # Import the implementation module and walk down attribute by attribute
    # to reach the protocol class object.
    mod = __import__('.'.join(comp[:-1]))
    for n in comp[1:]:
        try:
            mod = getattr(mod, n)
        except AttributeError as e:
            logger(logging.DEBUG, 'Protocol implementations not supported.')
            raise exception.RucioException(str(e))  # TODO: provide proper rucio exception

    protocol_attr['auth_token'] = auth_token
    protocol = mod(protocol_attr, rse_settings, logger=logger)

    return protocol
def lfns2pfns(rse_settings, lfns, operation='write', scheme=None, domain='wan', auth_token=None, logger=logging.log, impl=None):
    """
    Convert the lfn to a pfn

    :rse_settings: RSE attributes
    :param lfns: logical file names as a dict containing 'scope' and 'name' as keys. For bulk a list of dicts can be provided
    :param operation: Intended operation for this protocol
    :param scheme: Optional filter if no specific protocol is defined in rse_setting for the provided operation
    :param domain: Optional specification of the domain
    :param auth_token: Optionally passing JSON Web Token (OIDC) string for authentication
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :param impl: Optional protocol implementation class name to enforce
    :returns: a dict with scope:name as key and the PFN as value
    """
    # Delegate the actual translation to the protocol selected for the
    # requested operation/domain.
    selected_protocol = create_protocol(rse_settings, operation, scheme, domain,
                                        auth_token=auth_token, logger=logger, impl=impl)
    return selected_protocol.lfns2pfns(lfns)
def parse_pfns(rse_settings, pfns, operation='read', domain='wan', auth_token=None):
    """
    Checks if a PFN is feasible for a given RSE. If so it splits the pfn in its various components.

    :rse_settings: RSE attributes
    :param pfns: list of PFNs
    :param operation: Intended operation for this protocol
    :param domain: Optional specification of the domain
    :param auth_token: Optionally passing JSON Web Token (OIDC) string for authentication
    :returns: A dict with the parts known by the selected protocol e.g. scheme, hostname, prefix, path, name

    :raises RSEFileNameNotSupported: if provided PFN is not supported by the RSE/protocol
    :raises RSENotFound: if the referred storage is not found i the repository (rse_id)
    :raises InvalidObject: If the properties parameter doesn't include scheme, hostname, and port as keys
    :raises RSEOperationNotSupported: If no matching protocol was found for the requested operation
    """
    # All PFNs must share exactly one scheme, otherwise a single protocol
    # cannot parse them (this also rejects an empty PFN list).
    schemes = {urlparse(pfn).scheme for pfn in pfns}
    if len(schemes) != 1:
        raise ValueError('All PFNs must provide the same protocol scheme')

    selected_protocol = create_protocol(rse_settings, operation, urlparse(pfns[0]).scheme,
                                        domain, auth_token=auth_token)
    return selected_protocol.parse_pfns(pfns)
def exists(rse_settings, files, domain='wan', scheme=None, impl=None, auth_token=None, vo='def', logger=logging.log):
    """
    Checks if a file is present at the connected storage.
    Providing a list indicates the bulk mode.

    :rse_settings: RSE attributes
    :param files: a single dict or a list with dicts containing 'scope' and 'name'
                  if LFNs are used and only 'name' if PFNs are used.
                  E.g. {'name': '2_rse_remote_get.raw', 'scope': 'user.jdoe'}, {'name': 'user/jdoe/5a/98/3_rse_remote_get.raw'}
    :param domain: The network domain, either 'wan' (default) or 'lan'
    :param scheme: Optional filter to select which protocol to be used.
    :param impl: Optional protocol implementation class name to enforce
    :param auth_token: Optionally passing JSON Web Token (OIDC) string for authentication
    :param vo: The VO for the RSE
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.

    :returns: True/False for a single file or a dict object with 'scope:name' for LFNs or 'name' for PFNs as keys and True or the exception as value for each file in bulk mode
    :raises RSENotConnected: no connection to a specific storage has been established
    """
    ret = {}
    gs = True  # gs represents the global status which indicates if every operation worked in bulk mode

    protocol = create_protocol(rse_settings, 'read', scheme=scheme, impl=impl, domain=domain, auth_token=auth_token, logger=logger)
    protocol.connect()
    try:
        # Probe whether the 'read' protocol implements exists(); some
        # implementations only support it through the 'write' protocol.
        protocol.exists(None)
    except NotImplementedError:
        protocol = create_protocol(rse_settings, 'write', scheme=scheme, domain=domain, auth_token=auth_token, logger=logger)
        protocol.connect()
    except:
        # Deliberate best-effort probe: any other failure just means the
        # dummy argument was rejected, i.e. exists() is implemented.
        pass

    files = [files] if not type(files) is list else files
    for f in files:
        exists = None
        if isinstance(f, STRING_TYPES):
            # A plain string is treated as a ready-made PFN.
            exists = protocol.exists(f)
            ret[f] = exists
        elif 'scope' in f:  # a LFN is provided
            pfn = list(protocol.lfns2pfns(f).values())[0]
            if isinstance(pfn, exception.RucioException):
                raise pfn
            logger(logging.DEBUG, 'Checking if %s exists', pfn)
            # deal with URL signing if required
            if rse_settings['sign_url'] is not None and pfn[:5] == 'https':
                pfn = __get_signed_url(rse_settings['rse'], rse_settings['sign_url'], 'read', pfn, vo)    # NOQA pylint: disable=undefined-variable
            exists = protocol.exists(pfn)
            ret[f['scope'] + ':' + f['name']] = exists
        else:
            # Dict without 'scope': 'name' already holds the PFN.
            exists = protocol.exists(f['name'])
            ret[f['name']] = exists
        if not exists:
            gs = False

    protocol.close()
    if len(ret) == 1:
        # Single-file mode: return the bare result instead of the dict.
        for x in ret:
            return ret[x]
    return [gs, ret]
def upload(rse_settings, lfns, domain='wan', source_dir=None, force_pfn=None, force_scheme=None, transfer_timeout=None, delete_existing=False, sign_service=None, auth_token=None, vo='def', logger=logging.log, impl=None):
    """
    Uploads a file to the connected storage.
    Providing a list indicates the bulk mode.

    :rse_settings: RSE attributes
    :param lfns: a single dict or a list with dicts containing 'scope' and 'name'.
                 Examples:
                 [
                 {'name': '1_rse_local_put.raw', 'scope': 'user.jdoe', 'filesize': 42, 'adler32': '87HS3J968JSNWID'},
                 {'name': '2_rse_local_put.raw', 'scope': 'user.jdoe', 'filesize': 4711, 'adler32': 'RSSMICETHMISBA837464F'}
                 ]
                 If the 'filename' key is present, it will be used by Rucio as the actual name of the file on disk (separate from the Rucio 'name').
    :param domain: The network domain, either 'wan' (default) or 'lan'
    :param source_dir: path to the local directory including the source files
    :param force_pfn: use the given PFN -- can lead to dark data, use sparingly
    :param force_scheme: use the given protocol scheme, overriding the protocol priority in the RSE description
    :param transfer_timeout: set this timeout (in seconds) for the transfers, for protocols that support it
    :param delete_existing: remove a previously completed upload (and a stale '.rucio.upload' leftover) before uploading
    :param sign_service: use the given service (e.g. gcs, s3, swift) to sign the URL
    :param auth_token: Optionally passing JSON Web Token (OIDC) string for authentication
    :param vo: The VO for the RSE
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :param impl: Optional protocol implementation class name to enforce

    :returns: True/False for a single file or a dict object with 'scope:name' as keys and True or the exception as value for each file in bulk mode
    :raises RSENotConnected: no connection to a specific storage has been established
    :raises SourceNotFound: local source file can not be found
    :raises DestinationNotAccessible: remote destination directory is not accessible
    :raises ServiceUnavailable: for any other reason
    """
    ret = {}
    gs = True  # gs represents the global status which indicates if every operation worked in bulk mode

    # One protocol for writing, a second one for deleting leftovers of
    # earlier, unsuccessful attempts.
    protocol = create_protocol(rse_settings, 'write', scheme=force_scheme, domain=domain, auth_token=auth_token, logger=logger, impl=impl)
    protocol.connect()
    protocol_delete = create_protocol(rse_settings, 'delete', domain=domain, auth_token=auth_token, logger=logger, impl=impl)
    protocol_delete.connect()

    lfns = [lfns] if not type(lfns) is list else lfns
    for lfn in lfns:
        # 'filename' (if given) is the name on local disk, 'name' the Rucio name.
        base_name = lfn.get('filename', lfn['name'])
        name = lfn.get('name', base_name)
        scope = lfn['scope']

        # A checksum and the filesize are mandatory for post-upload validation.
        if 'adler32' not in lfn and 'md5' not in lfn:
            gs = False
            ret['%s:%s' % (scope, name)] = exception.RucioException('Missing checksum for file %s:%s' % (lfn['scope'], name))
            continue
        if 'filesize' not in lfn:
            gs = False
            ret['%s:%s' % (scope, name)] = exception.RucioException('Missing filesize for file %s:%s' % (lfn['scope'], name))
            continue

        if force_pfn:
            pfn = force_pfn
            readpfn = force_pfn
        else:
            pfn = list(protocol.lfns2pfns(make_valid_did(lfn)).values())[0]
            if isinstance(pfn, exception.RucioException):
                raise pfn
            readpfn = pfn
            if sign_service is not None:
                # need a separate signed URL for read operations (exists and stat)
                readpfn = __get_signed_url(rse_settings['rse'], sign_service, 'read', pfn, vo)   # NOQA pylint: disable=undefined-variable
                pfn = __get_signed_url(rse_settings['rse'], sign_service, 'write', pfn, vo)      # NOQA pylint: disable=undefined-variable

        # First check if renaming operation is supported
        if protocol.renaming:

            # Check if file replica is already on the storage system
            if protocol.overwrite is False and delete_existing is False and protocol.exists(pfn):
                ret['%s:%s' % (scope, name)] = exception.FileReplicaAlreadyExists('File %s in scope %s already exists on storage as PFN %s' % (name, scope, pfn))
                gs = False
            else:
                if protocol.exists('%s.rucio.upload' % pfn):  # Check for left over of previous unsuccessful attempts
                    try:
                        logger(logging.DEBUG, 'Deleting %s.rucio.upload', pfn)
                        protocol_delete.delete('%s.rucio.upload' % list(protocol_delete.lfns2pfns(make_valid_did(lfn)).values())[0])
                    except Exception as e:
                        ret['%s:%s' % (scope, name)] = exception.RSEOperationNotSupported('Unable to remove temporary file %s.rucio.upload: %s' % (pfn, str(e)))
                        gs = False
                        continue

                if delete_existing:
                    if protocol.exists('%s' % pfn):  # Check for previous completed uploads that have to be removed before upload
                        try:
                            logger(logging.DEBUG, 'Deleting %s', pfn)
                            protocol_delete.delete('%s' % list(protocol_delete.lfns2pfns(make_valid_did(lfn)).values())[0])
                        except Exception as e:
                            ret['%s:%s' % (scope, name)] = exception.RSEOperationNotSupported('Unable to remove file %s: %s' % (pfn, str(e)))
                            gs = False
                            continue

                try:  # Try uploading file
                    # Upload under a temporary '.rucio.upload' name; the file
                    # is only renamed to its final PFN after validation.
                    logger(logging.DEBUG, 'Uploading to %s.rucio.upload', pfn)
                    protocol.put(base_name, '%s.rucio.upload' % pfn, source_dir, transfer_timeout=transfer_timeout)
                except Exception as e:
                    gs = False
                    ret['%s:%s' % (scope, name)] = e
                    continue

                valid = None
                try:  # Get metadata of file to verify if upload was successful
                    try:
                        stats = _retry_protocol_stat(protocol, '%s.rucio.upload' % pfn)

                        # Verify all supported checksums and keep track of the verified ones
                        verified_checksums = []
                        for checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS:
                            if (checksum_name in stats) and (checksum_name in lfn):
                                verified_checksums.append(stats[checksum_name] == lfn[checksum_name])

                        # Upload is successful if at least one checksum was found
                        valid = any(verified_checksums)
                        if not valid and ('filesize' in stats) and ('filesize' in lfn):
                            # Fall back to a filesize comparison if no common checksum exists.
                            valid = stats['filesize'] == lfn['filesize']
                    except NotImplementedError:
                        if rse_settings['verify_checksum'] is False:
                            valid = True
                        else:
                            raise exception.RucioException('Checksum not validated')
                except exception.RSEChecksumUnavailable:
                    if rse_settings['verify_checksum'] is False:
                        valid = True
                    else:
                        raise exception.RucioException('Checksum not validated')
                except Exception as e:
                    gs = False
                    ret['%s:%s' % (scope, name)] = e
                    continue

                if valid:  # The upload finished successful and the file can be renamed
                    try:
                        logger(logging.DEBUG, 'Renaming %s.rucio.upload to %s', pfn, pfn)
                        protocol.rename('%s.rucio.upload' % pfn, pfn)
                        ret['%s:%s' % (scope, name)] = True
                    except Exception as e:
                        gs = False
                        ret['%s:%s' % (scope, name)] = e
                else:
                    gs = False
                    ret['%s:%s' % (scope, name)] = exception.RucioException('Replica %s is corrupted.' % pfn)
        else:
            # No renaming support: upload directly to the final PFN.

            # Check if file replica is already on the storage system
            if protocol.overwrite is False and delete_existing is False and protocol.exists(readpfn):
                ret['%s:%s' % (scope, name)] = exception.FileReplicaAlreadyExists('File %s in scope %s already exists on storage as PFN %s' % (name, scope, pfn))
                gs = False
            else:
                try:  # Try uploading file
                    logger(logging.DEBUG, 'Uploading to %s', pfn)
                    protocol.put(base_name, pfn, source_dir, transfer_timeout=transfer_timeout)
                except Exception as e:
                    gs = False
                    ret['%s:%s' % (scope, name)] = e
                    continue

                valid = None
                try:  # Get metadata of file to verify if upload was successful
                    try:
                        stats = _retry_protocol_stat(protocol, pfn)

                        # Verify all supported checksums and keep track of the verified ones
                        verified_checksums = []
                        for checksum_name in GLOBALLY_SUPPORTED_CHECKSUMS:
                            if (checksum_name in stats) and (checksum_name in lfn):
                                verified_checksums.append(stats[checksum_name] == lfn[checksum_name])

                        # Upload is successful if at least one checksum was found
                        valid = any(verified_checksums)
                        if not valid and ('filesize' in stats) and ('filesize' in lfn):
                            # Fall back to a filesize comparison if no common checksum exists.
                            valid = stats['filesize'] == lfn['filesize']
                    except NotImplementedError:
                        if rse_settings['verify_checksum'] is False:
                            valid = True
                        else:
                            raise exception.RucioException('Checksum not validated')
                except exception.RSEChecksumUnavailable:
                    if rse_settings['verify_checksum'] is False:
                        valid = True
                    else:
                        raise exception.RucioException('Checksum not validated')
                except Exception as e:
                    gs = False
                    ret['%s:%s' % (scope, name)] = e
                    continue

                if not valid:
                    gs = False
                    ret['%s:%s' % (scope, name)] = exception.RucioException('Replica %s is corrupted.' % pfn)

    protocol.close()
    protocol_delete.close()
    if len(ret) == 1:
        # Single-file mode: raise the stored exception or return the bare result.
        for x in ret:
            if isinstance(ret[x], Exception):
                raise ret[x]
            else:
                return {0: ret[x], 1: ret, 'success': ret[x], 'pfn': pfn}
    return {0: gs, 1: ret, 'success': gs, 'pfn': pfn}
def delete(rse_settings, lfns, domain='wan', auth_token=None, logger=logging.log, impl=None):
    """
    Delete a file from the connected storage.
    Providing a list indicates the bulk mode.

    :rse_settings: RSE attributes
    :param lfns: a single dict or a list with dicts containing 'scope' and 'name'. E.g. [{'name': '1_rse_remote_delete.raw', 'scope': 'user.jdoe'}, {'name': '2_rse_remote_delete.raw', 'scope': 'user.jdoe'}]
    :param domain: The network domain, either 'wan' (default) or 'lan'
    :param auth_token: Optionally passing JSON Web Token (OIDC) string for authentication
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :param impl: Optional protocol implementation class name to enforce
    :returns: True/False for a single file or a dict object with 'scope:name' as keys and True or the exception as value for each file in bulk mode
    :raises RSENotConnected: no connection to a specific storage has been established
    :raises SourceNotFound: remote source file can not be found on storage
    :raises ServiceUnavailable: for any other reason
    """
    results = {}
    all_ok = True  # global status: False as soon as one deletion fails

    protocol = create_protocol(rse_settings, 'delete', domain=domain, auth_token=auth_token, logger=logger, impl=impl)
    protocol.connect()

    lfns = lfns if type(lfns) is list else [lfns]
    for lfn in lfns:
        label = '%s:%s' % (lfn['scope'], lfn['name'])
        pfn = list(protocol.lfns2pfns(lfn).values())[0]
        try:
            protocol.delete(pfn)
            results[label] = True
        except Exception as error:
            results[label] = error
            all_ok = False

    protocol.close()

    if len(results) == 1:
        # Single-file mode: raise the stored exception or return the bare result.
        outcome = next(iter(results.values()))
        if isinstance(outcome, Exception):
            raise outcome
        return outcome
    return [all_ok, results]
def rename(rse_settings, files, domain='wan', auth_token=None, logger=logging.log, impl=None):
    """
    Rename files stored on the connected storage.
    Providing a list indicates the bulk mode.

    :rse_settings: RSE attributes
    :param files: a single dict or a list with dicts containing 'scope', 'name', 'new_scope' and 'new_name'
                  if LFNs are used or only 'name' and 'new_name' if PFNs are used.
                  If 'new_scope' or 'new_name' are not provided, the current one is used.
                  Examples:
                  [
                  {'name': '3_rse_remote_rename.raw', 'scope': 'user.jdoe', 'new_name': '3_rse_new.raw', 'new_scope': 'user.jdoe'},
                  {'name': 'user/jdoe/d9/cb/9_rse_remote_rename.raw', 'new_name': 'user/jdoe/c6/4a/9_rse_new.raw'}
                  ]
    :param domain: The network domain, either 'wan' (default) or 'lan'
    :param auth_token: Optionally passing JSON Web Token (OIDC) string for authentication
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :param impl: Optional protocol implementation class name to enforce
    :returns: True/False for a single file or a dict object with LFN (key) and True/False (value) in bulk mode
    :raises RSENotConnected: no connection to a specific storage has been established
    :raises SourceNotFound: remote source file can not be found on storage
    :raises DestinationNotAccessible: remote destination directory is not accessible
    :raises ServiceUnavailable: for any other reason
    """
    results = {}
    all_ok = True  # global status: False as soon as one rename fails

    protocol = create_protocol(rse_settings, 'write', domain=domain, auth_token=auth_token, logger=logger, impl=impl)
    protocol.connect()

    files = files if type(files) is list else [files]
    for f in files:
        if 'scope' in f:  # LFN is provided
            key = '%s:%s' % (f['scope'], f['name'])
            # Fall back to the current name/scope when no new one is given.
            if 'new_name' not in f:
                f['new_name'] = f['name']
            if 'new_scope' not in f:
                f['new_scope'] = f['scope']
            pfn = list(protocol.lfns2pfns({'name': f['name'], 'scope': f['scope']}).values())[0]
            new_pfn = list(protocol.lfns2pfns({'name': f['new_name'], 'scope': f['new_scope']}).values())[0]
        else:  # PFN is provided
            pfn = f['name']
            new_pfn = f['new_name']
            key = pfn

        if protocol.exists(new_pfn):
            # Target must not already exist on the storage.
            results[key] = exception.FileReplicaAlreadyExists('File %s already exists on storage' % (new_pfn))
            all_ok = False
        elif not protocol.exists(pfn):
            # Source must be present on the storage.
            results[key] = exception.SourceNotFound('File %s not found on storage' % (pfn))
            all_ok = False
        else:
            try:
                protocol.rename(pfn, new_pfn)
                results[key] = True
            except Exception as error:
                results[key] = error
                all_ok = False

    protocol.close()

    if len(results) == 1:
        # Single-file mode: raise the stored exception or return the bare result.
        outcome = next(iter(results.values()))
        if isinstance(outcome, Exception):
            raise outcome
        return outcome
    return [all_ok, results]
def get_space_usage(rse_settings, scheme=None, domain='wan', auth_token=None, logger=logging.log, impl=None):
    """
    Get RSE space usage information.

    :rse_settings: RSE attributes
    :param scheme: optional filter to select which protocol to be used.
    :param domain: The network domain, either 'wan' (default) or 'lan'
    :param auth_token: Optionally passing JSON Web Token (OIDC) string for authentication
    :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
    :param impl: Optional protocol implementation class name to enforce
    :returns: a list [status, info] where info is a dict containing 'totalsize'
              and 'unusedsize' on success, or the raised exception on failure

    :raises ServiceUnavailable: if some generic error occured in the library.
    """
    ok = True
    info = {}

    protocol = create_protocol(rse_settings, 'read', scheme=scheme, domain=domain, auth_token=auth_token, logger=logger, impl=impl)
    protocol.connect()
    try:
        total, unused = protocol.get_space_usage()
        info = {"totalsize": total, "unusedsize": unused}
    except Exception as error:
        # Hand the exception back to the caller instead of raising.
        info = error
        ok = False
    protocol.close()

    return [ok, info]
def find_matching_scheme(rse_settings_dest, rse_settings_src, operation_src, operation_dest, domain='wan', scheme=None):
    """
    Find the best matching scheme between two RSEs

    :param rse_settings_dest: RSE settings for the destination RSE.
    :param rse_settings_src: RSE settings for the src RSE.
    :param operation_src: Source Operation such as read, write.
    :param operation_dest: Dest Operation such as read, write.
    :param domain: Domain such as lan, wan.
    :param scheme: List of supported schemes (a comma-separated string is accepted too).
    :returns: Tuple of matching schemes (dest_scheme, src_scheme, dest_scheme_priority, src_scheme_priority).
    :raises RSEProtocolNotSupported: if no compatible protocol pair could be found.
    """
    operation_src = operation_src.lower()
    operation_dest = operation_dest.lower()

    # Normalize the scheme filter once, instead of re-checking it on every
    # loop iteration as before (it is loop-invariant).
    if scheme and not isinstance(scheme, list):
        scheme = scheme.split(',')

    def _usable(protocol, operation):
        # A protocol is usable if it passes the scheme filter and has a
        # non-zero priority for the operation in the requested domain.
        if scheme and protocol['scheme'] not in scheme:
            return False
        priority = protocol['domains'].get(domain, {}).get(operation, 1)
        return priority is not None and priority != 0

    # Build fresh candidate lists (the originals are left untouched).
    src_candidates = [p for p in rse_settings_src['protocols'] if _usable(p, operation_src)]
    dest_candidates = [p for p in rse_settings_dest['protocols'] if _usable(p, operation_dest)]

    if not src_candidates or not dest_candidates:
        raise exception.RSEProtocolNotSupported('No protocol for provided settings found : %s.' % str(rse_settings_dest))

    # Shuffle the candidates to load-balance across equal weights.
    random.shuffle(dest_candidates)
    random.shuffle(src_candidates)

    # Stable sort by priority (lower is better) keeps the random order
    # among protocols with equal priority.
    dest_candidates.sort(key=lambda k: k['domains'][domain][operation_dest])
    src_candidates.sort(key=lambda k: k['domains'][domain][operation_src])

    # Return the first compatible (dest, src) pair in priority order.
    for dest_protocol in dest_candidates:
        for src_protocol in src_candidates:
            if __check_compatible_scheme(dest_protocol['scheme'], src_protocol['scheme']):
                return (dest_protocol['scheme'], src_protocol['scheme'], dest_protocol['domains'][domain][operation_dest], src_protocol['domains'][domain][operation_src])

    raise exception.RSEProtocolNotSupported('No protocol for provided settings found : %s.' % str(rse_settings_dest))
def _retry_protocol_stat(protocol, pfn):
    """
    Try to stat the file; on failure retry with exponential backoff
    (1s, 2s, 4s, 8s, 16s, 32s by default). A final attempt is made after
    the retries, and its exception (if any) propagates to the caller.

    :param protocol: The protocol to use to reach this file
    :param pfn: Physical file name of the target for the protocol stat
    """
    retries = config_get_int('client', 'protocol_stat_retries', raise_exception=False, default=6)
    for attempt in range(retries):
        try:
            return protocol.stat(pfn)
        except exception.RSEChecksumUnavailable:
            # The stat itself succeeded, only the checksum is missing;
            # retrying cannot help, so propagate immediately.
            raise
        except NotImplementedError:
            break
        except Exception:
            sleep(2 ** attempt)
    # Last attempt outside the retry loop.
    return protocol.stat(pfn)
def __check_compatible_scheme(dest_scheme, src_scheme):
    """
    Check if two schemes are compatible, such as srm and gsiftp

    :param dest_scheme: Destination scheme
    :param src_scheme: Source scheme
    :returns: True if schemes are compatible, False otherwise.
    """
    # Identical schemes are always compatible; otherwise consult the
    # static compatibility map.
    return dest_scheme == src_scheme or src_scheme in constants.SCHEME_MAP.get(dest_scheme, [])
import errno
import os
from rucio.common.exception import FileAlreadyExists, ServiceUnavailable, SourceNotFound
from rucio.rse.protocols import protocol
try:
import arc # pylint: disable=import-error
except:
pass
class DataPoint:
    '''
    Wrapper around arc.datapoint_from_url() which does not clean up DataPoints
    when python objects are destroyed, leading to connection leaking when used
    with gridftp. This class should be used instead of arc.datapoint_from_url().
    It can be called like dp = DataPoint('gsiftp://...', uc); dp.h.Stat()
    where uc is an arc.UserConfig object.
    '''

    def __init__(self, u, uc):
        # arc.datapoint_from_url may return None for unsupported URLs;
        # callers are expected to check `dp.h is None` before use.
        self.h = arc.datapoint_from_url(u, uc)

    def __del__(self):
        # Guard against a partially constructed object (datapoint_from_url
        # raised before `h` was set) or an unsupported URL (`h` is None):
        # calling the SWIG destructor in those cases would raise during
        # garbage collection.
        if getattr(self, 'h', None) is not None:
            arc.DataPoint.__swig_destroy__(self.h)
class Default(protocol.RSEProtocol):
    """ Implementing access to RSEs using ARC client."""

    def __init__(self, protocol_attr, rse_settings, logger=None):
        """
        Set up UserConfig object.

        :param protocol_attr: Properties of the requested protocol.
        :param rse_settings: The RSE settings.
        :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
        """
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)

        # Arc logging to stdout, uncomment for debugging. Should use root
        # logger level eventually.
        # root_logger = arc.Logger_getRootLogger()
        # stream = arc.LogStream(sys.stdout)
        # root_logger.addDestination(stream)
        # # Set threshold to VERBOSE or DEBUG for more information
        # root_logger.setThreshold(arc.DEBUG)

        self.cfg = arc.UserConfig()
        try:
            # Best effort: pick up the proxy certificate from the environment;
            # a missing X509_USER_PROXY is deliberately ignored.
            self.cfg.ProxyPath(os.environ['X509_USER_PROXY'])
        except:
            pass

    def path2pfn(self, path):
        """
        Returns a fully qualified PFN for the file referred by path.

        :param path: The path to the file.

        :returns: Fully qualified PFN.
        """
        return ''.join([self.rse['scheme'], '://%s' % self.rse['hostname'], path])

    def exists(self, pfn):
        """ Checks if the requested file is known by the referred RSE.

        :param pfn: Physical file name

        :returns: True if the file exists, False if it doesn't

        :raises ServiceUnavailable: if the stat failed for any reason other
            than the file not existing
        """
        dp = DataPoint(str(pfn), self.cfg)
        fileinfo = arc.FileInfo()
        status = dp.h.Stat(fileinfo)
        if not status:
            # ENOENT simply means "not there"; anything else is a real error.
            if status.GetErrno() == errno.ENOENT:
                return False
            raise ServiceUnavailable(str(status))
        return True

    def connect(self):
        """ Establishes the actual connection to the referred RSE.

        ARC data points connect lazily, so nothing needs to be done here.

        :raises RSEAccessDenied: if no connection could be established.
        """
        pass

    def close(self):
        """ Closes the connection to RSE. No-op for ARC."""
        pass

    def __arc_copy(self, src, dest, space_token=None, transfer_timeout=None):
        """ Copies a file between two URLs using the ARC DataMover.

        :param src: Source URL.
        :param dest: Destination URL.
        :param space_token: Optional space token set on the destination URL.
        :param transfer_timeout: Transfer timeout (in seconds) - currently unused.

        :raises SourceNotFound: if the source does not exist.
        :raises FileAlreadyExists: if the destination already exists.
        :raises ServiceUnavailable: for any other transfer failure.
        """

        # TODO set proxy path

        # Convert the arguments to DataPoint objects
        source = DataPoint(str(src), self.cfg)
        if source.h is None:
            raise ServiceUnavailable("Can't handle source %s" % src)

        destination = DataPoint(str(dest), self.cfg)
        if destination.h is None:
            raise ServiceUnavailable("Can't handle destination %s" % dest)
        if space_token:
            destination.h.GetURL().AddOption('spacetoken', space_token)

        # DataMover does the transfer
        mover = arc.DataMover()
        # Don't attempt to retry on error
        mover.retry(False)
        # Passive and insecure gridftp
        mover.passive(True)
        mover.secure(False)

        # Do the transfer
        status = mover.Transfer(source.h, destination.h, arc.FileCache(), arc.URLMap())

        if not status:
            # Map ARC errnos onto the corresponding Rucio exceptions.
            if status.GetErrno() == errno.ENOENT:
                raise SourceNotFound()
            if status.GetErrno() == errno.EEXIST:
                raise FileAlreadyExists()
            raise ServiceUnavailable(str(status))

    def get(self, pfn, dest, transfer_timeout=None):
        """ Provides access to files stored inside connected the RSE.

        :param pfn: Physical file name of requested file
        :param dest: Name and path of the files when stored at the client
        :param transfer_timeout: Transfer timeout (in seconds) - dummy

        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        self.__arc_copy(pfn, dest, transfer_timeout=transfer_timeout)

    def put(self, source, target, source_dir=None, transfer_timeout=None):
        """ Allows to store files inside the referred RSE.

        :param source: Physical file name
        :param target: Name of the file on the storage system e.g. with prefixed scope
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout: Transfer timeout (in seconds) - dummy

        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        if source_dir:
            sf = source_dir + '/' + source
        else:
            sf = source
        # Forward the space token from the protocol description, if any.
        space_token = None
        if self.attributes['extended_attributes'] is not None and 'space_token' in list(self.attributes['extended_attributes'].keys()):
            space_token = self.attributes['extended_attributes']['space_token']
        self.__arc_copy(sf, target, space_token, transfer_timeout=transfer_timeout)

    def delete(self, pfn):
        """ Deletes a file from the connected RSE.

        :param pfn: Physical file name

        :raises ServiceUnavailable, SourceNotFound
        """
        dp = DataPoint(str(pfn), self.cfg)
        if dp.h is None:
            raise ServiceUnavailable("Can't handle pfn %s" % pfn)
        status = dp.h.Remove()
        if not status:
            if status.GetErrno() == errno.ENOENT:
                raise SourceNotFound()
            raise ServiceUnavailable(str(status))

    def rename(self, pfn, new_pfn):
        """ Allows to rename a file stored inside the connected RSE.

        :param pfn: Current physical file name
        :param new_pfn: New physical file name

        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        dp = DataPoint(str(pfn), self.cfg)
        if dp.h is None:
            raise ServiceUnavailable("Can't handle pfn %s" % pfn)
        url = arc.URL(str(new_pfn))
        if not url:
            raise ServiceUnavailable("Can't handle new pfn %s" % new_pfn)
        status = dp.h.Rename(url)
        if not status:
            if status.GetErrno() == errno.ENOENT:
                raise SourceNotFound()
            raise ServiceUnavailable(str(status))
import logging
import os
import os.path
import shutil
from subprocess import call
from rucio.common import exception
from rucio.common.utils import adler32
from rucio.rse.protocols import protocol
class Default(protocol.RSEProtocol):
    """ Implementing access to RSEs using the local filesystem."""

    def exists(self, pfn):
        """
        Checks if the requested file is known by the referred RSE.
        :param pfn: Physical file name
        :returns: True if the file exists, False if it doesn't
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        status = ''
        try:
            status = os.path.exists(self.pfn2path(pfn))
        except Exception as e:
            raise exception.ServiceUnavailable(e)
        return status

    def connect(self):
        """
        Establishes the actual connection to the referred RSE.
        :param credentials: needed to establish a connection with the stroage.
        :raises RSEAccessDenied: if no connection could be established.
        """
        # Local filesystem needs no connection setup.
        pass

    def close(self):
        """ Closes the connection to RSE."""
        pass

    def get(self, pfn, dest, transfer_timeout=None):
        """ Provides access to files stored inside connected the RSE.
        :param pfn: Physical file name of requested file
        :param dest: Name and path of the files when stored at the client
        :param transfer_timeout Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        try:
            shutil.copy(self.pfn2path(pfn), dest)
        except IOError as e:
            # Disambiguate whether the failure came from the source (remote)
            # or the destination (local): probe the destination by creating
            # and removing an empty file there.
            # NOTE(review): the inner `except IOError as e` shadows the outer
            # exception; when the probe itself fails, the original error is
            # replaced by the probe's error — confirm this is intended.
            try:  # To check if the error happened locally or remotely
                with open(dest, 'wb'):
                    pass
                call(['rm', '-rf', dest])
            except IOError as e:
                if e.errno == 2:
                    raise exception.DestinationNotAccessible(e)
                else:
                    raise exception.ServiceUnavailable(e)
            # Probe succeeded: destination is writable, so the failure was
            # on the source side.
            if e.errno == 2:
                raise exception.SourceNotFound(e)
            else:
                raise exception.ServiceUnavailable(e)

    def put(self, source, target, source_dir=None, transfer_timeout=None):
        """
        Allows to store files inside the referred RSE.
        :param source: path to the source file on the client file system
        :param target: path to the destination file on the storage
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        # `target` is converted from a PFN to a local path once, here.
        target = self.pfn2path(target)
        if source_dir:
            sf = source_dir + '/' + source
        else:
            sf = source
        try:
            dirs = os.path.dirname(target)
            if not os.path.exists(dirs):
                os.makedirs(dirs)
            shutil.copy(sf, target)
        except IOError as e:
            if e.errno == 2:
                raise exception.SourceNotFound(e)
            elif not self.exists(self.rse['prefix']):
                # RSE prefix directory is missing: create each path
                # component in turn, then retry the copy.
                path = ''
                for p in self.rse['prefix'].split('/'):
                    path += p + '/'
                    os.mkdir(path)
                # NOTE(review): `target` was already converted by pfn2path
                # above, so pfn2path is applied twice on this retry path —
                # verify this double conversion is intentional.
                shutil.copy(sf, self.pfn2path(target))
            else:
                raise exception.DestinationNotAccessible(e)

    def delete(self, pfn):
        """ Deletes a file from the connected RSE.
        :param pfn: pfn to the to be deleted file
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        try:
            os.remove(self.pfn2path(pfn))
        except OSError as e:
            if e.errno == 2:
                raise exception.SourceNotFound(e)

    def rename(self, pfn, new_pfn):
        """ Allows to rename a file stored inside the connected RSE.
        :param path: path to the current file on the storage
        :param new_path: path to the new file on the storage
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        path = self.pfn2path(pfn)
        new_path = self.pfn2path(new_pfn)
        try:
            if not os.path.exists(os.path.dirname(new_path)):
                os.makedirs(os.path.dirname(new_path))
            os.rename(path, new_path)
        except IOError as e:
            if e.errno == 2:
                # NOTE(review): `path` is already a local path, and exists()
                # applies pfn2path again internally — double conversion, and
                # raising SourceNotFound when the source *does* exist looks
                # inverted. Confirm the intended semantics.
                if self.exists(self.pfn2path(path)):
                    raise exception.SourceNotFound(e)
                else:
                    raise exception.DestinationNotAccessible(e)
            else:
                raise exception.ServiceUnavailable(e)

    def lfns2pfns(self, lfns):
        """ Returns fully qualified PFNs for the file referred by each lfn in
        the lfns list.
        :param lfns: List of lfns. If lfn['path'] is present it is used as
                     the path to the file, otherwise the path is constructed
                     deterministically.
        :returns: Fully qualified PFNs.
        """
        pfns = {}
        # Normalise the prefix to have exactly one leading and one trailing
        # slash before concatenation.
        prefix = self.attributes['prefix']
        if not prefix.startswith('/'):
            prefix = ''.join(['/', prefix])
        if not prefix.endswith('/'):
            prefix = ''.join([prefix, '/'])
        # Accept a single lfn dict as well as a list of them.
        lfns = [lfns] if isinstance(lfns, dict) else lfns
        for lfn in lfns:
            scope, name = str(lfn['scope']), lfn['name']
            if lfn.get('path'):
                # Explicit path given: strip its leading slash (the prefix
                # already ends with one).
                pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'],
                                                         '://',
                                                         self.attributes['hostname'],
                                                         prefix,
                                                         lfn['path'] if not lfn['path'].startswith('/') else lfn['path'][1:]
                                                         ])
            else:
                # No path: derive it deterministically from scope and name.
                pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'],
                                                         '://',
                                                         self.attributes['hostname'],
                                                         prefix,
                                                         self._get_path(scope=scope, name=name)
                                                         ])
        return pfns

    def pfn2path(self, pfn):
        # Reduce a fully qualified PFN to the corresponding local
        # filesystem path (prefix/path/name).
        tmp = list(self.parse_pfns(pfn).values())[0]
        return '/'.join([tmp['prefix'], tmp['path'], tmp['name']])

    def stat(self, pfn):
        """ Determines the file size in bytes and checksum (adler32) of the provided file.
        :param pfn: The PFN the file.
        :returns: a dict containing the keys filesize and adler32.
        """
        path = self.pfn2path(pfn)
        # NOTE(review): `os.path.stat` resolves to the `stat` module only
        # because posixpath happens to import it internally; `stat.ST_SIZE`
        # would be the explicit spelling — confirm before relying on it.
        return {'filesize': os.stat(path)[os.path.stat.ST_SIZE], 'adler32': adler32(path)}
class Symlink(Default):
    """ Implementing access to RSEs using the local filesystem, creating a symlink on a get """

    def get(self, pfn, dest, transfer_timeout=None):
        """ Provides access to files stored inside connected the RSE.
        A download/get will create a symlink on the local file system pointing to the
        underlying file. Other operations act directly on the remote file.

        :param pfn: Physical file name of requested file
        :param dest: Name and path of the files when stored at the client
        :param transfer_timeout: Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        path = self.pfn2path(pfn)
        os.symlink(path, dest)
        self.logger(logging.DEBUG,
                    'Symlink {} created for {} from {}'
                    .format(dest, path, pfn))
        # NOTE(review): os.lstat raises OSError rather than returning a
        # falsy value, so this branch is likely unreachable — confirm.
        if not os.lstat(dest):
            # problem in creating the symlink
            self.logger(logging.ERROR, 'Symlink {} could not be created'.format(dest))
            raise exception.DestinationNotAccessible()
        if not os.path.exists(dest):
            # could not find the file following the symlink
            self.logger(logging.ERROR, 'Symlink {} appears to be a broken link to {}'
                        .format(dest, path))
            # Clean up the dangling symlink before reporting the failure.
            if os.lstat(dest) and os.path.islink(dest):
                os.unlink(dest)
            raise exception.SourceNotFound()

    def pfn2path(self, pfn):
        # obtain path and sanitise from multiple slashes, etc
        path = os.path.normpath(super().pfn2path(pfn))
        self.logger(logging.DEBUG, 'Extracted path: {} from: {}'.format(path, pfn))
        return path
from rucio.common import exception
from rucio.rse.protocols import protocol
class Default(protocol.RSEProtocol):
    """ Mock RSE protocol: tracks uploaded names in memory and treats every
    other operation as a no-op. Intended for testing only."""

    def __init__(self, protocol_attr, rse_settings, logger=None):
        """ Initializes the object with information about the referred RSE.

        :param props: Properties derived from the RSE Repository
        """
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)
        self.attributes.pop('determinism_type', None)
        # In-memory record of "stored" targets; exists()/get() consult it.
        self.files = []

    def exists(self, pfn):
        """ Checks if the requested file is known by the referred RSE.

        :param pfn: Physical file name
        :returns: True if the file exists, False if it doesn't
        :raise ServiceUnavailable
        """
        return pfn in self.files

    def connect(self):
        """ Establishes the actual connection to the referred RSE.

        :param credentials: Provide all necessary information to establish a connection
            to the referred storage system. Some is loaded from the repository inside the
            RSE class and some must be provided specific for the SFTP protocol like
            username, password, private_key, private_key_pass, port.
            For details about possible additional parameters and details about their usage
            see the pysftp.Connection() documentation.
            NOTE: the host parametrer is overwritten with the value provided by the repository

        :raise RSEAccessDenied
        """
        pass

    def close(self):
        """ Closes the connection to RSE."""
        pass

    def get(self, pfn, dest, transfer_timeout=None):
        """ Provides access to files stored inside connected the RSE.

        :param pfn: Physical file name of requested file
        :param dest: Name and path of the files when stored at the client
        :param transfer_timeout: Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        # Only files previously put() are considered present.
        if pfn not in self.files:
            raise exception.SourceNotFound(pfn)

    def put(self, source, target, source_dir=None, transfer_timeout=None):
        """ Allows to store files inside the referred RSE.

        :param source: Physical file name
        :param target: Name of the file on the storage system e.g. with prefixed scope
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout: Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        self.files.append(target)

    def delete(self, pfn):
        """ Deletes a file from the connected RSE.

        :param pfn: Physical file name
        :raises ServiceUnavailable, SourceNotFound
        """
        pass

    def bulk_delete(self, pfns):
        """
        Submits an async task to bulk delete files.

        :param pfns: list of pfns to delete
        :raises TransferAPIError: if unexpected response from the service.
        """
        pass

    def rename(self, pfn, new_pfn):
        """ Allows to rename a file stored inside the connected RSE.

        :param pfn: Current physical file name
        :param new_pfn: New physical file name
        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        pass
import json
import os
import requests
from rucio.common import exception
from rucio.rse.protocols import protocol
class Default(protocol.RSEProtocol):
    """ Implementing access to RSEs using gsiftp."""

    def __init__(self, protocol_attr, rse_settings, logger=None):
        """ Initializes the object with information about the referred RSE.

        :param props: Properties derived from the RSE Repository
        """
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)

    def connect(self):
        """
        Establishes the actual connection to the referred RSE.
        If we decide to use gfal, init should be done here.

        :raises RSEAccessDenied
        """
        pass

    def close(self):
        """
        Closes the connection to RSE.
        """
        pass

    def get_space_usage(self):
        """
        Get RSE space usage information.

        :returns: a tuple (totalsize, unusedsize) in bytes on success;
                  implicitly returns None if the copy or the token lookup fails.
        :raises ServiceUnavailable: if some generic error occured in the library.
        """
        rse_name = self.rse['rse']
        dest = '/tmp/rucio-gsiftp-site-size_' + rse_name
        space_usage_url = ''
        # url of space usage json, would be nicer to have it in rse_settings
        agis = requests.get('http://atlas-agis-api.cern.ch/request/ddmendpoint/query/list/?json').json()
        agis_token = ''
        for res in agis:
            if rse_name == res['name']:
                agis_token = res['token']
                space_usage_url = res['space_usage_url']
        # gfal2 is imported lazily so the module loads on hosts without it.
        import gfal2  # pylint: disable=import-error
        gfal2.set_verbose(gfal2.verbose_level.normal)  # pylint: disable=no-member
        try:
            if os.path.exists(dest):
                os.remove(dest)
            ctx = gfal2.creat_context()  # pylint: disable=no-member
            ctx.set_opt_string_list("SRM PLUGIN", "TURL_PROTOCOLS", ["gsiftp", "rfio", "gsidcap", "dcap", "kdcap"])
            params = ctx.transfer_parameters()
            params.timeout = 3600
            ret = ctx.filecopy(params, str(space_usage_url), str('file://' + dest))
            if ret == 0:
                # Context manager guarantees the handle is closed even if
                # json.load raises (the previous open/close leaked on error).
                with open(dest) as data_file:
                    data = json.load(data_file)
                if agis_token not in list(data.keys()):
                    print('ERROR: space usage json has different token as key')
                else:
                    totalsize = int(data[agis_token]['total_space'])
                    used = int(data[agis_token]['used_space'])
                    unusedsize = totalsize - used
                    return totalsize, unusedsize
        except Exception as error:
            print(error)
            raise exception.ServiceUnavailable(error)
import os
from os.path import dirname
from urllib.parse import urlparse
from rucio.common import exception
from rucio.common.utils import execute
from rucio.rse.protocols import protocol
class Default(protocol.RSEProtocol):
    """ Implementing access to RSEs using the RFIO protocol. """

    def connect(self, credentials):
        """
        Establishes the actual connection to the referred RSE.

        :param credentials: needed to establish a connection with the stroage.
        :raises RSEAccessDenied: if no connection could be established.
        """
        # Castor-style storage selects the service class via environment.
        extended_attributes = self.rse['protocol']['extended_attributes']
        if 'STAGE_SVCCLASS' in extended_attributes:
            os.environ['STAGE_SVCCLASS'] = extended_attributes['STAGE_SVCCLASS']

    def path2pfn(self, path):
        """
        Retruns a fully qualified PFN for the file referred by path.

        :param path: The path to the file.
        :returns: Fully qualified PFN.
        """
        return ''.join([self.rse['scheme'], '://', path])

    def exists(self, path):
        """
        Checks if the requested file is known by the referred RSE.

        :param path: Physical file name
        :returns: True if the file exists, False if it doesn't
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        cmd = f'rfstat {path}'
        status, out, err = execute(cmd)
        return status == 0

    def close(self):
        """ Closes the connection to RSE."""
        # Undo the environment mutation done in connect().
        if 'STAGE_SVCCLASS' in os.environ:
            del os.environ['STAGE_SVCCLASS']

    def put(self, source, target, source_dir, transfer_timeout=None):
        """
        Allows to store files inside the referred RSE.

        :param source: path to the source file on the client file system
        :param target: path to the destination file on the storage
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout: Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        # Ensure the target directory tree exists before copying.
        if not self.exists(dirname(target)):
            self.mkdir(dirname(target))
        cmd = f'rfcp {source} {target}'
        status, out, err = execute(cmd)
        return status == 0

    def mkdir(self, directory):
        """ Create new directory. """
        cmd = f'rfmkdir -p {directory}'
        status, out, err = execute(cmd)
        return status == 0

    def split_pfn(self, pfn):
        """
        Splits the given PFN into the parts known by the protocol. During parsing the PFN is also checked for
        validity on the given RSE with the given protocol.

        :param pfn: a fully qualified PFN
        :returns: a dict containing all known parts of the PFN for the protocol e.g. scheme, hostname, port, prefix, path, filename
        :raises RSEFileNameNotSupported: if the provided PFN doesn't match with the protocol settings
        """
        parsed = urlparse(pfn)
        ret = dict()
        ret['scheme'] = parsed.scheme
        ret['hostname'] = parsed.netloc.partition(':')[0]
        ret['port'] = int(parsed.netloc.partition(':')[2]) if parsed.netloc.partition(':')[2] != '' else 0
        ret['path'] = parsed.path
        # Protect against 'lazy' defined prefixes for RSEs in the repository
        self.rse['prefix'] = '' if self.rse['prefix'] is None else self.rse['prefix']
        if not self.rse['prefix'].startswith('/'):
            self.rse['prefix'] = '/' + self.rse['prefix']
        if not self.rse['prefix'].endswith('/'):
            self.rse['prefix'] += '/'
        if self.rse['hostname'] != ret['hostname']:
            raise exception.RSEFileNameNotSupported('Invalid hostname: provided \'%s\', expected \'%s\'' % (ret['hostname'], self.rse['hostname']))
        if not ret['path'].startswith(self.rse['prefix']):
            raise exception.RSEFileNameNotSupported('Invalid prefix: provided \'%s\', expected \'%s\'' % ('/'.join(ret['path'].split('/')[0:len(self.rse['prefix'].split('/')) - 1]),
                                                                                                          self.rse['prefix']))  # len(...)-1 due to the leading '/
        # Spliting parsed.path into prefix, path, filename
        ret['prefix'] = self.rse['prefix']
        ret['path'] = ret['path'].partition(self.rse['prefix'])[2]
        ret['name'] = ret['path'].split('/')[-1]
        ret['path'] = ret['path'].partition(ret['name'])[0]
        return ret
from rucio.rse.protocols import protocol
class Default(protocol.RSEProtocol):
    """ Abstract cache protocol skeleton: every storage operation must be
    provided by a concrete subclass."""

    def __init__(self, protocol_attr, rse_settings, logger=None):
        """ Initializes the object with information about the referred RSE.

        :param props: Properties derived from the RSE Repository
        """
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)
        self.attributes.pop('determinism_type', None)
        self.files = []

    def _get_path(self, scope, name):
        """ Transforms the physical file name into the local URI in the referred RSE.
        Suitable for sites implementoing the RUCIO naming convention.

        :param name: filename
        :param scope: scope
        :returns: RSE specific URI of the physical file
        """
        return '%s/%s' % (scope, name)

    def path2pfn(self, path):
        """
        Retruns a fully qualified PFN for the file referred by path.

        :param path: The path to the file.
        :returns: Fully qualified PFN.
        """
        return ''.join([self.rse['scheme'], '://%s' % self.rse['hostname'], path])

    def exists(self, pfn):
        """ Checks if the requested file is known by the referred RSE.

        :param pfn: Physical file name
        :returns: True if the file exists, False if it doesn't
        :raise ServiceUnavailable
        """
        raise NotImplementedError

    def connect(self):
        """ Establishes the actual connection to the referred RSE.

        :param credentials: Provide all necessary information to establish a connection
            to the referred storage system. Some is loaded from the repository inside the
            RSE class and some must be provided specific for the SFTP protocol like
            username, password, private_key, private_key_pass, port.
            For details about possible additional parameters and details about their usage
            see the pysftp.Connection() documentation.
            NOTE: the host parametrer is overwritten with the value provided by the repository

        :raise RSEAccessDenied
        """
        raise NotImplementedError

    def close(self):
        """ Closes the connection to RSE."""
        raise NotImplementedError

    def get(self, pfn, dest, transfer_timeout=None):
        """ Provides access to files stored inside connected the RSE.

        :param pfn: Physical file name of requested file
        :param dest: Name and path of the files when stored at the client
        :param transfer_timeout: Transfer timeout (in seconds)
        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError

    def put(self, source, target, source_dir=None, transfer_timeout=None):
        """ Allows to store files inside the referred RSE.

        :param source: Physical file name
        :param target: Name of the file on the storage system e.g. with prefixed scope
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout: Transfer timeout (in seconds)
        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError

    def delete(self, pfn):
        """ Deletes a file from the connected RSE.

        :param pfn: Physical file name
        :raises ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError

    def rename(self, pfn, new_pfn):
        """ Allows to rename a file stored inside the connected RSE.

        :param pfn: Current physical file name
        :param new_pfn: New physical file name
        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError
import logging
import os
from rucio.common import exception
from rucio.common.utils import execute, PREFERRED_CHECKSUM
from rucio.rse.protocols import protocol
class Default(protocol.RSEProtocol):
    """ Implementing access to RSEs using the XRootD protocol using GSI authentication."""

    def __init__(self, protocol_attr, rse_settings, logger=logging.log):
        """ Initializes the object with information about the referred RSE.

        :param props: Properties derived from the RSE Repository
        """
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)
        self.scheme = self.attributes['scheme']
        self.hostname = self.attributes['hostname']
        self.port = str(self.attributes['port'])
        self.logger = logger

    def path2pfn(self, path):
        """
        Returns a fully qualified PFN for the file referred by path.

        :param path: The path to the file.
        :returns: Fully qualified PFN.
        """
        self.logger(logging.DEBUG, 'xrootd.path2pfn: path: {}'.format(path))
        if not path.startswith('xroot') and not path.startswith('root'):
            if path.startswith('/'):
                return '%s://%s:%s/%s' % (self.scheme, self.hostname, self.port, path)
            else:
                return '%s://%s:%s//%s' % (self.scheme, self.hostname, self.port, path)
        else:
            return path

    def exists(self, pfn):
        """ Checks if the requested file is known by the referred RSE.

        :param pfn: Physical file name
        :returns: True if the file exists, False if it doesn't
        :raise ServiceUnavailable
        """
        self.logger(logging.DEBUG, 'xrootd.exists: pfn: {}'.format(pfn))
        try:
            path = self.pfn2path(pfn)
            cmd = 'XrdSecPROTOCOL=gsi xrdfs %s:%s stat %s' % (self.hostname, self.port, path)
            self.logger(logging.DEBUG, 'xrootd.exists: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status != 0:
                return False
        except Exception as e:
            raise exception.ServiceUnavailable(e)
        return True

    def stat(self, path):
        """
        Returns the stats of a file.

        :param path: path to file
        :raises ServiceUnavailable: if some generic error occured in the library.
        :returns: a dict with two keys, filesize and an element of GLOBALLY_SUPPORTED_CHECKSUMS.
        """
        self.logger(logging.DEBUG, 'xrootd.stat: path: {}'.format(path))
        ret = {}
        chsum = None
        if path.startswith('root:'):
            path = self.pfn2path(path)
        try:
            # xrdfs stat for getting filesize
            cmd = 'XrdSecPROTOCOL=gsi xrdfs %s:%s stat %s' % (self.hostname, self.port, path)
            self.logger(logging.DEBUG, 'xrootd.stat: filesize cmd: {}'.format(cmd))
            status_stat, out, err = execute(cmd)
            if status_stat == 0:
                # Parse the 'Size: <n>' line out of the xrdfs stat output.
                for line in out.split('\n'):
                    if line and ':' in line:
                        k, v = line.split(':', maxsplit=1)
                        if k.strip().lower() == 'size':
                            ret['filesize'] = v.strip()
                            break
            # xrdfs query checksum for getting checksum
            cmd = 'XrdSecPROTOCOL=gsi xrdfs %s:%s query checksum %s' % (self.hostname, self.port, path)
            self.logger(logging.DEBUG, 'xrootd.stat: checksum cmd: {}'.format(cmd))
            status_query, out, err = execute(cmd)
            if status_query == 0:
                # Output format: '<checksum-name> <value>'
                chsum, value = out.strip('\n').split()
                ret[chsum] = value
        except Exception as e:
            raise exception.ServiceUnavailable(e)
        if 'filesize' not in ret:
            raise exception.ServiceUnavailable('Filesize could not be retrieved.')
        if PREFERRED_CHECKSUM != chsum or not chsum:
            msg = '{} does not match with {}'.format(chsum, PREFERRED_CHECKSUM)
            raise exception.RSEChecksumUnavailable(msg)
        return ret

    def pfn2path(self, pfn):
        """
        Returns the path of a file given the pfn, i.e. scheme and hostname are subtracted from the pfn.

        :param path: pfn of a file
        :returns: path.
        """
        self.logger(logging.DEBUG, 'xrootd.pfn2path: pfn: {}'.format(pfn))
        if pfn.startswith('//'):
            return pfn
        elif pfn.startswith('/'):
            return '/' + pfn
        else:
            # Keep everything after the configured prefix, then re-attach it.
            prefix = self.attributes['prefix']
            path = pfn.partition(self.attributes['prefix'])[2]
            path = prefix + path
            return path

    def lfns2pfns(self, lfns):
        """
        Returns a fully qualified PFN for the file referred by path.

        :param path: The path to the file.
        :returns: Fully qualified PFN.
        """
        self.logger(logging.DEBUG, 'xrootd.lfns2pfns: lfns: {}'.format(lfns))
        pfns = {}
        prefix = self.attributes['prefix']
        if not prefix.startswith('/'):
            prefix = ''.join(['/', prefix])
        if not prefix.endswith('/'):
            prefix = ''.join([prefix, '/'])
        # isinstance (not type ==) also accepts dict subclasses.
        lfns = [lfns] if isinstance(lfns, dict) else lfns
        for lfn in lfns:
            scope, name = lfn['scope'], lfn['name']
            if 'path' in lfn and lfn['path'] is not None:
                pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'], '://', self.attributes['hostname'], ':', str(self.attributes['port']), prefix, lfn['path']])
            else:
                pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'], '://', self.attributes['hostname'], ':', str(self.attributes['port']), prefix, self._get_path(scope=scope, name=name)])
        return pfns

    def connect(self):
        """ Establishes the actual connection to the referred RSE.

        :param credentials: Provides information to establish a connection
            to the referred storage system. For S3 connections these are
            access_key, secretkey, host_base, host_bucket, progress_meter
            and skip_existing.
        :raises RSEAccessDenied
        """
        self.logger(logging.DEBUG, 'xrootd.connect: port: {}, hostname {}'.format(self.port, self.hostname))
        try:
            # The query stats call is not implemented on some xroot doors.
            # Workaround: fail, if server does not reply within 10 seconds for static config query
            cmd = 'XrdSecPROTOCOL=gsi XRD_REQUESTTIMEOUT=10 xrdfs %s:%s query config %s:%s' % (self.hostname, self.port, self.hostname, self.port)
            self.logger(logging.DEBUG, 'xrootd.connect: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status != 0:
                raise exception.RSEAccessDenied(err)
        except Exception as e:
            raise exception.RSEAccessDenied(e)

    def close(self):
        """ Closes the connection to RSE."""
        pass

    def get(self, pfn, dest, transfer_timeout=None):
        """ Provides access to files stored inside connected the RSE.

        :param pfn: Physical file name of requested file
        :param dest: Name and path of the files when stored at the client
        :param transfer_timeout: Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        self.logger(logging.DEBUG, 'xrootd.get: pfn: {}'.format(pfn))
        try:
            cmd = 'XrdSecPROTOCOL=gsi xrdcp -f %s %s' % (pfn, dest)
            self.logger(logging.DEBUG, 'xrootd.get: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            # xrdcp exit code 54 corresponds to a missing source file.
            if status == 54:
                raise exception.SourceNotFound()
            elif status != 0:
                raise exception.RucioException(err)
        except Exception as e:
            raise exception.ServiceUnavailable(e)

    def put(self, filename, target, source_dir, transfer_timeout=None):
        """
        Allows to store files inside the referred RSE.

        :param source: path to the source file on the client file system
        :param target: path to the destination file on the storage
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout: Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        self.logger(logging.DEBUG, 'xrootd.put: filename: {} target: {}'.format(filename, target))
        source_dir = source_dir or '.'
        source_url = '%s/%s' % (source_dir, filename)
        self.logger(logging.DEBUG, 'xrootd put: source url: {}'.format(source_url))
        path = self.path2pfn(target)
        if not os.path.exists(source_url):
            raise exception.SourceNotFound()
        try:
            cmd = 'XrdSecPROTOCOL=gsi xrdcp -f %s %s' % (source_url, path)
            self.logger(logging.DEBUG, 'xrootd.put: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status != 0:
                raise exception.RucioException(err)
        except Exception as e:
            raise exception.ServiceUnavailable(e)

    def delete(self, pfn):
        """
        Deletes a file from the connected RSE.

        :param pfn: Physical file name
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        self.logger(logging.DEBUG, 'xrootd.delete: pfn: {}'.format(pfn))
        if not self.exists(pfn):
            raise exception.SourceNotFound()
        try:
            path = self.pfn2path(pfn)
            cmd = 'XrdSecPROTOCOL=gsi xrdfs %s:%s rm %s' % (self.hostname, self.port, path)
            self.logger(logging.DEBUG, 'xrootd.delete: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status != 0:
                raise exception.RucioException(err)
        except Exception as e:
            raise exception.ServiceUnavailable(e)

    def rename(self, pfn, new_pfn):
        """ Allows to rename a file stored inside the connected RSE.

        :param pfn: Current physical file name
        :param new_pfn: New physical file name
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        self.logger(logging.DEBUG, 'xrootd.rename: pfn: {}'.format(pfn))
        if not self.exists(pfn):
            raise exception.SourceNotFound()
        try:
            path = self.pfn2path(pfn)
            new_path = self.pfn2path(new_pfn)
            new_dir = new_path[:new_path.rindex('/') + 1]
            # Best-effort mkdir of the target directory; mv reports the
            # real failure if the directory could not be created.
            cmd = 'XrdSecPROTOCOL=gsi xrdfs %s:%s mkdir -p %s' % (self.hostname, self.port, new_dir)
            self.logger(logging.DEBUG, 'xrootd.stat: mkdir cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            cmd = 'XrdSecPROTOCOL=gsi xrdfs %s:%s mv %s %s' % (self.hostname, self.port, path, new_path)
            self.logger(logging.DEBUG, 'xrootd.stat: rename cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status != 0:
                raise exception.RucioException(err)
        except Exception as e:
            raise exception.ServiceUnavailable(e)
from rucio.rse.protocols import ngarc
class Default(ngarc.Default):
    """ Implementing access to RSEs using the ngarc protocol."""

    def __init__(self, protocol_attr, rse_settings, logger=None):
        """ Initializes the object with information about the referred RSE.

        :param props: Properties derived from the RSE Repository
        """
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)
        self.attributes.pop('determinism_type', None)
        self.files = []

    def _get_path(self, scope, name):
        """ Transforms the physical file name into the local URI in the referred RSE.
        Suitable for sites implementoing the RUCIO naming convention.

        :param name: filename
        :param scope: scope
        :returns: RSE specific URI of the physical file
        """
        return '%s/%s' % (scope, name)

    def path2pfn(self, path):
        """
        Retruns a fully qualified PFN for the file referred by path.

        :param path: The path to the file.
        :returns: Fully qualified PFN.
        """
        return ''.join([self.attributes['scheme'], '://%s' % self.attributes['hostname'], path])

    def put(self, source, target, source_dir=None, transfer_timeout=None):
        """ Allows to store files inside the referred RSE.

        :param source: Physical file name
        :param target: Name of the file on the storage system e.g. with prefixed scope
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout: Transfer timeout (in seconds)
        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        # Read-only cache protocol: writes are deliberately unsupported.
        raise NotImplementedError

    def delete(self, pfn):
        """ Deletes a file from the connected RSE.

        :param pfn: Physical file name
        :raises ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError

    def rename(self, pfn, new_pfn):
        """ Allows to rename a file stored inside the connected RSE.

        :param pfn: Current physical file name
        :param new_pfn: New physical file name
        :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError
from rucio.rse.protocols import protocol
class Default(protocol.RSEProtocol):
    """ Implementing access to RSEs using the local filesystem."""

    def __init__(self, protocol_attr, rse_settings, logger=None):
        """ Initializes the object with information about the referred RSE.

            :param protocol_attr: Properties of the requested protocol derived from the RSE Repository.
            :param rse_settings: The RSE settings.
            :param logger: Optional decorated logger.
        """
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)
        # A dummy protocol never applies a non-deterministic naming scheme.
        self.attributes.pop('determinism_type', None)
        # Kept for interface compatibility; never populated by this class.
        self.files = []

    def path2pfn(self, path):
        """
        Returns a fully qualified PFN for the file referred by path.

        :param path: The path to the file.
        :returns: Fully qualified PFN.
        """
        # NOTE(review): this reads 'scheme'/'hostname' from the RSE settings
        # dict (self.rse), while other protocol implementations read them from
        # the protocol attributes (self.attributes) -- confirm these keys
        # exist in rse_settings for RSEs using this protocol.
        return ''.join([self.rse['scheme'], '://%s' % self.rse['hostname'], path])

    def exists(self, pfn):
        """ Checks if the requested file is known by the referred RSE.

            :param pfn: Physical file name
            :returns: True if the file exists, False if it doesn't
            :raise ServiceUnavailable
        """
        raise NotImplementedError

    def connect(self):
        """ Establishes the actual connection to the referred RSE.

            :param credentials: Provide all necessary information to establish a connection
                to the referred storage system. Some is loaded from the repository inside the
                RSE class and some must be provided specific for the SFTP protocol like
                username, password, private_key, private_key_pass, port.
                For details about possible additional parameters and details about their usage
                see the pysftp.Connection() documentation.
                NOTE: the host parameter is overwritten with the value provided by the repository

            :raise RSEAccessDenied
        """
        raise NotImplementedError

    def close(self):
        """ Closes the connection to RSE."""
        raise NotImplementedError

    def get(self, pfn, dest, transfer_timeout=None):
        """ Provides access to files stored inside connected the RSE.

            :param pfn: Physical file name of requested file
            :param dest: Name and path of the files when stored at the client
            :param transfer_timeout: Transfer timeout (in seconds)
            :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError

    def put(self, source, target, source_dir=None, transfer_timeout=None):
        """ Allows to store files inside the referred RSE.

            :param source: Physical file name
            :param target: Name of the file on the storage system e.g. with prefixed scope
            :param source_dir: Path where the to be transferred files are stored in the local file system
            :param transfer_timeout: Transfer timeout (in seconds)
            :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError

    def delete(self, pfn):
        """ Deletes a file from the connected RSE.

            :param pfn: Physical file name
            :raises ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError

    def rename(self, pfn, new_pfn):
        """ Allows to rename a file stored inside the connected RSE.

            :param pfn: Current physical file name
            :param new_pfn: New physical file name
            :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        raise NotImplementedError
import hashlib
import logging
from configparser import NoOptionError, NoSectionError
from urllib.parse import urlparse
from rucio.common import config, exception
from rucio.common.utils import register_policy_package_algorithms
from rucio.rse import rsemanager
if getattr(rsemanager, 'CLIENT_MODE', None):
from rucio.client.rseclient import RSEClient
if getattr(rsemanager, 'SERVER_MODE', None):
from rucio.common.types import InternalScope
from rucio.core import replica
from rucio.core.rse import get_rse_vo
class RSEDeterministicTranslation(object):
    """
    Execute the logic for translating a LFN to a path.
    """

    # Registry mapping algorithm name -> callable; filled by _module_init_()
    # below and optionally extended at runtime by policy packages.
    _LFN2PFN_ALGORITHMS = {}
    # Algorithm used when an RSE does not specify 'lfn2pfn_algorithm'.
    _DEFAULT_LFN2PFN = "hash"

    def __init__(self, rse=None, rse_attributes=None, protocol_attributes=None):
        """
        Initialize a translator object from the RSE, its attributes, and the protocol-specific
        attributes.

        :param rse: Name of RSE for this translation.
        :param rse_attributes: A dictionary of RSE-specific attributes for use in the translation.
        :param protocol_attributes: A dictionary of RSE/protocol-specific attributes.
        """
        self.rse = rse
        self.rse_attributes = rse_attributes if rse_attributes else {}
        self.protocol_attributes = protocol_attributes if protocol_attributes else {}
        # Policy-package algorithms are registered lazily on the first path()
        # call (server mode only); see path() below.
        self.loaded_policy_modules = False

    @classmethod
    def supports(cls, name):
        """
        Check to see if a specific algorithm is supported.

        :param name: Name of the deterministic algorithm.
        :returns: True if `name` is an algorithm supported by the translator class, False otherwise.
        """
        return name in cls._LFN2PFN_ALGORITHMS

    @staticmethod
    def register(lfn2pfn_callable, name=None):
        """
        Provided a callable function, register it as one of the valid LFN2PFN algorithms.

        The callable will receive five arguments:
            - scope: Scope of the LFN.
            - name: LFN's path name
            - rse: RSE name the translation is being done for.
            - rse_attributes: Attributes of the RSE.
            - protocol_attributes: Attributes of the RSE's protocol
        The return value should be the last part of the PFN - it will be appended to the
        rest of the URL.

        :param lfn2pfn_callable: Callable function to use for generating paths.
        :param name: Algorithm name used for registration. If None, then `lfn2pfn_callable.__name__` is used.
        """
        if name is None:
            name = lfn2pfn_callable.__name__
        RSEDeterministicTranslation._LFN2PFN_ALGORITHMS[name] = lfn2pfn_callable

    @staticmethod
    def __hash(scope, name, rse, rse_attrs, protocol_attrs):
        """
        Given a LFN, turn it into a sub-directory structure using a hash function.

        This takes the MD5 of the LFN and uses the first four characters as a subdirectory
        name.

        :param scope: Scope of the LFN.
        :param name: File name of the LFN.
        :param rse: RSE for PFN (ignored)
        :param rse_attrs: RSE attributes for PFN (ignored)
        :param protocol_attrs: RSE protocol attributes for PFN (ignored)
        :returns: Path for use in the PFN generation.
        """
        del rse
        del rse_attrs
        del protocol_attrs
        hstr = hashlib.md5(('%s:%s' % (scope, name)).encode('utf-8')).hexdigest()
        # user/group scopes embed a dotted account name; expand it into
        # directory levels (e.g. 'user.jdoe' -> 'user/jdoe').
        if scope.startswith('user') or scope.startswith('group'):
            scope = scope.replace('.', '/')
        return '%s/%s/%s/%s' % (scope, hstr[0:2], hstr[2:4], name)

    @staticmethod
    def __identity(scope, name, rse, rse_attrs, protocol_attrs):
        """
        Given a LFN, convert it directly to a path using the mapping:

            scope:path -> scope/path

        :param scope: Scope of the LFN.
        :param name: File name of the LFN.
        :param rse: RSE for PFN (ignored)
        :param rse_attrs: RSE attributes for PFN (ignored)
        :param protocol_attrs: RSE protocol attributes for PFN (ignored)
        :returns: Path for use in the PFN generation.
        """
        del rse
        del rse_attrs
        del protocol_attrs
        if scope.startswith('user') or scope.startswith('group'):
            scope = scope.replace('.', '/')
        return '%s/%s' % (scope, name)

    @staticmethod
    def __belleii(scope, name, rse, rse_attrs, protocol_attrs):
        """
        Given a LFN, convert it directly to a path using the mapping:

            path -> path

        This is valid only for the belleii convention where the scope can be determined
        from the LFN using a deterministic function.

        :param scope: Scope of the LFN.
        :param name: File name of the LFN.
        :param rse: RSE for PFN (ignored)
        :param rse_attrs: RSE attributes for PFN (ignored)
        :param protocol_attrs: RSE protocol attributes for PFN (ignored)
        :returns: Path for use in the PFN generation.
        """
        del scope
        del rse
        del rse_attrs
        del protocol_attrs
        return name

    @staticmethod
    def __ligo(scope, name, rse, rse_attrs, protocol_attrs):
        """
        Given a LFN, convert it directly to a path using the Caltech schema

        e.g.,: ER8:H-H1_HOFT_C02-1126256640-4096 ->
                   ER8/hoft_C02/H1/H-H1_HOFT_C02-11262/H-H1_HOFT_C02-1126256640-4096

        :param scope: Scope of the LFN (observing run: ER8, O2, postO1, ...)
        :param name: File name of the LFN (E.g., H-H1_HOFT_C02-1126256640-4096.gwf)
        :param rse: RSE for PFN (ignored)
        :param rse_attrs: RSE attributes for PFN (ignored)
        :param protocol_attrs: RSE protocol attributes for PFN (ignored)
        :returns: Path for use in the PFN generation.
        """
        del rse
        del rse_attrs
        del protocol_attrs
        # Delegates to the external ligo_rucio package; imported lazily so
        # deployments without it are unaffected unless the algorithm is used.
        from ligo_rucio import lfn2pfn as ligo_lfn2pfn  # pylint: disable=import-error
        return ligo_lfn2pfn.ligo_lab(scope, name, None, None, None)

    @staticmethod
    def __xenon(scope, name, rse, rse_attrs, protocol_attrs):
        """
        Given a LFN, turn it into a two level sub-directory structure based on the scope
        plus a third level based on the name

        :param scope: Scope of the LFN.
        :param name: File name of the LFN.
        :param rse: RSE for PFN (ignored)
        :param rse_attrs: RSE attributes for PFN (ignored)
        :param protocol_attrs: RSE protocol attributes for PFN (ignored)
        :returns: Path for use in the PFN generation.
        """
        del rse
        del rse_attrs
        del protocol_attrs
        # NOTE(review): assumes 'name' contains at least two '-'-separated
        # fields and 'scope' is at least 8 characters -- verify with callers.
        return '%s/%s/%s/%s' % (scope[0:7], scope[4:len(scope)], name.split('-')[0] + "-" + name.split('-')[1], name)

    @staticmethod
    def __lsst(scope, name, rse, rse_attrs, protocol_attrs):
        """
        LFN2PFN algorithm for Rubin-LSST in the ESCAPE project
        Replace convention delimiter '__' by '/'
        The Escape instance does use the 'generic' Rucio schema.

        :param scope: Scope of the LFN (ignored)
        :param name: File name of the LFN.
        :param rse: RSE for PFN (ignored)
        :param rse_attrs: RSE attributes for PFN (ignored)
        :param protocol_attrs: RSE protocol attributes for PFN (ignored)
        :returns: Path for use in the PFN generation.
        """
        del scope
        del rse
        del rse_attrs
        del protocol_attrs
        return name.replace('__', '/')

    @classmethod
    def _module_init_(cls):
        """
        Initialize the class object on first module load.
        """
        cls.register(cls.__hash, "hash")
        cls.register(cls.__identity, "identity")
        cls.register(cls.__ligo, "ligo")
        cls.register(cls.__belleii, "belleii")
        cls.register(cls.__xenon, "xenon")
        cls.register(cls.__lsst, "lsst")
        policy_module = None
        try:
            policy_module = config.config_get('policy', 'lfn2pfn_module')
        except (NoOptionError, NoSectionError):
            pass
        if policy_module:
            # TODO: The import of importlib is done like this due to a dependency issue with python 2.6 and incompatibility of the module with py3.x
            # More information https://github.com/rucio/rucio/issues/875
            import importlib
            importlib.import_module(policy_module)
        cls._DEFAULT_LFN2PFN = config.get_lfn2pfn_algorithm_default()

    def path(self, scope, name):
        """ Transforms the logical file name into a PFN's path.

            :param name: filename
            :param scope: scope
            :returns: RSE specific URI of the physical file
        """
        # on first call, register any lfn2pfn algorithms from the policy package(s) (server only)
        if getattr(rsemanager, 'SERVER_MODE', None) and not self.loaded_policy_modules:
            register_policy_package_algorithms('lfn2pfn', RSEDeterministicTranslation._LFN2PFN_ALGORITHMS)
            self.loaded_policy_modules = True
        algorithm = self.rse_attributes.get('lfn2pfn_algorithm', 'default')
        if algorithm == 'default':
            algorithm = RSEDeterministicTranslation._DEFAULT_LFN2PFN
        # NOTE(review): raises KeyError if the RSE requests an algorithm that
        # was never registered.
        algorithm_callable = RSEDeterministicTranslation._LFN2PFN_ALGORITHMS[algorithm]
        return algorithm_callable(scope, name, self.rse, self.rse_attributes, self.protocol_attributes)


# Populate the built-in algorithm registry at import time.
RSEDeterministicTranslation._module_init_()  # pylint: disable=protected-access
class RSEProtocol(object):
    """ This class is virtual and acts as a base to inherit new protocols from. It further provides some common functionality which applies for the majority of the protocols."""

    def __init__(self, protocol_attr, rse_settings, logger=logging.log):
        """ Initializes the object with information about the referred RSE.

            :param protocol_attr: Properties of the requested protocol.
            :param rse_settings: The RSE settings.
            :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
        """
        # NOTE(review): the auth token is removed from the caller's dict
        # in-place; callers must not rely on 'auth_token' afterwards.
        self.auth_token = protocol_attr['auth_token']
        protocol_attr.pop('auth_token')
        self.attributes = protocol_attr
        self.translator = None
        self.renaming = True
        self.overwrite = False
        self.rse = rse_settings
        self.logger = logger
        if self.rse['deterministic']:
            self.translator = RSEDeterministicTranslation(self.rse['rse'], rse_settings, self.attributes)
            if getattr(rsemanager, 'CLIENT_MODE', None) and \
                    not RSEDeterministicTranslation.supports(self.rse.get('lfn2pfn_algorithm')):
                # Remote server has an algorithm we don't understand; always make the server do the lookup.
                setattr(self, 'lfns2pfns', self.__lfns2pfns_client)
        else:
            # Non-deterministic RSE: clients must ask the server for paths;
            # the server resolves them from the replica table.
            if getattr(rsemanager, 'CLIENT_MODE', None):
                setattr(self, 'lfns2pfns', self.__lfns2pfns_client)
            if getattr(rsemanager, 'SERVER_MODE', None):
                setattr(self, '_get_path', self._get_path_nondeterministic_server)

    def lfns2pfns(self, lfns):
        """
        Returns fully qualified PFNs for the LFNs.

        :param lfns: a single dict or a list of dicts with 'scope' and 'name'
                     (and optionally a pre-resolved 'path').
        :returns: dict with 'scope:name' as keys and the PFN as value.
        """
        pfns = {}
        prefix = self.attributes['prefix']
        # Normalise the prefix to have exactly one leading and trailing '/'.
        if not prefix.startswith('/'):
            prefix = ''.join(['/', prefix])
        if not prefix.endswith('/'):
            prefix = ''.join([prefix, '/'])
        lfns = [lfns] if isinstance(lfns, dict) else lfns
        for lfn in lfns:
            scope, name = str(lfn['scope']), lfn['name']
            if 'path' in lfn and lfn['path'] is not None:
                pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'],
                                                         '://',
                                                         self.attributes['hostname'],
                                                         ':',
                                                         str(self.attributes['port']),
                                                         prefix,
                                                         lfn['path'] if not lfn['path'].startswith('/') else lfn['path'][1:]
                                                         ])
            else:
                try:
                    pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'],
                                                             '://',
                                                             self.attributes['hostname'],
                                                             ':',
                                                             str(self.attributes['port']),
                                                             prefix,
                                                             self._get_path(scope=scope, name=name)
                                                             ])
                except exception.ReplicaNotFound as e:
                    # Best-effort: skip unresolvable replicas but keep going.
                    self.logger(logging.WARNING, str(e))
        return pfns

    def __lfns2pfns_client(self, lfns):
        """ Provides the path of a replica for non-deterministic sites. Will be assigned to get path by the __init__ method if necessary.

            :param lfns: list of DIDs
            :returns: dict with scope:name as keys and PFN as value (in case of errors the Rucio exception is assigned to the key)
        """
        client = RSEClient()  # pylint: disable=E0601
        lfns = [lfns] if isinstance(lfns, dict) else lfns
        lfn_query = ["%s:%s" % (lfn['scope'], lfn['name']) for lfn in lfns]
        return client.lfns2pfns(self.rse['rse'], lfn_query, scheme=self.attributes['scheme'])

    def _get_path(self, scope, name):
        """ Transforms the logical file name into a PFN.
            Suitable for sites implementing the RUCIO naming convention.
            This implementation is only invoked if the RSE is deterministic.

            :param scope: scope
            :param name: filename
            :returns: RSE specific URI of the physical file
        """
        return self.translator.path(scope, name)

    def _get_path_nondeterministic_server(self, scope, name):  # pylint: disable=invalid-name
        """ Provides the path of a replica for non-deterministic sites. Will be assigned to get path by the __init__ method if necessary. """
        vo = get_rse_vo(self.rse['id'])  # pylint: disable=E0601
        scope = InternalScope(scope, vo=vo)  # pylint: disable=E0601
        rep = replica.get_replica(scope=scope, name=name, rse_id=self.rse['id'])  # pylint: disable=E0601
        if 'path' in rep and rep['path'] is not None:
            path = rep['path']
        elif 'state' in rep and (rep['state'] is None or rep['state'] == 'UNAVAILABLE'):
            raise exception.ReplicaUnAvailable('Missing path information and state is UNAVAILABLE for replica %s:%s on non-deterministic storage named %s' % (scope, name, self.rse['rse']))
        else:
            raise exception.ReplicaNotFound('Missing path information for replica %s:%s on non-deterministic storage named %s' % (scope, name, self.rse['rse']))
        # Strip surrounding slashes so the caller can splice the path into a URL.
        if path.startswith('/'):
            path = path[1:]
        if path.endswith('/'):
            path = path[:-1]
        return path

    def parse_pfns(self, pfns):
        """
        Splits the given PFN into the parts known by the protocol. It is also checked if the provided protocol supports the given PFNs.

        :param pfns: a list of fully qualified PFNs
        :returns: dict with PFN as key and a dict with path and name as value
        :raises RSEFileNameNotSupported: if the provided PFN doesn't match with the protocol settings
        """
        ret = dict()
        pfns = [pfns] if isinstance(pfns, str) else pfns
        for pfn in pfns:
            parsed = urlparse(pfn)
            scheme = parsed.scheme
            hostname = parsed.netloc.partition(':')[0]
            port = int(parsed.netloc.partition(':')[2]) if parsed.netloc.partition(':')[2] != '' else 0
            # Collapse any duplicate slashes in the path before comparing.
            while '//' in parsed.path:
                parsed = parsed._replace(path=parsed.path.replace('//', '/'))
            path = parsed.path
            prefix = self.attributes['prefix']
            while '//' in prefix:
                prefix = prefix.replace('//', '/')
            # Protect against 'lazy' defined prefixes for RSEs in the repository
            if not prefix.startswith('/'):
                prefix = '/' + prefix
            if not prefix.endswith('/'):
                prefix += '/'
            if self.attributes['hostname'] != hostname:
                if self.attributes['hostname'] != 'localhost':  # In the database empty hostnames are replaced with localhost but for some URIs (e.g. file) a hostname is not included
                    raise exception.RSEFileNameNotSupported('Invalid hostname: provided \'%s\', expected \'%s\'' % (hostname, self.attributes['hostname']))
            if self.attributes['port'] != port:
                raise exception.RSEFileNameNotSupported('Invalid port: provided \'%s\', expected \'%s\'' % (port, self.attributes['port']))
            if not path.startswith(prefix):
                raise exception.RSEFileNameNotSupported('Invalid prefix: provided \'%s\', expected \'%s\'' % ('/'.join(path.split('/')[0:len(prefix.split('/')) - 1]),
                                                                                                             prefix))  # len(...)-1 due to the leading '/'
            # Splitting parsed.path into prefix, path, filename
            path = path.partition(prefix)[2]
            name = path.split('/')[-1]
            path = '/'.join(path.split('/')[:-1])
            if not path.startswith('/'):
                path = '/' + path
            if path != '/' and not path.endswith('/'):
                path = path + '/'
            ret[pfn] = {'path': path, 'name': name, 'scheme': scheme, 'prefix': prefix, 'port': port, 'hostname': hostname, }
        return ret

    def exists(self, path):
        """
        Checks if the requested file is known by the referred RSE.

        :param path: Physical file name
        :returns: True if the file exists, False if it doesn't
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        raise NotImplementedError

    def connect(self):
        """
        Establishes the actual connection to the referred RSE.

        :raises RSEAccessDenied: if no connection could be established.
        """
        raise NotImplementedError

    def close(self):
        """ Closes the connection to RSE."""
        raise NotImplementedError

    def get(self, path, dest, transfer_timeout=None):
        """
        Provides access to files stored inside connected the RSE.

        :param path: Physical file name of requested file
        :param dest: Name and path of the files when stored at the client
        :param transfer_timeout: Transfer timeout (in seconds)
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        raise NotImplementedError

    def put(self, source, target, source_dir, transfer_timeout=None):
        """
        Allows to store files inside the referred RSE.

        :param source: path to the source file on the client file system
        :param target: path to the destination file on the storage
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout: Transfer timeout (in seconds)
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        raise NotImplementedError

    def delete(self, path):
        """
        Deletes a file from the connected RSE.

        :param path: path to the to be deleted file
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        raise NotImplementedError

    def rename(self, path, new_path):
        """ Allows to rename a file stored inside the connected RSE.

            :param path: path to the current file on the storage
            :param new_path: path to the new file on the storage
            :raises DestinationNotAccessible: if the destination storage was not accessible.
            :raises ServiceUnavailable: if some generic error occured in the library.
            :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        raise NotImplementedError

    def get_space_usage(self):
        """
        Get RSE space usage information.

        :returns: a list with dict containing 'totalsize' and 'unusedsize'
        :raises ServiceUnavailable: if some generic error occured in the library.
        """
        raise NotImplementedError

    def stat(self, path):
        """
        Returns the stats of a file.

        :param path: path to file
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        :returns: a dict with two keys, filesize and adler32 of the file provided in path.
        """
        raise NotImplementedError
import logging
import os
import re
from rucio.common import exception
from rucio.common.utils import execute, PREFERRED_CHECKSUM
from rucio.rse.protocols import protocol
class Default(protocol.RSEProtocol):
    """ Implementing access to RSEs using the SSH protocol."""

    def __init__(self, protocol_attr, rse_settings, logger=logging.log):
        """ Initializes the object with information about the referred RSE.

            :param protocol_attr: Properties of the requested protocol derived from the RSE Repository.
            :param rse_settings: The RSE settings.
            :param logger: Optional decorated logger that can be passed from the calling daemons or servers.
        """
        super(Default, self).__init__(protocol_attr, rse_settings, logger=logger)
        self.scheme = self.attributes['scheme']
        self.hostname = self.attributes['hostname']
        self.port = str(self.attributes['port'])
        # Remote rsync binary location; only filled in by the Rsync subclass.
        self.path = None
        # Optional 'user' extended attribute selects the remote account
        # ('user@host'); membership test instead of list(d.keys()).
        extended_attributes = self.attributes['extended_attributes']
        if extended_attributes is not None and 'user' in extended_attributes:
            self.sshuser = extended_attributes['user'] + '@'
        else:
            self.sshuser = ''
        self.logger = logger

    def path2pfn(self, path):
        """
        Returns a fully qualified PFN for the file referred by path.

        :param path: The path to the file.
        :returns: Fully qualified PFN.
        """
        self.logger(logging.DEBUG, 'ssh.path2pfn: path: {}'.format(path))
        if not path.startswith(str(self.scheme) + '://'):
            return '%s://%s%s:%s/%s' % (self.scheme, self.sshuser, self.hostname, self.port, path)
        else:
            return path

    def exists(self, pfn):
        """ Checks if the requested file is known by the referred RSE.

            :param pfn: Physical file name
            :returns: True if the file exists, False if it doesn't
            :raise ServiceUnavailable
        """
        self.logger(logging.DEBUG, 'ssh.exists: pfn: {}'.format(pfn))
        try:
            path = self.pfn2path(pfn)
            cmd = 'ssh -p %s %s%s find %s' % (self.port, self.sshuser, self.hostname, path)
            self.logger(logging.DEBUG, 'ssh.exists: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            # Any non-zero exit status of the remote 'find' is treated as
            # "file does not exist".
            if status:
                return False
        except Exception as e:
            raise exception.ServiceUnavailable(e)
        return True

    def stat(self, path):
        """
        Returns the stats of a file.

        :param path: path to file
        :raises ServiceUnavailable: if some generic error occured in the library.
        :returns: a dict with two keys, filesize and an element of GLOBALLY_SUPPORTED_CHECKSUMS.
        """
        self.logger(logging.DEBUG, 'ssh.stat: path: {}'.format(path))
        ret = {}
        chsum = None
        path = self.pfn2path(path)
        try:
            # ssh stat for getting filesize
            # NOTE(review): the filesize is stored as the raw string output of
            # 'stat', not converted to int -- confirm callers accept that.
            cmd = 'ssh -p {0} {1}{2} stat --printf="%s" {3}'.format(self.port, self.sshuser, self.hostname, path)
            self.logger(logging.DEBUG, 'ssh.stat: filesize cmd: {}'.format(cmd))
            status_stat, out, err = execute(cmd)
            if status_stat == 0:
                ret['filesize'] = out
            # ssh query checksum for getting md5 checksum
            cmd = 'ssh -p %s %s%s md5sum %s' % (self.port, self.sshuser, self.hostname, path)
            self.logger(logging.DEBUG, 'ssh.stat: checksum cmd: {}'.format(cmd))
            status_query, out, err = execute(cmd)
            if status_query == 0:
                chsum = 'md5'
                val = out.strip(' ').split()
                ret[chsum] = val[0]
        except Exception as e:
            raise exception.ServiceUnavailable(e)
        if 'filesize' not in ret:
            raise exception.ServiceUnavailable('Filesize could not be retrieved.')
        # Only md5 is ever computed above, so any other preferred checksum
        # results in RSEChecksumUnavailable.
        if PREFERRED_CHECKSUM != chsum or not chsum:
            msg = '{} does not match with {}'.format(chsum, PREFERRED_CHECKSUM)
            raise exception.RSEChecksumUnavailable(msg)
        return ret

    def pfn2path(self, pfn):
        """
        Returns the path of a file given the pfn, i.e. scheme, user and hostname are subtracted from the pfn.

        :param pfn: pfn of a file
        :returns: path.
        """
        path = pfn
        if pfn.startswith(str(self.scheme) + '://'):
            self.logger(logging.DEBUG, 'ssh.pfn2path: pfn: {}'.format(pfn))
            prefix = self.attributes['prefix']
            # Keep everything after the prefix and re-attach the prefix, which
            # also drops scheme://user@host:port.
            path = pfn.partition(self.attributes['prefix'])[2]
            path = prefix + path
        return path

    def lfns2pfns(self, lfns):
        """
        Returns fully qualified PFNs for the given LFNs.

        :param lfns: a single dict or a list of dicts with 'scope' and 'name'
                     (and optionally a pre-resolved 'path').
        :returns: dict with 'scope:name' as keys and the PFN as value.
        """
        self.logger(logging.DEBUG, 'ssh.lfns2pfns: lfns: {}'.format(lfns))
        pfns = {}
        prefix = self.attributes['prefix']
        # Normalise the prefix to have exactly one leading and trailing '/'.
        if not prefix.startswith('/'):
            prefix = ''.join(['/', prefix])
        if not prefix.endswith('/'):
            prefix = ''.join([prefix, '/'])
        # isinstance instead of type(...) == dict: also accepts dict subclasses.
        lfns = [lfns] if isinstance(lfns, dict) else lfns
        for lfn in lfns:
            scope, name = lfn['scope'], lfn['name']
            if 'path' in lfn and lfn['path'] is not None:
                pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'], '://', self.sshuser, self.hostname, ':', self.port, prefix, lfn['path']])
            else:
                pfns['%s:%s' % (scope, name)] = ''.join([self.attributes['scheme'], '://', self.sshuser, self.hostname, ':', self.port, prefix, self._get_path(scope=scope, name=name)])
        return pfns

    def connect(self):
        """ Establishes the actual connection to the referred RSE.

            :raises RSEAccessDenied
        """
        self.logger(logging.DEBUG, 'ssh.connect: port: {}, hostname {}, ssh-user {}'.format(self.port, self.hostname, self.sshuser))
        try:
            cmd = 'ssh -p %s %s%s echo ok 2>&1' % (self.port, self.sshuser, self.hostname)
            status, out, err = execute(cmd)
            checker = re.search(r'ok', out)
            if not checker:
                raise exception.RSEAccessDenied(err)
        except Exception as e:
            raise exception.RSEAccessDenied(e)

    def close(self):
        """ Closes the connection to RSE."""
        pass

    def get(self, pfn, dest, transfer_timeout=None):
        """ Provides access to files stored inside connected the RSE.

            :param pfn: Physical file name of requested file
            :param dest: Name and path of the files when stored at the client
            :param transfer_timeout: Transfer timeout (in seconds) - dummy
            :raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
        """
        self.logger(logging.DEBUG, 'ssh.get: pfn: {}'.format(pfn))
        try:
            path = self.pfn2path(pfn)
            destdir = os.path.dirname(dest)
            cmd = 'mkdir -p %s' % (destdir)
            self.logger(logging.DEBUG, 'ssh.get: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            # NOTE(review): unlike exists/stat/delete/rename, this scp call
            # does not pass the port (-P) -- confirm non-default ports work.
            cmd = 'scp %s%s:%s %s' % (self.sshuser, self.hostname, path, dest)
            self.logger(logging.DEBUG, 'ssh.get: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status:
                raise exception.RucioException(err)
        except Exception as e:
            raise exception.ServiceUnavailable(e)

    def put(self, filename, target, source_dir, transfer_timeout=None):
        """
        Allows to store files inside the referred RSE.

        :param filename: name of the source file on the client file system
        :param target: path to the destination file on the storage
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout: Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        self.logger(logging.DEBUG, 'ssh.put: filename: {} target: {}'.format(filename, target))
        source_dir = source_dir or '.'
        source_url = '%s/%s' % (source_dir, filename)
        self.logger(logging.DEBUG, 'ssh.put: source url: {}'.format(source_url))
        path = self.pfn2path(target)
        pathdir = os.path.dirname(path)
        if not os.path.exists(source_url):
            raise exception.SourceNotFound()
        try:
            # NOTE(review): neither ssh (-p) nor scp (-P) receive the port
            # here, unlike the other commands -- confirm non-default ports work.
            cmd = 'ssh %s%s "mkdir -p %s" && scp %s %s%s:%s' % (self.sshuser, self.hostname, pathdir, source_url, self.sshuser, self.hostname, path)
            self.logger(logging.DEBUG, 'ssh.put: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status:
                raise exception.RucioException(err)
        except Exception as e:
            raise exception.ServiceUnavailable(e)

    def delete(self, pfn):
        """
        Deletes a file from the connected RSE.

        :param pfn: Physical file name
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        self.logger(logging.DEBUG, 'ssh.delete: pfn: {}'.format(pfn))
        if not self.exists(pfn):
            raise exception.SourceNotFound()
        try:
            path = self.pfn2path(pfn)
            cmd = 'ssh -p %s %s%s rm %s' % (self.port, self.sshuser, self.hostname, path)
            self.logger(logging.DEBUG, 'ssh.delete: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status != 0:
                raise exception.RucioException(err)
        except Exception as e:
            raise exception.ServiceUnavailable(e)

    def rename(self, pfn, new_pfn):
        """ Allows to rename a file stored inside the connected RSE.

            :param pfn: Current physical file name
            :param new_pfn: New physical file name
            :raises DestinationNotAccessible: if the destination storage was not accessible.
            :raises ServiceUnavailable: if some generic error occured in the library.
            :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        self.logger(logging.DEBUG, 'ssh.rename: pfn: {}'.format(pfn))
        if not self.exists(pfn):
            raise exception.SourceNotFound()
        try:
            path = self.pfn2path(pfn)
            new_path = self.pfn2path(new_pfn)
            new_dir = new_path[:new_path.rindex('/') + 1]
            cmd = 'ssh -p %s %s%s "mkdir -p %s"' % (self.port, self.sshuser, self.hostname, new_dir)
            self.logger(logging.DEBUG, 'ssh.rename: mkdir cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            cmd = 'ssh -p %s %s%s mv %s %s' % (self.port, self.sshuser, self.hostname, path, new_path)
            self.logger(logging.DEBUG, 'ssh.rename: rename cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status != 0:
                raise exception.RucioException(err)
        except Exception as e:
            raise exception.ServiceUnavailable(e)
class Rsync(Default):
""" Implementing access to RSEs using the ssh.Rsync implementation."""
def stat(self, path):
    """
    Returns the stats of a file.

    :param path: path to file
    :raises ServiceUnavailable: if some generic error occured in the library.
    :returns: a dict with two keys, filesize and an element of GLOBALLY_SUPPORTED_CHECKSUMS.
    """
    self.logger(logging.DEBUG, 'rsync.stat: path: {}'.format(path))
    ret = {}
    chsum = None
    path = self.pfn2path(path)
    try:
        # rsync stat for getting filesize
        # NOTE(review): '-n' makes this a dry run, so '--remove-source-files'
        # presumably has no effect here -- confirm it is intentional.
        cmd = "rsync -an --size-only -e 'ssh -p {0}' --remove-source-files {1}{2}:{3}".format(self.port, self.sshuser, self.hostname, path)
        self.logger(logging.DEBUG, 'rsync.stat: filesize cmd: {}'.format(cmd))
        status_stat, out, err = execute(cmd)
        if status_stat == 0:
            # NOTE(review): parsing a fixed column of rsync's human-readable
            # output is fragile; assumes the size (with thousands separators)
            # is the 4th space-separated field from the end -- verify against
            # the rsync version deployed.
            sizestr = out.split(" ")[-4]
            ret['filesize'] = sizestr.replace(',', '')
        # rsync query checksum for getting md5 checksum
        cmd = 'ssh -p %s %s%s md5sum %s' % (self.port, self.sshuser, self.hostname, path)
        self.logger(logging.DEBUG, 'rsync.stat: checksum cmd: {}'.format(cmd))
        status_query, out, err = execute(cmd)
        if status_query == 0:
            chsum = 'md5'
            val = out.strip(' ').split()
            ret[chsum] = val[0]
    except Exception as e:
        raise exception.ServiceUnavailable(e)
    if 'filesize' not in ret:
        raise exception.ServiceUnavailable('Filesize could not be retrieved.')
    # Only md5 is ever computed above, so any other preferred checksum
    # results in RSEChecksumUnavailable.
    if PREFERRED_CHECKSUM != chsum or not chsum:
        msg = '{} does not match with {}'.format(chsum, PREFERRED_CHECKSUM)
        raise exception.RSEChecksumUnavailable(msg)
    return ret
def connect(self):
    """ Establishes the actual connection to the referred RSE.

        Probes the host via ssh, verifies that rsync is available on the
        remote side and remembers the remote rsync binary location.

        :raises RSEAccessDenied
    """
    self.logger(logging.DEBUG, 'rsync.connect: port: {}, hostname {}, ssh-user {}'.format(self.port, self.hostname, self.sshuser))
    try:
        probe = 'ssh -p %s %s%s echo ok 2>&1' % (self.port, self.sshuser, self.hostname)
        rc, stdout, stderr = execute(probe)
        if re.search(r'ok', stdout) is None:
            raise exception.RSEAccessDenied(stderr)
        probe = 'ssh -p %s %s%s type rsync' % (self.port, self.sshuser, self.hostname)
        rc, stdout, stderr = execute(probe)
        if re.search(r'rsync is', stdout) is None:
            raise exception.RSEAccessDenied(stderr)
        # 'type rsync' prints "rsync is <path>\n"; keep the path without the newline.
        self.path = stdout.split(" ")[2][:-1]
    except Exception as error:
        raise exception.RSEAccessDenied(error)
def get(self, pfn, dest, transfer_timeout=None):
""" Provides access to files stored inside connected the RSE.
:param pfn: Physical file name of requested file
:param dest: Name and path of the files when stored at the client
:param transfer_timeout: Transfer timeout (in seconds) - dummy
:raises DestinationNotAccessible, ServiceUnavailable, SourceNotFound
"""
self.logger(logging.DEBUG, 'rsync.get: pfn: {}'.format(pfn))
try:
path = self.pfn2path(pfn)
destdir = os.path.dirname(dest)
cmd = 'mkdir -p %s && rsync -az -e "ssh -p %s" --append-verify %s%s:%s %s' % (destdir, self.port, self.sshuser, self.hostname, path, dest)
self.logger(logging.DEBUG, 'rsync.get: cmd: {}'.format(cmd))
status, out, err = execute(cmd)
if status:
raise exception.RucioException(err)
except Exception as e:
raise exception.ServiceUnavailable(e)
    def put(self, filename, target, source_dir, transfer_timeout=None):
        """
        Allows to store files inside the referred RSE.

        :param filename: name of the file on the client file system
        :param target: path to the destination file on the storage
        :param source_dir: Path where the to be transferred files are stored in the local file system
        :param transfer_timeout: Transfer timeout (in seconds) - dummy
        :raises DestinationNotAccessible: if the destination storage was not accessible.
        :raises ServiceUnavailable: if some generic error occured in the library.
        :raises SourceNotFound: if the source file was not found on the referred storage.
        """
        self.logger(logging.DEBUG, 'rsync.put: filename: {} target: {}'.format(filename, target))
        # Default to the current working directory when no source dir is given.
        source_dir = source_dir or '.'
        source_url = '%s/%s' % (source_dir, filename)
        self.logger(logging.DEBUG, 'rsync.put: source url: {}'.format(source_url))
        path = self.pfn2path(target)
        pathdir = os.path.dirname(path)
        # Fail fast if the local source file does not exist.
        if not os.path.exists(source_url):
            raise exception.SourceNotFound()
        try:
            # Create the remote parent directory first, then transfer with
            # --append-verify so an interrupted upload can be resumed.
            cmd = 'ssh -p %s %s%s "mkdir -p %s" && rsync -az -e "ssh -p %s" --append-verify %s %s%s:%s' % (self.port, self.sshuser, self.hostname, pathdir, self.port, source_url, self.sshuser, self.hostname, path)
            self.logger(logging.DEBUG, 'rsync.put: cmd: {}'.format(cmd))
            status, out, err = execute(cmd)
            if status:
                raise exception.RucioException(err)
        except Exception as e:
            # NOTE(review): this also wraps the RucioException raised just
            # above, so callers only ever observe ServiceUnavailable.
            raise exception.ServiceUnavailable(e)
import enum
from collections import namedtuple
from rucio.common.config import config_get_bool
"""
Constants.
"""
RESERVED_KEYS = ['scope', 'name', 'account', 'did_type', 'is_open', 'monotonic', 'obsolete', 'complete',
'availability', 'suppressed', 'bytes', 'length', 'md5', 'adler32', 'rule_evaluation_action',
'rule_evaluation_required', 'expired_at', 'deleted_at', 'created_at', 'updated_at']
# collection_keys =
# file_keys =
KEY_TYPES = ['ALL', 'COLLECTION', 'FILE', 'DERIVED']
# all(container, dataset, file), collection(dataset or container), file, derived(compute from file for collection)
SCHEME_MAP = {'srm': ['srm', 'gsiftp'],
'gsiftp': ['srm', 'gsiftp'],
'https': ['https', 'davs', 'srm+https', 'cs3s'],
'davs': ['https', 'davs', 'srm+https', 'cs3s'],
'srm+https': ['https', 'davs', 'srm+https', 'cs3s'],
'cs3s': ['https', 'davs', 'srm+https', 'cs3s'],
'root': ['root'],
'scp': ['scp'],
'rsync': ['rsync'],
'rclone': ['rclone']}
if config_get_bool('transfers', 'srm_https_compatibility', raise_exception=False, default=False):
SCHEME_MAP['srm'].append('https')
SCHEME_MAP['https'].append('srm')
SCHEME_MAP['srm'].append('davs')
SCHEME_MAP['davs'].append('srm')
SUPPORTED_PROTOCOLS = ['gsiftp', 'srm', 'root', 'davs', 'http', 'https', 'file', 'storm', 'srm+https', 'scp', 'rsync', 'rclone']
FTS_STATE = namedtuple('FTS_STATE', ['SUBMITTED', 'READY', 'ACTIVE', 'FAILED', 'FINISHED', 'FINISHEDDIRTY', 'NOT_USED',
'CANCELED'])('SUBMITTED', 'READY', 'ACTIVE', 'FAILED', 'FINISHED', 'FINISHEDDIRTY',
'NOT_USED', 'CANCELED')
FTS_COMPLETE_STATE = namedtuple('FTS_COMPLETE_STATE', ['OK', 'ERROR'])('Ok', 'Error')
# https://gitlab.cern.ch/fts/fts3/-/blob/master/src/db/generic/Job.h#L41
FTS_JOB_TYPE = namedtuple('FTS_JOB_TYPE', ['MULTIPLE_REPLICA', 'MULTI_HOP', 'SESSION_REUSE', 'REGULAR'])('R', 'H', 'Y', 'N')
# Messages constants
MAX_MESSAGE_LENGTH = 4000
class SuspiciousAvailability(enum.Enum):
    """Filter categories used when listing suspicious replicas."""
    ALL = 0
    EXIST_COPIES = 1
    LAST_COPY = 2
class ReplicaState(enum.Enum):
    """Possible states of a file replica, as single-character DB codes."""
    # From rucio.db.sqla.constants, update that file at the same time as this
    AVAILABLE = 'A'
    UNAVAILABLE = 'U'
    COPYING = 'C'
    BEING_DELETED = 'B'
    BAD = 'D'
    TEMPORARY_UNAVAILABLE = 'T'
@enum.unique
class HermesService(str, enum.Enum):
    """
    The services supported by Hermes2.

    The str mixin makes members compare equal to (and usable as) their plain
    string values.
    """
    INFLUX = "INFLUX"
    ELASTIC = "ELASTIC"
    EMAIL = "EMAIL"
    ACTIVEMQ = "ACTIVEMQ"
import configparser
import json
import os
from collections.abc import Callable
from typing import TYPE_CHECKING, overload, Optional, TypeVar, Union
from rucio.common import exception
from rucio.common.exception import ConfigNotFound, DatabaseException
_T = TypeVar('_T')
_U = TypeVar('_U')
if TYPE_CHECKING:
from sqlalchemy.orm import Session
def convert_to_any_type(value) -> Union[bool, int, float, str]:
    """
    Convert a string configuration value to its most specific type.

    Tries boolean first ('true'/'yes'/'on' -> True, 'false'/'no'/'off' -> False),
    then int, then float; falls back to returning the string unchanged.

    :param value: the raw string value to convert.
    :returns: the converted bool, int or float, or the original string.
    """
    if value.lower() in ['true', 'yes', 'on']:
        return True
    elif value.lower() in ['false', 'no', 'off']:
        return False
    for conv in (int, float):
        try:
            return conv(value)
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Only conversion failures should be ignored.
        except (TypeError, ValueError):
            pass
    return value
def _convert_to_boolean(value):
    """Parse *value* as a boolean; raise ValueError if it is not one."""
    if isinstance(value, bool):
        return value
    lowered = value.lower()
    if lowered in ('true', 'yes', 'on', '1'):
        return True
    if lowered in ('false', 'no', 'off', '0'):
        return False
    raise ValueError('Not a boolean: %s' % value)
# Typed overloads for config_get(): they only refine the return type seen by
# static type checkers (plain str, the default's type, or the result of
# convert_type_fnc); the single runtime implementation below handles all cases.
@overload
def config_get(
        section: str,
        option: str,
        *,
        clean_cached: bool = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> str:
    ...


@overload
def config_get(
        section: str,
        option: str,
        *,
        default: _T = ...,
        clean_cached: bool = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> Union[str, _T]:
    ...


@overload
def config_get(
        section: str,
        option: str,
        raise_exception: bool,
        default: _T = ...,
        *,
        clean_cached: bool = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> Union[str, _T]:
    ...


@overload
def config_get(
        section: str,
        option: str,
        *,
        clean_cached: bool = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
        convert_type_fnc: Callable[[str], _T],
) -> _T:
    ...


@overload
def config_get(
        section: str,
        option: str,
        *,
        default: _T = ...,
        clean_cached: bool = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
        convert_type_fnc: Callable[[str], _U],
) -> Union[_T, _U]:
    ...


@overload
def config_get(
        section: str,
        option: str,
        raise_exception: bool,
        default: _T = ...,
        *,
        clean_cached: bool = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
        convert_type_fnc: Callable[[str], _U],
) -> Union[_T, _U]:
    ...


def config_get(
        section: str,
        option: str,
        raise_exception: bool = True,
        default: _U = None,
        clean_cached: bool = False,
        check_config_table: bool = True,
        session: "Optional[Session]" = None,
        use_cache: bool = True,
        expiration_time: int = 900,
        convert_type_fnc: Callable[[str], _T] = lambda x: x,
) -> Union[_T, _U]:
    """
    Return the string value for a given option in a section

    First it looks at the configuration file and, if it is not found, check in the config table only if it is called
    from a server/daemon (and if check_config_table is set).

    :param section: the named section.
    :param option: the named option.
    :param raise_exception: Boolean to raise or not NoOptionError, NoSectionError or RuntimeError.
    :param default: the default value if not found.
    :param clean_cached: Deletes the cached config singleton instance if no config value is found
    :param check_config_table: if not set, avoid looking at config table even if it is called from server/daemon
    :param session: The database session in use. Only used if not found in config file and if it is called from
                    server/daemon
    :param use_cache: Boolean if the cache should be used. Only used if not found in config file and if it is called
                      from server/daemon
    :param expiration_time: Time after that the cached value gets ignored. Only used if not found in config file and if
                            it is called from server/daemon
    :param convert_type_fnc: A function used to parse the string config value into the desired destination type
    :returns: the configuration value.
    :raises NoOptionError
    :raises NoSectionError
    :raises RuntimeError
    """
    try:
        # Primary lookup: the rucio.cfg file.
        return convert_type_fnc(get_config().get(section, option))
    except (configparser.NoOptionError, configparser.NoSectionError, RuntimeError) as err:
        # Second chance: the option may exist under a legacy section/option name.
        try:
            legacy_config = get_legacy_config(section, option)
            if legacy_config is not None:
                return convert_type_fnc(legacy_config)
        except RuntimeError:
            # No config file could be loaded at all; fall through to the table.
            pass
        # Imported lazily to avoid a circular import at module load time.
        from rucio.common.utils import is_client
        client_mode = is_client()
        if not client_mode and check_config_table:
            # Server/daemon: the config database table is the final fallback.
            try:
                return __config_get_table(section=section, option=option, raise_exception=raise_exception,
                                          default=default, clean_cached=clean_cached, session=session,
                                          use_cache=use_cache, expiration_time=expiration_time,
                                          convert_type_fnc=convert_type_fnc)
            except (ConfigNotFound, DatabaseException, ImportError):
                # Report the original file-lookup error, not the table error.
                raise err
        else:
            if raise_exception and default is None:
                raise err
            if clean_cached:
                clean_cached_config()
            return default
def get_legacy_config(section: str, option: str):
    """
    Returns a legacy config value, if it is present.

    :param section: The section of the new config.
    :param option: The option of the new config.
    :returns: The string value of the legacy option if one is found, None otherwise.
    """
    # Maps from current section/option names to their legacy equivalents.
    # Both maps are currently empty, so lookups fall through unchanged.
    legacy_section_map = {}
    legacy_option_map = {}
    mapped_section = legacy_section_map.get(section, section)
    mapped_option = legacy_option_map.get(option, option)
    if not config_has_option(mapped_section, mapped_option):
        return None
    return get_config().get(mapped_section, mapped_option)
def config_has_section(section: str) -> bool:
    """
    Indicates whether the named section is present in the configuration. The DEFAULT section is not acknowledged.

    :param section: Name of section in the Rucio config to verify.
    :returns: True if the section exists in the configuration; False otherwise
    """
    parser = get_config()
    return parser.has_section(section)
def config_has_option(section: str, option: str) -> bool:
    """
    Indicates whether the named option is present in the configuration. The DEFAULT section is not acknowledged.

    :param section: Name of section in the Rucio config to verify.
    :param option: Name of option in the Rucio config to verify.
    :returns: True if the section and option exists in the configuration; False otherwise
    """
    parser = get_config()
    return parser.has_option(section, option)
def config_add_section(section: str):
    """
    Add a new section to the configuration object. Throws DuplicateSectionError if it already exists.

    :param section: Name of section in the Rucio config to add.
    :returns: None
    """
    parser = get_config()
    return parser.add_section(section)
# Typed overloads: refine the static return type; the implementation below
# handles every combination at runtime.
@overload
def config_get_int(
        section: str,
        option: str,
        *,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> int:
    ...


@overload
def config_get_int(
        section: str,
        option: str,
        *,
        default: int = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> int:
    ...


@overload
def config_get_int(
        section: str,
        option: str,
        raise_exception,
        default: _T = ...,
        *,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> Union[int, _T]:
    ...


def config_get_int(
        section,
        option,
        raise_exception=True,
        default=None,
        check_config_table=True,
        session=None,
        use_cache=True,
        expiration_time=900,
):
    """
    Return the integer value for a given option in a section

    :param section: the named section.
    :param option: the named option.
    :param raise_exception: Boolean to raise or not NoOptionError, NoSectionError or RuntimeError.
    :param default: the default value if not found.
    :param check_config_table: if not set, avoid looking at config table even if it is called from server/daemon
    :param session: The database session in use. Only used if not found in config file and if it is called from
                    server/daemon
    :param use_cache: Boolean if the cache should be used. Only used if not found in config file and if it is called
                      from server/daemon
    :param expiration_time: Time after that the cached value gets ignored. Only used if not found in config file and if
                            it is called from server/daemon
    :returns: the configuration value.
    :raises NoOptionError
    :raises NoSectionError
    :raises RuntimeError
    :raises ValueError
    """
    return config_get(
        section,
        option,
        raise_exception=raise_exception,
        default=default,
        check_config_table=check_config_table,
        session=session,
        # Bug fix: was hard-coded to True, silently ignoring use_cache=False.
        use_cache=use_cache,
        expiration_time=expiration_time,
        convert_type_fnc=int,
    )
# Typed overloads: refine the static return type; the implementation below
# handles every combination at runtime.
@overload
def config_get_float(
        section: str,
        option: str,
        *,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> float:
    ...


@overload
def config_get_float(
        section: str,
        option: str,
        *,
        default: float = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> float:
    ...


@overload
def config_get_float(
        section: str,
        option: str,
        raise_exception,
        default: _T = ...,
        *,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> Union[float, _T]:
    ...


def config_get_float(
        section,
        option,
        raise_exception=True,
        default=None,
        check_config_table=True,
        session=None,
        use_cache=True,
        expiration_time=900,
):
    """
    Return the floating point value for a given option in a section

    :param section: the named section.
    :param option: the named option.
    :param raise_exception: Boolean to raise or not NoOptionError, NoSectionError or RuntimeError.
    :param default: the default value if not found.
    :param check_config_table: if not set, avoid looking at config table even if it is called from server/daemon
    :param session: The database session in use. Only used if not found in config file and if it is called from
                    server/daemon
    :param use_cache: Boolean if the cache should be used. Only used if not found in config file and if it is called
                      from server/daemon
    :param expiration_time: Time after that the cached value gets ignored. Only used if not found in config file and if
                            it is called from server/daemon
    :returns: the configuration value.
    :raises NoOptionError
    :raises NoSectionError
    :raises RuntimeError
    :raises ValueError
    """
    return config_get(
        section,
        option,
        raise_exception=raise_exception,
        default=default,
        check_config_table=check_config_table,
        session=session,
        # Bug fix: was hard-coded to True, silently ignoring use_cache=False.
        use_cache=use_cache,
        expiration_time=expiration_time,
        convert_type_fnc=float,
    )
# Typed overloads: refine the static return type; the implementation below
# handles every combination at runtime.
@overload
def config_get_bool(
        section: str,
        option: str,
        *,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> bool:
    ...


@overload
def config_get_bool(
        section: str,
        option: str,
        *,
        default: bool = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> bool:
    ...


@overload
def config_get_bool(
        section: str,
        option: str,
        raise_exception,
        default: _T = ...,
        *,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> Union[bool, _T]:
    ...


def config_get_bool(
        section,
        option,
        raise_exception=True,
        default=None,
        check_config_table=True,
        session=None,
        use_cache=True,
        expiration_time=900,
):
    """
    Return the boolean value for a given option in a section

    :param section: the named section.
    :param option: the named option.
    :param raise_exception: Boolean to raise or not NoOptionError, NoSectionError or RuntimeError.
    :param default: the default value if not found.
    :param check_config_table: if not set, avoid looking at config table even if it is called from server/daemon
    :param session: The database session in use. Only used if not found in config file and if it is called from
                    server/daemon
    :param use_cache: Boolean if the cache should be used. Only used if not found in config file and if it is called
                      from server/daemon
    :param expiration_time: Time after that the cached value gets ignored. Only used if not found in config file and if
                            it is called from server/daemon
    :returns: the configuration value.
    :raises NoOptionError
    :raises NoSectionError
    :raises RuntimeError
    :raises ValueError
    """
    return config_get(
        section,
        option,
        raise_exception=raise_exception,
        default=default,
        check_config_table=check_config_table,
        session=session,
        # Bug fix: was hard-coded to True, silently ignoring use_cache=False.
        use_cache=use_cache,
        expiration_time=expiration_time,
        convert_type_fnc=_convert_to_boolean,
    )
# Typed overloads: refine the static return type; the implementation below
# handles every combination at runtime.
@overload
def config_get_list(
        section: str,
        option: str,
        *,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> list[str]:
    ...


@overload
def config_get_list(
        section: str,
        option: str,
        *,
        default: list[str] = ...,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> list[str]:
    ...


@overload
def config_get_list(
        section: str,
        option: str,
        raise_exception,
        default: _T = ...,
        *,
        check_config_table: bool = ...,
        session: "Optional[Session]" = ...,
        use_cache: bool = ...,
        expiration_time: int = ...,
) -> Union[list[str], _T]:
    ...


def config_get_list(
        section,
        option,
        raise_exception=True,
        default=None,
        check_config_table=True,
        session=None,
        use_cache=True,
        expiration_time=900,
):
    """
    Return a list for a given option in a section

    :param section: the named section.
    :param option: the named option.
    :param raise_exception: Boolean to raise or not NoOptionError, NoSectionError or RuntimeError.
    :param default: the default value if not found.
    :param check_config_table: if not set, avoid looking at config table even if it is called from server/daemon
    :param session: The database session in use. Only used if not found in config file and if it is called from
                    server/daemon
    :param use_cache: Boolean if the cache should be used. Only used if not found in config file and if it is called
                      from server/daemon
    :param expiration_time: Time after that the cached value gets ignored. Only used if not found in config file and if
                            it is called from server/daemon
    :returns: the configuration value.
    :raises NoOptionError
    :raises NoSectionError
    :raises RuntimeError
    :raises ValueError
    """
    value = config_get(
        section,
        option,
        raise_exception=raise_exception,
        default=default,
        check_config_table=check_config_table,
        session=session,
        # Bug fix: was hard-coded to True, silently ignoring use_cache=False.
        use_cache=use_cache,
        expiration_time=expiration_time,
    )
    # A value coming from the config file/table is a comma-separated string;
    # a default supplied by the caller may already be a list.
    if isinstance(value, str):
        value = __convert_string_to_list(value)
    return value
def __convert_string_to_list(string: str) -> list[str]:
    """
    Convert a comma separated string to a list

    :param string: The input string.
    :returns: A list extracted from the string.
    """
    stripped = string.strip() if string else ''
    if not stripped:
        return []
    return [piece.strip(' ') for piece in string.split(',')]
def __config_get_table(
        section: str,
        option: str,
        *,
        raise_exception: bool = True,
        default: _T = None,
        clean_cached: bool = True,
        session: "Optional[Session]" = None,
        use_cache: bool = True,
        expiration_time: int = 900,
        convert_type_fnc: Optional[Callable[[str], _T]],
) -> _T:
    """
    Search for a section-option configuration parameter in the configuration table

    :param section: the named section.
    :param option: the named option.
    :param raise_exception: Boolean to raise or not ConfigNotFound.
    :param default: the default value if not found.
    :param session: The database session in use.
    :param use_cache: Boolean if the cache should be used.
    :param expiration_time: Time after that the cached value gets ignored.
    :returns: the configuration value from the config table.
    :raises ConfigNotFound
    :raises DatabaseException
    """
    global __CONFIG
    try:
        # Imported lazily: rucio.core is only present on server/daemon installs,
        # so the import itself can fail on client-only installations.
        from rucio.core.config import get as core_config_get
        return core_config_get(section, option, default=default, session=session, use_cache=use_cache,
                               expiration_time=expiration_time, convert_type_fnc=convert_type_fnc)
    except (ConfigNotFound, DatabaseException, ImportError) as err:
        if raise_exception and default is None:
            raise err
        if clean_cached:
            # Drop the cached file-config singleton so a later call re-reads it.
            __CONFIG = None
        return default
def config_get_options(section: str) -> list[str]:
    """Return all options from a given section"""
    parser = get_config()
    return parser.options(section)
def config_get_items(section: str) -> list[tuple[str, str]]:
    """Return all (name, value) pairs from a given section"""
    parser = get_config()
    return parser.items(section)
def config_remove_option(section: str, option: str) -> bool:
    """
    Remove the specified option from a given section.

    :param section: Name of section in the Rucio config.
    :param option: Name of option to remove from Rucio configuration.
    :returns: True if the option existed in the configuration, False otherwise.
    :raises NoSectionError: If the section does not exist.
    """
    parser = get_config()
    return parser.remove_option(section, option)
def config_set(section: str, option: str, value: str):
    """
    Set a configuration option in a given section.

    :param section: Name of section in the Rucio config.
    :param option: Name of option to set in the Rucio configuration.
    :param value: New value for the option.
    :raises NoSectionError: If the section does not exist.
    """
    parser = get_config()
    return parser.set(section, option, value)
def get_config_dirs():
    """
    Returns all available configuration directories in order:
     - $RUCIO_HOME/etc/
     - $VIRTUAL_ENV/etc/
     - /opt/rucio/etc/
    """
    dirs = []
    # Environment-based locations take precedence, in this order.
    for env_var in ('RUCIO_HOME', 'VIRTUAL_ENV'):
        base = os.environ.get(env_var)
        if base is not None:
            dirs.append('%s/etc/' % base)
    # Hard-coded system-wide fallback is always last.
    dirs.append('/opt/rucio/etc/')
    return dirs
def get_lfn2pfn_algorithm_default():
    """Returns the default algorithm name for LFN2PFN translation for this server."""
    try:
        return config_get('policy', 'lfn2pfn_algorithm_default')
    except (configparser.NoOptionError, configparser.NoSectionError, RuntimeError):
        # No explicit configuration: fall back to the built-in hash algorithm.
        return "hash"
def get_rse_credentials(path_to_credentials_file: Optional[Union[str, os.PathLike]] = None):
    """
    Returns credentials for RSEs.

    :param path_to_credentials_file: optional explicit path to the credentials file.
    :returns: dict of credentials parsed from the JSON file.
    :raises ErrorLoadingCredentials: if the file cannot be found, opened or parsed.
    """
    if path_to_credentials_file:  # Use specific file for this connect
        path = path_to_credentials_file
    else:  # Use the first existing file, following get_config_dirs() precedence
        path = ''
        for confdir in get_config_dirs():
            candidate = os.path.join(confdir, 'rse-accounts.cfg')
            if os.path.exists(candidate):
                path = candidate
                # Bug fix: previously the loop kept scanning, so the LAST
                # matching directory won, contradicting the first-match
                # precedence used for rucio.cfg by the Config class.
                break
    try:
        # Load all user credentials
        with open(path) as cred_file:
            credentials = json.load(cred_file)
    except Exception as error:
        raise exception.ErrorLoadingCredentials(error)
    return credentials
# Module-level singleton holding the parsed configuration (created lazily).
__CONFIG = None


def get_config() -> configparser.ConfigParser:
    """Factory function for the configuration class. Returns the ConfigParser instance."""
    global __CONFIG
    if __CONFIG is None:
        # First access: locate and parse the config file (may raise RuntimeError).
        __CONFIG = Config()
    return __CONFIG.parser


def clean_cached_config():
    """Deletes the cached config singleton instance."""
    global __CONFIG
    __CONFIG = None
class Config:
    """
    The configuration class reading the config file on init, located by using
    get_config_dirs or the use of the RUCIO_CONFIG environment variable.
    """
    def __init__(self):
        self.parser = configparser.ConfigParser()
        # RUCIO_CONFIG overrides the search through the standard directories.
        if 'RUCIO_CONFIG' in os.environ:
            self.configfile = os.environ['RUCIO_CONFIG']
        else:
            configs = [os.path.join(confdir, 'rucio.cfg') for confdir in get_config_dirs()]
            # First existing candidate wins (RUCIO_HOME, VIRTUAL_ENV, /opt/rucio).
            self.configfile = next(iter(filter(os.path.exists, configs)), None)
            if self.configfile is None:
                raise RuntimeError('Could not load Rucio configuration file. '
                                   'Rucio looked in the following paths for a configuration file, in order:'
                                   '\n\t' + '\n\t'.join(configs))
        # parser.read returns the list of files successfully parsed; anything
        # else means the chosen file existed but could not be read or parsed.
        if not self.parser.read(self.configfile) == [self.configfile]:
            raise RuntimeError('Could not load Rucio configuration file. '
                               'Rucio tried loading the following configuration file:'
                               '\n\t' + self.configfile)
from rucio.common.exception import DIDError
class DID(object):
    """
    Class used to store a DID

    Given an object did of type DID
    scope is stored in did.scope
    name is stored in did.name
    """

    # Separator between scope and name in the canonical string form.
    SCOPE_SEPARATOR = ':'
    # Separator used when deriving an implicit scope from the name.
    IMPLICIT_SCOPE_SEPARATOR = '.'
    # Name prefixes that imply a scope, mapped to how many leading name
    # components form the implicit scope (e.g. 'user.jdoe.file' -> 'user.jdoe').
    IMPLICIT_SCOPE_TO_LEN = {'user': 2, 'group': 2}

    __slots__ = ['scope', 'name']

    def __init__(self, *args, **kwargs):
        """
        Constructs the DID object. Possible parameter combinations are:
            DID()
            DID('scope:name.did.str')
            DID('user.implicit.scope.in.name')
            DID('custom.scope', 'custom.name')
            DID(['list.scope', 'list.name'])
            DID(('tuple.scope', 'tuple.name'))
            DID({'scope': 'dict.scope', 'name': 'dict.name'})
            DID(scope='kw.scope')
            DID(name='kw.name')
            DID(name='user.kw.implicit.scope')
            DID(scope='kw.scope', name='kw.name')
            DID(did={'scope': 'kw.did.scope', 'name': 'kw.did.name'})
            DID(did=['kw.list.scope', 'kw.list.name'])
            DID(did=('kw.tuple.scope', 'kw.tuple.name'))
            DID('arg.scope', name='kwarg.name')
            DID('arg.name', scope='kwarg.scope')

        :raises DIDError: on any invalid argument combination or format.
        """
        self.scope = self.name = ''
        num_args = len(args)
        num_kwargs = len(kwargs)
        if (num_args + num_kwargs) > 2:
            raise DIDError('Constructor takes at most 2 arguments. Given number: {}'.format(num_args + num_kwargs))

        # Normalise all accepted input forms into 'did': a str, dict,
        # tuple/list pair, or DID instance.
        did = ''
        if num_args == 1:
            did = args[0]
            if num_kwargs == 1:
                if not isinstance(did, str):
                    # Bug fix: the two literal fragments previously joined
                    # without a separating space ("string typewhen ...").
                    raise DIDError('First argument of constructor is expected to be string type '
                                   'when keyword argument is given. Given type: {}'.format(type(did)))
                k, v = next(iter(kwargs.items()))
                if k == 'scope':
                    did = (v, did)
                elif k == 'name':
                    did = (did, v)
                else:
                    raise DIDError('Constructor got unexpected keyword argument: {}'.format(k))
        elif num_args == 0:
            # Either DID(did=...) or DID(scope=..., name=...) style.
            did = kwargs.get('did', kwargs)
        else:
            did = args

        if isinstance(did, dict):
            self.scope = did.get('scope', '')
            self.name = did.get('name', '')
            if not self.has_scope():
                self.update_implicit_scope()
        elif isinstance(did, (tuple, list)):
            if len(did) != 2:
                raise DIDError('Construction from tuple or list requires exactly 2 elements')
            self.scope = did[0]
            self.name = did[1]
        elif isinstance(did, str):
            did_parts = did.split(DID.SCOPE_SEPARATOR, 1)
            if len(did_parts) == 1:
                # No explicit scope: it must be derivable from the name.
                self.name = did
                self.update_implicit_scope()
                if not self.has_scope():
                    # Spelling fixed: was "non-splitable ... ambigious".
                    raise DIDError('Object construction from non-splittable string is ambiguous')
            else:
                self.scope = did_parts[0]
                self.name = did_parts[1]
        elif isinstance(did, DID):
            self.scope = did.scope
            self.name = did.name
        else:
            raise DIDError('Cannot build object from: {}'.format(type(did)))

        # Normalise away a single trailing slash on the name.
        if self.name.endswith('/'):
            self.name = self.name[:-1]

        if not self.is_valid_format():
            raise DIDError('Object has invalid format after construction: {}'.format(str(self)))

    def update_implicit_scope(self):
        """
        This method sets the scope if it is implicitly given in self.name
        """
        did_parts = self.name.split(DID.IMPLICIT_SCOPE_SEPARATOR)
        num_scope_parts = DID.IMPLICIT_SCOPE_TO_LEN.get(did_parts[0], 0)
        if num_scope_parts > 0:
            self.scope = '.'.join(did_parts[0:num_scope_parts])

    def is_valid_format(self):
        """
        Method to check if the stored DID has a valid format
        (neither part may contain the scope separator).

        :return: bool
        """
        if self.scope.count(DID.SCOPE_SEPARATOR) or self.name.count(DID.SCOPE_SEPARATOR):
            return False
        return True

    def has_scope(self):
        """
        Method to check if the scope part was set

        :return: bool
        """
        return len(self.scope) > 0

    def has_name(self):
        """
        Method to check if the name part was set

        :return: bool
        """
        return len(self.name) > 0

    def __str__(self):
        """
        Creates the string representation of self

        :return: string
        """
        if self.has_scope() and self.has_name():
            return '{}{}{}'.format(self.scope, DID.SCOPE_SEPARATOR, self.name)
        elif self.has_scope():
            return self.scope
        return self.name

    def __eq__(self, other):
        """
        Equality comparison with another object
        (strings and DID-convertible objects are accepted).

        :return: bool
        """
        if isinstance(other, str):
            return str(self) == other
        elif not isinstance(other, DID):
            try:
                other = DID(other)
            except DIDError:
                return False
        return self.scope == other.scope and self.name == other.name

    def __ne__(self, other):
        """
        Inequality comparison with another object

        :return: bool
        """
        return not self.__eq__(other)

    def __hash__(self):
        """
        Uses the string representation of self to create a hash

        :return: int
        """
        return hash(str(self))
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
# JSON-schema building blocks used by validate_schema() below.  Primitive
# field schemas come first; composite schemas reference them by name.
# -- Accounts -----------------------------------------------------------------
ACCOUNT_LENGTH = 25  # maximum number of characters in an account name
ACCOUNT = {"description": "Account name",
           "type": "string",
           "pattern": "^[a-z0-9-_]{1,%s}$" % ACCOUNT_LENGTH}
ACCOUNTS = {"description": "Array of accounts",
            "type": "array",
            "items": ACCOUNT,
            "minItems": 0,
            "maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
                "type": "string",
                "enum": ["USER", "GROUP", "SERVICE"]}
# -- Transfer activities (closed set for this policy package) -----------------
ACTIVITY = {"description": "Activity name",
            "type": "string",
            "enum": ["Functional Test XrootD", "Functional Test WebDAV", "Stress Test"]}
# -- Scope and DID name -------------------------------------------------------
SCOPE_LENGTH = 25  # maximum number of characters in a scope name
SCOPE = {"description": "Scope name",
         "type": "string",
         "pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % SCOPE_LENGTH}
# Relaxed variant: only requires at least one word character somewhere.
R_SCOPE = {"description": "Scope name",
           "type": "string",
           "pattern": "\\w"}
NAME_LENGTH = 250
# NOTE(review): the pattern anchors one leading alphanumeric plus up to
# NAME_LENGTH more characters, so the effective maximum is NAME_LENGTH + 1.
NAME = {"description": "Data Identifier name",
        "type": "string",
        "pattern": "^[A-Za-z0-9][A-Za-z0-9\\.\\-\\_]{1,%s}$" % NAME_LENGTH}
R_NAME = {"description": "Data Identifier name",
          "type": "string",
          "pattern": "\\w"}
# -- Replication-rule option fields -------------------------------------------
LOCKED = {"description": "Rule locked status",
          "type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
                "type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
                "type": ["boolean", "null"]}
DELAY_INJECTION = {"description": "Time (in seconds) to wait before starting applying the rule. Implies asynchronous rule creation.",
                   "type": ["integer", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
                  "type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
                       "type": "boolean"}
# -- RSE (Rucio Storage Element) fields ---------------------------------------
RSE = {"description": "RSE name",
       "type": "string",
       "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
                 "type": "string",
                 "pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
                         "type": "string",
                         "pattern": r'([A-Z0-9]+([_-][A-Z0-9]+)*)'}
# -- Replica / DID metadata primitives ----------------------------------------
REPLICA_STATE = {"description": "Replica state",
                 "type": "string",
                 "enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
# RFC-1123 style date string, e.g. "Mon, 13 May 2013 10:23:03 UTC".
DATE = {"description": "Date",
        "type": "string",
        "pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
            "type": "string",
            "enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
            "type": ["string", "null"],
            "enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
          "type": ["string", "null"],
          "enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
           "type": ["string", "null"],
           "maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
            "type": ["string", "null"],
            "maxLength": 3999}
BYTES = {"description": "Size in bytes",
         "type": "integer"}
ADLER32 = {"description": "adler32",
           "type": "string",
           "pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
          "type": ["string", "null"]}
MD5 = {"description": "md5",
       "type": "string",
       "pattern": "^[a-fA-F\\d]{32}$"}
UUID = {"description": "Universally Unique Identifier (UUID)",
        "type": "string",
        "pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
        "type": "object",
        "properties": {"guid": UUID},
        "additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
            "type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
                   "type": ["boolean", "null"]}
# -- Composite schemas --------------------------------------------------------
RULE = {"description": "Replication rule",
        "type": "object",
        "properties": {"dids": {"type": "array"},
                       "account": ACCOUNT,
                       "copies": COPIES,
                       "rse_expression": RSE_EXPRESSION,
                       "grouping": GROUPING,
                       "weight": WEIGHT,
                       "lifetime": RULE_LIFETIME,
                       "locked": LOCKED,
                       "subscription_id": SUBSCRIPTION_ID,
                       "source_replica_expression": SOURCE_REPLICA_EXPRESSION,
                       "activity": ACTIVITY,
                       "notify": NOTIFY,
                       "purge_replicas": PURGE_REPLICAS,
                       "ignore_availability": IGNORE_AVAILABILITY,
                       "comment": COMMENT,
                       "ask_approval": ASK_APPROVAL,
                       "asynchronous": ASYNCHRONOUS,
                       "delay_injection": DELAY_INJECTION,
                       "priority": PRIORITY,
                       'split_container': SPLIT_CONTAINER,
                       'meta': METADATA},
        "required": ["dids", "copies", "rse_expression"],
        "additionalProperties": False}
RULES = {"description": "Array of replication rules",
         "type": "array",
         "items": RULE,
         "minItems": 1,
         "maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
                   "type": "string",
                   "enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "type": COLLECTION_TYPE,
                             "meta": META,
                             "rules": RULES},
              "required": ["scope", "name", "type"],
              "additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
               "type": "array",
               "items": COLLECTION,
               "minItems": 1,
               "maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
       "type": "object",
       "properties": {"scope": SCOPE,
                      "name": NAME,
                      "type": DID_TYPE,
                      "meta": META,
                      "rules": RULES,
                      "bytes": BYTES,
                      "adler32": ADLER32,
                      "md5": MD5,
                      "state": REPLICA_STATE,
                      "pfn": PFN},
       "required": ["scope", "name"],
       "additionalProperties": False}
DID_FILTERS = {"description": "Array to filter DIDs by metadata",
               "type": "array",
               "additionalProperties": True}
# Relaxed DID variant using the permissive scope/name patterns.
R_DID = {"description": "Data Identifier(DID)",
         "type": "object",
         "properties": {"scope": R_SCOPE,
                        "name": R_NAME,
                        "type": DID_TYPE,
                        "meta": META,
                        "rules": RULES,
                        "bytes": BYTES,
                        "adler32": ADLER32,
                        "md5": MD5,
                        "state": REPLICA_STATE,
                        "pfn": PFN},
         "required": ["scope", "name"],
         "additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
        "type": "array",
        "items": DID,
        "minItems": 1,
        "maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
          "type": "array",
          "items": R_DID,
          "minItems": 1,
          "maxItems": 1000}
# NOTE: "Attachement" is a long-standing typo kept for compatibility.
ATTACHMENT = {"description": "Attachement",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "rse": {"description": "RSE name",
                                     "type": ["string", "null"],
                                     "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"},
                             "dids": DIDS},
              "required": ["dids"],
              "additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
               "type": "array",
               "items": ATTACHMENT,
               "minItems": 1,
               "maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
                       "properties": {"datatype": {"type": "array"},
                                      "prod_step": {"type": "array"},
                                      "stream_name": {"type": "array"},
                                      "project": {"type": "array"},
                                      "scope": {"type": "array"},
                                      "pattern": {"type": "string"},
                                      "excluded_pattern": {"type": "string"},
                                      "group": {"type": "string"},
                                      "provenance": {"type": "string"},
                                      "account": ACCOUNTS,
                                      "grouping": {"type": "string"},
                                      "split_rule": {"type": "boolean"}}}
# -- Rucio-cache replica operations -------------------------------------------
ADD_REPLICA_FILE = {"description": "add replica file",
                    "type": "object",
                    "properties": {"scope": SCOPE,
                                   "name": NAME,
                                   "bytes": BYTES,
                                   "adler32": ADLER32},
                    "required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
                     "type": "array",
                     "items": ADD_REPLICA_FILE,
                     "minItems": 1,
                     "maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
                      "type": "object",
                      "properties": {"files": ADD_REPLICA_FILES,
                                     "rse": RSE,
                                     "lifetime": LIFETIME,
                                     "operation": {"enum": ["add_replicas"]}},
                      "required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
                       "type": "object",
                       "properties": {"scope": SCOPE,
                                      "name": NAME},
                       "required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
                        "type": "array",
                        "items": DELETE_REPLICA_FILE,
                        "minItems": 1,
                        "maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
                         "type": "object",
                         "properties": {"files": DELETE_REPLICA_FILES,
                                        "rse": RSE,
                                        "operation": {"enum": ["delete_replicas"]}},
                         "required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
                     "properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
# NOTE(review): this is a raw string, so '\\' keeps both backslashes and the
# character class also admits a literal backslash — confirm that is intended.
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
                     "type": "string",
                     "pattern": r'^[a-zA-Z0-9-_\\/\\.]{1,30}$'}
SCOPE_NAME_REGEXP = '/(.*)/(.*)'
DISTANCE = {"description": "RSE distance",
            "type": "object",
            "properties": {
                "src_rse_id": {"type": "string"},
                "dest_rse_id": {"type": "string"},
                "ranking": {"type": "integer"}
            },
            "required": ["src_rse_id", "dest_rse_id", "ranking"],
            "additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
          "type": "object",
          "properties": {
              "rses": {
                  "type": "object"
              },
              "distances": {
                  "type": "object"
              }
          }}
# Registry mapping schema names (as passed to validate_schema) to schemas.
SCHEMAS = {'account': ACCOUNT,
           'account_type': ACCOUNT_TYPE,
           'activity': ACTIVITY,
           'name': NAME,
           'r_name': R_NAME,
           'rse': RSE,
           'rse_attribute': RSE_ATTRIBUTE,
           'scope': SCOPE,
           'r_scope': R_SCOPE,
           'did': DID,
           'did_filters': DID_FILTERS,
           'r_did': R_DID,
           'dids': DIDS,
           'rule': RULE,
           'r_dids': R_DIDS,
           'collection': COLLECTION,
           'collections': COLLECTIONS,
           'attachment': ATTACHMENT,
           'attachments': ATTACHMENTS,
           'subscription_filter': SUBSCRIPTION_FILTER,
           'cache_add_replicas': CACHE_ADD_REPLICAS,
           'cache_delete_replicas': CACHE_DELETE_REPLICAS,
           'account_attribute': ACCOUNT_ATTRIBUTE,
           'import': IMPORT}
def validate_schema(name, obj):
    """
    Validate an object against the named JSON schema.

    :param name: The json schema name (a key of SCHEMAS; unknown names fall
                 back to the empty schema, which accepts anything).
    :param obj: The object to validate. Falsy objects (None, '', {}, ...)
                are not validated at all.
    :raises InvalidObject: If the object does not conform to the schema.
    """
    # Fixes: stripped extraction residue fused onto the raise line, removed a
    # stale "unused variable" suppression (error IS used), and chained the
    # original jsonschema error for easier debugging.
    try:
        if obj:
            validate(obj, SCHEMAS.get(name, {}))
    except ValidationError as error:
        raise InvalidObject(f'Problem validating {name}: {error}') from error
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
# JSON-schema building blocks used by validate_schema() below (multi-VO
# variant).  Primitive field schemas come first; composites reference them.
# -- Accounts -----------------------------------------------------------------
ACCOUNT_LENGTH = 29
# NOTE(review): 4 characters are subtracted from the length budget —
# presumably reserved for a VO suffix in multi-VO mode; confirm.
ACCOUNT = {"description": "Account name",
           "type": "string",
           "pattern": "^[a-z0-9-_]{1,%s}$" % (ACCOUNT_LENGTH - 4)}
ACCOUNTS = {"description": "Array of accounts",
            "type": "array",
            "items": ACCOUNT,
            "minItems": 0,
            "maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
                "type": "string",
                "enum": ["USER", "GROUP", "SERVICE"]}
# -- Transfer activities (closed set for this policy package) -----------------
ACTIVITY = {"description": "Activity name",
            "type": "string",
            "enum": ["Data Brokering", "Data Consolidation", "Data Rebalancing",
                     "Debug", "Express", "Functional Test", "Group Subscriptions",
                     "Production Input", "Production Output",
                     "Analysis Input", "Analysis Output", "Staging",
                     "T0 Export", "T0 Tape", "Upload/Download (Job)",
                     "Upload/Download (User)", "User Subscriptions"]}
# -- Scope and DID name -------------------------------------------------------
SCOPE_LENGTH = 29
SCOPE = {"description": "Scope name",
         "type": "string",
         "pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % (SCOPE_LENGTH - 4)}
# Relaxed variant: only requires at least one word character somewhere.
R_SCOPE = {"description": "Scope name",
           "type": "string",
           "pattern": "\\w"}
NAME_LENGTH = 250
# NOTE(review): the pattern anchors one leading alphanumeric plus up to
# NAME_LENGTH more characters, so the effective maximum is NAME_LENGTH + 1.
NAME = {"description": "Data Identifier name",
        "type": "string",
        "pattern": "^[A-Za-z0-9][A-Za-z0-9\\.\\-\\_]{1,%s}$" % NAME_LENGTH}
R_NAME = {"description": "Data Identifier name",
          "type": "string",
          "pattern": "\\w"}
# -- Replication-rule option fields -------------------------------------------
LOCKED = {"description": "Rule locked status",
          "type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
                "type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
                "type": ["boolean", "null"]}
DELAY_INJECTION = {"description": "Time (in seconds) to wait before starting applying the rule. Implies asynchronous rule creation.",
                   "type": ["integer", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
                  "type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
                       "type": "boolean"}
# -- RSE (Rucio Storage Element) fields ---------------------------------------
RSE = {"description": "RSE name",
       "type": "string",
       "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
                 "type": "string",
                 "pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
                         "type": "string",
                         "pattern": r'([A-Z0-9]+([_-][A-Z0-9]+)*)'}
# -- Replica / DID metadata primitives ----------------------------------------
REPLICA_STATE = {"description": "Replica state",
                 "type": "string",
                 "enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
# RFC-1123 style date string, e.g. "Mon, 13 May 2013 10:23:03 UTC".
DATE = {"description": "Date",
        "type": "string",
        "pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
            "type": "string",
            "enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
            "type": ["string", "null"],
            "enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
          "type": ["string", "null"],
          "enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
           "type": ["string", "null"],
           "maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
            "type": ["string", "null"],
            "maxLength": 3999}
BYTES = {"description": "Size in bytes",
         "type": "integer"}
ADLER32 = {"description": "adler32",
           "type": "string",
           "pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
          "type": ["string", "null"]}
MD5 = {"description": "md5",
       "type": "string",
       "pattern": "^[a-fA-F\\d]{32}$"}
UUID = {"description": "Universally Unique Identifier (UUID)",
        "type": "string",
        "pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
        "type": "object",
        "properties": {"guid": UUID},
        "additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
            "type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
                   "type": ["boolean", "null"]}
# -- Composite schemas --------------------------------------------------------
RULE = {"description": "Replication rule",
        "type": "object",
        "properties": {"dids": {"type": "array"},
                       "account": ACCOUNT,
                       "copies": COPIES,
                       "rse_expression": RSE_EXPRESSION,
                       "grouping": GROUPING,
                       "weight": WEIGHT,
                       "lifetime": RULE_LIFETIME,
                       "locked": LOCKED,
                       "subscription_id": SUBSCRIPTION_ID,
                       "source_replica_expression": SOURCE_REPLICA_EXPRESSION,
                       "activity": ACTIVITY,
                       "notify": NOTIFY,
                       "purge_replicas": PURGE_REPLICAS,
                       "ignore_availability": IGNORE_AVAILABILITY,
                       "comment": COMMENT,
                       "ask_approval": ASK_APPROVAL,
                       "asynchronous": ASYNCHRONOUS,
                       "delay_injection": DELAY_INJECTION,
                       "priority": PRIORITY,
                       'split_container': SPLIT_CONTAINER,
                       'meta': METADATA},
        "required": ["dids", "copies", "rse_expression"],
        "additionalProperties": False}
RULES = {"description": "Array of replication rules",
         "type": "array",
         "items": RULE,
         "minItems": 1,
         "maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
                   "type": "string",
                   "enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "type": COLLECTION_TYPE,
                             "meta": META,
                             "rules": RULES},
              "required": ["scope", "name", "type"],
              "additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
               "type": "array",
               "items": COLLECTION,
               "minItems": 1,
               "maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
       "type": "object",
       "properties": {"scope": SCOPE,
                      "name": NAME,
                      "type": DID_TYPE,
                      "meta": META,
                      "rules": RULES,
                      "bytes": BYTES,
                      "adler32": ADLER32,
                      "md5": MD5,
                      "state": REPLICA_STATE,
                      "pfn": PFN},
       "required": ["scope", "name"],
       "additionalProperties": False}
DID_FILTERS = {"description": "Array to filter DIDs by metadata",
               "type": "array",
               "additionalProperties": True}
# Relaxed DID variant using the permissive scope/name patterns.
R_DID = {"description": "Data Identifier(DID)",
         "type": "object",
         "properties": {"scope": R_SCOPE,
                        "name": R_NAME,
                        "type": DID_TYPE,
                        "meta": META,
                        "rules": RULES,
                        "bytes": BYTES,
                        "adler32": ADLER32,
                        "md5": MD5,
                        "state": REPLICA_STATE,
                        "pfn": PFN},
         "required": ["scope", "name"],
         "additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
        "type": "array",
        "items": DID,
        "minItems": 1,
        "maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
          "type": "array",
          "items": R_DID,
          "minItems": 1,
          "maxItems": 1000}
# NOTE: "Attachement" is a long-standing typo kept for compatibility.
ATTACHMENT = {"description": "Attachement",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "rse": {"description": "RSE name",
                                     "type": ["string", "null"],
                                     "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"},
                             "dids": DIDS},
              "required": ["dids"],
              "additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
               "type": "array",
               "items": ATTACHMENT,
               "minItems": 1,
               "maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
                       "properties": {"datatype": {"type": "array"},
                                      "prod_step": {"type": "array"},
                                      "stream_name": {"type": "array"},
                                      "project": {"type": "array"},
                                      "scope": {"type": "array"},
                                      "pattern": {"type": "string"},
                                      "excluded_pattern": {"type": "string"},
                                      "group": {"type": "string"},
                                      "provenance": {"type": "string"},
                                      "account": ACCOUNTS,
                                      "grouping": {"type": "string"},
                                      "split_rule": {"type": "boolean"}}}
# -- Rucio-cache replica operations -------------------------------------------
ADD_REPLICA_FILE = {"description": "add replica file",
                    "type": "object",
                    "properties": {"scope": SCOPE,
                                   "name": NAME,
                                   "bytes": BYTES,
                                   "adler32": ADLER32},
                    "required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
                     "type": "array",
                     "items": ADD_REPLICA_FILE,
                     "minItems": 1,
                     "maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
                      "type": "object",
                      "properties": {"files": ADD_REPLICA_FILES,
                                     "rse": RSE,
                                     "lifetime": LIFETIME,
                                     "operation": {"enum": ["add_replicas"]}},
                      "required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
                       "type": "object",
                       "properties": {"scope": SCOPE,
                                      "name": NAME},
                       "required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
                        "type": "array",
                        "items": DELETE_REPLICA_FILE,
                        "minItems": 1,
                        "maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
                         "type": "object",
                         "properties": {"files": DELETE_REPLICA_FILES,
                                        "rse": RSE,
                                        "operation": {"enum": ["delete_replicas"]}},
                         "required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
                     "properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
# NOTE(review): this is a raw string, so '\\' keeps both backslashes and the
# character class also admits a literal backslash — confirm that is intended.
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
                     "type": "string",
                     "pattern": r'^[a-zA-Z0-9-_\\/\\.]{1,30}$'}
SCOPE_NAME_REGEXP = '/(.*)/(.*)'
DISTANCE = {"description": "RSE distance",
            "type": "object",
            "properties": {
                "src_rse_id": {"type": "string"},
                "dest_rse_id": {"type": "string"},
                "ranking": {"type": "integer"}
            },
            "required": ["src_rse_id", "dest_rse_id", "ranking"],
            "additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
          "type": "object",
          "properties": {
              "rses": {
                  "type": "object"
              },
              "distances": {
                  "type": "object"
              }
          }}
# Three-character VO tag (or empty) for multi-VO deployments.
VO = {"description": "VO tag",
      "type": "string",
      "pattern": "^([a-zA-Z_\\-.0-9]{3})?$"}
# Registry mapping schema names (as passed to validate_schema) to schemas.
SCHEMAS = {'account': ACCOUNT,
           'account_type': ACCOUNT_TYPE,
           'activity': ACTIVITY,
           'name': NAME,
           'r_name': R_NAME,
           'rse': RSE,
           'rse_attribute': RSE_ATTRIBUTE,
           'scope': SCOPE,
           'r_scope': R_SCOPE,
           'did': DID,
           'did_filters': DID_FILTERS,
           'r_did': R_DID,
           'dids': DIDS,
           'rule': RULE,
           'r_dids': R_DIDS,
           'collection': COLLECTION,
           'collections': COLLECTIONS,
           'attachment': ATTACHMENT,
           'attachments': ATTACHMENTS,
           'subscription_filter': SUBSCRIPTION_FILTER,
           'cache_add_replicas': CACHE_ADD_REPLICAS,
           'cache_delete_replicas': CACHE_DELETE_REPLICAS,
           'account_attribute': ACCOUNT_ATTRIBUTE,
           'import': IMPORT,
           'vo': VO}
def validate_schema(name, obj):
    """
    Validate an object against the named JSON schema.

    :param name: The json schema name (a key of SCHEMAS; unknown names fall
                 back to the empty schema, which accepts anything).
    :param obj: The object to validate. Falsy objects (None, '', {}, ...)
                are not validated at all.
    :raises InvalidObject: If the object does not conform to the schema.
    """
    # Fixes: stripped extraction residue fused onto the raise line, removed a
    # stale "unused variable" suppression (error IS used), and chained the
    # original jsonschema error for easier debugging.
    try:
        if obj:
            validate(obj, SCHEMAS.get(name, {}))
    except ValidationError as error:
        raise InvalidObject(f'Problem validating {name}: {error}') from error
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
# JSON-schema building blocks used by validate_schema() below (ATLAS-style
# activity list).  Primitive field schemas come first; composites reference
# them by name.
# -- Accounts -----------------------------------------------------------------
ACCOUNT_LENGTH = 25  # maximum number of characters in an account name
ACCOUNT = {"description": "Account name",
           "type": "string",
           "pattern": "^[a-z0-9-_]{1,%s}$" % ACCOUNT_LENGTH}
ACCOUNTS = {"description": "Array of accounts",
            "type": "array",
            "items": ACCOUNT,
            "minItems": 0,
            "maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
                "type": "string",
                "enum": ["USER", "GROUP", "SERVICE"]}
# -- Transfer activities (closed set for this policy package) -----------------
ACTIVITY = {"description": "Activity name",
            "type": "string",
            "enum": ["Data Brokering", "Data Consolidation", "Data Rebalancing",
                     "Debug", "Express", "Functional Test", "Functional Test XrootD",
                     "Functional Test WebDAV", "Group Subscriptions",
                     "Production Input", "Production Output",
                     "Analysis Input", "Analysis Output", "Staging",
                     "T0 Export", "T0 Tape", "Upload/Download (Job)",
                     "Upload/Download (User)", "User Subscriptions",
                     "Globus Online Test", "Data Challenge"]}
# -- Scope and DID name -------------------------------------------------------
SCOPE_LENGTH = 25  # maximum number of characters in a scope name
SCOPE = {"description": "Scope name",
         "type": "string",
         "pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % SCOPE_LENGTH}
# Relaxed variant: only requires at least one word character somewhere.
R_SCOPE = {"description": "Scope name",
           "type": "string",
           "pattern": "\\w"}
NAME_LENGTH = 250
# NOTE(review): the pattern anchors one leading alphanumeric plus up to
# NAME_LENGTH more characters, so the effective maximum is NAME_LENGTH + 1.
NAME = {"description": "Data Identifier name",
        "type": "string",
        "pattern": "^[A-Za-z0-9][A-Za-z0-9\\.\\-\\_]{1,%s}$" % NAME_LENGTH}
R_NAME = {"description": "Data Identifier name",
          "type": "string",
          "pattern": "\\w"}
# -- Replication-rule option fields -------------------------------------------
LOCKED = {"description": "Rule locked status",
          "type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
                "type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
                "type": ["boolean", "null"]}
DELAY_INJECTION = {"description": "Time (in seconds) to wait before starting applying the rule. Implies asynchronous rule creation.",
                   "type": ["integer", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
                  "type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
                       "type": "boolean"}
# -- RSE (Rucio Storage Element) fields ---------------------------------------
RSE = {"description": "RSE name",
       "type": "string",
       "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
                 "type": "string",
                 "pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
                         "type": "string",
                         "pattern": r'([A-Z0-9]+([_-][A-Z0-9]+)*)'}
# -- Replica / DID metadata primitives ----------------------------------------
REPLICA_STATE = {"description": "Replica state",
                 "type": "string",
                 "enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
# RFC-1123 style date string, e.g. "Mon, 13 May 2013 10:23:03 UTC".
DATE = {"description": "Date",
        "type": "string",
        "pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
            "type": "string",
            "enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
            "type": ["string", "null"],
            "enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
          "type": ["string", "null"],
          "enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
           "type": ["string", "null"],
           "maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
            "type": ["string", "null"],
            "maxLength": 3999}
BYTES = {"description": "Size in bytes",
         "type": "integer"}
ADLER32 = {"description": "adler32",
           "type": "string",
           "pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
          "type": ["string", "null"]}
MD5 = {"description": "md5",
       "type": "string",
       "pattern": "^[a-fA-F\\d]{32}$"}
UUID = {"description": "Universally Unique Identifier (UUID)",
        "type": "string",
        "pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
        "type": "object",
        "properties": {"guid": UUID},
        "additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
            "type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
                   "type": ["boolean", "null"]}
# -- Composite schemas --------------------------------------------------------
RULE = {"description": "Replication rule",
        "type": "object",
        "properties": {"dids": {"type": "array"},
                       "account": ACCOUNT,
                       "copies": COPIES,
                       "rse_expression": RSE_EXPRESSION,
                       "grouping": GROUPING,
                       "weight": WEIGHT,
                       "lifetime": RULE_LIFETIME,
                       "locked": LOCKED,
                       "subscription_id": SUBSCRIPTION_ID,
                       "source_replica_expression": SOURCE_REPLICA_EXPRESSION,
                       "activity": ACTIVITY,
                       "notify": NOTIFY,
                       "purge_replicas": PURGE_REPLICAS,
                       "ignore_availability": IGNORE_AVAILABILITY,
                       "comment": COMMENT,
                       "ask_approval": ASK_APPROVAL,
                       "asynchronous": ASYNCHRONOUS,
                       "delay_injection": DELAY_INJECTION,
                       "priority": PRIORITY,
                       'split_container': SPLIT_CONTAINER,
                       'meta': METADATA},
        "required": ["dids", "copies", "rse_expression"],
        "additionalProperties": False}
RULES = {"description": "Array of replication rules",
         "type": "array",
         "items": RULE,
         "minItems": 1,
         "maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
                   "type": "string",
                   "enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "type": COLLECTION_TYPE,
                             "meta": META,
                             "rules": RULES},
              "required": ["scope", "name", "type"],
              "additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
               "type": "array",
               "items": COLLECTION,
               "minItems": 1,
               "maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
       "type": "object",
       "properties": {"scope": SCOPE,
                      "name": NAME,
                      "type": DID_TYPE,
                      "meta": META,
                      "rules": RULES,
                      "bytes": BYTES,
                      "adler32": ADLER32,
                      "md5": MD5,
                      "state": REPLICA_STATE,
                      "pfn": PFN},
       "required": ["scope", "name"],
       "additionalProperties": False}
DID_FILTERS = {"description": "Array to filter DIDs by metadata",
               "type": "array",
               "additionalProperties": True}
# Relaxed DID variant using the permissive scope/name patterns.
R_DID = {"description": "Data Identifier(DID)",
         "type": "object",
         "properties": {"scope": R_SCOPE,
                        "name": R_NAME,
                        "type": DID_TYPE,
                        "meta": META,
                        "rules": RULES,
                        "bytes": BYTES,
                        "adler32": ADLER32,
                        "md5": MD5,
                        "state": REPLICA_STATE,
                        "pfn": PFN},
         "required": ["scope", "name"],
         "additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
        "type": "array",
        "items": DID,
        "minItems": 1,
        "maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
          "type": "array",
          "items": R_DID,
          "minItems": 1,
          "maxItems": 1000}
# NOTE: "Attachement" is a long-standing typo kept for compatibility.
ATTACHMENT = {"description": "Attachement",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "rse": {"description": "RSE name",
                                     "type": ["string", "null"],
                                     "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"},
                             "dids": DIDS},
              "required": ["dids"],
              "additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
               "type": "array",
               "items": ATTACHMENT,
               "minItems": 1,
               "maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
                       "properties": {"datatype": {"type": "array"},
                                      "prod_step": {"type": "array"},
                                      "stream_name": {"type": "array"},
                                      "project": {"type": "array"},
                                      "scope": {"type": "array"},
                                      "pattern": {"type": "string"},
                                      "excluded_pattern": {"type": "string"},
                                      "group": {"type": "string"},
                                      "provenance": {"type": "string"},
                                      "account": ACCOUNTS,
                                      "grouping": {"type": "string"},
                                      "split_rule": {"type": "boolean"}}}
# -- Rucio-cache replica operations -------------------------------------------
ADD_REPLICA_FILE = {"description": "add replica file",
                    "type": "object",
                    "properties": {"scope": SCOPE,
                                   "name": NAME,
                                   "bytes": BYTES,
                                   "adler32": ADLER32},
                    "required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
                     "type": "array",
                     "items": ADD_REPLICA_FILE,
                     "minItems": 1,
                     "maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
                      "type": "object",
                      "properties": {"files": ADD_REPLICA_FILES,
                                     "rse": RSE,
                                     "lifetime": LIFETIME,
                                     "operation": {"enum": ["add_replicas"]}},
                      "required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
                       "type": "object",
                       "properties": {"scope": SCOPE,
                                      "name": NAME},
                       "required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
                        "type": "array",
                        "items": DELETE_REPLICA_FILE,
                        "minItems": 1,
                        "maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
                         "type": "object",
                         "properties": {"files": DELETE_REPLICA_FILES,
                                        "rse": RSE,
                                        "operation": {"enum": ["delete_replicas"]}},
                         "required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
                     "properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
# NOTE(review): this is a raw string, so '\\' keeps both backslashes and the
# character class also admits a literal backslash — confirm that is intended.
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
                     "type": "string",
                     "pattern": r'^[a-zA-Z0-9-_\\/\\.]{1,30}$'}
SCOPE_NAME_REGEXP = '/(.*)/(.*)'
DISTANCE = {"description": "RSE distance",
            "type": "object",
            "properties": {
                "src_rse_id": {"type": "string"},
                "dest_rse_id": {"type": "string"},
                "ranking": {"type": "integer"}
            },
            "required": ["src_rse_id", "dest_rse_id", "ranking"],
            "additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
          "type": "object",
          "properties": {
              "rses": {
                  "type": "object"
              },
              "distances": {
                  "type": "object"
              }
          }}
# Three-character VO tag (or empty) for multi-VO deployments.
VO = {"description": "VO tag",
      "type": "string",
      "pattern": "^([a-zA-Z_\\-.0-9]{3})?$"}
# Registry mapping schema names (as passed to validate_schema) to schemas.
SCHEMAS = {'account': ACCOUNT,
           'account_type': ACCOUNT_TYPE,
           'activity': ACTIVITY,
           'name': NAME,
           'r_name': R_NAME,
           'rse': RSE,
           'rse_attribute': RSE_ATTRIBUTE,
           'scope': SCOPE,
           'r_scope': R_SCOPE,
           'did': DID,
           'did_filters': DID_FILTERS,
           'r_did': R_DID,
           'dids': DIDS,
           'rule': RULE,
           'r_dids': R_DIDS,
           'collection': COLLECTION,
           'collections': COLLECTIONS,
           'attachment': ATTACHMENT,
           'attachments': ATTACHMENTS,
           'subscription_filter': SUBSCRIPTION_FILTER,
           'cache_add_replicas': CACHE_ADD_REPLICAS,
           'cache_delete_replicas': CACHE_DELETE_REPLICAS,
           'account_attribute': ACCOUNT_ATTRIBUTE,
           'import': IMPORT,
           'vo': VO}
def validate_schema(name, obj):
    """
    Validate an object against the named JSON schema.

    :param name: The json schema name (a key of SCHEMAS; unknown names fall
                 back to the empty schema, which accepts anything).
    :param obj: The object to validate. Falsy objects (None, '', {}, ...)
                are not validated at all.
    :raises InvalidObject: If the object does not conform to the schema.
    """
    # Fixes: stripped extraction residue fused onto the raise line, removed a
    # stale "unused variable" suppression (error IS used), and chained the
    # original jsonschema error for easier debugging.
    try:
        if obj:
            validate(obj, SCHEMAS.get(name, {}))
    except ValidationError as error:
        raise InvalidObject(f'Problem validating {name}: {error}') from error
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
# --- primitive jsonschema building blocks (escape VO policy) -------------
ACCOUNT_LENGTH = 25
ACCOUNT = {"description": "Account name",
           "type": "string",
           "pattern": "^[a-zA-Z0-9-_]{1,%s}$" % ACCOUNT_LENGTH}
ACCOUNTS = {"description": "Array of accounts",
            "type": "array",
            "items": ACCOUNT,
            "minItems": 0,
            "maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
                "type": "string",
                "enum": ["USER", "GROUP", "SERVICE"]}
ACTIVITY = {"description": "Activity name",
            "type": "string",
            "enum": ["Data Brokering", "Data Consolidation", "Data rebalancing",
                     "Debug", "Express", "Functional Test", "Group Subscriptions",
                     "Production Input", "Production Output",
                     "Analysis Input", "Analysis Output", "Staging",
                     "T0 Export", "T0 Tape", "Upload/Download (Job)",
                     "Upload/Download (User)", "User Subscriptions",
                     "Data Challenge", "DAC21"]}
SCOPE_LENGTH = 25
SCOPE = {"description": "Scope name",
         "type": "string",
         "pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % SCOPE_LENGTH}
# Relaxed "read" variant: only requires one word character somewhere.
R_SCOPE = {"description": "Scope name",
           "type": "string",
           "pattern": "\\w"}
NAME_LENGTH = 250
NAME = {"description": "Data Identifier name",
        "type": "string",
        "pattern": "^[A-Za-z0-9][A-Za-z0-9\\.\\-\\_]{1,%s}$" % NAME_LENGTH}
# Relaxed "read" variant of NAME, used for lookup-style operations.
R_NAME = {"description": "Data Identifier name",
          "type": "string",
          "pattern": "\\w"}
LOCKED = {"description": "Rule locked status",
          "type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
                "type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
                "type": ["boolean", "null"]}
DELAY_INJECTION = {"description": "Time (in seconds) to wait before starting applying the rule. Implies asynchronous rule creation.",
                   "type": ["integer", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
                  "type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
                       "type": "boolean"}
RSE = {"description": "RSE name",
       "type": "string",
       "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
                 "type": "string",
                 "pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
                         "type": "string",
                         "pattern": r'([A-Z0-9]+([_-][A-Z0-9]+)*)'}
# Long and one-letter state codes are both accepted.
REPLICA_STATE = {"description": "Replica state",
                 "type": "string",
                 "enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
# RFC 1123 style date string, e.g. 'Mon, 13 May 2013 10:23:03 UTC'.
DATE = {"description": "Date",
        "type": "string",
        "pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
            "type": "string",
            "enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
            "type": ["string", "null"],
            "enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
          "type": ["string", "null"],
          "enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
           "type": ["string", "null"],
           "maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
            "type": ["string", "null"],
            "maxLength": 3999}
BYTES = {"description": "Size in bytes",
         "type": "integer"}
ADLER32 = {"description": "adler32",
           "type": "string",
           "pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
          "type": ["string", "null"]}
MD5 = {"description": "md5",
       "type": "string",
       "pattern": "^[a-fA-F\\d]{32}$"}
# Accepts both braced and hyphen-less UUID spellings.
UUID = {"description": "Universally Unique Identifier (UUID)",
        "type": "string",
        "pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
        "type": "object",
        "properties": {"guid": UUID},
        "additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
            "type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
                   "type": ["boolean", "null"]}
TIME_ENTRY = {
    "description": "Datetime, ISO 8601",
    "type": "string",
    "pattern": r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d*$'
}
IP = {
    "description": "Internet Protocol address v4, RFC 791",
    "type": "string",
    "pattern": r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}$'
}
CLIENT_STATE = {
    "description": "Client state",
    "type": "string",
    "enum": ['DONE', 'FAILED', 'PROCESSING', 'ALREADY_DONE', 'FILE_NOT_FOUND', 'FOUND_IN_PCACHE', 'DOWNLOAD_ATTEMPT',
             'FAIL_VALIDATE', 'FOUND_ROOT']
}
# --- composite schemas for replication rules and data identifiers --------
RULE = {"description": "Replication rule",
        "type": "object",
        "properties": {"dids": {"type": "array"},
                       "account": ACCOUNT,
                       "copies": COPIES,
                       "rse_expression": RSE_EXPRESSION,
                       "grouping": GROUPING,
                       "weight": WEIGHT,
                       "lifetime": RULE_LIFETIME,
                       "locked": LOCKED,
                       "subscription_id": SUBSCRIPTION_ID,
                       "source_replica_expression": SOURCE_REPLICA_EXPRESSION,
                       "activity": ACTIVITY,
                       "notify": NOTIFY,
                       "purge_replicas": PURGE_REPLICAS,
                       "ignore_availability": IGNORE_AVAILABILITY,
                       "comment": COMMENT,
                       "ask_approval": ASK_APPROVAL,
                       "asynchronous": ASYNCHRONOUS,
                       "delay_injection": DELAY_INJECTION,
                       "priority": PRIORITY,
                       'split_container': SPLIT_CONTAINER,
                       'meta': METADATA},
        "required": ["dids", "copies", "rse_expression"],
        "additionalProperties": False}
RULES = {"description": "Array of replication rules",
         "type": "array",
         "items": RULE,
         "minItems": 1,
         "maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
                   "type": "string",
                   "enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "type": COLLECTION_TYPE,
                             "meta": META,
                             "rules": RULES},
              "required": ["scope", "name", "type"],
              "additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
               "type": "array",
               "items": COLLECTION,
               "minItems": 1,
               "maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
       "type": "object",
       "properties": {"scope": SCOPE,
                      "name": NAME,
                      "type": DID_TYPE,
                      "meta": META,
                      "rules": RULES,
                      "bytes": BYTES,
                      "adler32": ADLER32,
                      "md5": MD5,
                      "state": REPLICA_STATE,
                      "pfn": PFN},
       "required": ["scope", "name"],
       "additionalProperties": False}
DID_FILTERS = {"description": "Array to filter DIDs by metadata",
               "type": "array",
               "additionalProperties": True}
# "Read" variant of DID: relies on the relaxed R_SCOPE / R_NAME patterns.
R_DID = {"description": "Data Identifier(DID)",
         "type": "object",
         "properties": {"scope": R_SCOPE,
                        "name": R_NAME,
                        "type": DID_TYPE,
                        "meta": META,
                        "rules": RULES,
                        "bytes": BYTES,
                        "adler32": ADLER32,
                        "md5": MD5,
                        "state": REPLICA_STATE,
                        "pfn": PFN},
         "required": ["scope", "name"],
         "additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
        "type": "array",
        "items": DID,
        "minItems": 1,
        "maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
          "type": "array",
          "items": R_DID,
          "minItems": 1,
          "maxItems": 1000}
# --- attachment, subscription, cache-message and import schemas ----------
ATTACHMENT = {"description": "Attachement",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "rse": {"description": "RSE name",
                                     "type": ["string", "null"],
                                     "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"},
                             "dids": DIDS},
              "required": ["dids"],
              "additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
               "type": "array",
               "items": ATTACHMENT,
               "minItems": 1,
               "maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
                       "properties": {"datatype": {"type": "array"},
                                      "prod_step": {"type": "array"},
                                      "stream_name": {"type": "array"},
                                      "project": {"type": "array"},
                                      "scope": {"type": "array"},
                                      "pattern": {"type": "string"},
                                      "excluded_pattern": {"type": "string"},
                                      "group": {"type": "string"},
                                      "provenance": {"type": "string"},
                                      "account": ACCOUNTS,
                                      "grouping": {"type": "string"},
                                      "split_rule": {"type": "boolean"}}}
ADD_REPLICA_FILE = {"description": "add replica file",
                    "type": "object",
                    "properties": {"scope": SCOPE,
                                   "name": NAME,
                                   "bytes": BYTES,
                                   "adler32": ADLER32},
                    "required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
                     "type": "array",
                     "items": ADD_REPLICA_FILE,
                     "minItems": 1,
                     "maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
                      "type": "object",
                      "properties": {"files": ADD_REPLICA_FILES,
                                     "rse": RSE,
                                     "lifetime": LIFETIME,
                                     "operation": {"enum": ["add_replicas"]}},
                      "required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
                       "type": "object",
                       "properties": {"scope": SCOPE,
                                      "name": NAME},
                       "required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
                        "type": "array",
                        "items": DELETE_REPLICA_FILE,
                        "minItems": 1,
                        "maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
                         "type": "object",
                         "properties": {"files": DELETE_REPLICA_FILES,
                                        "rse": RSE,
                                        "operation": {"enum": ["delete_replicas"]}},
                         "required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
                     "properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
                     "type": "string",
                     # NOTE(review): raw string keeps the doubled backslashes, so a
                     # literal '\' is also accepted -- confirm intent upstream.
                     "pattern": r'^[a-zA-Z0-9-_\\/\\.]{1,30}$'}
# Regular expression used by the REST layer to split '/scope/name' paths.
SCOPE_NAME_REGEXP = '/(.*)/(.*)'
DISTANCE = {"description": "RSE distance",
            "type": "object",
            "properties": {
                "src_rse_id": {"type": "string"},
                "dest_rse_id": {"type": "string"},
                "ranking": {"type": "integer"}
            },
            "required": ["src_rse_id", "dest_rse_id", "ranking"],
            "additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
          "type": "object",
          "properties": {
              "rses": {
                  "type": "object"
              },
              "distances": {
                  "type": "object"
              }
          }}
# Registry mapping schema names (the keys accepted by validate_schema)
# to the jsonschema definitions declared above.
SCHEMAS = {'account': ACCOUNT,
           'account_type': ACCOUNT_TYPE,
           'activity': ACTIVITY,
           'name': NAME,
           'r_name': R_NAME,
           'rse': RSE,
           'rse_attribute': RSE_ATTRIBUTE,
           'scope': SCOPE,
           'r_scope': R_SCOPE,
           'did': DID,
           'did_filters': DID_FILTERS,
           'r_did': R_DID,
           'dids': DIDS,
           'rule': RULE,
           'r_dids': R_DIDS,
           'collection': COLLECTION,
           'collections': COLLECTIONS,
           'attachment': ATTACHMENT,
           'attachments': ATTACHMENTS,
           'subscription_filter': SUBSCRIPTION_FILTER,
           'cache_add_replicas': CACHE_ADD_REPLICAS,
           'cache_delete_replicas': CACHE_DELETE_REPLICAS,
           'account_attribute': ACCOUNT_ATTRIBUTE,
           'import': IMPORT}
def validate_schema(name, obj):
    """
    Validate an object against a named JSON schema.

    :param name: The json schema name (a key of SCHEMAS). Unknown names
                 fall back to the empty schema, which accepts anything.
    :param obj: The object to validate; falsy objects are skipped.
    :raises InvalidObject: if the object does not match the schema.
    """
    try:
        if obj:
            validate(obj, SCHEMAS.get(name, {}))
    except ValidationError as error:
        # Chain the underlying jsonschema error explicitly so its full
        # context survives in tracebacks ('error' is used here, so the
        # old unused-variable lint suppression was stale).
        raise InvalidObject(f'Problem validating {name}: {error}') from error
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
# --- primitive jsonschema building blocks (IceCube VO policy) ------------
ACCOUNT_LENGTH = 25
# IceCube restricts account names to lower-case characters.
ACCOUNT = {"description": "Account name",
           "type": "string",
           "pattern": "^[a-z0-9-_]{1,%s}$" % ACCOUNT_LENGTH}
ACCOUNTS = {"description": "Array of accounts",
            "type": "array",
            "items": ACCOUNT,
            "minItems": 0,
            "maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
                "type": "string",
                "enum": ["USER", "GROUP", "SERVICE"]}
ACTIVITY = {"description": "Activity name",
            "type": "string",
            "enum": ["Data Brokering", "Data Consolidation", "Data rebalancing",
                     "Debug", "Express", "Functional Test", "Group Subscriptions",
                     "Production Input", "Production Output",
                     "Analysis Input", "Analysis Output", "Staging",
                     "T0 Export", "T0 Tape", "Upload/Download (Job)",
                     "Upload/Download (User)", "User Subscriptions"]}
SCOPE_LENGTH = 25
SCOPE = {"description": "Scope name",
         "type": "string",
         "pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % SCOPE_LENGTH}
# Relaxed "read" variant: only requires one word character somewhere.
R_SCOPE = {"description": "Scope name",
           "type": "string",
           "pattern": "\\w"}
NAME_LENGTH = 500
# IceCube DID names are absolute-path-like (leading '/', may contain '/' and '#').
NAME = {"description": "IceCube Data Identifier name",
        "type": "string",
        "pattern": r"^\/[A-Za-z0-9][A-Za-z0-9\\.\\-\\_\/\#]{1,%s}$" % NAME_LENGTH}
# read name
R_NAME = NAME
LOCKED = {"description": "Rule locked status",
          "type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
                "type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
                "type": ["boolean", "null"]}
DELAY_INJECTION = {"description": "Time (in seconds) to wait before starting applying the rule. Implies asynchronous rule creation.",
                   "type": ["integer", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
                  "type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
                       "type": "boolean"}
RSE = {"description": "RSE name",
       "type": "string",
       "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
                 "type": "string",
                 "pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
                         "type": "string",
                         "pattern": r'([A-Z0-9]+([_-][A-Z0-9]+)*)'}
# Long and one-letter state codes are both accepted.
REPLICA_STATE = {"description": "Replica state",
                 "type": "string",
                 "enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
# RFC 1123 style date string, e.g. 'Mon, 13 May 2013 10:23:03 UTC'.
DATE = {"description": "Date",
        "type": "string",
        "pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
            "type": "string",
            "enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
            "type": ["string", "null"],
            "enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
          "type": ["string", "null"],
          "enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
           "type": ["string", "null"],
           "maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
            "type": ["string", "null"],
            "maxLength": 3999}
BYTES = {"description": "Size in bytes",
         "type": "integer"}
ADLER32 = {"description": "adler32",
           "type": "string",
           "pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
          "type": ["string", "null"]}
MD5 = {"description": "md5",
       "type": "string",
       "pattern": "^[a-fA-F\\d]{32}$"}
# Accepts both braced and hyphen-less UUID spellings.
UUID = {"description": "Universally Unique Identifier (UUID)",
        "type": "string",
        "pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
        "type": "object",
        "properties": {"guid": UUID},
        "additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
            "type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
                   "type": ["boolean", "null"]}
# --- composite schemas for replication rules and data identifiers --------
RULE = {"description": "Replication rule",
        "type": "object",
        "properties": {"dids": {"type": "array"},
                       "account": ACCOUNT,
                       "copies": COPIES,
                       "rse_expression": RSE_EXPRESSION,
                       "grouping": GROUPING,
                       "weight": WEIGHT,
                       "lifetime": RULE_LIFETIME,
                       "locked": LOCKED,
                       "subscription_id": SUBSCRIPTION_ID,
                       "source_replica_expression": SOURCE_REPLICA_EXPRESSION,
                       "activity": ACTIVITY,
                       "notify": NOTIFY,
                       "purge_replicas": PURGE_REPLICAS,
                       "ignore_availability": IGNORE_AVAILABILITY,
                       "comment": COMMENT,
                       "ask_approval": ASK_APPROVAL,
                       "asynchronous": ASYNCHRONOUS,
                       "delay_injection": DELAY_INJECTION,
                       "priority": PRIORITY,
                       'split_container': SPLIT_CONTAINER,
                       'meta': METADATA},
        "required": ["dids", "copies", "rse_expression"],
        "additionalProperties": False}
RULES = {"description": "Array of replication rules",
         "type": "array",
         "items": RULE,
         "minItems": 1,
         "maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
                   "type": "string",
                   "enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "type": COLLECTION_TYPE,
                             "meta": META,
                             "rules": RULES},
              "required": ["scope", "name", "type"],
              "additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
               "type": "array",
               "items": COLLECTION,
               "minItems": 1,
               "maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
       "type": "object",
       "properties": {"scope": SCOPE,
                      "name": NAME,
                      "type": DID_TYPE,
                      "meta": META,
                      "rules": RULES,
                      "bytes": BYTES,
                      "adler32": ADLER32,
                      "md5": MD5,
                      "state": REPLICA_STATE,
                      "pfn": PFN},
       "required": ["scope", "name"],
       "additionalProperties": False}
DID_FILTERS = {"description": "Array to filter DIDs by metadata",
               "type": "array",
               "additionalProperties": True}
# "Read" variant of DID: relies on the relaxed R_SCOPE / R_NAME patterns.
R_DID = {"description": "Data Identifier(DID)",
         "type": "object",
         "properties": {"scope": R_SCOPE,
                        "name": R_NAME,
                        "type": DID_TYPE,
                        "meta": META,
                        "rules": RULES,
                        "bytes": BYTES,
                        "adler32": ADLER32,
                        "md5": MD5,
                        "state": REPLICA_STATE,
                        "pfn": PFN},
         "required": ["scope", "name"],
         "additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
        "type": "array",
        "items": DID,
        "minItems": 1,
        "maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
          "type": "array",
          "items": R_DID,
          "minItems": 1,
          "maxItems": 1000}
# --- attachment, subscription, cache-message and import schemas ----------
ATTACHMENT = {"description": "Attachement",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "rse": {"description": "RSE name",
                                     "type": ["string", "null"],
                                     "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"},
                             "dids": DIDS},
              "required": ["dids"],
              "additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
               "type": "array",
               "items": ATTACHMENT,
               "minItems": 1,
               "maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
                       "properties": {"datatype": {"type": "array"},
                                      "prod_step": {"type": "array"},
                                      "stream_name": {"type": "array"},
                                      "project": {"type": "array"},
                                      "scope": {"type": "array"},
                                      "pattern": {"type": "string"},
                                      "excluded_pattern": {"type": "string"},
                                      "group": {"type": "string"},
                                      "provenance": {"type": "string"},
                                      "account": ACCOUNTS,
                                      "grouping": {"type": "string"},
                                      "split_rule": {"type": "boolean"}}}
ADD_REPLICA_FILE = {"description": "add replica file",
                    "type": "object",
                    "properties": {"scope": SCOPE,
                                   "name": NAME,
                                   "bytes": BYTES,
                                   "adler32": ADLER32},
                    "required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
                     "type": "array",
                     "items": ADD_REPLICA_FILE,
                     "minItems": 1,
                     "maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
                      "type": "object",
                      "properties": {"files": ADD_REPLICA_FILES,
                                     "rse": RSE,
                                     "lifetime": LIFETIME,
                                     "operation": {"enum": ["add_replicas"]}},
                      "required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
                       "type": "object",
                       "properties": {"scope": SCOPE,
                                      "name": NAME},
                       "required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
                        "type": "array",
                        "items": DELETE_REPLICA_FILE,
                        "minItems": 1,
                        "maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
                         "type": "object",
                         "properties": {"files": DELETE_REPLICA_FILES,
                                        "rse": RSE,
                                        "operation": {"enum": ["delete_replicas"]}},
                         "required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
                     "properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
                     "type": "string",
                     # NOTE(review): raw string keeps the doubled backslashes, so a
                     # literal '\' is also accepted -- confirm intent upstream.
                     "pattern": r'^[a-zA-Z0-9-_\\/\\.]{1,30}$'}
# Splits '/scope/name' at the first '/' only (lookahead), since IceCube
# DID names may themselves contain '/' characters.
SCOPE_NAME_REGEXP = '/([^/]*)(?=/)(.*)'
DISTANCE = {"description": "RSE distance",
            "type": "object",
            "properties": {
                "src_rse_id": {"type": "string"},
                "dest_rse_id": {"type": "string"},
                "ranking": {"type": "integer"}
            },
            "required": ["src_rse_id", "dest_rse_id", "ranking"],
            "additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
          "type": "object",
          "properties": {
              "rses": {
                  "type": "object"
              },
              "distances": {
                  "type": "object"
              }
          }}
# Registry mapping schema names (the keys accepted by validate_schema)
# to the jsonschema definitions declared above.
SCHEMAS = {'account': ACCOUNT,
           'account_type': ACCOUNT_TYPE,
           'activity': ACTIVITY,
           'name': NAME,
           'r_name': R_NAME,
           'rse': RSE,
           'rse_attribute': RSE_ATTRIBUTE,
           'scope': SCOPE,
           'r_scope': R_SCOPE,
           'did': DID,
           'did_filters': DID_FILTERS,
           'r_did': R_DID,
           'dids': DIDS,
           'rule': RULE,
           'r_dids': R_DIDS,
           'collection': COLLECTION,
           'collections': COLLECTIONS,
           'attachment': ATTACHMENT,
           'attachments': ATTACHMENTS,
           'subscription_filter': SUBSCRIPTION_FILTER,
           'cache_add_replicas': CACHE_ADD_REPLICAS,
           'cache_delete_replicas': CACHE_DELETE_REPLICAS,
           'account_attribute': ACCOUNT_ATTRIBUTE,
           'import': IMPORT}
def validate_schema(name, obj):
    """
    Validate an object against a named JSON schema.

    :param name: The json schema name (a key of SCHEMAS). Unknown names
                 fall back to the empty schema, which accepts anything.
    :param obj: The object to validate; falsy objects are skipped.
    :raises InvalidObject: if the object does not match the schema.
    """
    try:
        if obj:
            validate(obj, SCHEMAS.get(name, {}))
    except ValidationError as error:
        # Chain the underlying jsonschema error explicitly so its full
        # context survives in tracebacks ('error' is used here, so the
        # old unused-variable lint suppression was stale).
        raise InvalidObject(f'Problem validating {name}: {error}') from error
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
# --- primitive jsonschema building blocks (LSST VO policy) ---------------
ACCOUNT_LENGTH = 25
# LSST restricts account names to lower-case characters.
ACCOUNT = {"description": "Account name",
           "type": "string",
           "pattern": "^[a-z0-9-_]{1,%s}$" % ACCOUNT_LENGTH}
ACCOUNTS = {"description": "Array of accounts",
            "type": "array",
            "items": ACCOUNT,
            "minItems": 0,
            "maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
                "type": "string",
                "enum": ["USER", "GROUP", "SERVICE"]}
ACTIVITY = {"description": "Activity name",
            "type": "string",
            "enum": ["Data Brokering", "Data Consolidation", "Data rebalancing",
                     "Debug", "Express", "Functional Test", "Group Subscriptions",
                     "Production Input", "Production Output",
                     "Analysis Input", "Analysis Output", "Staging",
                     "T0 Export", "T0 Tape", "Upload/Download (Job)",
                     "Upload/Download (User)", "User Subscriptions", "Data Challenge"]}
SCOPE_LENGTH = 25
SCOPE = {"description": "Scope name",
         "type": "string",
         "pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % SCOPE_LENGTH}
# Relaxed "read" variant: only requires one word character somewhere.
R_SCOPE = {"description": "Scope name",
           "type": "string",
           "pattern": "\\w"}
NAME_LENGTH = 500
# LSST DID names are path-like: a leading '/' and '/'-separated components.
NAME = {"description": "Data Identifier name",
        "type": "string",
        "pattern": r"^/[A-Za-z0-9\.\-\_\+\/]{1,%s}$" % NAME_LENGTH}
# Relaxed "read" variant of NAME, used for lookup-style operations.
R_NAME = {"description": "Data Identifier name",
          "type": "string",
          "pattern": "\\w"}
LOCKED = {"description": "Rule locked status",
          "type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
                "type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
                "type": ["boolean", "null"]}
DELAY_INJECTION = {"description": "Time (in seconds) to wait before starting applying the rule. Implies asynchronous rule creation.",
                   "type": ["integer", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
                  "type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
                       "type": "boolean"}
RSE = {"description": "RSE name",
       "type": "string",
       "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
                 "type": "string",
                 "pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
                         "type": "string",
                         "pattern": r'([A-Z0-9]+([_-][A-Z0-9]+)*)'}
# Long and one-letter state codes are both accepted.
REPLICA_STATE = {"description": "Replica state",
                 "type": "string",
                 "enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
# RFC 1123 style date string, e.g. 'Mon, 13 May 2013 10:23:03 UTC'.
DATE = {"description": "Date",
        "type": "string",
        "pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
            "type": "string",
            "enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
            "type": ["string", "null"],
            "enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
          "type": ["string", "null"],
          "enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
           "type": ["string", "null"],
           "maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
            "type": ["string", "null"],
            "maxLength": 3999}
BYTES = {"description": "Size in bytes",
         "type": "integer"}
ADLER32 = {"description": "adler32",
           "type": "string",
           "pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
          "type": ["string", "null"]}
MD5 = {"description": "md5",
       "type": "string",
       "pattern": "^[a-fA-F\\d]{32}$"}
# Accepts both braced and hyphen-less UUID spellings.
UUID = {"description": "Universally Unique Identifier (UUID)",
        "type": "string",
        "pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
        "type": "object",
        "properties": {"guid": UUID},
        "additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
            "type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
                   "type": ["boolean", "null"]}
TIME_ENTRY = {
    "description": "Datetime, ISO 8601",
    "type": "string",
    "pattern": r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d*$'
}
IP = {
    "description": "Internet Protocol address v4, RFC 791",
    "type": "string",
    "pattern": r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}$'
}
CLIENT_STATE = {
    "description": "Client state",
    "type": "string",
    "enum": ['DONE', 'FAILED', 'PROCESSING', 'ALREADY_DONE', 'FILE_NOT_FOUND', 'FOUND_IN_PCACHE', 'DOWNLOAD_ATTEMPT',
             'FAIL_VALIDATE', 'FOUND_ROOT']
}
# --- composite schemas for replication rules and data identifiers --------
RULE = {"description": "Replication rule",
        "type": "object",
        "properties": {"dids": {"type": "array"},
                       "account": ACCOUNT,
                       "copies": COPIES,
                       "rse_expression": RSE_EXPRESSION,
                       "grouping": GROUPING,
                       "weight": WEIGHT,
                       "lifetime": RULE_LIFETIME,
                       "locked": LOCKED,
                       "subscription_id": SUBSCRIPTION_ID,
                       "source_replica_expression": SOURCE_REPLICA_EXPRESSION,
                       "activity": ACTIVITY,
                       "notify": NOTIFY,
                       "purge_replicas": PURGE_REPLICAS,
                       "ignore_availability": IGNORE_AVAILABILITY,
                       "comment": COMMENT,
                       "ask_approval": ASK_APPROVAL,
                       "asynchronous": ASYNCHRONOUS,
                       "delay_injection": DELAY_INJECTION,
                       "priority": PRIORITY,
                       'split_container': SPLIT_CONTAINER,
                       'meta': METADATA},
        "required": ["dids", "copies", "rse_expression"],
        "additionalProperties": False}
RULES = {"description": "Array of replication rules",
         "type": "array",
         "items": RULE,
         "minItems": 1,
         "maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
                   "type": "string",
                   "enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "type": COLLECTION_TYPE,
                             "meta": META,
                             "rules": RULES},
              "required": ["scope", "name", "type"],
              "additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
               "type": "array",
               "items": COLLECTION,
               "minItems": 1,
               "maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
       "type": "object",
       "properties": {"scope": SCOPE,
                      "name": NAME,
                      "type": DID_TYPE,
                      "meta": META,
                      "rules": RULES,
                      "bytes": BYTES,
                      "adler32": ADLER32,
                      "md5": MD5,
                      "state": REPLICA_STATE,
                      "pfn": PFN},
       "required": ["scope", "name"],
       "additionalProperties": False}
DID_FILTERS = {"description": "Array to filter DIDs by metadata",
               "type": "array",
               "additionalProperties": True}
# "Read" variant of DID: relies on the relaxed R_SCOPE / R_NAME patterns.
R_DID = {"description": "Data Identifier(DID)",
         "type": "object",
         "properties": {"scope": R_SCOPE,
                        "name": R_NAME,
                        "type": DID_TYPE,
                        "meta": META,
                        "rules": RULES,
                        "bytes": BYTES,
                        "adler32": ADLER32,
                        "md5": MD5,
                        "state": REPLICA_STATE,
                        "pfn": PFN},
         "required": ["scope", "name"],
         "additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
        "type": "array",
        "items": DID,
        "minItems": 1,
        "maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
          "type": "array",
          "items": R_DID,
          "minItems": 1,
          "maxItems": 1000}
# --- attachment, subscription, cache-message and import schemas ----------
ATTACHMENT = {"description": "Attachement",
              "type": "object",
              "properties": {"scope": SCOPE,
                             "name": NAME,
                             "rse": {"description": "RSE name",
                                     "type": ["string", "null"],
                                     "pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"},
                             "dids": DIDS},
              "required": ["dids"],
              "additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
               "type": "array",
               "items": ATTACHMENT,
               "minItems": 1,
               "maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
                       "properties": {"datatype": {"type": "array"},
                                      "prod_step": {"type": "array"},
                                      "stream_name": {"type": "array"},
                                      "project": {"type": "array"},
                                      "scope": {"type": "array"},
                                      "pattern": {"type": "string"},
                                      "excluded_pattern": {"type": "string"},
                                      "group": {"type": "string"},
                                      "provenance": {"type": "string"},
                                      "account": ACCOUNTS,
                                      "grouping": {"type": "string"},
                                      "split_rule": {"type": "boolean"}}}
ADD_REPLICA_FILE = {"description": "add replica file",
                    "type": "object",
                    "properties": {"scope": SCOPE,
                                   "name": NAME,
                                   "bytes": BYTES,
                                   "adler32": ADLER32},
                    "required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
                     "type": "array",
                     "items": ADD_REPLICA_FILE,
                     "minItems": 1,
                     "maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
                      "type": "object",
                      "properties": {"files": ADD_REPLICA_FILES,
                                     "rse": RSE,
                                     "lifetime": LIFETIME,
                                     "operation": {"enum": ["add_replicas"]}},
                      "required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
                       "type": "object",
                       "properties": {"scope": SCOPE,
                                      "name": NAME},
                       "required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
                        "type": "array",
                        "items": DELETE_REPLICA_FILE,
                        "minItems": 1,
                        "maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
                         "type": "object",
                         "properties": {"files": DELETE_REPLICA_FILES,
                                        "rse": RSE,
                                        "operation": {"enum": ["delete_replicas"]}},
                         "required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
                     "properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
                     "type": "string",
                     # NOTE(review): raw string keeps the doubled backslashes, so a
                     # literal '\' is also accepted -- confirm intent upstream.
                     "pattern": r'^[a-zA-Z0-9-_\\/\\.]{1,30}$'}
# Regular expression used by the REST layer to split '/scope/name' paths.
SCOPE_NAME_REGEXP = '/(.*)/(.*)'
DISTANCE = {"description": "RSE distance",
            "type": "object",
            "properties": {
                "src_rse_id": {"type": "string"},
                "dest_rse_id": {"type": "string"},
                "ranking": {"type": "integer"}
            },
            "required": ["src_rse_id", "dest_rse_id", "ranking"],
            "additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
          "type": "object",
          "properties": {
              "rses": {
                  "type": "object"
              },
              "distances": {
                  "type": "object"
              }
          }}
# Registry mapping schema names (the keys accepted by validate_schema)
# to the jsonschema definitions declared above.
SCHEMAS = {'account': ACCOUNT,
           'account_type': ACCOUNT_TYPE,
           'activity': ACTIVITY,
           'name': NAME,
           'r_name': R_NAME,
           'rse': RSE,
           'rse_attribute': RSE_ATTRIBUTE,
           'scope': SCOPE,
           'r_scope': R_SCOPE,
           'did': DID,
           'did_filters': DID_FILTERS,
           'r_did': R_DID,
           'dids': DIDS,
           'rule': RULE,
           'r_dids': R_DIDS,
           'collection': COLLECTION,
           'collections': COLLECTIONS,
           'attachment': ATTACHMENT,
           'attachments': ATTACHMENTS,
           'subscription_filter': SUBSCRIPTION_FILTER,
           'cache_add_replicas': CACHE_ADD_REPLICAS,
           'cache_delete_replicas': CACHE_DELETE_REPLICAS,
           'account_attribute': ACCOUNT_ATTRIBUTE,
           'import': IMPORT}
def validate_schema(name, obj):
    """
    Validate an object against the named JSON schema.

    :param name: The json schema name (a key of SCHEMAS; unknown names fall
                 back to the empty schema, which accepts anything).
    :param obj: The object to validate. Falsy objects are skipped entirely.
    :raises InvalidObject: If the object does not conform to the schema.
    """
    try:
        if obj:
            validate(obj, SCHEMAS.get(name, {}))
    except ValidationError as error:
        # Chain the original error so the full validation context is preserved.
        raise InvalidObject(f'Problem validating {name}: {error}') from error
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
ACCOUNT_LENGTH = 25
ACCOUNT = {"description": "Account name",
"type": "string",
"pattern": "^[a-z0-9-_]{1,%s}$" % ACCOUNT_LENGTH}
ACCOUNTS = {"description": "Array of accounts",
"type": "array",
"items": ACCOUNT,
"minItems": 0,
"maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
"type": "string",
"enum": ["USER", "GROUP", "SERVICE"]}
ACTIVITY = {"description": "Activity name",
"type": "string",
"enum": ["Data Brokering", "Data Consolidation", "Data Rebalancing",
"Debug", "Express", "Functional Test", "Group Subscriptions",
"Production Input", "Production Output",
"Analysis Input", "Analysis Output", "Staging",
"T0 Export", "T0 Tape", "Upload/Download (Job)",
"Upload/Download (User)", "User Subscriptions", "Data Challenge"]}
SCOPE_LENGTH = 25
SCOPE = {"description": "Scope name",
"type": "string",
"pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % SCOPE_LENGTH}
R_SCOPE = {"description": "Scope name",
"type": "string",
"pattern": "\\w"}
NAME_LENGTH = 250
NAME = {"description": "Data Identifier name",
"type": "string",
"pattern": "^[A-Za-z0-9][A-Za-z0-9\\.\\-\\_]{1,%s}$" % NAME_LENGTH}
R_NAME = {"description": "Data Identifier name",
"type": "string",
"pattern": "\\w"}
LOCKED = {"description": "Rule locked status",
"type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
"type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
"type": ["boolean", "null"]}
DELAY_INJECTION = {"description": "Time (in seconds) to wait before starting applying the rule. Implies asynchronous rule creation.",
"type": ["integer", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
"type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
"type": "boolean"}
RSE = {"description": "RSE name",
"type": "string",
"pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
"type": "string",
"pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
"type": "string",
"pattern": r'([A-Z0-9]+([_-][A-Z0-9]+)*)'}
REPLICA_STATE = {"description": "Replica state",
"type": "string",
"enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
DATE = {"description": "Date",
"type": "string",
"pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
"type": "string",
"enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
"type": ["string", "null"],
"enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
"type": ["string", "null"],
"enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
"type": ["string", "null"],
"maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
"type": ["string", "null"],
"maxLength": 3999}
BYTES = {"description": "Size in bytes",
"type": "integer"}
ADLER32 = {"description": "adler32",
"type": "string",
"pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
"type": ["string", "null"]}
MD5 = {"description": "md5",
"type": "string",
"pattern": "^[a-fA-F\\d]{32}$"}
UUID = {"description": "Universally Unique Identifier (UUID)",
"type": "string",
"pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
"type": "object",
"properties": {"guid": UUID},
"additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
"type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
"type": ["boolean", "null"]}
TIME_ENTRY = {
"description": "Datetime, ISO 8601",
"type": "string",
"pattern": r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d*$'
}
IP = {
"description": "Internet Protocol address v4, RFC 791",
"type": "string",
"pattern": r'^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4}$'
}
IPv4orIPv6 = {
"description": "IPv4 or IPv6 address",
"type": "string",
"format": "ipv4_or_ipv6"
}
CLIENT_STATE = {
"description": "Client state",
"type": "string",
"enum": ['DONE', 'FAILED', 'PROCESSING', 'ALREADY_DONE', 'FILE_NOT_FOUND', 'FOUND_IN_PCACHE', 'DOWNLOAD_ATTEMPT',
'FAIL_VALIDATE', 'FOUND_ROOT', 'ServiceUnavailable', 'SERVICE_ERROR', 'CP_TIMEOUT', 'COPY_ERROR',
'STAGEIN_ATTEMPT_FAILED', 'SourceNotFound', 'MISSINGOUTPUTFILE', 'MD_MISMATCH', 'CHECKSUMCALCULATIONFAILURE',
'MISSINGINPUT', 'MISSING_INPUT']
}
RULE = {"description": "Replication rule",
"type": "object",
"properties": {"dids": {"type": "array"},
"account": ACCOUNT,
"copies": COPIES,
"rse_expression": RSE_EXPRESSION,
"grouping": GROUPING,
"weight": WEIGHT,
"lifetime": RULE_LIFETIME,
"locked": LOCKED,
"subscription_id": SUBSCRIPTION_ID,
"source_replica_expression": SOURCE_REPLICA_EXPRESSION,
"activity": ACTIVITY,
"notify": NOTIFY,
"purge_replicas": PURGE_REPLICAS,
"ignore_availability": IGNORE_AVAILABILITY,
"comment": COMMENT,
"ask_approval": ASK_APPROVAL,
"asynchronous": ASYNCHRONOUS,
"delay_injection": DELAY_INJECTION,
"priority": PRIORITY,
'split_container': SPLIT_CONTAINER,
'meta': METADATA},
"required": ["dids", "copies", "rse_expression"],
"additionalProperties": False}
RULES = {"description": "Array of replication rules",
"type": "array",
"items": RULE,
"minItems": 1,
"maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
"type": "string",
"enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"type": COLLECTION_TYPE,
"meta": META,
"rules": RULES},
"required": ["scope", "name", "type"],
"additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
"type": "array",
"items": COLLECTION,
"minItems": 1,
"maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"type": DID_TYPE,
"meta": META,
"rules": RULES,
"bytes": BYTES,
"adler32": ADLER32,
"md5": MD5,
"state": REPLICA_STATE,
"pfn": PFN},
"required": ["scope", "name"],
"additionalProperties": False}
DID_FILTERS = {"description": "Array to filter DIDs by metadata",
"type": "array",
"additionalProperties": True}
R_DID = {"description": "Data Identifier(DID)",
"type": "object",
"properties": {"scope": R_SCOPE,
"name": R_NAME,
"type": DID_TYPE,
"meta": META,
"rules": RULES,
"bytes": BYTES,
"adler32": ADLER32,
"md5": MD5,
"state": REPLICA_STATE,
"pfn": PFN},
"required": ["scope", "name"],
"additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
"type": "array",
"items": DID,
"minItems": 1,
"maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
"type": "array",
"items": R_DID,
"minItems": 1,
"maxItems": 1000}
ATTACHMENT = {"description": "Attachement",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"rse": {"description": "RSE name",
"type": ["string", "null"],
"pattern": "^([A-Z0-9]+([_-][A-Z0-9]+)*)$"},
"dids": DIDS},
"required": ["dids"],
"additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
"type": "array",
"items": ATTACHMENT,
"minItems": 1,
"maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
"properties": {"datatype": {"type": "array"},
"prod_step": {"type": "array"},
"stream_name": {"type": "array"},
"project": {"type": "array"},
"scope": {"type": "array"},
"pattern": {"type": "string"},
"excluded_pattern": {"type": "string"},
"group": {"type": "string"},
"provenance": {"type": "string"},
"account": ACCOUNTS,
"grouping": {"type": "string"},
"split_rule": {"type": "boolean"}}}
ADD_REPLICA_FILE = {"description": "add replica file",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"bytes": BYTES,
"adler32": ADLER32},
"required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
"type": "array",
"items": ADD_REPLICA_FILE,
"minItems": 1,
"maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
"type": "object",
"properties": {"files": ADD_REPLICA_FILES,
"rse": RSE,
"lifetime": LIFETIME,
"operation": {"enum": ["add_replicas"]}},
"required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME},
"required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
"type": "array",
"items": DELETE_REPLICA_FILE,
"minItems": 1,
"maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
"type": "object",
"properties": {"files": DELETE_REPLICA_FILES,
"rse": RSE,
"operation": {"enum": ["delete_replicas"]}},
"required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
"properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
"type": "string",
"pattern": r'^[a-zA-Z0-9-_\\/\\.]{1,30}$'}
SCOPE_NAME_REGEXP = '/(.*)/(.*)'
DISTANCE = {"description": "RSE distance",
"type": "object",
"properties": {
"src_rse_id": {"type": "string"},
"dest_rse_id": {"type": "string"},
"ranking": {"type": "integer"}
},
"required": ["src_rse_id", "dest_rse_id", "ranking"],
"additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
"type": "object",
"properties": {
"rses": {
"type": "object"
},
"distances": {
"type": "object"
}
}}
SCHEMAS = {'account': ACCOUNT,
'account_type': ACCOUNT_TYPE,
'activity': ACTIVITY,
'name': NAME,
'r_name': R_NAME,
'rse': RSE,
'rse_attribute': RSE_ATTRIBUTE,
'scope': SCOPE,
'r_scope': R_SCOPE,
'did': DID,
'did_filters': DID_FILTERS,
'r_did': R_DID,
'dids': DIDS,
'rule': RULE,
'r_dids': R_DIDS,
'collection': COLLECTION,
'collections': COLLECTIONS,
'attachment': ATTACHMENT,
'attachments': ATTACHMENTS,
'subscription_filter': SUBSCRIPTION_FILTER,
'cache_add_replicas': CACHE_ADD_REPLICAS,
'cache_delete_replicas': CACHE_DELETE_REPLICAS,
'account_attribute': ACCOUNT_ATTRIBUTE,
'import': IMPORT}
def validate_schema(name, obj):
    """
    Validate an object against the named JSON schema.

    :param name: The json schema name (a key of SCHEMAS; unknown names fall
                 back to the empty schema, which accepts anything).
    :param obj: The object to validate. Falsy objects are skipped entirely.
    :raises InvalidObject: If the object does not conform to the schema.
    """
    try:
        if obj:
            validate(obj, SCHEMAS.get(name, {}))
    except ValidationError as error:
        # Chain the original error so the full validation context is preserved.
        raise InvalidObject(f'Problem validating {name}: {error}') from error
from jsonschema import validate, ValidationError
from rucio.common.exception import InvalidObject
ACCOUNT_LENGTH = 25
ACCOUNT = {"description": "Account name",
"type": "string",
"pattern": "^[a-z0-9-_]{1,%s}$" % ACCOUNT_LENGTH}
ACCOUNTS = {"description": "Array of accounts",
"type": "array",
"items": ACCOUNT,
"minItems": 0,
"maxItems": 1000}
ACCOUNT_TYPE = {"description": "Account type",
"type": "string",
"enum": ["USER", "GROUP", "SERVICE"]}
ACTIVITY = {"description": "Activity name",
"type": "string",
"enum": ["Data Consolidation", "Data Rebalancing",
"Functional Test", "Functional Test WebDAV", "Recovery",
"Production Input", "Production Output", "Production Merge",
"Analysis Input", "Analysis Output", "Staging",
"Raw Export", "Upload/Download (Job)", "Upload/Download (User)",
"User Merge", "User Transfers", "Calibration Transfers"]}
SCOPE_LENGTH = 25
SCOPE = {"description": "Scope name",
"type": "string",
"pattern": "^[a-zA-Z_\\-.0-9]{1,%s}$" % SCOPE_LENGTH}
R_SCOPE = {"description": "Scope name",
"type": "string",
"pattern": "\\w"}
NAME_LENGTH = 250
NAME = {"description": "Data Identifier name",
"type": "string",
"pattern": r"^/[A-Za-z0-9\.\-\_\+\/]{1,%s}$" % NAME_LENGTH}
R_NAME = {"description": "Data Identifier name",
"type": "string",
"pattern": "\\w"}
LOCKED = {"description": "Rule locked status",
"type": ["boolean", "null"]}
ASK_APPROVAL = {"description": "Rule approval request",
"type": ["boolean", "null"]}
ASYNCHRONOUS = {"description": "Asynchronous rule creation",
"type": ["boolean", "null"]}
DELAY_INJECTION = {"description": "Time (in seconds) to wait before starting applying the rule. Implies asynchronous rule creation.",
"type": ["integer", "null"]}
PURGE_REPLICAS = {"description": "Rule purge replica status",
"type": "boolean"}
IGNORE_AVAILABILITY = {"description": "Rule ignore availability status",
"type": "boolean"}
RSE = {"description": "RSE name",
"type": "string",
"pattern": "^([A-Z])+((([A-Za-z0-9]+))*(-)?)*$"}
RSE_ATTRIBUTE = {"description": "RSE attribute",
"type": "string",
"pattern": r'([A-Za-z0-9\._-]+[=<>][A-Za-z0-9_-]+)'}
DEFAULT_RSE_ATTRIBUTE = {"description": "Default RSE attribute",
"type": "string",
"pattern": r'([A-Za-z0-9]+([_-][A-Za-z0-9]+)*)'}
REPLICA_STATE = {"description": "Replica state",
"type": "string",
"enum": ["AVAILABLE", "UNAVAILABLE", "COPYING", "BEING_DELETED", "BAD", "SOURCE", "A", "U", "C", "B", "D", "S"]}
DATE = {"description": "Date",
"type": "string",
"pattern": r'((Mon)|(Tue)|(Wed)|(Thu)|(Fri)|(Sat)|(Sun))[,]\s\d{2}\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s\d{4}\s(0\d|1\d|2[0-3])(\:)(0\d|1\d|2\d|3\d|4\d|5\d)(\:)(0\d|1\d|2\d|3\d|4\d|5\d)\s(UTC)'}
DID_TYPE = {"description": "DID type",
"type": "string",
"enum": ["DATASET", "CONTAINER", "FILE", "F"]}
GROUPING = {"description": "Rule grouping",
"type": ["string", "null"],
"enum": ["DATASET", "NONE", "ALL", None]}
NOTIFY = {"description": "Rule notification setting",
"type": ["string", "null"],
"enum": ["Y", "C", "N", "P", None]}
COMMENT = {"description": "Rule comment",
"type": ["string", "null"],
"maxLength": 250}
METADATA = {"description": "Rule wfms metadata",
"type": ["string", "null"],
"maxLength": 3999}
BYTES = {"description": "Size in bytes",
"type": "integer"}
ADLER32 = {"description": "adler32",
"type": "string",
"pattern": "^[a-fA-F\\d]{8}$"}
WEIGHT = {"description": "Rule weight",
"type": ["string", "null"]}
MD5 = {"description": "md5",
"type": "string",
"pattern": "^[a-fA-F\\d]{32}$"}
UUID = {"description": "Universally Unique Identifier (UUID)",
"type": "string",
"pattern": '^(\\{){0,1}[0-9a-fA-F]{8}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{4}-?[0-9a-fA-F]{12}(\\}){0,1}$'}
META = {"description": "Data Identifier(DID) metadata",
"type": "object",
"properties": {"guid": UUID},
"additionalProperties": True}
PFN = {"description": "Physical File Name", "type": "string"}
COPIES = {"description": "Number of replica copies", "type": "integer"}
RSE_EXPRESSION = {"description": "RSE expression", "type": "string"}
SOURCE_REPLICA_EXPRESSION = {"description": "RSE expression", "type": ["string", "null"]}
LIFETIME = {"description": "Lifetime", "type": "number"}
RULE_LIFETIME = {"description": "Rule lifetime", "type": ["number", "null"]}
SUBSCRIPTION_ID = {"description": "Rule Subscription id", "type": ["string", "null"]}
PRIORITY = {"description": "Priority of the transfers",
"type": "integer"}
SPLIT_CONTAINER = {"description": "Rule split container mode",
"type": ["boolean", "null"]}
RULE = {"description": "Replication rule",
"type": "object",
"properties": {"dids": {"type": "array"},
"account": ACCOUNT,
"copies": COPIES,
"rse_expression": RSE_EXPRESSION,
"grouping": GROUPING,
"weight": WEIGHT,
"lifetime": RULE_LIFETIME,
"locked": LOCKED,
"subscription_id": SUBSCRIPTION_ID,
"source_replica_expression": SOURCE_REPLICA_EXPRESSION,
"activity": ACTIVITY,
"notify": NOTIFY,
"purge_replicas": PURGE_REPLICAS,
"ignore_availability": IGNORE_AVAILABILITY,
"comment": COMMENT,
"ask_approval": ASK_APPROVAL,
"asynchronous": ASYNCHRONOUS,
"delay_injection": DELAY_INJECTION,
"priority": PRIORITY,
'split_container': SPLIT_CONTAINER,
'meta': METADATA},
"required": ["dids", "copies", "rse_expression"],
"additionalProperties": False}
RULES = {"description": "Array of replication rules",
"type": "array",
"items": RULE,
"minItems": 1,
"maxItems": 1000}
COLLECTION_TYPE = {"description": "Dataset or container type",
"type": "string",
"enum": ["DATASET", "CONTAINER"]}
COLLECTION = {"description": "Dataset or container",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"type": COLLECTION_TYPE,
"meta": META,
"rules": RULES},
"required": ["scope", "name", "type"],
"additionalProperties": False}
COLLECTIONS = {"description": "Array of datasets or containers",
"type": "array",
"items": COLLECTION,
"minItems": 1,
"maxItems": 1000}
DID = {"description": "Data Identifier(DID)",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"type": DID_TYPE,
"meta": META,
"rules": RULES,
"bytes": BYTES,
"adler32": ADLER32,
"md5": MD5,
"state": REPLICA_STATE,
"pfn": PFN},
"required": ["scope", "name"],
"additionalProperties": False}
DID_FILTERS = {"description": "Array to filter DIDs by metadata",
"type": "array",
"additionalProperties": True}
R_DID = {"description": "Data Identifier(DID)",
"type": "object",
"properties": {"scope": R_SCOPE,
"name": R_NAME,
"type": DID_TYPE,
"meta": META,
"rules": RULES,
"bytes": BYTES,
"adler32": ADLER32,
"md5": MD5,
"state": REPLICA_STATE,
"pfn": PFN},
"required": ["scope", "name"],
"additionalProperties": False}
DIDS = {"description": "Array of Data Identifiers(DIDs)",
"type": "array",
"items": DID,
"minItems": 1,
"maxItems": 1000}
R_DIDS = {"description": "Array of Data Identifiers(DIDs)",
"type": "array",
"items": R_DID,
"minItems": 1,
"maxItems": 1000}
ATTACHMENT = {"description": "Attachement",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"rse": {"description": "RSE name",
"type": ["string", "null"],
"pattern": "^([A-Z])+((([A-Za-z0-9]+))*(-)?)*$"},
"dids": DIDS},
"required": ["dids"],
"additionalProperties": False}
ATTACHMENTS = {"description": "Array of attachments",
"type": "array",
"items": ATTACHMENT,
"minItems": 1,
"maxItems": 1000}
SUBSCRIPTION_FILTER = {"type": "object",
"properties": {"datatype": {"type": "array"},
"prod_step": {"type": "array"},
"stream_name": {"type": "array"},
"project": {"type": "array"},
"scope": {"type": "array"},
"pattern": {"type": "string"},
"excluded_pattern": {"type": "string"},
"group": {"type": "string"},
"provenance": {"type": "string"},
"account": ACCOUNTS,
"grouping": {"type": "string"},
"split_rule": {"type": "boolean"}}}
ADD_REPLICA_FILE = {"description": "add replica file",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME,
"bytes": BYTES,
"adler32": ADLER32},
"required": ["scope", "name", "bytes", "adler32"]}
ADD_REPLICA_FILES = {"description": "add replica files",
"type": "array",
"items": ADD_REPLICA_FILE,
"minItems": 1,
"maxItems": 1000}
CACHE_ADD_REPLICAS = {"description": "rucio cache add replicas",
"type": "object",
"properties": {"files": ADD_REPLICA_FILES,
"rse": RSE,
"lifetime": LIFETIME,
"operation": {"enum": ["add_replicas"]}},
"required": ['files', 'rse', 'lifetime', 'operation']}
DELETE_REPLICA_FILE = {"description": "delete replica file",
"type": "object",
"properties": {"scope": SCOPE,
"name": NAME},
"required": ["scope", "name"]}
DELETE_REPLICA_FILES = {"description": "delete replica files",
"type": "array",
"items": DELETE_REPLICA_FILE,
"minItems": 1,
"maxItems": 1000}
CACHE_DELETE_REPLICAS = {"description": "rucio cache delete replicas",
"type": "object",
"properties": {"files": DELETE_REPLICA_FILES,
"rse": RSE,
"operation": {"enum": ["delete_replicas"]}},
"required": ['files', 'rse', 'operation']}
MESSAGE_OPERATION = {"type": "object",
"properties": {'operation': {"enum": ["add_replicas", "delete_replicas"]}}}
ACCOUNT_ATTRIBUTE = {"description": "Account attribute",
"type": "string",
"pattern": r'^[a-zA-Z0-9-_\\/\\.]{1,30}$'}
# SCOPE_NAME_REGEXP = '/(.*)/(.*)'
SCOPE_NAME_REGEXP = '/([^/]*)(?=/)(.*)'
DISTANCE = {"description": "RSE distance",
"type": "object",
"properties": {
"src_rse_id": {"type": "string"},
"dest_rse_id": {"type": "string"},
"ranking": {"type": "integer"}
},
"required": ["src_rse_id", "dest_rse_id", "ranking"],
"additionalProperties": True}
IMPORT = {"description": "import data into rucio.",
"type": "object",
"properties": {
"rses": {
"type": "object"
},
"distances": {
"type": "object"
}
}}
SCHEMAS = {'account': ACCOUNT,
'account_type': ACCOUNT_TYPE,
'activity': ACTIVITY,
'name': NAME,
'r_name': R_NAME,
'rse': RSE,
'rse_attribute': RSE_ATTRIBUTE,
'scope': SCOPE,
'r_scope': R_SCOPE,
'did': DID,
'did_filters': DID_FILTERS,
'r_did': R_DID,
'dids': DIDS,
'rule': RULE,
'r_dids': R_DIDS,
'collection': COLLECTION,
'collections': COLLECTIONS,
'attachment': ATTACHMENT,
'attachments': ATTACHMENTS,
'subscription_filter': SUBSCRIPTION_FILTER,
'cache_add_replicas': CACHE_ADD_REPLICAS,
'cache_delete_replicas': CACHE_DELETE_REPLICAS,
'account_attribute': ACCOUNT_ATTRIBUTE,
'import': IMPORT}
def validate_schema(name, obj):
    """
    Validate an object against the named JSON schema.

    :param name: The json schema name (a key of SCHEMAS; unknown names fall
                 back to the empty schema, which accepts anything).
    :param obj: The object to validate. Falsy objects are skipped entirely.
    :raises InvalidObject: If the object does not conform to the schema.
    """
    try:
        if obj:
            validate(obj, SCHEMAS.get(name, {}))
    except ValidationError as error:
        # Chain the original error so the full validation context is preserved.
        raise InvalidObject(f'Problem validating {name}: {error}') from error
from zlib import adler32
import gzip, glob
from .py3 import to_bytes, PY3
def part(nparts, path):
    """Map *path* to a deterministic partition index in ``[0, nparts)``.

    The index is the Adler-32 checksum of the (byte-encoded) path modulo the
    number of partitions, so the same path always lands in the same partition.
    With one partition or fewer, everything maps to partition 0.
    """
    if nparts <= 1:
        return 0
    if PY3:
        # adler32 requires bytes on Python 3.
        path = to_bytes(path)
    return adler32(path) % nparts
class _Partition(object):
def __init__(self, f, path):
self.F = f
self.Path = path
def __iter__(self):
return self
def __next__(self):
l = self.F.readline()
if not l:
raise StopIteration
return l.strip()
def rewind(self):
self.F.seek(0,0)
class PartitionedList(object):
    """A list of text items spread deterministically over N files ("partitions").

    Items are routed to a partition with ``part()`` (Adler-32 of the item
    modulo the partition count), so equal items always share a partition.
    """

    def __init__(self, mode, filenames, compressed=False):
        """Initializes the PartitionedList object.
        Parameters
        ----------
        mode : str
            "w" for write and "r" for read-only
        filenames : list
            Ordered list of file paths for the partition
        compressed : boolean
            Whether the files will be compressed with gzip. Used with "w" only. Existing files will be opened as gzip-compressed if they have the
            .gz extension
        Notes
        -----
        It is recommended to use ``open`` and ``create`` static methods instead of the constructor
        """
        self.Mode = mode
        self.FileNames = filenames
        self.Files = []
        self.NParts = len(filenames)
        self.Compressed = compressed
        if mode == "w":
            # Write mode honours the `compressed` flag for every partition file.
            self.Files = [open(fn, "w") if not compressed else gzip.open(fn, "wt") for fn in self.FileNames]
        else:
            # Read mode ignores `compressed` and decides per file by the .gz extension.
            self.Files = [open(fn, "r") if not fn.endswith(".gz") else gzip.open(fn, "rt") for fn in self.FileNames]
        self.NWritten = 0  # number of items written via add()
    @staticmethod
    def open(prefix=None, files=None):
        """Static method to open an existing partitioned list
        Parameters
        ----------
        prefix : str
            Open files matching pattern: <prefix>*
        files : list
            Ordered list of file paths for the partition
        """
        # open existing set
        # Sorting keeps the zero-padded partition files in index order.
        if files is None:
            files = sorted(glob.glob(f"{prefix}.*"))
        return PartitionedList("r", files)
    @staticmethod
    def create(nparts, prefix, compressed=False):
        """Static method to create a new partitioned list
        Parameters
        ----------
        nparts : int
            Number of partitions to create. Each partition will be stored in a separate file.
        prefix : str
            Files will be created as <prefix>.00000, <prefix>.00001, ...
        compressed : boolean
            Whether to compress the partition files
        """
        # create new set
        gz = ".gz" if compressed else ""
        files = ["%s.%05d%s" % (prefix, i, gz) for i in range(nparts)]
        return PartitionedList("w", files, compressed)
    @staticmethod
    def create_file(path, compressed=False):
        """Create a single-partition list backed by one file (``.gz`` is appended when compressed)."""
        # create a single file set
        if compressed and not path.endswith(".gz"):
            path = path + ".gz"
        return PartitionedList("w", [path], compressed)
    def add(self, item):
        """Adds an item to the partitioned list by appending it to corresponding partition file. The partition file is chosen by computing
        Adler32 checksum as an unsigned (positive) integer on the item and then taking modulo by the number of partitions in the list of the
        integer result.
        Parameters
        ----------
        item : str or bytes
            The item to add to the list
        Raises
        ------
        ValueError
            If the list was not opened in write mode.
        """
        if self.Mode != "w": raise ValueError("The list is not open for writing")
        item = item.strip()
        i = part(self.NParts, item)
        #print(item, "%", self.NParts, "->", i)
        item = item+"\n"
        self.Files[i].write(item)
        self.NWritten += 1
    def files(self):
        """Returns the list of open file objects for the partitions.

        NOTE(review): despite the name suggesting paths, this returns the open
        file objects; the paths live in ``self.FileNames`` — confirm which the
        callers expect.
        """
        return self.Files
    @property
    def partitions(self):
        """Returns list of Partition objects for the list. Each partition can be iterated to get the list of items:
        for part in the_list.partitions:
            for item in part:
                ...
        """
        return [_Partition(f, path) for f, path in zip(self.Files, self.FileNames)]
    def items(self):
        """Generator yielding all the items in the list, partition by partition (read mode only)."""
        assert self.Mode == "r"
        for f in self.Files:
            l = f.readline()
            while l:
                yield l.strip()
                l = f.readline()
    def __iter__(self):
        """Iterator for the list. This allows the PartitionedList object to be used as:
        for item in the_list:
            ...
        """
        return self.items()
    def close(self):
        """Closes the list. It is important to call this method for a list open for writing.
        """
        [f.close() for f in self.Files]
    def __del__(self):
        """The destructor will call close().

        NOTE(review): if __init__ raised before ``self.Files`` was assigned,
        this would itself raise AttributeError — confirm whether that matters.
        """
        self.close()
if __name__ == "__main__":
    # CLI helper: dump every item of the partitioned list <prefix>.* to stdout.
    import sys

    prefix = sys.argv[1]
    lst = PartitionedList.open(prefix=prefix)
    for item in lst:
        print(item)
# JSON-schema fragments for the per-instance keys of the Rucio JupyterLab
# extension configuration. Shared between the inlined `instance` schema and
# the `remote_instance` schema below.
instance_properties = {
    "name": {
        "type": "string"
    },
    "display_name": {
        "type": "string"
    },
    "rucio_base_url": {
        "type": "string"
    },
    "rucio_auth_url": {
        "type": "string"
    },
    "rucio_webui_url": {
        "type": "string"
    },
    "rucio_ca_cert": {
        "type": "string"
    },
    "site_name": {
        "type": "string"
    },
    "vo": {
        "type": "string"
    },
    "voms_enabled": {
        "type": "boolean",
        "default": False
    },
    "voms_vomses_path": {
        "type": "string"
    },
    "voms_certdir_path": {
        "type": "string"
    },
    "voms_vomsdir_path": {
        "type": "string"
    },
    "mode": {
        "type": "string",
        "enum": ["replica", "download"]
    },
    "app_id": {
        "type": "string"
    },
    "destination_rse": {
        "type": "string"
    },
    "replication_rule_lifetime_days": {
        "type": "integer",
        "default": 0
    },
    "rse_mount_path": {
        "type": "string"
    },
    "path_begins_at": {
        "type": "integer",
        "default": 0
    },
    "cache_expires_at": {
        "type": "integer",
        "default": 0
    },
    "wildcard_enabled": {
        "type": "boolean",
        "default": False
    },
    # OIDC token source: "env" reads from an environment variable,
    # "file" reads from a file (names given by the two keys below).
    "oidc_auth": {
        "type": "string",
        "enum": ["env", "file"]
    },
    "oidc_env_name": {
        "type": "string"
    },
    "oidc_file_name": {
        "type": "string"
    },
}
# Schema for a fully inlined instance entry. The if/then/else clause makes
# the required keys mode-dependent: "replica" mode additionally requires
# destination_rse and rse_mount_path, while any other mode ("download")
# requires rucio_ca_cert instead.
instance = {
    "type": "object",
    "required": [
        "name",
        "display_name",
        "rucio_base_url",
        "mode"
    ],
    "additionalProperties": True,
    "properties": instance_properties,
    "if": {"properties": {"mode": {"const": "replica"}}},
    "then": {
        "required": ["destination_rse", "rse_mount_path"]
    },
    "else": {
        "required": ["rucio_ca_cert"]
    }
}
# Same property set as `instance` but with no required keys.
# NOTE(review): not referenced by the `root` schema below — presumably used
# to validate instances fetched via a remote config; confirm against callers.
remote_instance = {
    "type": "object",
    "required": [],
    "additionalProperties": True,
    "properties": instance_properties
}
# Schema for a remote-config pointer entry: `$url` locates the full
# configuration, while name/display_name identify the instance locally.
remote_config = {
    "type": "object",
    "required": [
        "$url",
        "name",
        "display_name"
    ],
    "additionalProperties": True,
    "properties": {
        "$url": {
            "type": "string"
        },
        "name": {
            "type": "string"
        },
        "display_name": {
            "type": "string"
        }
    }
}
# Top-level schema: the configuration is a list whose entries are either a
# fully inlined instance definition or a pointer to a remote configuration.
root = {
    "$schema": "http://json-schema.org/draft-07/schema",
    "type": "array",
    "default": [],
    "additionalItems": False,
    "items": {
        "anyOf": [
            instance,
            remote_config
        ]
    }
}
import random
import numpy as np
import matplotlib.pyplot as plt
from ruck import *
class TravelingSalesmanModule(EaModule):
    """Evolutionary-algorithm module for the traveling-salesman problem.

    Individuals are permutations of point indices; fitness is the negated
    tour length, so maximising fitness minimises the tour distance.
    """

    def __init__(self, points, num_individuals: int = 128, closed_path=False):
        # points is assumed to be a 2-D (num_points, dims) array; the asserts
        # below enforce only the rank, not dims == 2 (plotting assumes 2-D).
        self.num_individuals = int(num_individuals)
        self.points = np.array(points)
        self.num_points = len(self.points)
        self.closed_path = bool(closed_path)
        # checks
        assert self.points.ndim == 2
        assert self.num_points > 0
        assert self.num_individuals > 0
    # OVERRIDE
    def gen_starting_values(self):
        # One independent random permutation of all point indices per individual.
        values = [np.arange(self.num_points) for _ in range(self.num_individuals)]
        [np.random.shuffle(v) for v in values]
        return values
    def generate_offspring(self, population):
        # there are definitely much better ways to do this
        # Each child is a two-opt mutation of a uniformly chosen parent.
        return [Member(self._two_opt_swap(random.choice(population).value)) for _ in range(self.num_individuals)]
    def evaluate_values(self, values):
        # we negate because we want to minimize dist
        return [-self._get_dist(v) for v in values]
    def select_population(self, population, offspring):
        # Tournament selection (k=3) over parents and children combined.
        return R.select_tournament(population + offspring, len(population), k=3)
    # HELPER
    def _two_opt_swap(self, idxs):
        # Reverse the segment idxs[i:j] (classic 2-opt move). When i == j the
        # move is a no-op, so some offspring are unmutated copies of a parent.
        i, j = np.random.randint(0, self.num_points, 2)
        i, j = min(i, j), max(i, j)
        nidxs = np.concatenate([idxs[:i], idxs[i:j][::-1], idxs[j:]])
        return nidxs
    def _get_dist(self, value):
        # Sum of Euclidean edge lengths along the tour; a closed path also
        # includes the edge from the last point back to the first.
        if self.closed_path:
            idxs_from, idxs_to = value, np.roll(value, -1)
        else:
            idxs_from, idxs_to = value[:-1], value[1:]
        # compute dist
        return np.sum(np.linalg.norm(self.points[idxs_from] - self.points[idxs_to], ord=2, axis=-1))
    def get_plot_points(self, value):
        """Return (xs, ys) coordinate arrays of the tour in visiting order, suitable for plotting."""
        idxs = value.value if isinstance(value, Member) else value
        # handle case
        if self.closed_path:
            idxs = np.concatenate([idxs, [idxs[0]]])
        # get consecutive points
        xs, ys = self.points[idxs].T
        return xs, ys
if __name__ == '__main__':
    # Seed both RNGs used by the module so runs are reproducible.
    random.seed(42)
    np.random.seed(42)

    # Random 2-D city coordinates in the unit square.
    points = np.random.rand(72, 2)

    # Evolve a tour over the points.
    module = TravelingSalesmanModule(points=points, num_individuals=128, closed_path=False)
    population, logbook, halloffame = Trainer(generations=1024).fit(module)

    # Plot the best tour found.
    fig, ax = plt.subplots(1, 1, figsize=(5, 5))
    ax.plot(*module.get_plot_points(halloffame[0]))
    plt.show()
<div align="center">
<h1 align="center">Ruckig</h1>
<h3 align="center">
Instantaneous Motion Generation for Robots and Machines.
</h3>
</div>
<p align="center">
<a href="https://github.com/pantor/ruckig/actions">
<img src="https://github.com/pantor/ruckig/workflows/CI/badge.svg" alt="CI">
</a>
<a href="https://github.com/pantor/ruckig/issues">
<img src="https://img.shields.io/github/issues/pantor/ruckig.svg" alt="Issues">
</a>
<a href="https://github.com/pantor/ruckig/releases">
<img src="https://img.shields.io/github/v/release/pantor/ruckig.svg?include_prereleases&sort=semver" alt="Releases">
</a>
<a href="https://github.com/pantor/ruckig/blob/master/LICENSE">
<img src="https://img.shields.io/badge/license-MIT-green.svg" alt="MIT">
</a>
</p>
Ruckig generates trajectories on-the-fly, allowing robots and machines to react instantaneously to sensor input. Ruckig calculates a trajectory to a *target* waypoint (with position, velocity, and acceleration) starting from *any* initial state limited by velocity, acceleration, and jerk constraints. Besides the target state, Ruckig allows you to define intermediate positions for waypoint following. For state-to-state motions, Ruckig guarantees a time-optimal solution. With intermediate waypoints, Ruckig calculates the path and its time parametrization jointly, resulting in significantly faster trajectories compared to traditional methods.
More information can be found at [ruckig.com](https://ruckig.com) and in the corresponding paper [Jerk-limited Real-time Trajectory Generation with Arbitrary Target States](https://arxiv.org/abs/2105.04830), accepted for the *Robotics: Science and Systems (RSS), 2021* conference.
## Installation
Ruckig has no dependencies (except for testing). To build Ruckig using CMake, just run
```bash
mkdir -p build
cd build
cmake -DCMAKE_BUILD_TYPE=Release ..
make
```
To install Ruckig in a system-wide directory, use `(sudo) make install`. An example of using Ruckig in your CMake project is given by `examples/CMakeLists.txt`. However, you can also include Ruckig as a directory within your project and call `add_subdirectory(ruckig)` in your parent `CMakeLists.txt`. To enable the [Online API](http://api.ruckig.com/docs) for intermediate waypoints, just pass the `BUILD_ONLINE_CLIENT` flag to CMake.
Ruckig is also available as a Python module, in particular for development or debugging purposes. The Ruckig *Community Version* can be installed from [PyPI](https://pypi.org/project/ruckig/) via
```bash
pip install ruckig
```
When using CMake, the Python module can be built using the `BUILD_PYTHON_MODULE` flag. If you're only interested in the Python module (and not in the C++ library), you can build and install Ruckig via `pip install .`.
## Tutorial
Furthermore, we will explain the basics to get started with online generated trajectories within your application. There is also a [collection of examples](https://docs.ruckig.com/pages.html) that guide you through the most important features of Ruckig. A time-optimal trajectory for a single degree of freedom is shown in the figure below. We also added plots for the resulting trajectories of all examples. Let's get started!

### Waypoint-based Trajectory Generation
Ruckig provides three main interface classes: the *Ruckig*, the *InputParameter*, and the *OutputParameter* class.
First, you'll need to create a Ruckig instance with the number of DoFs as a template parameter, and the control cycle (e.g. in seconds) in the constructor.
```.cpp
Ruckig<6> ruckig {0.001}; // Number DoFs; control cycle in [s]
```
The input type has 3 blocks of data: the *current* state, the *target* state and the corresponding kinematic *limits*.
```.cpp
InputParameter<6> input; // Number DoFs
input.current_position = {0.2, ...};
input.current_velocity = {0.1, ...};
input.current_acceleration = {0.1, ...};
input.target_position = {0.5, ...};
input.target_velocity = {-0.1, ...};
input.target_acceleration = {0.2, ...};
input.max_velocity = {0.4, ...};
input.max_acceleration = {1.0, ...};
input.max_jerk = {4.0, ...};
OutputParameter<6> output; // Number DoFs
```
Given all input and output resources, we can iterate over the trajectory at each discrete time step. For most applications, this loop must run within a real-time thread and controls the actual hardware.
```.cpp
while (ruckig.update(input, output) == Result::Working) {
// Make use of the new state here!
// e.g. robot->setJointPositions(output.new_position);
output.pass_to_input(input); // Don't forget this!
}
```
Within the control loop, you need to update the *current state* of the input parameter according to the calculated trajectory. Therefore, the `pass_to_input` method copies the new kinematic state of the output to the current kinematic state of the input parameter. If (in the next step) the current state is not the expected, pre-calculated trajectory, Ruckig will calculate a new trajectory based on the novel input. When the trajectory has reached the target state, the `update` function will return `Result::Finished`.
### Intermediate Waypoints
The Ruckig Community Version now supports intermediate waypoints via a [remote API](http://api.ruckig.com/docs). Make sure to include `-DBUILD_ONLINE_CLIENT=ON` as a CMake flag when compiling - the PyPI Python version should bring that out of the box. To allocate the necessary memory for a variable number of waypoints beforehand, we need to pass the maximum number of waypoints to Ruckig via
```.cpp
Ruckig<6> otg {0.001, 8};
InputParameter<6> input {8};
OutputParameter<6> output {8};
```
The `InputParameter` class takes the number of waypoints as an optional input, however usually you will fill in the values (and therefore reserve its memory) yourself. Then you're ready to set intermediate via points by
```.cpp
input.intermediate_positions = {
{0.2, ...},
{0.8, ...},
};
```
As soon as at least one intermediate position is given, the Ruckig Community Version switches to the mentioned (of course, non-real-time capable) remote API. If you require real-time calculation on your own hardware, we refer to the *Ruckig Pro Version*.
When using *intermediate positions*, both the underlying motion planning problem as well as its calculation changes significantly. In particular, there are some fundamental limitations for jerk-limited online trajectory generation regarding the usage of waypoints. Please find more information about these limitations [here](https://docs.ruckig.com/md_pages_intermediate_waypoints.html), and in general we recommend to use
```.cpp
input.intermediate_positions = otg.filter_intermediate_positions(input.intermediate_positions, {0.1, ...});
```
to filter waypoints according to a (high) threshold distance. Setting *interrupt_calculation_duration* makes sure to be real-time capable by refining the solution in the next control invocation. Note that this is a soft interruption of the calculation. Currently, no minimum or discrete durations are supported when using intermediate positions.
### Input Parameter
To go into more detail, the *InputParameter* type has following members:
```.cpp
using Vector = std::array<double, DOFs>; // By default
Vector current_position;
Vector current_velocity; // Initialized to zero
Vector current_acceleration; // Initialized to zero
std::vector<Vector> intermediate_positions; // (only in Pro Version)
Vector target_position;
Vector target_velocity; // Initialized to zero
Vector target_acceleration; // Initialized to zero
Vector max_velocity;
Vector max_acceleration;
Vector max_jerk;
std::optional<Vector> min_velocity; // If not given, the negative maximum velocity will be used.
std::optional<Vector> min_acceleration; // If not given, the negative maximum acceleration will be used.
std::optional<Vector> min_position; // (only in Pro Version)
std::optional<Vector> max_position; // (only in Pro Version)
std::array<bool, DOFs> enabled; // Initialized to true
std::optional<double> minimum_duration;
std::optional<double> interrupt_calculation_duration; // [µs], (only in Pro Version)
ControlInterface control_interface; // The default position interface controls the full kinematic state.
Synchronization synchronization; // Synchronization behavior of multiple DoFs
DurationDiscretization duration_discretization; // Whether the duration should be a discrete multiple of the control cycle (off by default)
std::optional<Vector<ControlInterface>> per_dof_control_interface; // Sets the control interface for each DoF individually, overwrites global control_interface
std::optional<Vector<Synchronization>> per_dof_synchronization; // Sets the synchronization for each DoF individually, overwrites global synchronization
```
On top of the current state, target state, and constraints, Ruckig allows for a few more advanced settings:
- A *minimum* velocity and acceleration can be specified - these should be a negative number. If they are not given, the negative maximum velocity or acceleration will be used (similar to the jerk limit). For example, this might be useful in human robot collaboration settings with a different velocity limit towards a human. Or, when switching between different moving coordinate frames like picking from a conveyer belt.
- You can overwrite the global kinematic limits to specify limits for each section between two waypoints separately by using e.g. `per_section_max_velocity`.
- If a DoF is not *enabled*, it will be ignored in the calculation. Ruckig will output a trajectory with constant acceleration for those DoFs.
- A *minimum duration* can be optionally given. Note that Ruckig can not guarantee an exact, but only a minimum duration of the trajectory.
- The control interface (position or velocity control) can be switched easily. For example, a stop trajectory or visual servoing can be easily implemented with the velocity interface.
- Different synchronization behaviors (e.g. phase, time, or no synchronization) are implemented. Phase synchronization results in straight-line motions.
- The trajectory duration might be constrained to a multiple of the control cycle. This way, the *exact* state can be reached at a control loop execution.
We refer to the [API documentation](https://docs.ruckig.com/namespaceruckig.html) of the enumerations within the `ruckig` namespace for all available options.
### Input Validation
To check that Ruckig is able to generate a trajectory before the actual calculation step,
```.cpp
ruckig.validate_input(input, check_current_state_within_limits=false, check_target_state_within_limits=true);
// returns boolean
```
returns `false` if an input is not valid. The two boolean arguments check that the current or target state is within the limits. The check includes a typical catch of jerk-limited trajectory generation: When the current state is at maximal velocity, any positive acceleration will inevitably lead to a velocity violation *at a future timestep*. In general, this condition is fulfilled when
```
Abs(acceleration) <= Sqrt(2 * max_jerk * (max_velocity - Abs(velocity))).
```
If both arguments are set to true, the calculated trajectory is guaranteed to be *within the kinematic limits throughout* its duration. Also, note that there are range constraints of the input due to numerical reasons, see below for more details.
### Result Type
The `update` function of the Ruckig class returns a Result type that indicates the current state of the algorithm. This can either be **working**, **finished** if the trajectory has finished, or an **error** type if something went wrong during calculation. The result type can be compared as a standard integer.
State | Error Code
------------------------------- | ----------
Working | 0
Finished | 1
Error | -1
ErrorInvalidInput | -100
ErrorTrajectoryDuration | -101
ErrorPositionalLimits | -102
ErrorExecutionTimeCalculation | -110
ErrorSynchronizationCalculation | -111
### Output Parameter
The output class includes the new kinematic state and the overall trajectory.
```.cpp
Vector new_position;
Vector new_velocity;
Vector new_acceleration;
Trajectory trajectory; // The current trajectory
double time; // The current, auto-incremented time. Reset to 0 at a new calculation.
size_t new_section; // Index of the section between two (possibly filtered) intermediate positions.
bool did_section_change; // Was a new section reached in the last cycle?
bool new_calculation; // Whether a new calculation was performed in the last cycle
bool was_calculation_interrupted; // Was the trajectory calculation interrupted? (only in Pro Version)
double calculation_duration; // Duration of the calculation in the last cycle [µs]
```
Moreover, the **trajectory** class has a range of useful parameters and methods.
```.cpp
double duration; // Duration of the trajectory
std::array<double, DOFs> independent_min_durations; // Time-optimal profile for each independent DoF
<...> at_time(double time); // Get the kinematic state of the trajectory at a given time
<...> get_position_extrema(); // Returns information about the position extrema and their times
```
Again, we refer to the [API documentation](https://docs.ruckig.com) for the exact signatures.
### Offline Calculation
Ruckig also supports an offline approach for calculating a trajectory:
```.cpp
result = ruckig.calculate(input, trajectory);
```
When only using this method, the `Ruckig` constructor does not need a control cycle as an argument.
### Dynamic Number of Degrees of Freedom
So far, we have told Ruckig the number of DoFs as a template parameter. If you don't know the number of DoFs at compile-time, you can set the template parameter to `DynamicDOFs` and pass the DoFs to the constructor:
```.cpp
Ruckig<DynamicDOFs> otg {6, 0.001};
InputParameter<DynamicDOFs> input {6};
OutputParameter<DynamicDOFs> output {6};
```
However, we recommend to keep the template parameter when possible: First, it has a performance benefit of a few percent. Second, it is convenient for real-time programming due to its easier handling of memory allocations. When using dynamic degrees of freedom, make sure to allocate the memory of all vectors beforehand.
## Tests and Numerical Stability
The current test suite validates over 5.000.000.000 random trajectories. The numerical exactness is tested for the final position and final velocity to be within `1e-8`, for the final acceleration to be within `1e-10`, and for the velocity, acceleration and jerk limit to be within a numerical error of `1e-12`. These are absolute values - we suggest scaling your input so that these correspond to your required precision of the system. For example, for most real-world systems we suggest using input values in `[m]` (instead of e.g. `[mm]`), as `1e-8m` is sufficiently precise for practical trajectory generation. Furthermore, all kinematic limits should be below `1e12`. The maximal supported trajectory duration is `7e3`, which again should suffice for most applications seeking time-optimality. Note that Ruckig will also output values outside of this range; however, there is no guarantee for correctness.
## Benchmark
We find that Ruckig is more than twice as fast as Reflexxes Type IV for state-to-state motions and well-suited for control cycles as low as 250 microseconds. The Ruckig *Community Version* is in general a more powerful and open-source alternative to the [Reflexxes Type IV](http://reflexxes.ws/) library. In fact, Ruckig is the first Type V trajectory generator for arbitrary target states and even supports directional velocity and acceleration limits, while also being faster on top.

For trajectories with intermediate waypoints, we compare Ruckig to [Toppra](https://github.com/hungpham2511/toppra), a state-of-the-art library for robotic motion planning. Ruckig is able to improve the trajectory duration on average by around 10%, as the path planning and time parametrization are calculated jointly. Moreover, Ruckig is real-time capable and supports jerk-constraints.

## Development
Ruckig is written in C++17. It is continuously tested on `ubuntu-latest`, `macos-latest`, and `windows-latest` against the following versions
- Doctest v2.4 (only for testing)
- Pybind11 v2.9 (only for python wrapper)
If you still need to use C++11, you can patch the Ruckig *Community Version* by calling `sh scripts/patch-c++11.sh`. Note that this will result in a performance drop of a few percent. Moreover, the Python module is not supported.
## Used By
Ruckig is used by over a hundred research labs, companies, and open-source projects worldwide, including:
- [MoveIt 2](https://moveit.ros.org) for trajectory generation.
- [CoppeliaSim](https://www.coppeliarobotics.com/) starting from version 4.3.
- [Struckig](https://github.com/stefanbesler/struckig), a port of Ruckig to Structured Text (ST - IEC 61131-3) for use on PLCs.
- [Frankx](https://github.com/pantor/frankx) for controlling the Franka Emika robot arm.
- and many others!
## Citation
```
@article{berscheid2021jerk,
title={Jerk-limited Real-time Trajectory Generation with Arbitrary Target States},
author={Berscheid, Lars and Kr{\"o}ger, Torsten},
journal={Robotics: Science and Systems XVII},
year={2021}
}
```
| /ruckig-0.7.1.tar.gz/ruckig-0.7.1/README.md | 0.879884 | 0.963712 | README.md | pypi |
import requests
from datetime import datetime, timedelta
from urllib.parse import urljoin
from .base import SmartZoneBase
class SmartZoneClient(SmartZoneBase):
'''
Documentation: http://docs.ruckuswireless.com/smartzone/5.1.2/vsze-public-api-reference-guide-512.html#api-information
Controller: https://n01.ruckus.cloud.mambowifi.com:8443
'''
###### Convert DATETIME to TIMESTAMP mileseconds
def ts_format(self, d):
'''
Convert to UNIX timestamp in ms
'''
return int(d.timestamp()*1000)
def datetemp(self, date_range):
a = int(date_range[0].timestamp()*1000)
b = int(date_range[1].timestamp()*1000)
date_range = (a, b)
return date_range
########## LOGON SESSION
def login(self):
'''
Use this API command to log on to the controller and acquire a valid logon session.
'''
url = self.endpoint('v8_2/session')
json = {
'username': self.username,
'password': self.password
}
r = self.session.post(url, json=json)
return print(r)
def retrieve(self):
'''
Use this API command to retrieve information about the current logon session.
'''
url = self.endpoint('v8_2/session')
r = self.session.get(url)
# validação
return r.json()
########## SESSION MANAGEMENT
def sessionManagement(self):
'''
Use this API command to retrieve information about the current logon sessions.
'''
url = self.endpoint('v8_2/sessionManagement')
r = self.session.get(url)
# validação
return r.json()
########## AP GROUP
def apGroup_retrieve(self, ID, zoneId):
'''
Required: id and zoneid (Variable 'id' is 'ID' in this function)
Use this API command to retrieve information about an AP group.
'''
url = self.endpoint('v8_2/rkszones/{}/apgroups/{}'.format(zoneId, ID))
r = self.session.get(url)
# validação
return r.json()
def apGroup_retrieveList(self, zoneId, index=None, listSize=None):
'''
Use this API command to retrieve the list of AP groups that belong to a zone.
-
Required: zoneid
index: string (optional) - The index of the first entry to be retrieved. Default: 0
listSize: string (optional) - The maximum number of entries to be retrieved. Default: 100
'''
data = {}
if index is None:
data['index'] = '0'
else:
data['index'] = index
if listSize is None:
data['listSize'] = '100'
else:
data['listSize'] = listSize
url = self.endpoint('v8_2/rkszones/{}/apgroups'.format(zoneId))
r = self.session.get(url, data=data)
# validação
return r.json()
def accessPointConfiguration_retrieve(self, apMac):
'''
Use this API command to retrieve the configuration of an AP.
-
Required: apMac
'''
url = self.endpoint('v8_2/aps/{}'.format(apMac))
r = self.session.get(url)
# validação
return r.json()
def accessPointConfiguration_retrieveList(self, index=None, listSize=None, zoneId=None, domainId=None):
'''
Use this API command to retrieve the list of APs that belong to a zone or a domain.
-
Required: None
index: string (optional) - The index of the first entry to be retrieved. Default: 0
listSize: string (optional) - The maximum number of entries to be retrieved. Default: 100
zoneId: string (optional) - Filter AP list by zone
domainId: string (optional) - Filter AP list by domain. Default: current logon domain
'''
data = {}
if index is None:
data['index'] = '0'
else:
data['index'] = index
if listSize is None:
data['listSize'] = '100'
else:
data['listSize'] = listSize
if zoneId is not None:
data['zoneId'] = zoneId
if domainId is not None:
data['domainId'] = domainId
url = self.endpoint('v8_2/aps')
r = self.session.get(url, data=data)
# validação
return r.json()
def accessPointOperation_retrieveOperationInformation(self, apMac):
'''
This API provide detailed AP status and configuration, therefore it was designed for single AP information retrieving. If you need to retrieve large number of ap states, please use “POST://query/ap” (refer to the “Query APs” section of the category “Access Point Operational”).
-
Required: apMac
'''
url = self.endpoint('v8_2/aps/{}/operational/summary'.format(apMac))
r = self.session.get(url)
# validação
return r.json()
def accessPointOperational_apPacketCapture(self, apMac):
'''
Required: apMac
Use this API to get AP packet capture status
'''
url = self.endpoint('v8_2/aps/{}/apPacketCapture'.format(apMac))
r = self.session.get(url)
# validação
return r.json()
def accessPointOperational_RetrieveIndoormapList(self, filter_type, filter_value, date_range, search_type=None, search_value=None, attributes=None):
'''
Query indoorMap with specified filters. Please click the link for the usage of Query Criteria.
Link: http://docs.ruckuswireless.com/smartzone/5.1.2/QueryCriteria.html
-
Required: filter_type and filter_value.
Example:
{
"filters": [
{
"type": "DOMAIN",
"value": "8b2081d5-9662-40d9-a3db-2a3cf4dde3f7"
}
],
"fullTextSearch": {
"type": "AND",
"value": ""
},
"attributes": [
"*"
]
}
'''
date_range = self.datetemp(date_range)
json = self.filters_json(filter_type, filter_value, date_range[0], date_range[1], search_type, search_value, attributes)
url = self.endpoint('v8_2/query/indoorMap')
r = self.session.post(url, json=json)
# validação
return r.json()
def accessPointOperational_queryAps(self, filter_type, filter_value, date_range, search_type=None, search_value=None, attributes=None):
'''
Query APs with specified filters Please click the link for the usage of Query Criteria.
Link: http://docs.ruckuswireless.com/smartzone/5.1.2/QueryCriteria.html
-
Required: filter_type and filter_value.
Example:
{
"filters": [
{
"type": "DOMAIN",
"value": "8b2081d5-9662-40d9-a3db-2a3cf4dde3f7"
}
],
"fullTextSearch": {
"type": "AND",
"value": ""
},
"attributes": [
"*"
]
}
'''
date_range = self.datetemp(date_range)
json = self.filters_json(filter_type, filter_value, date_range[0], date_range[1], search_type, search_value, attributes)
url = self.endpoint('v8_2/query/ap')
r = self.session.post(url, json=json)
# validação
return r.json()
########## WLAN GROUP
def wlanGroup_retrieveList(self, zoneId, index=None, listSize=None):
'''
Use this API command to retrieve the list of WLAN groups within a zone.
-
Required: zoneId
index: string (optional) - The index of the first entry to be retrieved. Default: 0
listSize: string (optional) - The maximum number of entries to be retrieved. Default: 100
'''
url = self.endpoint('v8_2/rkszones/{}/wlangroups'.format(zoneId))
data = {}
if index is None:
data['index'] = '0'
else:
data['index'] = index
if listSize is None:
data['listSize'] = '100'
else:
data['listSize'] = listSize
r = self.session.get(url, data = data)
# validação
return r.json()
########## WLAN
def wlan_retrieveList(self, zoneId, index=None, listSize=None):
'''
Use this API command to retrieve a list of WLANs within a zone.
-
Required: zoneId
index: string (optional) - The index of the first entry to be retrieved. Default: 0
listSize: string (optional) - The maximum number of entries to be retrieved. Default: 100
'''
url = self.endpoint('v8_2/rkszones/{}/wlans'.format(zoneId))
data = {}
if index is None:
data['index'] = '0'
else:
data['index'] = index
if listSize is None:
data['listSize'] = '100'
else:
data['listSize'] = listSize
r = self.session.get(url, data = data)
# validação
return r.json()
def wlan_queryWlans(self, filter_type, filter_value, date_range, search_type=None, search_value=None, attributes=None):
'''
Query WLANs with specified filters. Please click the link for the usage of Query Criteria.
Link: http://docs.ruckuswireless.com/smartzone/5.1.2/QueryCriteria.html
-
Required: filter_type and filter_value
Example:
{
"filters": [
{
"type": "DOMAIN",
"value": "8b2081d5-9662-40d9-a3db-2a3cf4dde3f7"
}
],
"fullTextSearch": {
"type": "AND",
"value": ""
},
"attributes": [
"*"
]
}
'''
date_range = self.datetemp(date_range)
json = self.filters_json(filter_type, filter_value, date_range[0], date_range[1], search_type, search_value, attributes)
url = self.endpoint('v8_2/query/wlan')
r = self.session.post(url, json=json)
# validação
return r.json()
########## ACCESS POINT APP - FALTA VALIDAR**************************
def accessPointAPP_retrieveTotalApCount(self, zoneId=None, domainId=None):
'''
Use this API command to retrieve the total AP count within a zone or a domain.
-
Required: zoneId or domainId
zoneId: string (optional) - Filter AP total count by zone. Default: current logon domain
domainId: string (optional) - Filter AP total count by domain. Default: current logon domain
'''
assert zoneId or domainId, "ERRO: zoneId or domainId is required."
data = {}
if zoneId is not None:
data['zoneId'] = zoneId
if domainId is not None:
data['domainId'] = domainId
url = self.endpoint('v8_2/aps/totalCount')
r = self.session.get(url, data=data)
# validação
return r.json()
########## WIRELESS CLIENT
def wirelessClient_retrieveTotalClientCount(self, apMac):
'''
Required: apMac
Use this API command to retrieve the total client count per AP.
'''
url = self.endpoint('v8_2/aps/{}/operational/client/totalCount'.format(apMac))
r = self.session.get(url)
# validação
return r.json()
def wirelessClient_queryClients(self, filter_type, filter_value, date_range, search_type=None, search_value=None, attributes=None):
'''
Query clients with specified filters. Please click the link for the usage of Query Criteria.
Link: http://docs.ruckuswireless.com/smartzone/5.1.2/QueryCriteria.html
-
Required: filter_type, filter_value
Example:
{
"filters": [
{
"type": "DOMAIN",
"value": "8b2081d5-9662-40d9-a3db-2a3cf4dde3f7"
}
],
"fullTextSearch": {
"type": "AND",
"value": ""
},
"attributes": [
"*"
]
}
'''
date_range = self.datetemp(date_range)
json = self.filters_json(filter_type, filter_value, date_range[0], date_range[1], search_type, search_value, attributes)
url = self.endpoint('v8_2/query/client')
r = self.session.post(url, json=json)
# validação
return r.json()
def wirelessClient_historicalClient_gen(self, filter_type, filter_value, date_range):
has_more = True
page = 1
while has_more:
d = self.wirelessClient_historicalClient(
filter_type,
filter_value,
date_range,
page=page,
limit=1000)
has_more = d.get('hasMore', False)
page += 1
for el in d.get('list', []):
yield el
def wirelessClient_historicalClient(self, filter_type, filter_value, date_range, page=1, limit=1000):
'''
Use this API command to retrive historical client. Please click the link for the usage of Query Criteria.
Link: http://docs.ruckuswireless.com/smartzone/5.1.2/QueryCriteria.html
-
Required: filter_type and filter_value.
Example:
{
"filters": [
{
"type": "DOMAIN",
"value": "d0d495e1-de50-40e3-8d09-e4bbeb4b4722"
}
]
}
'''
json = {
"filters": [
{
"type": filter_type,
"value": filter_value
}
],
"extraTimeRange": {
"start": self.ts_format(date_range[0]),
"end": self.ts_format(date_range[1]),
},
"page": page,
"limit": limit,
}
url = self.endpoint('v8_2/query/historicalclient')
r = self.session.post(url, json=json)
# validação
return r.json()
def wirelessClient_disconnectClient(self, mac, apMac):
'''
Use this API command to disconnect client.
-
Required: mac and apMac
Example:
{
"mac": "E8:99:C4:AD:7C:38",
"apMac": "C0:8A:DE:24:FA:00"
'''
json = {
"mac": mac,
"apMac": apMac
}
url = self.endpoint('v8_2/clients/disconnect')
r = self.session.post(url, json=json)
# validação
return r.json()
def trafficAnalysis_clientUsageWlan (self, filter_type, filter_value, date_range, interval='86400000', rate='tx+rx', frequence='2.4G+5G', page=1, limit=1000):
'''clientUsageWlan return number total of clients in zone, name of device and total bytes consumed
Params: (self, filter_type, filter_value, date_range, interval='86400000', rate='tx+rx', frequence='2.4G+5G', page=1, limit=1000)
Required: filter_type, filter_value and date_range
Optional: interval, rate, frequence, page and limit
filter_type & filter_value:
--------------------------------------------------------------------------
|TYPE(type)| Value | Example(value) |
|--------------------------------------------------------------------------|
|AP |apMac |{AP, 11:22:33:44:55:66} |
|APGROUP |apGroupId |{APGROUP, 758b6970-032a-11e7-9e78-0a0027000000} |
|CLIENT |clientMac |{CLIENT, AB:CD:00:00:00:03} |
|DOMAIN |domainId |{DOMAIN, 8b2081d5-9662-40d9-a3db-2a3cf4dde3f7} |
|INDOORMAP |indoorMapId |{INDOORMAP, 08733520-0a32-11e7-89a3-0a0027000000}|
|WLAN |wlanId |{WLAN, 1} |
|ZONE |zoneId |{ZONE, 91fa3fe0-03da-11e7-8d82-0a0027000000} |
--------------------------------------------------------------------------
frequence: 2.4G, 5G or 2.4G+5G
rate: tx, rx or tx+rx
interval: interval in mileseconds
limit: num of limite itens on page
page: number os pages
'''
date_range = self.datetemp(date_range)
json ={
"filters":[{
"type": filter_type,
"value": filter_value
}],
"extraFilters":[{
"type":"RADIOID",
"value": frequence
}],
"extraNotFilters":[{
"type":"MONITORINGENABLED",
"value":"true"
}],
"attributes":[
rate,
"Host_Name"
],
"extraTimeRange":{
"start": date_range[0],
"end": date_range[1],
"interval": interval},
"options":{
},
"limit": limit,
"page":page}
url = self.endpoint('v8_2/trafficAnalysis/client/usage/wlan')
r = self.session.post(url, json=json)
return r.json()
def trafficAnalysis_lineUsageWlan (self, filter_type, filter_value, date_range, interval='86400000', rate='tx+rx', frequence='2.4G+5G', page=1, limit=1000):
'''lieUsageWlan return consume total in zone per interval
Params: (self, filter_type, filter_value, date_range, interval='86400000', rate='tx+rx', frequence='2.4G+5G', page=1, limit=1000)
Required: filter_type, filter_value and date_range
Optional: interval, rate, frequence, page and limit
filter_type & filter_value:
--------------------------------------------------------------------------
|TYPE(type)| Value | Example(value) |
|--------------------------------------------------------------------------|
|AP |apMac |{AP, 11:22:33:44:55:66} |
|APGROUP |apGroupId |{APGROUP, 758b6970-032a-11e7-9e78-0a0027000000} |
|CLIENT |clientMac |{CLIENT, AB:CD:00:00:00:03} |
|DOMAIN |domainId |{DOMAIN, 8b2081d5-9662-40d9-a3db-2a3cf4dde3f7} |
|INDOORMAP |indoorMapId |{INDOORMAP, 08733520-0a32-11e7-89a3-0a0027000000}|
|WLAN |wlanId |{WLAN, 1} |
|ZONE |zoneId |{ZONE, 91fa3fe0-03da-11e7-8d82-0a0027000000} |
--------------------------------------------------------------------------
frequence: 2.4G, 5G or 2.4G+5G
rate: tx, rx or tx+rx
interval: interval in mileseconds
limit: num of limite itens on page
page: number os pages
'''
date_range = self.datetemp(date_range)
json ={
"filters":[{
"type": filter_type,
"value": filter_value
}],
"extraFilters":[{
"type":"RADIOID",
"value": frequence
}],
"extraNotFilters":[{
"type":"MONITORINGENABLED",
"value":"true"
}],
"attributes":[
rate,
"Host_Name"
],
"extraTimeRange":{
"start": date_range[0],
"end": date_range[1],
"interval": interval},
"options":{
},
"limit": limit,
"page":page}
url = self.endpoint('v8_2/trafficAnalysis/line/usage/wlan')
r = self.session.post(url, json=json)
return r.json()
def trafficAnalysis_lineUsageSplitTunnel(self, filter_type, filter_value, date_range, interval='86400000', rate='tx+rx', frequence='2.4G+5G', page=1, limit=1000):
    '''lineUsageSplitTunnel returns split-tunnel WLAN traffic consumed per interval.

    Params: (self, filter_type, filter_value, date_range, interval='86400000', rate='tx+rx', frequence='2.4G+5G', page=1, limit=1000)
    Required: filter_type, filter_value and date_range
    Optional: interval, rate, frequence, page and limit

    filter_type & filter_value:
    --------------------------------------------------------------------------
    |TYPE(type)| Value       | Example(value)                                  |
    |--------------------------------------------------------------------------|
    |AP        |apMac        |{AP, 11:22:33:44:55:66}                          |
    |APGROUP   |apGroupId    |{APGROUP, 758b6970-032a-11e7-9e78-0a0027000000}  |
    |CLIENT    |clientMac    |{CLIENT, AB:CD:00:00:00:03}                      |
    |DOMAIN    |domainId     |{DOMAIN, 8b2081d5-9662-40d9-a3db-2a3cf4dde3f7}   |
    |INDOORMAP |indoorMapId  |{INDOORMAP, 08733520-0a32-11e7-89a3-0a0027000000}|
    |WLAN      |wlanId       |{WLAN, 1}                                        |
    |ZONE      |zoneId       |{ZONE, 91fa3fe0-03da-11e7-8d82-0a0027000000}     |
    --------------------------------------------------------------------------
    frequence: 2.4G, 5G or 2.4G+5G
    rate: tx, rx or tx+rx
    interval: interval in milliseconds
    limit: number of items per page
    page: page number to return
    '''
    # Normalise the date range into the (start, end) pair the API expects.
    date_range = self.datetemp(date_range)
    # Named `payload` (not `json`) so the builtin json module is not shadowed.
    payload = {
        "filters": [{
            "type": filter_type,
            "value": filter_value
        }],
        "extraFilters": [{
            "type": "RADIOID",
            "value": frequence
        }],
        "extraNotFilters": [{
            "type": "MONITORINGENABLED",
            "value": "true"
        }],
        "attributes": [
            rate,
            "Host_Name"
        ],
        "extraTimeRange": {
            "start": date_range[0],
            "end": date_range[1],
            "interval": interval
        },
        "options": {},
        "limit": limit,
        "page": page
    }
    url = self.endpoint('v8_2/trafficAnalysis/line/usage/splitTunnel/wlan')
    r = self.session.post(url, json=payload)
    return r.json()
# RuCLIP
Zero-shot image classification model for Russian language
---
**RuCLIP** (**Ru**ssian **C**ontrastive **L**anguage–**I**mage **P**retraining) is a multimodal model
for obtaining images and text similarities and rearranging captions and pictures.
RuCLIP builds on a large body of work on zero-shot transfer, computer vision, natural language processing and
multimodal learning. This repo has the prototypes model of OpenAI CLIP's Russian version following [this paper](https://arxiv.org/abs/2103.00020).
# Models
+ [ruclip-vit-base-patch32-224](https://huggingface.co/sberbank-ai/ruclip-vit-base-patch32-224) 🤗
+ [ruclip-vit-base-patch16-224](https://huggingface.co/sberbank-ai/ruclip-vit-base-patch16-224) 🤗
+ [ruclip-vit-large-patch14-224](https://huggingface.co/sberbank-ai/ruclip-vit-large-patch14-224) 🤗
+ [ruclip-vit-base-patch32-384](https://huggingface.co/sberbank-ai/ruclip-vit-base-patch32-384) 🤗
+ [ruclip-vit-large-patch14-336](https://huggingface.co/sberbank-ai/ruclip-vit-large-patch14-336) 🤗
+ [ruclip-vit-base-patch16-384](https://huggingface.co/sberbank-ai/ruclip-vit-base-patch16-384) 🤗
# Installing
```
pip install ruclip==0.0.2
```
# Usage
[](https://colab.research.google.com/drive/1vXu3s0rcAOEAciz7B3vmVHd4J_gUJnk9?usp=sharing)
Standard RuCLIP API
[](https://colab.research.google.com/drive/1hgu7GNfBriLmAHg1oskdNIQsc0WJMwDa?usp=sharing)
RuCLIP + SberVqgan
[](https://colab.research.google.com/github/Lednik7/CLIP-ONNX/blob/main/examples/RuCLIP_onnx_example.ipynb)
ONNX example
### Init models
```python
import ruclip
device = 'cuda'
clip, processor = ruclip.load('ruclip-vit-base-patch32-384', device=device)
```
### Zero-Shot Classification [Minimal Example]
```python
import torch
import base64
import requests
import matplotlib.pyplot as plt
from PIL import Image
from io import BytesIO
# prepare images
bs4_urls = requests.get('https://raw.githubusercontent.com/sberbank-ai/ru-dolph/master/pics/pipelines/cats_vs_dogs_bs4.json').json()
images = [Image.open(BytesIO(base64.b64decode(bs4_url))) for bs4_url in bs4_urls]
# prepare classes
classes = ['кошка', 'собака']
templates = ['{}', 'это {}', 'на картинке {}', 'это {}, домашнее животное']
# predict
predictor = ruclip.Predictor(clip, processor, device, bs=8, templates=templates)
with torch.no_grad():
text_latents = predictor.get_text_latents(classes)
pred_labels = predictor.run(images, text_latents)
# show results
f, ax = plt.subplots(2,4, figsize=(12,6))
for i, (pil_img, pred_label) in enumerate(zip(images, pred_labels)):
ax[i//4, i%4].imshow(pil_img)
ax[i//4, i%4].set_title(classes[pred_label])
```

### Cosine similarity Visualization Example

### Softmax Scores Visualization Example

### Linear Probe and ZeroShot Correlation Results

### Linear Probe Example
```python
train = CIFAR100(root, download=True, train=True)
test = CIFAR100(root, download=True, train=False)
with torch.no_grad():
X_train = predictor.get_image_latents((pil_img for pil_img, _ in train)).cpu().numpy()
X_test = predictor.get_image_latents((pil_img for pil_img, _ in test)).cpu().numpy()
y_train, y_test = np.array(train.targets), np.array(test.targets)
clf = LogisticRegression(solver='lbfgs', penalty='l2', max_iter=1000, verbose=1)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
accuracy = np.mean((y_test == y_pred).astype(np.float)) * 100.
print(f"Accuracy = {accuracy:.3f}")
```
`>>> Accuracy = 75.680`
# Performance
We have evaluated the performance of zero-shot image classification on the following datasets:
| Dataset | [ruCLIP Base \[vit-base-patch32-224\]](https://huggingface.co/sberbank-ai/ruclip-vit-base-patch32-224) | [ruCLIP Base \[vit-base-patch16-224\]](https://huggingface.co/sberbank-ai/ruclip-vit-base-patch16-224) | [ruCLIP Large \[vit-large-patch14-224\]](https://huggingface.co/sberbank-ai/ruclip-vit-large-patch14-224) | [ruCLIP Base \[vit-base-patch32-384\]](https://huggingface.co/sberbank-ai/ruclip-vit-base-patch32-384) | [ruCLIP Large \[vit-large-patch14-336\]](https://huggingface.co/sberbank-ai/ruclip-vit-large-patch14-336) | [ruCLIP Base \[vit-base-patch16-384\]](https://huggingface.co/sberbank-ai/ruclip-vit-base-patch16-384) | CLIP \[vit-base-patch16-224\] original + [OPUS-MT](https://huggingface.co/Helsinki-NLP/opus-mt-ru-en) | CLIP \[vit-base-patch16-224\] original |
| :----------------------------- | :------------------------------------------------------------------------------------------------------ | :------------------------------------------------------------------------------------------------------ | :--------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------ | :-------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------- | :-------------------------------------- |
| Food101, acc | 0.505 | 0.552 | 0.597 | 0.642 | **0.712**💥 | 0.689 | 0.664 | 0.883 |
| CIFAR10, acc | 0.818 | 0.810 | 0.878 | 0.862 | **0.906**💥 | 0.845 | 0.859 | 0.893 |
| CIFAR100, acc | 0.504 | 0.496 | 0.511 | 0.529 | 0.591 | 0.569 | **0.603**💥 | 0.647 |
| Birdsnap, acc | 0.115 | 0.117 | 0.172 | 0.161 | **0.213**💥 | 0.195 | 0.126 | 0.396 |
| SUN397, acc | 0.452 | 0.462 | 0.484 | 0.510 | **0.523**💥 | 0.521 | 0.447 | 0.631 |
| Stanford Cars, acc | 0.433 | 0.487 | 0.559 | 0.572 | **0.659**💥 | 0.626 | 0.567 | 0.638 |
| DTD, acc | 0.380 | 0.401 | 0.370 | 0.390 | 0.408 | **0.421**💥 | 0.243 | 0.432 |
| MNIST, acc | 0.447 | 0.464 | 0.337 | 0.404 | 0.242 | 0.478 | **0.559**💥 | 0.559 |
| STL10, acc | 0.932 | 0.932 | 0.934 | 0.946 | 0.956 | 0.964 | **0.967**💥 | 0.970 |
| PCam, acc | 0.501 | 0.505 | 0.520 | 0.506 | 0.554 | 0.501 | **0.603**💥 | 0.573 |
| CLEVR, acc | 0.148 | 0.128 | 0.152 | 0.188 | 0.142 | 0.132 | **0.240**💥 | 0.240 |
| Rendered SST2, acc | 0.489 | 0.527 | 0.529 | 0.508 | **0.539**💥 | 0.525 | 0.484 | 0.484 |
| ImageNet, acc | 0.375 | 0.401 | 0.426 | 0.451 | **0.488**💥 | 0.482 | 0.392 | 0.638 |
| FGVC Aircraft, mean-per-class | 0.033 | 0.043 | 0.046 | 0.053 | 0.075 | 0.046 | **0.220**💥 | 0.244 |
| Oxford Pets, mean-per-class | 0.560 | 0.595 | 0.604 | 0.587 | 0.546 | **0.635**💥 | 0.507 | 0.874 |
| Caltech101, mean-per-class | 0.786 | 0.775 | 0.777 | 0.834 | **0.835**💥 | **0.835**💥 | 0.792 | 0.883 |
| Flowers102, mean-per-class | 0.401 | 0.388 | 0.455 | 0.449 | **0.517**💥 | 0.452 | 0.357 | 0.697 |
| Hateful Memes, roc-auc | 0.564 | 0.516 | 0.530 | 0.537 | 0.519 | 0.543 | **0.579**💥 | 0.589 |
And for linear-prob evaluation:
| Dataset | ruCLIP Base \[vit-base-patch32-224\] | ruCLIP Base \[vit-base-patch16-224\] | ruCLIP Large \[vit-large-patch14-224\] | ruCLIP Base \[vit-base-patch32-384\] | [ruCLIP Large \[vit-large-patch14-336\]](https://huggingface.co/sberbank-ai/ruclip-vit-large-patch14-336) | [ruCLIP Base \[vit-base-patch16-384\]](https://huggingface.co/sberbank-ai/ruclip-vit-base-patch16-384) | CLIP \[vit-base-patch16-224\] original |
| :------------- | :------------------------------------ | :------------------------------------ | :-------------------------------------- | :------------------------------------ | :------------------------------------------------ | :----------------------------------------------- | :-------------------------------------- |
| Food101 | 0.765 | 0.827 | 0.840 | 0.851 | **0.896**💥 | 0.890 | 0.901 |
| CIFAR10 | 0.917 | 0.922 | 0.927 | 0.934 | **0.943**💥 | 0.942 | 0.953 |
| CIFAR100 | 0.716 | 0.739 | 0.734 | 0.745 | 0.770 | **0.773**💥 | 0.808 |
| Birdsnap | 0.347 | 0.503 | 0.567 | 0.434 | 0.609 | **0.612**💥 | 0.664 |
| SUN397 | 0.683 | 0.721 | 0.731 | 0.721 | **0.759**💥 | 0.758 | 0.777 |
| Stanford Cars | 0.697 | 0.776 | 0.797 | 0.766 | 0.831 | **0.840**💥 | 0.866 |
| DTD | 0.690 | 0.734 | 0.711 | 0.703 | 0.731 | **0.749**💥 | 0.770 |
| MNIST | 0.963 | **0.974**💥 | 0.949 | 0.965 | 0.949 | 0.971 | 0.989 |
| STL10 | 0.957 | 0.962 | 0.973 | 0.968 | **0.981**💥 | 0.974 | 0.982 |
| PCam | 0.827 | 0.823 | 0.791 | 0.835 | 0.807 | **0.846**💥 | 0.830 |
| CLEVR | 0.356 | 0.360 | 0.358 | 0.308 | 0.318 | **0.378**💥 | 0.604 |
| Rendered SST2 | 0.603 | 0.655 | 0.651 | 0.651 | 0.637 | **0.661**💥 | 0.606 |
| FGVC Aircraft | 0.254 | 0.312 | 0.290 | 0.283 | 0.341 | **0.362**💥 | 0.604 |
| Oxford Pets | 0.774 | 0.820 | 0.819 | 0.730 | 0.753 | **0.856**💥 | 0.931 |
| Caltech101 | 0.904 | 0.917 | 0.914 | 0.922 | **0.937**💥 | 0.932 | 0.956 |
| HatefulMemes | 0.545 | 0.568 | 0.563 | 0.581 | **0.585**💥 | 0.578 | 0.645 |
Also, we have created a speed comparison on the CIFAR100 dataset, using an Nvidia V100 for evaluation:
| | ruclip-vit-base-patch32-224 | ruclip-vit-base-patch16-224 | ruclip-vit-large-patch14-224 | ruclip-vit-base-patch32-384 | ruclip-vit-large-patch14-336 | ruclip-vit-base-patch16-384 |
|----------|-----------------------------|-----------------------------|------------------------------|-----------------------------|------------------------------|-----------------------------|
| iter/sec | **308.84** 💥 | 155.35 | 49.95 | 147.26 | 22.11 | 61.79 |
# Authors
+ Alex Shonenkov: [Github](https://github.com/shonenkov), [Kaggle GM](https://www.kaggle.com/shonenkov)
+ Daniil Chesakov: [Github](https://github.com/Danyache)
+ Denis Dimitrov: [Github](https://github.com/denndimitrov)
+ Igor Pavlov: [Github](https://github.com/boomb0om)
+ Andrey Kuznetsov: [Github](https://github.com/kuznetsoffandrey)
+ Anastasia Maltseva: [Github](https://github.com/NastyaMittseva)
# Supported by
[<img src="https://raw.githubusercontent.com/sberbank-ai/ru-dolph/master/pics/logo/airi-logo.png" height="50"/>](https://airi.net)
### Social Media
[](https://habr.com/ru/company/sberbank/blog/646447/)
| /ruclip-0.0.2.tar.gz/ruclip-0.0.2/README.md | 0.492676 | 0.874507 | README.md | pypi |
import paddle
import numpy as np
class ImagePrompts:
    # Encodes a border strip of a PIL image into VQGAN codebook tokens that can
    # be fixed as "image prompts" during autoregressive generation.

    def __init__(self, pil_image, borders, vae, device='cpu', crop_first=False):
        """
        Args:
            pil_image (PIL.Image): image in PIL format
            borders (dict[str] | int): borders that we croped from pil_image
                example: {'up': 4, 'right': 0, 'left': 0, 'down': 0} (1 int eq 8 pixels)
            vae (VQGanGumbelVAE): VQGAN model for image encoding
            device (str): cpu or cuda
            crop_first (bool): if True, croped image before VQGAN encoding
        """
        self.device = device
        img = self._preprocess_img(pil_image)
        # image_prompts_idx: set of flat token positions covered by the borders;
        # image_prompts: the full token grid flattened to (batch, tokens).
        self.image_prompts_idx, self.image_prompts = self._get_image_prompts(img, borders, vae, crop_first)

    def _preprocess_img(self, pil_img):
        # HWC uint8 PIL image -> CHW float tensor scaled to [-1, 1] with a
        # leading batch dimension.
        img = paddle.to_tensor(np.array(pil_img.convert('RGB')).transpose(2, 0, 1)) / 255.
        # NOTE(review): `.to(device, dtype=...)` is torch-style; paddle tensors
        # normally use `.astype(...)` / `place=` — confirm this resolves under
        # the paddle version this package targets.
        img = img.unsqueeze(0).to(self.device, dtype=paddle.float32)
        img = (2 * img) - 1
        return img

    @staticmethod
    def _get_image_prompts(img, borders, vae, crop_first):
        # Encode the image (or just its top strip) into VQGAN token indices and
        # build a boolean mask of which token-grid cells lie inside the borders.
        if crop_first:
            # Crop-first mode supports only a top border: encode just the top
            # strip (1 border unit == 8 pixels == 1 token row).
            assert borders['right'] + borders['left'] + borders['down'] == 0
            up_border = borders['up'] * 8
            _, _, [_, _, vqg_img] = vae.model.encode(img[:, :, :up_border, :])
        else:
            _, _, [_, _, vqg_img] = vae.model.encode(img)
        bs, vqg_img_w, vqg_img_h = vqg_img.shape
        # Mark every token cell that falls inside one of the requested borders.
        mask = paddle.zeros([vqg_img_w, vqg_img_h])
        if borders['up'] != 0:
            mask[:borders['up'], :] = 1.
        if borders['down'] != 0:
            mask[-borders['down']:, :] = 1.
        # NOTE(review): 'right' masks the FIRST columns and 'left' the LAST
        # ones, which looks swapped relative to the key names — confirm the
        # intended orientation against callers before relying on it.
        if borders['right'] != 0:
            mask[:, :borders['right']] = 1.
        if borders['left'] != 0:
            mask[:, -borders['left']:] = 1.
        mask = mask.reshape((-1,)).astype(paddle.bool)
        image_prompts = vqg_img.reshape((bs, -1))
        # Select the flat indices of masked cells as a plain Python set.
        image_prompts_idx = np.arange(vqg_img_w * vqg_img_h)
        image_prompts_idx = set(image_prompts_idx[mask])
        return image_prompts_idx, image_prompts
import os
import json
import paddle
import youtokentome as yttm
import paddle.vision.transforms as T
class RuCLIPProcessor:
    """Preprocessor for ruCLIP: BPE-tokenizes captions (youtokentome) and
    resizes/normalizes images into model-ready tensors."""

    # Special BPE token ids used by the tokenizer.
    eos_id = 3
    bos_id = 2
    unk_id = 1
    pad_id = 0

    def __init__(self, tokenizer_path, image_size=224, text_seq_length=76, mean=None, std=None):
        self.tokenizer = yttm.BPE(tokenizer_path)
        # Defaults are the standard ImageNet normalization statistics.
        self.mean = mean or [0.485, 0.456, 0.406]
        self.std = std or [0.229, 0.224, 0.225]
        self.image_transform = T.Compose([
            lambda img: img.convert('RGB') if img.mode != 'RGB' else img,
            # scale/ratio pinned to (1., 1.) turns the "random" crop into a
            # deterministic resize-to-square.
            T.RandomResizedCrop(image_size, scale=(1., 1.), ratio=(1., 1.)),
            T.ToTensor(),
            T.Normalize(mean=self.mean, std=self.std)
        ])
        self.text_seq_length = text_seq_length
        self.image_size = image_size

    def encode_text(self, text):
        """Tokenize a single caption (lowercased); returns (ids, mask) int64 tensors."""
        text = text.lower()
        tokens = self.tokenizer.encode([text], output_type=yttm.OutputType.ID, dropout_prob=0.0)[0]
        tokens = [self.bos_id] + tokens + [self.eos_id]
        # Truncate to the model's max sequence length (a long caption may lose
        # its eos token here).
        tokens = tokens[:self.text_seq_length]
        mask = [1] * len(tokens)
        return paddle.to_tensor(tokens).astype(paddle.int64), paddle.to_tensor(mask).astype(paddle.int64)

    def decode_text(self, encoded):
        """Detokenize ids back to a string, dropping all special tokens."""
        return self.tokenizer.decode(encoded.cpu().numpy().tolist(), ignore_ids=[
            self.eos_id, self.bos_id, self.unk_id, self.pad_id
        ])[0]

    def __call__(self, text=None, images=None, **kwargs):
        """HuggingFace-processor-style entry point.

        Args:
            text: a caption or list of captions (optional).
            images: an iterable of PIL images (optional).

        Returns:
            dict with any of 'input_ids', 'attention_mask', 'pixel_values'.
        """
        inputs = {}
        if text is not None:
            input_ids, masks = [], []
            texts = [text] if isinstance(text, str) else text
            for text in texts:
                tokens, mask = self.encode_text(text)
                input_ids.append(tokens)
                masks.append(mask)
            # NOTE(review): paddle has no top-level `pad_sequence` in current
            # releases (the torch equivalent lives in nn.utils.rnn) — confirm
            # this call resolves under the paddle version this package pins.
            inputs['input_ids'] = paddle.pad_sequence(input_ids, batch_first=True)
            inputs['attention_mask'] = paddle.pad_sequence(masks, batch_first=True)
        if images is not None:
            pixel_values = []
            for i, image in enumerate(images):
                pixel_values.append(self.image_transform(image))
            inputs['pixel_values'] = paddle.pad_sequence(pixel_values, batch_first=True)
        return inputs

    @classmethod
    def from_pretrained(cls, folder):
        """Build a processor from a model folder containing bpe.model + config.json."""
        tokenizer_path = os.path.join(folder, 'bpe.model')
        config = json.load(open(os.path.join(folder, 'config.json')))
        image_size = config['vision_config']['image_size']
        # -1: one position of the text context is held back — presumably for a
        # special token; confirm against the model's embedding layout.
        text_seq_length = config['text_config']['max_position_embeddings'] - 1
        mean, std = config.get('mean'), config.get('std')
        return cls(tokenizer_path, image_size=image_size, text_seq_length=text_seq_length, mean=mean, std=std)
import os
import json
import paddle
from clip import CLIP
class CLIPModel(CLIP):
    """Paddle port of the HuggingFace ruCLIP checkpoint on top of the base
    `CLIP` implementation, plus a one-time torch->paddle weight converter."""

    def encode_text(self, text):
        """Embed a batch of token-id sequences into the joint CLIP space.

        `text` is a (batch, seq_len) integer tensor; seq_len may be shorter
        than the model's context length.
        """
        x = self.token_embedding(text)
        # Right-pad the embeddings with zeros up to context_length so the
        # fixed-size positional embedding can be added.
        if x.shape[1] != self.context_length:
            x = paddle.concat([
                x,
                paddle.zeros(
                    [x.shape[0], self.context_length - x.shape[1], x.shape[2]],
                    dtype=x.dtype
                )
            ], 1)
        x = x + self.positional_embedding
        x = self.transformer(x)
        # Drop the padded tail again before pooling.
        x = self.ln_final(x)[:, :text.shape[1]]
        # Pool each sequence at the position of its highest token id
        # (`argmax` over ids) and project into the shared embedding space.
        select = []
        index = zip(
            paddle.arange(x.shape[0]).numpy(),
            text.argmax(axis=-1).numpy()
        )
        for i, j in index:
            select.append(x[int(i), int(j)])
        x = paddle.stack(select) @ self.text_projection
        return x

    def forward(self, **kwargs):
        """HuggingFace-style forward: accepts `pixel_values`/`input_ids` kwargs
        and returns an object exposing logits_per_image / logits_per_text."""
        logits_per_image, logits_per_text = super(CLIPModel, self).forward(
            kwargs.get('pixel_values'), kwargs.get('input_ids'))
        # Ad-hoc namespace object mimicking the transformers output class.
        outputs = type('LamdaCls', (), {
            'logits_per_image': logits_per_image,
            'logits_per_text': logits_per_text
        })
        return outputs

    @classmethod
    def from_pretrained(cls, folder):
        """Build the model from a HuggingFace-format folder (config.json +
        pytorch_model.bin), converting weights to paddle on first use."""
        with open(os.path.join(folder, 'config.json'), 'r', encoding='utf-8') as f:
            src_conf = json.load(f)
        # Translate the transformers config layout into the base CLIP kwargs.
        dst_conf = {
            'embed_dim': src_conf['projection_dim'],
            # vision
            'image_resolution': src_conf['vision_config']['image_size'],
            'vision_layers': src_conf['vision_config']['num_hidden_layers'],
            'vision_width': src_conf['vision_config']['hidden_size'],
            'vision_patch_size': src_conf['vision_config_dict']['patch_size'],
            # text
            'context_length': src_conf['text_config']['max_position_embeddings'],
            'vocab_size': src_conf['text_config']['vocab_size'],
            'transformer_width': src_conf['text_config']['hidden_size'],
            'transformer_heads': src_conf['text_config']['num_attention_heads'],
            'transformer_layers': src_conf['text_config']['num_hidden_layers'],
        }
        obj = cls(**dst_conf)
        paddle_weights = os.path.join(folder, 'ruclip_paddle.pdparams')
        if not os.path.exists(paddle_weights):
            cls.convert(folder)
        obj.set_state_dict(paddle.load(paddle_weights))
        return obj

    @staticmethod
    def convert(folder):
        """One-time conversion of the HuggingFace torch weights in `folder`
        into ruclip_paddle.pdparams, renaming parameters to the base CLIP
        layout. No-op if the converted file already exists."""
        import os
        import torch
        torch_weights = os.path.join(folder, 'pytorch_model.bin')
        target_model_path = os.path.join(folder, 'ruclip_paddle.pdparams')
        if os.path.exists(target_model_path):
            return
        state_dict = torch.load(torch_weights, map_location='cpu')
        # (hf_name_fragment, paddle_name_fragment, needs_transpose) triples;
        # a weight may match several fragments, each rename is applied in turn.
        name_pairs = [
            ('text_model.embeddings.position_embedding.weight', 'positional_embedding', False),
            ('visual_projection.weight', 'visual.proj', True),
            ('text_projection.weight', 'text_projection', True),
            ('text_model.embeddings.token_embedding.weight', 'token_embedding.weight', False),
            ('logit_scale', 'logit_scale', False),
            ('vision_model.embeddings.class_embedding', 'visual.class_embedding', False),
            ('vision_model.embeddings.patch_embedding.weight', 'visual.conv1.weight', False),
            ('vision_model.embeddings.position_embedding.weight', 'visual.positional_embedding', False),
            ('vision_model.pre_layrnorm', 'visual.ln_pre', False),
            ('vision_model.encoder.layers', 'visual.transformer.resblocks', True),
            ('text_model.encoder.layers', 'transformer.resblocks', True),
            ('self_attn.k_proj', 'attn.k_proj', True),
            ('self_attn.v_proj', 'attn.v_proj', True),
            ('self_attn.q_proj', 'attn.q_proj', True),
            ('self_attn.out_proj', 'attn.out_proj', True),
            ('layer_norm1', 'ln_1', False),
            ('layer_norm2', 'ln_2', False),
            ('mlp.fc1', 'mlp.c_fc', True),
            ('mlp.fc2', 'mlp.c_proj', True),
            ('vision_model.post_layernorm', 'visual.ln_post', False),
            ('text_model.final_layer_norm', 'ln_final', False)
        ]
        # Buffers that have no counterpart in the paddle model.
        exclude_names = [
            'text_model.embeddings.position_ids',
            'vision_model.embeddings.position_ids'
        ]
        paddle_state_dict = {}
        for name, param in state_dict.items():
            is_pair = False
            no_need_transpose = True
            if name in exclude_names:
                continue
            for pre_name, post_name, do_transpose in name_pairs:
                if pre_name in name:
                    is_pair = True
                    name = name.replace(pre_name, post_name)
                    # Once any matched fragment requests a transpose, keep it.
                    no_need_transpose = not do_transpose if no_need_transpose else False
            assert is_pair, f'Weight of {name} need to be converted.'
            # Torch Linear weights are (out, in); paddle expects (in, out).
            if not no_need_transpose and param.ndim == 2:
                param = param.transpose(1, 0)
            # Paddle cannot store 0-d tensors; promote scalars to shape (1,).
            if param.ndim == 0:
                param = param.unsqueeze(0)
            param = param.cpu().detach().numpy()
            paddle_state_dict[name] = param
        paddle.save(paddle_state_dict, target_model_path)
import paddle
def _init_mask(text_tokens, image_tokens_per_dim):
    """Build the base causal (lower-triangular) attention mask over the full
    text + flattened-image token sequence."""
    size = text_tokens + image_tokens_per_dim ** 2
    return paddle.tril(paddle.ones([size, size]))
def get_row_mask(text_tokens=256, image_tokens_per_dim=32):
    """Sparse "row" attention mask: start from the causal mask and, for every
    image-token column, zero out attention from positions more than one grid
    row (image_tokens_per_dim + 1 steps) ahead."""
    mask = _init_mask(text_tokens, image_tokens_per_dim)
    step = image_tokens_per_dim + 1
    for col in range(text_tokens, mask.shape[1]):
        # NOTE: `col > mask.shape[1]` can never be true here (col < shape[1]
        # by construction of the range); kept for parity with the other masks.
        if col + step >= mask.shape[0] or col > mask.shape[1]:
            continue  # paddle does not support index >= tensor shape
        mask[col + step:, col] = 0.0
    return mask
def get_col_mask(text_tokens=256, image_tokens_per_dim=32):
    """Sparse "column" attention mask: start from the causal mask and, for
    every image-token column, zero out later positions that do not share the
    same image-grid column (keeping one position per grid row)."""
    mask = _init_mask(text_tokens, image_tokens_per_dim)
    step = image_tokens_per_dim - 1
    for col in range(text_tokens, mask.shape[1]):
        # Walk ahead in strides of one grid row, zeroing the `step` cells that
        # are not in the same image column as `col`.
        for i in range(1, mask.shape[0], step+1):
            if col + i >= mask.shape[0] or col + i + step > mask.shape[0] or col > mask.shape[1]:
                continue  # paddle does not support index >= tensor shape
            mask[col + i: col + i + step, col] = 0.0
    return mask
def get_conv_mask(text_tokens=256, image_tokens_per_dim=32, kernel=11):
    """Convolutional attention mask: each image token may attend to all text
    tokens and to image tokens inside a kernel x kernel neighbourhood of its
    own grid cell, with neighbour coordinates wrapping at the grid edges."""
    mask = _init_mask(text_tokens, image_tokens_per_dim)
    shift = kernel // 2
    for pos in range(text_tokens, mask.shape[1]):
        # First forbid everything after `pos` in this column, then re-enable
        # just the neighbourhood cells below.
        if pos + 1 < mask.shape[0] and pos < mask.shape[1]:
            mask[pos+1:, pos] = 0.0
        # `img` is only a scratch buffer marking the neighbourhood; it is
        # never read back or returned.
        img = paddle.zeros([image_tokens_per_dim, image_tokens_per_dim])
        pixel_id = pos - text_tokens
        row = pixel_id // image_tokens_per_dim
        col = pixel_id % image_tokens_per_dim
        for r in range(-shift, shift+1):
            for c in range(-shift, shift+1):
                # Neighbour coordinates wrap around the token grid (modulo).
                c_abs = (c + col) % image_tokens_per_dim
                r_abs = (r + row) % image_tokens_per_dim
                img[r_abs, c_abs] = 0.2
                cell_id = r_abs * image_tokens_per_dim + c_abs
                # Re-enable attention from later neighbourhood cells to `pos`.
                if text_tokens + cell_id > pos:
                    mask[text_tokens + cell_id, pos] = 1.0
        img[row, col] = 1.0
    return mask
from math import sqrt, log
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle import einsum
from einops import rearrange
from taming.modules.diffusionmodules.model import Encoder, Decoder
class VQGanGumbelVAE(paddle.nn.Layer):
    # Wrapper around a Gumbel-softmax VQGAN mapping images to and from
    # discrete codebook token sequences.

    def __init__(self, config, ):
        super().__init__()
        model = GumbelVQ(
            ddconfig=config.model.params.ddconfig,
            n_embed=config.model.params.n_embed,
            embed_dim=config.model.params.embed_dim,
            kl_weight=config.model.params.kl_weight,
        )
        self.model = model
        # Number of levels implied by the first attention resolution
        # (log2 of the resolution at which attention is applied).
        self.num_layers = int(log(config.model.params.ddconfig.attn_resolutions[0]) / log(2))
        self.image_size = 256
        self.num_tokens = config.model.params.n_embed

    @paddle.no_grad()
    def get_codebook_indices(self, img):
        """Encode images in [0, 1] into flat (batch, h*w) codebook indices."""
        # Rescale [0, 1] -> [-1, 1] as the underlying VQGAN expects.
        img = (2 * img) - 1
        _, _, [_, _, indices] = self.model.encode(img)
        return rearrange(indices, 'b h w -> b (h w)')

    def decode(self, img_seq):
        """Decode a (batch, n_tokens) index sequence back to images in [0, 1]."""
        b, n = img_seq.shape
        # Look up codebook embeddings via one-hot matmul.
        one_hot_indices = paddle.nn.functional.one_hot(img_seq, num_classes=self.num_tokens).astype(paddle.float32)
        z = paddle.matmul(one_hot_indices, self.model.quantize.embed.weight)
        # The token sequence is assumed to form a square grid.
        z = rearrange(z, 'b (h w) c -> b c h w', h=int(sqrt(n)))
        img = self.model.decode(z)
        img = (img.clip(-1., 1.) + 1) * 0.5
        return img

    @staticmethod
    def convert(model_path):
        """Convert a torch checkpoint at `model_path` into a paddle .pdckpt
        file alongside it.

        NOTE(review): on a cache hit this returns the .pdckpt path, but after
        a fresh conversion it returns the original `model_path` — looks
        inconsistent; confirm which path the callers expect.
        """
        import os
        import torch
        torch_weights = model_path
        target_model_path = model_path[:-5] + '.pdckpt'
        if os.path.exists(target_model_path):
            return target_model_path
        state_dict = torch.load(torch_weights, map_location='cpu')['state_dict']
        paddle_state_dict = {}
        for name, param in state_dict.items():
            # Torch Linear weights are (out, in); paddle expects (in, out).
            if 'dense' in name and param.ndim == 2:
                param = param.transpose(1, 0)
            # Paddle cannot store 0-d tensors; promote scalars to shape (1,).
            if param.ndim == 0:
                param = param.unsqueeze(0)
            param = param.cpu().detach().numpy()
            paddle_state_dict[name] = param
        paddle.save({'state_dict': paddle_state_dict}, target_model_path)
        return model_path
class GumbelQuantize(nn.Layer):
    """
    credit to @karpathy: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py (thanks!)
    Gumbel Softmax trick quantizer
    Categorical Reparameterization with Gumbel-Softmax, Jang et al. 2016
    https://arxiv.org/abs/1611.01144
    """
    def __init__(self, num_hiddens, embedding_dim, n_embed, straight_through=True,
                 kl_weight=5e-4, temp_init=1.0, use_vqinterface=True):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.n_embed = n_embed
        self.straight_through = straight_through
        self.temperature = temp_init
        self.kl_weight = kl_weight
        # 1x1 conv producing one logit per codebook entry at every position.
        self.proj = nn.Conv2D(num_hiddens, n_embed, 1)
        self.embed = nn.Embedding(self.n_embed, self.embedding_dim)
        self.use_vqinterface = use_vqinterface

    def forward(self, z, temp=None, return_logits=False):
        """Quantize feature map `z`; returns quantized latents, a KL penalty,
        and the selected codebook indices (format depends on use_vqinterface)."""
        # At eval time always sample hard one-hots; during training the
        # straight-through flag controls soft vs. hard samples.
        hard = self.straight_through if self.training else True
        temp = self.temperature if temp is None else temp
        logits = self.proj(z)
        soft_one_hot = F.gumbel_softmax(logits, tau=temp, axis=1, hard=hard)
        # Weighted sum of codebook vectors per spatial position.
        z_q = einsum('b n h w, n d -> b d h w', soft_one_hot, self.embed.weight)
        # + kl divergence to the (uniform) prior, scaled by kl_weight.
        qy = F.softmax(logits, axis=1)
        diff = self.kl_weight * paddle.sum(qy * paddle.log(qy * self.n_embed + 1e-10), axis=1).mean()
        ind = soft_one_hot.argmax(axis=1)
        if self.use_vqinterface:
            # (None, None, ind) mimics the VectorQuantizer info tuple layout.
            if return_logits:
                return z_q, diff, (None, None, ind), logits
            return z_q, diff, (None, None, ind)
        return z_q, diff, ind
class GumbelVQ(nn.Layer):
    """Minimal Gumbel-VQGAN: encoder -> GumbelQuantize bottleneck -> decoder."""

    def __init__(self, ddconfig, n_embed, embed_dim, kl_weight=1e-8):
        super().__init__()
        z_channels = ddconfig['z_channels']
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        self.quantize = GumbelQuantize(z_channels, embed_dim, n_embed=n_embed, kl_weight=kl_weight, temp_init=1.0)
        # 1x1 convs adapting channel counts on either side of the bottleneck.
        self.quant_conv = paddle.nn.Conv2D(ddconfig['z_channels'], embed_dim, 1)
        self.post_quant_conv = paddle.nn.Conv2D(embed_dim, ddconfig['z_channels'], 1)

    def encode(self, x):
        """Return (quantized latents, embedding loss, quantizer info tuple)."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        quant, emb_loss, info = self.quantize(h)
        return quant, emb_loss, info

    def decode(self, quant):
        """Decode quantized latents back into image space."""
        quant = self.post_quant_conv(quant)
        dec = self.decoder(quant)
        return dec
import numpy as np
def pad_reflect(image, pad_size):
    """Pad an HxWxC image by `pad_size` pixels on every side using
    edge-inclusive reflection.

    Top/bottom are mirrored from the image's outermost rows (including the
    edge row itself); left/right are mirrored from the already-padded array,
    so the corners end up reflected in both axes.

    Args:
        image: numpy array of shape (H, W, C).
        pad_size: number of pixels to add on each side.

    Returns:
        numpy uint8 array of shape (H + 2*pad_size, W + 2*pad_size, C).
    """
    imsize = image.shape
    height, width = imsize[:2]
    # Allocate directly as uint8 instead of allocating float64 and casting.
    new_img = np.zeros((height + pad_size * 2, width + pad_size * 2, imsize[2]), dtype=np.uint8)
    new_img[pad_size:-pad_size, pad_size:-pad_size, :] = image

    new_img[0:pad_size, pad_size:-pad_size, :] = np.flip(image[0:pad_size, :, :], axis=0)  # top
    new_img[-pad_size:, pad_size:-pad_size, :] = np.flip(image[-pad_size:, :, :], axis=0)  # bottom
    # Reflect from new_img (not image) so the corner regions are filled too.
    new_img[:, 0:pad_size, :] = np.flip(new_img[:, pad_size:pad_size * 2, :], axis=1)  # left
    new_img[:, -pad_size:, :] = np.flip(new_img[:, -pad_size * 2:-pad_size, :], axis=1)  # right

    return new_img
def unpad_image(image, pad_size):
    """Remove a `pad_size`-pixel border from every side of an HxWxC image."""
    inner = slice(pad_size, -pad_size)
    return image[inner, inner, :]
def pad_patch(image_patch, padding_size, channel_last=True):
    """ Pads image_patch with with padding_size edge values.

    The two spatial axes are padded; the channel axis (last or first,
    depending on `channel_last`) is left untouched.
    """
    spatial = (padding_size, padding_size)
    none = (0, 0)
    pad_width = (spatial, spatial, none) if channel_last else (none, spatial, spatial)
    return np.pad(image_patch, pad_width, 'edge')
def unpad_patches(image_patches, padding_size):
    """Strip the `padding_size` border from the spatial dims of a patch batch
    of shape (n, h, w, c)."""
    inner = slice(padding_size, -padding_size)
    return image_patches[:, inner, inner, :]
def split_image_into_overlapping_patches(image_array, patch_size, padding_size=2):
    """ Splits the image into partially overlapping patches.
    The patches overlap by padding_size pixels.
    Pads the image twice:
        - first to have a size multiple of the patch size,
        - then to have equal padding at the borders.
    Args:
        image_array: numpy array of the input image.
        patch_size: size of the patches from the original image (without padding).
        padding_size: size of the overlapping area.
    """
    height, width, _ = image_array.shape
    # Grow the image (bottom/right, edge values) so both spatial dims become
    # exact multiples of patch_size; the modulo keeps already-aligned dims at 0.
    grow_h = (patch_size - height % patch_size) % patch_size
    grow_w = (patch_size - width % patch_size) % patch_size
    extended_image = np.pad(image_array, ((0, grow_h), (0, grow_w), (0, 0)), 'edge')
    # Surround with an extra border so every patch can carry its overlap.
    padded_image = pad_patch(extended_image, padding_size, channel_last=True)

    padded_h, padded_w, _ = padded_image.shape
    patches = [
        padded_image[top - padding_size:top + patch_size + padding_size,
                     left - padding_size:left + patch_size + padding_size, :]
        for top in range(padding_size, padded_h - padding_size, patch_size)
        for left in range(padding_size, padded_w - padding_size, patch_size)
    ]
    return np.array(patches), padded_image.shape
def stich_together(patches, padded_image_shape, target_shape, padding_size=4):
    """ Reconstruct the image from overlapping patches.
    After scaling, shapes and padding should be scaled too.
    Args:
        patches: patches obtained with split_image_into_overlapping_patches
        padded_image_shape: shape of the padded image constructed in split_image_into_overlapping_patches
        target_shape: shape of the final image
        padding_size: size of the overlapping area.
    Returns:
        float numpy array of shape target_shape; overlap regions are
        discarded, not blended.
    """
    xmax, ymax, _ = padded_image_shape
    # Drop the overlap border of every patch (inlined unpad_patches).
    patches = patches[:, padding_size:-padding_size, padding_size:-padding_size, :]
    patch_size = patches.shape[1]
    n_patches_per_row = ymax // patch_size
    # Derive the channel count from the patches instead of hard-coding 3,
    # so single-channel (or N-channel) inputs reconstruct correctly.
    n_channels = patches.shape[-1]

    complete_image = np.zeros((xmax, ymax, n_channels))
    for i in range(len(patches)):
        # Row-major placement: patch i goes to grid cell (i // per_row, i % per_row).
        row, col = divmod(i, n_patches_per_row)
        complete_image[
            row * patch_size: (row + 1) * patch_size,
            col * patch_size: (col + 1) * patch_size, :
        ] = patches[i]
    return complete_image[0: target_shape[0], 0: target_shape[1], :]
import paddle
from paddle import nn as nn
from paddle.nn import functional as F
from .arch_util import default_init_weights, make_layer, pixel_unshuffle
class ResidualDenseBlock(nn.Layer):
    """Residual Dense Block.

    Used in RRDB block in ESRGAN. Each conv layer receives the concatenation
    of the block input and every earlier layer's output (dense connectivity).

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat=64, num_grow_ch=32):
        super(ResidualDenseBlock, self).__init__()
        self.conv1 = nn.Conv2D(num_feat, num_grow_ch, 3, 1, 1)
        self.conv2 = nn.Conv2D(num_feat + num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv3 = nn.Conv2D(num_feat + 2 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv4 = nn.Conv2D(num_feat + 3 * num_grow_ch, num_grow_ch, 3, 1, 1)
        self.conv5 = nn.Conv2D(num_feat + 4 * num_grow_ch, num_feat, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, )

        # initialization
        default_init_weights([self.conv1, self.conv2, self.conv3, self.conv4, self.conv5], 0.1)

    def forward(self, x):
        # Dense connectivity: each conv sees the input plus all earlier outputs.
        features = [x]
        for conv in (self.conv1, self.conv2, self.conv3, self.conv4):
            features.append(self.lrelu(conv(paddle.concat(features, 1))))
        residual = self.conv5(paddle.concat(features, 1))
        # Empirically, scaling the residual by 0.2 gives better performance.
        return residual * 0.2 + x
class RRDB(nn.Layer):
    """Residual in Residual Dense Block.

    Used in RRDB-Net in ESRGAN: three chained ResidualDenseBlocks wrapped in
    an outer scaled residual connection.

    Args:
        num_feat (int): Channel number of intermediate features.
        num_grow_ch (int): Channels for each growth.
    """

    def __init__(self, num_feat, num_grow_ch=32):
        super(RRDB, self).__init__()
        self.rdb1 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb2 = ResidualDenseBlock(num_feat, num_grow_ch)
        self.rdb3 = ResidualDenseBlock(num_feat, num_grow_ch)

    def forward(self, x):
        out = x
        for dense_block in (self.rdb1, self.rdb2, self.rdb3):
            out = dense_block(out)
        # Empirically, scaling the residual by 0.2 gives better performance.
        return out * 0.2 + x
class RRDBNet(nn.Layer):
    """Networks consisting of Residual in Residual Dense Block, which is used
    in ESRGAN.

    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.
    We extend ESRGAN for scale x2 and scale x1.
    Note: This is one option for scale 1, scale 2 in RRDBNet.
    We first employ the pixel-unshuffle (an inverse operation of pixelshuffle to reduce the spatial size
    and enlarge the channel size before feeding inputs into the main ESRGAN architecture.

    Args:
        num_in_ch (int): Channel number of inputs.
        num_out_ch (int): Channel number of outputs.
        scale (int): Upscaling factor (1, 2, 4 or 8). Default: 4.
        num_feat (int): Channel number of intermediate features.
            Default: 64
        num_block (int): Block number in the trunk network. Defaults: 23
        num_grow_ch (int): Channels for each growth. Default: 32.
    """
    def __init__(self, num_in_ch, num_out_ch, scale=4, num_feat=64, num_block=23, num_grow_ch=32):
        super(RRDBNet, self).__init__()
        self.scale = scale
        # For scale 1/2 the input is pixel-unshuffled in forward(), which
        # multiplies channel count by factor**2 (x4 for scale 2, x16 for
        # scale 1), so conv_first must expect the enlarged channel count.
        if scale == 2:
            num_in_ch = num_in_ch * 4
        elif scale == 1:
            num_in_ch = num_in_ch * 16
        self.conv_first = nn.Conv2D(num_in_ch, num_feat, 3, 1, 1)
        self.body = make_layer(RRDB, num_block, num_feat=num_feat, num_grow_ch=num_grow_ch)
        self.conv_body = nn.Conv2D(num_feat, num_feat, 3, 1, 1)
        # upsample
        self.conv_up1 = nn.Conv2D(num_feat, num_feat, 3, 1, 1)
        self.conv_up2 = nn.Conv2D(num_feat, num_feat, 3, 1, 1)
        if scale == 8:
            self.conv_up3 = nn.Conv2D(num_feat, num_feat, 3, 1, 1)
        self.conv_hr = nn.Conv2D(num_feat, num_feat, 3, 1, 1)
        self.conv_last = nn.Conv2D(num_feat, num_out_ch, 3, 1, 1)
        self.lrelu = nn.LeakyReLU(negative_slope=0.2, )
    def forward(self, x):
        if self.scale == 2:
            feat = pixel_unshuffle(x, scale=2)
        elif self.scale == 1:
            feat = pixel_unshuffle(x, scale=4)
        else:
            feat = x
        feat = self.conv_first(feat)
        body_feat = self.conv_body(self.body(feat))
        # Long skip connection around the whole RRDB trunk.
        feat = feat + body_feat
        # upsample: two fixed x2 nearest-neighbor stages (plus a third for x8)
        feat = self.lrelu(self.conv_up1(F.interpolate(feat, scale_factor=2, mode='nearest')))
        feat = self.lrelu(self.conv_up2(F.interpolate(feat, scale_factor=2, mode='nearest')))
        if self.scale == 8:
            feat = self.lrelu(self.conv_up3(F.interpolate(feat, scale_factor=2, mode='nearest')))
        out = self.conv_last(self.lrelu(self.conv_hr(feat)))
        return out
import math
import paddle
from paddle import nn as nn
from paddle.nn import functional as F
from paddle.nn import initializer as init
from paddle.nn.layer.norm import _BatchNormBase
@paddle.no_grad()
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
    """Initialize network weights.

    Args:
        module_list (list[nn.Layer] | nn.Layer): Modules to be initialized.
        scale (float): Scale initialized weights, especially for residual
            blocks. Default: 1.
        bias_fill (float): The value to fill bias. Default: 0
        kwargs (dict): Other arguments for initialization function
            (forwarded to ``init.KaimingNormal``).
    """
    if not isinstance(module_list, list):
        module_list = [module_list]
    for module in module_list:
        for m in module.sublayers():
            if isinstance(m, nn.Conv2D):
                # Kaiming-normal init, then shrink by ``scale`` (values < 1
                # stabilize residual branches at the start of training).
                init.KaimingNormal(**kwargs)(m.weight)
                m.weight.set_value(m.weight * scale)
                if m.bias is not None:
                    init.Constant(bias_fill)(m.bias)
            elif isinstance(m, nn.Linear):
                init.KaimingNormal(**kwargs)(m.weight)
                m.weight.set_value(m.weight * scale)
                if m.bias is not None:
                    init.Constant(bias_fill)(m.bias)
            elif isinstance(m, _BatchNormBase):
                # BatchNorm affine params start as identity (weight=1).
                init.Constant(1)(m.weight)
                if m.bias is not None:
                    init.Constant(bias_fill)(m.bias)
def make_layer(basic_block, num_basic_block, **kwarg):
    """Make layers by stacking the same blocks.

    Args:
        basic_block (nn.module): nn.module class for basic block.
        num_basic_block (int): number of blocks.
        kwarg: keyword arguments forwarded to each block's constructor.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    layers = []
    for _ in range(num_basic_block):
        layers.append(basic_block(**kwarg))
    return nn.Sequential(*layers)
class ResidualBlockNoBN(nn.Layer):
    """Residual block without BN.

    It has a style of:
        ---Conv-ReLU-Conv-+-
         |________________|

    Args:
        num_feat (int): Channel number of intermediate features.
            Default: 64.
        res_scale (float): Residual scale. Default: 1.
        pypaddle_init (bool): If set to True, use paddle default init,
            otherwise, use default_init_weights. Default: False.
    """
    def __init__(self, num_feat=64, res_scale=1, pypaddle_init=False):
        super(ResidualBlockNoBN, self).__init__()
        self.res_scale = res_scale
        # FIX: paddle.nn.Conv2D takes ``bias_attr`` (not torch's ``bias``);
        # passing ``bias=True`` raised a TypeError at construction time.
        self.conv1 = nn.Conv2D(num_feat, num_feat, 3, 1, 1, bias_attr=True)
        self.conv2 = nn.Conv2D(num_feat, num_feat, 3, 1, 1, bias_attr=True)
        self.relu = nn.ReLU()
        if not pypaddle_init:
            # Shrink initial weights to stabilize the residual branch.
            default_init_weights([self.conv1, self.conv2], 0.1)
    def forward(self, x):
        identity = x
        out = self.conv2(self.relu(self.conv1(x)))
        return identity + out * self.res_scale
class Upsample(nn.Sequential):
    """Upsample module.

    Args:
        scale (int): Scale factor. Supported scales: 2^n and 3.
        num_feat (int): Channel number of intermediate features.
    """
    def __init__(self, scale, num_feat):
        m = []
        if (scale & (scale - 1)) == 0:  # scale = 2^n
            # One conv + PixelShuffle(2) stage per factor of two.
            for _ in range(int(math.log(scale, 2))):
                m.append(nn.Conv2D(num_feat, 4 * num_feat, 3, 1, 1))
                m.append(nn.PixelShuffle(2))
        elif scale == 3:
            m.append(nn.Conv2D(num_feat, 9 * num_feat, 3, 1, 1))
            m.append(nn.PixelShuffle(3))
        else:
            raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.')
        super(Upsample, self).__init__(*m)
def flow_warp(x, flow, interp_mode='bilinear', padding_mode='zeros', align_corners=True):
    """Warp an image or feature map with optical flow.

    Args:
        x (Tensor): Tensor with size (n, c, h, w).
        flow (Tensor): Tensor with size (n, h, w, 2), normal value.
        interp_mode (str): 'nearest' or 'bilinear'. Default: 'bilinear'.
        padding_mode (str): 'zeros' or 'border' or 'reflection'.
            Default: 'zeros'.
        align_corners (bool): Before paddle 1.3, the default value is
            align_corners=True. After paddle 1.3, the default value is
            align_corners=False. Here, we use the True as default.

    Returns:
        Tensor: Warped image or feature map.
    """
    assert x.shape[-2:] == flow.shape[1:3]
    _, _, h, w = x.shape
    # create mesh grid of absolute pixel coordinates
    grid_y, grid_x = paddle.meshgrid(paddle.arange(0, h).astype(x.dtype), paddle.arange(0, w).astype(x.dtype))
    grid = paddle.stack((grid_x, grid_y), 2).astype(paddle.float32)  # W(x), H(y), 2
    # The base grid is a constant; no gradient should flow through it.
    grid.stop_gradient = True
    vgrid = grid + flow
    # scale grid to [-1,1] as required by grid_sample
    vgrid_x = 2.0 * vgrid[:, :, :, 0] / max(w - 1, 1) - 1.0
    vgrid_y = 2.0 * vgrid[:, :, :, 1] / max(h - 1, 1) - 1.0
    vgrid_scaled = paddle.stack((vgrid_x, vgrid_y), axis=3)
    output = F.grid_sample(x, vgrid_scaled, mode=interp_mode, padding_mode=padding_mode, align_corners=align_corners)
    # TODO, what if align_corners=False
    return output
def resize_flow(flow, size_type, sizes, interp_mode='bilinear', align_corners=False):
    """Resize a flow according to ratio or shape.

    Args:
        flow (Tensor): Precomputed flow. shape [N, 2, H, W].
        size_type (str): 'ratio' or 'shape'.
        sizes (list[int | float]): the ratio for resizing or the final output
            shape.
            1) The order of ratio should be [ratio_h, ratio_w]. For
            downsampling, the ratio should be smaller than 1.0 (i.e., ratio
            < 1.0). For upsampling, the ratio should be larger than 1.0 (i.e.,
            ratio > 1.0).
            2) The order of output_size should be [out_h, out_w].
        interp_mode (str): The mode of interpolation for resizing.
            Default: 'bilinear'.
        align_corners (bool): Whether align corners. Default: False.

    Returns:
        Tensor: Resized flow.

    Raises:
        ValueError: If ``size_type`` is neither 'ratio' nor 'shape'.
    """
    _, _, flow_h, flow_w = flow.shape
    if size_type == 'ratio':
        output_h, output_w = int(flow_h * sizes[0]), int(flow_w * sizes[1])
    elif size_type == 'shape':
        output_h, output_w = sizes[0], sizes[1]
    else:
        raise ValueError(f'Size type should be ratio or shape, but got type {size_type}.')
    input_flow = flow.clone()
    # Flow vectors are pixel displacements, so they must be rescaled along
    # with the spatial resolution (x-displacements by ratio_w, y by ratio_h).
    ratio_h = output_h / flow_h
    ratio_w = output_w / flow_w
    input_flow[:, 0, :, :] *= ratio_w
    input_flow[:, 1, :, :] *= ratio_h
    resized_flow = F.interpolate(
        input=input_flow, size=(output_h, output_w), mode=interp_mode, align_corners=align_corners)
    return resized_flow
# TODO: may write a cpp file
def pixel_unshuffle(x, scale):
    """ Pixel unshuffle (inverse of pixel shuffle).

    Args:
        x (Tensor): Input feature with shape (b, c, hh, hw).
        scale (int): Downsample ratio.

    Returns:
        Tensor: the pixel unshuffled feature with shape
            (b, c * scale**2, hh // scale, hw // scale).
    """
    b, c, hh, hw = x.shape
    out_channel = c * (scale**2)
    # Spatial dims must divide evenly by the downsample ratio.
    assert hh % scale == 0 and hw % scale == 0
    h = hh // scale
    w = hw // scale
    # Fold each scale x scale spatial block into the channel dimension.
    x_view = x.reshape([b, c, h, scale, w, scale])
    return x_view.transpose([0, 1, 3, 5, 2, 4]).reshape([b, out_channel, h, w])
import paddle
import numpy as np
from PIL import Image
from .rrdbnet_arch import RRDBNet
from .utils import pad_reflect, split_image_into_overlapping_patches, stich_together, unpad_image
class RealESRGAN:
    """Inference wrapper around RRDBNet for Real-ESRGAN image upscaling."""
    def __init__(self, device, scale=4):
        self.device = device
        self.scale = scale
        self.model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=scale)
    def load_weights(self, model_path):
        """Load weights from a paddle checkpoint; '.pth' files are converted first."""
        if model_path[-4:] == '.pth':
            # Torch checkpoint: convert to .pdparams once and reuse it.
            model_path = RealESRGAN.convert(model_path)
        loadnet = paddle.load(model_path)
        # Checkpoints may wrap the state dict under 'params' or 'params_ema'.
        if 'params' in loadnet:
            self.model.set_state_dict(loadnet['params'])
        elif 'params_ema' in loadnet:
            self.model.set_state_dict(loadnet['params_ema'])
        else:
            self.model.set_state_dict(loadnet)
        self.model.eval()
        self.model.to(self.device)
    def predict(self, lr_image, batch_size=4, patches_size=192,
                padding=24, pad_size=15):
        """Upscale a PIL image by splitting it into overlapping patches.

        Patches are processed in batches of ``batch_size`` and stitched back
        together; reflect padding around the border reduces edge artifacts.
        """
        scale = self.scale
        device = self.device
        lr_image = np.array(lr_image)
        lr_image = pad_reflect(lr_image, pad_size)
        patches, p_shape = split_image_into_overlapping_patches(lr_image, patch_size=patches_size,
                                                                padding_size=padding)
        # HWC uint8 -> NCHW float in [0, 1]
        img = paddle.to_tensor(patches / 255.).astype(paddle.float32).transpose((0, 3, 1, 2)).to(device).detach()
        with paddle.no_grad():
            res = self.model(img[0:batch_size])
            for i in range(batch_size, img.shape[0], batch_size):
                res = paddle.concat((res, self.model(img[i:i + batch_size])), 0)
        sr_image = res.transpose((0, 2, 3, 1)).cpu().clip_(0, 1)
        np_sr_image = sr_image.numpy()
        padded_size_scaled = tuple(np.multiply(p_shape[0:2], scale)) + (3,)
        scaled_image_shape = tuple(np.multiply(lr_image.shape[0:2], scale)) + (3,)
        np_sr_image = stich_together(np_sr_image, padded_image_shape=padded_size_scaled,
                                     target_shape=scaled_image_shape, padding_size=padding * scale)
        sr_img = (np_sr_image * 255).astype(np.uint8)
        sr_img = unpad_image(sr_img, pad_size * scale)
        sr_img = Image.fromarray(sr_img)
        return sr_img
    @staticmethod
    def convert(model_path):
        """Convert a torch '.pth' checkpoint to paddle '.pdparams' (cached on disk)."""
        import os
        import torch
        torch_weights = model_path
        target_model_path = model_path[:-4] + '.pdparams'
        if os.path.exists(target_model_path):
            return target_model_path
        _state_dict = torch.load(torch_weights, map_location='cpu')
        if 'params' in _state_dict:
            state_dict = _state_dict['params']
        elif 'params_ema' in _state_dict:
            state_dict = _state_dict['params_ema']
        else:
            state_dict = _state_dict
        paddle_state_dict = {}
        for name, param in state_dict.items():
            # paddle.save cannot handle 0-d arrays; promote scalars to 1-d.
            if param.ndim == 0:
                param = param.unsqueeze(0)
            param = param.cpu().detach().numpy()
            paddle_state_dict[name] = param
        # Preserve the original wrapping key so load_weights finds it.
        if 'params' in _state_dict:
            paddle_state_dict = {'params': paddle_state_dict}
        elif 'params_ema' in _state_dict:
            paddle_state_dict = {'params_ema': paddle_state_dict}
        paddle.save(paddle_state_dict, target_model_path)
        return target_model_path
# ruDALL-E
### Generate images from texts
[](https://www.apache.org/licenses/LICENSE-2.0)
[](https://pepy.tech/project/rudalle)
[](https://codecov.io/gh/sberbank-ai/ru-dalle)
[](https://gitlab.com/shonenkov/ru-dalle/-/pipelines)
[](https://results.pre-commit.ci/latest/github/sberbank-ai/ru-dalle/master)
```
pip install rudalle==1.1.3
```
### 🤗 HF Models:
[ruDALL-E Malevich (XL)](https://huggingface.co/sberbank-ai/rudalle-Malevich) \
[ruDALL-E Emojich (XL)](https://huggingface.co/sberbank-ai/rudalle-Emojich) (readme [here](https://github.com/sberbank-ai/ru-dalle/blob/master/Emojich.md)) \
[ruDALL-E Surrealist (XL)](https://huggingface.co/shonenkov-AI/rudalle-xl-surrealist) \
ruDALL-E Kandinsky (XXL) (soon)
### Minimal Example:
[](https://colab.research.google.com/drive/1wGE-046et27oHvNlBNPH07qrEQNE04PQ?usp=sharing)
[](https://www.kaggle.com/shonenkov/rudalle-example-generation)
[](https://huggingface.co/spaces/anton-l/rudall-e)
**Example usage ruDALL-E Malevich (XL) with 3.5GB vRAM!**
[](https://colab.research.google.com/drive/1AoolDYePUpPkRCKIu0cP9zV7lX5QGD3Z?usp=sharing)
**Finetuning example**
[](https://colab.research.google.com/drive/1Tb7J4PvvegWOybPfUubl5O7m5I24CBg5?usp=sharing)
### generation by ruDALLE:
```python
import ruclip
from rudalle.pipelines import generate_images, show, super_resolution, cherry_pick_by_ruclip
from rudalle import get_rudalle_model, get_tokenizer, get_vae, get_realesrgan
from rudalle.utils import seed_everything
# prepare models:
device = 'cuda'
dalle = get_rudalle_model('Malevich', pretrained=True, fp16=True, device=device)
tokenizer = get_tokenizer()
vae = get_vae(dwt=True).to(device)
# pipeline utils:
realesrgan = get_realesrgan('x2', device=device)
clip, processor = ruclip.load('ruclip-vit-base-patch32-384', device=device)
clip_predictor = ruclip.Predictor(clip, processor, device, bs=8)
text = 'радуга на фоне ночного города'
seed_everything(42)
pil_images = []
scores = []
for top_k, top_p, images_num in [
(2048, 0.995, 24),
]:
_pil_images, _scores = generate_images(text, tokenizer, dalle, vae, top_k=top_k, images_num=images_num, bs=8, top_p=top_p)
pil_images += _pil_images
scores += _scores
show(pil_images, 6)
```

### auto cherry-pick by ruCLIP:
```python
top_images, clip_scores = cherry_pick_by_ruclip(pil_images, text, clip_predictor, count=6)
show(top_images, 3)
```

### super resolution:
```python
sr_images = super_resolution(top_images, realesrgan)
show(sr_images, 3)
```

```python
text, seed = 'красивая тян из аниме', 6955
```

### Image Prompt
see `jupyters/ruDALLE-image-prompts-A100.ipynb`
```python
text, seed = 'Храм Василия Блаженного', 42
skyes = [red_sky, sunny_sky, cloudy_sky, night_sky]
```

### VideoDALL-E | ru[CogVideo](https://github.com/THUDM/CogVideo) by [@cene555](https://github.com/cene555)
**Video generation example**
[](https://colab.research.google.com/drive/1A_3Oe9r9DP3Ayd6DPvqKHIKlwNfLhVP5?usp=sharing)
**Finetuning example**
[](https://colab.research.google.com/drive/1R_joYWlvToA24tsa9BFYa2D6ffiMtyVy?usp=sharing)
### Aspect ratio images [**-->NEW<--**](https://github.com/shonenkov-AI/rudalle-aspect-ratio)

### [Kandinsky 12B](https://github.com/ai-forever/ru-dalle/blob/master/jupyters/Kandinsky-12b-A100.ipynb)
Request access: [Here](https://docs.google.com/forms/d/e/1FAIpQLSdYCT6LKDWgWGkd0Lq_sMLe2wZDZSkMUuCQx4qdXUnd6SrhvA/viewform)
`роботы акварелью в стиле ван гога`

[](https://habr.com/ru/company/sberbank/blog/671210/)

`FID = 15.4 (COCO Valid)`
### 🚀 Contributors 🚀
- [@bes](https://github.com/bes-dev) shared [great idea and realization with IDWT](https://github.com/bes-dev/vqvae_dwt_distiller.pytorch) for decoding images with higher quality 512x512! 😈💪 thanks a lot for your constructive advices, appreciate it
- [@neverix](https://www.kaggle.com/neverix) thanks a lot for contributing for speed up of inference
- [@Igor Pavlov](https://github.com/boomb0om) trained model and prepared code with [super-resolution](https://github.com/boomb0om/Real-ESRGAN-colab)
- [@oriBetelgeuse](https://github.com/oriBetelgeuse) thanks a lot for easy API of generation using image prompt
- [@Alex Wortega](https://github.com/AlexWortega) created first FREE version colab notebook with fine-tuning [ruDALL-E Malevich (XL)](https://huggingface.co/sberbank-ai/rudalle-Malevich) on sneakers domain 💪
- [@Anton Lozhkov](https://github.com/anton-l) Integrated to [Huggingface Spaces](https://huggingface.co/spaces) with [Gradio](https://github.com/gradio-app/gradio), see [here](https://huggingface.co/spaces/anton-l/rudall-e)
### Supported by
[<img src="https://raw.githubusercontent.com/sberbank-ai/ru-dolph/master/pics/logo/airi-logo.png" height="50"/>](https://airi.net)
### Social Media
[](https://habr.com/ru/company/sberbank/blog/589673/)
[](https://habr.com/ru/company/sberdevices/blog/586926/)
| /rudalle-1.1.3.tar.gz/rudalle-1.1.3/README.md | 0.453262 | 0.929983 | README.md | pypi |
from enum import Enum
import logging
import numbers
from decimal import Decimal
from datetime import date, datetime
from dateutil.tz import tzlocal, tzutc
log = logging.getLogger('rudderstack')
def is_naive(dt):
    """Determines if a given datetime.datetime is naive."""
    tz = dt.tzinfo
    if tz is None:
        return True
    return tz.utcoffset(dt) is None
def total_seconds(delta):
    """Return the total number of seconds in *delta* as a float.

    ``timedelta.total_seconds()`` has been available since Python 2.7 and
    computes exactly the formula this helper previously spelled out by hand.
    """
    return delta.total_seconds()
def guess_timezone(dt):
    """Attempts to convert a naive datetime to an aware datetime."""
    if not is_naive(dt):
        return dt
    # A naive timestamp taken within the last few seconds was almost
    # certainly produced by datetime.now(), i.e. in the local timezone.
    age = datetime.now() - dt
    if total_seconds(age) < 5:
        return dt.replace(tzinfo=tzlocal())
    # Otherwise the best guess we can make is UTC.
    return dt.replace(tzinfo=tzutc())
def remove_trailing_slash(host):
    """Strip a single trailing '/' from *host*, if present."""
    return host[:-1] if host.endswith('/') else host
def clean(item):
    """Recursively convert *item* into JSON-serializable primitives.

    Check order matters: Decimal before Number (so it becomes float),
    containers before Enum, and anything else is coerced via UTF-8 decode.
    """
    if isinstance(item, Decimal):
        return float(item)
    if isinstance(item, (str, bool, numbers.Number, datetime,
                         date, type(None))):
        return item
    if isinstance(item, (set, list, tuple)):
        return _clean_list(item)
    if isinstance(item, dict):
        return _clean_dict(item)
    if isinstance(item, Enum):
        return clean(item.value)
    return _coerce_unicode(item)
def _clean_list(list_):
    """Clean every element of an iterable, returning a list."""
    return list(map(clean, list_))
def _clean_dict(dict_):
    """Clean every value of *dict_*, dropping values that cannot be serialized."""
    cleaned = {}
    for key, value in dict_.items():
        try:
            cleaned[key] = clean(value)
        except TypeError:
            log.warning(
                'Dictionary values must be serializeable to '
                'JSON "%s" value %s of type %s is unsupported.',
                key, value, type(value),
            )
    return cleaned
def _coerce_unicode(cmplx):
try:
item = cmplx.decode("utf-8", "strict")
except AttributeError as exception:
item = ":".join(exception)
item.decode("utf-8", "strict")
log.warning('Error decoding: %s', item)
return None
return item | /rudder-sdk-python-2.0.2.tar.gz/rudder-sdk-python-2.0.2/rudderstack/analytics/utils.py | 0.771155 | 0.256791 | utils.py | pypi |
import warnings
from rudderstack.analytics.version import VERSION
from rudderstack.analytics.client import Client
import deprecation
__version__ = VERSION
"""Settings."""
write_key = Client.DefaultConfig.write_key
@property
@deprecation.deprecated(deprecated_in="2.0",
                        current_version=__version__,
                        details="Use the dataPlaneUrl property instead")
def host(self):
    # NOTE(review): these @property definitions live at MODULE level, where
    # the descriptor protocol never fires; reading/assigning the module
    # attribute ``host`` (rebound just below) bypasses them entirely, and the
    # bare ``return host`` refers to this function object -- confirm intent.
    warnings.warn('The use of host is deprecated. Use dataPlaneUrl instead', DeprecationWarning)
    return host
@host.setter
@deprecation.deprecated(deprecated_in="2.0",
                        current_version=__version__,
                        details="Use the dataPlaneUrl property instead")
def host(self, value: str):
    warnings.warn('The use of host is deprecated. Use dataPlaneUrl instead', DeprecationWarning)
    self.host = value
host = Client.DefaultConfig.host
@property
def dataPlaneUrl(self):
    # NOTE(review): module-level @property — never triggered for module
    # attribute access; ``dataPlaneUrl`` here resolves to this function
    # object, not a stored value. Verify this is intended.
    return dataPlaneUrl
@dataPlaneUrl.setter
def dataPlaneUrl(self, value: str):
    # Setting the data plane URL forwards to the deprecated ``host`` slot.
    self.host = value
# Module-level configuration knobs, seeded from Client.DefaultConfig.
# Most are read by _proxy() when the lazily-created default client is
# first instantiated; changing them after that point has no effect on it.
on_error = Client.DefaultConfig.on_error
debug = Client.DefaultConfig.debug
send = Client.DefaultConfig.send
sync_mode = Client.DefaultConfig.sync_mode
max_queue_size = Client.DefaultConfig.max_queue_size
gzip = Client.DefaultConfig.gzip
timeout = Client.DefaultConfig.timeout
upload_interval = Client.DefaultConfig.upload_interval
upload_size = Client.DefaultConfig.upload_size
max_retries = Client.DefaultConfig.max_retries
# The shared client instance; created on first use by _proxy().
default_client = None
def track(*args, **kwargs):
    """Send a track call through the module-level default client."""
    _proxy('track', *args, **kwargs)
def identify(*args, **kwargs):
    """Send an identify call through the module-level default client."""
    _proxy('identify', *args, **kwargs)
def group(*args, **kwargs):
    """Send a group call through the module-level default client."""
    _proxy('group', *args, **kwargs)
def alias(*args, **kwargs):
    """Send an alias call through the module-level default client."""
    _proxy('alias', *args, **kwargs)
def page(*args, **kwargs):
    """Send a page call through the module-level default client."""
    _proxy('page', *args, **kwargs)
def screen(*args, **kwargs):
    """Send a screen call through the module-level default client."""
    _proxy('screen', *args, **kwargs)
def flush():
    """Tell the default client to flush its queued messages."""
    _proxy('flush')
def join():
    """Block program until the client clears the queue"""
    _proxy('join')
def shutdown():
    """Flush all messages and cleanly shutdown the client"""
    # flush enqueues the remaining messages; join blocks until drained.
    _proxy('flush')
    _proxy('join')
def _proxy(method, *args, **kwargs):
    """Create an analytics client if one doesn't exist and send to it."""
    global default_client
    if not default_client:
        # A non-empty dataPlaneUrl wins over the deprecated ``host`` setting.
        if isinstance(dataPlaneUrl,str) and dataPlaneUrl != "":
            finalDataplaneUrl = dataPlaneUrl
        else:
            finalDataplaneUrl = host
        default_client = Client(write_key, host=finalDataplaneUrl, debug=debug,
                                max_queue_size=max_queue_size,
                                send=send, on_error=on_error,
                                gzip=gzip, max_retries=max_retries,
                                sync_mode=sync_mode, timeout=timeout)
    # Dispatch the named client method with the caller's arguments.
    fn = getattr(default_client, method)
    fn(*args, **kwargs)
import os
from datetime import datetime
import click
from sagemaker.processing import ProcessingInput, ProcessingOutput
from ..aws.processing import get_sklearn_processor
from ..aws.s3 import download_s3_directory, get_s3_resource, parse_s3_path
from ..config import read_yaml
from ..constants import (
EXCLUDE_FILES,
EXCLUDE_FOLDERS,
SAGEMAKER_CONTAINER_PATH_MAIN,
)
from ..log import get_logger, verbosity_option
from ..utils.zip import zip_directory
from . import rudderlabs
logger = get_logger(__name__)
def run_pipeline_step(
    pipeline_step_info: dict,
    creds: dict,
    instance_type: str,
    job_id: str,
    repository_path: str,
    exclude_folders: list,
    exclude_files: list,
) -> None:
    """Runs given pipeline step in sci-kit learn processor in amazon sagemaker

    Args:
        pipeline_step_info: Pipeline step information
        creds: AWS credentials
        instance_type: Instance type to use for the sagemaker job
        job_id: all the outputs will be saved under the folder with this name
        repository_path: Path to the repository
        exclude_folders: List of directories to be excluded from the zip
        exclude_files: List of files to be excluded from the zip

    Returns:
        None: None
    """
    # Get sklearn processor
    logger.info(f"Pipeline step: {pipeline_step_info['name']}")
    job_name = (
        f"{pipeline_step_info['name']}-{pipeline_step_info['job_suffix']}"
    )
    sklearn_processor = get_sklearn_processor(creds, instance_type, job_name)
    # Prepare source code and input data: the whole repository is shipped to
    # the processing container as a single zip archive.
    logger.info("Preparing source code")
    source_code_zip_path = zip_directory(
        repository_path,
        exclude_folders=exclude_folders,
        exclude_files=exclude_files,
    )
    input_data_zip_path = None
    # NOTE(review): assumes the "input_data" key is always present in the
    # step config; .replace() on None would raise -- confirm pipeline schema.
    input_data_path = pipeline_step_info.get("input_data").replace(
        "<job_id>", f"{job_id}"
    )
    # check input data path weather it is relative to repository or absolute
    if not os.path.isabs(input_data_path):
        input_data_path = os.path.join(repository_path, input_data_path)
    if os.path.exists(input_data_path):
        logger.info("Preparing input data")
        input_data_zip_path = zip_directory(input_data_path)
    # Canonical locations inside the SageMaker processing container.
    sagemaker_input_data_path = os.path.join(
        SAGEMAKER_CONTAINER_PATH_MAIN, "data"
    )
    sagemaker_output_data_path = os.path.join(
        SAGEMAKER_CONTAINER_PATH_MAIN, "output"
    )
    sagemaker_code_path = os.path.join(SAGEMAKER_CONTAINER_PATH_MAIN, "code")
    sagemaker_req_path = os.path.join(
        SAGEMAKER_CONTAINER_PATH_MAIN, "requirements"
    )
    local_req_path = os.path.join(repository_path, "requirements.txt")
    # script parameters
    script_params = {f"{k}": v for k, v in pipeline_step_info["params"].items()}
    # Pass job id to the pipeline script as a parameter
    script_params["--job-id"] = job_id
    if input_data_zip_path is not None:
        script_params["--input-data-zip"] = os.path.join(
            sagemaker_input_data_path, os.path.basename(input_data_zip_path)
        )
    script_params["--output-data-path"] = sagemaker_output_data_path
    script_params["--source-code-zip"] = os.path.join(
        sagemaker_code_path, os.path.basename(source_code_zip_path)
    )
    script_params["--requirements-path"] = os.path.join(
        sagemaker_req_path, "requirements.txt"
    )
    # Flatten {flag: value} into the argv list the processing job receives.
    arguments = []
    for k, v in script_params.items():
        arguments.append(f"{k}")
        arguments.append(f"{v}")
    logger.info(f"Arguments: {arguments}")
    inputs = [
        ProcessingInput(
            source=source_code_zip_path, destination=sagemaker_code_path
        ),
        ProcessingInput(source=local_req_path, destination=sagemaker_req_path),
    ]
    if input_data_zip_path:
        inputs.append(
            ProcessingInput(
                source=input_data_zip_path,
                destination=sagemaker_input_data_path,
            )
        )
    sklearn_processor.run(
        code=pipeline_step_info["code"],
        inputs=inputs,
        outputs=[
            ProcessingOutput(
                output_name=pipeline_step_info["name"],
                source=sagemaker_output_data_path,
            )
        ],
        arguments=arguments,
    )
    s3_bucket = None
    process_job_output_path = None
    if instance_type != "local":
        # Remote job: the output prefix follows SageMaker's fixed layout.
        s3_bucket = creds["s3Bucket"]
        process_job_output_path = (
            f"{sklearn_processor.latest_job.job_name}/output/output-1/{job_id}"
        )
    else:
        # Local mode: recover the S3 URI from the job description instead.
        preprocessing_job_description = sklearn_processor.jobs[-1].describe()
        output_config = preprocessing_job_description["ProcessingOutputConfig"]
        for output in output_config["Outputs"]:
            if output["OutputName"] != pipeline_step_info["name"]:
                continue
            output_s3_url = output["S3Output"]["S3Uri"]
            s3_bucket, process_job_output_path = parse_s3_path(output_s3_url)
    logger.info(f"S3 bucket: {s3_bucket}")
    logger.info(f"Process job output path: {process_job_output_path}")
    if s3_bucket is not None and process_job_output_path is not None:
        local_output_path = os.path.join(
            repository_path, pipeline_step_info["output_path"], f"{job_id}"
        )
        # Downloading model output files into local
        s3_resource = get_s3_resource(creds)
        download_s3_directory(
            s3_resource, s3_bucket, process_job_output_path, local_output_path
        )
@click.command(
    epilog="""
The command to run given notebooks in the pipeline.
Examples:
    $ rlabs aws run-pipeline --pipeline-file pipeline.yaml --credentials-file credentials.yaml --repository-path /path/to/repository --instance-type ml.t3.xlarge --job-id my-job-id
    $ rlabs aws run-pipeline -p pipeline.yaml -c credentials.yaml -r /path/to/repository -i local -j my-job-id
"""
)
@click.option(
    "-j",
    "--job-id",
    default=None,
    help="Job id to be used for the pipeline, used to store output files in S3/local",
)
@click.option(
    "-c",
    "--credentials-file",
    type=click.Path(exists=True, readable=True, resolve_path=True),
    show_default=True,
    default=os.path.join(os.path.realpath(os.curdir), "credentials.yaml"),
)
@click.option(
    "-i",
    "--instance-type",
    default="ml.t3.xlarge",
    show_default=True,
    help="The instance type to use for the amazon sagemaker notebook instance.",
)
@click.option(
    "-p",
    "--pipeline-config-file",
    type=click.Path(exists=True, readable=True, resolve_path=True),
    help="The pipeline config file to use.",
)
@click.option(
    "-r",
    "--repository-path",
    default=os.path.realpath(os.curdir),
    show_default=True,
    type=click.Path(exists=True, readable=True, resolve_path=True),
    help="The repository path to use.",
)
@verbosity_option()
@rudderlabs.raise_on_error
def run_pipeline(
    job_id: str,
    credentials_file: click.Path,
    instance_type: str,
    pipeline_config_file: click.Path,
    repository_path: click.Path,
) -> None:
    """Run every step of the configured pipeline as a SageMaker processing job."""
    logger.info("Running pipeline")
    logger.info("credentials_file: %s", credentials_file)
    logger.info("Instance type: %s", instance_type)

    # Default job id: current epoch timestamp, so repeated runs don't collide.
    if job_id is None:
        job_id = int(datetime.now().timestamp())

    # Load the pipeline config file
    pipeline_config = read_yaml(pipeline_config_file)
    logger.info("Pipeline config: %s", pipeline_config)

    # Load the credentials file
    config = read_yaml(credentials_file)

    # (Removed a leftover debug ``print`` of pipeline_config["exclude"]; the
    # value was never used — per-step excludes below are what takes effect.)

    # Running pipeline, one step at a time in declaration order.
    for pipeline_step in pipeline_config["pipeline"]:
        logger.info("Running pipeline step: %s", pipeline_step["name"])
        exclude_folders = (
            pipeline_step.get("exclude_folders", []) + EXCLUDE_FOLDERS
        )
        exclude_files = pipeline_step.get("exclude_files", []) + EXCLUDE_FILES
        run_pipeline_step(
            pipeline_step_info=pipeline_step,
            creds=config,
            instance_type=instance_type,
            job_id=job_id,
            repository_path=repository_path,
            exclude_folders=exclude_folders,
            exclude_files=exclude_files,
        )
import os
import click
import jinja2
from ..constants import SAGEMAKER_CONTAINER_PATH_MAIN
from ..log import get_logger, verbosity_option
from . import rudderlabs
logger = get_logger(__name__)
def render_template(jenv, template, context, output_dir):
    """Renders a template to the output directory using specific context.

    Args:
        jenv: The Jinja2 environment to use for rendering the template
        template: The path to the template, from the internal templates directory
        context: A dictionary with the context to render the template with
        output_dir: Where to save the output
    """
    output_file = os.path.join(output_dir, template)
    # Ensure the destination directory exists before writing the file.
    basedir = os.path.dirname(output_file)
    if not os.path.exists(basedir):
        logger.info("mkdir %s", basedir)
        os.makedirs(basedir)
    with open(output_file, "wt") as f:
        logger.info("rendering %s", output_file)
        T = jenv.get_template(template)
        f.write(T.render(**context))
@click.command(
    epilog="""
Examples:
  1. Generates a new project for Bob:
     $ rlabs new -vv data-apps-leadscoring -t "Lead Scoring" -o ~/Projects
     $ rlabs new -vv <project> -t <title> -o <output_dir>
"""
)
@click.argument("project")
@click.option(
    "-t",
    "--title",
    show_default=True,
    default="New project",
    help="This entry defines the project title. "
    "The project title should be a few words only. It will appear "
    "at the description of your project and as the title of your "
    "documentation",
)
@click.option(
    "-o",
    "--output-dir",
    help="Directory where to dump the new " "project - must not exist",
)
@verbosity_option()
@rudderlabs.raise_on_error
def new(project, title, output_dir):
    """Creates a folder structure for a new rudderlabs data apps project."""
    # the jinja context defines the substitutions to be performed
    context = dict(
        project=project,
        title=title,
        sagemaker_container_path=SAGEMAKER_CONTAINER_PATH_MAIN,
    )
    # copy the whole template structure and de-templatize the needed files
    if output_dir is None:
        output_dir = os.path.join(os.path.realpath(os.curdir), project)
    logger.info(
        "Creating structure for %s at directory %s", project, output_dir
    )
    # Refuse to write into an existing directory rather than overwrite it.
    if os.path.exists(output_dir):
        raise IOError(
            "The project directory %s already exists - cannot "
            "overwrite!" % output_dir
        )
    logger.info("mkdir %s", output_dir)
    os.makedirs(output_dir)
    # base jinja2 engine, loading templates packaged with rudderlabs.data.apps
    env = jinja2.Environment(
        loader=jinja2.PackageLoader("rudderlabs.data.apps", "templates"),
        autoescape=jinja2.select_autoescape(["html", "xml"]),
    )
    # other standard files rendered verbatim (after jinja substitution)
    simple = [
        ".gitignore",
        ".gitattributes",
        "config/sample.yaml",
        "credentials_template.yaml",
        "data/.gitignore",
        "notebooks/sample_notebook.ipynb",
        "README.md",
        "requirements.txt",
        "conda/environment.yaml",
        "data_loader.py",
        "logs/.gitignore",
        "pipelines/sample_pipeline.yaml",
        "run_notebook_wrapper.py",
    ]
    for k in simple:
        render_template(env, k, context, output_dir)
    logger.info(f"Creating base {project} structure in {output_dir}")
"""Compress and decompress a directory."""
import os
import tempfile
import zipfile
from fnmatch import fnmatch
from ..log import get_logger
TEMP_DIR = tempfile.gettempdir()
logger = get_logger(__name__)
def can_exclude(name: str, exclude_list: list) -> bool:
    """Return True when *name* matches any pattern in *exclude_list*.

    Args:
        name: Name to test for exclusion.
        exclude_list: Shell-style wildcard patterns (fnmatch), e.g. ``*.pyc``.

    Returns:
        bool: True if the name matches at least one pattern.
    """
    return any(fnmatch(name, pattern) for pattern in exclude_list)
def zip_directory(
    dir_path: str, exclude_folders: list = [], exclude_files: list = []
) -> str:
    """Zip the directory

    Args:
        dir_path (str): Path to the directory to zip
        exclude_folders (list): Folder-name patterns (fnmatch) excluded from the zip.
            NOTE: the shared mutable default is safe here because it is only read.
        exclude_files (list): File-name patterns (fnmatch) excluded from the zip

    Returns:
        str: Path to the zip file, created in the system temp directory

    Raises:
        ValueError: If the directory does not exist
    """
    logger.info("Zipping directory")
    if not os.path.exists(dir_path):
        raise ValueError(f"Directory {dir_path} does not exist")

    dir_name = os.path.basename(dir_path)
    zip_path = os.path.join(TEMP_DIR, dir_name + ".zip")

    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zip_file:
        for root, dirs, files in os.walk(dir_path):
            # BUGFIX: the original called dirs.remove() while iterating over
            # dirs, which skips entries. Rebuilding the list in place (slice
            # assignment) both prunes reliably and keeps os.walk from
            # descending into excluded folders.
            dirs[:] = [d for d in dirs if not can_exclude(d, exclude_folders)]
            for file in files:
                # Exclude files
                if can_exclude(file, exclude_files):
                    continue
                abs_file_path = os.path.join(root, file)
                # Strip only the leading dir_path prefix: str.replace() would
                # also alter later occurrences of the same substring.
                zip_file.write(abs_file_path, abs_file_path[len(dir_path):])
    return zip_path
def unzip_directory(zip_path: str, out_dir_path: str) -> None:
    """Extract the archive at *zip_path* into the existing directory *out_dir_path*.

    Args:
        zip_path: Path to the zipped directory
        out_dir_path: Output directory path (must already exist)

    Returns:
        None: None

    Raises:
        ValueError: If the zip file or the output directory does not exist
    """
    logger.info("Unzipping directory")
    if not os.path.exists(zip_path):
        raise ValueError(f"Zip file {zip_path} does not exist")
    if not os.path.exists(out_dir_path):
        raise ValueError(f"Output directory {out_dir_path} does not exist")
    archive = zipfile.ZipFile(zip_path, "r")
    with archive:
        archive.extractall(out_dir_path)
import datetime
from typing import List, Optional, Tuple, Union
import pandas as pd
from rudderlabs.data.apps.wh import Connector
from rudderlabs.data.apps.wh.query_utils import get_timestamp_where_condition
class DataIO:
    """Reads feature data from, and writes prediction data to, the warehouse.

    Connection details come from the credentials config ("data_warehouse" and
    "aws" sections); table/column names and date-range options come from the
    notebook config ("data" section).
    """

    def __init__(self, notebook_config: dict, creds_config: dict) -> None:
        """Store the configurations and resolve table/column names.

        :param notebook_config: notebook settings (column names, date range, ...)
        :param creds_config: credentials ("data_warehouse" and "aws" sections)
        """
        self.notebook_config = notebook_config
        self.creds_config = creds_config

        # Expect table information(name, schema, database) from data_warehouse section of
        # credentials configurations
        self.database = self.creds_config["data_warehouse"]["database"]
        self.schema = self.creds_config["data_warehouse"]["schema"]
        self.feature_store_table = creds_config["data_warehouse"][
            "feature_store_table"
        ]
        self.prediction_store_table = creds_config["data_warehouse"][
            "prediction_store_table"
        ]

        # Remaining table column information and preprocessing information will be
        # read from notebook configuration
        self.entity_column = notebook_config["data"]["entity_column"]
        self.label_column = notebook_config["data"]["label_column"]
        self.timestamp_column = notebook_config["data"]["timestamp_column"]
        self.features_start_date = notebook_config["data"][
            "features_start_date"
        ]
        self.features_end_date = notebook_config["data"]["features_end_date"]
        if self.features_end_date is None:
            # Default end date: 14 days before today — presumably to exclude
            # too-recent/incomplete data; TODO confirm the intended lookback.
            self.features_end_date = datetime.datetime.strftime(
                datetime.datetime.today() - datetime.timedelta(days=14),
                "%Y-%m-%d",
            )
        self.numeric_value_column = notebook_config["data"][
            "numeric_value_column"
        ]
        self.str_value_column = notebook_config["data"]["str_value_column"]
        self.feature_name_column = notebook_config["data"][
            "feature_name_column"
        ]

    def get_data(
        self,
        feature_subset: Optional[Union[List[str], Tuple[str], str]] = "*",
        no_of_timestamps: int = 1,
    ) -> pd.DataFrame:
        """Gets data from warehouse and performs preprocessing on the data.

        Keeps, per entity and feature, the `no_of_timestamps` most recent rows
        (via a SQL rank() window), then pivots numeric and string values into
        one wide dataframe indexed by (entity, timestamp).

        Args:
            feature_subset: Feature subset to get from warehouse; the default
                "*" (or any non-list/tuple value) disables feature filtering
            no_of_timestamps: Number of most recent timestamps to keep per
                entity/feature pair

        Returns:
            pd.DataFrame: Pandas dataframe containing the data
        """
        # Generate query for latest data
        print("Generating query for latest data")
        if isinstance(feature_subset, list) or isinstance(
            feature_subset, tuple
        ):
            # Build a SQL "in ('feat1', ..., label)" list; the label column is
            # always fetched alongside the requested features.
            features_and_label_str = (
                "("
                + ", ".join(
                    map(
                        lambda feat: "'" + feat + "'",
                        feature_subset + [self.label_column],
                    )
                )
                + ")"
            )
        else:
            features_and_label_str = None

        # NOTE(review): the SQL below is built by string interpolation from
        # config values — safe only as long as the configs are trusted.
        table_name = f"{self.database}.{self.schema}.{self.feature_store_table}"
        inner_query = (
            f"select {self.entity_column}, {self.feature_name_column}, {self.numeric_value_column}, {self.str_value_column}, {self.timestamp_column}, "
            f"rank() over (partition by {self.entity_column}, {self.feature_name_column} order by {self.timestamp_column} desc) as rnk from {table_name}"
        )

        timestamp_condition = get_timestamp_where_condition(
            self.timestamp_column,
            self.features_start_date,
            self.features_end_date,
        )

        # Combine the optional feature filter and the optional timestamp filter.
        if features_and_label_str and timestamp_condition:
            inner_query = f"{inner_query} where {timestamp_condition} and {self.feature_name_column} in {features_and_label_str}"
        elif features_and_label_str:
            inner_query = f"{inner_query} where {self.feature_name_column} in {features_and_label_str}"
        elif timestamp_condition:
            inner_query = f"{inner_query} where {timestamp_condition}"

        query = f"select {self.entity_column}, {self.feature_name_column}, {self.numeric_value_column}, {self.str_value_column}, {self.timestamp_column} from ({inner_query}) as t where rnk <= {no_of_timestamps}"

        # Execute query and return data
        print("Query: ")
        print(query)

        warehouse_creds = self.creds_config["data_warehouse"]
        aws_config = self.creds_config["aws"]

        # For snow redshift connector we need to pass the aws config as well
        # under parameter "aws_config"
        print("Running query on warehouse")
        connector = Connector(warehouse_creds, aws_config=aws_config)
        data = connector.run_query(query)

        # Wide table of numeric feature values (missing values filled with 0).
        numeric_data = data.query(
            f"~{self.numeric_value_column}.isnull()", engine="python"
        ).pivot_table(
            index=[self.entity_column, self.timestamp_column],
            columns=self.feature_name_column,
            values=self.numeric_value_column,
            fill_value=0,
        )
        # Wide table of non-empty string feature values. NOTE(review): pivot()
        # (unlike pivot_table) raises on duplicate index/column pairs — assumes
        # one string value per (entity, timestamp, feature); TODO confirm.
        non_numeric_data = data.query(
            f"~{self.str_value_column}.isnull() and {self.str_value_column}!=''",
            engine="python",
        ).pivot(
            index=[self.entity_column, self.timestamp_column],
            columns=self.feature_name_column,
            values=self.str_value_column,
        )

        # Left-join: keep every row that has numeric features.
        return numeric_data.merge(
            non_numeric_data, left_index=True, right_index=True, how="left"
        )

    def write_to_wh_table(
        self,
        df: pd.DataFrame,
        table_name: str,
        schema: Optional[str] = None,
        if_exists: str = "append",
    ) -> None:
        """Writes dataframe to warehouse feature store table.

        Args:
            df (pd.DataFrame): Dataframe to be written to warehouse feature store table
            table_name (str): Feature store table name
            schema (str, optional): Schema name, Defaults to None.
            if_exists (str, optional): {"append", "replace", "fail"} Defaults to "append".
                fail: If the table already exists, the write fails.
                replace: If the table already exists, the table is dropped and the write is executed.
                append: If the table already exists, the write is executed with new rows appended to existing table
        """
        print("Writing to warehouse")
        warehouse_creds = self.creds_config["data_warehouse"]
        aws_config = self.creds_config["aws"]
        wh_connector = Connector(warehouse_creds, aws_config=aws_config)
        wh_connector.write_to_table(df, table_name, schema, if_exists)
"""Functions for interacting with AWS S3."""
import os
from pathlib import Path
from typing import Optional, Tuple
import boto3
import pandas as pd
from ..log import get_logger
from .session import get_boto_session
# Python 2/3 compatibility shim: on Python 3 `import StringIO` always fails,
# so io.StringIO is used instead.
try:
    import StringIO
except ImportError:
    from io import StringIO
logger = get_logger(__name__)  # module-level logger
def get_s3_resource(creds: dict) -> boto3.resources.base.ServiceResource:
    """Get an S3 resource

    Args:
        creds: AWS credentials

    Returns:
        boto3.resources.base.ServiceResource: S3 resource
    """
    logger.info("Getting S3 resource")
    session = get_boto_session(creds)
    return session.resource("s3")
def download_s3_directory(
    s3_resource: boto3.resources.base.ServiceResource,
    s3_bucket_name: str,
    s3_path: str,
    local_path: str,
) -> None:
    """Download an S3 directory (prefix) to a local directory

    Args:
        s3_resource: Amazon S3 resource
        s3_bucket_name: S3 Bucket name
        s3_path: S3 prefix to download
        local_path: Local path to download to

    Returns:
        None: None
    """
    Path(local_path).mkdir(parents=True, exist_ok=True)
    logger.info(
        f"Downloading S3 directory {s3_path} from bucket {s3_bucket_name} to {local_path}"
    )
    # Hoisted out of the loop: the prefix to strip is the same for every object.
    s3_base_path = s3_path if s3_path.endswith("/") else s3_path + "/"
    for obj in s3_resource.Bucket(s3_bucket_name).objects.filter(
        Prefix=s3_path
    ):
        # We want to preserve the folder structure, so we need to remove the
        # S3 path from the object key.
        local_file_path = os.path.join(
            local_path, obj.key.replace(s3_base_path, "")
        )
        # Use the module logger for consistency (was a bare print).
        logger.info(f"Downloading {obj.key} to {local_file_path}")
        # exist_ok=True replaces the racy exists()-then-makedirs pattern.
        os.makedirs(os.path.dirname(local_file_path), exist_ok=True)
        s3_resource.meta.client.download_file(
            s3_bucket_name,
            obj.key,
            local_file_path,
        )
def parse_s3_path(s3_path: str) -> Tuple[str, str]:
    """Split a full S3 URI ('s3://bucket/key/parts') into bucket and key.

    Args:
        s3_path: Complete S3 path, including the scheme.

    Returns:
        Tuple[str, str]: Tuple containing bucket name and path (the path is
        empty when the URI has no key component).
    """
    after_scheme = s3_path.split("://")[1]
    s3_bucket, _, s3_file_location = after_scheme.partition("/")
    return s3_bucket, s3_file_location
def pd_to_csv_s3(
    df: pd.DataFrame,
    s3_bucket_name: str,
    s3_path: str,
    s3_resource,
    index: bool = False,
    header: bool = False,
) -> None:
    """Saves a pandas DataFrame as CSV under the given S3 bucket and key.

    Args:
        df: Pandas DataFrame to save.
        s3_bucket_name: S3 Bucket name
        s3_path: S3 key to save the DataFrame under.
        s3_resource: Amazon S3 resource
        index: Whether to include the index when converting to CSV.
        header: Whether to include the header when converting to CSV.

    Returns:
        None: None
    """
    buffer = StringIO()
    df.to_csv(buffer, index=index, header=header)
    csv_body = buffer.getvalue()
    s3_resource.Object(s3_bucket_name, s3_path).put(Body=csv_body)
def read_csv_from_s3(
    s3_bucket,
    file_path,
    boto_session,
    header: Optional[int] = None,
    index: Optional[str] = None,
) -> pd.DataFrame:
    """Reads a CSV file from S3 into a DataFrame.

    Args:
        s3_bucket: S3 Bucket name
        file_path: S3 key of the CSV to read.
        boto_session: Boto3 session.
        header: Row number to use as the header (pandas `header`).
        index: Name of the index column (pandas `index_col`).

    Returns:
        pd.DataFrame: the parsed CSV content
    """
    client = boto_session.client("s3")
    response = client.get_object(Bucket=s3_bucket, Key=file_path)
    csv_text = response["Body"].read().decode("utf-8")
    return pd.read_csv(StringIO(csv_text), header=header, index_col=index)
from rudi_node_read.connectors.io_connector import Connector, STATUS, REDIRECTION
from rudi_node_read.utils.log import log_d, log_e
from rudi_node_read.utils.type_dict import is_dict
from rudi_node_read.utils.type_string import slash_join
REQ_LIMIT = 500  # page size used for paginated metadata listing (see get_metadata_list)
class RudiNodeConnector(Connector):
    """Read-oriented connector to the API/proxy module of a RUDI Node."""

    def __init__(self, server_url: str, headers_user_agent: str = "RudiNodeConnector"):
        """
        Creates a connector to the API/proxy module of a RUDI Node
        :param server_url: the URL of the RUDI Node
        :param headers_user_agent: (optional) identifies the user launching the request (or at least the module)
        in the request headers, for logging purpose.
        """
        fun = "RudiNodeConnector.__init__"  # NOTE(review): unused local
        super().__init__(server_url)
        # log_d("RudiNodeConnector", "attributes", self)
        # Connectivity is checked (and redirections followed) before the
        # request headers are set up: test_rudi_api_connection does not use them.
        self.test_rudi_api_connection()
        self._headers = {
            "User-Agent": headers_user_agent,
            "Content-type": "text/plain",
            "Accept": "application/json",
        }

    def get_api(self, url: str):
        """
        Performs an identified GET request through /api/v1 path
        :param url: part of the URL that comes after /api/admin
        :return: a JSON
        """
        return self.request(url=slash_join("api/v1", url), req_method="GET", headers=self._headers)

    def test_rudi_api_connection(self):
        """Check the node answers, following one 301/302 redirection if needed.

        :raises ConnectionError: when the node (or its redirection target) does not answer
        """
        test_path = "api/admin/hash"
        test = self.request(test_path)
        if test is None:
            log_e("RudiNodeConnector", f"!! Node '{self.host}'", "no connection!")
            raise ConnectionError(f"Connection failed to node '{self.host}'")
        if is_dict(test) and test.get(STATUS) in [301, 302]:
            # The node redirected us: point this connector to the new base URL
            # (redirection target minus the test path) and retry once.
            server_url = str(test.get(REDIRECTION))
            if not server_url.endswith(test_path):
                log_e("RudiNodeConnector", f"!! Node '{self.host}'", "redirection incorrect!")
                raise ConnectionError(f"Connection failed to node '{self.host}'")
            self._set_url(server_url.replace(f"/{test_path}", ""))
            test = self.request(test_path)
            if test is None:
                log_e("RudiNodeConnector", f"!! Node '{self.host}'", "redirection failed!")
                raise ConnectionError(f"Connection failed to node '{self.host}'")
            log_d("RudiNodeConnector", f"Node '{self.host}'", "redirection OK")
        else:
            log_d("RudiNodeConnector", f"Node '{self.host}'", "connection OK")

    def get_metadata_with_uuid(self, metadata_uuid: str):
        """Fetch one metadata record by its UUID."""
        return self.get_api(f"resources/{metadata_uuid}")

    def get_metadata_with_filter(self, rudi_fields_filter: dict):
        """Fetch the metadata matching the given field/value pairs."""
        filter_str = ""
        # NOTE(review): the enumerate index `i` is unused; values are not URL-encoded.
        for i, (key, val) in enumerate(rudi_fields_filter.items()):
            # TODO: special cases of producer / contact / available_formats
            filter_str += f"&{key}={val}"
        return self.get_api(f"resources?{filter_str[1:]}")

    def get_metadata_count(self):
        """Return the total number of metadata declared on the node."""
        return self.get_api("resources?limit=1")["total"]

    def get_metadata_list(self, max_number: int = 0):
        """Fetch metadata sorted by most recent update, paginated by REQ_LIMIT.

        :param max_number: maximum number of metadata to fetch (0 = all)
        """
        meta_nb = self.get_metadata_count()
        meta_set = []
        req_offset = 0
        if not max_number:
            max_number = meta_nb
        while req_offset < meta_nb and req_offset < max_number:
            # Last page may be smaller so that exactly max_number items are fetched.
            req_limit = REQ_LIMIT if req_offset + REQ_LIMIT < max_number else max_number - req_offset
            meta_list_partial = self.get_api(f"resources?sort_by=-updatedAt&limit={req_limit}&offset={req_offset}")
            log_d("get_metadata_list", "total", meta_list_partial["total"])
            log_d("get_metadata_list", "len", len(meta_list_partial["items"]))
            meta_set += meta_list_partial["items"]
            req_offset += REQ_LIMIT
        return meta_set

    def get_metadata_ids(self):
        """Return the (global_id, resource_title) projection of every metadata."""
        meta_list = self.get_api("resources?fields=global_id,resource_title")
        return meta_list["items"]

    def get_list_media_for_metadata(self, metadata_uuid):
        """List the media (files/links) attached to one metadata record."""
        meta = self.get_metadata_with_uuid(metadata_uuid)
        media_list = meta["available_formats"]
        media_list_final = []
        for media in media_list:
            media_list_final.append(
                {
                    "url": media["connector"]["url"],
                    "type": media["media_type"],
                    # NOTE(review): "meta_contact" duplicates "id" (both media_id) —
                    # looks suspicious; confirm the intended field.
                    "meta_contact": media["media_id"],
                    "id": media["media_id"],
                }
            )
        return media_list_final
if __name__ == "__main__":
    # Manual smoke test against a live RUDI node (requires network access).
    # rudi_node_connector = RudiNodeConnector("https://bacasable.fenix.rudi-univ-rennes1.fr")
    rudi_node_connector = RudiNodeConnector("https://audiar.rudi.irisa.fr")
    log_d("RudiNodeConnector", "metadata nb", rudi_node_connector.get_metadata_count())
    log_d("RudiNodeConnector", "metadata_ids", rudi_node_connector.get_metadata_ids())
    # Inspect the most recently updated metadata and its attached media.
    meta1 = rudi_node_connector.get_metadata_list()[0]
    meta1_id = meta1["global_id"]
    rudi_node_connector.get_metadata_with_uuid(meta1_id)  # NOTE(review): result unused
    log_d("RudiNodeConnector", "meta1", meta1_id)
    meta1_media = rudi_node_connector.get_list_media_for_metadata(meta1_id)
    log_d("RudiNodeConnector", "meta1 media", meta1_media)
from urllib.parse import quote
from rudi_node_write.utils.log import log_d
from rudi_node_write.utils.typing_utils import get_type_name
def url_encode_req_params(url_str: str) -> str:
    """
    Use urllib.parse.quote on every value of a key/value pair in request parameters (RFC 3986, see the documentation
    of urllib.parse.quote for further info)

    :param url_str: a URL that needs to be encoded
    :return: the encoded URL
    :raises TypeError: if the input is not a string
    """
    if not isinstance(url_str, str):
        raise TypeError(f"input URL should be a string, got '{get_type_name(url_str)}'")
    # BUGFIX: the original used the bitwise '&' operator instead of 'and'. Due
    # to operator precedence it parsed as a chained comparison that happened to
    # behave as intended only by accident; 'and' states the intent explicitly.
    if url_str.find("=") == -1 and url_str.find("&") == -1:
        return url_str  # No request parameters to clean
    # FIX: split on the FIRST '?' only. The original split on every '?', so a
    # URL containing a second '?' (e.g. inside a parameter value) silently
    # collapsed to an empty string.
    url_bits = url_str.split("?", 1)
    if len(url_bits) == 1:
        # Case where we were given the relative url_str only
        base_url = ""
        relative_url = url_bits[0]
    else:
        base_url = f"{url_bits[0]}?"
        relative_url = url_bits[1]
    clean_relative_url = ""
    for bit in relative_url.split("&"):
        key_val_pair = bit.split("=")
        if len(key_val_pair) == 2:
            # Simple key=value pair: percent-encode the value.
            clean_relative_url += f"{key_val_pair[0]}={quote(key_val_pair[1])}&"
        else:
            # No '=' (or several): keep the fragment untouched.
            clean_relative_url += f"{bit}&"
    # Drop the trailing '&' appended by the loop.
    return f"{base_url}{clean_relative_url[:-1]}"
if __name__ == "__main__":  # pragma: no cover
    # Manual examples covering regular URLs and edge-case inputs.
    tests = "URL utils"
    url = (
        "https://data.rennesmetropole.fr/api/explore/v2.1/catalog/datasets/loisirs-az-4bis/exports/json?lang=fr"
        "&timezone=Europe/Paris&use_labels=true&delimiter=;"
    )
    log_d(tests, "url", url_encode_req_params(url))
    # The walrus operator rebinds `url` so it also serves as the log label.
    log_d(tests, url := "https://url.com/?lang", url_encode_req_params(url))
    log_d(tests, url := "https://url.com/", url_encode_req_params(url))
    log_d(tests, url := "https://url.com", url_encode_req_params(url))
    log_d(tests, url := "https://url.com?req", url_encode_req_params(url))
    log_d(tests, url := "https://url.com?=", url_encode_req_params(url))
    log_d(tests, url := "https://url.com/p1=val&p2", url_encode_req_params(url))
    log_d(tests, url := "=", url_encode_req_params(url))
    log_d(tests, url := "https://url.com/?req=é'()à!è", url_encode_req_params(url))
from re import compile
from typing import Type
def get_type_name(obj) -> str:
return type(obj).__name__
def is_type_name(obj, type_name: str) -> bool:
return get_type_name(obj) == type_name
def is_type(o, target_class: Type | tuple) -> bool:
return isinstance(o, target_class)
def are_same_type(obj, other) -> bool:
return isinstance(other, obj.__class__) and isinstance(obj, other.__class__)
def does_inherit_from(obj, mother_class) -> bool:
return issubclass(type(obj), mother_class)
def is_bool(b: bool) -> bool:
return isinstance(b, bool)
def check_is_bool(b: bool, accept_none: bool = False) -> bool | None:
if b is None and accept_none:
return None
if not isinstance(b, bool):
raise TypeError(f"input should be a bool, got '{get_type_name(b)}' for input '{b}'.")
return b
# https://stackoverflow.com/a/152596/1563072
def check_type(o, target_class: Type | tuple, accept_none: bool = False):
if o is None:
if accept_none:
return None
raise ValueError("input should not be null")
if isinstance(o, target_class):
return o
target_class_name = (
" | ".join([f"'{t.__name__}'" for t in target_class])
if isinstance(target_class, tuple)
else f"'{target_class.__name__}'"
)
raise TypeError(f"input should be of type {target_class_name}, got '{get_type_name(o)}'")
def check_is_int(n: int, accept_none: bool = False, accept_castable: bool = False) -> int | None:
if n is None and accept_none:
return None
if isinstance(n, int):
return n
if accept_castable:
return ensure_is_int(n, accept_none=accept_none)
raise TypeError(f"input parameter should be an int, got '{get_type_name(n)}' for input '{n}'.")
def ensure_is_int(n: int, accept_none: bool = False) -> int | None:
if n is None and accept_none:
return None
if isinstance(n, int):
return n
try:
return int(n)
except TypeError:
raise TypeError(f"input parameter of type '{get_type_name(n)}' cannot be cast into an int: '{n}'.")
def is_number(n) -> bool:
return isinstance(n, (int, float))
def ensure_is_number(n) -> int | float:
if not is_number(n):
try:
return to_number(n)
except TypeError:
pass
raise TypeError(f"input parameter should be a float or an int, got '{get_type_name(n)}' for input '{n}'.")
return n
REGEX_INT = compile(r"^[+-]?[0-9]+$")
REGEX_FLOAT = compile(r"^[+-]?[0-9]*[.][0-9]+([eE][+-]?[0-9]+)?$")
def to_number(n: str | int | float) -> int | float:
if is_number(n):
return n
if REGEX_INT.match(n):
return int(n)
if REGEX_FLOAT.match(n):
return float(n)
raise TypeError(f"input parameter of type '{get_type_name(n)}' cannot be cast into a float or an int: '{n}'.")
def to_float(val) -> float:
    """Cast *val* to a float; raise ValueError with a readable message on failure."""
    try:
        return float(val)
    except (TypeError, ValueError):
        raise ValueError(f"could not convert value into a float: '{val}'")
def is_def(val, strict: bool = True) -> bool:
    """True when *val* is considered defined, i.e. not null (see is_null).

    NOTE(review): defaults differ across this trio (is_def: strict=True,
    check_is_def/is_null: strict=False) — confirm this is intentional.
    """
    return not is_null(val, strict)


def check_is_def(val, strict: bool = False):
    """
    Makes sure it returns None if input val is 'null' or 'None'
    :param strict: True if the check should be strict, i.e. input is None, "None", "null", [], '', {}
    :param val:
    :return:
    """
    if not is_null(val, strict):
        return val
    raise ValueError("input value is required")


def is_null(val, strict: bool = False) -> bool:
    """Decide whether *val* counts as null.

    Non-strict mode: None, any falsy value (0, '', [], {}, False) and the
    literal strings "null"/"None" are null.
    Strict mode quirks (as implemented): 0 and booleans are NOT null; empty
    containers/strings ARE null; the literal strings "null", "None", "[]",
    "{}" are NOT null (they are truthy, so `not val` is False) — presumably
    intentional, but worth confirming against the callers.
    """
    if val is None:
        return True
    if not strict:
        return (not val) or (val in ["null", "None"])
    # log_d("is_null", val, "strict=", strict)
    return val != 0 and not is_bool(val) and not val and not (val in ["null", "None", "[]", "{}"])
from datetime import datetime, timezone, timedelta
from time import time
from rudi_node_write.rudi_types.serializable import Serializable
from rudi_node_write.utils.date_utils import parse_date
from rudi_node_write.utils.str_utils import is_string
class Date(Serializable):
    """An ISO-8601 date wrapper built on top of `parse_date`.

    Parses a date string (or an int such as a year) into its components and
    lazily exposes `datetime` and ISO-string views. Comparison operators accept
    Date instances as well as str/int values that Date can parse.
    """

    def __init__(self, date_str: str | int):
        """Parse *date_str* into its date components.

        :param date_str: a date representation (str, or an int such as 2020)
        :raises ValueError: when the input cannot be parsed as a date
        """
        if not is_string(date_str):
            date_str = str(date_str)
        reg_date = parse_date(date_str)
        if not reg_date:
            raise ValueError(f"this is not a valid date: '{date_str}'")
        (
            year,
            month,
            day,
            hour,
            minute,
            second,
            ms,
            us,
            tz_sign,
            tz_hour,
            tz_minute,
        ) = reg_date.groups()
        self.year = self._to_int(year)
        self.month = self._to_int(month, 1)  # missing month/day default to 1
        self.day = self._to_int(day, 1)
        self.hour = self._to_int(hour)
        self.minute = self._to_int(minute)
        self.second = self._to_int(second)
        self.ms = self._to_int(ms) if ms else None
        self.us = self._to_int(us) if us else None
        self.microseconds = self._to_int(ms) * 1000 + self._to_int(us)
        # BUGFIX: the sign must multiply the timedelta. The previous expression
        # `-1 if tz_sign == "-" else 1 * timedelta(...)` passed the bare int -1
        # to timezone() for negative offsets, which raises a TypeError.
        tz_offset = timedelta(hours=self._to_int(tz_hour), minutes=self._to_int(tz_minute))
        self.tz_info = timezone(-tz_offset if tz_sign == "-" else tz_offset)
        # ISO precision mirrors the precision of the parsed input.
        self.timespec = "microseconds" if self.us else "milliseconds" if self.ms else "seconds"
        self._py_date = None  # lazy cache for the datetime view
        self._iso_date = None  # lazy cache for the ISO string view

    @property
    def class_name(self):
        """Name of this class ('Date')."""
        return self.__class__.__name__

    @property
    def datetime(self) -> datetime:
        """The date as a (cached) timezone-aware datetime."""
        if self._py_date is None:
            self._py_date = datetime(
                year=self.year,
                month=self.month,
                day=self.day,
                hour=self.hour,
                minute=self.minute,
                second=self.second,
                microsecond=self.microseconds,
                tzinfo=self.tz_info,
            )
        return self._py_date

    @property
    def iso(self) -> str:
        """The date as a (cached) ISO-8601 string with input-matching precision."""
        if self._iso_date is None:
            self._iso_date = self.datetime.isoformat(timespec=self.timespec)
        return self._iso_date

    def __str__(self) -> str:
        return self.iso

    def __eq__(self, other):
        if isinstance(other, Date):
            return self.datetime == other.datetime
        if isinstance(other, (str, int)):
            # BUGFIX: int operands were previously accepted by the type check
            # but never converted, which crashed on `other.datetime`.
            # Unparsable values now compare as not-equal instead of raising.
            try:
                return self.datetime == Date(other).datetime
            except ValueError:
                return False
        return False

    def _as_date(self, other) -> "Date":
        """Coerce a comparison operand into a Date, or raise ValueError."""
        if isinstance(other, Date):
            return other
        if isinstance(other, (int, str)):
            return Date(other)
        raise ValueError(f"Cannot compare a date and a '{other.__class__.__name__}' (got '{other}')")

    def __gt__(self, other):
        return self.datetime > self._as_date(other).datetime

    def __lt__(self, other):
        # BUGFIX: was `not self > other`, which wrongly returned True for equal
        # dates (and for a date compared with itself).
        return self.datetime < self._as_date(other).datetime

    def to_json_str(self, **kwargs) -> str:
        """Serialize as the ISO-8601 string (kwargs accepted for interface compatibility)."""
        return self.iso

    def to_json(self, keep_nones: bool = False) -> str:
        """Serialize as the ISO-8601 string (keep_nones accepted for interface compatibility)."""
        return self.iso

    @staticmethod
    def _to_int(val: str | None, default_val: int = 0):
        """Convert a regex capture to an int, falling back to *default_val* when empty."""
        return int(val if val else default_val)

    @staticmethod
    def from_str(date_str: str | None = None, default_date: str | None = None, is_none_accepted: bool = True):
        """Build a Date from a string, with an optional default and None-handling.

        :raises ValueError: when *date_str* is None, no default is given and None is not accepted
        """
        if date_str is None:
            if default_date:
                return Date(default_date)
            elif is_none_accepted:
                return None
            else:
                raise ValueError("empty value not accepted")
        return Date(date_str)

    @staticmethod
    def from_json(date_str: str):
        """Deserialize a Date from a JSON string value (None-tolerant)."""
        return Date.from_str(date_str)
if __name__ == "__main__":  # pragma: no cover
    # Manual examples: round-trip a full timestamp, then parse progressively
    # less precise date strings and compare them against the year string.
    tests = "date_tests"
    begin = time()
    date = "2023-01-01 20:23:34.041456+02:00"
    print(tests, "str_to_date:", f"'{date}'", "->", f"'{Date(date)}'")
    print(tests, "==", date == Date(date))
    date_list = [
        "2020",
        "2020-01",
        "202001",
        "2020-01-01",
        "2020-01-01 00:00",
        "2020-01-01 00:00:00",
        "2020-01-01T00:00:00",
        "2020-01-01T00:00:00Z",
        "2020-01-01T00:00:00+00:00",
        "2020-01-01T00:00:00.000Z",
        "2020-01-01T00:00:00.000+00:00",
        "20200101",
    ]
    for str_date in date_list:
        print(tests, f"Date('{str_date}')", Date(str_date))
    for str_date in date_list:
        print(tests, f"2020 == Date('{str_date}') ->", "2020" == Date(str_date))
    print(tests, "exec. time:", time() - begin)
from deepdiff import DeepDiff
from rudi_node_write.utils.typing_utils import get_type_name, check_type
def is_iterable(o) -> bool:
return hasattr(o, "__iter__")
def is_list(o) -> bool:
return isinstance(o, list)
def is_array(o) -> bool:
return isinstance(o, list)
def is_list_or_dict(o) -> bool:
return isinstance(o, list) or isinstance(o, dict)
def check_is_list(list_val: str | list, accept_none: bool = False):
if list_val is None and accept_none:
return None
if isinstance(list_val, list):
return list_val
raise TypeError(f"input should be a list, got '{get_type_name(list_val)}'")
def ensure_is_str_list(list_val: str | list[str]):
if isinstance(list_val, str):
list_val = list_val.split(",")
if not isinstance(list_val, list):
raise TypeError(f"input should be a list, got '{get_type_name(list_val)}'")
return [val.strip() for val in list_val]
def get_first_list_elt_or_none(elt_list):
if not elt_list or not is_list(elt_list) or len(elt_list) == 0:
return None
return elt_list[0]
def list_diff(list_a: list, list_b: list):
    """Symmetric difference of two lists, in order of appearance, duplicates kept.

    Works with unhashable elements (dicts, lists) since it relies on `in`.
    """
    return [elt for elt in list_a + list_b if elt not in list_a or elt not in list_b]


def list_deep_diff(list_a: list, list_b: list, ignore_order: bool = True):
    """Deep comparison of two lists, delegated to deepdiff.DeepDiff."""
    return DeepDiff(list_a, list_b, ignore_order=ignore_order)
def are_list_different(list_a: list | None, list_b: list | None, ignore_order: bool = True) -> bool:
    """
    Compare two lists (with deep equality on each element of the list)
    :param list_a: a list
    :param list_b: another list
    :param ignore_order: True if list order should be ignored in the comparison. If True, [5,'a'] == ['a',5]
    :return: True if the lists are different
    """
    check_type(list_a, list, accept_none=True)
    check_type(list_b, list, accept_none=True)
    if list_a is None or list_b is None:
        # Different unless both are None (identity check covers exactly that case).
        return list_a is not list_b
    return bool(list_deep_diff(list_a, list_b, ignore_order=ignore_order))


def are_list_equal(list_a: list, list_b: list, ignore_order: bool = True):
    """True when both lists are deep-equal (negation of are_list_different)."""
    return not are_list_different(list_a, list_b, ignore_order)
def merge_lists(list_a: list | None, list_b: list | None):
if list_b is None:
return list_a
if list_a is None:
return list_b
if is_list(list_a) and is_list(list_b):
return list_a + list_b
if is_list(list_a):
return list_a + [list_b]
if is_list(list_b):
return [list_a] + list_b
def clean_nones(value):
    """
    Recursively remove all None values from dictionaries and lists, and returns
    the result as a new dictionary or list.
    https://stackoverflow.com/a/60124334/1563072
    """
    if isinstance(value, dict):
        return {key: clean_nones(val) for key, val in value.items() if val is not None}
    if isinstance(value, list):
        return [clean_nones(elt) for elt in value if elt is not None]
    # Scalars (and any other type) are returned unchanged.
    return value
# if __name__ == "__main__": # pragma: no cover
# tests = "tests"
# begin = time()
# a = [1, 2, 3, 4, 5]
# b = [9, 8, 7, 6, {"r": [5, 6]}]
# c = [8, 7, 6, {"r": [5, 6]}, 9]
# print(tests, f"{a} Δ {b}", list_diff(a, b))
# print(tests, f"{a} Δ {c}", list_diff(a, c))
# print(tests, f"{b} Δ {c}", list_diff(b, c))
# print(tests, "b == c", are_list_equal(b, c))
# # print(tests, f'{b} == {c} ->', are_list_equal(b, c))
# # print(tests, f'dict eq ->', {'r': [6, 5]} != {'r': [5, 6]})
# log_d(tests, "exec. time", time() - begin) | /rudi-node-write-0.1.1.tar.gz/rudi-node-write-0.1.1/src/rudi_node_write/utils/list_utils.py | 0.497803 | 0.358521 | list_utils.py | pypi |
from typing import Final, Literal
from rudi_node_write.rudi_types.rudi_const import Language
# Default Language for the descriptions
DEFAULT_LANG: Language = "fr"

# Current version for RUDI Node metadata
RUDI_API_VERSION: Final[str] = "1.3.0"

# Current version for ODS
ODS_API_VERSION: str = "2.1"

# Default contact when none is provided in the source metadata
# Structure: https://app.swaggerhub.com/apis/OlivierMartineau/RUDI-PRODUCER/1.3.0#/Contact
DEFAULT_CONTACT: dict = {"contact_name": "Rudi node admin", "email": "community@rudi-univ-rennes1.fr"}

# Default producer when none is provided in the source metadata
# Structure: https://app.swaggerhub.com/apis/OlivierMartineau/RUDI-PRODUCER/1.3.0#/Organization
DEFAULT_PRODUCER: dict = {"organization_name": "Aucun Producteur"}

# Metadata publisher can differ from data producer.
# Select the value for the `metadata_info.publisher` field in RUDI metadata:
# - 'empty' if you want to let the field empty
# - 'producer' if the field should be filled with the data producer organization
# - 'default' if the field should be filled with the following default publisher
ENSURE_DEF_PUBLISHER: Literal["empty", "default", "producer"] = "default"

# Default organization when none is provided in the source metadata
# Structure: https://app.swaggerhub.com/apis/OlivierMartineau/RUDI-PRODUCER/1.3.0#/Organization
DEFAULT_PUBLISHER: dict = {
    "organization_name": "Univ. Rennes / IRISA",
    "organization_address": "263 avenue du Général Leclerc, 35 042 RENNES Cedex",
}

# Correspondences between ODS licences and RUDI standard licences
# Licences on an ODS server: https://data.rennesmetropole.fr/api/explore/v2.1/catalog/datasets?limit=100&offset=0&group_by=default.license
# RUDI standard licence codes: https://bacasable.fenix.rudi-univ-rennes1.fr/api/admin/licence_codes
# NOTE(review): keys look like regex fragments, and declaration order appears
# significant (specific ".*1" patterns precede generic ones) — confirm against
# the code that consumes this mapping.
STD_LICENCES_CORRESPONDENCES: dict = {
    "odbl": "odbl-1.0",
    "licence ouverte.*1": "etalab-1.0",
    "licence ouverte": "etalab-2.0",
    "open license.*1": "etalab-1.0",
    "open license": "etalab-2.0",
    "CC BY-ND": "cc-by-nd-4.0",
    "geofla": "etalab-1.0",
    "www.insee.fr.*information": "public-domain-cc0",
}

# Default licence that will be associated to the data when no licence is found
DEFAULT_LICENCE: str = "odbl-1.0"
from re import compile
from typing import Literal, get_args, Final
from rudi_node_write.utils.err import LiteralUnexpectedValueException
from rudi_node_write.utils.str_utils import is_string
# -----[ RUDI version ]-------------------------------------------------------------------------------------------------
# RUDI version regular expression: "1.2.3beta" is OK
# Pattern: major.minor with an optional .patch component, plus an optional lowercase suffix.
REGEX_RUDI_VERSION = compile(r"^[0-9]{1,2}\.[0-9]{1,2}(\.[0-9]{1,2})?[a-z]*$")
# -----[ RUDI objects ]-------------------------------------------------------------------------------------------------
# Object types addressable on a RUDI node; the *_TYPES tuples are the runtime counterparts of the Literal types.
RudiObjectType: Final = Literal["resources", "organizations", "contacts", "media"]
RUDI_OBJECT_TYPES = get_args(RudiObjectType)
# Same as RudiObjectType plus the themes enum, for cacheable API calls.
CachedType: Final = Literal["resources", "organizations", "contacts", "media", "enum/themes"]
CACHED_TYPES = get_args(CachedType)
# -----[ Languages ]----------------------------------------------------------------------------------------------------
# Two-letter language codes (ISO 639-1-style) recognized in RUDI metadata.
Language = Literal[
    "cs",
    "da",
    "de",
    "en",
    "el",
    "es",
    "fr",
    "hu",
    "it",
    "no",
    "pl",
    "pt",
    "ro",
    "ru",
    "sk",
]
RECOGNIZED_LANGUAGES = get_args(Language)
# -----[ Themes ]-------------------------------------------------------------------------------------------------------
# RUDI theme codes with their French and English labels.
# NOTE(review): "Eduction" below looks like a typo for "Education", but it is runtime data used for
# matching; changing it could break existing metadata — left as is on purpose.
Themes: Final = {
    "economy": {"fr": "Economie", "en": "Economy"},
    "citizenship": {"fr": "Citoyenneté", "en": "Citizenship"},
    "energyNetworks": {"fr": "Réseaux, Energie", "en": "Networks, Energy"},
    "culture": {"fr": "Culture, Sports, Loisirs", "en": "Culture, Sports, Leisure"},
    "transportation": {"fr": "Mobilité, Transport", "en": "Transportation"},
    "children": {"fr": "Enfance", "en": "Children"},
    "environment": {"fr": "Environnement", "en": "Environment"},
    "townPlanning": {"fr": "Urbanisme", "en": "Town planning"},
    "location": {"fr": "Référentiels géographiques", "en": "Location"},
    "education": {"fr": "Education", "en": "Eduction"},
    "publicSpace": {"fr": "Espace public", "en": "Public space"},
    "health": {"fr": "Santé, Sécurité", "en": "Health, security"},
    "housing": {"fr": "Logement", "en": "Housing"},
    "society": {"fr": "Social", "en": "Society"},
}
# Reverse lookup: maps a theme code or any of its fr/en labels back to the theme code.
# Built with a comprehension so the loop variables don't leak into the module namespace.
theme_translations = {
    translation: theme
    for theme, labels in Themes.items()
    for translation in (theme, labels["fr"], labels["en"])
}
ThemeTranslation: Final = theme_translations
# -----[ Licences ]-----------------------------------------------------------------------------------------------------
# A licence is either one of the RUDI standard licences (STANDARD) or a custom one (CUSTOM).
LicenceType: Final = Literal["STANDARD", "CUSTOM"]
LICENCE_TYPES = get_args(LicenceType)
LICENCE_TYPE_STANDARD: Final[str] = "STANDARD"
LICENCE_TYPE_CUSTOM: Final[str] = "CUSTOM"
# Codes of the RUDI standard licences.
LicenceCode = Literal[
    "apache-2.0",
    "cc-by-nd-4.0",
    "etalab-1.0",
    "etalab-2.0",
    "gpl-3.0",
    "mit",
    "odbl-1.0",
    "public-domain-cc0",
]
LICENCE_CODES = get_args(LicenceCode)
# -----[ Media types ]--------------------------------------------------------------------------------------------------
# NOTE(review): "SERIES" has no named constant unlike FILE/SERVICE — presumably unused yet; confirm before adding.
MediaType: Final = Literal["FILE", "SERVICE", "SERIES"]
MEDIA_TYPES = get_args(MediaType)
MEDIA_TYPE_FILE: Final[str] = "FILE"
MEDIA_TYPE_SERVICE: Final[str] = "SERVICE"
# -----[ Metadata general storage statuses ]----------------------------------------------------------------------------
StorageStatus: Final = Literal["pending", "online", "archived", "unavailable"]
STORAGE_STATUSES = get_args(StorageStatus)
# -----[ File storage statuses ]----------------------------------------------------------------------------------------
# NOTE(review): "nonexistant" (sic) is part of the API contract — do not correct the spelling here.
FileStorageStatus = Literal["nonexistant", "available", "missing", "archived", "removed"]
FILE_STORAGE_STATUSES = get_args(FileStorageStatus)
# -----[ Hash algorithms ]----------------------------------------------------------------------------------------------
# Hash algorithms accepted for media checksums (see RudiChecksum).
HashAlgorithm = Literal["MD5", "SHA-256", "SHA-512"]
HASH_ALGORITHMS = get_args(HashAlgorithm)
# -----[ File extensions & MIME types ]---------------------------------------------------------------------------------
# Maps a file extension to its MIME type.
FileExtensions: Final = {
    ".3gp": "video/3gpp",
    ".3gpp": "video/3gpp",
    ".7z": "application/x-7z-compressed",
    ".aac": "audio/aac",
    ".apng": "image/apng",
    ".avi": "video/x-msvideo",
    ".bin": "application/octet-stream",
    ".bmp": "image/bmp",
    ".bz": "application/x-bzip",
    ".bz2": "application/x-bzip2",
    ".css": "text/css",
    ".csv": "text/csv",
    ".doc": "application/msword",
    ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    ".epub": "application/epub+zip",
    ".exe": "application/x-executable",
    ".flif": "image/flif",
    ".geojson": "application/geo+json",
    ".gif": "image/gif",
    ".gz": "application/gzip",
    ".gzip": "application/gzip",
    ".htm": "text/html",
    ".html": "text/html",
    ".ico": "image/vnd.microsoft.icon",
    ".jpeg": "image/jpeg",
    ".jpg": "image/jpeg",
    ".js": "application/javascript",
    ".json": "application/json",
    ".jsonld": "application/ld+json",
    ".m4a": "audio/m4a",
    ".mkv": "video/x-matroska",
    ".mng": "image/x-mng",
    ".mov": "video/quicktime",
    ".mp3": "audio/mpeg",
    ".mp4": "video/mp4",
    ".mpeg": "video/mpeg",
    ".mpg": "video/mpeg",
    ".odp": "application/vnd.oasis.opendocument.presentation",
    ".ods": "application/vnd.oasis.opendocument.spreadsheet",
    ".odt": "application/vnd.oasis.opendocument.text",
    ".oga": "audio/ogg",
    ".ogg": "audio/ogg",
    ".ogv": "video/ogg",
    ".otf": "font/otf",
    ".pdf": "application/pdf",
    ".php": "text/php",
    ".png": "image/png",
    ".ppt": "application/vnd.ms-powerpoint",
    ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
    ".py": "text/x-python",
    ".sql": "application/sql",
    ".tar": "application/x-tar",
    ".tar.bz": "application/x-bzip",
    ".tar.bz2": "application/x-bzip2",
    ".tar.gz": "application/gzip",
    ".tgz": "application/gzip",
    ".tif": "image/tiff",
    ".tiff": "image/tiff",
    ".ttf": "font/ttf",
    ".txt": "text/plain",
    ".wav": "audio/wav",
    ".weba": "audio/webm",
    ".webm": "video/webm",
    ".webp": "image/webp",
    ".wmv": "video/x-ms-wmv",
    ".xls": "application/vnd.ms-excel",
    ".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    ".xml": "text/xml",
    ".yaml": "text/x-yaml",
    ".yml": "text/x-yaml",
    ".zip": "application/zip",
    ".zst": "application/zstd",
}
# Fix: the previous code used typing.get_args(FileExtensions), which returns () for a plain dict
# (get_args only works on typing constructs such as Literal), so FILE_EXTENSIONS was always empty.
FILE_EXTENSIONS = tuple(FileExtensions)
# MIME types accepted for a RUDI media file ('+crypt' suffixed variants presumably denote encrypted
# payloads — TODO confirm against the producer node API).
# NOTE(review): some values produced by FileExtensions above are absent from this list (e.g.
# "application/geo+json", "audio/aac", plain "text/x-yaml") — verify whether they should be added.
MimeTypes = Literal[
    "application/x-executable",
    "application/graphql",
    "application/javascript",
    "application/json",
    "application/ld+json",
    "application/msword",
    "application/pdf",
    "application/sql",
    "application/vnd.api+json",
    "application/vnd.ms-excel",
    "application/vnd.ms-powerpoint",
    "application/vnd.oasis.opendocument.text",
    "application/vnd.openxmlformats-officedocument.presentationml.presentation",
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
    "application/x-www-form-urlencoded",
    "application/xml",
    "application/zip",
    "application/zstd",
    "audio/mpeg",
    "audio/ogg",
    "image/gif",
    "image/apng",
    "image/flif",
    "image/webp",
    "image/x-mng",
    "image/jpeg",
    "image/png",
    "multipart/form-data",
    "text/css",
    "text/csv",
    "text/html",
    "text/php",
    "text/plain",
    "text/xml",
    "application/x-executable+crypt",
    "application/graphql+crypt",
    "application/javascript+crypt",
    "application/json+crypt",
    "application/ld+json+crypt",
    "application/msword+crypt",
    "application/pdf+crypt",
    "application/sql+crypt",
    "application/vnd.api+json+crypt",
    "application/vnd.ms-excel+crypt",
    "application/vnd.ms-powerpoint+crypt",
    "application/vnd.oasis.opendocument.text+crypt",
    "application/vnd.openxmlformats-officedocument.presentationml.presentation+crypt",
    "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet+crypt",
    "application/vnd.openxmlformats-officedocument.wordprocessingml.document+crypt",
    "application/x-www-form-urlencoded+crypt",
    "application/xml+crypt",
    "application/zip+crypt",
    "application/zstd+crypt",
    "audio/mpeg+crypt",
    "audio/ogg+crypt",
    "image/gif+crypt",
    "image/apng+crypt",
    "image/flif+crypt",
    "image/webp+crypt",
    "image/x-mng+crypt",
    "image/jpeg+crypt",
    "image/png+crypt",
    "multipart/form-data+crypt",
    "text/css+crypt",
    "text/csv+crypt",
    "text/html+crypt",
    "text/php+crypt",
    "text/plain+crypt",
    "text/xml+crypt",
    "text/x-yaml+crypt",
]
MIME_TYPES = get_args(MimeTypes)
# MIME types that are treated as UTF-8 encoded text.
MimeTypesUtf8Text = Literal[
    "application/geo+json",
    "application/graphql",
    "application/javascript",
    "application/json",
    "application/ld+json",
    "application/x-yaml",
    "application/xml",
]
MIME_TYPES_UTF8_TEXT = get_args(MimeTypesUtf8Text)
# Types accepted for a media connector parameter (see rudi_media.RudiMediaConnectorParameter).
ConnectorParameterTypes = Literal["STRING", "BOOLEAN", "DATE", "LONG", "DOUBLE", "ENUM"]
CONNECTOR_PARAMS_TYPES = get_args(ConnectorParameterTypes)
def check_is_literal(val, series: tuple, err_msg: str = "incorrect value", accept_none: bool = False):
    """
    Check if input value is in the given series, raise an error with input message otherwise
    :param val: the value to check
    :param series: the series of accepted values
    :param err_msg: error message raised if the value was not found in the series
    :param accept_none: None value is accepted if True
    :return: the checked value
    """
    if accept_none and val is None:
        return None
    if val in series:
        return val
    raise LiteralUnexpectedValueException(val, series, err_msg)
def check_rudi_version(version: str):
    """
    Validate a RUDI metadata version string against REGEX_RUDI_VERSION (e.g. "1.3.0", "1.2.3beta").
    :param version: the version string to check
    :return: the input version if it is valid
    :raise ValueError: if the input is not a string or does not match the version pattern
    """
    if not (is_string(version) and REGEX_RUDI_VERSION.match(version)):
        raise ValueError(f"Incorrect RUDI metadata version: '{version}'")
    return version
from uuid import UUID
from rudi_node_write.rudi_types.serializable import Serializable
from rudi_node_write.utils.dict_utils import check_has_key, safe_get_key, check_is_dict
from rudi_node_write.utils.log import log_d
from rudi_node_write.utils.str_utils import check_is_uuid4, uuid4_str
class RudiOrganization(Serializable):
    """A data producer or metadata publisher organization (RUDI 'Organization' object)."""

    def __init__(
        self,
        organization_id: str | UUID,
        organization_name: str,
        organization_caption: str | None = None,
        organization_summary: str | None = None,
        organization_address: str | None = None,
        organization_coordinates: dict | None = None,
        collection_tag: str | None = None,
    ):
        self.organization_id = check_is_uuid4(organization_id)
        self.organization_name = organization_name
        self.organization_caption = organization_caption
        self.organization_summary = organization_summary
        self.organization_address = organization_address
        # Coordinates are kept only when both latitude and longitude are provided.
        latitude = safe_get_key(organization_coordinates, "latitude")
        longitude = safe_get_key(organization_coordinates, "longitude")
        self.organization_coordinates = (
            None if latitude is None or longitude is None else {"latitude": latitude, "longitude": longitude}
        )
        self.collection_tag = collection_tag

    @staticmethod
    def from_json(o: dict):
        """Build a RudiOrganization from its JSON/dict representation."""
        check_is_dict(o)
        # Consistency fix: coordinate extraction is delegated to __init__ (which already drops the
        # pair when either latitude or longitude is missing) instead of being duplicated here with
        # a subtly different and/or condition — the resulting state is identical.
        return RudiOrganization(
            organization_id=check_is_uuid4(check_has_key(o, "organization_id")),
            organization_name=check_has_key(o, "organization_name"),
            organization_caption=o.get("organization_caption"),
            organization_summary=o.get("organization_summary"),
            organization_address=o.get("organization_address"),
            organization_coordinates=o.get("organization_coordinates"),
            collection_tag=o.get("collection_tag"),
        )
# Manual smoke-test demo: run this module directly to exercise the constructors; excluded from coverage.
if __name__ == "__main__":  # pragma: no cover
    # Constructed but unused: only checks that the constructor does not raise.
    my_org = RudiOrganization(
        organization_id=uuid4_str(),
        organization_name="IRISA",
        organization_address="263 avenue du Général Leclerc, 35000 RENNES",
        organization_coordinates={"longitude": 1.456, "latitude": 0},
    )
    log_d(
        "RudiOrganization",
        "constructor",
        RudiOrganization(
            organization_id=uuid4_str(),
            organization_name="IRISA",
            organization_address="263 avenue du Général Leclerc, 35000 RENNES",
            organization_coordinates={"longitude": 1.456, "latitude": 0},
        ),
    )
    log_d(
        "RudiOrganization",
        "make_producer",
        RudiOrganization.from_json(
            {
                "organization_id": uuid4_str(),
                "organization_name": "IRISA",
                "organization_address": "263 avenue du Général Leclerc, 35000 RENNES",
                "organization_coordinates": {"longitude": 1.456, "latitude": 0},
            }
        ),
    )
log_d(
"RudiOrganization",
"make basic producer",
RudiOrganization.from_json({"organization_id": uuid4_str(), "organization_name": "Noorg"}),
) | /rudi-node-write-0.1.1.tar.gz/rudi-node-write-0.1.1/src/rudi_node_write/rudi_types/rudi_org.py | 0.704465 | 0.228942 | rudi_org.py | pypi |
from rudi_node_write.rudi_types.serializable import Serializable
from rudi_node_write.utils.dict_utils import check_is_dict, check_has_key
from rudi_node_write.utils.log import log_d
from rudi_node_write.utils.str_utils import check_is_string
from rudi_node_write.utils.typing_utils import ensure_is_number, check_type
def check_is_latitude(latitude: float, alt_err_msg: str | None = None) -> float:
    """
    Validate a latitude given as a decimal number in [-90, 90].
    :param latitude: the value to check (coerced to a number)
    :param alt_err_msg: alternative field label used in the error message
    :return: the latitude as a float
    :raise ValueError: if the value is out of range
    """
    latitude = ensure_is_number(latitude)
    if -90 <= latitude <= 90:
        return float(latitude)
    label = alt_err_msg if alt_err_msg else "latitude"
    raise ValueError(f"{label} should be a decimal between -90 and 90, got '{latitude}'")
def check_is_longitude(longitude: float, alt_err_msg: str | None = None) -> float:
    """
    Validate a longitude given as a decimal number in [-180, 180].
    :param longitude: the value to check (coerced to a number)
    :param alt_err_msg: alternative field label used in the error message
    :return: the longitude as a float
    :raise ValueError: if the value is out of range
    """
    longitude = ensure_is_number(longitude)
    if -180 <= longitude <= 180:
        return float(longitude)
    label = alt_err_msg if alt_err_msg else "longitude"
    raise ValueError(f"{label} should be a decimal between -180 and 180, got '{longitude}'")
class BoundingBox(Serializable):
    """Geographic bounding box with coordinates given as decimal numbers (ISO 6709)."""

    def __init__(
        self,
        south_latitude: float,
        west_longitude: float,
        north_latitude: float,
        east_longitude: float,
    ):
        """
        Coordinates of a bounding box, given as decimal numbers (ISO 6709)
        :param south_latitude: southernmost latitude
        :param west_longitude: westernmost longitude
        :param north_latitude: northernmost latitude
        :param east_longitude: easternmost longitude
        :raise ValueError: if a coordinate is out of range or south > north
        """
        self.south_latitude = check_is_latitude(south_latitude, "southernmost latitude")
        self.north_latitude = check_is_latitude(north_latitude, "northernmost latitude")
        if self.south_latitude > self.north_latitude:
            raise ValueError("southernmost latitude should be lower than northernmost latitude")
        self.west_longitude = check_is_longitude(west_longitude, "westernmost longitude")
        self.east_longitude = check_is_longitude(east_longitude, "easternmost longitude")
        if self.west_longitude > self.east_longitude:
            # Only a warning, not an error — presumably to allow boxes crossing the antimeridian.
            print(
                f"! BoundingBox warning: westernmost latitude is generally lower than easternmost latitude. Got W: {self.west_longitude} > E: {self.east_longitude}"
            )

    @staticmethod
    def from_json(o: dict):
        """Build a BoundingBox from a dict with keys south/north_latitude and west/east_longitude."""
        check_is_dict(o)
        return BoundingBox(
            south_latitude=check_is_latitude(check_has_key(o, "south_latitude"), "southernmost latitude"),
            north_latitude=check_is_latitude(check_has_key(o, "north_latitude"), "northernmost latitude"),
            west_longitude=check_is_longitude(check_has_key(o, "west_longitude"), "westernmost longitude"),
            east_longitude=check_is_longitude(check_has_key(o, "east_longitude"), "easternmost longitude"),
        )

    @staticmethod
    def merge_bbox_list(bbox_list: list):
        """
        Merge a non-empty list of BoundingBox into the smallest box containing them all.
        :raise IndexError: if the input list is empty
        """
        first_bbox = bbox_list[0]
        lowest_south = first_bbox.south_latitude
        highest_north = first_bbox.north_latitude
        lowest_west = first_bbox.west_longitude
        highest_east = first_bbox.east_longitude
        # Simplification: iterating an empty slice is a no-op, so the previous `len > 1` guard was redundant.
        for bbox in bbox_list[1:]:
            lowest_south = min(lowest_south, bbox.south_latitude)
            highest_north = max(highest_north, bbox.north_latitude)
            lowest_west = min(lowest_west, bbox.west_longitude)
            highest_east = max(highest_east, bbox.east_longitude)
        return BoundingBox(
            south_latitude=lowest_south,
            west_longitude=lowest_west,
            north_latitude=highest_north,
            east_longitude=highest_east,
        )
class RudiGeography(Serializable):
    """Geographic metadata of a RUDI dataset: mandatory bounding box + optional distribution and projection."""

    def __init__(
        self,
        bounding_box: BoundingBox,
        geographic_distribution: dict | None = None,
        projection: str | None = None,
    ):
        # bounding_box is mandatory; the other attributes may stay None.
        self.bounding_box = check_type(bounding_box, BoundingBox, accept_none=False)
        # presumably a GeoJSON geometry object (see the demo below) — TODO confirm expected schema
        self.geographic_distribution = check_is_dict(geographic_distribution, accept_none=True)
        self.projection = check_is_string(projection, accept_none=True)

    @staticmethod
    def from_json(o: dict | None):
        """Build a RudiGeography from a dict; returns None when the input is None."""
        if o is None:
            return None
        return RudiGeography(
            bounding_box=BoundingBox.from_json(check_has_key(o, "bounding_box")),
            geographic_distribution=o.get("geographic_distribution"),
            projection=o.get("projection"),
        )
# Manual smoke-test demo: run this module directly to exercise the constructors; excluded from coverage.
if __name__ == "__main__":  # pragma: no cover
    tests = "geo_tests"
    # Constructed but unused: west (120) > east (40) triggers the warning branch without raising.
    bb1 = BoundingBox(10, 120, 30, 40)
    bb = BoundingBox.from_json(
        {
            "south_latitude": -10,
            "north_latitude": 24.8,
            "west_longitude": 40.7,
            "east_longitude": 104.8,
        }
    )
    log_d(tests, "BoundingBox", bb)
    geo = {
        "bounding_box": {
            "west_longitude": -1.96327,
            "east_longitude": -1.46558,
            "south_latitude": 47.93192,
            "north_latitude": 48.30684,
        },
        "geographic_distribution": {
            "type": "Polygon",
            "coordinates": [
                [
                    [-1.96327, 47.93192],
                    [-1.46558, 47.93192],
                    [-1.46558, 48.30684],
                    [-1.96327, 48.30684],
                    [-1.96327, 47.93192],
                ]
            ],
            "bbox": [-1.96327, 47.93192, -1.46558, 48.30684],
        },
        "projection": "WGS 84 (EPSG:4326)",
    }
log_d(tests, "RudiGeography", RudiGeography.from_json(geo)) | /rudi-node-write-0.1.1.tar.gz/rudi-node-write-0.1.1/src/rudi_node_write/rudi_types/rudi_geo.py | 0.802865 | 0.557243 | rudi_geo.py | pypi |
from abc import ABC
from json import dumps
from deepdiff import DeepDiff
from rudi_node_write.rudi_types.rudi_const import (
FileStorageStatus,
HashAlgorithm,
HASH_ALGORITHMS,
check_is_literal,
FILE_STORAGE_STATUSES,
MEDIA_TYPE_FILE,
MEDIA_TYPE_SERVICE,
MIME_TYPES,
CONNECTOR_PARAMS_TYPES,
)
from rudi_node_write.rudi_types.rudi_dates import RudiDates
from rudi_node_write.rudi_types.serializable import Serializable
from rudi_node_write.utils.dict_utils import check_is_dict, check_has_key, is_dict
from rudi_node_write.utils.list_utils import check_is_list, is_list
from rudi_node_write.utils.log import log_d
from rudi_node_write.utils.str_utils import check_is_uuid4, check_is_string
from rudi_node_write.utils.type_date import Date
from rudi_node_write.utils.typing_utils import check_is_int, get_type_name, check_type, is_type
# Maps upper-cased Python type names (as returned by get_type_name) to RUDI connector parameter types.
NORMALIZED_CONNECTOR_PARAMS_TYPES = {
    "STR": "STRING",
    "BOOL": "BOOLEAN",
    "INT": "LONG",
    "FLOAT": "DOUBLE",
}
def normalize_connector_parameter_type(param_type: str, accept_none: bool = False):
    """
    Map a free-form type name to one of the RUDI connector parameter types (CONNECTOR_PARAMS_TYPES).
    Python type names such as 'str'/'int'/'float'/'bool' are translated through
    NORMALIZED_CONNECTOR_PARAMS_TYPES; comparison is case-insensitive.
    :param param_type: the type name to normalize
    :param accept_none: if True, return None for an unrecognized type instead of raising
    :return: the normalized type, or None (only when accept_none is True and no match was found)
    :raise LiteralUnexpectedValueException: if the type cannot be normalized and accept_none is False
    """
    upper_param_type = check_is_string(param_type).upper()
    if upper_param_type in CONNECTOR_PARAMS_TYPES:
        return upper_param_type
    normalized_type = NORMALIZED_CONNECTOR_PARAMS_TYPES.get(upper_param_type)
    if normalized_type is None and accept_none:
        return None
    return check_is_literal(param_type if normalized_type is None else normalized_type, CONNECTOR_PARAMS_TYPES)
def check_is_accepted_value(value, accepted_values: list = None):
    """
    Ensure `value` belongs to `accepted_values` when such a constraint is defined.
    :param value: the value to check
    :param accepted_values: optional list of allowed values (None means unconstrained)
    :return: the checked value
    :raise ValueError: if a constraint exists and the value is not in it
    """
    has_constraint = check_is_list(accepted_values, accept_none=True) is not None
    if has_constraint and value not in accepted_values:
        raise ValueError(
            f"incoherence in connector parameter: input {value} is not in accepted values {accepted_values}"
        )
    return value
def normalize_connector_values(value, value_type: str = None, accepted_values: list = None):
    """
    Normalize a connector parameter into RUDI-compatible form.
    When no type is declared, the type is inferred from the value; values whose Python type has no
    RUDI equivalent are stringified (along with their accepted values). When a type is declared,
    the value's actual type must match it.
    :return: a [value, value_type, accepted_values] triple
    :raise ValueError: if the value is not among accepted_values, or contradicts the declared type
    """
    # fun = "normalize_connector_values"
    if value_type is None:
        # No declared type: infer it from the value itself.
        type_name = get_type_name(value)
        # log_d(fun, "type_name", type_name)
        normalized_val_type = normalize_connector_parameter_type(type_name, accept_none=True)
        if normalized_val_type is None:
            # We'll need to cast the value into string for it to get accepted in RUDI
            end_accepted_values = (
                [str(av) for av in accepted_values]
                if check_is_list(accepted_values, accept_none=True) is not None
                else None
            )
            return [check_is_accepted_value(str(value), end_accepted_values), "STRING", end_accepted_values]
        else:
            return [check_is_accepted_value(value, accepted_values), normalized_val_type, accepted_values]
    else:
        # A type was declared: normalize it and make sure the value's actual type agrees.
        # log_d(fun, "in", value, f"({value_type})")
        normalized_val_type = normalize_connector_parameter_type(value_type)
        if normalize_connector_parameter_type(get_type_name(value)) != normalized_val_type:
            raise ValueError(f"incoherence in connector parameter type: input '{value}' is not of type '{value_type}'")
        return [check_is_accepted_value(value, accepted_values), normalized_val_type, accepted_values]
class RudiMediaConnectorParameter(Serializable):
    """One connector parameter: a (key, value) pair with an optional declared type, usage note and accepted values."""

    def __init__(self, key: str, value, value_type: str | None = None, accepted_values: list | None = None, usage: str | None = None):
        self.key = check_is_string(key)
        # Value, declared type and accepted values are normalized together
        # (values with no RUDI-compatible type are stringified).
        [self.value, self.value_type, self.accepted_values] = normalize_connector_values(
            value, value_type, accepted_values
        )
        self.usage = check_is_string(usage, accept_none=True)

    def to_json(self, keep_nones: bool = False) -> dict:
        # JSON attribute is 'type' while the Python attribute is 'value_type' ('type' shadows the builtin).
        self_obj = {"key": self.key, "value": self.value, "type": self.value_type}
        if keep_nones or self.usage is not None:
            self_obj["usage"] = self.usage
        if keep_nones or self.accepted_values is not None:
            self_obj["accepted_values"] = self.accepted_values
        return self_obj

    @staticmethod
    def from_json(o: dict | list):
        """Build a parameter from a dict, or a parameter list when given a list."""
        if is_dict(o):
            [value, value_type, accepted_values] = normalize_connector_values(
                check_has_key(o, "value"),
                check_is_string(o.get("type"), accept_none=True),
                check_is_list(o.get("accepted_values"), accept_none=True),
            )
            return RudiMediaConnectorParameter(
                key=check_is_string(check_has_key(o, "key")),
                value=value,
                value_type=value_type,
                accepted_values=accepted_values,
                usage=check_is_string(o.get("usage"), accept_none=True),
            )
        if is_list(o):
            return RudiMediaConnectorParameterList.from_json(o)
        # Fix: previous message claimed only a dict was accepted although lists are handled above.
        raise TypeError("RudiMediaConnectorParameter.from_json input should be a dict or a list")
class RudiMediaConnectorParameterList(Serializable, list[RudiMediaConnectorParameter]):
    """A list of RudiMediaConnectorParameter with JSON (de)serialization helpers."""

    def __init__(self, list_entries: RudiMediaConnectorParameter | list[RudiMediaConnectorParameter]):
        super().__init__()
        # Accept either a single parameter or a list of parameters.
        if is_type(list_entries, RudiMediaConnectorParameter):
            self.append(list_entries)
        elif is_list(list_entries):
            for entry in list_entries:
                self.append(check_type(entry, RudiMediaConnectorParameter))
        else:
            raise TypeError(f"input parameter should be a list, got '{get_type_name(list_entries)}'")

    def to_json(self, keep_nones: bool = False) -> dict | list:
        return [entry.to_json(keep_nones) for entry in self]

    @staticmethod
    def from_json(o: list | dict):
        """Build a parameter list from a list of dicts (or a single dict)."""
        if is_dict(o):
            return RudiMediaConnectorParameterList([RudiMediaConnectorParameter.from_json(o)])
        if is_list(o):
            return RudiMediaConnectorParameterList([RudiMediaConnectorParameter.from_json(entry) for entry in o])
        raise TypeError("Property 'connector_parameters' should be a list")
class RudiMediaConnector(Serializable):
    """Connection information for a media: URL + optional interface contract and connector parameters."""

    def __init__(
        self,
        url: str,
        interface_contract: str | None = None,
        connector_parameters: RudiMediaConnectorParameterList
        | list[RudiMediaConnectorParameter]
        | RudiMediaConnectorParameter
        | None = None,
    ):
        self.url = check_is_string(url)
        self.interface_contract = check_is_string(interface_contract, accept_none=True)
        self.connector_parameters = None
        if connector_parameters is not None:
            if is_type(connector_parameters, RudiMediaConnectorParameterList):
                self.connector_parameters = connector_parameters
            elif is_list(connector_parameters) or is_type(connector_parameters, RudiMediaConnectorParameter):
                self.connector_parameters = RudiMediaConnectorParameterList(connector_parameters)
            else:
                # Unreachable on valid input: raises a TypeError with a consistent message.
                check_type(connector_parameters, RudiMediaConnectorParameterList)

    def to_json(self, keep_nones: bool = False) -> dict:
        self_json = {
            "url": self.url,
        }
        # Consistency fix: honor keep_nones like the other to_json implementations in this module
        # (the flag was previously ignored here). Values were already validated in __init__, so no
        # re-validation is needed on serialization.
        if keep_nones or self.interface_contract is not None:
            self_json["interface_contract"] = self.interface_contract
        if keep_nones or self.connector_parameters is not None:
            self_json["connector_parameters"] = (
                None if self.connector_parameters is None else self.connector_parameters.to_json(keep_nones)
            )
        return self_json

    @staticmethod
    def from_json(o: dict):
        """Build a RudiMediaConnector from its JSON/dict representation."""
        params_list = o.get("connector_parameters")
        connector_parameters = None if params_list is None else RudiMediaConnectorParameterList.from_json(params_list)
        return RudiMediaConnector(
            url=check_is_string(check_has_key(o, "url")),
            interface_contract=check_is_string(o.get("interface_contract"), accept_none=True),
            connector_parameters=connector_parameters,
        )
class RudiChecksum(Serializable):
    """Checksum of a media file: a hash algorithm + the hash value (serialized under the 'hash' key)."""

    def __init__(self, algo: HashAlgorithm, hash_str: str):
        self.algo = check_is_literal(algo, HASH_ALGORITHMS, "the value was not recognized as a hash algorithm ")
        self.hash_str = check_is_string(hash_str)

    def to_json(self, keep_nones: bool = False) -> dict:
        # JSON attribute is 'hash' while the Python attribute is 'hash_str' ('hash' shadows the builtin).
        return {"algo": self.algo, "hash": self.hash_str}

    def to_json_str(self, keep_nones: bool = False, **kwargs) -> str:
        # Fix: forward **kwargs (e.g. ensure_ascii, sort_keys) to json.dumps instead of silently dropping them.
        return dumps(self.to_json(keep_nones=keep_nones), **kwargs)

    @staticmethod
    def from_json(o: dict):
        """Build a RudiChecksum from a dict with keys 'algo' and 'hash'."""
        check_is_dict(o)
        algo = check_is_literal(
            check_has_key(o, "algo"),
            HASH_ALGORITHMS,
            "the value was not recognized as a hash algorithm ",
        )
        hash_str = check_is_string(check_has_key(o, "hash"))
        return RudiChecksum(algo=algo, hash_str=hash_str)
class RudiMedia(Serializable, ABC):
    """Abstract base of RUDI media objects; dispatches JSON deserialization to the concrete subclasses."""

    @staticmethod
    def from_json(o: dict):
        """Factory: build the concrete media subclass matching o['media_type'] (FILE or SERVICE)."""
        check_is_dict(o)
        media_type = check_has_key(o, "media_type")
        if media_type == MEDIA_TYPE_FILE:
            # Forward references: RudiMediaFile / RudiMediaService are defined below in this module.
            return RudiMediaFile.from_json(o)
        if media_type == MEDIA_TYPE_SERVICE:
            return RudiMediaService.from_json(o)
        raise NotImplementedError(f"cannot create a media for type '{media_type}'")

    def to_json_str(self, keep_nones: bool = False, ensure_ascii: bool = False, sort_keys: bool = False) -> str:
        """Serialize the media as a JSON string."""
        return dumps(self.to_json(keep_nones), ensure_ascii=ensure_ascii, sort_keys=sort_keys)
class RudiMediaService(RudiMedia):
    """A media of type SERVICE: a URL that gives access to the data through an external service."""

    def __init__(
        self,
        media_id: str,
        media_name: str,
        connector: RudiMediaConnector,
        media_caption: str | None = None,
        media_dates: RudiDates | None = None,
        api_documentation_url: str | None = None,
        collection_tag: str | None = None,
    ):
        self.media_type = MEDIA_TYPE_SERVICE
        self.media_id = check_is_uuid4(media_id)
        self.media_name = check_is_string(media_name)
        self.connector: RudiMediaConnector = check_type(connector, RudiMediaConnector)
        if connector.interface_contract is None:
            # Services default to the 'external' interface contract.
            self.connector.interface_contract = "external"
        self.media_dates = check_type(media_dates, RudiDates) if media_dates else RudiDates()
        self.media_caption = check_is_string(media_caption, accept_none=True)
        self.api_documentation_url = check_is_string(api_documentation_url, accept_none=True)
        self.collection_tag = check_is_string(collection_tag, accept_none=True)

    def to_json(self, keep_nones: bool = False) -> dict:
        out_obj = {
            "media_type": MEDIA_TYPE_SERVICE,
            "media_id": self.media_id,
            "media_name": self.media_name,
            "media_dates": self.media_dates.to_json(),
            "connector": self.connector.to_json(keep_nones),
        }
        if keep_nones or self.media_caption:
            out_obj["media_caption"] = self.media_caption
        if keep_nones or self.api_documentation_url:
            out_obj["api_documentation_url"] = self.api_documentation_url
        # Fix: collection_tag was read by from_json/__init__ but dropped on serialization,
        # which lost the tag on a from_json/to_json round trip.
        if keep_nones or self.collection_tag:
            out_obj["collection_tag"] = self.collection_tag
        return out_obj

    @staticmethod
    def from_json(o: dict):
        """Build a RudiMediaService from its JSON/dict representation."""
        check_is_dict(o)
        media_type = check_has_key(o, "media_type")
        if media_type != MEDIA_TYPE_SERVICE:
            raise ValueError(f"This cannot be structured as a RudiMediaService: got 'media_type' = '{media_type}'")
        connector = RudiMediaConnector.from_json(check_has_key(o, "connector"))
        media_dates = RudiDates.from_json(o.get("media_dates"))
        return RudiMediaService(
            media_id=check_is_uuid4(check_has_key(o, "media_id")),
            media_name=check_has_key(o, "media_name"),
            connector=connector,
            media_caption=o.get("media_caption"),
            media_dates=media_dates,
            api_documentation_url=o.get("api_documentation_url"),
            collection_tag=o.get("collection_tag"),
        )
class RudiMediaFile(RudiMedia):
    """A media of type FILE: a downloadable file with MIME type, size and checksum metadata."""

    def __init__(
        self,
        media_id: str,
        media_name: str,
        connector: RudiMediaConnector,
        file_type: str,
        file_size: int,
        checksum: RudiChecksum,
        media_caption: str | None = None,
        media_dates: RudiDates | None = None,
        file_encoding: str | None = None,
        file_structure: str | None = None,
        file_storage_status: FileStorageStatus | None = None,
        file_status_update: str | None = None,
        collection_tag: str | None = None,
    ):
        # Media mandatory attributes
        self.media_type = MEDIA_TYPE_FILE
        self.media_id = check_is_uuid4(media_id)
        self.media_name = check_is_string(media_name)
        self.connector: RudiMediaConnector = check_type(connector, RudiMediaConnector)
        if connector.interface_contract is None:
            # Files default to the download ('dwnl') interface contract.
            self.connector.interface_contract = "dwnl"
        # MediaFile mandatory attributes
        self.file_type = check_is_literal(file_type, MIME_TYPES, "incorrect parameter for MIME type")
        self.file_size = check_is_int(file_size)
        self.checksum = check_type(checksum, RudiChecksum)
        # Media optional attributes
        self.media_caption = check_is_string(media_caption, accept_none=True)
        self.media_dates = check_type(media_dates, RudiDates) if media_dates else RudiDates()
        # MediaFile optional attributes
        self.file_encoding = check_is_string(file_encoding, accept_none=True)
        self.file_structure = check_is_string(file_structure, accept_none=True)
        self.file_storage_status = check_is_literal(
            val=file_storage_status,
            series=FILE_STORAGE_STATUSES,
            err_msg="incorrect value for a file storage status",
            accept_none=True,
        )
        # NOTE(review): from_json below already converts this field with Date.from_str, so a Date
        # object may reach this second conversion — assumes Date.from_str is idempotent; TODO confirm.
        self.file_status_update = Date.from_str(file_status_update)
        # NOTE(review): unlike the other string attributes, collection_tag is not validated here.
        self.collection_tag = collection_tag

    @staticmethod
    def from_json(o: dict):
        """Build a RudiMediaFile from its JSON/dict representation."""
        check_is_dict(o)
        # Media mandatory attributes
        media_type = check_has_key(o, "media_type")
        if media_type != MEDIA_TYPE_FILE:
            raise ValueError(f"This cannot be structured as a RudiMediaFile: got 'media_type' = '{media_type}'")
        # MediaFile mandatory attributes
        file_type = check_is_literal(check_has_key(o, "file_type"), MIME_TYPES, "incorrect parameter for MIME type")
        # MediaFile optional attributes
        file_storage_status = o.get("file_storage_status")
        if file_storage_status:
            check_is_literal(
                file_storage_status,
                FILE_STORAGE_STATUSES,
                "value not accepted as a file storage status",
            )
        # log_d('RudiMediaFile.from_dict', 'preliminary checks OK')
        return RudiMediaFile(
            media_id=check_is_uuid4(check_has_key(o, "media_id")),
            media_name=check_is_string(check_has_key(o, "media_name")),
            connector=RudiMediaConnector.from_json(check_has_key(o, "connector")),
            file_type=file_type,
            file_size=check_is_int(check_has_key(o, "file_size")),
            checksum=RudiChecksum.from_json(check_has_key(o, "checksum")),
            media_caption=o.get("media_caption"),
            media_dates=RudiDates.from_json(o.get("media_dates")),
            file_encoding=o.get("file_encoding"),
            file_structure=o.get("file_structure"),
            file_storage_status=file_storage_status,
            file_status_update=Date.from_str(o.get("file_status_update")),
            collection_tag=o.get("collection_tag"),
        )
# Manual smoke-test demo: run this module directly to exercise (de)serialization; excluded from coverage.
if __name__ == "__main__":  # pragma: no cover
    tests = "RudiMedia tests"
    rudi_file_json = {
        "checksum": {
            "algo": "SHA-256",
            "hash": "f72d0035896447b55ff27998d6fd8773a68b2770027336c09da2bc6fd67e2dcf",
        },
        "media_dates": {
            "created": "2022-01-21T10:40:28.781+00:00",
            "updated": "2022-01-21T10:40:28.781+00:00",
        },
        "connector": {
            "url": "https://bacasable.fenix.rudi-univ-rennes1.fr/media/download/2611547a-42f1-4d7c-b736-2fef5cca30fe",
            "interface_contract": "dwnl",
            "connector_parameters": [
                {
                    "key": "random key 1",
                    "value": "random val 1",
                    "type": "STRING",
                    "usage": "test 1",
                    "accepted_values": ["random val 1", "random val 2"],
                }
            ],
        },
        "file_type": "image/png",
        "file_size": 414931,
        "file_storage_status": "available",
        "file_status_update": "2023-04-14T13:57:15.859+00:00",
        "media_id": "2611547a-42f1-4d7c-b736-2fef5cca30fe",
        "media_type": "FILE",
        "media_name": "unicorn.png",
    }
    # log_d(tests, "RudiMediaFile.from_json")
    log_d(tests, "RudiMediaFile.from_json", rudi_file := RudiMediaFile.from_json(rudi_file_json))
    # log_d(tests, "RudiMediaFile.to_json")
    log_d(tests, "RudiMediaFile.to_json", rudi_file.to_json())
    rudi_service_json = {
        "connector": {
            "url": "https://data.rennesmetropole.fr/api/explore/v2.1/catalog/datasets/qualite-de-service-selon-operateurs-et-axe-de-transport-2g-3g-4g/exports",
            "interface_contract": "external",
            "connector_parameters": [
                {
                    "key": "random key 2",
                    "value": "random val 2",
                    "type": "string",
                    "usage": "test 2",
                    "accepted_values": ["random val 1", "random val 2"],
                }
            ],
        },
        "media_id": "e611547a-42f1-4d7c-b736-2fef5cca30fe",
        "media_type": "SERVICE",
        "media_name": "exports disponibles",
    }
    log_d(tests, "RudiMediaService.from_json", rudi_service := RudiMediaService.from_json(rudi_service_json))
    log_d(tests, "RudiMediaService.to_json", rudi_service.to_json())
    # DeepDiff shows the (expected) differences between the input JSON and its round-tripped form.
    log_d(tests, "RudiMediaService.to_json diff", DeepDiff(rudi_service_json, rudi_service.to_json()))
    params = RudiMediaConnectorParameter(key="key1", value=3)
    log_d(tests, "RudiMediaConnectorParameters", params.to_json())
    log_d(
        tests,
        "RudiMediaConnector",
        RudiMediaConnector(
            url="https://app.swaggerhub.com/apis/OlivierMartineau/RUDI-PRODUCER/1.3.0#/MediaFile",
            connector_parameters=params,
        ).to_json(),
    )
    connect_params = RudiMediaConnectorParameter.from_json(
        {
            "key": "random key 3",
            "value": "random val 3",
            "type": "string",
            "usage": "test 3",
            "accepted_values": ["random val 1", "random val 2", "random val 3"],
        }
    )
    log_d(tests, "connect_params", connect_params)
    connect_params = RudiMediaConnectorParameter.from_json(
        {
            "key": "random key",
            "value": {"e": "value is a dict and will be stringified"},
            "usage": "test",
            "accepted_values": [{"e": "value is a dict and will be stringified"}],
        }
    )
    log_d(tests, "connect_params", connect_params)
    connect_params = RudiMediaConnectorParameter.from_json(
        {
            "key": "random key",
            "value": {"e": "value is a dict and will be stringified"},
            # "type": "dict",
            "usage": "test",
            "accepted_values": [{"e": "value is a dict and will be stringified"}],
        }
    )
    log_d(tests, "connect_params", connect_params)
    log_d(tests, "rudi_file.to_json_str()", rudi_file.to_json_str(sort_keys=True))
log_d(tests, "dumps(rudi_file_json)", dumps(rudi_file_json, sort_keys=True)) | /rudi-node-write-0.1.1.tar.gz/rudi-node-write-0.1.1/src/rudi_node_write/rudi_types/rudi_media.py | 0.565419 | 0.212947 | rudi_media.py | pypi |
/** Base protocol message. Base class of requests, responses, and events. */
export interface ProtocolMessage {
	/** Sequence number. */
	seq: number;
	/** Message type.
		Values: 'request', 'response', 'event', etc.
	*/
	type: string;
}
/** A client or server-initiated request. */
export interface Request extends ProtocolMessage {
	// type: 'request';
	/** The command to execute. */
	command: string;
	/** Object containing arguments for the command. */
	arguments?: any;
}
/** Server-initiated event. */
export interface Event extends ProtocolMessage {
	// type: 'event';
	/** Type of event. */
	event: string;
	/** Event-specific information. */
	body?: any;
}
/** Response to a request. */
export interface Response extends ProtocolMessage {
	// type: 'response';
	/** Sequence number of the corresponding request. */
	request_seq: number;
	/** Outcome of the request. */
	success: boolean;
	/** The command requested. */
	command: string;
	/** Contains error message if success == false. */
	message?: string;
	/** Contains request result if success is true and optional error details if success is false. */
	body?: any;
}
/** Event message for 'initialized' event type.
	This event indicates that the debug adapter is ready to accept configuration requests (e.g. SetBreakpointsRequest, SetExceptionBreakpointsRequest).
	A debug adapter is expected to send this event when it is ready to accept configuration requests (but not before the InitializeRequest has finished).
	The sequence of events/requests is as follows:
	- adapter sends InitializedEvent (after the InitializeRequest has returned)
	- frontend sends zero or more SetBreakpointsRequest
	- frontend sends one SetFunctionBreakpointsRequest
	- frontend sends a SetExceptionBreakpointsRequest if one or more exceptionBreakpointFilters have been defined (or if supportsConfigurationDoneRequest is not defined or false)
	- frontend sends other future configuration requests
	- frontend sends one ConfigurationDoneRequest to indicate the end of the configuration
*/
export interface InitializedEvent extends Event {
	// event: 'initialized';
}
/** Event message for 'stopped' event type.
	The event indicates that the execution of the debuggee has stopped due to some condition.
	This can be caused by a breakpoint previously set, a completed stepping action, the execution of a debugger statement, etc.
*/
export interface StoppedEvent extends Event {
	// event: 'stopped';
	body: {
		/** The reason for the event.
			For backward compatibility this string is shown in the UI if the 'description' attribute is missing (but it must not be translated).
			Values: 'step', 'breakpoint', 'exception', 'pause', 'entry', etc.
		*/
		reason: string;
		/** The full reason for the event, e.g. 'Paused on exception'. This string is shown in the UI as is. */
		description?: string;
		/** The thread which was stopped. */
		threadId?: number;
		/** Additional information. E.g. if reason is 'exception', text contains the exception name. This string is shown in the UI. */
		text?: string;
		/** If allThreadsStopped is true, a debug adapter can announce that all threads have stopped.
		 *  The client should use this information to enable that all threads can be expanded to access their stacktraces.
		 *  If the attribute is missing or false, only the thread with the given threadId can be expanded.
		 */
		allThreadsStopped?: boolean;
	};
}
/** Event message for 'continued' event type.
	The event indicates that the execution of the debuggee has continued.
	Please note: a debug adapter is not expected to send this event in response to a request that implies that execution continues, e.g. 'launch' or 'continue'.
	It is only necessary to send a ContinuedEvent if there was no previous request that implied this.
*/
export interface ContinuedEvent extends Event {
	// event: 'continued';
	body: {
		/** The thread which was continued. */
		threadId: number;
		/** If allThreadsContinued is true, a debug adapter can announce that all threads have continued. */
		allThreadsContinued?: boolean;
	};
}
/** Event message for 'exited' event type.
	The event indicates that the debuggee has exited.
*/
export interface ExitedEvent extends Event {
	// event: 'exited';
	body: {
		/** The exit code returned from the debuggee. */
		exitCode: number;
	};
}
/** Event message for 'terminated' event type.
	The event indicates that debugging of the debuggee has terminated.
*/
export interface TerminatedEvent extends Event {
	// event: 'terminated';
	body?: {
		/** A debug adapter may set 'restart' to true (or to an arbitrary object) to request that the front end restarts the session.
			The value is not interpreted by the client and passed unmodified as an attribute '__restart' to the launchRequest.
		*/
		restart?: any;
	};
}
/** Event message for 'thread' event type.
	The event indicates that a thread has started or exited.
*/
export interface ThreadEvent extends Event {
	// event: 'thread';
	body: {
		/** The reason for the event.
			Values: 'started', 'exited', etc.
		*/
		reason: string;
		/** The identifier of the thread. */
		threadId: number;
	};
}
/** Event message for 'output' event type.
	The event indicates that the target has produced some output.
*/
export interface OutputEvent extends Event {
	// event: 'output';
	body: {
		/** The output category. If not specified, 'console' is assumed.
			Values: 'console', 'stdout', 'stderr', 'telemetry', etc.
		*/
		category?: string;
		/** The output to report. */
		output: string;
		/** If an attribute 'variablesReference' exists and its value is > 0, the output contains objects which can be retrieved by passing variablesReference to the VariablesRequest. */
		variablesReference?: number;
		/** An optional source location where the output was produced. */
		source?: Source;
		/** An optional source location line where the output was produced. */
		line?: number;
		/** An optional source location column where the output was produced. */
		column?: number;
		/** Optional data to report. For the 'telemetry' category the data will be sent to telemetry, for the other categories the data is shown in JSON format. */
		data?: any;
	};
}
/** Event message for 'breakpoint' event type.
	The event indicates that some information about a breakpoint has changed.
*/
export interface BreakpointEvent extends Event {
	// event: 'breakpoint';
	body: {
		/** The reason for the event.
			Values: 'changed', 'new', 'removed', etc.
		*/
		reason: string;
		/** The breakpoint. */
		breakpoint: Breakpoint;
	};
}
/** Event message for 'module' event type.
	The event indicates that some information about a module has changed.
*/
export interface ModuleEvent extends Event {
	// event: 'module';
	body: {
		/** The reason for the event. */
		reason: 'new' | 'changed' | 'removed';
		/** The new, changed, or removed module. In case of 'removed' only the module id is used. */
		module: Module;
	};
}
/** Event message for 'loadedSource' event type.
	The event indicates that some source has been added, changed, or removed from the set of all loaded sources.
*/
export interface LoadedSourceEvent extends Event {
	// event: 'loadedSource';
	body: {
		/** The reason for the event. */
		reason: 'new' | 'changed' | 'removed';
		/** The new, changed, or removed source. */
		source: Source;
	};
}
/** Event message for 'process' event type.
	The event indicates that the debugger has begun debugging a new process — either one that it has launched or one that it has attached to.
*/
export interface ProcessEvent extends Event {
	// event: 'process';
	body: {
		/** The logical name of the process. This is usually the full path to process's executable file. Example: /home/example/myproj/program.js. */
		name: string;
		/** The system process id of the debugged process. This property will be missing for non-system processes. */
		systemProcessId?: number;
		/** If true, the process is running on the same computer as the debug adapter. */
		isLocalProcess?: boolean;
		/** Describes how the debug engine started debugging this process.
			'launch': Process was launched under the debugger.
			'attach': Debugger attached to an existing process.
			'attachForSuspendedLaunch': A project launcher component has launched a new process in a suspended state and then asked the debugger to attach.
		*/
		startMethod?: 'launch' | 'attach' | 'attachForSuspendedLaunch';
	};
}
/** runInTerminal request; value of command field is 'runInTerminal'.
	With this request a debug adapter can run a command in a terminal.
*/
export interface RunInTerminalRequest extends Request {
	// command: 'runInTerminal';
	arguments: RunInTerminalRequestArguments;
}
/** Arguments for 'runInTerminal' request. */
export interface RunInTerminalRequestArguments {
	/** What kind of terminal to launch. */
	kind?: 'integrated' | 'external';
	/** Optional title of the terminal. */
	title?: string;
	/** Working directory of the command. */
	cwd: string;
	/** List of arguments. The first argument is the command to run. */
	args: string[];
	/** Environment key-value pairs that are added to the default environment. */
	env?: { [key: string]: string; };
}
/** Response to 'runInTerminal' request. */
export interface RunInTerminalResponse extends Response {
	body: {
		/** The process ID. */
		processId?: number;
	};
}
/** On error that is whenever 'success' is false, the body can provide more details. */
export interface ErrorResponse extends Response {
	body: {
		/** An optional, structured error message. */
		error?: Message;
	};
}
/** Initialize request; value of command field is 'initialize'. */
export interface InitializeRequest extends Request {
	// command: 'initialize';
	arguments: InitializeRequestArguments;
}
/** Arguments for 'initialize' request. */
export interface InitializeRequestArguments {
	/** The ID of the (frontend) client using this adapter. */
	clientID?: string;
	/** The ID of the debug adapter. */
	adapterID: string;
	/** The ISO-639 locale of the (frontend) client using this adapter, e.g. en-US or de-CH. */
	locale?: string;
	/** If true all line numbers are 1-based (default). */
	linesStartAt1?: boolean;
	/** If true all column numbers are 1-based (default). */
	columnsStartAt1?: boolean;
	/** Determines in what format paths are specified. The default is 'path', which is the native format.
		Values: 'path', 'uri', etc.
	*/
	pathFormat?: string;
	/** Client supports the optional type attribute for variables. */
	supportsVariableType?: boolean;
	/** Client supports the paging of variables. */
	supportsVariablePaging?: boolean;
	/** Client supports the runInTerminal request. */
	supportsRunInTerminalRequest?: boolean;
}
/** Response to 'initialize' request. */
export interface InitializeResponse extends Response {
	/** The capabilities of this debug adapter. */
	body?: Capabilities;
}
/** ConfigurationDone request; value of command field is 'configurationDone'.
	The client of the debug protocol must send this request at the end of the sequence of configuration requests (which was started by the InitializedEvent).
*/
export interface ConfigurationDoneRequest extends Request {
	// command: 'configurationDone';
	arguments?: ConfigurationDoneArguments;
}
/** Arguments for 'configurationDone' request.
	The configurationDone request has no standardized attributes.
*/
export interface ConfigurationDoneArguments {
}
/** Response to 'configurationDone' request. This is just an acknowledgement, so no body field is required. */
export interface ConfigurationDoneResponse extends Response {
}
/** Launch request; value of command field is 'launch'. */
export interface LaunchRequest extends Request {
	// command: 'launch';
	arguments: LaunchRequestArguments;
}
/** Arguments for 'launch' request. */
export interface LaunchRequestArguments {
	/** If noDebug is true the launch request should launch the program without enabling debugging. */
	noDebug?: boolean;
}
/** Response to 'launch' request. This is just an acknowledgement, so no body field is required. */
export interface LaunchResponse extends Response {
}
/** Attach request; value of command field is 'attach'. */
export interface AttachRequest extends Request {
	// command: 'attach';
	arguments: AttachRequestArguments;
}
/** Arguments for 'attach' request.
	The attach request has no standardized attributes.
*/
export interface AttachRequestArguments {
}
/** Response to 'attach' request. This is just an acknowledgement, so no body field is required. */
export interface AttachResponse extends Response {
}
/** Restart request; value of command field is 'restart'.
	Restarts a debug session. If the capability 'supportsRestartRequest' is missing or has the value false,
	the client will implement 'restart' by terminating the debug adapter first and then launching it anew.
	A debug adapter can override this default behaviour by implementing a restart request
	and setting the capability 'supportsRestartRequest' to true.
*/
export interface RestartRequest extends Request {
	// command: 'restart';
	arguments?: RestartArguments;
}
/** Arguments for 'restart' request.
	The restart request has no standardized attributes.
*/
export interface RestartArguments {
}
/** Response to 'restart' request. This is just an acknowledgement, so no body field is required. */
export interface RestartResponse extends Response {
}
/** Disconnect request; value of command field is 'disconnect'. */
export interface DisconnectRequest extends Request {
	// command: 'disconnect';
	arguments?: DisconnectArguments;
}
/** Arguments for 'disconnect' request. */
export interface DisconnectArguments {
	/** Indicates whether the debuggee should be terminated when the debugger is disconnected.
		If unspecified, the debug adapter is free to do whatever it thinks is best.
		A client can only rely on this attribute being properly honored if a debug adapter returns true for the 'supportTerminateDebuggee' capability.
	*/
	terminateDebuggee?: boolean;
}
/** Response to 'disconnect' request. This is just an acknowledgement, so no body field is required. */
export interface DisconnectResponse extends Response {
}
/** SetBreakpoints request; value of command field is 'setBreakpoints'.
	Sets multiple breakpoints for a single source and clears all previous breakpoints in that source.
	To clear all breakpoints for a source, specify an empty array.
	When a breakpoint is hit, a StoppedEvent (event type 'breakpoint') is generated.
*/
export interface SetBreakpointsRequest extends Request {
	// command: 'setBreakpoints';
	arguments: SetBreakpointsArguments;
}
/** Arguments for 'setBreakpoints' request. */
export interface SetBreakpointsArguments {
	/** The source location of the breakpoints; either source.path or source.reference must be specified. */
	source: Source;
	/** The code locations of the breakpoints. */
	breakpoints?: SourceBreakpoint[];
	/** Deprecated: The code locations of the breakpoints. */
	lines?: number[];
	/** A value of true indicates that the underlying source has been modified which results in new breakpoint locations. */
	sourceModified?: boolean;
}
/** Response to 'setBreakpoints' request.
	Returned is information about each breakpoint created by this request.
	This includes the actual code location and whether the breakpoint could be verified.
	The breakpoints returned are in the same order as the elements of the 'breakpoints'
	(or the deprecated 'lines') in the SetBreakpointsArguments.
*/
export interface SetBreakpointsResponse extends Response {
	body: {
		/** Information about the breakpoints. The array elements are in the same order as the elements of the 'breakpoints' (or the deprecated 'lines') in the SetBreakpointsArguments. */
		breakpoints: Breakpoint[];
	};
}
/** SetFunctionBreakpoints request; value of command field is 'setFunctionBreakpoints'.
	Sets multiple function breakpoints and clears all previous function breakpoints.
	To clear all function breakpoints, specify an empty array.
	When a function breakpoint is hit, a StoppedEvent (event type 'function breakpoint') is generated.
*/
export interface SetFunctionBreakpointsRequest extends Request {
	// command: 'setFunctionBreakpoints';
	arguments: SetFunctionBreakpointsArguments;
}
/** Arguments for 'setFunctionBreakpoints' request. */
export interface SetFunctionBreakpointsArguments {
	/** The function names of the breakpoints. */
	breakpoints: FunctionBreakpoint[];
}
/** Response to 'setFunctionBreakpoints' request.
	Returned is information about each breakpoint created by this request.
*/
export interface SetFunctionBreakpointsResponse extends Response {
	body: {
		/** Information about the breakpoints. The array elements correspond to the elements of the 'breakpoints' array. */
		breakpoints: Breakpoint[];
	};
}
/** SetExceptionBreakpoints request; value of command field is 'setExceptionBreakpoints'.
	The request configures the debugger's response to thrown exceptions. If an exception is configured to break, a StoppedEvent is fired (event type 'exception').
*/
export interface SetExceptionBreakpointsRequest extends Request {
	// command: 'setExceptionBreakpoints';
	arguments: SetExceptionBreakpointsArguments;
}
/** Arguments for 'setExceptionBreakpoints' request. */
export interface SetExceptionBreakpointsArguments {
	/** IDs of checked exception options. The set of IDs is returned via the 'exceptionBreakpointFilters' capability. */
	filters: string[];
	/** Configuration options for selected exceptions. */
	exceptionOptions?: ExceptionOptions[];
}
/** Response to 'setExceptionBreakpoints' request. This is just an acknowledgement, so no body field is required. */
export interface SetExceptionBreakpointsResponse extends Response {
}
/** Continue request; value of command field is 'continue'.
	The request starts the debuggee to run again.
*/
export interface ContinueRequest extends Request {
	// command: 'continue';
	arguments: ContinueArguments;
}
/** Arguments for 'continue' request. */
export interface ContinueArguments {
	/** Continue execution for the specified thread (if possible). If the backend cannot continue on a single thread but will continue on all threads, it should set the allThreadsContinued attribute in the response to true. */
	threadId: number;
}
/** Response to 'continue' request. */
export interface ContinueResponse extends Response {
	body: {
		/** If true, the continue request has ignored the specified thread and continued all threads instead. If this attribute is missing a value of 'true' is assumed for backward compatibility. */
		allThreadsContinued?: boolean;
	};
}
/** Next request; value of command field is 'next'.
	The request starts the debuggee to run again for one step.
	The debug adapter first sends the NextResponse and then a StoppedEvent (event type 'step') after the step has completed.
*/
export interface NextRequest extends Request {
	// command: 'next';
	arguments: NextArguments;
}
/** Arguments for 'next' request. */
export interface NextArguments {
	/** Execute 'next' for this thread. */
	threadId: number;
}
/** Response to 'next' request. This is just an acknowledgement, so no body field is required. */
export interface NextResponse extends Response {
}
/** StepIn request; value of command field is 'stepIn'.
	The request starts the debuggee to step into a function/method if possible.
	If it cannot step into a target, 'stepIn' behaves like 'next'.
	The debug adapter first sends the StepInResponse and then a StoppedEvent (event type 'step') after the step has completed.
	If there are multiple function/method calls (or other targets) on the source line,
	the optional argument 'targetId' can be used to control into which target the 'stepIn' should occur.
	The list of possible targets for a given source line can be retrieved via the 'stepInTargets' request.
*/
export interface StepInRequest extends Request {
	// command: 'stepIn';
	arguments: StepInArguments;
}
/** Arguments for 'stepIn' request. */
export interface StepInArguments {
	/** Execute 'stepIn' for this thread. */
	threadId: number;
	/** Optional id of the target to step into. */
	targetId?: number;
}
/** Response to 'stepIn' request. This is just an acknowledgement, so no body field is required. */
export interface StepInResponse extends Response {
}
/** StepOut request; value of command field is 'stepOut'.
	The request starts the debuggee to run again for one step.
	The debug adapter first sends the StepOutResponse and then a StoppedEvent (event type 'step') after the step has completed.
*/
export interface StepOutRequest extends Request {
	// command: 'stepOut';
	arguments: StepOutArguments;
}
/** Arguments for 'stepOut' request. */
export interface StepOutArguments {
	/** Execute 'stepOut' for this thread. */
	threadId: number;
}
/** Response to 'stepOut' request. This is just an acknowledgement, so no body field is required. */
export interface StepOutResponse extends Response {
}
/** StepBack request; value of command field is 'stepBack'.
	The request starts the debuggee to run one step backwards.
	The debug adapter first sends the StepBackResponse and then a StoppedEvent (event type 'step') after the step has completed. Clients should only call this request if the capability supportsStepBack is true.
*/
export interface StepBackRequest extends Request {
	// command: 'stepBack';
	arguments: StepBackArguments;
}
/** Arguments for 'stepBack' request. */
export interface StepBackArguments {
	/** Execute 'stepBack' for this thread. */
	threadId: number;
}
/** Response to 'stepBack' request. This is just an acknowledgement, so no body field is required. */
export interface StepBackResponse extends Response {
}
/** ReverseContinue request; value of command field is 'reverseContinue'.
	The request starts the debuggee to run backward. Clients should only call this request if the capability supportsStepBack is true.
*/
export interface ReverseContinueRequest extends Request {
	// command: 'reverseContinue';
	arguments: ReverseContinueArguments;
}
/** Arguments for 'reverseContinue' request. */
export interface ReverseContinueArguments {
	/** Execute 'reverseContinue' for this thread. */
	threadId: number;
}
/** Response to 'reverseContinue' request. This is just an acknowledgement, so no body field is required. */
export interface ReverseContinueResponse extends Response {
}
/** RestartFrame request; value of command field is 'restartFrame'.
	The request restarts execution of the specified stackframe.
	The debug adapter first sends the RestartFrameResponse and then a StoppedEvent (event type 'restart') after the restart has completed.
*/
export interface RestartFrameRequest extends Request {
	// command: 'restartFrame';
	arguments: RestartFrameArguments;
}
/** Arguments for 'restartFrame' request. */
export interface RestartFrameArguments {
	/** Restart this stackframe. */
	frameId: number;
}
/** Response to 'restartFrame' request. This is just an acknowledgement, so no body field is required. */
export interface RestartFrameResponse extends Response {
}
/** Goto request; value of command field is 'goto'.
	The request sets the location where the debuggee will continue to run.
	This makes it possible to skip the execution of code or to execute code again.
	The code between the current location and the goto target is not executed but skipped.
	The debug adapter first sends the GotoResponse and then a StoppedEvent (event type 'goto').
*/
export interface GotoRequest extends Request {
	// command: 'goto';
	arguments: GotoArguments;
}
/** Arguments for 'goto' request. */
export interface GotoArguments {
	/** Set the goto target for this thread. */
	threadId: number;
	/** The location where the debuggee will continue to run. */
	targetId: number;
}
/** Response to 'goto' request. This is just an acknowledgement, so no body field is required. */
export interface GotoResponse extends Response {
}
/** Pause request; value of command field is 'pause'.
	The request suspends the debuggee.
	The debug adapter first sends the PauseResponse and then a StoppedEvent (event type 'pause') after the thread has been paused successfully.
*/
export interface PauseRequest extends Request {
	// command: 'pause';
	arguments: PauseArguments;
}
/** Arguments for 'pause' request. */
export interface PauseArguments {
	/** Pause execution for this thread. */
	threadId: number;
}
/** Response to 'pause' request. This is just an acknowledgement, so no body field is required. */
export interface PauseResponse extends Response {
}
/** StackTrace request; value of command field is 'stackTrace'. The request returns a stacktrace from the current execution state. */
export interface StackTraceRequest extends Request {
	// command: 'stackTrace';
	arguments: StackTraceArguments;
}
/** Arguments for 'stackTrace' request. */
export interface StackTraceArguments {
	/** Retrieve the stacktrace for this thread. */
	threadId: number;
	/** The index of the first frame to return; if omitted frames start at 0. */
	startFrame?: number;
	/** The maximum number of frames to return. If levels is not specified or 0, all frames are returned. */
	levels?: number;
	/** Specifies details on how to format the stack frames. */
	format?: StackFrameFormat;
}
/** Response to 'stackTrace' request. */
export interface StackTraceResponse extends Response {
	body: {
		/** The frames of the stack trace. If the array has length zero, there are no stackframes available.
			This means that there is no location information available.
		*/
		stackFrames: StackFrame[];
		/** The total number of frames available. */
		totalFrames?: number;
	};
}
/** Scopes request; value of command field is 'scopes'.
	The request returns the variable scopes for a given stackframe ID.
*/
export interface ScopesRequest extends Request {
	// command: 'scopes';
	arguments: ScopesArguments;
}
/** Arguments for 'scopes' request. */
export interface ScopesArguments {
	/** Retrieve the scopes for this stackframe. */
	frameId: number;
}
/** Response to 'scopes' request. */
export interface ScopesResponse extends Response {
	body: {
		/** The scopes of the stackframe. If the array has length zero, there are no scopes available. */
		scopes: Scope[];
	};
}
/** Variables request; value of command field is 'variables'.
	Retrieves all child variables for the given variable reference.
	An optional filter can be used to limit the fetched children to either named or indexed children.
*/
export interface VariablesRequest extends Request {
	// command: 'variables';
	arguments: VariablesArguments;
}
/** Arguments for 'variables' request. */
export interface VariablesArguments {
	/** The Variable reference. */
	variablesReference: number;
	/** Optional filter to limit the child variables to either named or indexed. If omitted, both types are fetched. */
	filter?: 'indexed' | 'named';
	/** The index of the first variable to return; if omitted children start at 0. */
	start?: number;
	/** The number of variables to return. If count is missing or 0, all variables are returned. */
	count?: number;
	/** Specifies details on how to format the Variable values. */
	format?: ValueFormat;
}
/** Response to 'variables' request. */
export interface VariablesResponse extends Response {
	body: {
		/** All (or a range) of variables for the given variable reference. */
		variables: Variable[];
	};
}
/** setVariable request; value of command field is 'setVariable'.
	Set the variable with the given name in the variable container to a new value.
*/
export interface SetVariableRequest extends Request {
	// command: 'setVariable';
	arguments: SetVariableArguments;
}
/** Arguments for 'setVariable' request. */
export interface SetVariableArguments {
	/** The reference of the variable container. */
	variablesReference: number;
	/** The name of the variable. */
	name: string;
	/** The value of the variable. */
	value: string;
	/** Specifies details on how to format the response value. */
	format?: ValueFormat;
}
/** Response to 'setVariable' request. */
export interface SetVariableResponse extends Response {
	body: {
		/** The new value of the variable. */
		value: string;
		/** The type of the new value. Typically shown in the UI when hovering over the value. */
		type?: string;
		/** If variablesReference is > 0, the new value is structured and its children can be retrieved by passing variablesReference to the VariablesRequest. */
		variablesReference?: number;
		/** The number of named child variables.
			The client can use this optional information to present the variables in a paged UI and fetch them in chunks.
		*/
		namedVariables?: number;
		/** The number of indexed child variables.
			The client can use this optional information to present the variables in a paged UI and fetch them in chunks.
		*/
		indexedVariables?: number;
	};
}
/** Source request; value of command field is 'source'.
	The request retrieves the source code for a given source reference.
*/
export interface SourceRequest extends Request {
	// command: 'source';
	arguments: SourceArguments;
}
/** Arguments for 'source' request. */
export interface SourceArguments {
	/** Specifies the source content to load. Either source.path or source.sourceReference must be specified. */
	source?: Source;
	/** The reference to the source. This is the same as source.sourceReference. This is provided for backward compatibility since old backends do not understand the 'source' attribute. */
	sourceReference: number;
}
/** Response to 'source' request. */
export interface SourceResponse extends Response {
	body: {
		/** Content of the source reference. */
		content: string;
		/** Optional content type (mime type) of the source. */
		mimeType?: string;
	};
}
/** Threads request; value of command field is 'threads'.
	The request retrieves a list of all threads.
*/
export interface ThreadsRequest extends Request {
	// command: 'threads';
}
/** Response to 'threads' request. */
export interface ThreadsResponse extends Response {
	body: {
		/** All threads. */
		threads: Thread[];
	};
}
/** Modules can be retrieved from the debug adapter with the ModulesRequest which can either return all modules or a range of modules to support paging. */
export interface ModulesRequest extends Request {
	// command: 'modules';
	arguments: ModulesArguments;
}
/** Arguments for 'modules' request. */
export interface ModulesArguments {
	/** The index of the first module to return; if omitted modules start at 0. */
	startModule?: number;
	/** The number of modules to return. If moduleCount is not specified or 0, all modules are returned. */
	moduleCount?: number;
}
/** Response to 'modules' request. */
export interface ModulesResponse extends Response {
	body: {
		/** All modules or range of modules. */
		modules: Module[];
		/** The total number of modules available. */
		totalModules?: number;
	};
}
/** Retrieves the set of all sources currently loaded by the debugged process. */
export interface LoadedSourcesRequest extends Request {
	// command: 'loadedSources';
	arguments?: LoadedSourcesArguments;
}
/** Arguments for 'loadedSources' request.
	The 'loadedSources' request has no standardized arguments.
*/
export interface LoadedSourcesArguments {
}
/** Response to 'loadedSources' request. */
export interface LoadedSourcesResponse extends Response {
	body: {
		/** Set of loaded sources. */
		sources: Source[];
	};
}
/** Evaluate request; value of command field is 'evaluate'.
	Evaluates the given expression in the context of the top most stack frame.
	The expression has access to any variables and arguments that are in scope.
*/
export interface EvaluateRequest extends Request {
	// command: 'evaluate';
	arguments: EvaluateArguments;
}
/** Arguments for 'evaluate' request. */
export interface EvaluateArguments {
	/** The expression to evaluate. */
	expression: string;
	/** Evaluate the expression in the scope of this stack frame. If not specified, the expression is evaluated in the global scope. */
	frameId?: number;
	/** The context in which the evaluate request is run.
		Values:
		'watch': evaluate is run in a watch.
		'repl': evaluate is run from REPL console.
		'hover': evaluate is run from a data hover.
		etc.
	*/
	context?: string;
	/** Specifies details on how to format the Evaluate result. */
	format?: ValueFormat;
}
/** Response to 'evaluate' request. */
export interface EvaluateResponse extends Response {
	body: {
		/** The result of the evaluate request. */
		result: string;
		/** The optional type of the evaluate result. */
		type?: string;
		/** Properties of an evaluate result that can be used to determine how to render the result in the UI. */
		presentationHint?: VariablePresentationHint;
		/** If variablesReference is > 0, the evaluate result is structured and its children can be retrieved by passing variablesReference to the VariablesRequest. */
		variablesReference: number;
		/** The number of named child variables.
			The client can use this optional information to present the variables in a paged UI and fetch them in chunks.
		*/
		namedVariables?: number;
		/** The number of indexed child variables.
			The client can use this optional information to present the variables in a paged UI and fetch them in chunks.
		*/
		indexedVariables?: number;
	};
}
/** StepInTargets request; value of command field is 'stepInTargets'.
This request retrieves the possible stepIn targets for the specified stack frame.
These targets can be used in the 'stepIn' request.
The StepInTargets may only be called if the 'supportsStepInTargetsRequest' capability exists and is true.
*/
export interface StepInTargetsRequest extends Request {
// command: 'stepInTargets';
arguments: StepInTargetsArguments;
}
/** Arguments for 'stepInTargets' request. */
export interface StepInTargetsArguments {
/** The stack frame for which to retrieve the possible stepIn targets. */
frameId: number;
}
/** Response to 'stepInTargets' request. */
export interface StepInTargetsResponse extends Response {
body: {
/** The possible stepIn targets of the specified source location. */
targets: StepInTarget[];
};
}
/** GotoTargets request; value of command field is 'gotoTargets'.
This request retrieves the possible goto targets for the specified source location.
These targets can be used in the 'goto' request.
The GotoTargets request may only be called if the 'supportsGotoTargetsRequest' capability exists and is true.
*/
export interface GotoTargetsRequest extends Request {
// command: 'gotoTargets';
arguments: GotoTargetsArguments;
}
/** Arguments for 'gotoTargets' request. */
export interface GotoTargetsArguments {
/** The source location for which the goto targets are determined. */
source: Source;
/** The line location for which the goto targets are determined. */
line: number;
/** An optional column location for which the goto targets are determined. */
column?: number;
}
/** Response to 'gotoTargets' request. */
export interface GotoTargetsResponse extends Response {
body: {
/** The possible goto targets of the specified location. */
targets: GotoTarget[];
};
}
/** CompletionsRequest request; value of command field is 'completions'.
Returns a list of possible completions for a given caret position and text.
The CompletionsRequest may only be called if the 'supportsCompletionsRequest' capability exists and is true.
*/
export interface CompletionsRequest extends Request {
// command: 'completions';
arguments: CompletionsArguments;
}
/** Arguments for 'completions' request. */
export interface CompletionsArguments {
/** Returns completions in the scope of this stack frame. If not specified, the completions are returned for the global scope. */
frameId?: number;
/** One or more source lines. Typically this is the text a user has typed into the debug console before he asked for completion. */
text: string;
/** The character position for which to determine the completion proposals. */
column: number;
/** An optional line for which to determine the completion proposals. If missing the first line of the text is assumed. */
line?: number;
}
/** Response to 'completions' request. */
export interface CompletionsResponse extends Response {
body: {
/** The possible completions for the given caret position and text. */
targets: CompletionItem[];
};
}
/** ExceptionInfoRequest request; value of command field is 'exceptionInfo'.
Retrieves the details of the exception that caused the StoppedEvent to be raised.
*/
export interface ExceptionInfoRequest extends Request {
// command: 'exceptionInfo';
arguments: ExceptionInfoArguments;
}
/** Arguments for 'exceptionInfo' request. */
export interface ExceptionInfoArguments {
/** Thread for which exception information should be retrieved. */
threadId: number;
}
/** Response to 'exceptionInfo' request. */
export interface ExceptionInfoResponse extends Response {
body: {
/** ID of the exception that was thrown. */
exceptionId: string;
/** Descriptive text for the exception provided by the debug adapter. */
description?: string;
/** Mode that caused the exception notification to be raised. */
breakMode: ExceptionBreakMode;
/** Detailed information about the exception. */
details?: ExceptionDetails;
};
}
/** Information about the capabilities of a debug adapter. */
export interface Capabilities {
/** The debug adapter supports the configurationDoneRequest. */
supportsConfigurationDoneRequest?: boolean;
/** The debug adapter supports function breakpoints. */
supportsFunctionBreakpoints?: boolean;
/** The debug adapter supports conditional breakpoints. */
supportsConditionalBreakpoints?: boolean;
/** The debug adapter supports breakpoints that break execution after a specified number of hits. */
supportsHitConditionalBreakpoints?: boolean;
/** The debug adapter supports a (side effect free) evaluate request for data hovers. */
supportsEvaluateForHovers?: boolean;
/** Available filters or options for the setExceptionBreakpoints request. */
exceptionBreakpointFilters?: ExceptionBreakpointsFilter[];
/** The debug adapter supports stepping back via the stepBack and reverseContinue requests. */
supportsStepBack?: boolean;
/** The debug adapter supports setting a variable to a value. */
supportsSetVariable?: boolean;
/** The debug adapter supports restarting a frame. */
supportsRestartFrame?: boolean;
/** The debug adapter supports the gotoTargetsRequest. */
supportsGotoTargetsRequest?: boolean;
/** The debug adapter supports the stepInTargetsRequest. */
supportsStepInTargetsRequest?: boolean;
/** The debug adapter supports the completionsRequest. */
supportsCompletionsRequest?: boolean;
/** The debug adapter supports the modules request. */
supportsModulesRequest?: boolean;
/** The set of additional module information exposed by the debug adapter. */
additionalModuleColumns?: ColumnDescriptor[];
/** Checksum algorithms supported by the debug adapter. */
supportedChecksumAlgorithms?: ChecksumAlgorithm[];
/** The debug adapter supports the RestartRequest. In this case a client should not implement 'restart' by terminating and relaunching the adapter but by calling the RestartRequest. */
supportsRestartRequest?: boolean;
/** The debug adapter supports 'exceptionOptions' on the setExceptionBreakpoints request. */
supportsExceptionOptions?: boolean;
/** The debug adapter supports a 'format' attribute on the stackTraceRequest, variablesRequest, and evaluateRequest. */
supportsValueFormattingOptions?: boolean;
/** The debug adapter supports the exceptionInfo request. */
supportsExceptionInfoRequest?: boolean;
/** The debug adapter supports the 'terminateDebuggee' attribute on the 'disconnect' request. */
supportTerminateDebuggee?: boolean;
/** The debug adapter supports the delayed loading of parts of the stack, which requires that both the 'startFrame' and 'levels' arguments and the 'totalFrames' result of the 'StackTrace' request are supported. */
supportsDelayedStackTraceLoading?: boolean;
/** The debug adapter supports the 'loadedSources' request. */
supportsLoadedSourcesRequest?: boolean;
}
/** An ExceptionBreakpointsFilter is shown in the UI as an option for configuring how exceptions are dealt with. */
export interface ExceptionBreakpointsFilter {
/** The internal ID of the filter. This value is passed to the setExceptionBreakpoints request. */
filter: string;
/** The name of the filter. This will be shown in the UI. */
label: string;
/** Initial value of the filter. If not specified a value 'false' is assumed. */
default?: boolean;
}
/** A structured message object. Used to return errors from requests. */
export interface Message {
/** Unique identifier for the message. */
id: number;
/** A format string for the message. Embedded variables have the form '{name}'.
If variable name starts with an underscore character, the variable does not contain user data (PII) and can be safely used for telemetry purposes.
*/
format: string;
/** An object used as a dictionary for looking up the variables in the format string. */
variables?: { [key: string]: string; };
/** If true send to telemetry. */
sendTelemetry?: boolean;
/** If true show user. */
showUser?: boolean;
/** An optional url where additional information about this message can be found. */
url?: string;
/** An optional label that is presented to the user as the UI for opening the url. */
urlLabel?: string;
}
/** A Module object represents a row in the modules view.
Two attributes are mandatory: an id identifies a module in the modules view and is used in a ModuleEvent for identifying a module for adding, updating or deleting.
The name is used to minimally render the module in the UI.
Additional attributes can be added to the module. They will show up in the module View if they have a corresponding ColumnDescriptor.
To avoid an unnecessary proliferation of additional attributes with similar semantics but different names
we recommend to re-use attributes from the 'recommended' list below first, and only introduce new attributes if nothing appropriate could be found.
*/
export interface Module {
/** Unique identifier for the module. */
id: number | string;
/** A name of the module. */
name: string;
/** optional but recommended attributes.
always try to use these first before introducing additional attributes.
Logical full path to the module. The exact definition is implementation defined, but usually this would be a full path to the on-disk file for the module.
*/
path?: string;
/** True if the module is optimized. */
isOptimized?: boolean;
/** True if the module is considered 'user code' by a debugger that supports 'Just My Code'. */
isUserCode?: boolean;
/** Version of Module. */
version?: string;
/** User understandable description of if symbols were found for the module (ex: 'Symbols Loaded', 'Symbols not found', etc.). */
symbolStatus?: string;
/** Logical full path to the symbol file. The exact definition is implementation defined. */
symbolFilePath?: string;
/** Module created or modified. */
dateTimeStamp?: string;
/** Address range covered by this module. */
addressRange?: string;
}
/** A ColumnDescriptor specifies what module attribute to show in a column of the ModulesView, how to format it, and what the column's label should be.
It is only used if the underlying UI actually supports this level of customization.
*/
export interface ColumnDescriptor {
/** Name of the attribute rendered in this column. */
attributeName: string;
/** Header UI label of column. */
label: string;
/** Format to use for the rendered values in this column. TBD how the format strings looks like. */
format?: string;
/** Datatype of values in this column. Defaults to 'string' if not specified. */
type?: 'string' | 'number' | 'boolean' | 'unixTimestampUTC';
/** Width of this column in characters (hint only). */
width?: number;
}
/** The ModulesViewDescriptor is the container for all declarative configuration options of a ModuleView.
For now it only specifies the columns to be shown in the modules view.
*/
export interface ModulesViewDescriptor {
columns: ColumnDescriptor[];
}
/** A Thread */
export interface Thread {
/** Unique identifier for the thread. */
id: number;
/** A name of the thread. */
name: string;
}
/** A Source is a descriptor for source code. It is returned from the debug adapter as part of a StackFrame and it is used by clients when specifying breakpoints. */
export interface Source {
/** The short name of the source. Every source returned from the debug adapter has a name. When sending a source to the debug adapter this name is optional. */
name?: string;
/** The path of the source to be shown in the UI. It is only used to locate and load the content of the source if no sourceReference is specified (or its value is 0). */
path?: string;
/** If sourceReference > 0 the contents of the source must be retrieved through the SourceRequest (even if a path is specified). A sourceReference is only valid for a session, so it must not be used to persist a source. */
sourceReference?: number;
/** An optional hint for how to present the source in the UI. A value of 'deemphasize' can be used to indicate that the source is not available or that it is skipped on stepping. */
presentationHint?: 'normal' | 'emphasize' | 'deemphasize';
/** The (optional) origin of this source: possible values 'internal module', 'inlined content from source map', etc. */
origin?: string;
/** An optional list of sources that are related to this source. These may be the source that generated this source. */
sources?: Source[];
/** Optional data that a debug adapter might want to loop through the client. The client should leave the data intact and persist it across sessions. The client should not interpret the data. */
adapterData?: any;
/** The checksums associated with this file. */
checksums?: Checksum[];
}
/** A Stackframe contains the source location. */
export interface StackFrame {
/** An identifier for the stack frame. It must be unique across all threads. This id can be used to retrieve the scopes of the frame with the 'scopesRequest' or to restart the execution of a stackframe. */
id: number;
/** The name of the stack frame, typically a method name. */
name: string;
/** The optional source of the frame. */
source?: Source;
/** The line within the file of the frame. If source is null or doesn't exist, line is 0 and must be ignored. */
line: number;
/** The column within the line. If source is null or doesn't exist, column is 0 and must be ignored. */
column: number;
/** An optional end line of the range covered by the stack frame. */
endLine?: number;
/** An optional end column of the range covered by the stack frame. */
endColumn?: number;
/** The module associated with this frame, if any. */
moduleId?: number | string;
/** An optional hint for how to present this frame in the UI. A value of 'label' can be used to indicate that the frame is an artificial frame that is used as a visual label or separator. A value of 'subtle' can be used to change the appearance of a frame in a 'subtle' way. */
presentationHint?: 'normal' | 'label' | 'subtle';
}
/** A Scope is a named container for variables. Optionally a scope can map to a source or a range within a source. */
export interface Scope {
/** Name of the scope such as 'Arguments', 'Locals'. */
name: string;
/** The variables of this scope can be retrieved by passing the value of variablesReference to the VariablesRequest. */
variablesReference: number;
/** The number of named variables in this scope.
The client can use this optional information to present the variables in a paged UI and fetch them in chunks.
*/
namedVariables?: number;
/** The number of indexed variables in this scope.
The client can use this optional information to present the variables in a paged UI and fetch them in chunks.
*/
indexedVariables?: number;
/** If true, the number of variables in this scope is large or expensive to retrieve. */
expensive: boolean;
/** Optional source for this scope. */
source?: Source;
/** Optional start line of the range covered by this scope. */
line?: number;
/** Optional start column of the range covered by this scope. */
column?: number;
/** Optional end line of the range covered by this scope. */
endLine?: number;
/** Optional end column of the range covered by this scope. */
endColumn?: number;
}
/** A Variable is a name/value pair.
Optionally a variable can have a 'type' that is shown if space permits or when hovering over the variable's name.
An optional 'kind' is used to render additional properties of the variable, e.g. different icons can be used to indicate that a variable is public or private.
If the value is structured (has children), a handle is provided to retrieve the children with the VariablesRequest.
If the number of named or indexed children is large, the numbers should be returned via the optional 'namedVariables' and 'indexedVariables' attributes.
The client can use this optional information to present the children in a paged UI and fetch them in chunks.
*/
export interface Variable {
/** The variable's name. */
name: string;
/** The variable's value. This can be a multi-line text, e.g. for a function the body of a function. */
value: string;
/** The type of the variable's value. Typically shown in the UI when hovering over the value. */
type?: string;
/** Properties of a variable that can be used to determine how to render the variable in the UI. */
presentationHint?: VariablePresentationHint;
/** Optional evaluatable name of this variable which can be passed to the 'EvaluateRequest' to fetch the variable's value. */
evaluateName?: string;
/** If variablesReference is > 0, the variable is structured and its children can be retrieved by passing variablesReference to the VariablesRequest. */
variablesReference: number;
/** The number of named child variables.
The client can use this optional information to present the children in a paged UI and fetch them in chunks.
*/
namedVariables?: number;
/** The number of indexed child variables.
The client can use this optional information to present the children in a paged UI and fetch them in chunks.
*/
indexedVariables?: number;
}
/** Optional properties of a variable that can be used to determine how to render the variable in the UI. */
export interface VariablePresentationHint {
/** The kind of variable. Before introducing additional values, try to use the listed values.
Values: 'property', 'method', 'class', 'data', 'event', 'baseClass', 'innerClass', 'interface', 'mostDerivedClass', etc.
*/
kind?: string;
/** Set of attributes represented as an array of strings. Before introducing additional values, try to use the listed values.
Values:
'static': Indicates that the object is static.
'constant': Indicates that the object is a constant.
'readOnly': Indicates that the object is read only.
'rawString': Indicates that the object is a raw string.
'hasObjectId': Indicates that the object has an Object ID associated with it.
'canHaveObjectId': Indicates that the object can have an Object ID created for it.
'hasSideEffects': Indicates that the evaluation had side effects.
etc.
*/
attributes?: string[];
/** Visibility of variable. Before introducing additional values, try to use the listed values.
Values: 'public', 'private', 'protected', 'internal', 'final', etc.
*/
visibility?: string;
}
/** Properties of a breakpoint passed to the setBreakpoints request. */
export interface SourceBreakpoint {
/** The source line of the breakpoint. */
line: number;
/** An optional source column of the breakpoint. */
column?: number;
/** An optional expression for conditional breakpoints. */
condition?: string;
/** An optional expression that controls how many hits of the breakpoint are ignored. The backend is expected to interpret the expression as needed. */
hitCondition?: string;
}
/** Properties of a breakpoint passed to the setFunctionBreakpoints request. */
export interface FunctionBreakpoint {
/** The name of the function. */
name: string;
/** An optional expression for conditional breakpoints. */
condition?: string;
/** An optional expression that controls how many hits of the breakpoint are ignored. The backend is expected to interpret the expression as needed. */
hitCondition?: string;
}
/** Information about a Breakpoint created in setBreakpoints or setFunctionBreakpoints. */
export interface Breakpoint {
/** An optional unique identifier for the breakpoint. */
id?: number;
/** If true breakpoint could be set (but not necessarily at the desired location). */
verified: boolean;
/** An optional message about the state of the breakpoint. This is shown to the user and can be used to explain why a breakpoint could not be verified. */
message?: string;
/** The source where the breakpoint is located. */
source?: Source;
/** The start line of the actual range covered by the breakpoint. */
line?: number;
/** An optional start column of the actual range covered by the breakpoint. */
column?: number;
/** An optional end line of the actual range covered by the breakpoint. */
endLine?: number;
/** An optional end column of the actual range covered by the breakpoint. If no end line is given, then the end column is assumed to be in the start line. */
endColumn?: number;
}
/** A StepInTarget can be used in the 'stepIn' request and determines into which single target the stepIn request should step. */
export interface StepInTarget {
/** Unique identifier for a stepIn target. */
id: number;
/** The name of the stepIn target (shown in the UI). */
label: string;
}
/** A GotoTarget describes a code location that can be used as a target in the 'goto' request.
The possible goto targets can be determined via the 'gotoTargets' request.
*/
export interface GotoTarget {
/** Unique identifier for a goto target. This is used in the goto request. */
id: number;
/** The name of the goto target (shown in the UI). */
label: string;
/** The line of the goto target. */
line: number;
/** An optional column of the goto target. */
column?: number;
/** An optional end line of the range covered by the goto target. */
endLine?: number;
/** An optional end column of the range covered by the goto target. */
endColumn?: number;
}
/** CompletionItems are the suggestions returned from the CompletionsRequest. */
export interface CompletionItem {
/** The label of this completion item. By default this is also the text that is inserted when selecting this completion. */
label: string;
/** If text is not falsy then it is inserted instead of the label. */
text?: string;
/** The item's type. Typically the client uses this information to render the item in the UI with an icon. */
type?: CompletionItemType;
/** This value determines the location (in the CompletionsRequest's 'text' attribute) where the completion text is added.
If missing the text is added at the location specified by the CompletionsRequest's 'column' attribute.
*/
start?: number;
/** This value determines how many characters are overwritten by the completion text.
If missing the value 0 is assumed which results in the completion text being inserted.
*/
length?: number;
}
/** Some predefined types for the CompletionItem. Please note that not all clients have specific icons for all of them. */
export type CompletionItemType = 'method' | 'function' | 'constructor' | 'field' | 'variable' | 'class' | 'interface' | 'module' | 'property' | 'unit' | 'value' | 'enum' | 'keyword' | 'snippet' | 'text' | 'color' | 'file' | 'reference' | 'customcolor';
/** Names of checksum algorithms that may be supported by a debug adapter. */
export type ChecksumAlgorithm = 'MD5' | 'SHA1' | 'SHA256' | 'timestamp';
/** The checksum of an item calculated by the specified algorithm. */
export interface Checksum {
/** The algorithm used to calculate this checksum. */
algorithm: ChecksumAlgorithm;
/** Value of the checksum. */
checksum: string;
}
/** Provides formatting information for a value. */
export interface ValueFormat {
/** Display the value in hex. */
hex?: boolean;
}
/** Provides formatting information for a stack frame. */
export interface StackFrameFormat extends ValueFormat {
/** Displays parameters for the stack frame. */
parameters?: boolean;
/** Displays the types of parameters for the stack frame. */
parameterTypes?: boolean;
/** Displays the names of parameters for the stack frame. */
parameterNames?: boolean;
/** Displays the values of parameters for the stack frame. */
parameterValues?: boolean;
/** Displays the line number of the stack frame. */
line?: boolean;
/** Displays the module of the stack frame. */
module?: boolean;
/** Includes all stack frames, including those the debug adapter might otherwise hide. */
includeAll?: boolean;
}
/** An ExceptionOptions assigns configuration options to a set of exceptions. */
export interface ExceptionOptions {
/** A path that selects a single or multiple exceptions in a tree. If 'path' is missing, the whole tree is selected. By convention the first segment of the path is a category that is used to group exceptions in the UI. */
path?: ExceptionPathSegment[];
/** Condition when a thrown exception should result in a break. */
breakMode: ExceptionBreakMode;
}
/** This enumeration defines all possible conditions when a thrown exception should result in a break.
never: never breaks,
always: always breaks,
unhandled: breaks when exception unhandled,
userUnhandled: breaks if the exception is not handled by user code.
*/
export type ExceptionBreakMode = 'never' | 'always' | 'unhandled' | 'userUnhandled';
/** An ExceptionPathSegment represents a segment in a path that is used to match leafs or nodes in a tree of exceptions. If a segment consists of more than one name, it matches the names provided if 'negate' is false or missing or it matches anything except the names provided if 'negate' is true. */
export interface ExceptionPathSegment {
/** If false or missing this segment matches the names provided, otherwise it matches anything except the names provided. */
negate?: boolean;
/** Depending on the value of 'negate' the names that should match or not match. */
names: string[];
}
/** Detailed information about an exception that has occurred. */
export interface ExceptionDetails {
    /** Message contained in the exception. */
    message?: string;
    /** Short type name of the exception object. */
    typeName?: string;
    /** Fully-qualified type name of the exception object. */
    fullTypeName?: string;
    /** Optional expression that can be evaluated in the current scope to obtain the exception object. */
    evaluateName?: string;
    /** Stack trace at the time the exception was thrown. */
    stackTrace?: string;
    /** Details of the exceptions contained by this exception, if any (e.g. inner exceptions of an aggregate exception). */
    innerException?: ExceptionDetails[];
}
} | /rueckenwind-0.5.3.tar.gz/rueckenwind-0.5.3/rw/debugger_protocol.py | 0.597843 | 0.202384 | debugger_protocol.py | pypi |
<!-- Begin section: Overview -->
# Ruff
[](https://github.com/charliermarsh/ruff)
[](https://pypi.python.org/pypi/ruff)
[](https://pypi.python.org/pypi/ruff)
[](https://pypi.python.org/pypi/ruff)
[](https://github.com/charliermarsh/ruff/actions)
[**Discord**](https://discord.gg/c9MhzV8aU5) | [**Docs**](https://beta.ruff.rs/docs/) | [**Playground**](https://play.ruff.rs/)
An extremely fast Python linter, written in Rust.
<p align="center">
<img alt="Shows a bar chart with benchmark results." src="https://user-images.githubusercontent.com/1309177/212613257-5f4bca12-6d6b-4c79-9bac-51a4c6d08928.svg">
</p>
<p align="center">
<i>Linting the CPython codebase from scratch.</i>
</p>
- ⚡️ 10-100x faster than existing linters
- 🐍 Installable via `pip`
- 🛠️ `pyproject.toml` support
- 🤝 Python 3.11 compatibility
- 📦 Built-in caching, to avoid re-analyzing unchanged files
- 🔧 Autofix support, for automatic error correction (e.g., automatically remove unused imports)
- 📏 Over [500 built-in rules](https://beta.ruff.rs/docs/rules/)
- ⚖️ [Near-parity](https://beta.ruff.rs/docs/faq/#how-does-ruff-compare-to-flake8) with the built-in Flake8 rule set
- 🔌 Native re-implementations of dozens of Flake8 plugins, like flake8-bugbear
- ⌨️ First-party editor integrations for [VS Code](https://github.com/charliermarsh/ruff-vscode) and [more](https://github.com/charliermarsh/ruff-lsp)
- 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://beta.ruff.rs/docs/configuration/#pyprojecttoml-discovery)
Ruff aims to be orders of magnitude faster than alternative tools while integrating more
functionality behind a single, common interface.
Ruff can be used to replace [Flake8](https://pypi.org/project/flake8/) (plus dozens of plugins),
[isort](https://pypi.org/project/isort/), [pydocstyle](https://pypi.org/project/pydocstyle/),
[yesqa](https://github.com/asottile/yesqa), [eradicate](https://pypi.org/project/eradicate/),
[pyupgrade](https://pypi.org/project/pyupgrade/), and [autoflake](https://pypi.org/project/autoflake/),
all while executing tens or hundreds of times faster than any individual tool.
Ruff is extremely actively developed and used in major open-source projects like:
- [Apache Airflow](https://github.com/apache/airflow)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Hugging Face](https://github.com/huggingface/transformers)
- [Pandas](https://github.com/pandas-dev/pandas)
- [SciPy](https://github.com/scipy/scipy)
...and many more.
Ruff is backed by [Astral](https://astral.sh). Read the [launch post](https://astral.sh/blog/announcing-astral-the-company-behind-ruff),
or the original [project announcement](https://notes.crmarsh.com/python-tooling-could-be-much-much-faster).
## Testimonials
[**Sebastián Ramírez**](https://twitter.com/tiangolo/status/1591912354882764802), creator
of [FastAPI](https://github.com/tiangolo/fastapi):
> Ruff is so fast that sometimes I add an intentional bug in the code just to confirm it's actually
> running and checking the code.
[**Nick Schrock**](https://twitter.com/schrockn/status/1612615862904827904), founder of [Elementl](https://www.elementl.com/),
co-creator of [GraphQL](https://graphql.org/):
> Why is Ruff a gamechanger? Primarily because it is nearly 1000x faster. Literally. Not a typo. On
> our largest module (dagster itself, 250k LOC) pylint takes about 2.5 minutes, parallelized across 4
> cores on my M1. Running ruff against our _entire_ codebase takes .4 seconds.
[**Bryan Van de Ven**](https://github.com/bokeh/bokeh/pull/12605), co-creator
of [Bokeh](https://github.com/bokeh/bokeh/), original author
of [Conda](https://docs.conda.io/en/latest/):
> Ruff is ~150-200x faster than flake8 on my machine, scanning the whole repo takes ~0.2s instead of
> ~20s. This is an enormous quality of life improvement for local dev. It's fast enough that I added
> it as an actual commit hook, which is terrific.
[**Timothy Crosley**](https://twitter.com/timothycrosley/status/1606420868514877440),
creator of [isort](https://github.com/PyCQA/isort):
> Just switched my first project to Ruff. Only one downside so far: it's so fast I couldn't believe it was working till I intentionally introduced some errors.
[**Tim Abbott**](https://github.com/charliermarsh/ruff/issues/465#issuecomment-1317400028), lead
developer of [Zulip](https://github.com/zulip/zulip):
> This is just ridiculously fast... `ruff` is amazing.
<!-- End section: Overview -->
## Table of Contents
For more, see the [documentation](https://beta.ruff.rs/docs/).
1. [Getting Started](#getting-started)
1. [Configuration](#configuration)
1. [Rules](#rules)
1. [Contributing](#contributing)
1. [Support](#support)
1. [Acknowledgements](#acknowledgements)
1. [Who's Using Ruff?](#whos-using-ruff)
1. [License](#license)
## Getting Started
For more, see the [documentation](https://beta.ruff.rs/docs/).
### Installation
Ruff is available as [`ruff`](https://pypi.org/project/ruff/) on PyPI:
```shell
pip install ruff
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
and with [a variety of other package managers](https://beta.ruff.rs/docs/installation/).
### Usage
To run Ruff, try any of the following:
```shell
ruff check . # Lint all files in the current directory (and any subdirectories)
ruff check path/to/code/ # Lint all files in `/path/to/code` (and any subdirectories)
ruff check path/to/code/*.py # Lint all `.py` files in `/path/to/code`
ruff check path/to/code/to/file.py # Lint `file.py`
```
Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:
```yaml
- repo: https://github.com/charliermarsh/ruff-pre-commit
# Ruff version.
rev: 'v0.0.267'
hooks:
- id: ruff
```
Ruff can also be used as a [VS Code extension](https://github.com/charliermarsh/ruff-vscode) or
alongside any other editor through the [Ruff LSP](https://github.com/charliermarsh/ruff-lsp).
Ruff can also be used as a [GitHub Action](https://github.com/features/actions) via
[`ruff-action`](https://github.com/chartboost/ruff-action):
```yaml
name: Ruff
on: [ push, pull_request ]
jobs:
ruff:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: chartboost/ruff-action@v1
```
### Configuration
Ruff can be configured through a `pyproject.toml`, `ruff.toml`, or `.ruff.toml` file (see:
[_Configuration_](https://beta.ruff.rs/docs/configuration/), or [_Settings_](https://beta.ruff.rs/docs/settings/)
for a complete list of all configuration options).
If left unspecified, the default configuration is equivalent to:
```toml
[tool.ruff]
# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default.
select = ["E", "F"]
ignore = []
# Allow autofix for all enabled rules (when `--fix` is provided).
fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"]
unfixable = []
# Exclude a variety of commonly ignored directories.
exclude = [
".bzr",
".direnv",
".eggs",
".git",
".git-rewrite",
".hg",
".mypy_cache",
".nox",
".pants.d",
".pytype",
".ruff_cache",
".svn",
".tox",
".venv",
"__pypackages__",
"_build",
"buck-out",
"build",
"dist",
"node_modules",
"venv",
]
# Same as Black.
line-length = 88
# Allow unused variables when underscore-prefixed.
dummy-variable-rgx = "^(_+|(_+[a-zA-Z0-9_]*[a-zA-Z0-9]+?))$"
# Assume Python 3.10.
target-version = "py310"
[tool.ruff.mccabe]
# Unlike Flake8, default to a complexity level of 10.
max-complexity = 10
```
Some configuration options can be provided via the command-line, such as those related to
rule enablement and disablement, file discovery, logging level, and more:
```shell
ruff check path/to/code/ --select F401 --select F403 --quiet
```
See `ruff help` for more on Ruff's top-level commands, or `ruff help check` for more on the
linting command.
## Rules
<!-- Begin section: Rules -->
**Ruff supports over 500 lint rules**, many of which are inspired by popular tools like Flake8,
isort, pyupgrade, and others. Regardless of the rule's origin, Ruff re-implements every rule in
Rust as a first-party feature.
By default, Ruff enables Flake8's `E` and `F` rules. Ruff supports all rules from the `F` category,
and a [subset](https://beta.ruff.rs/docs/rules/#error-e) of the `E` category, omitting those
stylistic rules made obsolete by the use of an autoformatter, like
[Black](https://github.com/psf/black).
If you're just getting started with Ruff, **the default rule set is a great place to start**: it
catches a wide variety of common errors (like unused imports) with zero configuration.
Beyond the defaults, Ruff re-implements some of the most popular Flake8 plugins and related code
quality tools, including:
- [autoflake](https://pypi.org/project/autoflake/)
- [eradicate](https://pypi.org/project/eradicate/)
- [flake8-2020](https://pypi.org/project/flake8-2020/)
- [flake8-annotations](https://pypi.org/project/flake8-annotations/)
- [flake8-bandit](https://pypi.org/project/flake8-bandit/) ([#1646](https://github.com/charliermarsh/ruff/issues/1646))
- [flake8-blind-except](https://pypi.org/project/flake8-blind-except/)
- [flake8-boolean-trap](https://pypi.org/project/flake8-boolean-trap/)
- [flake8-bugbear](https://pypi.org/project/flake8-bugbear/)
- [flake8-builtins](https://pypi.org/project/flake8-builtins/)
- [flake8-commas](https://pypi.org/project/flake8-commas/)
- [flake8-comprehensions](https://pypi.org/project/flake8-comprehensions/)
- [flake8-datetimez](https://pypi.org/project/flake8-datetimez/)
- [flake8-debugger](https://pypi.org/project/flake8-debugger/)
- [flake8-django](https://pypi.org/project/flake8-django/)
- [flake8-docstrings](https://pypi.org/project/flake8-docstrings/)
- [flake8-eradicate](https://pypi.org/project/flake8-eradicate/)
- [flake8-errmsg](https://pypi.org/project/flake8-errmsg/)
- [flake8-executable](https://pypi.org/project/flake8-executable/)
- [flake8-gettext](https://pypi.org/project/flake8-gettext/)
- [flake8-implicit-str-concat](https://pypi.org/project/flake8-implicit-str-concat/)
- [flake8-import-conventions](https://github.com/joaopalmeiro/flake8-import-conventions)
- [flake8-logging-format](https://pypi.org/project/flake8-logging-format/)
- [flake8-no-pep420](https://pypi.org/project/flake8-no-pep420)
- [flake8-pie](https://pypi.org/project/flake8-pie/)
- [flake8-print](https://pypi.org/project/flake8-print/)
- [flake8-pyi](https://pypi.org/project/flake8-pyi/)
- [flake8-pytest-style](https://pypi.org/project/flake8-pytest-style/)
- [flake8-quotes](https://pypi.org/project/flake8-quotes/)
- [flake8-raise](https://pypi.org/project/flake8-raise/)
- [flake8-return](https://pypi.org/project/flake8-return/)
- [flake8-self](https://pypi.org/project/flake8-self/)
- [flake8-simplify](https://pypi.org/project/flake8-simplify/)
- [flake8-super](https://pypi.org/project/flake8-super/)
- [flake8-tidy-imports](https://pypi.org/project/flake8-tidy-imports/)
- [flake8-type-checking](https://pypi.org/project/flake8-type-checking/)
- [flake8-use-pathlib](https://pypi.org/project/flake8-use-pathlib/)
- [flynt](https://pypi.org/project/flynt/) ([#2102](https://github.com/charliermarsh/ruff/issues/2102))
- [isort](https://pypi.org/project/isort/)
- [mccabe](https://pypi.org/project/mccabe/)
- [pandas-vet](https://pypi.org/project/pandas-vet/)
- [pep8-naming](https://pypi.org/project/pep8-naming/)
- [pydocstyle](https://pypi.org/project/pydocstyle/)
- [pygrep-hooks](https://github.com/pre-commit/pygrep-hooks)
- [pyupgrade](https://pypi.org/project/pyupgrade/)
- [tryceratops](https://pypi.org/project/tryceratops/)
- [yesqa](https://pypi.org/project/yesqa/)
<!-- End section: Rules -->
For a complete enumeration of the supported rules, see [_Rules_](https://beta.ruff.rs/docs/rules/).
## Contributing
Contributions are welcome and highly appreciated. To get started, check out the
[**contributing guidelines**](https://beta.ruff.rs/docs/contributing/).
You can also join us on [**Discord**](https://discord.gg/c9MhzV8aU5).
## Support
Having trouble? Check out the existing issues on [**GitHub**](https://github.com/charliermarsh/ruff/issues),
or feel free to [**open a new one**](https://github.com/charliermarsh/ruff/issues/new).
You can also ask for help on [**Discord**](https://discord.gg/c9MhzV8aU5).
## Acknowledgements
Ruff's linter draws on both the APIs and implementation details of many other
tools in the Python ecosystem, especially [Flake8](https://github.com/PyCQA/flake8), [Pyflakes](https://github.com/PyCQA/pyflakes),
[pycodestyle](https://github.com/PyCQA/pycodestyle), [pydocstyle](https://github.com/PyCQA/pydocstyle),
[pyupgrade](https://github.com/asottile/pyupgrade), and [isort](https://github.com/PyCQA/isort).
In some cases, Ruff includes a "direct" Rust port of the corresponding tool.
We're grateful to the maintainers of these tools for their work, and for all
the value they've provided to the Python community.
Ruff's autoformatter is built on a fork of Rome's [`rome_formatter`](https://github.com/rome/tools/tree/main/crates/rome_formatter),
and again draws on both the APIs and implementation details of [Rome](https://github.com/rome/tools),
[Prettier](https://github.com/prettier/prettier), and [Black](https://github.com/psf/black).
Ruff is also influenced by a number of tools outside the Python ecosystem, like
[Clippy](https://github.com/rust-lang/rust-clippy) and [ESLint](https://github.com/eslint/eslint).
Ruff is the beneficiary of a large number of [contributors](https://github.com/charliermarsh/ruff/graphs/contributors).
Ruff is released under the MIT license.
## Who's Using Ruff?
Ruff is used by a number of major open-source projects and companies, including:
- Amazon ([AWS SAM](https://github.com/aws/serverless-application-model))
- [Apache Airflow](https://github.com/apache/airflow)
- AstraZeneca ([Magnus](https://github.com/AstraZeneca/magnus-core))
- Benchling ([Refac](https://github.com/benchling/refac))
- [Babel](https://github.com/python-babel/babel)
- [Bokeh](https://github.com/bokeh/bokeh)
- [Cryptography (PyCA)](https://github.com/pyca/cryptography)
- [DVC](https://github.com/iterative/dvc)
- [Dagger](https://github.com/dagger/dagger)
- [Dagster](https://github.com/dagster-io/dagster)
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Gradio](https://github.com/gradio-app/gradio)
- [Great Expectations](https://github.com/great-expectations/great_expectations)
- Hugging Face ([Transformers](https://github.com/huggingface/transformers), [Datasets](https://github.com/huggingface/datasets), [Diffusers](https://github.com/huggingface/diffusers))
- [Hatch](https://github.com/pypa/hatch)
- [Home Assistant](https://github.com/home-assistant/core)
- [Ibis](https://github.com/ibis-project/ibis)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [LangChain](https://github.com/hwchase17/langchain)
- [LlamaIndex](https://github.com/jerryjliu/llama_index)
- Matrix ([Synapse](https://github.com/matrix-org/synapse))
- Meltano ([Meltano CLI](https://github.com/meltano/meltano), [Singer SDK](https://github.com/meltano/sdk))
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python-sdk))
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
- [MegaLinter](https://github.com/oxsecurity/megalinter)
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel), [ONNX Runtime](https://github.com/microsoft/onnxruntime))
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
- [Neon](https://github.com/neondatabase/neon)
- [ONNX](https://github.com/onnx/onnx)
- [OpenBB](https://github.com/OpenBB-finance/OpenBBTerminal)
- [PDM](https://github.com/pdm-project/pdm)
- [PaddlePaddle](https://github.com/PaddlePaddle/Paddle)
- [Pandas](https://github.com/pandas-dev/pandas)
- [Poetry](https://github.com/python-poetry/poetry)
- [Polars](https://github.com/pola-rs/polars)
- [PostHog](https://github.com/PostHog/posthog)
- Prefect ([Python SDK](https://github.com/PrefectHQ/prefect), [Marvin](https://github.com/PrefectHQ/marvin))
- [PyInstaller](https://github.com/pyinstaller/pyinstaller)
- [PyTorch](https://github.com/pytorch/pytorch)
- [Pydantic](https://github.com/pydantic/pydantic)
- [Pylint](https://github.com/PyCQA/pylint)
- [Pynecone](https://github.com/pynecone-io/pynecone)
- [Robyn](https://github.com/sansyrox/robyn)
- Scale AI ([Launch SDK](https://github.com/scaleapi/launch-python-client))
- Snowflake ([SnowCLI](https://github.com/Snowflake-Labs/snowcli))
- [Saleor](https://github.com/saleor/saleor)
- [SciPy](https://github.com/scipy/scipy)
- [Sphinx](https://github.com/sphinx-doc/sphinx)
- [Stable Baselines3](https://github.com/DLR-RM/stable-baselines3)
- [Starlite](https://github.com/starlite-api/starlite)
- [The Algorithms](https://github.com/TheAlgorithms/Python)
- [Vega-Altair](https://github.com/altair-viz/altair)
- WordPress ([Openverse](https://github.com/WordPress/openverse))
- [ZenML](https://github.com/zenml-io/zenml)
- [Zulip](https://github.com/zulip/zulip)
- [build (PyPA)](https://github.com/pypa/build)
- [cibuildwheel (PyPA)](https://github.com/pypa/cibuildwheel)
- [delta-rs](https://github.com/delta-io/delta-rs)
- [featuretools](https://github.com/alteryx/featuretools)
- [meson-python](https://github.com/mesonbuild/meson-python)
- [nox](https://github.com/wntrblm/nox)
### Show Your Support
If you're using Ruff, consider adding the Ruff badge to your project's `README.md`:
```md
[](https://github.com/charliermarsh/ruff)
```
...or `README.rst`:
```rst
.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json
:target: https://github.com/charliermarsh/ruff
:alt: Ruff
```
...or, as HTML:
```html
<a href="https://github.com/charliermarsh/ruff"><img src="https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json" alt="Ruff" style="max-width:100%;"></a>
```
## License
MIT
<div align="center">
<a target="_blank" href="https://astral.sh" style="background:none">
<img src="https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/svg/Astral.svg">
</a>
</div>
| /ruff-0.0.267.tar.gz/ruff-0.0.267/README.md | 0.590779 | 0.909667 | README.md | pypi |
from dataclasses import dataclass
from typing import Callable, Generic, Iterator, Optional, TypeVar, Union
# Generic types
T = TypeVar("T")
E = TypeVar("E")
U = TypeVar("U")
F = TypeVar("F")
@dataclass
class Result(Generic[T, E]):
    """
    A Rust-style result type: the base interface shared by Ok and Err.

    ``value`` holds either the success value (of type T, in Ok) or the error
    value (of type E, in Err).  Every method on this base class raises
    NotImplementedError; the Ok and Err subclasses provide the behaviour.
    """

    # Either the Ok value (T) or the Err value (E), depending on the subclass.
    value: Union[T, E]

    def is_ok(self) -> bool:
        """
        Returns true if the result is Ok.
        """
        raise NotImplementedError

    def is_err(self) -> bool:
        """
        Returns true if the result is Err.
        """
        raise NotImplementedError

    def ok(self) -> Optional[T]:
        """
        Converts from Result[T, E] to Optional[T].
        Converts self into an Optional[T], consuming self, and discarding the error, if any.
        """
        raise NotImplementedError

    def err(self) -> Optional[E]:
        """
        Converts from Result[T, E] to Optional[E].
        Converts self into an Optional[E], consuming self, and discarding the success value, if any.
        """
        raise NotImplementedError

    def map(self, op: Callable[[T], U]) -> "Result[U, E]":
        """
        Maps a Result[T, E] to Result[U, E] by applying a function to a contained Ok value, leaving an Err value untouched.
        This function can be used to compose the results of two functions.
        """
        raise NotImplementedError

    def map_or(self, default: U, f: Callable[[T], U]) -> U:
        """
        Returns the provided default (if Err), or applies a function to the contained value (if Ok).
        Arguments passed to map_or are eagerly evaluated; if you are passing the result of a function call, it is recommended to use map_or_else, which is lazily evaluated.
        """
        raise NotImplementedError

    def map_or_else(self, default: Callable[[E], U], f: Callable[[T], U]) -> U:
        """
        Maps a Result[T, E] to U by applying fallback function default to a contained Err value, or function f to a contained Ok value.
        This function can be used to unpack a successful result while handling an error.
        """
        raise NotImplementedError

    def map_err(self, op: Callable[[E], F]) -> "Result[T, F]":
        """
        Maps a Result[T, E] to Result[T, F] by applying a function to a contained Err value, leaving an Ok value untouched.
        """
        raise NotImplementedError

    def iter(self) -> Iterator[Optional[T]]:
        """
        Returns an iterator over the possibly contained value.
        The iterator yields one value if the result is Ok, otherwise None.
        """
        raise NotImplementedError

    def expect(self, msg: str) -> T:
        """
        Returns the contained Ok value, consuming the self value.

        Raises:
            RuntimeError: If the value is an Err, with an error message including the passed message, and the content of the Err.
        """
        raise NotImplementedError

    def unwrap(self) -> T:
        """
        Returns the contained Ok value, consuming the self value.
        Because this function may raise a RuntimeError, its use is generally discouraged. Instead, prefer to use pattern matching and handle the Err case explicitly, or call unwrap_or, unwrap_or_else, or unwrap_or_default.

        Raises:
            RuntimeError: If the value is an Err, with an error message provided by the Err's value.
        """
        raise NotImplementedError

    def unwrap_or_default(self) -> T:
        """
        Returns the contained Ok value or a default.
        Consumes the self argument then, if Ok, returns the contained value, otherwise if Err, returns the default value for that type.
        """
        raise NotImplementedError

    def expect_err(self, msg: str) -> E:
        """
        Returns the contained Err value, consuming the self value.

        Raises:
            RuntimeError: If the value is an Ok, with an error message including the passed message, and the content of the Ok.
        """
        raise NotImplementedError

    def unwrap_err(self) -> E:
        """
        Returns the contained Err value, consuming the self value.

        Raises:
            RuntimeError: If the value is an Ok, with a custom error message provided by the Ok's value.
        """
        raise NotImplementedError

    def re_and(self, res: "Result[U, E]") -> "Result[U, E]":
        """
        Returns res if the result is Ok, otherwise returns the Err value of self.
        I would have called this `and` if it weren't already a keyword...
        """
        raise NotImplementedError

    def and_then(self, op: Callable[[T], "Result[U, E]"]) -> "Result[U, E]":
        """
        Calls op if the result is Ok, otherwise returns the Err value of self.
        This function can be used for control flow based on Result values.
        """
        raise NotImplementedError

    def re_or(self, res: "Result[T, F]") -> "Result[T, F]":
        """
        Returns res if the result is Err, otherwise returns the Ok value of self.
        Arguments passed to re_or are eagerly evaluated; if you are passing the result of a function call, it is recommended to use or_else, which is lazily evaluated.
        I would have called this `or` if it weren't already a keyword...
        """
        raise NotImplementedError

    def or_else(self, op: Callable[[E], "Result[T, F]"]) -> "Result[T, F]":
        """
        Calls op if the result is Err, otherwise returns the Ok value of self.
        This function can be used for control flow based on result values.
        """
        raise NotImplementedError

    def unwrap_or(self, default: T) -> T:
        """
        Returns the contained Ok value or a provided default.
        Arguments passed to unwrap_or are eagerly evaluated; if you are passing the result of a function call, it is recommended to use unwrap_or_else, which is lazily evaluated.
        """
        raise NotImplementedError

    def unwrap_or_else(self, op: Callable[[E], T]) -> T:
        """
        Returns the contained Ok value or computes it from a closure.
        """
        raise NotImplementedError
@dataclass
class Ok(Result):
    """The success variant of Result; ``value`` holds the Ok value of type T.

    See Result for the full contract of each method.
    """

    def is_ok(self) -> bool:
        return True

    def is_err(self) -> bool:
        return False

    def ok(self) -> Optional[T]:
        return self.value

    def err(self) -> Optional[E]:
        return None

    def map(self, op: Callable[[T], U]) -> "Result[U, E]":
        # @TODO: Figure out how to handle this mapping of types
        return Ok(op(self.value))  # type: ignore

    def map_or(self, default: U, f: Callable[[T], U]) -> U:
        return f(self.value)

    def map_or_else(self, default: Callable[[E], U], f: Callable[[T], U]) -> U:
        return f(self.value)

    def map_err(self, op: Callable[[E], F]) -> "Result[T, F]":
        return self

    def iter(self) -> Iterator[Optional[T]]:
        yield self.value

    def expect(self, msg: str) -> T:
        return self.value

    def unwrap(self) -> T:
        return self.value

    def unwrap_or_default(self) -> T:
        return self.value

    def expect_err(self, msg: str) -> E:
        raise RuntimeError(f"{msg}: {self.value}")

    def unwrap_err(self) -> E:
        raise RuntimeError(f"{self.value}")

    def re_and(self, res: "Result[U, E]") -> "Result[U, E]":
        # Fix: the original branched on res.is_err() but returned res on both
        # paths.  An Ok always yields the supplied result unchanged, whether
        # it is Ok or Err (mirrors Rust's Result::and).
        return res

    def and_then(self, op: Callable[[T], "Result[U, E]"]) -> "Result[U, E]":
        return op(self.value)

    def re_or(self, res: "Result[T, F]") -> "Result[T, F]":
        return self

    def or_else(self, op: Callable[[E], "Result[T, F]"]) -> "Result[T, F]":
        return self

    def unwrap_or(self, default: T) -> T:
        return self.value

    def unwrap_or_else(self, op: Callable[[E], T]) -> T:
        return self.value
@dataclass
class Err(Result):
    """The failure variant of Result; ``value`` holds the error value of type E.

    See Result for the full contract of each method.
    """

    def is_ok(self) -> bool:
        return False

    def is_err(self) -> bool:
        return True

    def ok(self) -> Optional[T]:
        return None

    def err(self) -> Optional[E]:
        return self.value

    def map(self, op: Callable[[T], U]) -> "Result[U, E]":
        return self

    def map_or(self, default: U, f: Callable[[T], U]) -> U:
        return default

    def map_or_else(self, default: Callable[[E], U], f: Callable[[T], U]) -> U:
        return default(self.value)

    def map_err(self, op: Callable[[E], F]) -> "Result[T, F]":
        # @TODO: Figure out how to handle this mapping of types
        return Err(op(self.value))  # type: ignore

    def iter(self) -> Iterator[Optional[T]]:
        yield None

    def expect(self, msg: str) -> T:
        raise RuntimeError(f"{msg}: {self.value}")

    def unwrap(self) -> T:
        raise RuntimeError(self.value)

    def unwrap_or_default(self) -> T:
        # NOTE(review): T is erased at runtime, so this falls back to
        # default-constructing the *error* value's class as a stand-in for
        # Rust's Default::default() -- confirm this is intended.
        c = self.value.__class__
        try:
            return c()
        except Exception as exc:
            # Fix: was a bare ``except:``, which would also swallow
            # KeyboardInterrupt/SystemExit; narrow it and chain the cause.
            raise RuntimeError(f"Cannot provide default value for: {c}") from exc

    def expect_err(self, msg: str) -> E:
        return self.value

    def unwrap_err(self) -> E:
        return self.value

    def re_and(self, res: "Result[U, E]") -> "Result[U, E]":
        return self

    def and_then(self, op: Callable[[T], "Result[U, E]"]) -> "Result[U, E]":
        return self

    def re_or(self, res: "Result[T, F]") -> "Result[T, F]":
        return res

    def or_else(self, op: Callable[[E], "Result[T, F]"]) -> "Result[T, F]":
        return op(self.value)

    def unwrap_or(self, default: T) -> T:
        return default

    def unwrap_or_else(self, op: Callable[[E], T]) -> T:
        return op(self.value)
from types import MethodType
from ruia import Spider
from ruia_motor.motor_base import MotorBase
class RuiaMotorInsert:
    """Callback-result holder describing a MongoDB insert performed via motor."""

    def __init__(self, db: str = None, *, collection: str, data: dict):
        """
        Describe a single insert operation.

        Motor doc: https://motor.readthedocs.io/en/stable/api-asyncio/asyncio_motor_collection.html#motor.motor_asyncio.AsyncIOMotorCollection.insert_one

        :param db: target database name; when None, the spider's
            mongodb_config database is used at process time.
        :param collection: target collection name.
        :param data: the document to insert.
        """
        self.db = db
        self.collection = collection
        self.data = data

    @staticmethod
    async def process(spider_ins, callback_result):
        """
        Execute the insert described by ``callback_result``.

        Errors are logged on the spider's logger rather than raised, so a
        failed write never aborts the crawl.

        :param spider_ins: the running spider (supplies connection and logger).
        :param callback_result: a RuiaMotorInsert instance.
        """
        target_db = callback_result.db or spider_ins.mongodb_config["db"]
        conn = spider_ins.motor_base.get_collection(
            db=target_db, collection=callback_result.collection
        )
        try:
            await conn.insert_one(document=callback_result.data)
        except Exception as exc:
            spider_ins.logger.error(f"<RuiaMotor: Insert error {exc}>")
class RuiaMotorUpdate:
    """Callback-result holder describing a MongoDB update performed via motor."""

    def __init__(
        self,
        db: str = None,
        *,
        collection: str,
        filter: dict,
        update: dict,
        upsert: bool = False,
    ):
        """
        Describe a single update operation.

        Motor doc: https://motor.readthedocs.io/en/stable/api-asyncio/asyncio_motor_collection.html#motor.motor_asyncio.AsyncIOMotorCollection.update_one

        :param db: target database name; when None, the spider's
            mongodb_config database is used at process time.
        :param collection: target collection name.
        :param filter: A query that matches the document to update.
        :param update: The modifications to apply.
        :param upsert: If True, perform an insert if no documents match the filter.
        """
        self.db = db
        self.collection = collection
        self.filter = filter
        self.update = update
        self.upsert = upsert

    @staticmethod
    async def process(spider_ins, callback_result):
        """
        Execute the update described by ``callback_result``.

        Errors are logged on the spider's logger rather than raised.

        :param spider_ins: the running spider (supplies connection and logger).
        :param callback_result: a RuiaMotorUpdate instance.
        """
        target_db = callback_result.db or spider_ins.mongodb_config["db"]
        conn = spider_ins.motor_base.get_collection(
            db=target_db, collection=callback_result.collection
        )
        try:
            await conn.update_one(
                filter=callback_result.filter,
                update=callback_result.update,
                upsert=callback_result.upsert,
            )
        except Exception as exc:
            spider_ins.logger.error(f"<RuiaMotor: Update error {exc}>")
def init_spider(*, spider_ins: Spider):
    """
    Wire the motor plugin into a Ruia spider instance.

    Validates the spider's ``mongodb_config``, attaches a MotorBase connection
    helper, and registers the insert/update callback-result processors as
    bound methods on the instance.

    :param spider_ins: the spider instance to initialise.
    :raises ValueError: if ``mongodb_config`` is missing or not a dict.
    """
    mongodb_config = getattr(spider_ins, "mongodb_config", None)
    if not (mongodb_config and isinstance(mongodb_config, dict)):
        raise ValueError(
            """
            RuiaMotor must have a param named mongodb_config, eg:
                mongodb_config = {
                    'username': '',
                    'password': '',
                    'host': '127.0.0.1',
                    'port': 27017,
                    'db': 'ruia_motor'
                }
            """
        )
    spider_ins.motor_base = MotorBase(
        mongodb_config=mongodb_config, loop=spider_ins.loop
    )
    spider_ins.callback_result_map = spider_ins.callback_result_map or {}
    # Map each callback-result class name to the spider attribute that will
    # hold its bound processor; binding via MethodType makes the spider
    # instance the implicit first argument of each process() call.
    handlers = {
        "RuiaMotorInsert": ("process_insert_callback_result", RuiaMotorInsert.process),
        "RuiaMotorUpdate": ("process_update_callback_result", RuiaMotorUpdate.process),
    }
    for result_name, (attr_name, handler) in handlers.items():
        setattr(spider_ins, attr_name, MethodType(handler, spider_ins))
        spider_ins.callback_result_map.update({result_name: attr_name})
from enum import Enum
from functools import wraps
from ssl import SSLContext
from types import MethodType
from typing import Callable, Dict
from typing import Optional as TOptional
from typing import Sequence, Tuple, Union
from peewee import DoesNotExist, Model, Query
from peewee_async import (
AsyncQueryWrapper,
Manager,
MySQLDatabase,
PooledMySQLDatabase,
PooledPostgresqlDatabase,
PostgresqlDatabase,
)
from pymysql import OperationalError
from ruia import Spider as RuiaSpider
from schema import And, Optional, Or, Schema, SchemaError, Use
class Spider(RuiaSpider):
    """Ruia spider annotated with the attributes this plugin uses at runtime.

    These are declarations only — the attributes are expected to be assigned
    externally (presumably during plugin setup; not initialised here).
    """

    # Peewee model/manager pair for each backend.
    mysql_model: Model
    mysql_manager: Manager
    postgres_model: Model
    postgres_manager: Manager
    # Underlying database connections (pooled or plain).
    mysql_db: Union[MySQLDatabase, PooledMySQLDatabase]
    postgres_db: Union[PostgresqlDatabase, PooledPostgresqlDatabase]
    # Optional cached query results used for filtering.
    mysql_filters: TOptional[AsyncQueryWrapper]
    postgres_filters: TOptional[AsyncQueryWrapper]
    # Callback-result processors bound onto the instance.
    process_insert_callback_result: Callable
    process_update_callback_result: Callable
class TargetDB(Enum):
    """Which backend database(s) a write operation targets."""

    MYSQL = 0
    POSTGRES = 1
    BOTH = 2  # write to both MySQL and PostgreSQL
def logging(func):
    """
    Decorator for async callback-result processors: run *func*, log the outcome.

    Database OperationalError is logged and swallowed; SchemaError from
    validation is logged and re-raised; on success the processor's return
    value is logged at INFO level.

    NOTE(review): this name shadows the stdlib ``logging`` module within this
    file, and the wrapper returns None rather than *func*'s result — both
    appear deliberate here but are worth confirming.
    """

    @wraps(func)
    async def decorator(spider_ins: Spider, callback_result):
        data = callback_result.data
        database = callback_result.database
        # Only update-style results carry a query attribute; its presence is
        # used below to label the failed operation in the log line.
        query = getattr(callback_result, "query", None)
        try:
            result = await func(spider_ins, callback_result)
        except OperationalError as ope:  # pragma: no cover
            method = "insert" if not query else "update"
            spider_ins.logger.error(
                f"<RuiaPeeweeAsync: {database.name} {method} data: {data} error: {ope}>"
            )
        except SchemaError as pae:
            spider_ins.logger.error(pae)
            raise pae
        else:
            spider_ins.logger.info(result)

    return decorator
def _raise_no_attr(target, fields, pre_msg):
for field in fields:
if hasattr(target, field):
continue
raise SchemaError(
f"<{pre_msg} error: callback_result should have {field} attribute>"
)
def _check_result(data: Tuple):
    """Validate a callback_result against an expected-attribute/type mapping.

    *data* is a tuple of (callback_result, {attr_name: expected_type}, message
    prefix).  Raises ``SchemaError`` on a missing attribute, an empty
    ``data``/``query`` value, or a type mismatch.
    """
    target, expected_types, pre_msg = data
    expected_types: Dict
    _raise_no_attr(target, expected_types.keys(), pre_msg)
    for attr_name, expected in expected_types.items():
        value = getattr(target, attr_name)
        # data and query must not be empty — an empty payload is meaningless
        if attr_name in ("data", "query") and not value:
            raise SchemaError(f"<{pre_msg} error: {attr_name} cannot be empty>")
        if not isinstance(value, expected):
            raise SchemaError(
                f"<{pre_msg} error: callback_result's {attr_name} should be a {expected}>"
            )
# Schema wrapper: Use() calls _check_result on the tuple passed to
# result_validator.validate(), surfacing any SchemaError raised there.
result_validator = Schema(Use(_check_result))
async def filter_func(data, manager, model, filters) -> bool:
    """Return True when a record matching *data* on the *filters* columns
    already exists in the database, False otherwise."""
    fields = [getattr(model, column) for column in filters]
    lookup = {field.name: data[field.name] for field in fields}
    try:
        await manager.get(model, **lookup)
    except DoesNotExist:
        return False
    return True
class RuiaPeeweeInsert:
    """Callback-result container for inserting scraped data via peewee-async.

    Instances are yielded from a Ruia spider callback; the spider routes them
    to :meth:`process` through ``callback_result_map`` (see ``init_spider``).
    """

    def __init__(
        self,
        data: Dict,
        database: TargetDB = TargetDB.MYSQL,
        filters: TOptional[Union[Sequence[str], str]] = None,
    ) -> None:
        """
        Args:
            data: A data that's going to be inserted into the database.
            database: The target database type.
            filters: Column name(s) used to skip rows that already exist.
        """
        self.data = data
        self.database = database
        self.filters = filters

    @staticmethod
    @logging
    async def process(spider_ins: Spider, callback_result):
        """Validate *callback_result* and insert its data into the target
        database(s); returns a human-readable status message."""
        # Validate shape/types first; result_validator raises SchemaError on
        # mismatch (re-raised by the @logging decorator).
        needs_check = (
            callback_result,
            {"data": dict, "database": TargetDB, "filters": (str, type(None), list)},
            "RuiaPeeweeAsync: insert process",
        )
        result_validator.validate(needs_check)
        data = callback_result.data
        database = callback_result.database
        filters = callback_result.filters
        # TargetDB.BOTH fans the insert out to MySQL and PostgreSQL.
        if database == TargetDB.BOTH:
            databases = [TargetDB.MYSQL.name, TargetDB.POSTGRES.name]
        else:
            databases = [database.name]
        msg = ""
        if isinstance(filters, str):
            filters = [filters]
        for database in databases:
            database = database.lower()
            # Managers/models were attached to the spider by init_spider().
            manager: Manager = getattr(spider_ins, f"{database}_manager")
            model: Model = getattr(spider_ins, f"{database}_model")
            if filters:
                # Skip rows whose filter columns already match an existing record.
                filtered = await filter_func(data, manager, model, filters)
                if filtered:
                    msg += (
                        f"<RuiaPeeweeAsync: data: {data} was filtered by filters: {filters},"
                        f" won't insert into {database.upper()}>\n"
                    )
                    continue
                msg += (
                    f"<RuiaPeeweeAsync: data: {data} wasn't filtered by filters: {filters}, "
                    f"success insert into {database.upper()}>\n"
                )
            await manager.create(model, **data)
        if msg:
            return msg
        # No filters were configured: report a plain success message.
        return f"<RuiaPeeweeAsync: Success insert {data} into database: {databases}>"
class RuiaPeeweeUpdate:
    """Callback-result container for updating scraped data via peewee-async.

    Instances are yielded from a Ruia spider callback; the spider routes them
    to :meth:`process` through ``callback_result_map`` (see ``init_spider``).
    """

    def __init__(
        self,
        data: Dict,
        query: Union[Query, Dict],
        database: TargetDB = TargetDB.MYSQL,
        filters: TOptional[Union[Sequence[str], str]] = None,
        create_when_not_exists: bool = True,
        not_update_when_exists: bool = True,
        only: TOptional[Sequence[str]] = None,
    ) -> None:
        """
        Args:
            data: A dict that's going to be updated in the database.
            query: A peewee's query or a dict to search for the target data in database.
            database: The target database type.
            filters: A str or List[str] of columns to avoid duplicate data and avoid unnecessary query execute.
            create_when_not_exists: Default is True. If True, will create a record when query can't get the record.
            not_update_when_exists: Default is True. If True and record exists, won't update data to the records.
            only: A list or tuple of fields that should be updated only.
        """
        self.data = data
        self.query = query
        self.database = database
        self.filters = filters
        self.create_when_not_exists = create_when_not_exists
        self.not_update_when_exists = not_update_when_exists
        self.only = only

    @staticmethod
    async def _deal_update(
        spider_ins,
        data,
        query,
        filters,
        create_when_not_exists,
        not_update_when_exists,
        only,
        databases,
    ):  # pylint: disable=too-many-locals
        """Run the update against every backend in *databases* and return a
        human-readable status message."""
        msg = ""
        if isinstance(filters, str):
            filters = [filters]
        for database in databases:
            database = database.lower()
            manager: Manager = getattr(spider_ins, f"{database}_manager")
            model: Model = getattr(spider_ins, f"{database}_model")
            if filters:
                filtered = await filter_func(data, manager, model, filters)
                if filtered:
                    msg += f"<RuiaPeeweeAsync: data: {data} was filtered by filters: {filters}\n"
                    continue
                msg += f"<RuiaPeeweeAsync: data: {data} wasn't filtered by filters: {filters}\n"
            try:
                model_ins = await manager.get(model, **query)
            except DoesNotExist:
                if create_when_not_exists:
                    await manager.create(model, **data)
                    msg += f"<RuiaPeeweeAsync: data: {data} not exists in {database.upper()}, but success created>\n"
                else:
                    # BUGFIX: this message used to be appended unconditionally,
                    # i.e. even right after a successful create above.
                    msg += (
                        f"<RuiaPeeweeAsync: data: {data} not exists in {database.upper()}, "
                        "won't create it because create_when_not_exists is False>\n"
                    )
            else:
                if not_update_when_exists:
                    msg += (
                        f"<RuiaPeeweeAsync: Won't update {data} in {database.upper()} "
                        "because not_update_when_exists is True>\n"
                    )
                    continue
                # Merge the new values into the fetched row, then persist only
                # the requested fields (all changed fields when only is None).
                model_ins.__data__.update(data)
                await manager.update(model_ins, only=only)
        if msg:
            return msg
        return f"<RuiaPeeweeAsync: Updated {data} in {databases}>"

    @staticmethod
    async def _update(
        spider_ins,
        data,
        query,
        filters,
        database,
        create_when_not_exists,
        not_update_when_exists,
        only,
    ):
        """Expand ``TargetDB.BOTH`` into both backends and delegate to
        :meth:`_deal_update`."""
        if database == TargetDB.BOTH:
            databases = [TargetDB.MYSQL.name, TargetDB.POSTGRES.name]
        else:
            databases = [database.name]
        return await RuiaPeeweeUpdate._deal_update(
            spider_ins,
            data,
            query,
            filters,
            create_when_not_exists,
            not_update_when_exists,
            only,
            databases,
        )

    @staticmethod
    @logging
    async def process(spider_ins, callback_result):
        """Validate *callback_result* and perform the update(s); returns a
        human-readable status message."""
        data = callback_result.data
        database = callback_result.database
        query = callback_result.query
        filters = callback_result.filters
        create_when_not_exists = callback_result.create_when_not_exists
        not_update_when_exists = callback_result.not_update_when_exists
        only = callback_result.only
        # Validate shape/types; result_validator raises SchemaError on
        # mismatch (re-raised by the @logging decorator).
        needs_check = (
            callback_result,
            {
                "data": dict,
                "database": TargetDB,
                "query": (Query, dict),
                "filters": (str, type(None), list),
                "create_when_not_exists": bool,
                "not_update_when_exists": bool,
                "only": (list, tuple, type(None)),
            },
            "RuiaPeeweeAsync: update process",
        )
        result_validator.validate(needs_check)
        return await RuiaPeeweeUpdate._update(
            spider_ins,
            data,
            query,
            filters,
            database,
            create_when_not_exists,
            not_update_when_exists,
            only,
        )
def init_spider(*, spider_ins: Spider):
    """Wire database models, managers and callback handlers onto *spider_ins*.

    Creates the configured tables, then binds the insert/update ``process``
    coroutines as methods on the spider and registers them in its
    ``callback_result_map`` so Ruia can dispatch callback results to them.
    """
    create_model(
        spider_ins=spider_ins,
        create_table=True,
        mysql=getattr(spider_ins, "mysql_config", {}),
        postgres=getattr(spider_ins, "postgres_config", {}),
    )
    if not spider_ins.callback_result_map:
        spider_ins.callback_result_map = {}
    bindings = (
        ("RuiaPeeweeInsert", "process_insert_callback_result", RuiaPeeweeInsert.process),
        ("RuiaPeeweeUpdate", "process_update_callback_result", RuiaPeeweeUpdate.process),
    )
    for result_cls_name, attr_name, handler in bindings:
        setattr(spider_ins, attr_name, MethodType(handler, spider_ins))
        spider_ins.callback_result_map[result_cls_name] = attr_name
def check_config(kwargs) -> Tuple:
    """Validate the ``mysql``/``postgres`` configuration given to RuiaPeeweeAsync.

    At least one of ``mysql_config`` / ``postgres_config`` must be provided on
    the spider, e.g.::

        mysql_config = {
            'user': 'yourusername',
            'password': 'yourpassword',
            'host': '127.0.0.1',
            'port': 3306,
            'database': 'ruia_mysql',
            'model': {
                'table_name': 'ruia_mysql_table',
                'title': CharField(),
                'url': CharField(),
            },
        }

    (``postgres_config`` has the same shape, typically with port 5432.)

    Args:
        kwargs: mapping with optional ``mysql`` and ``postgres`` entries.

    Returns:
        Tuple of ``(mysql, mysql_model, postgres, postgres_model)`` where the
        config dicts default to ``{}`` and the models to ``None``.

    Raises:
        SchemaError: if the configuration does not match the expected schema.
    """
    conf_validator = Schema(
        {
            Or("mysql", "postgres"): Or(
                None,
                And(
                    {
                        "host": And(str),
                        "user": And(str),
                        "password": And(str),
                        "database": And(str),
                        "model": And({"table_name": And(str), str: object}),
                        Optional("port"): And(int),
                        Optional("ssl"): And(SSLContext),
                        Optional("pool"): And(bool),
                        Optional("min_connections"): And(
                            int, lambda mic: 1 <= mic <= 10
                        ),
                        Optional("max_connections"): And(
                            int, lambda mac: 10 < mac <= 20
                        ),
                    }
                ),
            )
        }
    )
    kwval = conf_validator.validate(kwargs)
    # A key may validate as an explicit None — fall back to {} so the
    # .get("model") lookups below are safe.
    mysql = kwval.get("mysql") or {}
    postgres = kwval.get("postgres") or {}
    mysql_model = mysql.get("model", None)
    postgres_model = postgres.get("model", None)
    return mysql, mysql_model, postgres, postgres_model
def after_start(**kwargs):
    """Build Ruia's ``after_start`` hook that attaches the validated database
    configs to the spider and initializes models, managers and callbacks."""
    mysql_conf, mysql_model, pg_conf, pg_model = check_config(kwargs)

    async def init_after_start(spider_ins):
        # Only attach a config when both the connection settings and a model
        # definition were supplied.
        for conf, model, attr in (
            (mysql_conf, mysql_model, "mysql_config"),
            (pg_conf, pg_model, "postgres_config"),
        ):
            if conf and model:
                setattr(spider_ins, attr, conf)
        init_spider(spider_ins=spider_ins)

    return init_after_start
async def before_stop(spider_ins):
    """Ruia ``before_stop`` hook: close any open database manager connections."""
    for attr in ("postgres_manager", "mysql_manager"):
        if hasattr(spider_ins, attr):
            await getattr(spider_ins, attr).close()
def create_model(spider_ins=None, create_table=False, **kwargs) -> Tuple:
    """Build peewee database objects, managers and model classes from the
    ``mysql``/``postgres`` config dicts in *kwargs*.

    A pooled database class is used whenever the config contains a ``pool``
    key; the ``model`` sub-dict is turned into a dynamically created peewee
    Model class named after its ``table_name``.  When *spider_ins* is given,
    the db/model/manager objects are attached to it; when *create_table* is
    True the tables are created synchronously.

    Returns a 2-tuple ``(model, manager)`` if only one backend is configured,
    otherwise the 4-tuple ``(mysql_model, mysql_manager, postgres_model,
    postgres_manager)`` (entries are None for unconfigured backends).
    """
    mysql, postgres = kwargs.get("mysql", {}), kwargs.get("postgres", {})
    mysql_mconf = mysql.get("model", {})
    postgres_mconf = postgres.get("model", {})
    mysql_model, mysql_manager, postgres_model, postgres_manager = (
        None,
        None,
        None,
        None,
    )
    if mysql:
        # "model" and "pool" are config-only keys, not database kwargs.
        mysql_db = (
            PooledMySQLDatabase(
                **{
                    key: val
                    for key, val in mysql.items()
                    if key not in ("model", "pool")
                }
            )
            if "pool" in mysql
            else MySQLDatabase(
                **{key: val for key, val in mysql.items() if key != "model"}
            )
        )
        mysql_manager = Manager(mysql_db)
        # Build the model class dynamically: peewee reads the database from
        # an inner Meta class.
        meta = type("Meta", (object,), {"database": mysql_db})
        # table_name is popped so it does not become a model field ...
        table_name = mysql_mconf.pop("table_name")
        mysql_mconf["Meta"] = meta
        mysql_model = type(table_name, (Model,), mysql_mconf)
        if spider_ins:
            spider_ins.mysql_db = mysql_db
            spider_ins.mysql_model = mysql_model
            spider_ins.mysql_manager = mysql_manager
        if create_table:
            # Table creation is synchronous; allow_sync() permits it inside
            # the async manager.
            with mysql_manager.allow_sync():
                mysql_model.create_table(True)
        # ... and restored afterwards so the caller's config dict is unchanged.
        mysql_mconf["table_name"] = table_name
    if postgres:
        # Same procedure as for MySQL above, with the PostgreSQL classes.
        postgres_db = (
            PooledPostgresqlDatabase(
                **{
                    key: val
                    for key, val in postgres.items()
                    if key not in ("model", "pool")
                }
            )
            if "pool" in postgres
            else PostgresqlDatabase(
                **{key: val for key, val in postgres.items() if key != "model"}
            )
        )
        postgres_manager = Manager(postgres_db)
        meta = type("Meta", (object,), {"database": postgres_db})
        table_name = postgres_mconf.pop("table_name")
        postgres_mconf["Meta"] = meta
        postgres_model = type(table_name, (Model,), postgres_mconf)
        if spider_ins:
            spider_ins.postgres_db = postgres_db
            spider_ins.postgres_model = postgres_model
            spider_ins.postgres_manager = postgres_manager
        if create_table:
            with postgres_manager.allow_sync():
                postgres_model.create_table(True)
        postgres_mconf["table_name"] = table_name
    if mysql and not postgres:
        return mysql_model, mysql_manager
    if postgres and not mysql:
        return postgres_model, postgres_manager
    return mysql_model, mysql_manager, postgres_model, postgres_manager
import numpy as np
import pandas as pd
from scipy.stats import gamma
from scipy.stats import norm
from scipy.signal import detrend
'''
Scaled distribution mapping for climate data
This is an excerpt from pyCAT, following the method of Switanek et al. (2017); it contains the functions to perform a relative and an absolute bias correction on climate data.
(cc) c.jackisch@tu-braunschweig.de, July 2020
It is intended to be used on pandas time series at single locations/pixels.
Switanek, M. B., P. A. Troch, C. L. Castro, A. Leuprecht, H.-I. Chang, R. Mukherjee, and E. M. C. Demaria (2017), Scaled distribution mapping: a bias correction method that preserves raw climate model projected changes, Hydrol. Earth Syst. Sci., 21(6), 2649–2666, https://doi.org/10.5194/hess-21-2649-2017
'''
def relSDM(obs, mod, sce, cdf_threshold=0.9999999, lower_limit=0.1):
    '''relative scaled distribution mapping assuming a gamma distributed parameter (with lower limit zero)
    rewritten from pyCAT for 1D data
    obs :: observed variable time series
    mod :: modelled variable for same time series as obs
    sce :: to unbias modelled time series
    cdf_threshold :: upper and lower threshold of CDF
    lower_limit :: lower limit of data signal (values below will be masked!)
    returns corrected timeseries
    tested with pandas series.
    '''
    # keep only "events" at/above the signal threshold (e.g. wet days for precipitation)
    obs_r = obs[obs >= lower_limit]
    mod_r = mod[mod >= lower_limit]
    sce_r = sce[sce >= lower_limit]
    # event frequencies of each series
    obs_fr = 1. * len(obs_r) / len(obs)
    mod_fr = 1. * len(mod_r) / len(mod)
    sce_fr = 1. * len(sce_r) / len(sce)
    # remember the rank order of the full scenario so corrected values can be
    # written back to their original positions at the end
    sce_argsort = np.argsort(sce)
    # fit gamma distributions (location fixed at zero) to the event values
    obs_gamma = gamma.fit(obs_r, floc=0)
    mod_gamma = gamma.fit(mod_r, floc=0)
    sce_gamma = gamma.fit(sce_r, floc=0)
    obs_cdf = gamma.cdf(np.sort(obs_r), *obs_gamma)
    mod_cdf = gamma.cdf(np.sort(mod_r), *mod_gamma)
    # cap CDF values so the 1/(1-cdf) transforms below stay finite
    obs_cdf[obs_cdf > cdf_threshold] = cdf_threshold
    mod_cdf[mod_cdf > cdf_threshold] = cdf_threshold
    # expected number of scenario event days, scaled by the obs/mod frequency ratio
    expected_sce_raindays = min(int(np.round(len(sce) * obs_fr * sce_fr / mod_fr)), len(sce))
    sce_cdf = gamma.cdf(np.sort(sce_r), *sce_gamma)
    sce_cdf[sce_cdf > cdf_threshold] = cdf_threshold
    # interpolate cdf-values for obs and mod to the length of the scenario
    obs_cdf_intpol = np.interp(np.linspace(1, len(obs_r), len(sce_r)), np.linspace(1, len(obs_r), len(obs_r)), obs_cdf)
    mod_cdf_intpol = np.interp(np.linspace(1, len(mod_r), len(sce_r)), np.linspace(1, len(mod_r), len(mod_r)), mod_cdf)
    # adapt the observation cdfs
    # recurrence intervals 1/(1-F); combining them scales the observed
    # distribution by the model-projected change (Switanek et al. 2017)
    obs_inverse = 1. / (1 - obs_cdf_intpol)
    mod_inverse = 1. / (1 - mod_cdf_intpol)
    sce_inverse = 1. / (1 - sce_cdf)
    adapted_cdf = 1 - 1. / (obs_inverse * sce_inverse / mod_inverse)
    adapted_cdf[adapted_cdf < 0.] = 0.
    # correct by adapted observation cdf-values
    xvals = gamma.ppf(np.sort(adapted_cdf), *obs_gamma) * gamma.ppf(sce_cdf, *sce_gamma) / gamma.ppf(sce_cdf,
                                                                                                     *mod_gamma)
    # interpolate to the expected length of future raindays
    correction = np.zeros(len(sce))
    if len(sce_r) > expected_sce_raindays:
        # fewer event days expected than modelled: shrink by interpolation
        xvals = np.interp(np.linspace(1, len(sce_r), expected_sce_raindays), np.linspace(1, len(sce_r), len(sce_r)),
                          xvals)
    else:
        # more event days expected: pad the low end with zeros
        xvals = np.hstack((np.zeros(expected_sce_raindays - len(sce_r)), xvals))
    # assign corrected values to the largest scenario days; the rest stay zero
    correction[sce_argsort[-expected_sce_raindays:]] = xvals
    return pd.Series(correction, index=sce.index)
def absSDM(obs, mod, sce, cdf_threshold=0.9999999):
    '''absolute scaled distribution mapping assuming a normal distributed parameter
    rewritten from pyCAT for 1D data
    obs :: observed variable time series
    mod :: modelled variable for same time series as obs
    sce :: to unbias modelled time series
    cdf_threshold :: upper and lower threshold of CDF
    returns corrected timeseries
    tested with pandas series.
    '''
    obs_len = len(obs)
    mod_len = len(mod)
    sce_len = len(sce)
    obs_mean = np.mean(obs)
    mod_mean = np.mean(mod)
    smean = np.mean(sce)
    # remove linear trends; the scenario trend is added back at the end
    odetrend = detrend(obs)
    mdetrend = detrend(mod)
    sdetrend = detrend(sce)
    # fit normal distributions to the detrended series
    obs_norm = norm.fit(odetrend)
    mod_norm = norm.fit(mdetrend)
    sce_norm = norm.fit(sdetrend)
    # trend component of the scenario (original minus detrended signal)
    sce_diff = sce - sdetrend
    # rank order of the detrended scenario, used to restore positions later
    sce_argsort = np.argsort(sdetrend)
    obs_cdf = norm.cdf(np.sort(odetrend), *obs_norm)
    mod_cdf = norm.cdf(np.sort(mdetrend), *mod_norm)
    sce_cdf = norm.cdf(np.sort(sdetrend), *sce_norm)
    # clip both CDF tails to avoid division by zero in the transforms below
    obs_cdf = np.maximum(np.minimum(obs_cdf, cdf_threshold), 1 - cdf_threshold)
    mod_cdf = np.maximum(np.minimum(mod_cdf, cdf_threshold), 1 - cdf_threshold)
    sce_cdf = np.maximum(np.minimum(sce_cdf, cdf_threshold), 1 - cdf_threshold)
    # interpolate cdf-values for obs and mod to the length of the scenario
    obs_cdf_intpol = np.interp(np.linspace(1, obs_len, sce_len), np.linspace(1, obs_len, obs_len), obs_cdf)
    mod_cdf_intpol = np.interp(np.linspace(1, mod_len, sce_len), np.linspace(1, mod_len, mod_len), mod_cdf)
    # adapt the observation cdfs
    # split the tails of the cdfs around the center
    obs_cdf_shift = obs_cdf_intpol - .5
    mod_cdf_shift = mod_cdf_intpol - .5
    sce_cdf_shift = sce_cdf - .5
    obs_inverse = 1. / (.5 - np.abs(obs_cdf_shift))
    mod_inverse = 1. / (.5 - np.abs(mod_cdf_shift))
    sce_inverse = 1. / (.5 - np.abs(sce_cdf_shift))
    adapted_cdf = np.sign(obs_cdf_shift) * (1. - 1. / (obs_inverse * sce_inverse / mod_inverse))
    # values that fell below zero belong to the upper tail: shift them back into [0, 1]
    adapted_cdf[adapted_cdf < 0] += 1.
    adapted_cdf = np.maximum(np.minimum(adapted_cdf, cdf_threshold), 1 - cdf_threshold)
    # corrected values: adapted obs quantiles plus the std-scaled model change term
    # (obs_norm[-1] / mod_norm[-1] is the ratio of the fitted scale parameters)
    xvals = norm.ppf(np.sort(adapted_cdf), *obs_norm) \
            + obs_norm[-1] / mod_norm[-1] \
            * (norm.ppf(sce_cdf, *sce_norm) - norm.ppf(sce_cdf, *mod_norm))
    # re-center: observed mean shifted by the modelled mean change
    xvals -= xvals.mean()
    xvals += obs_mean + (smean - mod_mean)
    correction = np.zeros(sce_len)
    correction[sce_argsort] = xvals
    # add the scenario trend back (relative to the scenario mean)
    correction += sce_diff - smean
    # NOTE(review): unlike relSDM this returns a numpy array, not a pd.Series —
    # confirm callers do not rely on a pandas index here.
    return correction
def SDM(obs, mod, sce, meth='rel', cdf_threshold=0.9999999, lower_limit=0.1):
    '''scaled distribution mapping - wrapper to relative and absolute bias correction functions
    rewritten from pyCAT for 1D data
    obs :: observed variable time series
    mod :: modelled variable for same time series as obs
    sce :: to unbias modelled time series
    meth :: 'rel' for relative SDM, else absolute SDM will be performed
    cdf_threshold :: upper and lower threshold of CDF
    lower_limit :: lower limit of data signal (values below will be masked when meth != 'rel')
    The original authors suggest to use the absolute SDM for air temperature and the relative SDM for precipitation and radiation series.
    returns corrected timeseries
    tested with pandas series.
    '''
    # any meth other than 'rel' selects the absolute variant
    if meth != 'rel':
        return absSDM(obs, mod, sce, cdf_threshold)
    return relSDM(obs, mod, sce, cdf_threshold, lower_limit)
import streamlit as st
import pandas as pd
from ruins.core import DataManager
# Human-readable labels for the supported climate indices; the keys are the
# identifiers accepted by climate_index_agg().
INDICES = dict(
    summer='Summer days (Tmax ≥ 25°C)',
    ice='Ice days (Tmax < 0°C)',
    frost='Frost days (Tmin < 0°C)',
    hot='Hot days (Tmax ≥ 30°C)',
    tropic='Tropic nights (Tmin ≥ 20°C)',
    rainy='Rainy days (Precip ≥ 1mm)'
)
def climate_index_agg(ts, index):
    """Count the yearly number of days matching the given climate index.

    *ts* is a pandas Series with a DatetimeIndex; NA values are dropped before
    counting.  Returns a Series of per-year counts.  Note that 'ice' and
    'frost' apply the same threshold — the caller is expected to pass Tmax for
    'ice' and Tmin for 'frost'.
    """
    predicates = {
        'summer': lambda s: s >= 25.,   # summer days
        'ice': lambda s: s < 0.,        # ice days
        'frost': lambda s: s < 0.,      # frost days
        'hot': lambda s: s >= 30.,      # hot days
        'tropic': lambda s: s >= 20.,   # tropic nights
        'rainy': lambda s: s >= 1.,     # rainy days
    }
    if index not in predicates:
        raise ValueError(f"The Index {index} is not supported. Use one of: {','.join(INDICES.keys())}")
    # drop NA
    ts = ts.dropna()
    return predicates[index](ts).groupby(ts.index.year).sum()
@st.experimental_memo
def calculate_climate_indices(_dataManager: DataManager, station: str, variable: str, ci: str, rolling_windows=(10, 5), rolling_center=True, rcps=('rcp26', 'rcp45', 'rcp85')) -> pd.DataFrame:
    """
    Calculates all relevant climate indices for the given climate data, as configured in the DataManager.
    The procedure will return a pandas DataFrame with aggregated index information for the weather data.
    For each of the available RCP scenarios, the indices are calculated as well.
    By default, for each scenario and the weather data, a rolling mean is calculated
    Parameters
    ----------
    _dataManager : ruins.core.DataManager
        DataManager instance containing the 'weather' and 'climate' data.
        The leading underscore tells st.experimental_memo not to hash this
        argument when caching.
    station : str
        Station name for filtering weather data. Has to exist as data variable
        in the weather netCDF
    variable : str
        Variable name for filtering. Has to exist as dimension value in both,
        the weather and climate netCDF
    ci : str
        Index name. Can be any key of ruins.processing.climate_indices.INDICES
    rolling_windows : Tuple[int, int]
        The window sizes for weather (0) and climate (1) rolling means
    rolling_center : bool
        If True (default), the rollwing window center will be used as value
    rcps : List[str]
        Short names of the RCP scenarios to include. Usually only
        ('rcp26', 'rcp45', 'rcp85') are available.
    Returns
    -------
    data : pd.DataFrame
        DataFrame with all calcualted indices and the year as index
    """
    dataManager = _dataManager
    # load data
    # weather: pick the station's variable column and drop missing days
    weather = dataManager['weather'].read()[station].sel(vars=variable).to_dataframe()[station].dropna()
    climate = dataManager['cordex_krummh'].read().sel(vars=variable).to_dataframe()
    # 'vars' is a selection artifact column, not a model series
    climate.drop('vars', axis=1, inplace=True)
    # get weather index and rolling
    data = pd.DataFrame(climate_index_agg(weather, ci).astype(int))
    data.columns = [variable]
    data['rolling'] = data.rolling(rolling_windows[0], center=rolling_center).mean()
    # get climate index
    # one per-year count column per climate model run, outer-joined on year
    for col in climate.columns:
        df = pd.DataFrame(climate_index_agg(climate[col], ci).astype(int))
        data = pd.merge(data, df, right_index=True, left_index=True, how='outer')
    # get RCP rolling
    for rcp in rcps:
        # select columns that end with rcp
        criteria = [c.endswith(rcp) for c in data.columns]
        # subset
        df = data[data.columns[criteria]]
        # rolling mean of mean rcp values
        roll = df.mean(axis=1).rolling(rolling_windows[1], center=rolling_center).mean()
        roll = pd.DataFrame(index=roll.index, data={f'{rcp}.rolling': roll.values})
        # add back to data
        data = pd.merge(data, roll, right_index=True, left_index=True, how='outer')
    return data
import streamlit as st
from ruins.core import DataManager, Config
# Translation payloads for the model-scale selector component.
# NOTE(review): the German dict's name is missing an 'L' ("_TRANSATE_DE") —
# kept as-is because the call sites reference this exact name.
_TRANSLATE_EN = dict(
    title='Climate models have scale',
    description="""
There are many scientists working in different groups to build and enhance computer models of the climate physics.
These models operate at different scales in space and time. In our case, we refer to i) **global climate model**
projections with a spatial resolution of approx. 120 km of one pixel at the surface and to ii)
**regional climate model** projections with a spatial resolution of approx. 11 km.
As the name suggests, global climate models (or GCMs) cover the whole globe.
Their speciality is to simulate the overall physics of the atmosphere and land surface interactions.
The regional climate models (or RCMs) build on top of GCMs and build physical and/or statistical relationships
between GCM-outputs and locally observed weather. They seek to compensate the very coarse approximations of GCMs at
the price of more data being extrapolated into the future.
For more detailed reading, please continue here:
[CarbonBrief - How do climate models work?](https://www.carbonbrief.org/qa-how-do-climate-models-work)
""",
    options={
        'Global': 'Global model',
        'Regional': 'Regional model'
    }
)
_TRANSATE_DE = dict(
    title='Skalierung von Klimamodellen',
    description="""
Es gibt unzählige Wissenschaftler_innen und Institutionen, die an einer Verbesserung der Computermodelle und dem
zugrundeliegenden physikalischen Verständnis arbeiten. Diese Modelle operieren auf verschiedenen *räumlichen* und
*zeitlichen* Skalen. In dieser App haben **Globale Klimamodelle** eine räumliche Auflösung von ca. 120km pro Pixel an der
Erdoberfläche. **Regionale Klimamodelle** haben hingegen eine räumliche Auflösung von ca. 11km.
Dafür bedecken globale Modelle, wie der Name schon sagt, die ganze Welt. Sie sind besonders gut geeignet um globale Strömungen
und Interaktionen zwischen der Atmosphäre und der Landoberfläche zu simulieren.
Die regionalen Modelle (RCMs) bauen dann weiter auf den (GCMs) auf, indem Sie globale Modelle entweder über statistische
oder physikalische Zusammenhänge mit lokal erhobenen Messdaten verknüpfen. Damit soll die grobe Auflösung von GCMs
wettgemacht werden.
Für mehr Informationen, lies hier weiter:
[CarbonBrief - Wie funktionieren Klimamodelle?](https://www.carbonbrief.org/qa-how-do-climate-models-work)
""",
    options={
        'Global': 'Globales Modell',
        'Regional': 'Regionales Modell'
    }
)
def model_scale_selector(dataManager: DataManager, config: Config, expander_container=st.sidebar, **kwargs):
    """Render the model-scale selection step.

    If ``climate_scale`` is already configured, only a radio control bound to
    ``st.session_state.climate_scale`` is placed in *expander_container* and
    the function returns.  Otherwise an introductory page with two buttons is
    drawn and script execution is halted (``st.stop``) until the user picks a
    model scale, which triggers a rerun.
    """
    # get the container
    container = st if 'container' not in kwargs else kwargs['container']
    # get a translator
    t = config.translator(en=_TRANSLATE_EN, de=_TRANSATE_DE)
    # get the translated options
    OPTIONS = t('options')
    # check if main page was already shown
    if config.has_key('climate_scale'):
        expander_container.radio(
            'Climate model' if config.lang == 'en' else 'Klimamodell',
            options=list(OPTIONS.keys()),
            format_func=lambda k: OPTIONS.get(k),
            key='climate_scale'
        )
        return
    # add the title
    container.title(t('title'))
    # add the description
    container.markdown(t('description'), unsafe_allow_html=True)
    # add the columns
    right, left = container.columns(2)
    # placeholder until real thumbnail images exist
    right.info('THIS IS A THUMBNAIL')
    use_global = right.button('USE GLOBAL MODELS' if config.lang == 'en' else 'GLOBALES MODELL')
    left.info('THIS IS A THUMBNAIL')
    use_regional = left.button('USE REGIONAL MODELS' if config.lang == 'en' else 'REGIONALES MODELL')
    # persist the choice and rerun so the radio branch above is taken next time
    if use_global:
        st.session_state.climate_scale = 'Global'
        st.experimental_rerun()
    elif use_regional:
        st.session_state.climate_scale = 'Regional'
        st.experimental_rerun()
    else:
        # no choice yet: halt script execution here
        st.stop()
def model_scale_select(dataManager: DataManager, config: Config, expander_container=st.sidebar, **kwargs):
    """Entry point for the model-scale component.

    Outside story mode (or when ``climate_scale`` is already configured) the
    selection is pre-seeded into session state so the selector renders only
    its compact radio control.
    """
    # kwargs override the config value; default is story mode on
    story_mode = kwargs.get('story_mode', config.get('story_mode', True))
    # NOTE: config.has_key must be evaluated first — it mirrors the value
    # into session state as a side effect
    if config.has_key('climate_scale') or not story_mode:
        st.session_state.climate_scale = config.get('climate_scale', 'Global')
    model_scale_selector(dataManager, config, expander_container=expander_container)
def debug_main(**kwargs):
    """Standalone debug entry point: build the app context from URL query
    parameters, render the selector and dump the session state."""
    from ruins.core import build_config

    url_params = st.experimental_get_query_params()
    cfg, dm = build_config(url_params=url_params, **kwargs)
    model_scale_select(dm, cfg)
    st.json(st.session_state)
if __name__ == '__main__':
    # Allow running this component standalone; fire exposes debug_main's
    # kwargs as command-line arguments.
    import fire
    fire.Fire(debug_main)
from typing import Callable, Dict, Union
import os
from os.path import join as pjoin
import json
from collections.abc import Mapping
from ruins.core.i18n import get_translator
from streamlit import session_state
import streamlit as st
# check if streamlit is running
# Outside a `streamlit run` context (plain scripts, tests) fall back to a
# plain dict so module-level code can still read/write "session state".
# NOTE(review): st._is_running_with_streamlit is a private streamlit API and
# may break on upgrades — confirm when bumping the dependency.
if not st._is_running_with_streamlit:
    session_state = dict()
class Config(Mapping):
    """
    Streamlit app Config object.
    This class holds all configs needed to run the apps.
    It can be instantiated just like this to load default
    values.
    If a path is provided, it will load the configs from
    the referenced json file. Any config can be updated
    by passed kwargs.
    This design makes the config updateable and easy to
    to manage. At the same time it can be persisted to
    the disk and even mirrored to a database if needed in
    the future.
    """
    def __init__(self, path: str = None, **kwargs) -> None:
        """Initialize with defaults, then overlay the JSON file at *path*
        (if given) and finally any *kwargs* (kwargs win)."""
        # set the default values
        # debug mode
        self._debug = False
        self.lang = 'en'
        # path
        self.basepath = os.path.abspath(pjoin(os.path.dirname(__file__), '..', '..'))
        self.datapath = pjoin(self.basepath, 'data')
        # hot_load flag is forwarded to the DataManager (semantics defined there)
        self.hot_load = kwargs.get('hot_load', False)
        # datafile names, without file extension
        self.datafile_names = {
            'stations': 'stats',
            'cordex_grid': 'CORDEXgrid',
            'cimp_grid': 'CIMP5grid',
            'weather': 'weather',
            'climate': 'cordex_krummh',
            'pdsi': 'scPDSI',
            'wind_timeseries': 'windenergy_timeseries'
            #'climate_coast': 'cordex_coast',
            #'hydro': 'hydro_krummh'
        }
        # mime readers
        # map file extensions to DataManager source class names
        self.default_sources = {
            'nc': 'HDF5Source',
            'csv': 'CSVSource',
            'dat': 'DATSource'
        }
        self.default_sources.update(kwargs.get('include_mimes', {}))
        # reader args
        # per-file keyword arguments handed to the readers
        self.sources_args = {
            'stats.csv': dict(index_col=0),
            'hsim_collect.csv': dict(index_col=0),
            'windpowerx.csv': dict(index_col=0),
            'estQ.csv': dict(index_col=[0], parse_dates=[0]),
            'levelknock.csv': dict(index_col=[0], parse_dates=[0]),
            'levelW.csv': dict(index_col=[0], parse_dates=[0]),
            'prec.csv': dict(index_col=[0], parse_dates=[0]),
            'Qknock.csv': dict(index_col=[0], parse_dates=[0]),
            'scPDSI.csv': dict(index_col=[0])
        }
        self.sources_args.update(kwargs.get('include_args', {}))
        # app management
        self.layout = 'centered'
        # store the keys
        # _keys drives the Mapping interface (__len__ / __iter__)
        self._keys = ['debug', 'lang', 'basepath', 'datapath', 'hot_load', 'datafile_names', 'default_sources', 'sources_args', 'layout']
        # check if a path was provided
        conf_args = self.from_json(path) if path else {}
        # update with kwargs
        conf_args.update(kwargs)
        self._update(conf_args)
    @property
    def debug(self):
        # backed by _debug; string values are coerced by the setter
        return self._debug
    @property
    def story_mode(self):
        # NOTE(review): self._story_mode is never set in __init__; accessing
        # this property before the setter ran raises AttributeError (hasattr()
        # and .get() then treat the key as absent) — confirm this is intended.
        return self._story_mode
    @debug.setter
    def debug(self, value: Union[str, bool]):
        """Accept bools or strings; any string other than 'false'
        (case-insensitive) counts as True."""
        if isinstance(value, str):
            self._debug = value.lower() != 'false'
        else:
            self._debug = bool(value)
    @story_mode.setter
    def story_mode(self, value: Union[str, bool]):
        """Accept bools or strings; any string other than 'false'
        (case-insensitive) counts as True."""
        if isinstance(value, str):
            self._story_mode = value.lower() != 'false'
        else:
            self._story_mode = bool(value)
    def from_json(self, path: str) -> dict:
        """loads the content of the JSON config file"""
        if os.path.exists(path):
            with open(path, 'r') as f:
                return json.load(f)
        else:
            raise AttributeError(f"Config file {path} does not exist")
    def _update(self, new_settings: dict) -> None:
        """Update this instance with new settings"""
        # every new setting is also registered in _keys for the Mapping API
        for k, v in new_settings.items():
            setattr(self, k, v)
            if k not in self._keys:
                self._keys.append(k)
    def get_control_policy(self, control_name: str) -> str:
        """
        Get the control policy for the given control name.
        allowed policies are:
        - show: always show the control on the main container
        - hide: hide the control on the main container, but move to the expander
        - ignore: don't show anything
        """
        # a control-specific policy wins over the global controls_policy
        if self.has_key(f'{control_name}_policy'):
            return self.get(f'{control_name}_policy')
        elif self.has_key('controls_policy'):
            return self.get('controls_policy')
        else:
            # TODO: discuss with conrad to change this
            return 'show'
    def translator(self, **translations: Dict[str, str]) -> Callable[[str], str]:
        """Return a translator function"""
        return get_translator(self.lang, **translations)
    def get(self, key: str, default = None):
        """Look up *key* on this Config first, then in streamlit's session
        state, falling back to *default*."""
        if hasattr(self, key):
            return getattr(self, key)
        elif hasattr(session_state, key):
            return getattr(session_state, key)
        elif key in session_state:
            return session_state[key]
        else:
            return default
    def has_key(self, key) -> bool:
        """Check whether *key* is known to the Config or the session state.

        Side effect: a Config attribute is mirrored into session_state the
        first time it is checked here.
        """
        if hasattr(self, key) and not key in session_state:
            session_state[key] = getattr(self, key)
        return hasattr(self, key) or hasattr(session_state, key) or key in session_state
    def __len__(self) -> int:
        return len(self._keys)
    def __iter__(self):
        # iterate registered config keys (Mapping protocol)
        for k in self._keys:
            yield k
    def __getitem__(self, key: str):
        if hasattr(self, key):
            return getattr(self, key)
        elif key in session_state:
            return session_state[key]
        else:
            raise KeyError(f"Key {key} not found")
    def __setitem__(self, key: str, value):
        setattr(self, key, value)
        if key not in self._keys:
            self._keys.append(key)
from typing import Union, Tuple, Dict, List
import os
import shutil
import requests
import io
import zipfile
import streamlit as st
from .config import Config
from .data_manager import DataManager
# BUGFIX: the decorator was written as a bare expression ("st.experimental_singleton"
# on its own line, without "@"), so it was never applied and no caching happened.
@st.experimental_singleton
def contextualized_data_manager(**kwargs) -> DataManager:
    """Return a DataManager for the given config, cached as a singleton across
    streamlit reruns."""
    return DataManager(**kwargs)
def build_config(omit_dataManager: bool = False, url_params: Dict[str, List[str]] = None, **kwargs) -> Tuple[Config, Union[None, DataManager]]:
    """Build the application :class:`Config` and (optionally) the cached
    :class:`DataManager`.

    Args:
        omit_dataManager: if True, return ``(config, None)`` without creating
            a DataManager.
        url_params: streamlit query parameters; every value is a list
            (https://docs.streamlit.io/library/api-reference/utilities/st.experimental_get_query_params),
            single-element lists are flattened to scalars. Defaults to an
            empty mapping (was a mutable default argument before).
        **kwargs: additional config values; URL parameters take precedence
            over same-named kwargs.

    Returns:
        Tuple of (Config, DataManager or None).
    """
    if url_params is None:
        url_params = {}
    # flatten single-element URL parameter lists to scalars
    # TODO: This should be sanitzed to avoid injection attacks!
    ukwargs = {k: v[0] if len(v) == 1 else v for k, v in url_params.items()}
    kwargs.update(ukwargs)
    # extract the DataManager, if it was already instantiated
    dataManager = kwargs.pop('dataManager', None)
    # build the Config
    config = Config(**kwargs)
    if omit_dataManager:
        return config, None
    if dataManager is None:
        dataManager = contextualized_data_manager(**config)
    return config, dataManager
def download_data_archive(path: str = None, url: str = 'http://116.203.189.3/data.zip', DOI: str = None, if_exists: str = 'error'):
    """Download the data archive and extract it into the data folder.

    If ``path`` is None, the default path inside the repo itself is used.
    Then, you also need to change the datapath property of the application config.
    If the data folder already exists and is not empty, the function errors by
    default; pass ``if_exists='prune'`` to replace the existing folder.

    :param path: target data folder (defaults to ``<repo>/data``)
    :param url: direct download URL of the zip archive (used when DOI is None)
    :param DOI: Zenodo DOI; if given, the record is resolved via the Zenodo API
    :param if_exists: one of 'error', 'prune'
    """
    # use default path if none was provided
    if path is None:
        path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..', 'data'))
    # check if the data folder already exists and is non-empty
    if os.path.exists(path) and len(os.listdir(path)) > 0:
        if if_exists == 'error':
            raise OSError(f"The data path {path} already exists and is not empty. Pass if_exists='prune' to remove it.")
        elif if_exists == 'prune':
            shutil.rmtree(path)
            os.mkdir(path)
        else:
            # fix: dropped pointless f-prefix (no placeholders in the message)
            raise AttributeError('if_exists must be one of "error", "prune"')
    # check which download route is used:
    if DOI is None:
        # download the archive directly from the configured server
        print(f'Found Server URL: {url}\nStart downloading...', end='', flush=True)
        req = requests.get(url, stream=True)
        # renamed local 'zip' -> 'archive': don't shadow the builtin
        archive = zipfile.ZipFile(io.BytesIO(req.content))
        print(f'done.\nExtracting to {path}...', end='', flush=True)
        # the archive contains the 'data' folder itself, so extract into the parent
        archive.extractall(os.path.abspath(os.path.join(path, '..')))
        print('done.', flush=True)
    else:
        print(f'Found DOI: {DOI}\nStart downloading...', end='', flush=True)
        # Build the record id from the Zenodo DOI
        # assumes a DOI like '10.5281/zenodo.<record>' — TODO confirm format
        chunk = DOI.split('/')[-1]
        record = chunk.split('.')[1]
        # request the record metadata from the Zenodo API
        dat = requests.get(f'https://zenodo.org/api/records/{record}').json()
        for f in dat['files']:
            if f['type'] == 'zip':
                req = requests.get(f['links']['self'], stream=True)
                archive = zipfile.ZipFile(io.BytesIO(req.content))
                # extract the first zip file found, then stop
                print(f'done.\nExtracting to {path}...', end='', flush=True)
                archive.extractall(os.path.abspath(os.path.join(path, '..')))
                print('done.', flush=True)
                break
import streamlit as st
import pandas as pd
from ruins.core import build_config, debug_view, DataManager, Config
from ruins.plotting import bubble_plot
_TRANSLATE_EN = dict(
title='Bubble plot',
introduction="""
Bubble plot of RCP scenarios.
"""
)
_TRANSLATE_DE = dict(
title='Bubble plot',
introduction="""
Bubble plot der RCP Szenarien.
"""
)
def concept_explainer(config: Config, **kwargs):
    """Render the intro page once; it is skipped on later reruns."""
    # nothing to do once the user has confirmed the intro
    if config.has_key('bubbles_explainer'):
        return
    # resolve target container and translation function
    container = kwargs.get('container', st)
    t = config.translator(en=_TRANSLATE_EN, de=_TRANSLATE_DE)
    # title and introduction text
    container.title(t('title'))
    container.markdown(t('introduction'), unsafe_allow_html=True)
    # ask for confirmation before running the main app
    label = 'WEITER' if config.lang == 'de' else 'CONTINUE'
    if container.button(label):
        st.session_state.bubbles_explainer = True
        st.experimental_rerun()
    else:
        st.stop()
def show_bubbles(dataManager: DataManager, config: Config):
    """Render the sidebar index selectors and draw the bubble plot."""
    data = dataManager['cordex_krummh_nobias_chk_f32_ET'].read()
    # the first column label is replaced by the 'None' sentinel option
    options = list(data.columns)
    options[0] = 'None'
    picks = [
        st.sidebar.radio('Select Index1:', options, index=1),
        st.sidebar.radio('Select Index2:', options, index=2),
        st.sidebar.radio('Select Index3:', options, index=0),
    ]
    # collect selectors up to (excluding) the first 'None' choice;
    # later levels are ignored once a 'None' is hit — same semantics as the
    # original nested if/else ladder
    select = []
    for pick in picks:
        if pick == options[0]:
            break
        select.append(pick)
    fig = bubble_plot.draw_bubbles(data, selectors=select)
    st.pyplot(fig)
def main_app(**kwargs):
    """Entry point of the bubbles app: build config, show the intro, render the plot."""
    # build the config and dataManager from kwargs
    url_params = st.experimental_get_query_params()
    config, dataManager = build_config(url_params=url_params, **kwargs)
    # set page properties and debug view
    st.set_page_config(page_title='Bubble plot', layout=config.layout)
    debug_view.debug_view(dataManager, config, debug_name='DEBUG - initial state')
    # explainer (calls st.stop() until the user confirms)
    concept_explainer(config)
    # show bubbles
    show_bubbles(dataManager, config)
    # end state debug
    debug_view.debug_view(dataManager, config, debug_name='DEBUG - finished app')
if __name__ == '__main__':
    # CLI entry point: fire maps command line flags onto main_app kwargs
    import fire
    fire.Fire(main_app)
from typing import Tuple, List
import streamlit as st
import plotly.graph_objects as go
import plotly.express as px
from plotly.express.colors import unlabel_rgb, label_rgb, n_colors, sequential
import numpy as np
from scipy.stats import norm
from ruins.core import Config, build_config, debug_view
from ruins.plotting import distribution_plot
_TRANSLATE_EN = dict(
title='Uncertainty & Risk',
introduction="""
This first playground app will illustrate how uncertainty and risk influence our everyday decisions.
It is quite important to understand the difference between [knightian uncertaintry](https://en.wikipedia.org/wiki/Knightian_uncertainty)
and risk at this simplified example, before we move to climate modeling and weather data.
Taking the whole earth system into account, these concepts apply and are of high importance,
but their interpretration is way more complicated.
DESCRIPTION OF THE SIMPLIFIED EXAMPLE
""",
event_1_desc="""
The first event has two possible outcomes, from which we know their expected return distributions,
but we lack knowledge about the probabilities of occurence. Use the slider below to adjust their
mean outcome and the deviations from this mean.
""",
event_2_desc="""
In the first event, there was an uncalculatable uncertainty, as we can't predict the outcome. Instead of
throw the dice every time and risk being way off in terms of return, we can take a save route and make
an active decision for another event. The second event has only one outcome, from which we know that the
expected return is worse than the better outcome of the first event.
We trade off the posibility of very positive outcome at the cost of not being trapped into very bad outcomes.
But is that worth it?
"""
)
_TRANSLATE_DE = dict(
title='Unsicherheit und Risiko',
introduction="""
Dieses erste, vereinfachte Beispiel demonstriert wie sich Unsicherheit und Risiko auf
unsere alltäglichen Entscheidungen auswirkt. Es ist wichtig den Unterschied zwischen
[Knightsche Unsicherheit](https://de.wikipedia.org/wiki/Knightsche_Unsicherheit) anhand dieses stark vereinfachten
Beispiels zu erkunden und zu verstehen, bevor wir die Modelle und Daten der letzten Kapitel betrachten.
Betrachtet man das gesamte Erdsystem, sind Unsicherheit und Risiko für die Interpretierbarkeit der Daten
von fundamentaler Bedeutng, stellen sich jedoch in wesentlich komplexeren Zusammenhängen dar.
BESCHREIBUNG DES VEREINFACHTEN BEISPIELS
""",
event_1_desc="""
Das erste Ereignis hat zwei verschiedene Ergebnisse. Für jedes kennen wir die drchschnittliche Erwartung und deren
Verteilung, allerdings haben wir keine Information über die Wahrscheinlichkeit, dass eines der Ergebnisse eintritt.
Benutze die Schieberegler um die Verteilungen der Ergebnisse anzupassen.
""",
event_2_desc="""
Im ersten Ereignis mussten wir mit einer unbestimmbaren Unsicherheit umgehen, da wir das Ergebnis nicht vorhersagen konnten.
Anstatt hier die Würfel entscheiden zu lassen und ein schlechtes Ergebnis zu riskieren, können wir uns gänzlich
umentscheiden und Ereignis 2 eintreten lassen, das nur ein einziges Ergebnis hat. Allerdings ist das Ergebnis hier
schlechter als der bessere Ausgang des ersten Erignisses. Wir erkaufen uns die Sicherheit keine sehr schlechten Ergbnisse
zu haben damit, dass wir auch auf sehr positive Ergebnisse verzichten.
Aber lohnt sich das?
"""
)
def concept_explainer(config: Config, **kwargs):
    """Show an explanation, if it was not already shown.

    Halts the script via ``st.stop()`` until the user confirms; the
    confirmation is persisted in the session state and survives reruns.
    """
    # check if we saw the explainer already
    if config.has_key('uncertainty_playground_explainer'):
        return
    # get the container and a translation function
    container = kwargs['container'] if 'container' in kwargs else st
    t = config.translator(en=_TRANSLATE_EN, de=_TRANSLATE_DE)
    # place title and intro
    container.title(t('title'))
    container.markdown(t('introduction'), unsafe_allow_html=True)
    # check if the user wants to continue
    accept = container.button('WEITER' if config.lang == 'de' else 'CONTINUE')
    if accept:
        st.session_state.uncertainty_playground_explainer = True
        st.experimental_rerun()
    else:
        # block here until the button is pressed
        st.stop()
def _helper_plot(ev1: List[Tuple[float, float]], ev2: Tuple[float, float] = None, **kwargs) -> go.Figure:
    """Plot normal outcome distributions for event 1 and optionally event 2.

    :param ev1: list of (mean, std) tuples — one normal distribution per outcome
    :param ev2: optional (mean, std) of the alternative event (drawn in orange)
    :param kwargs: ``colorscale`` selects a plotly sequential scale (default 'Greens')
    :return: plotly Figure
    """
    # create figure
    fig = go.Figure()
    # build the colorscale with enough colors
    cscale = getattr(sequential, kwargs.get('colorscale', 'Greens'))
    # interpolate between the darkest and third-darkest entries of the scale
    cmap = n_colors(unlabel_rgb(cscale[-1]), unlabel_rgb(cscale[-3]), len(ev1))
    # get a common x (fixed outcome range 0..10)
    x = np.linspace(0, 10, 200)
    # iterate over all outcomes
    for i,outcome in enumerate(ev1):
        mu, std = outcome
        y = norm.pdf(x, loc=mu, scale=std)
        # normalize so the sampled curve sums to 1; '* 100' below turns it into percent
        y_sum = y.sum()
        y /= y_sum
        # add the traces: filled density plus dashed mean marker
        fig.add_trace(
            go.Scatter(x=x, y=y * 100, mode='lines', line=dict(color=label_rgb(cmap[i])), name=f'Outcome #{i + 1}', fill='tozerox')
        )
        fig.add_trace(
            go.Scatter(x=[mu, mu], y=[0, norm.pdf(mu, loc=mu, scale=std) / y_sum * 100], mode='lines', line=dict(color=label_rgb(cmap[i]), width=3, dash='dash'), name=f'Mean #{i + 1}')
        )
    # handle second event
    if ev2 is not None:
        mu, std = ev2
        y = norm.pdf(x, loc=mu, scale=std)
        y_sum = y.sum()
        y /= y_sum
        # add distribution
        fig.add_trace(
            go.Scatter(x=x, y=y * 100, mode='lines', line=dict(color='orange', width=2), name='Alternative Event', fill='tozerox')
        )
        # add mean
        fig.add_trace(
            go.Scatter(x=[mu, mu], y=[0, norm.pdf(mu, loc=mu, scale=std) / y_sum * 100], mode='lines', line=dict(color='orange', width=2, dash='dash'), name='Alternative Event mean value')
        )
    # adjust figure: white template, horizontal legend
    fig.update_layout(
        template='plotly_white',
        legend=dict(orientation='h')
    )
    return fig
def concept_graph(config: Config, expander_container=st.sidebar, **kwargs) -> go.Figure:
    """Build the uncertainty-vs-risk distribution figure for the playground.

    Two phases: before the first confirmation, sliders for the two outcomes of
    event 1 are shown inline; afterwards they move into sidebar expanders and
    the alternative (certain) event 2 is added for comparison.

    :param config: app Config (holds 'concept_event_1' once confirmed)
    :param expander_container: where the per-outcome expanders are placed
    :param kwargs: optional 'container' overrides the main output container
    :return: plotly Figure from :func:`distribution_plot`
    """
    # get the container and translator
    container = kwargs['container'] if 'container' in kwargs else st
    t = config.translator(de=_TRANSLATE_DE, en=_TRANSLATE_EN)
    # ------------------------
    # First PDF
    if not config.has_key('concept_event_1'):
        container.markdown(t('event_1_desc'))
        l1, c1, r1 = container.columns(3)
        l2, c2, r2 = container.columns(3)
        # outcome 1
        l1.markdown('### Outcome 1')
        ou1_mu = c1.slider('Expected value of outcome #1', min_value=1., max_value=10., value=2.5)
        ou1_st = r1.slider('Certainty of outcome #1', min_value=0.1, max_value=3.0, value=0.5)
        # outcome 2
        l2.markdown('### Outcome 2')
        ou2_mu = c2.slider('Expected value of outcome #2', min_value=1., max_value=10., value=6.0)
        ou2_st = r2.slider('Certainty of outcome #2', min_value=0.1, max_value=3.0, value=0.4)
        ev1 = [(ou1_mu, ou1_st), (ou2_mu, ou2_st)]
        # add the continue button
        ev1_ok = container.button('WEITER' if config.lang=='de' else 'CONTINUE')
        if ev1_ok:
            st.session_state.concept_event_1 = ev1
            st.experimental_rerun()
        else:
            # fix: key was misspelled 'coloscale' and therefore ignored;
            # 'colorscale' matches the call at the end of this function
            fig = distribution_plot({'outcomes': ev1, 'colorscale': 'Greens'})
            return fig
    else:
        ev1 = config['concept_event_1']
        # re-create the sliders in sidebar expanders, seeded with saved values
        ev1_new = []
        for i, out in enumerate(ev1):
            e = expander_container.expander(f'Outcome #{i + 1}', expanded=True)
            mu = e.slider(f'Expected value of outcome # {i + 1}', min_value=1., max_value=10., value=out[0])
            std = e.slider(f'Certainty of outcome # {i + 1}', min_value=0.1, max_value=2.0, value=out[1])
            ev1_new.append((mu, std, ))
        # ------------------------
        # add second event
        container.markdown(t('event_2_desc'))
        l, c, r = container.columns(3)
        # second event
        l.markdown('### Second event')
        e2_mu = c.slider('Expected value of alternative event', min_value=1., max_value=10., value=5.5)
        e2_st = r.slider('Certainty of alternative event', min_value=0.1, max_value=3.0, value=0.2)
        fig = distribution_plot({'outcomes': ev1_new, 'name': 'Original Event', 'colorscale': 'Greens'}, {'outcomes': [(e2_mu, e2_st)], 'name': 'Alternative Event', 'colorscale': 'Oranges'})
        return fig
def concept_playground(config: Config) -> None:
    """
    The concept playground demonstrates how knightian uncertainty
    is different from risk and how it influences everyday decisions.
    """
    # TODO: add the story mode stuff here
    # explainer (calls st.stop() until the user confirms)
    concept_explainer(config)
    # show the graph
    fig = concept_graph(config)
    plot_area = st.empty()
    plot_area.plotly_chart(fig, use_container_width=True)
def main_app(**kwargs):
    """Entry point: build config, set up the page and run the playground."""
    # build the config and the dataManager from kwargs
    url_params = st.experimental_get_query_params()
    config, dataManager = build_config(url_params=url_params, **kwargs)
    # set page config and debug view
    st.set_page_config(page_title='Uncertainty Explorer', layout=config.layout)
    debug_view.debug_view(dataManager, config, debug_name='Initial Application State')
    # --------------------------
    # Main App
    # TODO: right now, we have only the playground here
    concept_playground(config)
if __name__ == '__main__':
    # NOTE(review): other apps in this package expose main_app via
    # fire.Fire(main_app); here it is called directly — confirm intended
    main_app()
import streamlit as st
from ruins.core import build_config, debug_view, Config
from ruins.plotting import plot_extreme_pdf
_TRANSLATE_DE = dict(
title="Temperaturen verschieben sich",
intro="""Dies ist ein langer introtext für dieses Beispiel""",
temp_intro="""
Wähle zuerst die **mittlere Temperatur** von der du ausgehen möchtest.
Im zweiten Slider musst du die *Variabilität* der mittleren Temperatur angeben.
""",
temp_outro="""Nun beschreibt die Abbildung den **Ist**-Zustand, also die Häufigkeit
mit der kalte oder warme Tage vorkommen und wie häufig es zu extremen Ereignissen kommt.
Im nächsten Schritt können wir die Erde erwärmen.
""",
increase_intro="""In diesem Schritt kannst du nun die Durchschnittstemperatur auf
der Erde erhöhen. Eine Erhöhung um **1°C** Celsius mag nicht nach sehr viel klingen, und
natürlich sind die Schwankungen an einem einzigen Tag sehr viel höher, doch beobachte
genau, was mit den **dunkelroten Punkten** in der Abbildung passiert, wenn die Temperatur
sich erhöht.
""",
increase_outro="""Wenn du die Abbildung genauer betrachtest siehst du zwei Dinge:
* Es gibt deutlich mehr heiße Tage
* Es gibt deutlich weniger kalte Tage
Mehr heiße Tage können wir gut mit Wetterbeobachtungen belegen. Allerdings kommt es
nach wie vor zu **kalten** Extremereignissen. Dies legt nahe, dass sich auch die **Verteilung**
der durchschnittlichen Temperaturen geändert haben muss
""",
)
_TRANSLATE_EN = dict(
title="Temperatures shift",
intro="""This is a long intro-text for this example""",
temp_intro="""
First select the **mean temperature** that you will rely on.
The second slider asks for the temperature's variability.
""",
temp_outro="""The charts illustrates the current situation.
That is the frequency of warm and cold days as well as extremes.
As a next step, you can increase temperature on earth.
""",
increase_intro="""Now it's time to increase the temperature on Earth.
A rise of **1°C** might not sound like much, and of course temperature is way more
volatile during the day, but watch out for the **darkred points** on the chart, when
you increase mean temperature.
""",
increase_outro="""The chart suggest two main observations:
* There are way more hot days
* There are way less frost days
We find evidence for more hot days in our observation data. But other than the
graph suggests, we still have cold and frost days.
This implies that also the **distribution** of mean temperatures must have changed.
""",
)
def explainer(config: Config) -> None:
    """Render the intro page and advance the app stage on confirmation."""
    t = config.translator(de=_TRANSLATE_DE, en=_TRANSLATE_EN)
    # title and introduction text
    st.title(t('title'))
    st.markdown(t('intro'))
    # confirming moves the stage machine forward and reruns the script
    if st.button('GOT IT. Get started...'):
        st.session_state.tshift_stage = 'temperature_intro'
        st.experimental_rerun()
def temperature_dist_intro(config: Config) -> None:
    """Introduce temperature distributions"""
    # explain
    t = config.translator(de=_TRANSLATE_DE, en=_TRANSLATE_EN)
    st.title(t('title'))
    # placeholder so the chart renders above the explanatory text
    plot = st.empty()
    st.markdown(t('temp_intro'))
    # build a slider (variability defaults to 10% of the chosen mean)
    mu = st.slider('Temperature', value=13.3, min_value=4., max_value=24., step=0.1, key='mu1')
    std = st.slider('Variability', value=0.1 * mu, min_value=0.0, max_value=10.0, step=0.01, key='std1')
    # make the plot
    fig = plot_extreme_pdf(mu, std)
    plot.plotly_chart(fig, use_container_width=True)
    # outro
    st.markdown("""<hr style="margin-top: 3rem; margin-bottom: 3rem;">""", unsafe_allow_html=True)
    st.markdown(t('temp_outro'))
    # advancing the stage triggers a rerun into increase_temperatures()
    ok = st.button('ERDE ERWÄRMEN' if config.lang=='de' else 'WARM THE EARTH')
    if ok:
        st.session_state.tshift_stage = 't_increase'
        st.experimental_rerun()
def increase_temperatures(config: Config) -> None:
    """Increase the temperature on earth"""
    t = config.translator(de=_TRANSLATE_DE, en=_TRANSLATE_EN)
    # create the sidebar: present-day distribution (keys shared with previous stage)
    mu = st.sidebar.slider('Temperature', value=13.3, min_value=4., max_value=24., step=0.1, key='mu1')
    std = st.sidebar.slider('Variability', value=0.1 * mu, min_value=0.0, max_value=10.0, step=0.01, key='std1')
    # add the title
    st.title(t('title'))
    # placeholder so the chart renders above the explanatory text
    plot = st.empty()
    st.markdown(t('increase_intro'))
    # make the controller to increase temperature (lower bound = present mean)
    mu2 = st.slider('New temperature', min_value=mu, value=mu + 1.5, max_value=25., step=0.1, key='mu2')
    # make the plot — same variability for both distributions at this stage
    fig = plot_extreme_pdf([mu, mu2], [std, std])
    plot.plotly_chart(fig, use_container_width=True)
    # outro
    st.markdown("""<hr style="margin-top: 3rem; margin-bottom: 3rem;">""", unsafe_allow_html=True)
    st.markdown(t('increase_outro'))
    # button: advance to the final stage with independent variability controls
    ok = st.button('DIE VERTEILUNG ÄNDERN...' if config.lang=='de' else 'CHANGE THE DISTRIBUTION')
    if ok:
        st.session_state.tshift_stage = 'final'
        st.experimental_rerun()
def full_temperature_shift(config: Config) -> None:
    """Full application with all controls: compare present and future distributions."""
    t = config.translator(de=_TRANSLATE_DE, en=_TRANSLATE_EN)
    # create sidebar: present-day distribution
    st.sidebar.markdown('#### Present temperature')
    mu = st.sidebar.slider('Temperature', value=13.3, min_value=4., max_value=24., step=0.1, key='mu1')
    std = st.sidebar.slider('Variability', value=0.1 * mu, min_value=0.0, max_value=10.0, step=0.01, key='std1')
    # future distribution with independent variability
    st.sidebar.markdown('#### Future temperatures')
    mu2 = st.sidebar.slider('New temperature', min_value=mu, value=mu + 1.5, max_value=25., step=0.1, key='mu2')
    std2 = st.sidebar.slider('Variability', value=0.1 * mu2, min_value=0.0, max_value=10.0, step=0.01, key='std2')
    # add the title
    st.title(t('title'))
    # make and render the plot
    # fix: the original rebound `fig` to st.plotly_chart's return value,
    # discarding the figure object — the assignment served no purpose
    fig = plot_extreme_pdf([mu, mu2], [std, std2])
    st.plotly_chart(fig, use_container_width=True)
def main_app(**kwargs):
    """Entry point: dispatch to one of the app stages via the 'tshift_stage' state."""
    # build the config and dataManager from kwargs
    url_params = st.experimental_get_query_params()
    config, _ = build_config(url_params=url_params, omit_dataManager=True, **kwargs)
    # set page properties and debug view
    st.set_page_config(page_title='Temperature shift', layout=config.layout)
    debug_view.debug_view(None, config, 'DEBUG - initial state')
    # MAIN APP
    # -----------
    # simple stage machine; each stage function advances the state and reruns
    stage = config.get('tshift_stage', 'intro')
    if stage == 'intro':
        explainer(config)
    elif stage == 'temperature_intro':
        temperature_dist_intro(config)
    elif stage == 't_increase':
        increase_temperatures(config)
    elif stage == 'final':
        full_temperature_shift(config)
    else:
        # unknown stage: surface debugging info instead of failing silently
        st.error("""The application is in an undefinded state. Please contact the developer with the info below""")
        st.json({'message': 'tshift_stage is unkown', 'value': stage, 'origin': 'temperature_shift.py',})
        st.stop()
    # end state debug
    debug_view.debug_view(None, config, 'DEBUG - final state')
if __name__ == '__main__':
    # CLI entry point: fire maps command line flags onto main_app kwargs
    import fire
    fire.Fire(main_app)
from typing import List
import streamlit as st
import pandas as pd
from ruins.core import build_config, debug_view, Config, DataManager
from ruins.plotting import sunburst
from ruins.processing.sunburst import ordered_sunburst_data
_TRANSLATE_DE = dict(
title='Verwendete Klimamodelle',
intro="""Es gibt eine Vielzahl an unterschiedlichen Klimamodellen,
die auf unterschiedlichen Skalen arbeiten und von unterschiedlichen Voraussetzungen ausgehen.
Das macht es schwer von **dem Klimamodell** zu sprechen und reflektiert die
Unsicherheit über die Zukunft.
In der Abbildung unten findest du alle Modelle, die für die Erstellung unserer
Ergebnisse herangezogen wurden, kategorisiert nach dem Globalen und regionalen Modell, sowie
dem RCP Szenario.
"""
)
_TRANSLATE_EN = dict(
title='Climate models in use',
intro="""There are tons of different climate models, which operate at different
scales and have different underlying assumptions. That reflects the uncertainty about
our future and thus, there is not **the one correct climate model**.
All models used to calculate our results are shown in the chart. We categorized them by
the global and regional model in use as well as the RCP scenario.
"""
)
def plot_controls(config: Config, expander=st.sidebar) -> None:
    """Add the hierarchy/level controls for the sunburst chart to the sidebar.

    Writes 'sunburst_order' (list of ring names) and 'sunburst_maxdepth' (2..4)
    into the session state.
    """
    # fix: both labels were misspelled ('Hierachie' / 'Hierachy')
    with expander.expander('Hierarchie' if config.lang=='de' else 'Hierarchy', expanded=True):
        # set the order of the sunburst rings
        o = st.radio('Reihenfolge' if config.lang=='de' else 'Order', options=['GCM -> RCM -> RCP', 'RCP -> GCM -> RCM', 'RCM -> GCM -> RCP'])
        st.session_state.sunburst_order = o.split(' -> ')
        # set the level: maxdepth 2..4 maps to 1..3 visible levels
        LVL = {2: '1 level', 3: '2 levels', 4: '3 levels'}
        st.select_slider(
            'Anzahl Stufen' if config.lang=='de' else 'Show levels',
            options=list(LVL.keys()),
            value=4,
            format_func=lambda k: LVL.get(k),
            key='sunburst_maxdepth'
        )
@st.experimental_memo
def get_cached_data(_dataManager: DataManager, order: List[str]) -> pd.DataFrame:
    # the leading underscore excludes _dataManager from st.experimental_memo's
    # hashing, so the cache key depends on `order` only
    return ordered_sunburst_data(_dataManager, order)
def sunburst_plot(dataManager: DataManager, config: Config):
    """Render the sunburst chart for the currently configured ring order and depth."""
    # get the ordered data (memoized per order)
    df = get_cached_data(dataManager, config['sunburst_order'])
    # get the figure
    fig = sunburst(df, maxdepth=config.get('sunburst_maxdepth', 4))
    # add
    st.plotly_chart(fig, use_container_width=True)
def main_app(**kwargs):
    """Entry point of the sunburst app: build config, render controls and chart."""
    # build the config and dataManager from kwargs
    url_params = st.experimental_get_query_params()
    config, dataManager = build_config(url_params=url_params, **kwargs)
    # set page properties and debug view
    st.set_page_config(page_title='Climate Model Sunburst', layout=config.layout)
    debug_view.debug_view(dataManager, config, debug_name='DEBUG - initial state')
    # get a translator
    t = config.translator(de=_TRANSLATE_DE, en=_TRANSLATE_EN)
    st.title(t('title'))
    st.markdown(t('intro'))
    # main application
    plot_controls(config)
    sunburst_plot(dataManager, config)
    # end state debug
    debug_view.debug_view(dataManager, config, debug_name='DEBUG - finished app')
if __name__=='__main__':
    # CLI entry point: fire maps command line flags onto main_app kwargs
    import fire
    fire.Fire(main_app)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def yrplot_hm(sr, ref=[1980, 2000], ag='sum', qa=0.95, cbar_title='Temperature anomaly (K)', cmx='coolwarm', cmxeq=True, li=False):
    """Heatmap of monthly values plus an annual aggregate column (climate stripes).

    @deprecated - use stripes_heatmap.yrplot_hm instead

    :param sr: pd.Series of monthly values with a DatetimeIndex
    :param ref: [start_year, end_year] reference period whose mean is
        subtracted (anomalies), or None for raw values; read-only default
    :param ag: annual aggregation: 'sum', 'min', 'max', anything else -> mean
    :param qa: quantile used to clip the colour scale
    :param cbar_title: colourbar label
    :param cmx: matplotlib colormap name (forced to 'coolwarm_r' for ag='sum')
    :param cmxeq: if True, the colour scale is symmetric around zero
    :param li: year (int) at which to draw the observed/modelled split line
    :return: matplotlib Figure
    """
    yrs = sr.index.year.unique()
    # 14 columns: 12 months, one spacer, one annual aggregate
    dummy = np.zeros((len(yrs), 14)) * np.nan
    dummy = pd.DataFrame(dummy)
    dummy.index = yrs
    dummy.columns = ['J', 'F', 'M', 'A', 'M', 'J', 'J', 'A', 'S', 'O', 'N', 'D', '', 'Year']
    for i in sr.index:
        dummy.iloc[i.year - yrs[0], i.month - 1] = sr.loc[i]
    # reversed colormap for sums (e.g. precipitation: more = blue);
    # hoisted out of the loop below — it was reassigned every iteration
    if ag == 'sum':
        cmx = 'coolwarm_r'
    for i in yrs:
        if ag == 'sum':
            dummy.iloc[i - yrs[0], 13] = sr.loc[sr.index.year == i].sum()
        elif ag == 'min':
            dummy.iloc[i - yrs[0], 13] = sr.loc[sr.index.year == i].min()
        elif ag == 'max':
            dummy.iloc[i - yrs[0], 13] = sr.loc[sr.index.year == i].max()
        else:  # ag == 'mean'
            dummy.iloc[i - yrs[0], 13] = sr.loc[sr.index.year == i].mean()
    # idiom fix: 'is None' instead of '== None'; pass/else inverted
    if ref is not None:
        # subtract the reference-period mean -> anomalies
        refx = dummy.loc[ref[0]:ref[1]].mean(axis=0)
        dummy = dummy - refx
    if cmxeq:
        vxU = dummy.abs().quantile(qa).quantile(qa)
        vxL = -1. * vxU
    else:
        vxU = dummy.quantile(qa).quantile(qa)
        vxL = dummy.quantile(1. - qa).quantile(1. - qa)
    if ag == 'sum':
        # scale the annual sum to a monthly mean so it shares the colour scale
        dummy.iloc[:, 13] = dummy.iloc[:, 13] / 12
    fig = plt.figure(figsize=(8, len(dummy) / 15.))
    ax = sns.heatmap(dummy, cmap=cmx, vmin=vxL, vmax=vxU, cbar_kws={'label': cbar_title})
    if ref is not None:
        # highlight the reference period in both the monthly and annual columns
        ax.add_patch(
            plt.Rectangle((0, ref[0] - yrs[0]), 12, ref[1] - ref[0], fill=True, edgecolor='red', facecolor='gray', lw=2,
                          alpha=0.3, clip_on=False))
        ax.add_patch(
            plt.Rectangle((13, ref[0] - yrs[0]), 1, ref[1] - ref[0], fill=True, edgecolor='red', facecolor='gray', lw=2,
                          alpha=0.3, clip_on=False))
        ax.annotate('Reference period', (0.5, ref[1] - yrs[0] - 2), color='white', weight='bold', ha='left',
                    va='bottom', alpha=0.6)
    # NOTE: type() is used deliberately — isinstance(li, int) would also accept
    # the default li=False (bool is a subclass of int)
    if type(li) == int:
        ax.axhline(li - yrs[0], color='k', ls='--', lw=1, alpha=0.5)
        ax.annotate(' >> observed', (12.5, li - yrs[0] - 0.5), color='k', ha='center', va='bottom', alpha=0.6,
                    rotation=90.)
        ax.annotate('modelled << ', (12.5, li - yrs[0] + 0.5), color='k', ha='center', va='top', alpha=0.6,
                    rotation=90.)
    ax.set_ylabel('Year')
    ax.set_xlabel('Month ')
    return fig
def monthlyx(dy, dyx=1, ylab='T (°C)', clab1='Monthly Mean in Year', clab2='Monthly Max in Year', pls='cividis_r') -> plt.Figure:
    """Scatter monthly values of all columns, coloured by year.

    :param dy: DataFrame with DatetimeIndex (primary layer, coloured via pls)
    :param dyx: optional second DataFrame (e.g. monthly maxima, warm colours);
        an int (the default) disables the layer — type() check keeps the
        sentinel cheap and rejects bools
    :param ylab: y axis label
    :param clab1, clab2: colourbar labels for the two layers
    :param pls: colormap name for the primary layer
    :return: the current matplotlib Figure
    """
    cmap = plt.cm.get_cmap(pls)
    colors = cmap(np.linspace(0, 1, len(dy.index.year.unique()) + 1))
    # year offset and month per observation for colour/x placement
    idx1 = dy.index.year - dy.index.year.min()
    idx1m = dy.index.month
    if type(dyx) != int:
        idx2 = (dyx.index.year - dyx.index.year.min()).astype(int)
        idx2m = dyx.index.month
        cmap1 = plt.cm.get_cmap('gist_heat_r')
        colors1 = plt.cm.gist_heat_r(np.linspace(0, 1, len(dyx.index.year.unique()) + 1))
    for i in dy.columns:
        # fix: np.float alias was removed in NumPy >= 1.24 — use builtin float
        plt.scatter(idx1m + (np.random.rand(len(dy)) - 1.5), dy[i].values.astype(float), c=colors[idx1], alpha=0.6, s=2)
    if type(dyx) != int:
        for i in dyx.columns:
            plt.scatter(idx2m + (np.random.rand(len(dyx)) - 1.5), dyx[i].values.astype(float), c=colors1[idx2], alpha=0.6, s=2)
    # colourbar ticks relabelled to span the year range of each layer
    cbar = plt.colorbar(plt.cm.ScalarMappable(cmap=cmap), label=clab1, ticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
    cbar.ax.set_yticklabels(np.round(np.linspace(dy.index.year.min(), dy.index.year.max(), 6)).astype(int).astype(str))
    if type(dyx) != int:
        cbar1 = plt.colorbar(plt.cm.ScalarMappable(cmap=cmap1), label=clab2, ticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
        cbar1.ax.set_yticklabels(
            np.round(np.linspace(dyx.index.year.min(), dyx.index.year.max(), 6)).astype(int).astype(str))
    plt.xticks([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
               ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'])
    plt.ylabel(ylab)
    fig = plt.gcf()
    return fig
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
import numpy as np
def sea_level(tide_data: pd.DataFrame, input_scale: float = 1/1000., knock_level: float = None, fig: go.Figure = None, row: int = 1, col: int = 1) -> go.Figure:
    """Plot the sea level time series, optionally into an existing subplot figure.

    :param tide_data: tide gauge readings; values are multiplied by
        ``input_scale`` (default converts mm -> m)
    :param knock_level: if given, a dashed reference line plus annotation for
        the pre-rise mean sea level is drawn
    :param fig, row, col: target subplot; a new 1x1 figure is created if fig is None
    """
    # build a figure, if there is None
    if fig is None:
        fig = make_subplots(1, 1)
    # add tide data
    fig.add_trace(
        go.Scatter(x=tide_data.index, y=tide_data.values * input_scale, name='Sea level', line=dict(color='blue')), row=row, col=col
    )
    # add knock level
    # NOTE(review): the hline/annotation are not bound to row/col and so apply
    # figure-wide — confirm this is intended for multi-row figures
    if knock_level is not None:
        fig.add_hline(y=knock_level, name="0 mNHN - average sea level before sea level rise", line=dict(color='grey', dash='dash'), opacity=0.5)
        fig.add_annotation(x=0.5, y=0.95, xref="x domain", yref="y domain", text="0 mNHN - average sea level before sea level rise", showarrow=False, font=dict(color='grey', size=16))
    # update layout: per-row axis title, transparent background, horizontal legend
    fig.update_layout(**{
        f'yaxis{row}': dict(title='Sea level [mNN]'),
        'paper_bgcolor': 'rgba(0,0,0,0)',
        'plot_bgcolor': 'rgba(0,0,0,0)',
        'legend': dict(orientation='h')
    })
    return fig
def canal_recharge(recharge_data: pd.DataFrame, cumsum: bool = False, fig: go.Figure = None, row: int = 1, col: int = 1) -> go.Figure:
    """Add the canal recharge series to a (new or given) subplot figure."""
    # fall back to a fresh 1x1 figure
    if fig is None:
        fig = make_subplots(1, 1)
    # optionally accumulate the series; the legend label reflects the mode
    label = "Cumulative recharge" if cumsum else "Absolute recharge"
    if cumsum:
        recharge_data = np.cumsum(recharge_data)
    # line trace of the (possibly accumulated) recharge
    fig.add_trace(
        go.Scatter(x=recharge_data.index, y=recharge_data.values, name=label, line=dict(color='blue')),
        row=row, col=col
    )
    # per-row axis title, transparent background, horizontal legend
    fig.update_layout(**{
        f'yaxis{row}': dict(title='Recharge [mm]'),
        'paper_bgcolor': 'rgba(0,0,0,0)',
        'plot_bgcolor': 'rgba(0,0,0,0)',
        'legend': dict(orientation='h')
    })
    return fig
def absolute_water_level(hg_model_runs: list,
                         EVEx5_lw_pegel_timesliced: pd.Series,
                         fig: go.Figure = None,
                         row: int = 1, col: int = 1) -> go.Figure:
    """Plot simulated water level ensemble (grey) vs. observation (green).

    :param hg_model_runs: list of model runs; each run's first element is a
        series of simulated levels in mm (divided by 1000 for plotting)
    :param EVEx5_lw_pegel_timesliced: observed gauge series (already in m)
    :param fig, row, col: target subplot; a new 1x1 figure is created if fig is None
    """
    # build a figure, if there is None
    if fig is None:
        fig = make_subplots(1, 1)
    # ensemble members share one legend entry: plot them unlabeled ...
    for run in hg_model_runs:
        fig.add_trace(
            go.Scatter(x=run[0].index, y=run[0]/1000,
                       line=dict(color='grey'), showlegend=False), row=row, col=col)
    fig.update_traces(opacity=.3)
    # ... and add one invisible dummy trace that only feeds the legend
    fig.add_trace(
        go.Scatter(x=[-1], y=[-1], visible='legendonly',
                   name='$H_G in catchment (simulated)$',
                   line=dict(color='grey')), row=row, col=col)  # Nur für die Legende
    fig.add_trace(
        go.Scatter(x=EVEx5_lw_pegel_timesliced.index, y=EVEx5_lw_pegel_timesliced,
                   name=r'$H_G [m] \text{(observed)}$',
                   line=dict(color='green')), row=row, col=col)
    # add canal crest: level at which first damages occur
    fig.add_hline(y=-0.9, name=r'$Canal water level with first damages$',
                  line=dict(color='red', dash='dash'), opacity=0.5, row=row, col=col)
    # update layout
    fig.update_layout(**{
        f'yaxis{row}': dict(title='Absolute Water Level $H_G$'),
        'paper_bgcolor': 'rgba(0,0,0,0)',
        'plot_bgcolor': 'rgba(0,0,0,0)',
        #'legend': dict(orientation='h')
    })
    return fig
def pump_capacity(hg_model_runs: list,
                  pump_capacity_observed: pd.Series,
                  cumsum: bool = False,
                  fig: go.Figure = None,
                  row: int = 1, col: int = 1) -> go.Figure:
    """Plot simulated pump capacity ensemble (grey) vs. observation (lightblue).

    :param hg_model_runs: list of model runs; each run's second element is the
        used pump capacity fraction (0..1, plotted as percent unless cumsum)
    :param pump_capacity_observed: observed pump capacity series
    :param cumsum: if True, plot the cumulative capacity instead of percent
    :param fig, row, col: target subplot; a new 1x1 figure is created if fig is None
    """
    if fig is None:
        fig = make_subplots(1, 1)
    # build the plot: unlabeled ensemble traces share one legend dummy below
    for run in hg_model_runs:
        if cumsum:
            fig.add_trace(
                go.Scatter(x=run[0].index, y=np.cumsum(run[1]),
                           line=dict(color='grey'), showlegend=False), row=row, col=col)
        else:
            fig.add_trace(
                go.Scatter(x=run[0].index, y=run[1] * 100,
                           line=dict(color='grey'), showlegend=False), row=row, col=col)
    fig.update_traces(opacity=.3)
    # invisible dummy traces: legend entry only
    if cumsum:
        fig.add_trace(
            go.Scatter(x=[0], y=[0], visible='legendonly',
                       name='Cumulative pump capacity at Knock [-]',
                       line=dict(color='grey')), row=row, col=col)
    else:
        fig.add_trace(
            go.Scatter(x=[0], y=[0], visible='legendonly',
                       name='Used pump capacity (simulated)',
                       line=dict(color='grey')), row=row, col=col)
    fig.add_trace(
        go.Scatter(x=pump_capacity_observed.index, y=pump_capacity_observed,
                   name='Used pump capacity (observed)',
                   line=dict(color='lightblue')), row=row, col=col)
    # HACK: hard-coded axis key — the percent range is only applied when this
    # subplot happens to be row 2; should use f'yaxis{row}' like below
    if row == 2:
        fig.update_layout(yaxis2=dict(range=[-2,102]))
    # update layout
    fig.update_layout(**{
        f'yaxis{row}': dict(title='[%]'),
        'paper_bgcolor': 'rgba(0,0,0,0)',
        'plot_bgcolor': 'rgba(0,0,0,0)',
        #'legend': dict(orientation='h')
    })
    return fig
import numpy as np
import matplotlib.pyplot as plt
def kde(data, cmdata='none', split_ts=1, cplot=True, eq_period=True):
    """Plot kernel density estimates (KDE) with one colored stripe per value.

    Fits a Gaussian KDE to ``data`` (and optionally to climate-model data in
    ``cmdata``), draws the density curves and adds a vertical stripe for every
    value, colored by its year. One or two colorbars explain the stripe colors.

    Parameters
    ----------
    data : pandas.Series
        Observed values indexed by a DatetimeIndex; NaNs are dropped.
    cmdata : pandas.Series or str, optional
        Climate-model values to overlay. Any string (default ``'none'``)
        disables the overlay.
    split_ts : int, optional
        Number of consecutive sub-periods to fit and draw as separate KDEs.
    cplot : bool, optional
        If True, draw the plot elements; otherwise only fit the KDEs.
    eq_period : bool, optional
        If True and ``split_ts > 1``, split off the last two 20-value periods
        plus the remainder (assumes ``split_ts == 3`` — TODO confirm);
        otherwise split into equally sized periods.

    Returns
    -------
    (matplotlib.figure.Figure, matplotlib.axes.Axes)
        The created figure and its main axes.
    """
    from sklearn.neighbors import KernelDensity
    # colorblind-friendly palettes for observation periods and model periods
    cxx = ['#E69F00', '#009E73', '#0072B2', '#D55E00', '#CC79A7']
    cxx2 = ['#8c6bb1', '#810f7c']
    data = data[~np.isnan(data)]
    x_d = np.linspace(np.min(data) * 0.9, np.max(data) * 1.1, len(data))
    # instantiate and fit the KDE model
    kde = KernelDensity(bandwidth=1.0, kernel='gaussian')
    kde.fit(data.to_numpy()[:, None])
    # score_samples returns the log of the probability density
    logprob = kde.score_samples(x_d[:, None])
    if cplot & (split_ts == 1):
        # NOTE(review): draws on matplotlib's implicit current axes *before*
        # fig/ax are created below — presumably intentional; verify.
        plt.fill_between(x_d, np.exp(logprob), alpha=0.4, facecolor='grey')
    lp = np.exp(logprob)
    if not isinstance(cmdata, str):
        fig, (ax, cax, cax2) = plt.subplots(ncols=3, figsize=(10.5, 2.3), gridspec_kw={"width_ratios": [1, 0.02, 0.02]})
        x_d2 = np.linspace(np.min(cmdata) * 0.9, np.max(cmdata) * 1.1, len(cmdata))
        # instantiate and fit second KDE model
        kde2 = KernelDensity(bandwidth=1.0, kernel='gaussian')
        kde2.fit(cmdata.to_numpy()[:, None])
        # score_samples returns the log of the probability density
        logprob2 = kde2.score_samples(x_d2[:, None])
        if cplot & (split_ts == 1):
            ax.fill_between(x_d2, np.exp(logprob2), alpha=0.4, facecolor='grey')
    else:
        fig, (ax, cax) = plt.subplots(ncols=2, figsize=(10.5, 2.3), gridspec_kw={"width_ratios": [1, 0.06]})
    if split_ts > 1:
        if eq_period:
            # last two 20-value periods plus everything before them
            spliti = [0, len(data) - 40, len(data) - 20, len(data)]
        else:
            spliti = np.linspace(0, len(data), split_ts + 1).astype(int)
        for i in np.arange(split_ts):
            datax = data.iloc[spliti[i]:spliti[i + 1]]
            x_d = np.linspace(np.min(datax) * 0.9, np.max(datax) * 1.1, len(datax))
            try:
                # instantiate and fit the KDE model
                kde = KernelDensity(bandwidth=1.0, kernel='gaussian')
                kde.fit(datax.to_numpy()[:, None])
                # score_samples returns the log of the probability density
                logprob = kde.score_samples(x_d[:, None])
                if cplot:
                    ax.fill_between(x_d, np.exp(logprob), alpha=0.4, facecolor=cxx[i],
                                    label='-'.join([str(datax.index.year.min()), str(datax.index.year.max())]))
            except Exception:  # was a bare except; keep best-effort plotting
                pass
        if not isinstance(cmdata, str):
            # add climate model data for two fixed 20-year projection periods
            csplit_ts = [2040, 2080]
            for i in np.arange(2):
                datax = cmdata.loc[(cmdata.index.year >= csplit_ts[i]) & (cmdata.index.year < csplit_ts[i] + 20)]
                x_d = np.linspace(np.min(datax) * 0.9, np.max(datax) * 1.1, len(datax))
                try:
                    # instantiate and fit the KDE model
                    kde = KernelDensity(bandwidth=1.0, kernel='gaussian')
                    kde.fit(datax.to_numpy()[:, None])
                    # score_samples returns the log of the probability density
                    logprob = kde.score_samples(x_d[:, None])
                    if cplot:
                        ax.fill_between(x_d, np.exp(logprob), alpha=0.4, facecolor=cxx2[i],
                                        label='-'.join([str(datax.index.year.min()), str(datax.index.year.max())]))
                except Exception:  # was a bare except; keep best-effort plotting
                    pass
        ax.legend(loc=1, ncol=5)
    if cplot:
        # NOTE: get_cmap is deprecated in matplotlib >= 3.7; switch to
        # matplotlib.colormaps[...] once the supported version allows it.
        cmap = plt.cm.get_cmap('viridis_r')
        # fix: stripes previously used cividis_r while the colorbar showed
        # viridis_r; derive the stripe colors from the colorbar's colormap
        colors = cmap(np.linspace(0, 1, len(data)))
        for i in np.arange(len(data)):
            ax.plot([data.iloc[i], data.iloc[i]], [0, np.max(lp) * 0.9], c=colors[i])
        labcb = 'Year'
        if not isinstance(cmdata, str):
            cmap2 = plt.cm.get_cmap('plasma')
            colors2 = cmap2(np.linspace(0, 1, len(cmdata)))
            for i in np.arange(len(cmdata)):
                ax.plot([cmdata.iloc[i], cmdata.iloc[i]], [0, np.max(lp) * 0.9], c=colors2[i])
            cbar2 = plt.colorbar(plt.cm.ScalarMappable(cmap=cmap2), cax=cax2, label=labcb,
                                 ticks=[0, 0.2, 0.4, 0.6, 0.8, 1], fraction=0.0027, anchor=(1.0, 0.1))
            cbar2.ax.set_yticklabels(
                np.round(np.linspace(cmdata.index.year.min(), cmdata.index.year.max(), 6)).astype(int).astype(str))
            labcb = ''
        cbar = plt.colorbar(plt.cm.ScalarMappable(cmap=cmap), cax=cax, label=labcb, ticks=[0, 0.2, 0.4, 0.6, 0.8, 1])
        cbar.ax.set_yticklabels(
            np.round(np.linspace(data.index.year.min(), data.index.year.max(), 6)).astype(int).astype(
                str))  # vertically oriented colorbar
    ax.set_ylabel('Occurrence (KDE)')
    return fig, ax
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import pandas as pd
# One (initially empty) parcoords dimension per month, pre-seeded in calendar
# order so that filling them by label keeps the annual cycle intact.
DIMENSIONS = {
    month: dict()
    for month in (
        'January', 'February', 'March', 'April', 'May', 'June',
        'July', 'August', 'September', 'October', 'November', 'December',
    )
}
def climate_projection_parcoords(data: pd.DataFrame, fig: go.Figure = None, align_range: bool = True, colorscale = 'electric', row: int = 1, col: int = 1, lang='en'):
    """
    Parallel coordinates plot for climate projections.

    Every month of the year becomes one coordinate dimension. Because the
    dimensions are pre-seeded in calendar order (see ``DIMENSIONS``), the annual
    temperature cycle stays intact even though pandas groups months
    alphabetically. A leading 'Year' dimension colors the lines by year.

    Parameters
    ----------
    data : pandas.DataFrame
        Data to plot, indexed by a DatetimeIndex; may hold several columns
        (ground station, RCP scenario or grid cell).
    fig : plotly.graph_objects.Figure
        Optional figure to draw into. Subfigures need the ``'domain'`` type.
    align_range : bool
        If True (default), all month dimensions share one value range,
        emphasizing differences between months. If False, each dimension spans
        its own min/max, emphasizing differences between years.
    colorscale : str
        Name of the plotly colorscale for the line coloring.
    row : int
        Subplot row to draw into when ``fig`` is given.
    col : int
        Subplot column to draw into when ``fig`` is given.
    lang : str
        Either ``'en'`` or ``'de'``; currently without effect.
    """
    # shallow copy of the month-ordered dimension template
    dims = dict(DIMENSIONS)
    # collected year values, used for the line colors and the 'Year' dimension
    year_values = []
    vmin = data.min().min()
    vmax = data.max().max()
    # one group per month name
    monthly = data.groupby(data.index.map(lambda ts: ts.strftime('%B')))
    for month, month_df in monthly:
        # stack all columns into one value series (keeps the Datetime index)
        stacked = pd.melt(month_df, ignore_index=False).drop('variable', axis=1)
        dim = dict(label=month, values=stacked.values)
        if align_range:
            dim['range'] = (vmin, vmax)
        dims[month] = dim
        # NOTE(review): year_values grows by one entry per melted row of *every*
        # month, so the 'Year' dimension ends up 12x as long as each month
        # dimension — confirm plotly handles this as intended.
        year_values.extend(stacked.index.map(lambda ts: ts.year).values.tolist())
    if fig is None:
        fig = make_subplots(1, 1, specs=[[{'type': 'domain'}]])
    fig.add_trace(go.Parcoords(
        line=dict(color=year_values, colorscale=colorscale, showscale=True),
        dimensions=[dict(label='Year', values=year_values)] + list(dims.values())
    ), row=row, col=col)
    return fig
import warnings
from itertools import tee
from typing import Union, Callable, Literal, Any, Dict, Optional, Iterable
import torch
import torchmetrics
from torch import nn
def pairwise(iterable):
    """Yield overlapping pairs: s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    leading, trailing = tee(iterable)
    next(trailing, None)  # advance the second iterator by one element
    return zip(leading, trailing)
def str2callable(cls: Union[str, Callable], restriction: str = "") -> Callable:
    """Dynamically import a callable from a string."""
    # already a callable -> nothing to import
    if isinstance(cls, Callable):  # type: ignore[arg-type]
        return cls  # type: ignore[return-value]
    # enforce the optional import prefix restriction
    if isinstance(cls, str) and not cls.startswith(restriction):
        raise ValueError(
            f"Failed to import '{cls}' because "
            f"imports are restricted to '{restriction}'."
        )
    mod_path, attr_name = cls.rsplit(".", 1)  # type: ignore[union-attr]
    module = __import__(mod_path, fromlist=[attr_name])
    return getattr(module, attr_name)  # type: ignore[return-value]
def get_loss(loss_type: str) -> torchmetrics.Metric:
    """Get a loss instance by specifying a string."""
    if loss_type == "mae":
        return torchmetrics.MeanAbsoluteError()
    if loss_type == "mse":
        return torchmetrics.MeanSquaredError()
    if loss_type == "rmse":
        # RMSE is MSE without squaring the final result
        return torchmetrics.MeanSquaredError(squared=False)
    raise ValueError(
        f"Unknown loss type '{loss_type}'. Use either 'mae', 'mse' or 'rmse'."
    )
def dataloader2domain(dataloader_idx: int) -> Literal["source", "target"]:
    """Map a dataloader index to its domain name (0 -> source, 1 -> target)."""
    idx2domain = {0: "source", 1: "target"}
    if dataloader_idx not in idx2domain:
        raise RuntimeError(
            f"Expected dataloader_idx to be 0 or 1 but got {dataloader_idx}."
        )
    return idx2domain[dataloader_idx]
class OptimizerFactory:
    """Factory for creating optimizers and schedulers.

    After initialization, the factory can be called to create an optimizer with an
    optional scheduler."""

    def __init__(
        self,
        optim_type: str = "adam",
        lr: float = 1e-3,
        scheduler_type: Optional[str] = None,
        **kwargs: Any,
    ) -> None:
        """
        Create a new factory to efficiently create optimizers and schedulers.

        The factory builds an optimizer of `optim_type` and optionally attaches a
        scheduler of `scheduler_type`. Extra keyword arguments are routed by
        prefix: 'optim_*' goes to the optimizer, 'scheduler_*' to the scheduler.
        Any other keyword argument is ignored (a warning is emitted).

        Available optimizers are 'adam', 'sgd' and 'rmsprop'. Available schedulers
        are 'step', 'cosine', 'linear' and 'lambda'.

        Args:
            optim_type: The type of optimizer to create.
            lr: The learning rate to use.
            scheduler_type: The optional type of scheduler to create.
            **kwargs: Additional keyword arguments for the optimizer and scheduler.
        """
        self.optim_type = optim_type
        self.lr = lr
        self.scheduler_type = scheduler_type
        self._kwargs = kwargs
        self._warn_excess_kwargs()

    def _warn_excess_kwargs(self) -> None:
        # anything not destined for the optimizer or scheduler is ignored
        unused = [
            key for key in self._kwargs if not key.startswith(("optim_", "scheduler_"))
        ]
        if unused:
            warnings.warn(
                "The following kwargs were passed but do not start "
                "with 'optim_' or 'scheduler_' and therefore "
                f"will be ignored: {unused}."
            )

    def __call__(self, parameters: Iterable[nn.Parameter]) -> Dict[str, Any]:
        """
        Create an optimizer with an optional scheduler for the given parameters.

        The returned object is a lightning optimizer config and can be the return
        value of `configure_optimizers`.

        Args:
            parameters: The model parameters to optimize.
        Returns:
            A lightning optimizer config.
        """
        optim = self._optim_func(
            parameters, lr=self.lr, **self._prefixed_kwargs("optim_")
        )
        config: Dict[str, Any] = {"optimizer": optim}
        if self.scheduler_type is not None:
            scheduler = self._scheduler_func(
                optim, **self._prefixed_kwargs("scheduler_")
            )
            config["lr_scheduler"] = {"scheduler": scheduler}
        return config

    def _prefixed_kwargs(self, prefix: str) -> Dict[str, Any]:
        # select kwargs carrying the prefix and strip it from their names
        return {
            key.replace(prefix, ""): value
            for key, value in self._kwargs.items()
            if key.startswith(prefix)
        }

    @property
    def _optim_func(self) -> Callable:
        # map the type string to the corresponding torch optimizer class
        optims = {
            "adam": torch.optim.Adam,
            "sgd": torch.optim.SGD,
            "rmsprop": torch.optim.RMSprop,
        }
        if self.optim_type not in optims:
            raise ValueError(
                f"Unknown optimizer type '{self.optim_type}'. "
                "Use either 'adam', 'sgd' or 'rmsprop'."
            )
        return optims[self.optim_type]

    @property
    def _scheduler_func(self) -> Callable:
        # map the type string to the corresponding torch scheduler class
        schedulers = {
            "step": torch.optim.lr_scheduler.StepLR,
            "cosine": torch.optim.lr_scheduler.CosineAnnealingLR,
            "linear": torch.optim.lr_scheduler.LinearLR,
            "lambda": torch.optim.lr_scheduler.LambdaLR,
        }
        if self.scheduler_type not in schedulers:
            raise ValueError(
                f"Unknown scheduler type '{self.scheduler_type}'. "
                "Use either 'step', 'cosine', 'linear' or 'lambda'."
            )
        return schedulers[self.scheduler_type]
from typing import Tuple, List, Any, Optional, Literal, Dict
import numpy as np
import torch
from rul_datasets.utils import feature_to_tensor
from torch import nn
import rul_adapt
from rul_adapt import utils
from rul_adapt.approach.abstract import AdaptionApproach
from rul_adapt.approach.evaluation import AdaptionEvaluator
class LatentAlignFttpApproach(AdaptionApproach):
    """This first-point-to-predict estimation approach trains a GAN on healthy state
    bearing data. The discriminator can be used afterward to compute a health
    indicator for each bearing.
    The feature extractor and regressor models are used as the discriminator. The
    regressor is not allowed to have an activation function on its last layer and
    needs to use only a single output neuron because [BCEWithLogitsLoss]
    [torch.nn.BCEWithLogitsLoss] is used. The generator receives noise with the shape
    [batch_size, 1, noise_dim]. The generator needs an output with enough elements so
    that it can be reshaped to the same shape as the real input data. The reshaping
    is done internally.
    Both generator and discriminator are trained at once by using a
    [Gradient Reversal Layer][rul_adapt.loss.adaption.GradientReversalLayer]
    between them.
    Examples:
        >>> from rul_adapt import model, approach
        >>> feat_ex = model.CnnExtractor(1, [16, 16, 1], 10, fc_units=16)
        >>> reg = model.FullyConnectedHead(16, [1])
        >>> gen = model.CnnExtractor(1, [1], 10, padding=True)
        >>> fttp_model = approach.LatentAlignFttpApproach(1e-4, 10)
        >>> fttp_model.set_model(feat_ex, reg, gen)
        >>> health_indicator = fttp_model(torch.randn(16, 1, 10)).std()
    """
    # extra model that must be saved/restored with checkpoints (see AdaptionApproach)
    CHECKPOINT_MODELS = ["_generator"]
    # set via set_model; declared here so checkpointing can find it by name
    _generator: nn.Module
    def __init__(
        self,
        noise_dim: int,
        **optim_kwargs: Any,
    ):
        """
        Create a new FTTP estimation approach.
        The generator is set by the `set_model` function together with the feature
        extractor and regressor.
        For more information about the possible optimizer keyword arguments,
        see [here][rul_adapt.utils.OptimizerFactory].
        Args:
            noise_dim: The size of the last dimension of the noise tensor.
            **optim_kwargs: Keyword arguments for the optimizer, e.g. learning rate.
        """
        super().__init__()
        self.noise_dim = noise_dim
        self.optim_kwargs = optim_kwargs
        # real/fake classification loss; expects raw logits from the regressor
        self.gan_loss = torch.nn.BCEWithLogitsLoss()
        self.grl = rul_adapt.loss.adaption.GradientReversalLayer()
        self._get_optimizer = utils.OptimizerFactory(**self.optim_kwargs)
        self.save_hyperparameters()
    def set_model(
        self,
        feature_extractor: nn.Module,
        regressor: nn.Module,
        generator: Optional[nn.Module] = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """
        Set the feature extractor, regressor (forming the discriminator) and
        generator for this approach.
        The regressor is not allowed to have an activation function on its last layer
        and needs to use only a single output neuron. The generator receives noise
        with the shape [batch_size, 1, noise_dim]. The generator needs an output with
        enough elements so that it can be reshaped to the same shape as the real
        input data. The reshaping is done internally.
        Args:
            feature_extractor: The feature extraction network.
            regressor: The regressor functioning as the head of the discriminator.
            generator: The generator network.
        """
        super().set_model(feature_extractor, regressor)
        if generator is None:
            raise ValueError("Generator not set. This approach is unlikely to work.")
        self._generator = generator
    @property
    def generator(self) -> nn.Module:
        """The generator network."""
        if hasattr(self, "_generator"):
            return self._generator
        else:
            raise RuntimeError("Generator used before 'set_model' was called.")
    def configure_optimizers(self) -> Dict[str, Any]:
        """Configure an optimizer for the generator and discriminator."""
        return self._get_optimizer(self.parameters())
    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Predict the health indicator for the given inputs."""
        return self.regressor(self.feature_extractor(inputs))
    def training_step(self, batch: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        Execute one training step.
        The batch is a tuple of the features and the labels. The labels are ignored.
        A noise tensor is passed to the generator to generate fake features. The
        discriminator classifies if the features are real or fake and the binary
        cross entropy loss is calculated. Real features receive the label zero and
        the fake features one.
        Both generator and discriminator are trained at once by using a
        [Gradient Reversal Layer][rul_adapt.loss.adaption.GradientReversalLayer]
        between them. At the end, the loss is logged.
        Args:
            batch: A tuple of feature and label tensors.
        Returns:
            The classification loss.
        """
        features, _ = batch  # labels are unused for GAN training
        batch_size = features.shape[0]
        device: torch.device = self.device  # type: ignore[assignment]
        # real windows are classified against the label zero
        pred_real = self.forward(features)
        loss_real = self.gan_loss(pred_real, torch.zeros(batch_size, 1, device=device))
        # fake windows built from noise are classified against the label one; the
        # GRL flips the generator's gradients so both nets train in one pass
        noise = torch.randn(batch_size, 1, self.noise_dim, device=device)
        fake_features = self.grl(self.generator(noise)).reshape_as(features)
        pred_fake = self.forward(fake_features)
        loss_fake = self.gan_loss(pred_fake, torch.ones(batch_size, 1, device=device))
        loss = (loss_real + loss_fake) / 2
        self.log("train/loss", loss)
        return loss
def get_first_time_to_predict(
    fttp_model: LatentAlignFttpApproach,
    features: np.ndarray,
    window_size: int,
    chunk_size: int,
    healthy_index: int,
    threshold_coefficient: float,
) -> int:
    """
    Get the first time step to predict for the given features.
    The features are pre-processed via the [extract_chunk_windows]
    [rul_adapt.approach.latent_align.extract_chunk_windows] function and fed in
    batches to the `fttp_model`. Each batch consists of the chunk windows that end in
    the same original feature window. The health indicator for the original window is
    calculated as the standard deviation of the predictions of the `fttp_model`.
    The first-time-to-predict is the first time step where the health indicator is
    larger than `threshold_coefficient` times the mean of the health indicator for
    the first `healthy_index` time steps. If the threshold is never exceeded,
    a RuntimeError is raised.
    Args:
        fttp_model: The model to use for the health indicator calculation.
        features: The features to calculate the first-time-to-predict for.
        window_size: The size of the chunk windows to extract.
        chunk_size: The size of the chunks for each chunk window to extract.
        healthy_index: The index of the last healthy time step.
        threshold_coefficient: The threshold coefficient for the health indicator.
    Returns:
        The original window index of the first-time-to-predict.
    Raises:
        ValueError: If `threshold_coefficient` is not greater than one.
        RuntimeError: If the health indicator never exceeds the threshold.
    """
    if threshold_coefficient <= 1:
        raise ValueError("Threshold coefficient needs to be greater than one.")
    health_indicator = get_health_indicator(
        fttp_model, features, window_size, chunk_size
    )
    offset = len(features) - len(health_indicator)  # windowing cuts off first windows
    # baseline health level over the (offset-adjusted) healthy period
    healthy = np.mean(health_indicator[: healthy_index - offset])
    over_thresh = np.argwhere(health_indicator > threshold_coefficient * healthy)
    if len(over_thresh) == 0:
        raise RuntimeError("Health indicator never passes threshold.")
    # re-add the offset to map back to the original window index
    fttp = over_thresh[0, 0] + offset
    return fttp
@torch.no_grad()  # inference only; gradients are never needed here
def get_health_indicator(
    fttp_model: nn.Module, features: np.ndarray, window_size: int, chunk_size: int
) -> np.ndarray:
    """
    Get the health indicator for the given features.
    The features are pre-processed via the [extract_chunk_windows]
    [rul_adapt.approach.latent_align.extract_chunk_windows] function and fed in
    batches to the `fttp_model`. Each batch consists of the chunk windows that end in
    the same original feature window. The health indicator for the original window is
    calculated as the standard deviation of the predictions of the `fttp_model`.
    The length of the returned health indicator array is shorter than the `features`
    array by `window_size - 1`, due to the chunk windowing. This means the first
    health indicator value belongs to the original window with the index
    `window_size - 1`.
    Args:
        fttp_model: The model to use for the health indicator calculation.
        features: The features to calculate the health indicator for.
        window_size: The size of the chunk windows to extract.
        chunk_size: The size of the chunks for each chunk window to extract.
    Returns:
        The health indicator for the original windows.
    """
    chunked = extract_chunk_windows(features, window_size, chunk_size)
    chunks_per_window = features.shape[1] // chunk_size
    # len(chunked) is an exact multiple of chunks_per_window by construction, so
    # each batch holds all chunk windows ending in the same original window
    batches = np.split(chunked, len(chunked) // chunks_per_window)
    health_indicator = np.empty(len(batches))
    for i, batch in enumerate(batches):
        preds = fttp_model(feature_to_tensor(batch, torch.float))
        # spread of the model outputs over one window is the health indicator
        health_indicator[i] = np.std(preds.detach().numpy())
    return health_indicator
def extract_chunk_windows(
    features: np.ndarray, window_size: int, chunk_size: int
) -> np.ndarray:
    """
    Extract chunk windows from the given features of shape `[num_org_windows,
    org_window_size, num_features]`.
    A chunk window is a window that consists of `window_size` chunks. Each original
    window is split into chunks of size `chunk_size`. A chunk window is then formed
    by concatenating chunks from the same position inside `window_size` consecutive
    original windows. Therefore, each original window is represented by
    `org_window_size // chunk_size` chunk windows. The original window size must
    therefore be divisible by the chunk size.
    Args:
        features: The features to extract the chunk windows from.
        window_size: The number of consecutive original windows to form a chunk
            window from.
        chunk_size: The size of the chunks to extract from the original windows.
    Returns:
        Chunk windows of shape `[num_windows, window_size * chunk_size, num_features]`.
    Raises:
        ValueError: If the original window size is not divisible by `chunk_size`.
    """
    old_window_size = features.shape[1]
    # validate the documented precondition instead of silently producing garbage
    if old_window_size % chunk_size != 0:
        raise ValueError(
            f"The original window size ({old_window_size}) must be divisible "
            f"by the chunk size ({chunk_size})."
        )
    window_multiplier = old_window_size // chunk_size  # chunk windows per org window
    num_new_windows = (features.shape[0] - window_size + 1) * window_multiplier
    # Build a flat gather index: for each new window, take `window_size` chunks
    # from the same chunk position of consecutive original windows.
    chunk_idx = np.tile(np.arange(chunk_size), num_new_windows * window_size)
    # offset of each chunk inside its original window
    intra_offsets = np.tile(np.arange(window_size), num_new_windows) * old_window_size
    # offset of the first chunk of each new window
    inter_offsets = np.repeat(np.arange(num_new_windows), window_size) * chunk_size
    offsets = np.repeat(intra_offsets + inter_offsets, chunk_size)
    window_idx = chunk_idx + offsets
    flat_features = features.reshape((-1, features.shape[2]))
    flat_windows = flat_features[window_idx]
    windows = flat_windows.reshape((num_new_windows, window_size * chunk_size, -1))
    return windows
class ChunkWindowExtractor:
    """Callable that re-windows feature/target arrays into chunk windows.

    Wraps [extract_chunk_windows][rul_adapt.approach.latent_align.
    extract_chunk_windows] and repeats the targets so that every chunk window
    carries the target of the original window it ends in.
    """

    def __init__(self, window_size: int, chunk_size: int) -> None:
        self.window_size = window_size
        self.chunk_size = chunk_size

    def __call__(
        self, features: np.ndarray, targets: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        windows_per_org = features.shape[1] // self.chunk_size
        windows = extract_chunk_windows(features, self.window_size, self.chunk_size)
        # windowing drops the first (window_size - 1) targets; each remaining
        # target is repeated once per chunk window of its original window
        repeated_targets = targets[self.window_size - 1 :].repeat(windows_per_org)
        return windows, repeated_targets
class LatentAlignApproach(AdaptionApproach):
    """
    The latent alignment approach introduces four latent space alignment losses to
    align the latent space of a shared feature extractor to both source and target
    domain.
    Examples:
        >>> from rul_adapt import model, approach
        >>> feat_ex = model.CnnExtractor(1, [16, 16, 1], 10, fc_units=16)
        >>> reg = model.FullyConnectedHead(16, [1])
        >>> latent_align = approach.LatentAlignApproach(0.1, 0.1, 0.1, 0.1, lr=0.001)
        >>> latent_align.set_model(feat_ex, reg)
    """
    def __init__(
        self,
        alpha_healthy: float,
        alpha_direction: float,
        alpha_level: float,
        alpha_fusion: float,
        loss_type: Literal["mse", "mae", "rmse"] = "mse",
        rul_score_mode: Literal["phm08", "phm12"] = "phm08",
        evaluate_degraded_only: bool = False,
        labels_as_percentage: bool = False,
        **optim_kwargs: Any,
    ) -> None:
        """
        Create a new latent alignment approach.
        Each of the alphas controls the influence of the respective loss on the
        training. Commonly they are all set to the same value.
        For more information about the possible optimizer keyword arguments,
        see [here][rul_adapt.utils.OptimizerFactory].
        Args:
            alpha_healthy: The influence of the healthy state alignment loss.
            alpha_direction: The influence of the degradation direction alignment loss.
            alpha_level: The influence of the degradation level regularization loss.
            alpha_fusion: The influence of the degradation fusion (MMD) loss.
            loss_type: The type of regression loss to use.
            rul_score_mode: The mode for the val and test RUL score, either 'phm08'
                or 'phm12'.
            evaluate_degraded_only: Whether to only evaluate the RUL score on degraded
                samples.
            labels_as_percentage: Whether to multiply labels by 100 to get percentages
            **optim_kwargs: Keyword arguments for the optimizer, e.g. learning rate.
        """
        super().__init__()
        self.alpha_healthy = alpha_healthy
        self.alpha_direction = alpha_direction
        self.alpha_level = alpha_level
        self.alpha_fusion = alpha_fusion
        self.loss_type = loss_type
        self.rul_score_mode = rul_score_mode
        self.evaluate_degraded_only = evaluate_degraded_only
        self.labels_as_percentage = labels_as_percentage
        self.optim_kwargs = optim_kwargs
        # training metrics
        self.train_mse = utils.get_loss(self.loss_type)
        self.healthy_align = rul_adapt.loss.HealthyStateAlignmentLoss()
        self.direction_align = rul_adapt.loss.DegradationDirectionAlignmentLoss()
        self.level_align = rul_adapt.loss.DegradationLevelRegularizationLoss()
        self.fusion_align = rul_adapt.loss.MaximumMeanDiscrepancyLoss(num_kernels=5)
        self._get_optimizer = utils.OptimizerFactory(**self.optim_kwargs)
        # shared val/test metric bookkeeping for source and target domains
        self.evaluator = AdaptionEvaluator(
            self.forward, self.log, self.rul_score_mode, self.evaluate_degraded_only
        )
        self.save_hyperparameters()
    def configure_optimizers(self) -> Dict[str, Any]:
        """Configure an optimizer."""
        optim = self._get_optimizer(self.parameters())
        return optim
    def forward(self, features: torch.Tensor) -> torch.Tensor:
        """Predict the RUL values for a batch of input features."""
        preds = self.regressor(self.feature_extractor(features))
        if self.labels_as_percentage:
            # model predicts percentages; convert back to normed RUL values
            preds = self._from_percentage(preds)
        return preds
    def training_step(
        self, batch: Tuple[torch.Tensor, ...], batch_idx: int
    ) -> torch.Tensor:
        """
        Execute one training step.
        The `batch` contains the following tensors in order:
        * The source domain features.
        * The steps in degradation for the source features.
        * The RUL labels for the source features.
        * The target domain features.
        * The steps in degradation for the target features.
        * The healthy state features for both domains.
        The easies way to produce such a batch is using the [LatentAlignDataModule]
        [rul_datasets.adaption.LatentAlignDataModule].
        The source, target and healthy features are passed through the feature
        extractor. Afterward, these high-level features are used to compute the
        alignment losses. The source domain RUL predictions are computed using the
        regressor and used to calculate the MSE loss. The losses are then combined.
        Each separate and the combined loss are logged.
        Args:
            batch: The batch of data.
            batch_idx: The index of the batch.
        Returns:
            The combined loss.
        """
        # unpack the source tensors from the front of the batch ...
        source, source_degradation_steps, source_labels, *_ = batch
        # ... and the target/healthy tensors from the back
        *_, target, target_degradation_steps, healthy = batch
        if self.labels_as_percentage:
            source_labels = self._to_percentage(source_labels)
        else:
            source_labels = source_labels[:, None]
        healthy = self.feature_extractor(healthy)
        source = self.feature_extractor(source)
        target = self.feature_extractor(target)
        rul_predictions = self.regressor(source)
        mse_loss = self.train_mse(rul_predictions, source_labels)
        healthy_loss = self.healthy_align(healthy)
        direction_loss = self.direction_align(healthy, torch.cat([source, target]))
        level_loss = self.level_align(
            healthy, source, source_degradation_steps, target, target_degradation_steps
        )
        fusion_loss = self.fusion_align(source, target)
        # weighted sum of the regression loss and the four alignment losses
        loss = (
            mse_loss
            + self.alpha_healthy * healthy_loss
            + self.alpha_direction * direction_loss
            + self.alpha_level * level_loss
            + self.alpha_fusion * fusion_loss
        )
        self.log("train/loss", loss)
        self.log("train/mse", self.train_mse)
        self.log("train/healthy_align", self.healthy_align)
        self.log("train/direction_align", self.direction_align)
        self.log("train/level_align", self.level_align)
        self.log("train/fusion_align", self.fusion_align)
        return loss
    def _to_percentage(self, source_labels: torch.Tensor) -> torch.Tensor:
        """Convert RUL labels to percentages assuming they are normed between [0, 1]."""
        return source_labels[:, None] * 100
    def _from_percentage(self, predictions: torch.Tensor) -> torch.Tensor:
        """Convert percentage predictions back to normed RUL values."""
        return predictions / 100
    def validation_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one validation step.
        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `val`.
        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.validation(batch, domain)
    def test_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one test step.
        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `test`.
        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.test(batch, domain)
import inspect
import warnings
from abc import ABCMeta
from typing import Any, Dict, List, Set
import hydra.utils
import pytorch_lightning as pl
from torch import nn
# Constructor argument names that are skipped when collecting init args to store
# in a checkpoint (they are never useful for re-instantiating a model).
EXCLUDED_ARGS = ["self", "device", "dtype"]  # init args ignored when checkpointing
class AdaptionApproach(pl.LightningModule, metaclass=ABCMeta):
"""
This abstract class is the base of all adaption approaches.
It defines that there needs to be a `feature_extractor`, a `regressor`. These
members can be accessed via read-only properties. The `feature_extractor` and
`regressor` are trainable neural networks.
All child classes are supposed to implement their own constructors. The
`feature_extractor` and `regressor` should explicitly not be arguments of the
constructor and should be set by calling [set_model]
[rul_adapt.approach.abstract.AdaptionApproach.set_model]. This way, the approach can
be initialized with all hyperparameters first and afterward supplied with the
networks. This is useful for initializing the networks with pre-trained weights.
Because models are constructed outside the approach, the default checkpointing
mechanism of PyTorch Lightning fails to load checkpoints of AdaptionApproaches.
We extended the checkpointing mechanism by implementing the `on_save_checkpoint`
and `on_load_checkpoint` callbacks to make it work. If a subclass uses an
additional model, besides feature extractor and regressor, that is not
initialized in the constructor, the subclass needs to implement the
`CHECKPOINT_MODELS` class variable. This variable is a list of model names to be
included in the checkpoint. For example, if your approach has an additional model
`self._domain_disc`, the `CHECKPOINT_MODELS` variable should be set to
`['_domain_disc']`. Otherwise, loading a checkpoint of this approach will fail.
"""
CHECKPOINT_MODELS: List[str] = []
_feature_extractor: nn.Module
_regressor: nn.Module
_hparams_initial: Dict[str, Any]
_logged_models: Dict[str, Set[str]]
def set_model(
self,
feature_extractor: nn.Module,
regressor: nn.Module,
*args: Any,
**kwargs: Any,
) -> None:
"""
Set the feature extractor and regressor for this approach.
Child classes can override this function to add additional models to an
approach. The `args` and `kwargs` making this possible are ignored in this
function.
Args:
feature_extractor: The feature extraction network.
regressor: The RUL regression network.
"""
self._feature_extractor = feature_extractor
self._regressor = regressor
if args:
warnings.warn("Additional position args were supplied, which are ignored.")
if kwargs:
warnings.warn("Additional keyword args were supplied, which are ignored.")
self.log_model_hyperparameters("feature_extractor", "regressor")
def log_model_hyperparameters(self, *model_names: str) -> None:
if not hasattr(self, "_logged_models"):
self._logged_models = {}
hparams_initial = self.hparams_initial
for model_name in model_names:
model_hparams = self._get_model_hparams(model_name)
hparams_initial.update(model_hparams)
self._logged_models[model_name] = set(model_hparams.keys())
self._hparams_initial = hparams_initial
self._set_hparams(self._hparams_initial)
def _get_model_hparams(self, model_name):
prefix = f"model_{model_name.lstrip('_')}"
model = getattr(self, model_name)
hparams = {f"{prefix}_type": type(model).__name__}
init_args = _get_init_args(model, "logging model hyperparameters")
hparams.update({f"{prefix}_{k}": v for k, v in init_args.items()})
return hparams
@property
def feature_extractor(self) -> nn.Module:
"""The feature extraction network."""
if hasattr(self, "_feature_extractor"):
return self._feature_extractor
else:
raise RuntimeError("Feature extractor used before 'set_model' was called.")
@property
def regressor(self) -> nn.Module:
"""The RUL regression network."""
if hasattr(self, "_regressor"):
return self._regressor
else:
raise RuntimeError("Regressor used before 'set_model' was called.")
def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
self._make_model_hparams_storable(checkpoint)
to_checkpoint = ["_feature_extractor", "_regressor"] + self.CHECKPOINT_MODELS
configs = {m: _get_hydra_config(getattr(self, m)) for m in to_checkpoint}
checkpoint["model_configs"] = configs
def _make_model_hparams_storable(self, checkpoint: Dict[str, Any]) -> None:
excluded_keys = set()
for keys in self._logged_models.values():
excluded_keys.update(keys)
checkpoint["hyper_parameters"] = {
k: v
for k, v in checkpoint["hyper_parameters"].items()
if k not in excluded_keys
}
checkpoint["logged_models"] = list(self._logged_models)
def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
for name, config in checkpoint["model_configs"].items():
setattr(self, name, hydra.utils.instantiate(config))
self.log_model_hyperparameters(*checkpoint["logged_models"])
def _get_hydra_config(model: nn.Module) -> Dict[str, Any]:
    """Build a hydra config dict that re-instantiates `model` with the init
    args recovered from its attributes."""
    cls = type(model)
    target = ".".join([cls.__module__, cls.__qualname__])
    return {"_target_": target, **_get_init_args(model)}
def _get_init_args(
    obj: nn.Module, activity: str = "writing a checkpoint"
) -> Dict[str, Any]:
    """Recover the constructor arguments of a module from its attributes.

    `ModuleList` and `Sequential` get special treatment because their init
    args cannot be read back as plain attributes. For any other module, each
    init parameter must be stored on the instance under the same name.
    """
    if isinstance(obj, nn.ModuleList):
        # workaround because ModuleList's init arg is shadowed by a property
        return {"modules": [_get_hydra_config(child) for child in obj]}
    if isinstance(obj, nn.Sequential):
        # workaround because Sequential expects positional args only
        return {"_args_": [_get_hydra_config(child) for child in obj]}

    init_args: Dict[str, Any] = {}
    params = inspect.signature(type(obj).__init__).parameters
    for name in (p for p in params if p not in EXCLUDED_ARGS):
        _check_has_attr(obj, name, activity)
        value = getattr(obj, name)
        init_args[name] = _get_hydra_config(value) if isinstance(value, nn.Module) else value
    return init_args
def _check_has_attr(obj: Any, param: str, activity: str) -> None:
if not hasattr(obj, param):
raise RuntimeError(
f"Error while {activity}. "
f"The nn.Module of type '{type(obj)}' has an initialization parameter "
f"named '{param}' which is not saved as a member variable, i.e. "
f"'self.{param}'. Therefore, we cannot retrieve the value of "
f"'{param}' the object was initialized with."
) | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/approach/abstract.py | 0.911774 | 0.408218 | abstract.py | pypi |
from typing import List, Literal, Any, Dict
import torch
import rul_adapt
from rul_adapt import utils
from rul_adapt.approach.abstract import AdaptionApproach
from rul_adapt.approach.evaluation import AdaptionEvaluator
class MmdApproach(AdaptionApproach):
    """The MMD approach uses the Maximum Mean Discrepancy to adapt a feature
    extractor to be used with the source regressor.

    The regressor needs the same number of input units as the feature extractor has
    output units.

    Examples:
        >>> from rul_adapt import model
        >>> from rul_adapt import approach
        >>> feat_ex = model.CnnExtractor(1, [16, 16, 1], 10, fc_units=16)
        >>> reg = model.FullyConnectedHead(16, [1])
        >>> mmd = approach.MmdApproach(0.01)
        >>> mmd.set_model(feat_ex, reg)
    """

    def __init__(
        self,
        mmd_factor: float,
        num_mmd_kernels: int = 5,
        loss_type: Literal["mse", "rmse", "mae"] = "mse",
        rul_score_mode: Literal["phm08", "phm12"] = "phm08",
        evaluate_degraded_only: bool = False,
        **optim_kwargs: Any,
    ) -> None:
        """
        Create a new MMD approach.

        The strength of the influence of the MMD loss on the feature
        extractor is controlled by the `mmd_factor`. The higher it is, the stronger
        the influence.

        For more information about the possible optimizer keyword arguments,
        see [here][rul_adapt.utils.OptimizerFactory].

        Args:
            mmd_factor: The strength of the MMD loss' influence.
            num_mmd_kernels: The number of kernels for the MMD loss.
            loss_type: The type of regression loss, either 'mse', 'rmse' or 'mae'.
            rul_score_mode: The mode for the val and test RUL score, either 'phm08'
                or 'phm12'.
            evaluate_degraded_only: Whether to only evaluate the RUL score on degraded
                samples.
            **optim_kwargs: Keyword arguments for the optimizer, e.g. learning rate.
        """
        super().__init__()

        self.mmd_factor = mmd_factor
        self.num_mmd_kernels = num_mmd_kernels
        self.loss_type = loss_type
        self.rul_score_mode = rul_score_mode
        self.evaluate_degraded_only = evaluate_degraded_only
        self.optim_kwargs = optim_kwargs

        # training metrics
        self.train_source_loss = utils.get_loss(self.loss_type)
        self.mmd_loss = rul_adapt.loss.MaximumMeanDiscrepancyLoss(self.num_mmd_kernels)

        self._get_optimizer = utils.OptimizerFactory(**self.optim_kwargs)

        self.evaluator = AdaptionEvaluator(
            self.forward, self.log, self.rul_score_mode, self.evaluate_degraded_only
        )

        self.save_hyperparameters()

    def configure_optimizers(self) -> Dict[str, Any]:
        """Configure an optimizer."""
        return self._get_optimizer(self.parameters())

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Predict the RUL values for a batch of input features."""
        return self.regressor(self.feature_extractor(inputs))

    def training_step(self, batch: List[torch.Tensor], batch_idx: int) -> torch.Tensor:
        """
        Execute one training step.

        The `batch` argument is a list of three tensors representing the source
        features, source labels and target features. Both types of features are fed
        to the feature extractor. Then the regression loss for the source domain and
        the MMD loss between domains is computed. The regression, MMD and combined
        loss are logged.

        Args:
            batch: A list of a source feature, source label and target feature tensors.
            batch_idx: The index of the current batch.
        Returns:
            The combined loss.
        """
        source, source_labels, target = batch
        source_labels = source_labels[:, None]

        source = self.feature_extractor(source)
        target = self.feature_extractor(target)
        rul_preds = self.regressor(source)

        # the regression loss is configurable via `loss_type`, so it is not
        # necessarily an MSE loss (previously misnamed `mse_loss`)
        source_loss = self.train_source_loss(rul_preds, source_labels)
        mmd_loss = self.mmd_loss(source, target)
        loss = source_loss + self.mmd_factor * mmd_loss

        self.log("train/loss", loss)
        self.log("train/source_loss", self.train_source_loss)  # logs the metric object
        self.log("train/mmd", self.mmd_loss)

        return loss

    def validation_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one validation step.

        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `val`.

        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.validation(batch, domain)

    def test_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one test step.

        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `test`.

        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.test(batch, domain)
from itertools import product
from queue import Queue
from typing import List, Tuple, Optional
import numpy as np
import pywt # type: ignore
import scipy.stats # type: ignore
from dtaidistance import dtw # type: ignore
from rul_datasets.reader import AbstractReader
from rul_datasets.utils import extract_windows
from scipy.stats import wasserstein_distance # type: ignore
from sklearn.preprocessing import MinMaxScaler # type: ignore
def rms(inputs: np.ndarray) -> np.ndarray:
    """Root-mean-square of each window (reduces the second-to-last axis)."""
    return np.sqrt(np.square(inputs).mean(axis=-2))
def p2p(inputs: np.ndarray) -> np.ndarray:
    """Peak-to-peak value (maximum minus minimum) over the window axis."""
    return np.ptp(inputs, axis=-2)
def skewness(inputs: np.ndarray) -> np.ndarray:
    """Biased sample skewness over the window axis."""
    window_size = inputs.shape[-2]
    centered = inputs - inputs.mean(axis=-2, keepdims=True)
    third_moment = np.sum(centered**3, axis=-2) / window_size
    return third_moment / inputs.std(axis=-2) ** 3
def impulse_factor(inputs: np.ndarray) -> np.ndarray:
    """Ratio of the absolute peak to the mean absolute value per window."""
    magnitude = np.abs(inputs)
    return magnitude.max(axis=-2) / magnitude.mean(axis=-2)
def median_absolute(inputs: np.ndarray) -> np.ndarray:
    """Median of the absolute values over the window axis."""
    return np.median(np.absolute(inputs), axis=-2)
def mean_absolute(inputs: np.ndarray) -> np.ndarray:
    """Mean of the absolute values over the window axis."""
    return np.absolute(inputs).mean(axis=-2)
def max_absolute(inputs: np.ndarray) -> np.ndarray:
    """Maximum absolute value over the window axis."""
    return np.absolute(inputs).max(axis=-2)
def kurtosis(inputs: np.ndarray) -> np.ndarray:
    """Pearson (non-excess, biased) kurtosis over the window axis."""
    centered = inputs - inputs.mean(axis=-2, keepdims=True)
    second_moment = np.mean(centered**2, axis=-2)
    fourth_moment = np.mean(centered**4, axis=-2)
    return fourth_moment / second_moment**2
def std(inputs: np.ndarray) -> np.ndarray:
    """Population standard deviation (ddof=0) over the window axis."""
    return inputs.std(axis=-2)
def margin_factor(inputs: np.ndarray) -> np.ndarray:
    """Absolute peak divided by the squared mean of root absolute values."""
    magnitude = np.abs(inputs)
    denominator = np.mean(np.sqrt(magnitude), axis=-2) ** 2
    return magnitude.max(axis=-2) / denominator
def energy(inputs: np.ndarray) -> np.ndarray:
    """Signal energy: the sum of squares over the window axis."""
    return np.square(inputs).sum(axis=-2)
def gini_factor(inputs: np.ndarray) -> np.ndarray:
    """Gini index of each window.

    Accepts a batch `[num_windows, window_size, features]` or a single window
    `[window_size, features]`; in the latter case the batch axis is added and
    removed transparently.
    """
    if inputs.ndim == 3:
        return _approx_batched_gini_factor(inputs)
    return _approx_batched_gini_factor(inputs[None]).squeeze(0)
def _approx_batched_gini_factor(inputs: np.ndarray) -> np.ndarray:
window_size = inputs.shape[1]
inputs = np.sort(inputs, axis=1)
cumsum = np.cumsum(inputs, axis=1)
gini = (window_size + 1 - 2 * np.sum(cumsum, axis=1) / cumsum[:, -1]) / window_size
return gini
def band_energies(inputs: np.ndarray) -> np.ndarray:
    """Energies of the 16 frequency bands of a level-4 wavelet packet
    decomposition (dmey wavelet) over the window axis."""
    packet = pywt.WaveletPacket(inputs, wavelet="dmey", maxlevel=4, axis=-2)
    return np.concatenate(
        [energy(node.data) for node in packet.get_level(4)], axis=-1
    )
def std_ihc(inputs: np.ndarray) -> np.ndarray:
    """Standard deviation of the inverse hyperbolic cosine (complex-valued
    for inputs below one)."""
    return np.std(np.arccosh(inputs.astype(complex)), axis=-2)
def std_ihs(inputs: np.ndarray) -> np.ndarray:
    """Standard deviation of the inverse hyperbolic sine."""
    return np.arcsinh(inputs).std(axis=-2)
class VibrationFeatureExtractor:
    """This class extracts 30 different features from a raw acceleration signal.

    The features are: RMS, kurtosis, peak2peak, standard deviation, skewness,
    margin factor, impulse factor, energy, median absolute, gini factor, maximum
    absolute, mean absolute, energies of the 16 bands resulting from wavelet packet
    decomposition, standard deviation of arccosh and arcsinh. If the input has n
    features, n*30 features are extracted. Additionally, it features a scaler that
    can be fit to scale all extracted features between [0, 1]."""

    _scaler: Optional[MinMaxScaler]

    def __init__(
        self, num_input_features: int, feature_idx: Optional[List[int]] = None
    ) -> None:
        """
        Create a new vibration feature extractor with the selected features.

        The features are sorted as f1_1, .., f1_j, ..., fi_j, where i is the index of
        the computed feature (between 0 and 30) and j is the index of the raw
        feature (between 0 and `num_input_features`).

        Args:
            num_input_features: The number of input features.
            feature_idx: The indices of the features to compute.
        Raises:
            ValueError: If any feature index lies outside
                `[0, 30 * num_input_features)`.
        """
        self.num_input_features = num_input_features
        # _extract_all produces 30 features per raw input channel; the previous
        # hard-coded limit of 60 was only correct for exactly two channels and
        # accepted the out-of-bounds index 60
        num_extracted = 30 * num_input_features
        self.feature_idx = (
            list(range(num_extracted)) if feature_idx is None else feature_idx
        )
        if min(self.feature_idx) < 0 or max(self.feature_idx) >= num_extracted:
            raise ValueError(
                f"Feature indices need to be between 0 and {num_extracted - 1}."
            )
        self._scaler = None

    def __call__(
        self, features: np.ndarray, targets: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Extract the features from the input and optionally scale them.

        The features should have the shape `[num_windows, window_size,
        num_input_features]` and the targets `[num_windows]`.

        Args:
            features: The input features.
            targets: The input targets.
        Returns:
            The extracted features and input targets.
        """
        features = self._extract_selected(features)
        if self._scaler is not None:
            features = self._scaler.transform(features)

        return features, targets

    def _extract_selected(self, features: np.ndarray) -> np.ndarray:
        # compute all features once, then keep only the selected columns
        return _extract_all(features, self.num_input_features)[:, self.feature_idx]

    def fit(self, features: List[np.ndarray]) -> "VibrationFeatureExtractor":
        """
        Fit the internal scaler on a list of raw feature time series.

        The time series are passed through the feature extractor and then used to fit
        the internal min-max scaler. Each time series in the list should have the
        shape `[num_windows, window_size, num_input_features]`.

        Args:
            features: The list of raw feature time series.
        Returns:
            The feature extractor itself.
        """
        features = [self._extract_selected(f) for f in features]
        self._scaler = MinMaxScaler()
        for feat in features:
            # partial_fit keeps memory bounded for long series lists
            self._scaler.partial_fit(feat)

        return self
def _extract_all(features: np.ndarray, num_features: int) -> np.ndarray:
    """Compute all 30 feature types and stack them along the last axis.

    The 16 wavelet band energies are reshaped so that the output is ordered
    feature-major, i.e. all raw channels of one feature type are adjacent.
    """
    # shape [16, num_windows, num_features] so the bands can be unpacked
    bands = band_energies(features).reshape((-1, 16, num_features)).transpose(1, 0, 2)
    feature_list = [
        rms(features),
        kurtosis(features),
        p2p(features),
        std(features),
        skewness(features),
        margin_factor(features),
        impulse_factor(features),
        energy(features),
        median_absolute(features),
        gini_factor(features),
        max_absolute(features),
        mean_absolute(features),
        *bands,  # unpack the 16 band energies
        std_ihc(features),
        std_ihs(features),
    ]
    return np.concatenate(feature_list, axis=1)
def select_features(
    source: AbstractReader, target: AbstractReader, num_features: int
) -> List[int]:
    """
    Select the most transferable features between source and target domain.

    30 features are considered: RMS, kurtosis, peak2peak, standard deviation, skewness,
    margin factor, impulse factor, energy, median absolute, gini factor, maximum
    absolute, mean absolute, energies of the 16 bands resulting from wavelet packet
    decomposition, standard deviation of arccosh and arcsinh. If the input has n raw
    features, n*30 features are extracted.

    The `dev` splits of both domains are used to calculate a distance metric based on
    Dynamic Time Warping and the Wasserstein Distance. The indices of the
    `num_feature` features with the lowest distances are returned.

    Args:
        source: The reader of the source domain.
        target: The reader of the target domain.
        num_features: The number of transferable features to return.
    Returns:
        The indices of features ordered by transferability.
    """
    source.prepare_data()
    target.prepare_data()
    source_runs, _ = source.load_split("dev")
    target_runs, _ = target.load_split("dev")

    num_raw_features = source_runs[0].shape[-1]
    distances = np.zeros(30 * num_raw_features)
    for source_run, target_run in product(source_runs, target_runs):
        source_feats = _extract_all(source_run, num_features=num_raw_features)
        target_feats = _extract_all(target_run, num_features=num_raw_features)
        # accumulate the domain distance per extracted feature column
        for i in range(len(distances)):
            distances[i] += _domain_distance(source_feats[:, i], target_feats[:, i])

    return np.argsort(distances)[:num_features].tolist()
def _domain_distance(source: np.ndarray, target: np.ndarray, ratio=0.4) -> float:
    """Convex combination of DTW and Wasserstein distance between two series."""
    dtw_part = ratio * dtw.distance_fast(source, target)
    wasserstein_part = (1 - ratio) * wasserstein_distance(source, target)
    return dtw_part + wasserstein_part
def mac(inputs: np.ndarray, window_size: int, wavelet: str = "dmey") -> np.ndarray:
    """
    Calculate the moving average correlation (MAC) of the energy entropies of four
    levels of maximal overlap discrete wavelet transform (MODWT) decompositions.

    The `wavelet` is a wavelet description that can be passed to `pywt`. The default
    wavelet was confirmed by the original authors. For more options call
    `pywt.wavelist`. The input signal should have the shape `[num_windows,
    window_size, num_features]`.

    Args:
        inputs: The input acceleration signal.
        window_size: The window size of the sliding window to calculate the average
                     over.
        wavelet: The description of the wavelet, e.g. 'sym4'.
    Returns:
        The MAC of the input signal which is `window_size - 1` shorter.
    """
    windowed = extract_windows(_energy_entropies(inputs, wavelet), window_size)
    # correlate the last entry of each window with all preceding entries
    anchor = windowed[:, -2:-1]
    queries = windowed[:, :-1]
    return np.abs(_pearson(anchor, queries)).mean(axis=1)
def _energy_entropies(inputs: np.ndarray, wavelet: str = "sym4") -> np.ndarray:
    """Entropy-normalized band energy ratios of a level-4 MODWPT."""
    band_energy = energy(modwpt(inputs, wavelet, 4))
    ratios = band_energy / band_energy.sum(axis=-1, keepdims=True)
    entropy = -(ratios * np.log(ratios)).sum(axis=-1, keepdims=True)
    return ratios / entropy
def _pearson(x: np.ndarray, y: np.ndarray) -> np.ndarray:
diff_x = x - np.mean(x, axis=2, keepdims=True)
diff_y = y - np.mean(y, axis=2, keepdims=True)
cov = np.sum(diff_y * diff_x, axis=2)
std_product = np.sqrt(np.sum(diff_y**2, axis=2) * np.sum(diff_x**2, axis=2))
corr = cov / std_product
return corr
def modwpt(inputs: np.ndarray, wavelet: str, level: int) -> np.ndarray:
    """
    Apply Maximal Overlap Discrete Wavelet Packet Transformation (MODWT) of `level`
    to the input.

    The `wavelet` should be a string that can be passed to `pywt` to construct a
    wavelet function. For more options call `pywt.wavelist`. The implementation was
    inspired by [this repository](https://github.com/pistonly/modwtpy).

    Args:
        inputs: An input signal of shape `[num_windows, window_size, num_features]`.
        wavelet: The description of the wavelet function, e.g. 'sym4'.
        level: The decomposition level.
    Returns:
        The 2**level decompositions stacked in the last axis.
    """
    if level < 1:
        raise ValueError("The level needs to be a positive integer.")

    wavelet_func = pywt.Wavelet(wavelet)
    # MODWT uses the filters rescaled by 1/sqrt(2)
    dec_hi = np.asarray(wavelet_func.dec_hi) / np.sqrt(2)
    dec_lo = np.asarray(wavelet_func.dec_lo) / np.sqrt(2)

    input_queue: Queue = Queue(maxsize=2**level)
    input_queue.put(inputs)
    coeffs: list = []
    for current_level in range(level):
        coeffs = _decompose_level(input_queue, dec_hi, dec_lo, current_level)
        for coeff in coeffs:
            input_queue.put(coeff)

    # only the deepest level's 2**level nodes form the result
    return np.concatenate(coeffs, axis=-1)
def _decompose_level(input_queue, dec_hi, dec_lo, level):
    """Split every signal currently in the queue into its approximation and
    detail coefficients for the given decomposition level."""
    coeffs = []
    while not input_queue.empty():
        signal = input_queue.get()
        # approximation first, then detail, to keep the node ordering
        coeffs.append(_circular_convolve_fast(dec_lo, signal, level + 1))
        coeffs.append(_circular_convolve_fast(dec_hi, signal, level + 1))
    return coeffs
def _circular_convolve_d(
kernel: np.ndarray, signal: np.ndarray, level: int
) -> np.ndarray:
len_signal = len(signal)
len_wavelet = len(kernel)
convolved = np.zeros(len_signal)
wavelet_range = np.arange(len_wavelet)
for t in range(len_signal):
index = np.mod(t - 2 ** (level - 1) * wavelet_range, len_signal)
element = np.array([signal[ind] for ind in index])
convolved[t] = (np.array(kernel) * element).sum()
return convolved
def _circular_convolve_fast(
kernel: np.ndarray, signal: np.ndarray, level: int
) -> np.ndarray:
len_signal = signal.shape[-2]
signal_range = np.arange(len_signal)[:, None]
wavelet_range = np.arange(len(kernel))[None, :]
idx = np.mod(signal_range - 2 ** (level - 1) * wavelet_range, len_signal)
convolved = np.sum(kernel[None, :, None] * signal[..., idx, :], axis=-2)
return convolved | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/approach/tbigru.py | 0.944016 | 0.657043 | tbigru.py | pypi |
from typing import Any, Optional, Dict, Literal, List
import torch
from torch import nn
import rul_adapt.loss
from rul_adapt import utils
from rul_adapt.approach.abstract import AdaptionApproach
from rul_adapt.approach.evaluation import AdaptionEvaluator
from rul_adapt.model import FullyConnectedHead
class DannApproach(AdaptionApproach):
    """The DANN approach introduces a domain discriminator that is trained on
    distinguishing source and target features as a binary classification problem. The
    features are produced by a shared feature extractor. The loss in the domain
    discriminator is binary cross-entropy.

    The regressor and domain discriminator need the same number of input units as the
    feature extractor has output units. The discriminator is not allowed to have an
    activation function on its last layer and needs to use only a single output
    neuron because [BCEWithLogitsLoss][torch.nn.BCEWithLogitsLoss] is used.

    Examples:
        >>> from rul_adapt import model
        >>> from rul_adapt import approach
        >>> feat_ex = model.CnnExtractor(1, [16, 16, 1], 10, fc_units=16)
        >>> reg = model.FullyConnectedHead(16, [1])
        >>> disc = model.FullyConnectedHead(16, [8, 1], act_func_on_last_layer=False)
        >>> dann = approach.DannApproach(1.0)
        >>> dann.set_model(feat_ex, reg, disc)
    """

    CHECKPOINT_MODELS = ["dann_loss"]

    dann_loss: rul_adapt.loss.DomainAdversarialLoss

    def __init__(
        self,
        dann_factor: float,
        loss_type: Literal["mae", "mse", "rmse"] = "mae",
        rul_score_mode: Literal["phm08", "phm12"] = "phm08",
        evaluate_degraded_only: bool = False,
        **optim_kwargs: Any,
    ):
        """
        Create a new DANN approach.

        The strength of the domain discriminator's influence on the feature
        extractor is controlled by the `dann_factor`. The higher it is, the stronger
        the influence.

        Possible options for the regression loss are `mae`, `mse` and `rmse`.

        The domain discriminator is set by the `set_model` function together with the
        feature extractor and regressor. For more information, see the [approach]
        [rul_adapt.approach] module page.

        For more information about the possible optimizer keyword arguments,
        see [here][rul_adapt.utils.OptimizerFactory].

        Args:
            dann_factor: Strength of the domain DANN loss.
            loss_type: Type of regression loss.
            rul_score_mode: The mode for the val and test RUL score, either 'phm08'
                or 'phm12'.
            evaluate_degraded_only: Whether to only evaluate the RUL score on degraded
                samples.
            **optim_kwargs: Keyword arguments for the optimizer, e.g. learning rate.
        """
        super().__init__()

        self.dann_factor = dann_factor
        self.loss_type = loss_type
        self.rul_score_mode = rul_score_mode
        self.evaluate_degraded_only = evaluate_degraded_only
        self.optim_kwargs = optim_kwargs

        self._get_optimizer = utils.OptimizerFactory(**self.optim_kwargs)

        # the regression loss is configurable, 'mae' by default
        self.train_source_loss = utils.get_loss(self.loss_type)

        self.evaluator = AdaptionEvaluator(
            self.forward, self.log, self.rul_score_mode, self.evaluate_degraded_only
        )

        self.save_hyperparameters()

    def set_model(
        self,
        feature_extractor: nn.Module,
        regressor: nn.Module,
        domain_disc: Optional[nn.Module] = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """
        Set the feature extractor, regressor, and domain discriminator for this
        approach.

        The discriminator is not allowed to have an activation function on its last
        layer and needs to use only a single output neuron. It is wrapped by a
        [DomainAdversarialLoss][rul_adapt.loss.DomainAdversarialLoss].

        Args:
            feature_extractor: The feature extraction network.
            regressor: The RUL regression network.
            domain_disc: The domain discriminator network.
        """
        domain_disc = self._check_domain_disc(domain_disc)
        super().set_model(feature_extractor, regressor, *args, **kwargs)
        self.dann_loss = rul_adapt.loss.DomainAdversarialLoss(domain_disc)

        self.log_model_hyperparameters("domain_disc")

    def _check_domain_disc(self, domain_disc: Optional[nn.Module]) -> nn.Module:
        """Validate that a usable domain discriminator was supplied."""
        if domain_disc is None:
            raise ValueError(
                "No domain discriminator was set. This approach is unlikely to work."
            )
        if (
            isinstance(domain_disc, FullyConnectedHead)
            and domain_disc.act_func_on_last_layer
        ):
            raise ValueError(
                "Domain discriminator has an activation function on its last layer. "
                "This is not allowed due to torch.nn.BCEWithLogitsLoss being used as "
                "its loss. Please set 'act_func_on_last_layer' to False."
            )

        return domain_disc

    @property
    def domain_disc(self):
        """The domain discriminator network."""
        if hasattr(self, "dann_loss"):
            return self.dann_loss.domain_disc
        else:
            raise RuntimeError("Domain disc used before 'set_model' was called.")

    def configure_optimizers(self) -> Dict[str, Any]:
        """Configure an optimizer for the whole model."""
        return self._get_optimizer(self.parameters())

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Predict the RUL values for a batch of input features."""
        return self.regressor(self.feature_extractor(inputs))

    def training_step(self, batch: List[torch.Tensor], batch_idx: int) -> torch.Tensor:
        """
        Execute one training step.

        The `batch` argument is a list of three tensors representing the source
        features, source labels and target features. Both types of features are fed
        to the feature extractor. Then the regression loss for the source domain and
        the DANN loss between domains is computed. The regression, DANN and combined
        loss are logged.

        Args:
            batch: A list of a source feature, source label and target feature tensors.
            batch_idx: The index of the current batch.
        Returns:
            The combined loss.
        """
        source, source_labels, target = batch
        source_labels = source_labels[:, None]

        source = self.feature_extractor(source)
        target = self.feature_extractor(target)
        rul_preds = self.regressor(source)

        # the regression loss defaults to MAE and is configurable via
        # `loss_type` (previously misnamed `mse_loss`)
        source_loss = self.train_source_loss(rul_preds, source_labels)
        dann_loss = self.dann_loss(source, target)
        loss = source_loss + self.dann_factor * dann_loss

        self.log("train/loss", loss)
        self.log("train/source_loss", self.train_source_loss)  # logs the metric object
        self.log("train/dann", self.dann_loss)

        return loss

    def validation_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one validation step.

        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `val`.

        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.validation(batch, domain)

    def test_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one test step.

        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `test`.

        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.test(batch, domain)
import copy
from typing import Optional, Any, List, Dict, Literal
import torch
from torch import nn
from rul_adapt import utils
from rul_adapt.approach.abstract import AdaptionApproach
from rul_adapt.approach.evaluation import AdaptionEvaluator
from rul_adapt.model import FullyConnectedHead
class AdaRulApproach(AdaptionApproach):
"""The ADARUL approach uses a GAN setup to adapt a feature extractor. This
approach should only be used with a pre-trained feature extractor.
The regressor and domain discriminator need the same number of input units as the
feature extractor has output units. The discriminator is not allowed to have an
activation function on its last layer for it to work with its loss.
Examples:
>>> from rul_adapt import model
>>> from rul_adapt import approach
>>> feat_ex = model.CnnExtractor(1, [16, 16, 1], 10, fc_units=16)
>>> reg = model.FullyConnectedHead(16, [1])
>>> disc = model.FullyConnectedHead(16, [8, 1], act_func_on_last_layer=False)
>>> pre = approach.SupervisedApproach("mse", 125, lr=0.001)
>>> pre.set_model(feat_ex, reg)
>>> main = approach.AdaRulApproach(5, 1, 125, lr=0.001)
>>> main.set_model(pre.feature_extractor, pre.regressor, disc)
"""
CHECKPOINT_MODELS = ["_domain_disc", "frozen_feature_extractor"]
_domain_disc: nn.Module
frozen_feature_extractor: nn.Module
def __init__(
self,
num_disc_updates: int,
num_gen_updates: int,
max_rul: Optional[int] = None,
rul_score_mode: Literal["phm08", "phm12"] = "phm08",
evaluate_degraded_only: bool = False,
**optim_kwargs: Any,
) -> None:
"""
Create a new ADARUL approach.
The discriminator is first trained for `num_disc_updates` batches.
Afterward, the feature extractor (generator) is trained for
`num_gen_updates`. This cycle repeats until the epoch ends.
The regressor is supposed to output a value between [0, 1] which is then
scaled by `max_rul`.
For more information about the possible optimizer keyword arguments,
see [here][rul_adapt.utils.OptimizerFactory].
Args:
max_rul: Maximum RUL value of the training data.
num_disc_updates: Number of batches to update discriminator with.
num_gen_updates: Number of batches to update generator with.
rul_score_mode: The mode for the val and test RUL score, either 'phm08'
or 'phm12'.
evaluate_degraded_only: Whether to only evaluate the RUL score on degraded
samples.
**optim_kwargs: Keyword arguments for the optimizer, e.g. learning rate.
"""
super().__init__()
self.automatic_optimization = False # use manual optimization loop
self.num_disc_updates = num_disc_updates
self.num_gen_updates = num_gen_updates
self.max_rul = max_rul
self.rul_score_mode = rul_score_mode
self.evaluate_degraded_only = evaluate_degraded_only
self.optim_kwargs = optim_kwargs
self._disc_counter, self._gen_counter = 0, 0
self._get_optimizer = utils.OptimizerFactory(**self.optim_kwargs)
self.gan_loss = nn.BCEWithLogitsLoss()
self.evaluator = AdaptionEvaluator(
self.forward, self.log, self.rul_score_mode, self.evaluate_degraded_only
)
self.save_hyperparameters()
def set_model(
self,
feature_extractor: nn.Module,
regressor: nn.Module,
domain_disc: Optional[nn.Module] = None,
*args: Any,
**kwargs: Any,
) -> None:
"""
Set the feature extractor, regressor and domain discriminator for this approach.
The discriminator is not allowed to have an activation function on its last
layer and needs to use only a single output neuron.
A frozen copy of the feature extractor is produced to be used for the *real*
samples fed to the discriminator. The feature extractor should, therefore,
be pre-trained.
Args:
feature_extractor: The feature extraction network.
regressor: The RUL regression network.
domain_disc: The domain discriminator network.
"""
domain_disc = self._check_domain_disc(domain_disc)
super().set_model(feature_extractor, regressor, *args, **kwargs)
self._domain_disc = domain_disc
self.frozen_feature_extractor = copy.deepcopy(feature_extractor)
self.frozen_feature_extractor.requires_grad_(False) # freeze network
self.log_model_hyperparameters("_domain_disc")
def _check_domain_disc(self, domain_disc: Optional[nn.Module]) -> nn.Module:
if domain_disc is None:
raise ValueError(
"No domain discriminator was set. This approach is unlikely to work."
)
if (
isinstance(domain_disc, FullyConnectedHead)
and domain_disc.act_func_on_last_layer
):
raise ValueError(
"Domain discriminator has an activation function on its last layer. "
"This is not allowed due to torch.nn.BCEWithLogitsLoss being used as "
"its loss. Please set 'act_func_on_last_layer' to False."
)
return domain_disc
@property
def domain_disc(self):
    """The domain discriminator network set via `set_model`."""
    try:
        return self._domain_disc
    except AttributeError:
        raise RuntimeError("Domain disc used before 'set_model' was called.")
def configure_optimizers(self) -> List[Dict[str, Any]]:
    """Create separate optimizers for discriminator and generator (in that order)."""
    disc_optim = self._get_optimizer(self.domain_disc.parameters())
    gen_optim = self._get_optimizer(self.feature_extractor.parameters())
    return [disc_optim, gen_optim]
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
    """Predict RUL values for a batch, rescaled by `max_rul` when it is set."""
    features = self.feature_extractor(inputs)
    # `or 1` keeps unscaled predictions when max_rul is None (or zero).
    return self.regressor(features) * (self.max_rul or 1)
def on_train_epoch_start(self) -> None:
    """Start each epoch with a fresh discriminator/generator update cycle."""
    self._reset_update_counters()
def training_step(self, batch: List[torch.Tensor], batch_idx: int) -> torch.Tensor:
    """
    Execute one training step.

    The `batch` argument is a list of three tensors representing the source
    features, source labels and target features. Each iteration either only the
    discriminator or only the generator is trained. The respective loss is logged.

    The *real* samples are source features passed though the frozen version of
    the feature extractor. The *fake* samples are the target features passed
    through the adapted feature extractor. The discriminator predicts if a sample
    came from the source or target domain.

    Args:
        batch: A list of a source feature, source label and target feature tensors.
        batch_idx: The index of the current batch.
    Returns:
        Either the discriminator or generator loss.
    """
    source, _, target = batch  # source labels are unused in GAN training

    if self._updates_done():
        # Both networks received their scheduled updates; begin a new cycle.
        self._reset_update_counters()

    if self._should_update_disc():
        optim, _ = self.optimizers()  # type: ignore[misc]
        loss = self._get_disc_loss(source, target)
        self.log("train/disc_loss", loss)
        self._disc_counter += 1
    elif self._should_update_gen():
        _, optim = self.optimizers()  # type: ignore[misc]
        loss = self._get_gen_loss(target)
        self.log("train/gen_loss", loss)
        self._gen_counter += 1
    else:
        raise RuntimeError("Configuration error. Did update neither disc nor gen.")

    # Manual optimization: only the optimizer of the selected network steps.
    optim.zero_grad()  # type: ignore[union-attr]
    self.manual_backward(loss)
    optim.step()

    return loss
def _should_update_disc(self):
return self._disc_counter < self.num_disc_updates and self._gen_counter == 0
def _should_update_gen(self):
return (
self._disc_counter == self.num_disc_updates
and self._gen_counter < self.num_gen_updates
)
def _reset_update_counters(self):
self._disc_counter, self._gen_counter = 0, 0
def _updates_done(self) -> bool:
return (
self._disc_counter == self.num_disc_updates
and self._gen_counter == self.num_gen_updates
)
def _get_disc_loss(self, source, target):
batch_size = source.shape[0]
source = self.frozen_feature_extractor(source).detach()
target = self.feature_extractor(target).detach()
domain_pred = self.domain_disc(torch.cat([source, target]))
domain_labels = torch.cat(
[
torch.ones(batch_size, 1, device=self.device), # real labels
torch.zeros(batch_size, 1, device=self.device), # fake labels
]
)
loss = self.gan_loss(domain_pred, domain_labels)
return loss
def _get_gen_loss(self, target):
batch_size = target.shape[0]
target = self.feature_extractor(target)
domain_pred = self.domain_disc(target)
domain_labels = torch.ones(batch_size, 1, device=self.device) # flipped labels
loss = self.gan_loss(domain_pred, domain_labels)
return loss
def validation_step(
    self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
) -> None:
    """
    Run one validation step on a feature/label batch.

    The features are turned into RUL predictions and the validation RMSE and
    RUL score are computed. Metrics from `dataloader_idx` 0 are treated as
    source-domain metrics, those from `dataloader_idx` 1 as target-domain
    metrics. Everything is logged under the `val` prefix.

    Args:
        batch: A list containing a feature and a label tensor.
        batch_idx: The index of the current batch.
        dataloader_idx: The index of the current dataloader (0: source, 1: target).
    """
    self.evaluator.validation(batch, utils.dataloader2domain(dataloader_idx))
def test_step(
    self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
) -> None:
    """
    Run one test step on a feature/label batch.

    The features are turned into RUL predictions and the test RMSE and RUL
    score are computed. Metrics from `dataloader_idx` 0 are treated as
    source-domain metrics, those from `dataloader_idx` 1 as target-domain
    metrics. Everything is logged under the `test` prefix.

    Args:
        batch: A list containing a feature and a label tensor.
        batch_idx: The index of the current batch.
        dataloader_idx: The index of the current dataloader (0: source, 1: target).
    """
    self.evaluator.test(batch, utils.dataloader2domain(dataloader_idx))
from typing import Callable, List, Literal, Tuple
import torch
import torchmetrics
from torch import nn
import rul_adapt
class AdaptionEvaluator(nn.Module):
    """Compute and log RMSE and RUL-score metrics for the source and target
    domain during validation and testing.

    The metrics are stateful `torchmetrics` objects. They are updated per batch
    and then handed unevaluated to the logging function, so the logger can
    reduce them correctly at epoch end.
    """

    def __init__(
        self,
        network_func: Callable[[torch.Tensor], torch.Tensor],
        log_func: Callable[[str, torchmetrics.Metric], None],
        score_mode: Literal["phm08", "phm12"] = "phm08",
        degraded_only: bool = False,
    ):
        """
        Create a new adaption evaluator.

        Args:
            network_func: Callable producing RUL predictions from a feature batch.
            log_func: Callable used to log a named metric object.
            score_mode: Which RUL score definition to use, 'phm08' or 'phm12'.
            degraded_only: Whether to drop non-degraded samples before scoring.
        """
        super().__init__()

        self.network_func = network_func
        self.log_func = log_func
        self.score_mode = score_mode
        self.degraded_only = degraded_only

        # Separate metric instances per stage so their internal states never mix.
        self.val_metrics = self._get_default_metrics()
        self.test_metrics = self._get_default_metrics()

    def _get_default_metrics(self) -> nn.ModuleDict:
        """Build one RMSE and one RUL-score metric per domain."""
        return nn.ModuleDict(
            {
                "source": nn.ModuleDict(
                    {
                        "rmse": torchmetrics.MeanSquaredError(squared=False),
                        "score": rul_adapt.loss.RULScore(self.score_mode),
                    }
                ),
                "target": nn.ModuleDict(
                    {
                        "rmse": torchmetrics.MeanSquaredError(squared=False),
                        "score": rul_adapt.loss.RULScore(self.score_mode),
                    }
                ),
            }
        )

    def validation(
        self, batch: List[torch.Tensor], domain: Literal["source", "target"]
    ) -> None:
        """Update and log the validation metrics of `domain` for one batch."""
        self._evaluate("val", self.val_metrics, batch, domain)

    def test(
        self, batch: List[torch.Tensor], domain: Literal["source", "target"]
    ) -> None:
        """Update and log the test metrics of `domain` for one batch."""
        self._evaluate("test", self.test_metrics, batch, domain)

    def _evaluate(
        self,
        prefix: str,
        metrics: nn.ModuleDict,
        batch: List[torch.Tensor],
        domain: Literal["source", "target"],
    ) -> None:
        """Update all metrics of one domain with a batch and log them."""
        self._check_domain(domain, prefix)
        features, labels = batch
        features, labels = filter_batch(features, labels, self.degraded_only)
        labels = labels[:, None]  # add channel dim to match prediction shape
        predictions = self.network_func(features)
        for metric_name, metric in metrics[domain].items():
            metric(predictions, labels)
            # Log the metric object itself so it is reduced at epoch end.
            self.log_func(f"{prefix}/{domain}/{metric_name}", metric)

    def _check_domain(self, domain: str, prefix: str) -> None:
        """Raise if `domain` is not one of the two known domain names."""
        if domain not in ["source", "target"]:
            raise RuntimeError(
                f"Unexpected {prefix} domain '{domain}'. "
                "Use either 'source' or 'target'."
            )
def filter_batch(
features: torch.Tensor, labels: torch.Tensor, degraded_only: bool = False
) -> Tuple[torch.Tensor, torch.Tensor]:
if degraded_only:
if torch.any(labels > 1.0):
raise RuntimeError(
"Degradation-only evaluation configured which works only with "
"normalized RUL, but labels contain values greater than 1.0."
)
degraded = labels < 1.0
features = features[degraded]
labels = labels[degraded]
return features, labels | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/approach/evaluation.py | 0.950801 | 0.395368 | evaluation.py | pypi |
from typing import Tuple, Literal, Any, Dict
import torch
import torchmetrics
from rul_adapt import utils
from rul_adapt.approach.abstract import AdaptionApproach
from rul_adapt.approach.evaluation import filter_batch
class SupervisedApproach(AdaptionApproach):
    """The supervised approach uses either MSE, MAE or RMSE loss to train a feature
    extractor and regressor in a supervised fashion on the source domain. It can be
    used either for pre-training or as a baseline to compare adaption approaches
    against.

    The regressor needs the same number of input units as the feature extractor has
    output units.

    Examples:
        >>> from rul_adapt import model
        >>> from rul_adapt import approach
        >>> feat_ex = model.CnnExtractor(1, [16, 16, 1], 10, fc_units=16)
        >>> reg = model.FullyConnectedHead(16, [1])
        >>> disc = model.FullyConnectedHead(16, [8, 1], act_func_on_last_layer=False)
        >>> main = approach.SupervisedApproach("mse")
        >>> main.set_model(feat_ex, reg, disc)
    """

    def __init__(
        self,
        loss_type: Literal["mse", "mae", "rmse"],
        rul_scale: int = 1,
        evaluate_degraded_only: bool = False,
        **optim_kwargs: Any,
    ) -> None:
        """
        Create a supervised approach.

        The regressor output can be scaled with `rul_scale` to control its
        magnitude. By default, the RUL values are not scaled.

        For more information about the possible optimizer keyword arguments,
        see [here][rul_adapt.utils.OptimizerFactory].

        Args:
            loss_type: Training loss function to use. Either 'mse', 'mae' or 'rmse'.
            rul_scale: Scalar to multiply the RUL prediction with.
            evaluate_degraded_only: Whether to only evaluate the RUL score on degraded
                                    samples.
            **optim_kwargs: Keyword arguments for the optimizer, e.g. learning rate.
        """
        super().__init__()

        self.loss_type = loss_type
        self.rul_scale = rul_scale
        self.evaluate_degraded_only = evaluate_degraded_only
        self.optim_kwargs = optim_kwargs

        # Stateful torchmetrics objects; logged directly so that the logger
        # reduces them correctly at epoch end.
        self.train_loss = utils.get_loss(loss_type)
        self._get_optimizer = utils.OptimizerFactory(**self.optim_kwargs)
        self.val_loss = torchmetrics.MeanSquaredError(squared=False)

        self.save_hyperparameters()

    def configure_optimizers(self) -> Dict[str, Any]:
        """Configure an optimizer over all parameters from the `optim_kwargs`."""
        return self._get_optimizer(self.parameters())

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Predict RUL values for a batch, scaled by `rul_scale`."""
        return self.regressor(self.feature_extractor(inputs)) * self.rul_scale

    def training_step(
        self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int
    ) -> torch.Tensor:
        """
        Execute one training step.

        The `batch` argument is a list of two tensors representing features and
        labels. The features are used to predict RUL values that are compared against
        the labels with the specified training loss. The loss is then logged.

        Args:
            batch: A list of feature and label tensors.
            batch_idx: The index of the current batch.
        Returns:
            The training loss.
        """
        inputs, labels = batch
        predictions = self.forward(inputs)
        loss = self.train_loss(predictions, labels[:, None])
        self.log("train/loss", self.train_loss)

        return loss

    def validation_step(
        self, batch: Tuple[torch.Tensor, torch.Tensor], batch_idx: int
    ) -> None:
        """
        Execute one validation step.

        The `batch` argument is a list of two tensors representing features and
        labels. The features are used to predict RUL values that are compared against
        the labels with an RMSE loss. The loss is then logged.

        Args:
            batch: A list of feature and label tensors.
            batch_idx: The index of the current batch.
        """
        inputs, labels = filter_batch(*batch, degraded_only=self.evaluate_degraded_only)
        predictions = self.forward(inputs)
        self.val_loss(predictions, labels[:, None])
        self.log("val/loss", self.val_loss)
import copy
import math
from itertools import chain
from typing import Optional, Any, List, Tuple, Dict, Literal
import numpy as np
import torch
from torch import nn
import rul_adapt.loss
from rul_adapt import utils
from rul_adapt.approach.abstract import AdaptionApproach
from rul_adapt.approach.evaluation import AdaptionEvaluator
from rul_adapt.model import FullyConnectedHead
class ConsistencyApproach(AdaptionApproach):
    """The Consistency DANN approach introduces a consistency loss that keeps the
    weights of the feature extractor close to the ones of a pre-trained version. This
    approach should only be used with a pre-trained feature extractor. Otherwise,
    the consistency loss would serve no purpose.

    The regressor and domain discriminator need the same number of input units as the
    feature extractor has output units. The discriminator is not allowed to have an
    activation function on its last layer for it to work with the DANN loss.

    Examples:
        >>> from rul_adapt import model
        >>> from rul_adapt import approach
        >>> feat_ex = model.CnnExtractor(1, [16, 16, 1], 10, fc_units=16)
        >>> reg = model.FullyConnectedHead(16, [1])
        >>> disc = model.FullyConnectedHead(16, [8, 1], act_func_on_last_layer=False)
        >>> pre = approach.SupervisedApproach("rmse")
        >>> pre.set_model(feat_ex, reg, disc)
        >>> main = approach.ConsistencyApproach(1.0, 100)
        >>> main.set_model(pre.feature_extractor, pre.regressor, disc)
    """

    # Extra sub-modules that need to be restored from checkpoints.
    CHECKPOINT_MODELS = ["dann_loss", "frozen_feature_extractor"]

    dann_loss: rul_adapt.loss.DomainAdversarialLoss
    frozen_feature_extractor: nn.Module

    def __init__(
        self,
        consistency_factor: float,
        max_epochs: int,
        loss_type: Literal["mse", "mae", "rmse"] = "rmse",
        rul_score_mode: Literal["phm08", "phm12"] = "phm08",
        evaluate_degraded_only: bool = False,
        **optim_kwargs: Any,
    ) -> None:
        """
        Create a new consistency DANN approach.

        The consistency factor is the strength of the consistency loss' influence.
        The influence of the DANN loss is increased during the training process. It
        starts at zero and reaches one at `max_epochs`.

        The domain discriminator is set by the `set_model` function together with the
        feature extractor and regressor. For more information, see the [approach]
        [rul_adapt.approach] module page.

        For more information about the possible optimizer keyword arguments,
        see [here][rul_adapt.utils.OptimizerFactory].

        Args:
            consistency_factor: The strength of the consistency loss' influence.
            max_epochs: The number of epochs after which the DANN loss' influence is
                        maximal.
            loss_type: The type of regression loss, either 'mse', 'rmse' or 'mae'.
            rul_score_mode: The mode for the val and test RUL score, either 'phm08'
                            or 'phm12'.
            evaluate_degraded_only: Whether to only evaluate the RUL score on degraded
                                    samples.
            **optim_kwargs: Keyword arguments for the optimizer, e.g. learning rate.
        """
        super().__init__()

        self.consistency_factor = consistency_factor
        self.max_epochs = max_epochs
        self.loss_type = loss_type
        self.rul_score_mode = rul_score_mode
        self.evaluate_degraded_only = evaluate_degraded_only
        self.optim_kwargs = optim_kwargs

        self.train_source_loss = utils.get_loss(loss_type)
        self.consistency_loss = rul_adapt.loss.ConsistencyLoss()
        self._get_optimizer = utils.OptimizerFactory(**self.optim_kwargs)

        self.evaluator = AdaptionEvaluator(
            self.forward, self.log, self.rul_score_mode, self.evaluate_degraded_only
        )

        self.save_hyperparameters()

    def set_model(
        self,
        feature_extractor: nn.Module,
        regressor: nn.Module,
        domain_disc: Optional[nn.Module] = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """
        Set the feature extractor, regressor and domain discriminator for this approach.

        The discriminator is not allowed to have an activation function on its last
        layer and needs to use only a single output neuron. It is wrapped by a
        [DomainAdversarialLoss][rul_adapt.loss.DomainAdversarialLoss].

        A frozen copy of the feature extractor is produced to be used for the
        consistency loss. The feature extractor should, therefore, be pre-trained.

        Args:
            feature_extractor: The pre-trained feature extraction network.
            regressor: The optionally pre-trained RUL regression network.
            domain_disc: The domain discriminator network.
        """
        domain_disc = self._check_domain_disc(domain_disc)
        super().set_model(feature_extractor, regressor, *args, **kwargs)
        self.dann_loss = rul_adapt.loss.DomainAdversarialLoss(domain_disc)

        self.frozen_feature_extractor = copy.deepcopy(feature_extractor)
        self.frozen_feature_extractor.requires_grad_(False)  # freeze network

        self.log_model_hyperparameters("domain_disc")

    def _check_domain_disc(self, domain_disc: Optional[nn.Module]) -> nn.Module:
        """Validate the domain discriminator; raise ValueError if unusable."""
        if domain_disc is None:
            raise ValueError(
                "No domain discriminator was set. This approach is unlikely to work."
            )
        if (
            isinstance(domain_disc, FullyConnectedHead)
            and domain_disc.act_func_on_last_layer
        ):
            raise ValueError(
                "Domain discriminator has an activation function on its last layer. "
                "This is not allowed due to torch.nn.BCEWithLogitsLoss being used as "
                "its loss. Please set 'act_func_on_last_layer' to False."
            )

        return domain_disc

    @property
    def domain_disc(self):
        """The domain discriminator network."""
        if hasattr(self, "dann_loss"):
            return self.dann_loss.domain_disc
        else:
            raise RuntimeError("Domain disc used before 'set_model' was called.")

    @property
    def dann_factor(self):
        """
        Return the influence of the DANN loss based on the current epoch.

        It is calculated as: `2 / (1 + math.exp(-10 * current_epoch / max_epochs)) - 1`
        """
        return 2 / (1 + math.exp(-10 * self.current_epoch / self.max_epochs)) - 1

    def configure_optimizers(self) -> Dict[str, Any]:
        """Configure an optimizer to train the feature extractor, regressor and
        domain discriminator."""
        parameters = chain(
            self.feature_extractor.parameters(),
            self.regressor.parameters(),
            self.dann_loss.parameters(),
        )  # excludes frozen_feature_extractor from optimization
        optim = self._get_optimizer(parameters)

        return optim

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Predict the RUL values for a batch of input features."""
        return self.regressor(self.feature_extractor(inputs))

    def training_step(self, batch: List[torch.Tensor], batch_idx: int) -> torch.Tensor:
        """
        Execute one training step.

        The `batch` argument is a list of three tensors representing the source
        features, source labels and target features. Both types of features are fed
        to the feature extractor. Then the regression loss for the source domain and
        the DANN loss between domains is computed. Afterwards the consistency loss is
        calculated. The regression, DANN, consistency and combined loss are logged.

        Args:
            batch: A list of a source feature, source label and target feature tensors.
            batch_idx: The index of the current batch.
        Returns:
            The combined loss.
        """
        source, source_labels, target = batch
        source_labels = source_labels[:, None]
        # Frozen features serve as the reference point for the consistency loss.
        frozen_source = self.frozen_feature_extractor(source)
        source = self.feature_extractor(source)
        target = self.feature_extractor(target)

        rul_preds = self.regressor(source)
        rmse_loss = self.train_source_loss(rul_preds, source_labels)

        dann_loss = self.dann_loss(source, target)

        consistency_loss = self.consistency_loss(frozen_source, source)

        # DANN influence is ramped up over the epochs via `dann_factor`.
        loss = (
            rmse_loss
            + self.dann_factor * dann_loss
            + self.consistency_factor * consistency_loss
        )

        self.log("train/loss", loss)
        self.log("train/source_loss", self.train_source_loss)
        self.log("train/dann", self.dann_loss)
        self.log("train/consistency", self.consistency_loss)

        return loss

    def on_train_epoch_start(self) -> None:
        """Log the current DANN loss influence at the start of each epoch."""
        self.log("train/dann_factor", self.dann_factor)

    def validation_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one validation step.

        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `val`.

        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.validation(batch, domain)

    def test_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one test step.

        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `test`.

        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.test(batch, domain)
class StdExtractor:
    """
    This extractor can be used to extract the per-feature standard deviation from
    windows of data. It can be used to pre-process datasets like FEMTO and XJTU-SY
    with the help of the [RulDataModule][rul_datasets.core.RulDataModule].

    Examples:
        Extract the std of the horizontal acceleration and produce windows of size 30.
        ```pycon
        >>> import rul_datasets
        >>> import rul_adapt
        >>> fd1 = rul_datasets.XjtuSyReader(fd=1)
        >>> extractor = rul_adapt.approach.consistency.StdExtractor([0])
        >>> dm = rul_datasets.RulDataModule(fd1, 32, extractor, window_size=30)
        ```
    """

    def __init__(self, channels: List[int]) -> None:
        """
        Create a new feature extractor for standard deviations.

        Args:
            channels: The list of channel indices to extract features from.
        """
        self.channels = channels

    def __call__(
        self, inputs: np.ndarray, targets: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Extract features from the input data.

        The input is expected to have a shape of `[num_windows, window_size,
        num_features]`. The output will have a shape of `[num_windows,
        len(self.channels)]`.

        Args:
            inputs: The input data.
            targets: The target values, passed through unchanged.
        Returns:
            The features extracted from the input data and the unchanged targets.
        """
        return np.std(inputs[:, :, self.channels], axis=1), targets
class TumblingWindowExtractor:
    """Split each window of a run into several smaller, non-overlapping windows.

    Each original window of shape `[org_window_size, num_features]` is cropped
    to the largest multiple of `window_size` and cut into tumbling sub-windows
    over the selected channels. The targets are repeated so that every
    sub-window keeps the RUL label of its original window.
    """

    def __init__(self, window_size: int, channels: List[int]) -> None:
        """
        Create a new tumbling window extractor.

        Args:
            window_size: The size of the produced sub-windows.
            channels: The list of channel indices to keep.
        """
        self.window_size = window_size
        self.channels = channels

    def __call__(
        self, features: np.ndarray, targets: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Cut the windows in `features` into sub-windows and repeat `targets`."""
        windows_per_org = features.shape[1] // self.window_size
        usable_len = windows_per_org * self.window_size

        selected = features[:, :, self.channels]
        cropped = selected[:, :usable_len]
        new_features = cropped.reshape(-1, self.window_size, len(self.channels))
        new_targets = np.repeat(targets, windows_per_org)

        return new_features, new_targets
from copy import deepcopy
from typing import List, Tuple, Literal, Optional, Any, Dict
import torch
from torch import nn
import rul_adapt
from rul_adapt import utils
from rul_adapt.approach.abstract import AdaptionApproach
from rul_adapt.approach.evaluation import AdaptionEvaluator
from rul_adapt.model import FullyConnectedHead
class ConditionalMmdApproach(AdaptionApproach):
    """The conditional MMD uses a combination of a marginal and conditional MML loss
    to adapt a feature extractor to be used with the source regressor.

    The regressor needs the same number of input units as the feature extractor has
    output units.

    Examples:
        >>> from rul_adapt import model
        >>> from rul_adapt import approach
        >>> feat_ex = model.CnnExtractor(1, [16, 16, 1], 10, fc_units=16)
        >>> reg = model.FullyConnectedHead(16, [1])
        >>> cond_mmd = approach.ConditionalMmdApproach(0.01, 5, 0.5, [(0, 1)])
        >>> cond_mmd.set_model(feat_ex, reg)
    """

    def __init__(
        self,
        mmd_factor: float,
        num_mmd_kernels: int,
        dynamic_adaptive_factor: float,
        fuzzy_sets: List[Tuple[float, float]],
        loss_type: Literal["mse", "rmse", "mae"] = "mae",
        rul_score_mode: Literal["phm08", "phm12"] = "phm08",
        evaluate_degraded_only: bool = False,
        **optim_kwargs: Any,
    ) -> None:
        """
        Create a new conditional MMD approach.

        The strength of the influence of the MMD loss on the feature extractor is
        controlled by the `mmd_factor`. The higher it is, the stronger the influence.
        The dynamic adaptive factor controls the balance between the marginal MMD and
        conditional MMD losses.

        For more information about the possible optimizer keyword arguments,
        see [here][rul_adapt.utils.OptimizerFactory].

        Args:
            mmd_factor: The strength of the MMD loss' influence.
            num_mmd_kernels: The number of kernels for the MMD loss.
            dynamic_adaptive_factor: The balance between marginal and conditional MMD.
            fuzzy_sets: The fuzzy sets for the conditional MMD loss.
            loss_type: The type of regression loss, either 'mse', 'rmse' or 'mae'.
            rul_score_mode: The mode for the val and test RUL score, either 'phm08'
                            or 'phm12'.
            evaluate_degraded_only: Whether to only evaluate the RUL score on degraded
                                    samples.
            **optim_kwargs: Keyword arguments for the optimizer, e.g. learning rate.
        """
        super().__init__()

        self.mmd_factor = mmd_factor
        self.num_mmd_kernels = num_mmd_kernels
        self.dynamic_adaptive_factor = dynamic_adaptive_factor
        self.loss_type = loss_type
        self.rul_score_mode = rul_score_mode
        self.evaluate_degraded_only = evaluate_degraded_only
        self.optim_kwargs = optim_kwargs

        self._get_optimizer = utils.OptimizerFactory(**self.optim_kwargs)

        # training metrics
        self.train_source_loss = utils.get_loss(self.loss_type)
        self.mmd_loss = rul_adapt.loss.MaximumMeanDiscrepancyLoss(self.num_mmd_kernels)
        # One separate MMD loss per fuzzy set for the conditional part.
        conditional_mmd_losses = [
            rul_adapt.loss.MaximumMeanDiscrepancyLoss(self.num_mmd_kernels)
            for _ in range(len(fuzzy_sets))
        ]
        self.conditional_mmd_loss = rul_adapt.loss.ConditionalAdaptionLoss(
            conditional_mmd_losses, fuzzy_sets, mean_over_sets=True
        )

        self.evaluator = AdaptionEvaluator(
            self.forward, self.log, self.rul_score_mode, self.evaluate_degraded_only
        )

        self.save_hyperparameters()

    @property
    def fuzzy_sets(self) -> List[Tuple[float, float]]:
        """The fuzzy sets used by the conditional MMD loss."""
        return self.conditional_mmd_loss.fuzzy_sets

    def configure_optimizers(self) -> Dict[str, Any]:
        """Configure an optimizer over all parameters from the `optim_kwargs`."""
        return self._get_optimizer(self.parameters())

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Predict the RUL values for a batch of input features."""
        return self.regressor(self.feature_extractor(inputs))

    def training_step(self, batch: List[torch.Tensor], batch_idx: int) -> torch.Tensor:
        """
        Execute one training step.

        The `batch` argument is a list of three tensors representing the source
        features, source labels and target features. Both types of features are fed
        to the feature extractor. Then the regression loss for the source domain,
        the MMD loss and the conditional MMD loss are computed. The
        regression, MMD, conditional MMD and combined loss are logged.

        Args:
            batch: A list of a source feature, source label and target feature tensors.
            batch_idx: The index of the current batch.
        Returns:
            The combined loss.
        """
        source, source_labels, target = batch
        source_labels = source_labels[:, None]
        daf = self.dynamic_adaptive_factor

        source = self.feature_extractor(source)
        target = self.feature_extractor(target)
        source_preds = self.regressor(source)
        target_preds = self.regressor(target)

        source_loss = self.train_source_loss(source_preds, source_labels)
        mmd_loss = self.mmd_loss(source, target)
        cond_mmd_loss = self.conditional_mmd_loss(
            source, source_preds, target, target_preds
        )
        # `daf` balances the marginal against the conditional MMD loss.
        combined_mmd_loss = (1 - daf) * mmd_loss + daf * cond_mmd_loss
        loss = source_loss + self.mmd_factor * combined_mmd_loss

        self.log("train/loss", loss)
        self.log("train/source_loss", self.train_source_loss)
        self.log("train/mmd", self.mmd_loss)
        self.log("train/conditional_mmd", self.conditional_mmd_loss)

        return loss

    def validation_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one validation step.

        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `val`.

        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.validation(batch, domain)

    def test_step(
        self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
    ) -> None:
        """
        Execute one test step.

        The `batch` argument is a list of two tensors representing features and
        labels. A RUL prediction is made from the features and the validation RMSE
        and RUL score are calculated. The metrics recorded for dataloader idx zero
        are assumed to be from the source domain and for dataloader idx one from the
        target domain. The metrics are written to the configured logger under the
        prefix `test`.

        Args:
            batch: A list containing a feature and a label tensor.
            batch_idx: The index of the current batch.
            dataloader_idx: The index of the current dataloader (0: source, 1: target).
        """
        domain = utils.dataloader2domain(dataloader_idx)
        self.evaluator.test(batch, domain)
class ConditionalDannApproach(AdaptionApproach):
"""The conditional DANN approach uses a marginal and several conditional domain
discriminators. The features are produced by a shared feature extractor. The loss
in the domain discriminators is binary cross-entropy.
The regressor and domain discriminators need the same number of input units as the
feature extractor has output units. The discriminators are not allowed to have an
activation function on their last layer and need to use only a single output
neuron because [BCEWithLogitsLoss][torch.nn.BCEWithLogitsLoss] is used.
Examples:
>>> from rul_adapt import model
>>> from rul_adapt import approach
>>> feat_ex = model.CnnExtractor(1, [16, 16, 1], 10, fc_units=16)
>>> reg = model.FullyConnectedHead(16, [1])
>>> disc = model.FullyConnectedHead(16, [8, 1], act_func_on_last_layer=False)
>>> cond_dann = approach.ConditionalDannApproach(1.0, 0.5, [(0, 1)])
>>> cond_dann.set_model(feat_ex, reg, disc)
"""
CHECKPOINT_MODELS = ["dann_loss", "conditional_dann_loss"]
dann_loss: rul_adapt.loss.DomainAdversarialLoss
conditional_dann_loss: rul_adapt.loss.ConditionalAdaptionLoss
def __init__(
    self,
    dann_factor: float,
    dynamic_adaptive_factor: float,
    fuzzy_sets: List[Tuple[float, float]],
    loss_type: Literal["mse", "rmse", "mae"] = "mae",
    rul_score_mode: Literal["phm08", "phm12"] = "phm08",
    evaluate_degraded_only: bool = False,
    **optim_kwargs: Any,
) -> None:
    """
    Create a new conditional DANN approach.

    The strength of the domain discriminator's influence on the feature extractor
    is controlled by the `dann_factor`. The higher it is, the stronger the
    influence. The `dynamic_adaptive_factor` controls the balance between the
    marginal and conditional DANN loss.

    The domain discriminator is set by the `set_model` function together with the
    feature extractor and regressor. For more information, see the [approach]
    [rul_adapt.approach] module page.

    For more information about the possible optimizer keyword arguments,
    see [here][rul_adapt.utils.OptimizerFactory].

    Args:
        dann_factor: Strength of the domain DANN loss.
        dynamic_adaptive_factor: Balance between the marginal and conditional DANN
                                 loss.
        fuzzy_sets: Fuzzy sets for the conditional DANN loss.
        loss_type: The type of regression loss, either 'mse', 'rmse' or 'mae'.
        rul_score_mode: The mode for the val and test RUL score, either 'phm08'
                        or 'phm12'.
        evaluate_degraded_only: Whether to only evaluate the RUL score on degraded
                                samples.
        **optim_kwargs: Keyword arguments for the optimizer, e.g. learning rate.
    """
    super().__init__()

    self.dann_factor = dann_factor
    self.dynamic_adaptive_factor = dynamic_adaptive_factor
    self.loss_type = loss_type
    self._fuzzy_sets = fuzzy_sets
    self.rul_score_mode = rul_score_mode
    self.evaluate_degraded_only = evaluate_degraded_only
    self.optim_kwargs = optim_kwargs

    self.train_source_loss = utils.get_loss(self.loss_type)
    self._get_optimizer = utils.OptimizerFactory(**self.optim_kwargs)

    self.evaluator = AdaptionEvaluator(
        self.forward, self.log, self.rul_score_mode, self.evaluate_degraded_only
    )

    self.save_hyperparameters()
    @property
    def fuzzy_sets(self) -> List[Tuple[float, float]]:
        """The (left, right) interval boundaries used by the conditional DANN
        loss to gate samples by their predictions (read-only)."""
        return self._fuzzy_sets
def set_model(
self,
feature_extractor: nn.Module,
regressor: nn.Module,
domain_disc: Optional[nn.Module] = None,
*args: Any,
**kwargs: Any,
) -> None:
"""
Set the feature extractor, regressor, and domain discriminator for this
approach.
The discriminator is not allowed to have an activation function on its last
layer and needs to use only a single output neuron.
It is wrapped by a
[DomainAdversarialLoss][rul_adapt.loss.DomainAdversarialLoss].
A copy of the discriminator is used for each conditional loss governing a
fuzzy set.
Args:
feature_extractor: The feature extraction network.
regressor: The RUL regression network.
domain_disc: The domain discriminator network.
Copied for each fuzzy set.
"""
domain_disc = self._check_domain_disc(domain_disc)
super().set_model(feature_extractor, regressor, *args, **kwargs)
self.dann_loss = rul_adapt.loss.DomainAdversarialLoss(domain_disc)
cond_losses = [deepcopy(self.dann_loss) for _ in range(len(self.fuzzy_sets))]
self.conditional_dann_loss = rul_adapt.loss.ConditionalAdaptionLoss(
cond_losses, self.fuzzy_sets
)
self.log_model_hyperparameters("domain_disc")
def _check_domain_disc(self, domain_disc: Optional[nn.Module]) -> nn.Module:
if domain_disc is None:
raise ValueError(
"No domain discriminator was set. This approach is unlikely to work."
)
if (
isinstance(domain_disc, FullyConnectedHead)
and domain_disc.act_func_on_last_layer
):
raise ValueError(
"Domain discriminator has an activation function on its last layer. "
"This is not allowed due to torch.nn.BCEWithLogitsLoss being used as "
"its loss. Please set 'act_func_on_last_layer' to False."
)
return domain_disc
    @property
    def domain_disc(self) -> nn.Module:
        """The domain discriminator network wrapped by the marginal DANN loss."""
        return self.dann_loss.domain_disc
    def configure_optimizers(self) -> Dict[str, Any]:
        """Configure the optimizer built from the `optim_kwargs` by the
        [OptimizerFactory][rul_adapt.utils.OptimizerFactory]."""
        return self._get_optimizer(self.parameters())
def forward(self, inputs: torch.Tensor) -> torch.Tensor:
"""Predict the RUL values for a batch of input features."""
return self.regressor(self.feature_extractor(inputs))
    def training_step(self, batch: List[torch.Tensor], batch_idx: int) -> torch.Tensor:
        """
        Execute one training step.
        The `batch` argument is a list of three tensors representing the source
        features, source labels and target features. Both types of features are fed
        to the feature extractor. Then the regression loss for the source domain,
        the DANN loss and the conditional DANN loss are computed. The
        regression, DANN, conditional DANN and combined loss are logged.
        Args:
            batch: A list of a source feature, source label and target feature tensors.
            batch_idx: The index of the current batch.
        Returns:
            The combined loss.
        """
        source, source_labels, target = batch
        source_labels = source_labels[:, None]  # add feature dim to match preds
        daf = self.dynamic_adaptive_factor
        source = self.feature_extractor(source)
        target = self.feature_extractor(target)
        source_preds = self.regressor(source)
        target_preds = self.regressor(target)
        source_loss = self.train_source_loss(source_preds, source_labels)
        # marginal domain confusion on the latent features
        dann_loss = self.dann_loss(source, target)
        # conditional domain confusion, gated per fuzzy set by the predictions
        cond_dann_loss = self.conditional_dann_loss(
            source, source_preds, target, target_preds
        )
        # `daf` balances marginal vs. conditional alignment
        combined_dann_loss = (1 - daf) * dann_loss + daf * cond_dann_loss
        loss = source_loss + self.dann_factor * combined_dann_loss
        self.log("train/loss", loss)
        # the metric objects themselves are passed to `log`
        self.log("train/source_loss", self.train_source_loss)
        self.log("train/dann", self.dann_loss)
        self.log("train/conditional_dann", self.conditional_dann_loss)
        return loss
def validation_step(
self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
) -> None:
"""
Execute one validation step.
The `batch` argument is a list of two tensors representing features and
labels. A RUL prediction is made from the features and the validation RMSE
and RUL score are calculated. The metrics recorded for dataloader idx zero
are assumed to be from the source domain and for dataloader idx one from the
target domain. The metrics are written to the configured logger under the
prefix `val`.
Args:
batch: A list containing a feature and a label tensor.
batch_idx: The index of the current batch.
dataloader_idx: The index of the current dataloader (0: source, 1: target).
"""
domain = utils.dataloader2domain(dataloader_idx)
self.evaluator.validation(batch, domain)
def test_step(
self, batch: List[torch.Tensor], batch_idx: int, dataloader_idx: int
) -> None:
"""
Execute one test step.
The `batch` argument is a list of two tensors representing features and
labels. A RUL prediction is made from the features and the validation RMSE
and RUL score are calculated. The metrics recorded for dataloader idx zero
are assumed to be from the source domain and for dataloader idx one from the
target domain. The metrics are written to the configured logger under the
prefix `test`.
Args:
batch: A list containing a feature and a label tensor.
batch_idx: The index of the current batch.
dataloader_idx: The index of the current dataloader (0: source, 1: target).
"""
domain = utils.dataloader2domain(dataloader_idx)
self.evaluator.test(batch, domain) | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/approach/conditional.py | 0.971095 | 0.626767 | conditional.py | pypi |
from typing import List
import torch
import torchmetrics
from rul_adapt.loss.utils import calc_pairwise_dot, weighted_mean
class HealthyStateAlignmentLoss(torchmetrics.Metric):
    """
    Variance-based alignment loss for healthy-state data in the latent space.

    For every batch, the variance of each latent feature over the batch is
    computed and averaged. Minimizing this value pushes all healthy-state
    samples towards a single compact cluster.

    The loss is implemented as a [torchmetrics.Metric](
    https://torchmetrics.readthedocs.io/en/stable/pages/quickstart.html#module
    -metrics). See their documentation for more information.

    Examples:
        ```pycon
        >>> from rul_adapt.loss.alignment import HealthyStateAlignmentLoss
        >>> healthy_align = HealthyStateAlignmentLoss()
        >>> healthy_align(torch.zeros(10, 5))
        tensor(0.)
        ```
    """

    is_differentiable: bool = True
    higher_is_better: bool = False
    full_state_update: bool = False

    # per-batch mean feature variances and the corresponding batch sizes
    loss: List[torch.Tensor]
    total: List[torch.Tensor]

    def __init__(self):
        super().__init__()
        self.add_state("loss", [], dist_reduce_fx="cat")
        self.add_state("total", [], dist_reduce_fx="cat")

    def update(self, healthy: torch.Tensor) -> None:
        feature_variances = torch.var(healthy, dim=0)
        self.loss.append(feature_variances.mean())
        self.total.append(torch.tensor(healthy.shape[0], device=self.device))

    def compute(self) -> torch.Tensor:
        # batch-size weighted mean over all recorded batches
        return weighted_mean(self.loss, self.total)
class DegradationDirectionAlignmentLoss(torchmetrics.Metric):
    """
    Aligns the direction of degraded samples relative to the healthy-state
    cluster in the latent space.

    The healthy-state cluster is assumed to lie at the mean of the healthy
    latent features. For every degraded sample, the unit vector pointing from
    that cluster to the sample is formed, and the pairwise cosines between all
    such vectors are averaged. The negated mean is returned so that minimizing
    the loss maximizes the directional agreement.

    The loss is implemented as a [torchmetrics.Metric](
    https://torchmetrics.readthedocs.io/en/stable/pages/quickstart.html#module
    -metrics). See their documentation for more information.

    Examples:
        ```pycon
        >>> from rul_adapt.loss.alignment import DegradationDirectionAlignmentLoss
        >>> degradation_align = DegradationDirectionAlignmentLoss()
        >>> degradation_align(torch.zeros(10, 5), torch.ones(10, 5))
        tensor(-1.0000)
        ```
    """

    is_differentiable: bool = True
    higher_is_better: bool = False
    full_state_update: bool = False

    loss: torch.Tensor
    total: torch.Tensor

    def __init__(self):
        super().__init__()
        self.add_state("loss", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, healthy: torch.Tensor, degraded: torch.Tensor) -> None:
        cluster_center = healthy.mean(dim=0)
        directions = degraded - cluster_center
        directions = directions / directions.norm(dim=1, keepdim=True)
        cosines = calc_pairwise_dot(directions, directions)
        # negate so that minimizing the loss maximizes the cosine similarity
        self.loss = self.loss - cosines.sum()
        self.total = self.total + degraded.shape[0] ** 2

    def compute(self) -> torch.Tensor:
        return self.loss / self.total
class DegradationLevelRegularizationLoss(torchmetrics.Metric):
    """
    Regularizes the latent distance from the healthy-state cluster to match
    the degradation level.

    The distance of each source and target sample from the healthy-state
    cluster (the mean of the healthy latent features) is normalized by the
    maximum distance in its batch. The degradation steps are normalized the
    same way, and the mean absolute difference between distances and steps is
    minimized.

    The loss is implemented as a [torchmetrics.Metric](
    https://torchmetrics.readthedocs.io/en/stable/pages/quickstart.html#module
    -metrics). See their documentation for more information.

    Examples:
        ```pycon
        >>> from rul_adapt.loss.alignment import DegradationLevelRegularizationLoss
        >>> degradation_align = DegradationLevelRegularizationLoss()
        >>> degradation_align(torch.zeros(10, 5),
        ...                   torch.ones(10, 5),
        ...                   torch.ones(10),
        ...                   torch.ones(10, 5),
        ...                   torch.ones(10))
        tensor(0.)
        ```
    """

    is_differentiable: bool = True
    higher_is_better: bool = False
    full_state_update: bool = False

    loss: torch.Tensor
    total: torch.Tensor

    def __init__(self) -> None:
        super().__init__()
        self.add_state("loss", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum")

    def update(
        self,
        healthy: torch.Tensor,
        source: torch.Tensor,
        source_degradation_steps: torch.Tensor,
        target: torch.Tensor,
        target_degradation_steps: torch.Tensor,
    ) -> None:
        cluster_center = healthy.mean(dim=0)
        source_distances = self._calc_normed_distances(cluster_center, source)
        target_distances = self._calc_normed_distances(cluster_center, target)
        normed_source_steps = source_degradation_steps / source_degradation_steps.max()
        normed_target_steps = target_degradation_steps / target_degradation_steps.max()
        batch_loss = torch.abs(source_distances - normed_source_steps).sum()
        batch_loss = batch_loss + torch.abs(target_distances - normed_target_steps).sum()
        self.loss = self.loss + batch_loss
        # NOTE(review): only the source batch size is counted even though both
        # domains contribute to the loss -- presumably both batches share the
        # same size; confirm with the callers.
        self.total = self.total + source.shape[0]

    def compute(self) -> torch.Tensor:
        return self.loss / self.total

    def _calc_normed_distances(
        self, healthy_mean: torch.Tensor, source: torch.Tensor
    ) -> torch.Tensor:
        # Euclidean distance to the cluster center, normalized by the batch max
        distances = torch.norm(source - healthy_mean, p=2, dim=1)
        return distances / distances.max()
from typing import List, Any, Tuple
import torch
import torchmetrics
from torch import nn
from rul_adapt.loss.utils import calc_pairwise_euclidean, weighted_mean
class MaximumMeanDiscrepancyLoss(torchmetrics.Metric):
    """The Maximum Mean Discrepancy Loss (MMD) is a distance measure between two
    arbitrary distributions.
    The distance is defined as the dot product in a reproducing Hilbert kernel space
    (RHKS) and is zero if and only if the distributions are identical. The RHKS is
    the space of the linear combination of multiple Gaussian kernels with bandwidths
    derived from the mean pairwise distance (a variant of the median heuristic, see
    `_get_gammas`).
    The source and target feature batches are treated as samples from their
    respective distribution. The linear pairwise distances between the two batches
    are transformed into distances in the RHKS via the kernel trick:
    ```python
    rhks(x, y) = dot(to_rhks(x), to_rhks(y)) = multi_kernel(dot(x, y))
    multi_kernel(distance) = mean([gaussian(distance, bw) for bw in bandwidths])
    gaussian(distance, bandwidth) = exp(-distance * bandwidth)
    ```
    The n kernels will use bandwidths between `median / (2**(n/ 2))` and `median * (
    2**(n / 2))`, where `median` is the base bandwidth.
    The MMD loss is then calculated as:
    ```python
    mean(rhks(source, source) + rhks(target, target) - 2 * rhks(source, target))
    ```
    This version of MMD is biased, which is acceptable for training purposes.
    """
    is_differentiable = True
    higher_is_better = False
    full_state_update = False
    # accumulated kernel sums for the source-source, target-target and
    # source-target blocks, plus the entry counts used to normalize them
    s2s: torch.Tensor
    t2t: torch.Tensor
    s2t: torch.Tensor
    s2s_total: torch.Tensor
    t2t_total: torch.Tensor
    s2t_total: torch.Tensor
    def __init__(self, num_kernels: int) -> None:
        """
        Create a new MMD loss module with `n` kernels.
        The bandwidths of the Gaussian kernels are derived from the mean pairwise
        distance of each batch (see `_get_gammas`).
        Args:
            num_kernels: Number of Gaussian kernels to use.
        """
        super().__init__()
        self.num_kernels = num_kernels
        self.add_state("s2s", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("t2t", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("s2t", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("s2s_total", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("t2t_total", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("s2t_total", torch.tensor(0.0), dist_reduce_fx="sum")
    def update(
        self, source_features: torch.Tensor, target_features: torch.Tensor
    ) -> None:
        """
        Accumulate the MMD statistics for a batch of source and target features.
        The loss itself is returned by `compute`.
        Args:
            source_features: Source features with shape `[batch_size, num_features]`
            target_features: Target features with shape `[batch_size, num_features]`
        """
        feats = torch.cat([source_features, target_features], dim=0)
        distances = calc_pairwise_euclidean(feats, feats)
        # kernel bandwidths are derived per batch from the pairwise distances
        gammas = _get_gammas(distances, self.num_kernels)
        distances = _calc_multi_kernel(distances, gammas)
        batch_size = source_features.shape[0]
        s2s, t2t, s2t = _split_distances(distances, batch_size)
        self.s2s = self.s2s + s2s.sum()
        self.t2t = self.t2t + t2t.sum()
        self.s2t = self.s2t + s2t.sum()
        self.s2s_total = self.s2s_total + s2s.numel()
        self.t2t_total = self.t2t_total + t2t.numel()
        self.s2t_total = self.s2t_total + s2t.numel()
    def compute(self) -> torch.Tensor:
        """Return the biased MMD estimate from the accumulated kernel sums."""
        return (
            self.s2s / self.s2s_total
            + self.t2t / self.t2t_total
            - 2 * (self.s2t / self.s2t_total)
        )
class JointMaximumMeanDiscrepancyLoss(torchmetrics.Metric):
    """The Joint Maximum Mean Discrepancy Loss (JMMD) is a distance measure between
    multiple pairs of arbitrary distributions.
    It is related to the MMD insofar as the distance of each distribution pair in a
    reproducing Hilbert kernel space (RHKS) is calculated and then multiplied before
    the discrepancy is computed.
    ```python
    joint_rhks(xs, ys) = prod(rhks(x, y) for x, y in zip(xs, xs))
    ```
    For more information see
    [MaximumMeanDiscrepancyLoss] [rul_adapt.loss.adaption.MaximumMeanDiscrepancyLoss].
    """
    is_differentiable = True
    higher_is_better = False
    full_state_update = False
    # accumulated joint-kernel sums for the source-source, target-target and
    # source-target blocks, plus the entry counts used to normalize them
    s2s: torch.Tensor
    t2t: torch.Tensor
    s2t: torch.Tensor
    s2s_total: torch.Tensor
    t2t_total: torch.Tensor
    s2t_total: torch.Tensor
    def __init__(self) -> None:
        """
        Create a new JMMD loss module.
        It features a single Gaussian kernel per feature pair with a bandwidth
        derived from the mean pairwise distance (see `_get_gammas`).
        """
        super().__init__()
        self.add_state("s2s", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("t2t", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("s2t", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("s2s_total", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("t2t_total", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("s2t_total", torch.tensor(0.0), dist_reduce_fx="sum")
    def update(
        self, source_features: List[torch.Tensor], target_features: List[torch.Tensor]
    ) -> None:
        """
        Accumulate the JMMD statistics for multiple feature distributions.
        The loss itself is returned by `compute`.
        Args:
            source_features: The list of source features of shape
                `[batch_size, num_features]`.
            target_features: The list of target features of shape
                `[batch_size, num_features]`.
        """
        distances = []
        for source, target in zip(source_features, target_features):
            feats = torch.cat([source, target], dim=0)
            dist = calc_pairwise_euclidean(feats, feats)
            (gamma,) = _get_gammas(dist, 1)
            distances.append(_calc_gaussian_kernel(dist, gamma))
        batch_size = source_features[0].shape[0]
        # element-wise product joins the kernel matrices of all feature pairs
        merged_distances = torch.stack(distances, dim=0).prod(dim=0)
        s2s, t2t, s2t = _split_distances(merged_distances, batch_size)
        self.s2s = self.s2s + s2s.sum()
        self.t2t = self.t2t + t2t.sum()
        self.s2t = self.s2t + s2t.sum()
        self.s2s_total = self.s2s_total + s2s.numel()
        self.t2t_total = self.t2t_total + t2t.numel()
        self.s2t_total = self.s2t_total + s2t.numel()
    def compute(self) -> torch.Tensor:
        """Return the biased JMMD estimate from the accumulated kernel sums."""
        return (
            self.s2s / self.s2s_total
            + self.t2t / self.t2t_total
            - 2 * (self.s2t / self.s2t_total)
        )
class DomainAdversarialLoss(torchmetrics.Metric):
    """Domain Adversarial Neural Network (DANN) loss.

    A domain discriminator network is trained to tell source features (domain
    label zero) from target features (domain label one) via a binary cross
    entropy on its single unscaled output score (logit). The discriminator
    must not apply an activation function on its last layer.

    The discriminator is preceded by a [GradientReversalLayer]
    [rul_adapt.loss.adaption.GradientReversalLayer], so while it learns to
    separate the domains, the network producing its inputs is trained to make
    them indistinguishable."""

    is_differentiable = True
    higher_is_better = False
    full_state_update = False

    loss: torch.Tensor
    total: torch.Tensor

    def __init__(self, domain_disc: nn.Module) -> None:
        """
        Create a new DANN loss module.

        Args:
            domain_disc: The neural network to act as the domain discriminator.
        """
        super().__init__()
        self.domain_disc = domain_disc
        self.grl = GradientReversalLayer()
        self.add_state("loss", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum")

    def update(self, source: torch.Tensor, target: torch.Tensor) -> None:
        """
        Accumulate the binary cross entropy of the discriminator's predictions
        for the source and target features.

        The source features receive a domain label of zero and the target
        features a domain label of one.

        Args:
            source: The source features with domain label zero.
            target: The target features with domain label one.
        """
        num_source = source.shape[0]
        features = torch.cat([source, target])
        num_total = features.shape[0]
        domain_labels = torch.cat(
            [
                torch.zeros(num_source, 1, device=self.device),
                torch.ones(num_total - num_source, 1, device=self.device),
            ]
        )
        logits = self.domain_disc(self.grl(features))
        bce = nn.functional.binary_cross_entropy_with_logits(
            logits, domain_labels, reduction="sum"
        )
        self.loss = self.loss + bce
        self.total = self.total + num_total

    def compute(self) -> torch.Tensor:
        """Return the mean binary cross entropy over all seen samples."""
        return self.loss / self.total
class GradientReversalLayer(nn.Module):
    """The gradient reversal layer (GRL) acts as an identity function in the forward
    pass and scales the gradient by a negative scalar in the backward pass.
    ```python
    GRL(f(x)) = f(x)
    GRL`(f(x)) = -lambda * f`(x)
    ```
    """
    # registered as a buffer so it moves with the module across devices/dtypes
    # and is included in the state dict
    grad_weight: torch.Tensor
    def __init__(self, grad_weight: float = 1.0) -> None:
        """
        Create a new Gradient Reversal Layer.
        Args:
            grad_weight: The scalar that weights the negative gradient.
        """
        super().__init__()
        self.register_buffer("grad_weight", torch.tensor(grad_weight))
    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Return `inputs` unchanged; gradients are reversed in the backward pass."""
        return _gradient_reversal(inputs, self.grad_weight)
def _gradient_reversal(x: torch.Tensor, grad_weight: torch.Tensor) -> torch.Tensor:
    """Perform gradient reversal on input.

    Identity in the forward pass; the backward pass multiplies the incoming
    gradient by `-grad_weight` (see `_GradientReverse`).
    """
    return _GradientReverse.apply(x, grad_weight)
class _GradientReverse(torch.autograd.Function):
    """Gradient reversal forward and backward definitions."""
    @staticmethod
    def forward(  # type: ignore
        ctx: Any, inputs: torch.Tensor, grad_weight: torch.Tensor
    ) -> torch.Tensor:
        """Forward pass as identity mapping."""
        ctx.grad_weight = grad_weight  # stash the scale for the backward pass
        return inputs
    @staticmethod
    def backward(  # type: ignore
        ctx: Any, grad: torch.Tensor
    ) -> Tuple[torch.Tensor, None]:
        """Backward pass as negative, scaled gradient."""
        # second element is the gradient w.r.t. `grad_weight`, which is unused
        return -ctx.grad_weight * grad, None
    @staticmethod
    def jvp(ctx: Any, *grad_inputs: Any) -> Any:
        # NOTE(review): forward-mode AD is left unimplemented (returns None);
        # presumably a stub to satisfy the Function interface -- confirm.
        pass
def _split_distances(distances, split_idx):
s2s = distances[:split_idx, :split_idx]
t2t = distances[split_idx:, split_idx:]
s2t = distances[:split_idx, split_idx:]
return s2s, t2t, s2t
def _calc_multi_kernel(distances: torch.Tensor, gammas: List[float]) -> torch.Tensor:
    """Average multiple Gaussian kernels applied to the same distance matrix."""
    combined = torch.zeros_like(distances)
    for gamma in gammas:
        combined = combined + _calc_gaussian_kernel(distances, gamma)
    return combined / len(gammas)
def _calc_gaussian_kernel(distances: torch.Tensor, gamma: float) -> torch.Tensor:
"""Compute a single Gaussian kernel."""
return torch.exp(-distances * gamma)
def _get_gammas(distances: torch.Tensor, num_kernels: int) -> List[float]:
    """Compute gammas for n Gaussian kernels.

    The base bandwidth is the reciprocal of the mean off-diagonal pairwise
    distance (there are `n**2 - n` off-diagonal entries), a mean-based variant
    of the median heuristic. The n kernels receive bandwidths spaced by powers
    of two around this base value.

    NOTE(review): despite the annotation, the returned values are zero-dim
    tensors, because `bandwidth` is derived from a tensor sum -- confirm
    whether callers rely on this.
    """
    n_samples = distances.shape[0]
    # detach so the bandwidth is treated as a constant w.r.t. gradients
    bandwidth = (n_samples**2 - n_samples) / torch.sum(distances.detach())
    bandwidth /= 2 ** (num_kernels // 2)
    gammas = [bandwidth * (2**i) for i in range(num_kernels)]
    return gammas
class ConsistencyLoss(torchmetrics.Metric):
    """Mean absolute difference between leader and follower features,
    accumulated as a batch-size weighted mean over all updates."""

    is_differentiable = True
    higher_is_better = False
    full_state_update = False

    # per-batch mean absolute differences and the corresponding batch sizes
    loss: List[torch.Tensor]
    total: List[torch.Tensor]

    def __init__(self):
        super().__init__()
        self.add_state("loss", [], dist_reduce_fx="cat")
        self.add_state("total", [], dist_reduce_fx="cat")

    def update(
        self, leader_features: torch.Tensor, follower_features: torch.Tensor
    ) -> None:
        batch_loss = (leader_features - follower_features).abs().mean()
        self.loss.append(batch_loss)
        self.total.append(
            torch.tensor(leader_features.shape[0], device=self.device)
        )

    def compute(self) -> torch.Tensor:
        return weighted_mean(self.loss, self.total)
import math
from typing import Any, Literal, Optional
import torch
import torchmetrics
class RULScore(torchmetrics.Metric):
    """Asymmetric exponential RUL scoring metric.

    In `phm08` mode, the per-sample errors receive factor 10 when positive and
    -13 when negative, are exponentiated, shifted by -1, and summed. In
    `phm12` mode, the errors are converted to percentages of the targets,
    scaled by log(0.5)-based factors (20 for positive, -5 for negative errors),
    and averaged. The `mean` argument overrides the mode's default of summing
    ('phm08') or averaging ('phm12').
    """
    is_differentiable = True
    higher_is_better = None
    full_state_update = False
    # accumulated raw score and number of samples seen
    loss: torch.Tensor
    total: torch.Tensor
    def __init__(
        self, mode: Literal["phm08", "phm12"] = "phm08", mean: Optional[bool] = None
    ):
        """
        Create a new RUL score metric.

        Args:
            mode: The scoring convention, either 'phm08' or 'phm12'.
            mean: If given, overrides whether the score is averaged over samples.
        Raises:
            ValueError: If `mode` is unknown.
        """
        super().__init__()
        if mode == "phm08":
            self.pos_factor = 10.0
            self.neg_factor = -13.0
            self.offset = -1.0
            self.as_percentage = False
            self.mean = False
        elif mode == "phm12":
            self.pos_factor = 20 / math.log(0.5)
            self.neg_factor = -5 / math.log(0.5)
            self.offset = 0.0
            self.as_percentage = True
            self.mean = True
        else:
            raise ValueError(f"Unknown RUL score mode: {mode}")
        if mean is not None:
            self.mean = mean  # explicit override of the mode's default
        self.add_state("loss", torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("total", torch.tensor(0.0), dist_reduce_fx="sum")
    def update(self, inputs: torch.Tensor, targets: torch.Tensor) -> None:
        """Accumulate the score for a batch of predictions and targets."""
        self.loss = self.loss + rul_score(
            inputs,
            targets,
            self.pos_factor,
            self.neg_factor,
            self.offset,
            self.as_percentage,
        )
        self.total = self.total + inputs.shape[0]
    def compute(self) -> Any:
        """Return the accumulated score, averaged over samples if `self.mean`."""
        if self.mean:
            return self.loss / self.total
        else:
            return self.loss
def rul_score(
    inputs: torch.Tensor,
    targets: torch.Tensor,
    pos_factor: float,
    neg_factor: float,
    offset: float,
    as_percentage: bool,
) -> torch.Tensor:
    """Compute an asymmetric exponential RUL score.

    Each error (prediction minus target, optionally as a percentage of the
    target) is divided by `pos_factor` when non-negative and by `neg_factor`
    when negative, exponentiated, shifted by `offset` and summed.

    Args:
        inputs: The RUL predictions.
        targets: The RUL ground truth.
        pos_factor: Divisor applied to non-negative errors.
        neg_factor: Divisor applied to negative errors.
        offset: Added to each exponentiated error before summing.
        as_percentage: Whether errors are expressed as percentages of targets.
    Returns:
        The summed score over all samples.
    """
    errors = inputs - targets
    if as_percentage:
        errors = errors / targets * 100
    scale = torch.full_like(errors, 1.0)
    scale[errors >= 0] /= pos_factor
    scale[errors < 0] /= neg_factor
    per_sample = torch.exp(errors * scale) + offset
    return per_sample.sum()
from typing import List, Tuple, Sequence
import torch
import torchmetrics
from torch import nn
class ConditionalAdaptionLoss(torchmetrics.Metric):
    """The Conditional Adaptions loss is a combination of multiple losses, each of
    which is only applied to a subset of the incoming data.
    The subsets are defined by fuzzy sets with a rectangular membership function. The
    prediction for each sample is checked against the fuzzy sets, and the
    corresponding loss is applied to the sample. The combined loss can be set as the
    sum of all components or their mean."""
    is_differentiable = True
    higher_is_better = False
    # NOTE(review): full-state updates are enabled here, unlike the other
    # adaption losses; presumably because `update` reads and accumulates into
    # `loss` directly -- confirm against the torchmetrics documentation.
    full_state_update = True
    loss: torch.Tensor
    # counts calls to `update` so `compute` can reject multi-batch accumulation
    batch_counter: torch.Tensor
    def __init__(
        self,
        adaption_losses: Sequence[torchmetrics.Metric],
        fuzzy_sets: List[Tuple[float, float]],
        mean_over_sets: bool = True,
    ) -> None:
        """
        Create a new Conditional Adaption loss over fuzzy sets.
        The fuzzy sets' boundaries are inclusive on the left and exclusive on the right.
        The left boundary is supposed to be smaller than the right boundary.
        This loss should not be used as a way to accumulate its value over multiple
        batches.
        Args:
            adaption_losses: The list of losses to be applied to the subsets.
            fuzzy_sets: The fuzzy sets to be used for subset membership.
            mean_over_sets: Whether to take the mean or the sum of the losses.
        Raises:
            ValueError: If any fuzzy set has a left bound that is not smaller
                than its right bound.
        """
        super().__init__()
        self._check_fuzzy_sets(fuzzy_sets)
        self.adaption_losses = nn.ModuleList(adaption_losses)  # registers parameters
        self.fuzzy_sets = fuzzy_sets
        self.mean_over_sets = mean_over_sets
        self.add_state("loss", default=torch.tensor(0.0), dist_reduce_fx="sum")
        self.add_state("batch_counter", default=torch.tensor(0), dist_reduce_fx="sum")
    def _check_fuzzy_sets(self, fuzzy_sets: List[Tuple[float, float]]) -> None:
        # each set must be a non-empty half-open interval [left, right)
        for i, (left_bound, right_bound) in enumerate(fuzzy_sets):
            if left_bound >= right_bound:
                raise ValueError(
                    f"Fuzzy set {i} with bounds ({left_bound}, {right_bound}) has "
                    f"a left bound greater than or equal to the right bound."
                )
    def update(
        self,
        source: torch.Tensor,
        source_preds: torch.Tensor,
        target: torch.Tensor,
        target_preds: torch.Tensor,
    ) -> None:
        """
        Update the loss with the given data.
        The predictions for the source and target data are checked against the fuzzy
        sets to determine membership.
        Args:
            source: The source features.
            source_preds: The predictions for the source features.
            target: The target features.
            target_preds: The predictions for the target features.
        """
        for fuzzy_set, adaption_loss in zip(self.fuzzy_sets, self.adaption_losses):
            selected_source = source[_membership(source_preds, fuzzy_set)]
            selected_target = target[_membership(target_preds, fuzzy_set)]
            # a fuzzy set only contributes if both domains have members in it
            if selected_source.shape[0] > 0 and selected_target.shape[0] > 0:
                self.loss = self.loss + adaption_loss(selected_source, selected_target)
        self.batch_counter += 1
    def compute(self) -> torch.Tensor:
        """
        Compute the loss as either the sum or mean of all subset losses.
        Returns:
            The combined loss.
        Raises:
            RuntimeError: If `update` was called more than once since the last
                reset, which this loss does not support.
        """
        if self.batch_counter > 1:
            raise RuntimeError(
                "The update method of this loss was called multiple times before "
                "computing the loss. This is not supported."
            )
        if self.mean_over_sets:
            return self.loss / len(self.adaption_losses)
        else:
            return self.loss
def _membership(preds: torch.Tensor, fuzzy_set: Tuple[float, float]) -> torch.Tensor:
preds = preds.squeeze() if len(preds.shape) > 1 else preds
membership = (preds >= fuzzy_set[0]) & (preds < fuzzy_set[1])
return membership | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/loss/conditional.py | 0.978052 | 0.842021 | conditional.py | pypi |
from typing import Tuple, Any
import hydra
import omegaconf
import pytorch_lightning as pl
import rul_datasets
from rul_adapt.approach import DannApproach
def get_lstm_dann(
    source_fd: int, target_fd: int, **trainer_kwargs: Any
) -> Tuple[rul_datasets.DomainAdaptionDataModule, DannApproach, pl.Trainer]:
    """
    Construct an LSTM-DANN approach for CMAPSS with the original hyperparameters.

    Examples:
        ```pycon
        >>> import rul_adapt
        >>> dm, dann, trainer = rul_adapt.construct.get_lstm_dann(3, 1)
        >>> trainer.fit(dann, dm)
        >>> trainer.test(dann, dm)
        ```

    Args:
        source_fd: The source FD of CMAPSS.
        target_fd: The target FD of CMAPSS.
        trainer_kwargs: Overrides for the trainer class.
    Returns:
        dm: The data module for adaption of two CMAPSS sub-datasets.
        dann: The DANN approach with feature extractor, regressor and domain disc.
        trainer: The trainer object.
    """
    config = get_lstm_dann_config(source_fd, target_fd)
    return lstm_dann_from_config(config, **trainer_kwargs)
def get_lstm_dann_config(source_fd: int, target_fd: int) -> omegaconf.DictConfig:
    """
    Get a configuration for the LSTM-DANN approach.
    The configuration can be modified and fed to [lstm_dann_from_config]
    [rul_adapt.construct.lstm_dann.lstm_dann_from_config] to create the approach.
    Args:
        source_fd: The source FD of CMAPSS.
        target_fd: The target FD of CMAPSS.
    Returns:
        The LSTM-DANN configuration.
    Raises:
        ValueError: If the FD pair is invalid or identical.
    """
    _validate(source_fd, target_fd)
    # the "config" directory is resolved relative to this module by hydra
    with hydra.initialize("config", version_base="1.1"):
        config = hydra.compose("base", overrides=[f"+task={source_fd}-{target_fd}"])
    return config
def lstm_dann_from_config(
    config: omegaconf.DictConfig, **trainer_kwargs: Any
) -> Tuple[rul_datasets.DomainAdaptionDataModule, DannApproach, pl.Trainer]:
    """
    Construct a LSTM-DANN approach from a configuration.

    The configuration can be created by calling [get_lstm_dann_config]
    [rul_adapt.construct.lstm_dann.get_lstm_dann_config].

    Args:
        config: The LSTM-DANN configuration.
        trainer_kwargs: Overrides for the trainer class.
    Returns:
        dm: The data module for adaption of two CMAPSS sub-datasets.
        dann: The DANN approach with feature extractor, regressor and domain disc.
        trainer: The trainer object.
    """
    source_reader = hydra.utils.instantiate(config.dm.source)
    target_reader = source_reader.get_compatible(**config.dm.target)
    batch_size = config.dm.batch_size
    dm = rul_datasets.DomainAdaptionDataModule(
        rul_datasets.RulDataModule(source_reader, batch_size),
        rul_datasets.RulDataModule(target_reader, batch_size),
    )
    # keep the instantiation order of the networks so that any random weight
    # initialization matches the original construction
    feature_extractor = hydra.utils.instantiate(config.feature_extractor)
    regressor = hydra.utils.instantiate(config.regressor)
    domain_disc = hydra.utils.instantiate(config.domain_disc)
    approach = hydra.utils.instantiate(config.dann)
    approach.set_model(feature_extractor, regressor, domain_disc)
    trainer = hydra.utils.instantiate(config.trainer, **trainer_kwargs)
    return dm, approach, trainer
def _validate(source_fd: int, target_fd: int) -> None:
if source_fd == target_fd:
raise ValueError(
f"No configuration for adapting from FD{source_fd:03} to itself."
)
elif 1 > source_fd or source_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{source_fd:03}")
elif 1 > target_fd or target_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{target_fd:03}") | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/construct/lstm_dann/functional.py | 0.887656 | 0.793366 | functional.py | pypi |
from typing import Tuple, Any
import hydra
import omegaconf
import pytorch_lightning as pl
import rul_datasets
from rul_adapt.approach import MmdApproach
def get_tbigru(
    source_fd: int, target_fd: int, **trainer_kwargs: Any
) -> Tuple[rul_datasets.DomainAdaptionDataModule, MmdApproach, pl.Trainer]:
    """
    Construct a TBiGRU approach for FEMTO with the original hyperparameters.

    Examples:
        ```pycon
        >>> import rul_adapt
        >>> dm, tbigru, trainer = rul_adapt.construct.get_tbigru(3, 1)
        >>> trainer.fit(tbigru, dm)
        >>> trainer.test(tbigru, dm)
        ```

    Args:
        source_fd: The source FD of FEMTO.
        target_fd: The target FD of FEMTO.
        trainer_kwargs: Overrides for the trainer class.
    Returns:
        dm: The data module for adaption of two FEMTO sub-datasets.
        dann: The TBiGRU approach with feature extractor and regressor.
        trainer: The trainer object.
    """
    config = get_tbigru_config(source_fd, target_fd)
    return tbigru_from_config(config, **trainer_kwargs)
def get_tbigru_config(source_fd: int, target_fd: int) -> omegaconf.DictConfig:
    """
    Get a configuration for the TBiGRU approach.
    The configuration can be modified and fed to [tbigru_from_config]
    [rul_adapt.construct.tbigru.tbigru_from_config] to create the approach.
    Args:
        source_fd: The source FD of FEMTO.
        target_fd: The target FD of FEMTO.
    Returns:
        The TBiGRU configuration.
    Raises:
        ValueError: If the FD pair is invalid or identical.
    """
    _validate(source_fd, target_fd)
    # the "config" directory is resolved relative to this module by hydra
    with hydra.initialize("config", version_base="1.1"):
        config = hydra.compose("base", overrides=[f"+task={source_fd}-{target_fd}"])
    return config
def tbigru_from_config(
    config: omegaconf.DictConfig, **trainer_kwargs: Any
) -> Tuple[rul_datasets.DomainAdaptionDataModule, MmdApproach, pl.Trainer]:
    """
    Construct a TBiGRU approach from a configuration.

    The configuration can be created by calling [get_tbigru_config]
    [rul_adapt.construct.tbigru.get_tbigru_config].

    Args:
        config: The TBiGRU configuration.
        trainer_kwargs: Overrides for the trainer class.
    Returns:
        dm: The data module for adaption of two FEMTO sub-datasets.
        tbigru: The TBiGRU approach with feature extractor and regressor.
        trainer: The trainer object.
    """
    # Instantiate the readers for both FEMTO sub-datasets from the config.
    source = hydra.utils.instantiate(config.dm.source)
    target = hydra.utils.instantiate(config.dm.target)
    source.prepare_data()  # needed in case this is the first use of FEMTO
    target.prepare_data()
    # The feature extractor is fitted on the dev-split features of BOTH
    # domains before being plugged into the data modules below.
    extractor = hydra.utils.instantiate(config.dm.feature_extractor)
    extractor.fit(source.load_split("dev")[0] + target.load_split("dev")[0])
    dm = rul_datasets.DomainAdaptionDataModule(
        rul_datasets.RulDataModule(
            source, config.dm.batch_size, extractor, config.dm.window_size
        ),
        rul_datasets.RulDataModule(
            target, config.dm.batch_size, extractor, config.dm.window_size
        ),
    )
    # Build the networks and wire them into the MMD-based TBiGRU approach.
    feature_extractor = hydra.utils.instantiate(config.feature_extractor)
    regressor = hydra.utils.instantiate(config.regressor)
    tbigru = hydra.utils.instantiate(config.tbigru)
    tbigru.set_model(feature_extractor, regressor)
    trainer = hydra.utils.instantiate(config.trainer, **trainer_kwargs)
    return dm, tbigru, trainer
def _validate(source_fd: int, target_fd: int) -> None:
if source_fd == target_fd:
raise ValueError(
f"No configuration for adapting from FD{source_fd:03} to itself."
)
elif 1 > source_fd or source_fd > 3:
raise ValueError(f"FEMTO has only FD001 to FD003 but no FD{source_fd:03}")
elif 1 > target_fd or target_fd > 3:
raise ValueError(f"FEMTO has only FD001 to FD003 but no FD{target_fd:03}") | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/construct/tbigru/functional.py | 0.847132 | 0.845273 | functional.py | pypi |
from typing import Literal, Tuple, Any, Dict, Optional
import hydra
import omegaconf
import rul_datasets
import pytorch_lightning as pl
from torch import nn
from rul_adapt.approach import ConsistencyApproach, SupervisedApproach
def get_consistency_dann(
    dataset: Literal["cmapss", "xjtu-sy"],
    source_fd: int,
    target_fd: int,
    pre_trainer_kwargs: Optional[Dict[str, Any]] = None,
    trainer_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[
    Tuple[rul_datasets.RulDataModule, SupervisedApproach, pl.Trainer],
    Tuple[
        rul_datasets.DomainAdaptionDataModule,
        nn.Module,
        ConsistencyApproach,
        pl.Trainer,
    ],
]:
    """
    Construct a Consistency DANN approach with the original hyperparameters.

    Examples:
        ```pycon
        >>> import rul_adapt
        >>> pre, main = rul_adapt.construct.get_consistency_dann("cmapss", 3, 1)
        >>> pre_dm, pre_approach, pre_trainer = pre
        >>> dm, approach, domain_disc, trainer = main
        ```

    Args:
        dataset: The name of the dataset, either `cmapss` or `xjtu-sy`.
        source_fd: The source FD of CMAPSS.
        target_fd: The target FD of CMAPSS.
        pre_trainer_kwargs: Overrides for the pre-training trainer class.
        trainer_kwargs: Overrides for the main trainer class.
    Returns:
        pre: The data module, approach and trainer for the pre-training stage
        main: The data module, approach, domain discriminator and trainer for
              the main stage
    """
    config = get_consistency_dann_config(dataset, source_fd, target_fd)

    # Delegate to the config-based constructor; users can tweak the config by
    # calling get_consistency_dann_config/consistency_dann_from_config directly.
    return consistency_dann_from_config(config, pre_trainer_kwargs, trainer_kwargs)
def get_consistency_dann_config(
    dataset: Literal["cmapss", "xjtu-sy"], source_fd: int, target_fd: int
) -> omegaconf.DictConfig:
    """
    Get a configuration for the Consistency DANN approach.

    The configuration can be modified and fed to [consistency_dann_from_config]
    [rul_adapt.construct.consistency.consistency_dann_from_config] to create the
    approach.

    Args:
        dataset: The name of the dataset, either `cmapss` or `xjtu-sy`.
        source_fd: The source FD of CMAPSS.
        target_fd: The target FD of CMAPSS.
    Returns:
        The Consistency DANN configuration.
    """
    _validate(dataset, source_fd, target_fd)
    # Select the dataset-specific config group and patch in both FDs.
    overrides = [
        f"+dataset={dataset}",
        f"dm.source.fd={source_fd}",
        f"dm.target.fd={target_fd}",
    ]
    with hydra.initialize("config", version_base="1.1"):
        return hydra.compose("base", overrides=overrides)
def consistency_dann_from_config(
    config: omegaconf.DictConfig,
    pre_trainer_kwargs: Optional[Dict[str, Any]] = None,
    trainer_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[
    Tuple[rul_datasets.RulDataModule, SupervisedApproach, pl.Trainer],
    Tuple[
        rul_datasets.DomainAdaptionDataModule,
        nn.Module,
        ConsistencyApproach,
        pl.Trainer,
    ],
]:
    """
    Construct a Consistency DANN approach from a configuration.

    The configuration can be created by calling [get_consistency_dann_config]
    [rul_adapt.construct.consistency.get_consistency_dann_config].

    Args:
        config: The Consistency DANN config.
        pre_trainer_kwargs: Overrides for the pre-training trainer class.
        trainer_kwargs: Overrides for the main trainer class.
    Returns:
        pre: The data module, approach and trainer for the pre-training stage
        main: The data module, approach, domain discriminator and trainer for
              the main stage
    """
    # The target reader is derived from the source reader so both share all
    # settings except what config.dm.target overrides.
    source = hydra.utils.instantiate(config.dm.source)
    target = source.get_compatible(**config.dm.target)
    kwargs = hydra.utils.instantiate(config.dm.kwargs)
    # dm_pre (source only) is reused as the source half of the adaption dm.
    dm_pre = rul_datasets.RulDataModule(source, **kwargs)
    dm = rul_datasets.DomainAdaptionDataModule(
        dm_pre, rul_datasets.RulDataModule(target, **kwargs)
    )
    feature_extractor = hydra.utils.instantiate(config.feature_extractor)
    regressor = hydra.utils.instantiate(config.regressor)
    domain_disc = hydra.utils.instantiate(config.domain_disc)
    approach_pre = hydra.utils.instantiate(config.consistency_pre)
    approach_pre.set_model(feature_extractor, regressor)
    # NOTE(review): only the pre-training approach gets its models set here;
    # the main approach presumably receives the pre-trained models from the
    # caller after the first stage — confirm against the training script.
    approach = hydra.utils.instantiate(config.consistency)
    pre_trainer_kwargs = pre_trainer_kwargs or {}
    trainer_kwargs = trainer_kwargs or {}
    trainer_pre = hydra.utils.instantiate(config.trainer_pre, **pre_trainer_kwargs)
    trainer = hydra.utils.instantiate(config.trainer, **trainer_kwargs)
    return (dm_pre, approach_pre, trainer_pre), (dm, approach, domain_disc, trainer)
def _validate(
dataset: Literal["cmapss", "xjtu-sy"], source_fd: int, target_fd: int
) -> None:
if dataset not in ["cmapss", "xjtu-sy"]:
raise ValueError(f"No configuration for '{dataset}'.")
elif source_fd == target_fd:
raise ValueError(
f"No configuration for adapting from FD{source_fd:03} to itself."
)
elif dataset == "cmapss":
_validate_cmapss(source_fd, target_fd)
elif dataset == "xjtu-sy":
_validate_xjtu_sy(source_fd, target_fd)
def _validate_cmapss(source_fd: int, target_fd: int):
if 1 > source_fd or source_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{source_fd:03}")
elif 1 > target_fd or target_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{target_fd:03}")
def _validate_xjtu_sy(source_fd: int, target_fd: int):
if 1 > source_fd or source_fd > 3:
raise ValueError(f"XJTU-SY has only FD001 to FD003 but no FD{source_fd:03}")
elif 1 > target_fd or target_fd > 3:
raise ValueError(f"XJTU-SY has only FD001 to FD003 but no FD{target_fd:03}") | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/construct/consistency/functional.py | 0.928627 | 0.745815 | functional.py | pypi |
from typing import Tuple, Optional, Dict, Any
import hydra
import omegaconf
import rul_datasets
import pytorch_lightning as pl
from torch import nn
from rul_adapt.approach import SupervisedApproach, AdaRulApproach
def get_adarul(
    source_fd: int,
    target_fd: int,
    pre_trainer_kwargs: Optional[Dict[str, Any]] = None,
    trainer_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[
    Tuple[rul_datasets.RulDataModule, SupervisedApproach, pl.Trainer],
    Tuple[
        rul_datasets.DomainAdaptionDataModule,
        nn.Module,
        AdaRulApproach,
        pl.Trainer,
    ],
]:
    """
    Construct an ADARUL approach with the original hyperparameters on CMAPSS.

    Examples:
        ```pycon
        >>> import rul_adapt
        >>> pre, main = rul_adapt.construct.get_adarul(3, 1)
        >>> pre_dm, pre_approach, pre_trainer = pre
        >>> dm, approach, domain_disc, trainer = main
        ```

    Args:
        source_fd: The source FD of CMAPSS.
        target_fd: The target FD of CMAPSS.
        pre_trainer_kwargs: Overrides for the pre-training trainer class.
        trainer_kwargs: Overrides for the main trainer class.
    Returns:
        pre: The data module, approach and trainer for the pre-training stage
        main: The data module, approach, domain discriminator and trainer for
              the main stage
    """
    # Delegate to the config-based constructor; users can tweak the config by
    # calling get_adarul_config/adarul_from_config directly.
    config = get_adarul_config(source_fd, target_fd)
    adarul = adarul_from_config(config, pre_trainer_kwargs, trainer_kwargs)
    return adarul
def get_adarul_config(source_fd: int, target_fd: int) -> omegaconf.DictConfig:
    """
    Get a configuration for the ADARUL approach.

    The configuration can be modified and fed to [adarul_from_config]
    [rul_adapt.construct.adarul.adarul_from_config] to create the approach.

    Args:
        source_fd: The source FD of CMAPSS.
        target_fd: The target FD of CMAPSS.
    Returns:
        The ADARUL configuration.
    """
    _validate(source_fd, target_fd)
    # Patch both FDs into the data module config and select the source-FD
    # specific task overrides.
    overrides = [
        f"dm.source.fd={source_fd}",
        f"dm.target.fd={target_fd}",
        f"+task_source=fd{source_fd}",
    ]
    with hydra.initialize("config", version_base="1.1"):
        return hydra.compose("base", overrides=overrides)
def adarul_from_config(
    config: omegaconf.DictConfig,
    pre_trainer_kwargs: Optional[Dict[str, Any]] = None,
    trainer_kwargs: Optional[Dict[str, Any]] = None,
) -> Tuple[
    Tuple[rul_datasets.RulDataModule, SupervisedApproach, pl.Trainer],
    Tuple[
        rul_datasets.DomainAdaptionDataModule,
        nn.Module,
        AdaRulApproach,
        pl.Trainer,
    ],
]:
    """
    Construct an ADARUL approach from a configuration.

    The configuration can be created by calling [get_adarul_config]
    [rul_adapt.construct.adarul.get_adarul_config].

    Args:
        config: The ADARUL config.
        pre_trainer_kwargs: Overrides for the pre-training trainer class.
        trainer_kwargs: Overrides for the main trainer class.
    Returns:
        pre: The data module, approach and trainer for the pre-training stage
        main: The data module, approach, domain discriminator and trainer for
              the main stage
    """
    instantiate = hydra.utils.instantiate

    # The target reader is derived from the source reader so both share all
    # settings except what config.dm.target overrides.
    source = instantiate(config.dm.source)
    target = source.get_compatible(**config.dm.target)
    # dm_pre (source only) is reused as the source half of the adaption dm.
    dm_pre = rul_datasets.RulDataModule(source, config.dm.batch_size)
    dm = rul_datasets.DomainAdaptionDataModule(
        dm_pre, rul_datasets.RulDataModule(target, config.dm.batch_size)
    )

    feature_extractor = instantiate(config.feature_extractor)
    regressor = instantiate(config.regressor)
    domain_disc = instantiate(config.domain_disc)

    # Only the pre-training approach gets its models set here; the main
    # approach is configured by the caller after the first stage.
    approach_pre = instantiate(config.adarul_pre)
    approach_pre.set_model(feature_extractor, regressor)
    approach = instantiate(config.adarul)

    trainer_pre = instantiate(config.trainer_pre, **(pre_trainer_kwargs or {}))
    trainer = instantiate(config.trainer, **(trainer_kwargs or {}))

    return (dm_pre, approach_pre, trainer_pre), (dm, approach, domain_disc, trainer)
def _validate(source_fd: int, target_fd: int):
if 1 > source_fd or source_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{source_fd:03}")
elif 1 > target_fd or target_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{target_fd:03}") | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/construct/adarul/functional.py | 0.926379 | 0.696913 | functional.py | pypi |
from typing import Tuple, Any
import hydra
import omegaconf
import pytorch_lightning as pl
import rul_datasets
import rul_adapt
from rul_adapt.approach import DannApproach
def get_cnn_dann(
    source_fd: int, target_fd: int, **trainer_kwargs: Any
) -> Tuple[rul_datasets.DomainAdaptionDataModule, DannApproach, pl.Trainer]:
    """
    Construct a CNN-DANN approach for CMAPSS with the original hyperparameters.

    The adaption tasks 1-->4, 2-->3, 3-->2 and 4-->1 are missing because they were
    not investigated in the paper.

    Examples:
        ```pycon
        >>> import rul_adapt
        >>> dm, dann, trainer = rul_adapt.construct.get_cnn_dann(3, 1)
        >>> trainer.fit(dann, dm)
        >>> trainer.test(dann, dm)
        ```

    Args:
        source_fd: The source FD of CMAPSS.
        target_fd: The target FD of CMAPSS.
        trainer_kwargs: Overrides for the trainer class.
    Returns:
        dm: The data module for adaption of two CMAPSS sub-datasets.
        dann: The DANN approach with feature extractor, regressor and domain disc.
        trainer: The trainer object.
    """
    config = get_cnn_dann_config(source_fd, target_fd)

    # Delegate to the config-based constructor; users can tweak the config by
    # calling get_cnn_dann_config/cnn_dann_from_config directly.
    return cnn_dann_from_config(config, **trainer_kwargs)
def get_cnn_dann_config(source_fd: int, target_fd: int) -> omegaconf.DictConfig:
    """
    Get a configuration for the CNN-DANN approach.

    The adaption tasks 1-->4, 2-->3, 3-->2 and 4-->1 are missing because they were
    not investigated in the paper. The configuration can be modified and fed to
    [cnn_dann_from_config][rul_adapt.construct.cnn_dann.cnn_dann_from_config] to
    create the approach.

    Args:
        source_fd: The source FD of CMAPSS.
        target_fd: The target FD of CMAPSS.
    Returns:
        The CNN-DANN configuration.
    """
    _validate(source_fd, target_fd)
    # Patch both FDs into the data module config.
    overrides = [f"dm.source.fd={source_fd}", f"dm.target.fd={target_fd}"]
    with hydra.initialize("config", version_base="1.1"):
        return hydra.compose("base", overrides=overrides)
def cnn_dann_from_config(
    config: omegaconf.DictConfig, **trainer_kwargs: Any
) -> Tuple[rul_datasets.DomainAdaptionDataModule, DannApproach, pl.Trainer]:
    """
    Construct a CNN-DANN approach from a configuration.

    The configuration can be created by calling [get_cnn_dann_config]
    [rul_adapt.construct.cnn_dann.get_cnn_dann_config].

    Args:
        config: The CNN-DANN configuration.
        trainer_kwargs: Overrides for the trainer class.
    Returns:
        dm: The data module for adaption of two CMAPSS sub-datasets.
        dann: The DANN approach with feature extractor, regressor and domain disc.
        trainer: The trainer object.
    """
    # The target reader is derived from the source reader so both share all
    # settings except what config.dm.target overrides.
    source = hydra.utils.instantiate(config.dm.source)
    target = source.get_compatible(**config.dm.target)
    dm = rul_datasets.DomainAdaptionDataModule(
        rul_datasets.RulDataModule(source, config.dm.batch_size),
        rul_datasets.RulDataModule(target, config.dm.batch_size),
    )
    feature_extractor = hydra.utils.instantiate(config.feature_extractor)
    regressor = hydra.utils.instantiate(config.regressor)
    domain_disc = hydra.utils.instantiate(config.domain_disc)
    # Apply the approach-specific weight initialization before training.
    rul_adapt.approach.cnn_dann.init_weights(feature_extractor, regressor)
    dann = hydra.utils.instantiate(config.dann)
    dann.set_model(feature_extractor, regressor, domain_disc)
    trainer = hydra.utils.instantiate(config.trainer, **trainer_kwargs)
    return dm, dann, trainer
def _validate(source_fd: int, target_fd: int) -> None:
task = (source_fd, target_fd)
if source_fd == target_fd:
raise ValueError(
f"No configuration for adapting from FD{source_fd:03} to itself."
)
elif 1 > source_fd or source_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{source_fd:03}")
elif 1 > target_fd or target_fd > 4:
raise ValueError(f"CMAPSS has only FD001 to FD004 but no FD{target_fd:03}")
elif task == (1, 4) or task == (2, 3) or task == (3, 2) or task == (4, 1):
raise ValueError(
f"No configuration for adapting from FD{source_fd:03} to FD{target_fd:03}."
) | /rul_adapt-0.2.0-py3-none-any.whl/rul_adapt/construct/cnn_dann/functional.py | 0.89913 | 0.790934 | functional.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.