body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
5d70dc6e08cc7c1257b31a4fef397266af96ba0ea6560443c3c2c88a10ca046b
def is_equal(self, state1, state2): 'Trivial implementation' return (state1 == state2)
Trivial implementation
cam/sgnmt/predictors/structure.py
is_equal
cimeister/sgnmt
59
python
def is_equal(self, state1, state2): return (state1 == state2)
def is_equal(self, state1, state2): return (state1 == state2)<|docstring|>Trivial implementation<|endoftext|>
062a78687c57476ee52503eca0817731886927e1ae7018a3f47ff72892b0362e
def queue_identification(self, queue, project): 'Restrictions on a project id & queue name pair.\n\n :param queue: Name of the queue\n :param project: Project id\n :raises ValidationFailed: if the `name` is longer than 64\n characters or contains anything other than ASCII digits and\n letters, underscores, and dashes. Also raises if `project`\n is not None but longer than 256 characters.\n ' if ((project is not None) and (len(project) > PROJECT_ID_MAX_LEN)): msg = _(u'Project ids may not be more than {0} characters long.') raise ValidationFailed(msg, PROJECT_ID_MAX_LEN) if (len(queue) > QUEUE_NAME_MAX_LEN): msg = _(u'Queue names may not be more than {0} characters long.') raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN) if (not QUEUE_NAME_REGEX.match(queue)): raise ValidationFailed(_(u'Queue names may only contain ASCII letters, digits, underscores, and dashes.'))
Restrictions on a project id & queue name pair. :param queue: Name of the queue :param project: Project id :raises ValidationFailed: if the `name` is longer than 64 characters or contains anything other than ASCII digits and letters, underscores, and dashes. Also raises if `project` is not None but longer than 256 characters.
zaqar/transport/validation.py
queue_identification
g894404753/zaqar
97
python
def queue_identification(self, queue, project): 'Restrictions on a project id & queue name pair.\n\n :param queue: Name of the queue\n :param project: Project id\n :raises ValidationFailed: if the `name` is longer than 64\n characters or contains anything other than ASCII digits and\n letters, underscores, and dashes. Also raises if `project`\n is not None but longer than 256 characters.\n ' if ((project is not None) and (len(project) > PROJECT_ID_MAX_LEN)): msg = _(u'Project ids may not be more than {0} characters long.') raise ValidationFailed(msg, PROJECT_ID_MAX_LEN) if (len(queue) > QUEUE_NAME_MAX_LEN): msg = _(u'Queue names may not be more than {0} characters long.') raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN) if (not QUEUE_NAME_REGEX.match(queue)): raise ValidationFailed(_(u'Queue names may only contain ASCII letters, digits, underscores, and dashes.'))
def queue_identification(self, queue, project): 'Restrictions on a project id & queue name pair.\n\n :param queue: Name of the queue\n :param project: Project id\n :raises ValidationFailed: if the `name` is longer than 64\n characters or contains anything other than ASCII digits and\n letters, underscores, and dashes. Also raises if `project`\n is not None but longer than 256 characters.\n ' if ((project is not None) and (len(project) > PROJECT_ID_MAX_LEN)): msg = _(u'Project ids may not be more than {0} characters long.') raise ValidationFailed(msg, PROJECT_ID_MAX_LEN) if (len(queue) > QUEUE_NAME_MAX_LEN): msg = _(u'Queue names may not be more than {0} characters long.') raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN) if (not QUEUE_NAME_REGEX.match(queue)): raise ValidationFailed(_(u'Queue names may only contain ASCII letters, digits, underscores, and dashes.'))<|docstring|>Restrictions on a project id & queue name pair. :param queue: Name of the queue :param project: Project id :raises ValidationFailed: if the `name` is longer than 64 characters or contains anything other than ASCII digits and letters, underscores, and dashes. Also raises if `project` is not None but longer than 256 characters.<|endoftext|>
c87cdd873c0acafa2d3e9a2ad1aec19689fac9b7759b29ae3e664fd8bab06694
def _decode_json_pointer(self, pointer): 'Parse a json pointer.\n\n Json Pointers are defined in\n http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .\n The pointers use \'/\' for separation between object attributes, such\n that \'/A/B\' would evaluate to C in {"A": {"B": "C"}}. A \'/\' character\n in an attribute name is encoded as "~1" and a \'~\' character is encoded\n as "~0".\n ' self._validate_json_pointer(pointer) ret = [] for part in pointer.lstrip('/').split('/'): ret.append(part.replace('~1', '/').replace('~0', '~').strip()) return ret
Parse a json pointer. Json Pointers are defined in http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . The pointers use '/' for separation between object attributes, such that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character in an attribute name is encoded as "~1" and a '~' character is encoded as "~0".
zaqar/transport/validation.py
_decode_json_pointer
g894404753/zaqar
97
python
def _decode_json_pointer(self, pointer): 'Parse a json pointer.\n\n Json Pointers are defined in\n http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .\n The pointers use \'/\' for separation between object attributes, such\n that \'/A/B\' would evaluate to C in {"A": {"B": "C"}}. A \'/\' character\n in an attribute name is encoded as "~1" and a \'~\' character is encoded\n as "~0".\n ' self._validate_json_pointer(pointer) ret = [] for part in pointer.lstrip('/').split('/'): ret.append(part.replace('~1', '/').replace('~0', '~').strip()) return ret
def _decode_json_pointer(self, pointer): 'Parse a json pointer.\n\n Json Pointers are defined in\n http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer .\n The pointers use \'/\' for separation between object attributes, such\n that \'/A/B\' would evaluate to C in {"A": {"B": "C"}}. A \'/\' character\n in an attribute name is encoded as "~1" and a \'~\' character is encoded\n as "~0".\n ' self._validate_json_pointer(pointer) ret = [] for part in pointer.lstrip('/').split('/'): ret.append(part.replace('~1', '/').replace('~0', '~').strip()) return ret<|docstring|>Parse a json pointer. Json Pointers are defined in http://tools.ietf.org/html/draft-pbryan-zyp-json-pointer . The pointers use '/' for separation between object attributes, such that '/A/B' would evaluate to C in {"A": {"B": "C"}}. A '/' character in an attribute name is encoded as "~1" and a '~' character is encoded as "~0".<|endoftext|>
7c2ba4f94ceeb5341942226fe22e2c826c83b63dd3568dc813fa2c364aca944d
def _validate_json_pointer(self, pointer): 'Validate a json pointer.\n\n We only accept a limited form of json pointers.\n ' if (not pointer.startswith('/')): msg = (_('Pointer `%s` does not start with "/".') % pointer) raise ValidationFailed(msg) if re.search('/\\s*?/', pointer[1:]): msg = (_('Pointer `%s` contains adjacent "/".') % pointer) raise ValidationFailed(msg) if ((len(pointer) > 1) and pointer.endswith('/')): msg = (_('Pointer `%s` end with "/".') % pointer) raise ValidationFailed(msg) if (pointer[1:].strip() == '/'): msg = (_('Pointer `%s` does not contains valid token.') % pointer) raise ValidationFailed(msg) if (re.search('~[^01]', pointer) or pointer.endswith('~')): msg = (_('Pointer `%s` contains "~" not part of a recognized escape sequence.') % pointer) raise ValidationFailed(msg)
Validate a json pointer. We only accept a limited form of json pointers.
zaqar/transport/validation.py
_validate_json_pointer
g894404753/zaqar
97
python
def _validate_json_pointer(self, pointer): 'Validate a json pointer.\n\n We only accept a limited form of json pointers.\n ' if (not pointer.startswith('/')): msg = (_('Pointer `%s` does not start with "/".') % pointer) raise ValidationFailed(msg) if re.search('/\\s*?/', pointer[1:]): msg = (_('Pointer `%s` contains adjacent "/".') % pointer) raise ValidationFailed(msg) if ((len(pointer) > 1) and pointer.endswith('/')): msg = (_('Pointer `%s` end with "/".') % pointer) raise ValidationFailed(msg) if (pointer[1:].strip() == '/'): msg = (_('Pointer `%s` does not contains valid token.') % pointer) raise ValidationFailed(msg) if (re.search('~[^01]', pointer) or pointer.endswith('~')): msg = (_('Pointer `%s` contains "~" not part of a recognized escape sequence.') % pointer) raise ValidationFailed(msg)
def _validate_json_pointer(self, pointer): 'Validate a json pointer.\n\n We only accept a limited form of json pointers.\n ' if (not pointer.startswith('/')): msg = (_('Pointer `%s` does not start with "/".') % pointer) raise ValidationFailed(msg) if re.search('/\\s*?/', pointer[1:]): msg = (_('Pointer `%s` contains adjacent "/".') % pointer) raise ValidationFailed(msg) if ((len(pointer) > 1) and pointer.endswith('/')): msg = (_('Pointer `%s` end with "/".') % pointer) raise ValidationFailed(msg) if (pointer[1:].strip() == '/'): msg = (_('Pointer `%s` does not contains valid token.') % pointer) raise ValidationFailed(msg) if (re.search('~[^01]', pointer) or pointer.endswith('~')): msg = (_('Pointer `%s` contains "~" not part of a recognized escape sequence.') % pointer) raise ValidationFailed(msg)<|docstring|>Validate a json pointer. We only accept a limited form of json pointers.<|endoftext|>
1a2394180e5ea132d30261fa03d406bc25d394da85581949782bd7bfe355857d
def queue_listing(self, limit=None, **kwargs): 'Restrictions involving a list of queues.\n\n :param limit: The expected number of queues in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_queues_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_queues_per_page)
Restrictions involving a list of queues. :param limit: The expected number of queues in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded
zaqar/transport/validation.py
queue_listing
g894404753/zaqar
97
python
def queue_listing(self, limit=None, **kwargs): 'Restrictions involving a list of queues.\n\n :param limit: The expected number of queues in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_queues_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_queues_per_page)
def queue_listing(self, limit=None, **kwargs): 'Restrictions involving a list of queues.\n\n :param limit: The expected number of queues in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_queues_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_queues_per_page)<|docstring|>Restrictions involving a list of queues. :param limit: The expected number of queues in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded<|endoftext|>
c3abdf6ee14962b051ef989ee541e98efab718f57a8e412631f91a928cf9d2df
def queue_metadata_length(self, content_length): "Restrictions on queue's length.\n\n :param content_length: Queue request's length.\n :raises ValidationFailed: if the metadata is oversize.\n " if (content_length is None): return if (content_length > self._limits_conf.max_queue_metadata): msg = _(u'Queue metadata is too large. Max size: {0}') raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)
Restrictions on queue's length. :param content_length: Queue request's length. :raises ValidationFailed: if the metadata is oversize.
zaqar/transport/validation.py
queue_metadata_length
g894404753/zaqar
97
python
def queue_metadata_length(self, content_length): "Restrictions on queue's length.\n\n :param content_length: Queue request's length.\n :raises ValidationFailed: if the metadata is oversize.\n " if (content_length is None): return if (content_length > self._limits_conf.max_queue_metadata): msg = _(u'Queue metadata is too large. Max size: {0}') raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)
def queue_metadata_length(self, content_length): "Restrictions on queue's length.\n\n :param content_length: Queue request's length.\n :raises ValidationFailed: if the metadata is oversize.\n " if (content_length is None): return if (content_length > self._limits_conf.max_queue_metadata): msg = _(u'Queue metadata is too large. Max size: {0}') raise ValidationFailed(msg, self._limits_conf.max_queue_metadata)<|docstring|>Restrictions on queue's length. :param content_length: Queue request's length. :raises ValidationFailed: if the metadata is oversize.<|endoftext|>
c869e373a52954a0a54aa18f406f072c74f356238e4475752403c57e286ab84c
def queue_metadata_putting(self, queue_metadata): "Checking if the reserved attributes of the queue are valid.\n\n :param queue_metadata: Queue's metadata.\n :raises ValidationFailed: if any reserved attribute is invalid.\n " if (not queue_metadata): return queue_default_ttl = queue_metadata.get('_default_message_ttl') if (queue_default_ttl and (not isinstance(queue_default_ttl, int))): msg = _(u'_default_message_ttl must be integer.') raise ValidationFailed(msg) if (queue_default_ttl is not None): if (not (MIN_MESSAGE_TTL <= queue_default_ttl <= self._limits_conf.max_message_ttl)): msg = _(u'_default_message_ttl can not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) queue_max_msg_size = queue_metadata.get('_max_messages_post_size', None) if (queue_max_msg_size and (not isinstance(queue_max_msg_size, int))): msg = _(u'_max_messages_post_size must be integer.') raise ValidationFailed(msg) if (queue_max_msg_size is not None): if (not (0 < queue_max_msg_size <= self._limits_conf.max_messages_post_size)): raise ValidationFailed(_(u'_max_messages_post_size can not exceed {0}, and must be at least greater than 0.'), self._limits_conf.max_messages_post_size) max_claim_count = queue_metadata.get('_max_claim_count', None) if (max_claim_count and (not isinstance(max_claim_count, int))): msg = _(u'_max_claim_count must be integer.') raise ValidationFailed(msg) dlq_ttl = queue_metadata.get('_dead_letter_queue_messages_ttl', None) if (dlq_ttl and (not isinstance(dlq_ttl, int))): msg = _(u'_dead_letter_queue_messages_ttl must be integer.') raise ValidationFailed(msg) if ((dlq_ttl is not None) and (not (MIN_MESSAGE_TTL <= dlq_ttl <= self._limits_conf.max_message_ttl))): msg = _(u'The TTL for a message may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) queue_delay = 
queue_metadata.get('_default_message_delay', None) if (queue_delay and (not isinstance(queue_delay, int))): msg = _(u'_default_message_delay must be integer.') raise ValidationFailed(msg) if (queue_delay is not None): if (not (MIN_DELAY_TTL <= queue_delay <= self._limits_conf.max_message_delay)): msg = _(u'The TTL can not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_delay, MIN_DELAY_TTL) encrypted_queue = queue_metadata.get('_enable_encrypt_messages', False) if (encrypted_queue and (not isinstance(encrypted_queue, bool))): msg = _(u'_enable_encrypt_messages must be boolean.') raise ValidationFailed(msg) self._validate_retry_policy(queue_metadata)
Checking if the reserved attributes of the queue are valid. :param queue_metadata: Queue's metadata. :raises ValidationFailed: if any reserved attribute is invalid.
zaqar/transport/validation.py
queue_metadata_putting
g894404753/zaqar
97
python
def queue_metadata_putting(self, queue_metadata): "Checking if the reserved attributes of the queue are valid.\n\n :param queue_metadata: Queue's metadata.\n :raises ValidationFailed: if any reserved attribute is invalid.\n " if (not queue_metadata): return queue_default_ttl = queue_metadata.get('_default_message_ttl') if (queue_default_ttl and (not isinstance(queue_default_ttl, int))): msg = _(u'_default_message_ttl must be integer.') raise ValidationFailed(msg) if (queue_default_ttl is not None): if (not (MIN_MESSAGE_TTL <= queue_default_ttl <= self._limits_conf.max_message_ttl)): msg = _(u'_default_message_ttl can not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) queue_max_msg_size = queue_metadata.get('_max_messages_post_size', None) if (queue_max_msg_size and (not isinstance(queue_max_msg_size, int))): msg = _(u'_max_messages_post_size must be integer.') raise ValidationFailed(msg) if (queue_max_msg_size is not None): if (not (0 < queue_max_msg_size <= self._limits_conf.max_messages_post_size)): raise ValidationFailed(_(u'_max_messages_post_size can not exceed {0}, and must be at least greater than 0.'), self._limits_conf.max_messages_post_size) max_claim_count = queue_metadata.get('_max_claim_count', None) if (max_claim_count and (not isinstance(max_claim_count, int))): msg = _(u'_max_claim_count must be integer.') raise ValidationFailed(msg) dlq_ttl = queue_metadata.get('_dead_letter_queue_messages_ttl', None) if (dlq_ttl and (not isinstance(dlq_ttl, int))): msg = _(u'_dead_letter_queue_messages_ttl must be integer.') raise ValidationFailed(msg) if ((dlq_ttl is not None) and (not (MIN_MESSAGE_TTL <= dlq_ttl <= self._limits_conf.max_message_ttl))): msg = _(u'The TTL for a message may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) queue_delay = 
queue_metadata.get('_default_message_delay', None) if (queue_delay and (not isinstance(queue_delay, int))): msg = _(u'_default_message_delay must be integer.') raise ValidationFailed(msg) if (queue_delay is not None): if (not (MIN_DELAY_TTL <= queue_delay <= self._limits_conf.max_message_delay)): msg = _(u'The TTL can not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_delay, MIN_DELAY_TTL) encrypted_queue = queue_metadata.get('_enable_encrypt_messages', False) if (encrypted_queue and (not isinstance(encrypted_queue, bool))): msg = _(u'_enable_encrypt_messages must be boolean.') raise ValidationFailed(msg) self._validate_retry_policy(queue_metadata)
def queue_metadata_putting(self, queue_metadata): "Checking if the reserved attributes of the queue are valid.\n\n :param queue_metadata: Queue's metadata.\n :raises ValidationFailed: if any reserved attribute is invalid.\n " if (not queue_metadata): return queue_default_ttl = queue_metadata.get('_default_message_ttl') if (queue_default_ttl and (not isinstance(queue_default_ttl, int))): msg = _(u'_default_message_ttl must be integer.') raise ValidationFailed(msg) if (queue_default_ttl is not None): if (not (MIN_MESSAGE_TTL <= queue_default_ttl <= self._limits_conf.max_message_ttl)): msg = _(u'_default_message_ttl can not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) queue_max_msg_size = queue_metadata.get('_max_messages_post_size', None) if (queue_max_msg_size and (not isinstance(queue_max_msg_size, int))): msg = _(u'_max_messages_post_size must be integer.') raise ValidationFailed(msg) if (queue_max_msg_size is not None): if (not (0 < queue_max_msg_size <= self._limits_conf.max_messages_post_size)): raise ValidationFailed(_(u'_max_messages_post_size can not exceed {0}, and must be at least greater than 0.'), self._limits_conf.max_messages_post_size) max_claim_count = queue_metadata.get('_max_claim_count', None) if (max_claim_count and (not isinstance(max_claim_count, int))): msg = _(u'_max_claim_count must be integer.') raise ValidationFailed(msg) dlq_ttl = queue_metadata.get('_dead_letter_queue_messages_ttl', None) if (dlq_ttl and (not isinstance(dlq_ttl, int))): msg = _(u'_dead_letter_queue_messages_ttl must be integer.') raise ValidationFailed(msg) if ((dlq_ttl is not None) and (not (MIN_MESSAGE_TTL <= dlq_ttl <= self._limits_conf.max_message_ttl))): msg = _(u'The TTL for a message may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) queue_delay = 
queue_metadata.get('_default_message_delay', None) if (queue_delay and (not isinstance(queue_delay, int))): msg = _(u'_default_message_delay must be integer.') raise ValidationFailed(msg) if (queue_delay is not None): if (not (MIN_DELAY_TTL <= queue_delay <= self._limits_conf.max_message_delay)): msg = _(u'The TTL can not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_delay, MIN_DELAY_TTL) encrypted_queue = queue_metadata.get('_enable_encrypt_messages', False) if (encrypted_queue and (not isinstance(encrypted_queue, bool))): msg = _(u'_enable_encrypt_messages must be boolean.') raise ValidationFailed(msg) self._validate_retry_policy(queue_metadata)<|docstring|>Checking if the reserved attributes of the queue are valid. :param queue_metadata: Queue's metadata. :raises ValidationFailed: if any reserved attribute is invalid.<|endoftext|>
fc4dd55409d6cc67122c7949ada52fb275b94e4173a1bf493b16d9052af674cc
def queue_purging(self, document): 'Restrictions the resource types to be purged for a queue.\n\n :param resource_types: Type list of all resource under a queue\n :raises ValidationFailed: if the resource types are invalid\n ' if ('resource_types' not in document): msg = _(u'Post body must contain key "resource_types".') raise ValidationFailed(msg) if (not set(document['resource_types']).issubset(_PURGBLE_RESOURCE_TYPES)): msg = _(u'Resource types must be a sub set of {0}.') raise ValidationFailed(msg, _PURGBLE_RESOURCE_TYPES)
Restrictions the resource types to be purged for a queue. :param resource_types: Type list of all resource under a queue :raises ValidationFailed: if the resource types are invalid
zaqar/transport/validation.py
queue_purging
g894404753/zaqar
97
python
def queue_purging(self, document): 'Restrictions the resource types to be purged for a queue.\n\n :param resource_types: Type list of all resource under a queue\n :raises ValidationFailed: if the resource types are invalid\n ' if ('resource_types' not in document): msg = _(u'Post body must contain key "resource_types".') raise ValidationFailed(msg) if (not set(document['resource_types']).issubset(_PURGBLE_RESOURCE_TYPES)): msg = _(u'Resource types must be a sub set of {0}.') raise ValidationFailed(msg, _PURGBLE_RESOURCE_TYPES)
def queue_purging(self, document): 'Restrictions the resource types to be purged for a queue.\n\n :param resource_types: Type list of all resource under a queue\n :raises ValidationFailed: if the resource types are invalid\n ' if ('resource_types' not in document): msg = _(u'Post body must contain key "resource_types".') raise ValidationFailed(msg) if (not set(document['resource_types']).issubset(_PURGBLE_RESOURCE_TYPES)): msg = _(u'Resource types must be a sub set of {0}.') raise ValidationFailed(msg, _PURGBLE_RESOURCE_TYPES)<|docstring|>Restrictions the resource types to be purged for a queue. :param resource_types: Type list of all resource under a queue :raises ValidationFailed: if the resource types are invalid<|endoftext|>
d6356c23af1958b6f5252376956cc879a8c79207bd6232b70b8f743fe5df6963
def message_posting(self, messages): 'Restrictions on a list of messages.\n\n :param messages: A list of messages\n :raises ValidationFailed: if any message has a out-of-range\n TTL.\n ' if (not messages): raise ValidationFailed(_(u'No messages to enqueu.')) for msg in messages: self.message_content(msg)
Restrictions on a list of messages. :param messages: A list of messages :raises ValidationFailed: if any message has a out-of-range TTL.
zaqar/transport/validation.py
message_posting
g894404753/zaqar
97
python
def message_posting(self, messages): 'Restrictions on a list of messages.\n\n :param messages: A list of messages\n :raises ValidationFailed: if any message has a out-of-range\n TTL.\n ' if (not messages): raise ValidationFailed(_(u'No messages to enqueu.')) for msg in messages: self.message_content(msg)
def message_posting(self, messages): 'Restrictions on a list of messages.\n\n :param messages: A list of messages\n :raises ValidationFailed: if any message has a out-of-range\n TTL.\n ' if (not messages): raise ValidationFailed(_(u'No messages to enqueu.')) for msg in messages: self.message_content(msg)<|docstring|>Restrictions on a list of messages. :param messages: A list of messages :raises ValidationFailed: if any message has a out-of-range TTL.<|endoftext|>
3837ae7b83693e569b886ba53202f28eec9026f7cbfa799852de2b134d6b979e
def message_length(self, content_length, max_msg_post_size=None): "Restrictions on message post length.\n\n :param content_length: Queue request's length.\n :raises ValidationFailed: if the metadata is oversize.\n " if (content_length is None): return if max_msg_post_size: try: min_max_size = min(max_msg_post_size, self._limits_conf.max_messages_post_size) if (content_length > min_max_size): raise ValidationFailed(_(u'Message collection size is too large. The max size for current queue is {0}. It is calculated by max size = min(max_messages_post_size_config: {1}, max_messages_post_size_queue: {2}).'), min_max_size, self._limits_conf.max_messages_post_size, max_msg_post_size) except TypeError: pass if (content_length > self._limits_conf.max_messages_post_size): raise ValidationFailed(_(u'Message collection size is too large. Max size {0}'), self._limits_conf.max_messages_post_size)
Restrictions on message post length. :param content_length: Queue request's length. :raises ValidationFailed: if the metadata is oversize.
zaqar/transport/validation.py
message_length
g894404753/zaqar
97
python
def message_length(self, content_length, max_msg_post_size=None): "Restrictions on message post length.\n\n :param content_length: Queue request's length.\n :raises ValidationFailed: if the metadata is oversize.\n " if (content_length is None): return if max_msg_post_size: try: min_max_size = min(max_msg_post_size, self._limits_conf.max_messages_post_size) if (content_length > min_max_size): raise ValidationFailed(_(u'Message collection size is too large. The max size for current queue is {0}. It is calculated by max size = min(max_messages_post_size_config: {1}, max_messages_post_size_queue: {2}).'), min_max_size, self._limits_conf.max_messages_post_size, max_msg_post_size) except TypeError: pass if (content_length > self._limits_conf.max_messages_post_size): raise ValidationFailed(_(u'Message collection size is too large. Max size {0}'), self._limits_conf.max_messages_post_size)
def message_length(self, content_length, max_msg_post_size=None): "Restrictions on message post length.\n\n :param content_length: Queue request's length.\n :raises ValidationFailed: if the metadata is oversize.\n " if (content_length is None): return if max_msg_post_size: try: min_max_size = min(max_msg_post_size, self._limits_conf.max_messages_post_size) if (content_length > min_max_size): raise ValidationFailed(_(u'Message collection size is too large. The max size for current queue is {0}. It is calculated by max size = min(max_messages_post_size_config: {1}, max_messages_post_size_queue: {2}).'), min_max_size, self._limits_conf.max_messages_post_size, max_msg_post_size) except TypeError: pass if (content_length > self._limits_conf.max_messages_post_size): raise ValidationFailed(_(u'Message collection size is too large. Max size {0}'), self._limits_conf.max_messages_post_size)<|docstring|>Restrictions on message post length. :param content_length: Queue request's length. :raises ValidationFailed: if the metadata is oversize.<|endoftext|>
0675d940a807e008a3640f69ee22424bebb3f87563fb926aecf739ccf22731a9
def message_content(self, message): 'Restrictions on each message.' ttl = message['ttl'] if (not (MIN_MESSAGE_TTL <= ttl <= self._limits_conf.max_message_ttl)): msg = _(u'The TTL for a message may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) delay = message.get('delay', 0) if (not (MIN_DELAY_TTL <= delay <= self._limits_conf.max_message_delay)): msg = _(u'The Delay TTL for a message may not exceed {0} seconds,and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_delay, MIN_DELAY_TTL)
Restrictions on each message.
zaqar/transport/validation.py
message_content
g894404753/zaqar
97
python
def message_content(self, message): ttl = message['ttl'] if (not (MIN_MESSAGE_TTL <= ttl <= self._limits_conf.max_message_ttl)): msg = _(u'The TTL for a message may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) delay = message.get('delay', 0) if (not (MIN_DELAY_TTL <= delay <= self._limits_conf.max_message_delay)): msg = _(u'The Delay TTL for a message may not exceed {0} seconds,and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_delay, MIN_DELAY_TTL)
def message_content(self, message): ttl = message['ttl'] if (not (MIN_MESSAGE_TTL <= ttl <= self._limits_conf.max_message_ttl)): msg = _(u'The TTL for a message may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_ttl, MIN_MESSAGE_TTL) delay = message.get('delay', 0) if (not (MIN_DELAY_TTL <= delay <= self._limits_conf.max_message_delay)): msg = _(u'The Delay TTL for a message may not exceed {0} seconds,and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_message_delay, MIN_DELAY_TTL)<|docstring|>Restrictions on each message.<|endoftext|>
c1216730f6645b59c5eef01fc9dd566f5470ad746df1e0b1664c8ba3ccb78ef1
def message_listing(self, limit=None, **kwargs): 'Restrictions involving a list of messages.\n\n :param limit: The expected number of messages in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_messages_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_messages_per_page)
Restrictions involving a list of messages. :param limit: The expected number of messages in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded
zaqar/transport/validation.py
message_listing
g894404753/zaqar
97
python
def message_listing(self, limit=None, **kwargs): 'Restrictions involving a list of messages.\n\n :param limit: The expected number of messages in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_messages_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_messages_per_page)
def message_listing(self, limit=None, **kwargs): 'Restrictions involving a list of messages.\n\n :param limit: The expected number of messages in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_messages_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_messages_per_page)<|docstring|>Restrictions involving a list of messages. :param limit: The expected number of messages in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded<|endoftext|>
602041d60df4dd8acda6f16baf3e3d51e31ac78a419081e49045e8e070479d46
def message_deletion(self, ids=None, pop=None, claim_ids=None): 'Restrictions involving deletion of messages.\n\n :param ids: message ids passed in by the delete request\n :param pop: count of messages to be POPped\n :param claim_ids: claim ids passed in by the delete request\n :raises ValidationFailed: if,\n pop AND id params are present together\n neither pop or id params are present\n message count to be popped > maximum allowed\n ' if ((pop is not None) and (ids is not None)): msg = _(u'pop and id params cannot be present together in the delete request.') raise ValidationFailed(msg) if ((pop is None) and (ids is None)): msg = _(u'The request should have either "ids" or "pop" parameter in the request, to be able to delete.') raise ValidationFailed(msg) if self._limits_conf.message_delete_with_claim_id: if ((ids and (claim_ids is None)) or ((ids is None) and claim_ids)): msg = _(u'The request should have both "ids" and "claim_ids" parameter in the request when message_delete_with_claim_id is True.') raise ValidationFailed(msg) pop_uplimit = self._limits_conf.max_messages_per_claim_or_pop if ((pop is not None) and (not (0 < pop <= pop_uplimit))): msg = _(u'Pop value must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, pop_uplimit) delete_uplimit = self._limits_conf.max_messages_per_page if ((ids is not None) and (not (0 < len(ids) <= delete_uplimit))): msg = _(u'ids parameter should have at least 1 and not greater than {0} values.') raise ValidationFailed(msg, delete_uplimit)
Restrictions involving deletion of messages. :param ids: message ids passed in by the delete request :param pop: count of messages to be POPped :param claim_ids: claim ids passed in by the delete request :raises ValidationFailed: if, pop AND id params are present together neither pop or id params are present message count to be popped > maximum allowed
zaqar/transport/validation.py
message_deletion
g894404753/zaqar
97
python
def message_deletion(self, ids=None, pop=None, claim_ids=None): 'Restrictions involving deletion of messages.\n\n :param ids: message ids passed in by the delete request\n :param pop: count of messages to be POPped\n :param claim_ids: claim ids passed in by the delete request\n :raises ValidationFailed: if,\n pop AND id params are present together\n neither pop or id params are present\n message count to be popped > maximum allowed\n ' if ((pop is not None) and (ids is not None)): msg = _(u'pop and id params cannot be present together in the delete request.') raise ValidationFailed(msg) if ((pop is None) and (ids is None)): msg = _(u'The request should have either "ids" or "pop" parameter in the request, to be able to delete.') raise ValidationFailed(msg) if self._limits_conf.message_delete_with_claim_id: if ((ids and (claim_ids is None)) or ((ids is None) and claim_ids)): msg = _(u'The request should have both "ids" and "claim_ids" parameter in the request when message_delete_with_claim_id is True.') raise ValidationFailed(msg) pop_uplimit = self._limits_conf.max_messages_per_claim_or_pop if ((pop is not None) and (not (0 < pop <= pop_uplimit))): msg = _(u'Pop value must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, pop_uplimit) delete_uplimit = self._limits_conf.max_messages_per_page if ((ids is not None) and (not (0 < len(ids) <= delete_uplimit))): msg = _(u'ids parameter should have at least 1 and not greater than {0} values.') raise ValidationFailed(msg, delete_uplimit)
def message_deletion(self, ids=None, pop=None, claim_ids=None): 'Restrictions involving deletion of messages.\n\n :param ids: message ids passed in by the delete request\n :param pop: count of messages to be POPped\n :param claim_ids: claim ids passed in by the delete request\n :raises ValidationFailed: if,\n pop AND id params are present together\n neither pop or id params are present\n message count to be popped > maximum allowed\n ' if ((pop is not None) and (ids is not None)): msg = _(u'pop and id params cannot be present together in the delete request.') raise ValidationFailed(msg) if ((pop is None) and (ids is None)): msg = _(u'The request should have either "ids" or "pop" parameter in the request, to be able to delete.') raise ValidationFailed(msg) if self._limits_conf.message_delete_with_claim_id: if ((ids and (claim_ids is None)) or ((ids is None) and claim_ids)): msg = _(u'The request should have both "ids" and "claim_ids" parameter in the request when message_delete_with_claim_id is True.') raise ValidationFailed(msg) pop_uplimit = self._limits_conf.max_messages_per_claim_or_pop if ((pop is not None) and (not (0 < pop <= pop_uplimit))): msg = _(u'Pop value must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, pop_uplimit) delete_uplimit = self._limits_conf.max_messages_per_page if ((ids is not None) and (not (0 < len(ids) <= delete_uplimit))): msg = _(u'ids parameter should have at least 1 and not greater than {0} values.') raise ValidationFailed(msg, delete_uplimit)<|docstring|>Restrictions involving deletion of messages. :param ids: message ids passed in by the delete request :param pop: count of messages to be POPped :param claim_ids: claim ids passed in by the delete request :raises ValidationFailed: if, pop AND id params are present together neither pop or id params are present message count to be popped > maximum allowed<|endoftext|>
af88c1a0d9698fe2f20708032c6a02d0ef51f606712551eb6c0cd43210623d00
def claim_creation(self, metadata, limit=None): 'Restrictions on the claim parameters upon creation.\n\n :param metadata: The claim metadata\n :param limit: The number of messages to claim\n :raises ValidationFailed: if either TTL or grace is out of range,\n or the expected number of messages exceed the limit.\n ' self.claim_updating(metadata) uplimit = self._limits_conf.max_messages_per_claim_or_pop if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_messages_per_claim_or_pop) grace = metadata['grace'] if (not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace)): msg = _(u'The grace for a claim may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE)
Restrictions on the claim parameters upon creation. :param metadata: The claim metadata :param limit: The number of messages to claim :raises ValidationFailed: if either TTL or grace is out of range, or the expected number of messages exceed the limit.
zaqar/transport/validation.py
claim_creation
g894404753/zaqar
97
python
def claim_creation(self, metadata, limit=None): 'Restrictions on the claim parameters upon creation.\n\n :param metadata: The claim metadata\n :param limit: The number of messages to claim\n :raises ValidationFailed: if either TTL or grace is out of range,\n or the expected number of messages exceed the limit.\n ' self.claim_updating(metadata) uplimit = self._limits_conf.max_messages_per_claim_or_pop if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_messages_per_claim_or_pop) grace = metadata['grace'] if (not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace)): msg = _(u'The grace for a claim may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE)
def claim_creation(self, metadata, limit=None): 'Restrictions on the claim parameters upon creation.\n\n :param metadata: The claim metadata\n :param limit: The number of messages to claim\n :raises ValidationFailed: if either TTL or grace is out of range,\n or the expected number of messages exceed the limit.\n ' self.claim_updating(metadata) uplimit = self._limits_conf.max_messages_per_claim_or_pop if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_messages_per_claim_or_pop) grace = metadata['grace'] if (not (MIN_CLAIM_GRACE <= grace <= self._limits_conf.max_claim_grace)): msg = _(u'The grace for a claim may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_claim_grace, MIN_CLAIM_GRACE)<|docstring|>Restrictions on the claim parameters upon creation. :param metadata: The claim metadata :param limit: The number of messages to claim :raises ValidationFailed: if either TTL or grace is out of range, or the expected number of messages exceed the limit.<|endoftext|>
fba04eb09f0461eb6feea12cbce957667bee00c2d60793b28cc1406daa6c2478
def claim_updating(self, metadata): 'Restrictions on the claim TTL.\n\n :param metadata: The claim metadata\n :raises ValidationFailed: if the TTL is out of range\n ' ttl = metadata['ttl'] if (not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl)): msg = _(u'The TTL for a claim may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_claim_ttl, MIN_CLAIM_TTL)
Restrictions on the claim TTL. :param metadata: The claim metadata :raises ValidationFailed: if the TTL is out of range
zaqar/transport/validation.py
claim_updating
g894404753/zaqar
97
python
def claim_updating(self, metadata): 'Restrictions on the claim TTL.\n\n :param metadata: The claim metadata\n :raises ValidationFailed: if the TTL is out of range\n ' ttl = metadata['ttl'] if (not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl)): msg = _(u'The TTL for a claim may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_claim_ttl, MIN_CLAIM_TTL)
def claim_updating(self, metadata): 'Restrictions on the claim TTL.\n\n :param metadata: The claim metadata\n :raises ValidationFailed: if the TTL is out of range\n ' ttl = metadata['ttl'] if (not (MIN_CLAIM_TTL <= ttl <= self._limits_conf.max_claim_ttl)): msg = _(u'The TTL for a claim may not exceed {0} seconds, and must be at least {1} seconds long.') raise ValidationFailed(msg, self._limits_conf.max_claim_ttl, MIN_CLAIM_TTL)<|docstring|>Restrictions on the claim TTL. :param metadata: The claim metadata :raises ValidationFailed: if the TTL is out of range<|endoftext|>
bdcc86ebe6eeb362bb74dca9f0d11354689376738cc1971f155806a5357f8683
def subscription_posting(self, subscription): 'Restrictions on a creation of subscription.\n\n :param subscription: dict of subscription\n :raises ValidationFailed: if the subscription is invalid.\n ' for p in ('subscriber',): if (p not in subscription.keys()): raise ValidationFailed((_(u'Missing parameter %s in body.') % p)) self.subscription_patching(subscription)
Restrictions on a creation of subscription. :param subscription: dict of subscription :raises ValidationFailed: if the subscription is invalid.
zaqar/transport/validation.py
subscription_posting
g894404753/zaqar
97
python
def subscription_posting(self, subscription): 'Restrictions on a creation of subscription.\n\n :param subscription: dict of subscription\n :raises ValidationFailed: if the subscription is invalid.\n ' for p in ('subscriber',): if (p not in subscription.keys()): raise ValidationFailed((_(u'Missing parameter %s in body.') % p)) self.subscription_patching(subscription)
def subscription_posting(self, subscription): 'Restrictions on a creation of subscription.\n\n :param subscription: dict of subscription\n :raises ValidationFailed: if the subscription is invalid.\n ' for p in ('subscriber',): if (p not in subscription.keys()): raise ValidationFailed((_(u'Missing parameter %s in body.') % p)) self.subscription_patching(subscription)<|docstring|>Restrictions on a creation of subscription. :param subscription: dict of subscription :raises ValidationFailed: if the subscription is invalid.<|endoftext|>
6b444ad9ebcaadf44a9ad934670b0c60e1a53185d5520798527e488580c81bb7
def subscription_patching(self, subscription): 'Restrictions on an update of subscription.\n\n :param subscription: dict of subscription\n :raises ValidationFailed: if the subscription is invalid.\n ' if (not subscription): raise ValidationFailed(_(u'No subscription to create.')) if (not isinstance(subscription, dict)): msg = _('Subscriptions must be a dict.') raise ValidationFailed(msg) subscriber = subscription.get('subscriber') subscriber_type = None if subscriber: parsed_uri = urllib_parse.urlparse(subscriber) subscriber_type = parsed_uri.scheme if (subscriber_type not in self._limits_conf.subscriber_types): msg = _(u'The subscriber type of subscription must be supported in the list {0}.') raise ValidationFailed(msg, self._limits_conf.subscriber_types) options = subscription.get('options') if (options and (not isinstance(options, dict))): msg = _(u'Options must be a dict.') raise ValidationFailed(msg) self._validate_retry_policy(options) ttl = subscription.get('ttl') if ttl: if (not isinstance(ttl, int)): msg = _(u'TTL must be an integer.') raise ValidationFailed(msg) if (ttl < MIN_SUBSCRIPTION_TTL): msg = _(u'The TTL for a subscription must be at least {0} seconds long.') raise ValidationFailed(msg, MIN_SUBSCRIPTION_TTL) now = timeutils.utcnow_ts() now_dt = datetime.datetime.utcfromtimestamp(now) msg = _(u'The TTL seconds for a subscription plus current time must be less than {0}.') try: (now_dt + datetime.timedelta(seconds=ttl)) except OverflowError: raise ValidationFailed(msg, datetime.datetime.max)
Restrictions on an update of subscription. :param subscription: dict of subscription :raises ValidationFailed: if the subscription is invalid.
zaqar/transport/validation.py
subscription_patching
g894404753/zaqar
97
python
def subscription_patching(self, subscription): 'Restrictions on an update of subscription.\n\n :param subscription: dict of subscription\n :raises ValidationFailed: if the subscription is invalid.\n ' if (not subscription): raise ValidationFailed(_(u'No subscription to create.')) if (not isinstance(subscription, dict)): msg = _('Subscriptions must be a dict.') raise ValidationFailed(msg) subscriber = subscription.get('subscriber') subscriber_type = None if subscriber: parsed_uri = urllib_parse.urlparse(subscriber) subscriber_type = parsed_uri.scheme if (subscriber_type not in self._limits_conf.subscriber_types): msg = _(u'The subscriber type of subscription must be supported in the list {0}.') raise ValidationFailed(msg, self._limits_conf.subscriber_types) options = subscription.get('options') if (options and (not isinstance(options, dict))): msg = _(u'Options must be a dict.') raise ValidationFailed(msg) self._validate_retry_policy(options) ttl = subscription.get('ttl') if ttl: if (not isinstance(ttl, int)): msg = _(u'TTL must be an integer.') raise ValidationFailed(msg) if (ttl < MIN_SUBSCRIPTION_TTL): msg = _(u'The TTL for a subscription must be at least {0} seconds long.') raise ValidationFailed(msg, MIN_SUBSCRIPTION_TTL) now = timeutils.utcnow_ts() now_dt = datetime.datetime.utcfromtimestamp(now) msg = _(u'The TTL seconds for a subscription plus current time must be less than {0}.') try: (now_dt + datetime.timedelta(seconds=ttl)) except OverflowError: raise ValidationFailed(msg, datetime.datetime.max)
def subscription_patching(self, subscription): 'Restrictions on an update of subscription.\n\n :param subscription: dict of subscription\n :raises ValidationFailed: if the subscription is invalid.\n ' if (not subscription): raise ValidationFailed(_(u'No subscription to create.')) if (not isinstance(subscription, dict)): msg = _('Subscriptions must be a dict.') raise ValidationFailed(msg) subscriber = subscription.get('subscriber') subscriber_type = None if subscriber: parsed_uri = urllib_parse.urlparse(subscriber) subscriber_type = parsed_uri.scheme if (subscriber_type not in self._limits_conf.subscriber_types): msg = _(u'The subscriber type of subscription must be supported in the list {0}.') raise ValidationFailed(msg, self._limits_conf.subscriber_types) options = subscription.get('options') if (options and (not isinstance(options, dict))): msg = _(u'Options must be a dict.') raise ValidationFailed(msg) self._validate_retry_policy(options) ttl = subscription.get('ttl') if ttl: if (not isinstance(ttl, int)): msg = _(u'TTL must be an integer.') raise ValidationFailed(msg) if (ttl < MIN_SUBSCRIPTION_TTL): msg = _(u'The TTL for a subscription must be at least {0} seconds long.') raise ValidationFailed(msg, MIN_SUBSCRIPTION_TTL) now = timeutils.utcnow_ts() now_dt = datetime.datetime.utcfromtimestamp(now) msg = _(u'The TTL seconds for a subscription plus current time must be less than {0}.') try: (now_dt + datetime.timedelta(seconds=ttl)) except OverflowError: raise ValidationFailed(msg, datetime.datetime.max)<|docstring|>Restrictions on an update of subscription. :param subscription: dict of subscription :raises ValidationFailed: if the subscription is invalid.<|endoftext|>
57e5ea30da19c23e24c451e87fccaf017b9dd1cf111baba1afee5d8a86eac09c
def subscription_listing(self, limit=None, **kwargs): 'Restrictions involving a list of subscriptions.\n\n :param limit: The expected number of subscriptions in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_subscriptions_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_subscriptions_per_page)
Restrictions involving a list of subscriptions. :param limit: The expected number of subscriptions in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded
zaqar/transport/validation.py
subscription_listing
g894404753/zaqar
97
python
def subscription_listing(self, limit=None, **kwargs): 'Restrictions involving a list of subscriptions.\n\n :param limit: The expected number of subscriptions in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_subscriptions_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_subscriptions_per_page)
def subscription_listing(self, limit=None, **kwargs): 'Restrictions involving a list of subscriptions.\n\n :param limit: The expected number of subscriptions in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_subscriptions_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and may not be greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_subscriptions_per_page)<|docstring|>Restrictions involving a list of subscriptions. :param limit: The expected number of subscriptions in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded<|endoftext|>
268fd3f442ee5bf610fe384369c184293c825f551f129b8eefe384fb612452d0
def get_limit_conf_value(self, limit_conf_name=None): 'Return the value of limit configuration.\n\n :param limit_conf_name: configuration name\n ' return self._limits_conf[limit_conf_name]
Return the value of limit configuration. :param limit_conf_name: configuration name
zaqar/transport/validation.py
get_limit_conf_value
g894404753/zaqar
97
python
def get_limit_conf_value(self, limit_conf_name=None): 'Return the value of limit configuration.\n\n :param limit_conf_name: configuration name\n ' return self._limits_conf[limit_conf_name]
def get_limit_conf_value(self, limit_conf_name=None): 'Return the value of limit configuration.\n\n :param limit_conf_name: configuration name\n ' return self._limits_conf[limit_conf_name]<|docstring|>Return the value of limit configuration. :param limit_conf_name: configuration name<|endoftext|>
954a3b76ab55e3e4e89f06d4a78a76874635fa5cc9cc972b873ef262837eefd0
def flavor_listing(self, limit=None, **kwargs): 'Restrictions involving a list of pools.\n\n :param limit: The expected number of flavors in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_flavors_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_flavors_per_page)
Restrictions involving a list of pools. :param limit: The expected number of flavors in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded
zaqar/transport/validation.py
flavor_listing
g894404753/zaqar
97
python
def flavor_listing(self, limit=None, **kwargs): 'Restrictions involving a list of pools.\n\n :param limit: The expected number of flavors in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_flavors_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_flavors_per_page)
def flavor_listing(self, limit=None, **kwargs): 'Restrictions involving a list of pools.\n\n :param limit: The expected number of flavors in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_flavors_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_flavors_per_page)<|docstring|>Restrictions involving a list of pools. :param limit: The expected number of flavors in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded<|endoftext|>
9c6440bca0cf46d37b6d1ce9937d3144b8facedb3db06bcf75b5d8215b3f9bcc
def pool_listing(self, limit=None, **kwargs): 'Restrictions involving a list of pools.\n\n :param limit: The expected number of flavors in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_pools_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_pools_per_page)
Restrictions involving a list of pools. :param limit: The expected number of flavors in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded
zaqar/transport/validation.py
pool_listing
g894404753/zaqar
97
python
def pool_listing(self, limit=None, **kwargs): 'Restrictions involving a list of pools.\n\n :param limit: The expected number of flavors in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_pools_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_pools_per_page)
def pool_listing(self, limit=None, **kwargs): 'Restrictions involving a list of pools.\n\n :param limit: The expected number of flavors in the list\n :param kwargs: Ignored arguments passed to storage API\n :raises ValidationFailed: if the limit is exceeded\n ' uplimit = self._limits_conf.max_pools_per_page if ((limit is not None) and (not (0 < limit <= uplimit))): msg = _(u'Limit must be at least 1 and no greater than {0}.') raise ValidationFailed(msg, self._limits_conf.max_pools_per_page)<|docstring|>Restrictions involving a list of pools. :param limit: The expected number of flavors in the list :param kwargs: Ignored arguments passed to storage API :raises ValidationFailed: if the limit is exceeded<|endoftext|>
3a5942b158412aa72ed159a4e8cb79b846baad2ea7a6d0b7d4799611f2659007
def client_id_uuid_safe(self, client_id): 'Restrictions the format of client id\n\n :param client_id: the client id of request\n :raises ValidationFailed: if the limit is exceeded\n ' if (self._limits_conf.client_id_uuid_safe == 'off'): if ((len(client_id) < self._limits_conf.min_length_client_id) or (len(client_id) > self._limits_conf.max_length_client_id)): msg = _(u'Length of client id must be at least {0} and no greater than {1}.') raise ValidationFailed(msg, self._limits_conf.min_length_client_id, self._limits_conf.max_length_client_id) if (self._limits_conf.client_id_uuid_safe == 'strict'): uuid.UUID(client_id)
Restrictions the format of client id :param client_id: the client id of request :raises ValidationFailed: if the limit is exceeded
zaqar/transport/validation.py
client_id_uuid_safe
g894404753/zaqar
97
python
def client_id_uuid_safe(self, client_id): 'Restrictions the format of client id\n\n :param client_id: the client id of request\n :raises ValidationFailed: if the limit is exceeded\n ' if (self._limits_conf.client_id_uuid_safe == 'off'): if ((len(client_id) < self._limits_conf.min_length_client_id) or (len(client_id) > self._limits_conf.max_length_client_id)): msg = _(u'Length of client id must be at least {0} and no greater than {1}.') raise ValidationFailed(msg, self._limits_conf.min_length_client_id, self._limits_conf.max_length_client_id) if (self._limits_conf.client_id_uuid_safe == 'strict'): uuid.UUID(client_id)
def client_id_uuid_safe(self, client_id): 'Restrictions the format of client id\n\n :param client_id: the client id of request\n :raises ValidationFailed: if the limit is exceeded\n ' if (self._limits_conf.client_id_uuid_safe == 'off'): if ((len(client_id) < self._limits_conf.min_length_client_id) or (len(client_id) > self._limits_conf.max_length_client_id)): msg = _(u'Length of client id must be at least {0} and no greater than {1}.') raise ValidationFailed(msg, self._limits_conf.min_length_client_id, self._limits_conf.max_length_client_id) if (self._limits_conf.client_id_uuid_safe == 'strict'): uuid.UUID(client_id)<|docstring|>Restrictions the format of client id :param client_id: the client id of request :raises ValidationFailed: if the limit is exceeded<|endoftext|>
4842231413a60c8356dd80e20b60fa7f1590d40de818aecba0294fed76f13cc4
def topic_identification(self, topic, project): 'Restrictions on a project id & topic name pair.\n\n :param queue: Name of the topic\n :param project: Project id\n :raises ValidationFailed: if the `name` is longer than 64\n characters or contains anything other than ASCII digits and\n letters, underscores, and dashes. Also raises if `project`\n is not None but longer than 256 characters.\n ' if ((project is not None) and (len(project) > PROJECT_ID_MAX_LEN)): msg = _(u'Project ids may not be more than {0} characters long.') raise ValidationFailed(msg, PROJECT_ID_MAX_LEN) if (len(topic) > QUEUE_NAME_MAX_LEN): msg = _(u'Topic names may not be more than {0} characters long.') raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN) if (not QUEUE_NAME_REGEX.match(topic)): raise ValidationFailed(_(u'Topic names may only contain ASCII letters, digits, underscores, and dashes.'))
Restrictions on a project id & topic name pair. :param queue: Name of the topic :param project: Project id :raises ValidationFailed: if the `name` is longer than 64 characters or contains anything other than ASCII digits and letters, underscores, and dashes. Also raises if `project` is not None but longer than 256 characters.
zaqar/transport/validation.py
topic_identification
g894404753/zaqar
97
python
def topic_identification(self, topic, project): 'Restrictions on a project id & topic name pair.\n\n :param queue: Name of the topic\n :param project: Project id\n :raises ValidationFailed: if the `name` is longer than 64\n characters or contains anything other than ASCII digits and\n letters, underscores, and dashes. Also raises if `project`\n is not None but longer than 256 characters.\n ' if ((project is not None) and (len(project) > PROJECT_ID_MAX_LEN)): msg = _(u'Project ids may not be more than {0} characters long.') raise ValidationFailed(msg, PROJECT_ID_MAX_LEN) if (len(topic) > QUEUE_NAME_MAX_LEN): msg = _(u'Topic names may not be more than {0} characters long.') raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN) if (not QUEUE_NAME_REGEX.match(topic)): raise ValidationFailed(_(u'Topic names may only contain ASCII letters, digits, underscores, and dashes.'))
def topic_identification(self, topic, project): 'Restrictions on a project id & topic name pair.\n\n :param queue: Name of the topic\n :param project: Project id\n :raises ValidationFailed: if the `name` is longer than 64\n characters or contains anything other than ASCII digits and\n letters, underscores, and dashes. Also raises if `project`\n is not None but longer than 256 characters.\n ' if ((project is not None) and (len(project) > PROJECT_ID_MAX_LEN)): msg = _(u'Project ids may not be more than {0} characters long.') raise ValidationFailed(msg, PROJECT_ID_MAX_LEN) if (len(topic) > QUEUE_NAME_MAX_LEN): msg = _(u'Topic names may not be more than {0} characters long.') raise ValidationFailed(msg, QUEUE_NAME_MAX_LEN) if (not QUEUE_NAME_REGEX.match(topic)): raise ValidationFailed(_(u'Topic names may only contain ASCII letters, digits, underscores, and dashes.'))<|docstring|>Restrictions on a project id & topic name pair. :param queue: Name of the topic :param project: Project id :raises ValidationFailed: if the `name` is longer than 64 characters or contains anything other than ASCII digits and letters, underscores, and dashes. Also raises if `project` is not None but longer than 256 characters.<|endoftext|>
e3696e55dd0abbd049042af01cb3ffba2be5c0186bf8806f688cca4bf4b7102c
def test_ImageProcessor(): 'Check that ImageProcessor is correctly subtracting images.' data_key = 'pe1_image' def verify(_name, _doc): if (_name != 'event'): return data = _doc['data'][data_key] assert isinstance(data, list) assert np.array_equal(np.asarray(data), np.zeros((3, 3))) ip = cbs.ImageProcessor(data_key=data_key, subtrahend=np.ones((3, 3))) ip.subscribe(verify, name='event') frames = np.ones((2, 3, 3)) for (name, doc) in gen_stream([{data_key: frames}], {}): ip(name, doc)
Check that ImageProcessor is correctly subtracting images.
crystalmapping/tests/test_callbacks.py
test_ImageProcessor
st3107/crystalmapping
0
python
def test_ImageProcessor(): data_key = 'pe1_image' def verify(_name, _doc): if (_name != 'event'): return data = _doc['data'][data_key] assert isinstance(data, list) assert np.array_equal(np.asarray(data), np.zeros((3, 3))) ip = cbs.ImageProcessor(data_key=data_key, subtrahend=np.ones((3, 3))) ip.subscribe(verify, name='event') frames = np.ones((2, 3, 3)) for (name, doc) in gen_stream([{data_key: frames}], {}): ip(name, doc)
def test_ImageProcessor(): data_key = 'pe1_image' def verify(_name, _doc): if (_name != 'event'): return data = _doc['data'][data_key] assert isinstance(data, list) assert np.array_equal(np.asarray(data), np.zeros((3, 3))) ip = cbs.ImageProcessor(data_key=data_key, subtrahend=np.ones((3, 3))) ip.subscribe(verify, name='event') frames = np.ones((2, 3, 3)) for (name, doc) in gen_stream([{data_key: frames}], {}): ip(name, doc)<|docstring|>Check that ImageProcessor is correctly subtracting images.<|endoftext|>
cdba546e3ab248048bd439eb5e763ce671e2a0e859b997650f979ea340674d08
def test_gen_processed_images(): 'Test gen_processed_images.' images1 = (np.ones((2, 3, 3)) for _ in range(3)) subtrahend = np.ones((3, 3)) subtrahend[(0, 0)] = 2 images2 = cbs.gen_processed_images(images1, subtrahend=subtrahend) for image in images2: assert np.array_equal(image, np.zeros((3, 3)))
Test gen_processed_images.
crystalmapping/tests/test_callbacks.py
test_gen_processed_images
st3107/crystalmapping
0
python
def test_gen_processed_images(): images1 = (np.ones((2, 3, 3)) for _ in range(3)) subtrahend = np.ones((3, 3)) subtrahend[(0, 0)] = 2 images2 = cbs.gen_processed_images(images1, subtrahend=subtrahend) for image in images2: assert np.array_equal(image, np.zeros((3, 3)))
def test_gen_processed_images(): images1 = (np.ones((2, 3, 3)) for _ in range(3)) subtrahend = np.ones((3, 3)) subtrahend[(0, 0)] = 2 images2 = cbs.gen_processed_images(images1, subtrahend=subtrahend) for image in images2: assert np.array_equal(image, np.zeros((3, 3)))<|docstring|>Test gen_processed_images.<|endoftext|>
f5ddc5e940a77a2b52f0b09c9031475ba2e623593d53532982b472067cbf5d55
def test_PeakTracker(tmpdir): 'Check that PeakTrack and TrackLinker works without errors.' tp.quiet() image_file = resource_filename('crystalmapping', 'data/image.png') image = plt.imread(image_file) images = ([image] * 3) db = databroker.v1.temp() data_key = 'pe1_image' pt = cbs.PeakTracker(data_key=data_key, diameter=(11, 11)) pt.subscribe(db.insert) data = [{data_key: image} for image in images] for (name, doc) in gen_stream(data, {}): pt(name, doc) df = cbs.get_dataframe(db[(- 1)]) print(df.to_string()) tl = cbs.TrackLinker(db=db, search_range=3) tl.subscribe(db.insert) for (name, doc) in db[(- 1)].documents(fill='yes'): tl(name, doc) df = cbs.get_dataframe(db[(- 1)]) print(df.to_string())
Check that PeakTrack and TrackLinker works without errors.
crystalmapping/tests/test_callbacks.py
test_PeakTracker
st3107/crystalmapping
0
python
def test_PeakTracker(tmpdir): tp.quiet() image_file = resource_filename('crystalmapping', 'data/image.png') image = plt.imread(image_file) images = ([image] * 3) db = databroker.v1.temp() data_key = 'pe1_image' pt = cbs.PeakTracker(data_key=data_key, diameter=(11, 11)) pt.subscribe(db.insert) data = [{data_key: image} for image in images] for (name, doc) in gen_stream(data, {}): pt(name, doc) df = cbs.get_dataframe(db[(- 1)]) print(df.to_string()) tl = cbs.TrackLinker(db=db, search_range=3) tl.subscribe(db.insert) for (name, doc) in db[(- 1)].documents(fill='yes'): tl(name, doc) df = cbs.get_dataframe(db[(- 1)]) print(df.to_string())
def test_PeakTracker(tmpdir): tp.quiet() image_file = resource_filename('crystalmapping', 'data/image.png') image = plt.imread(image_file) images = ([image] * 3) db = databroker.v1.temp() data_key = 'pe1_image' pt = cbs.PeakTracker(data_key=data_key, diameter=(11, 11)) pt.subscribe(db.insert) data = [{data_key: image} for image in images] for (name, doc) in gen_stream(data, {}): pt(name, doc) df = cbs.get_dataframe(db[(- 1)]) print(df.to_string()) tl = cbs.TrackLinker(db=db, search_range=3) tl.subscribe(db.insert) for (name, doc) in db[(- 1)].documents(fill='yes'): tl(name, doc) df = cbs.get_dataframe(db[(- 1)]) print(df.to_string())<|docstring|>Check that PeakTrack and TrackLinker works without errors.<|endoftext|>
92710b4aedc9f82b8b33c40347c6525d88922bceb1977da3aa079c3672e8c39d
def test_DataFrameDumper(): 'Test DataFrameDumper.' db = databroker.v1.temp() dfd = cbs.DataFrameDumper(db) data = [1, 2, 3] df = pd.DataFrame({'a': [1, 2, 3]}) metadata = {'key': 'a'} dfd.dump_df(df, metadata) run = db[(- 1)] assert (run.start['key'] == metadata['key']) assert (list(run.data('a')) == data)
Test DataFrameDumper.
crystalmapping/tests/test_callbacks.py
test_DataFrameDumper
st3107/crystalmapping
0
python
def test_DataFrameDumper(): db = databroker.v1.temp() dfd = cbs.DataFrameDumper(db) data = [1, 2, 3] df = pd.DataFrame({'a': [1, 2, 3]}) metadata = {'key': 'a'} dfd.dump_df(df, metadata) run = db[(- 1)] assert (run.start['key'] == metadata['key']) assert (list(run.data('a')) == data)
def test_DataFrameDumper(): db = databroker.v1.temp() dfd = cbs.DataFrameDumper(db) data = [1, 2, 3] df = pd.DataFrame({'a': [1, 2, 3]}) metadata = {'key': 'a'} dfd.dump_df(df, metadata) run = db[(- 1)] assert (run.start['key'] == metadata['key']) assert (list(run.data('a')) == data)<|docstring|>Test DataFrameDumper.<|endoftext|>
2333ac5c45a972949da897d27d2886a576750a0a81377ef1691be47cee4fd4d6
@jax.jit @functools.partial(jax.vmap, in_axes=(1, 1, 1, None, None), out_axes=1) def gae_advantages(rewards: np.ndarray, terminal_masks: np.ndarray, values: np.ndarray, discount: float, gae_param: float): 'Use Generalized Advantage Estimation (GAE) to compute advantages.\n\n As defined by eqs. (11-12) in PPO paper arXiv: 1707.06347. Implementation uses\n key observation that A_{t} = delta_t + gamma*lambda*A_{t+1}.\n\n Args:\n rewards: array shaped (actor_steps, num_agents), rewards from the game\n terminal_masks: array shaped (actor_steps, num_agents), zeros for terminal\n and ones for non-terminal states\n values: array shaped (actor_steps, num_agents), values estimated by critic\n discount: RL discount usually denoted with gamma\n gae_param: GAE parameter usually denoted with lambda\n\n Returns:\n advantages: calculated advantages shaped (actor_steps, num_agents)\n ' assert ((rewards.shape[0] + 1) == values.shape[0]), 'One more value needed; Eq. (12) in PPO paper requires V(s_{t+1}) for delta_t' advantages = [] gae = 0.0 for t in reversed(range(len(rewards))): value_diff = (((discount * values[(t + 1)]) * terminal_masks[t]) - values[t]) delta = (rewards[t] + value_diff) gae = (delta + (((discount * gae_param) * terminal_masks[t]) * gae)) advantages.append(gae) advantages = advantages[::(- 1)] return jnp.array(advantages)
Use Generalized Advantage Estimation (GAE) to compute advantages. As defined by eqs. (11-12) in PPO paper arXiv: 1707.06347. Implementation uses key observation that A_{t} = delta_t + gamma*lambda*A_{t+1}. Args: rewards: array shaped (actor_steps, num_agents), rewards from the game terminal_masks: array shaped (actor_steps, num_agents), zeros for terminal and ones for non-terminal states values: array shaped (actor_steps, num_agents), values estimated by critic discount: RL discount usually denoted with gamma gae_param: GAE parameter usually denoted with lambda Returns: advantages: calculated advantages shaped (actor_steps, num_agents)
examples/ppo/ppo_lib.py
gae_advantages
cccntu/flax
4
python
@jax.jit @functools.partial(jax.vmap, in_axes=(1, 1, 1, None, None), out_axes=1) def gae_advantages(rewards: np.ndarray, terminal_masks: np.ndarray, values: np.ndarray, discount: float, gae_param: float): 'Use Generalized Advantage Estimation (GAE) to compute advantages.\n\n As defined by eqs. (11-12) in PPO paper arXiv: 1707.06347. Implementation uses\n key observation that A_{t} = delta_t + gamma*lambda*A_{t+1}.\n\n Args:\n rewards: array shaped (actor_steps, num_agents), rewards from the game\n terminal_masks: array shaped (actor_steps, num_agents), zeros for terminal\n and ones for non-terminal states\n values: array shaped (actor_steps, num_agents), values estimated by critic\n discount: RL discount usually denoted with gamma\n gae_param: GAE parameter usually denoted with lambda\n\n Returns:\n advantages: calculated advantages shaped (actor_steps, num_agents)\n ' assert ((rewards.shape[0] + 1) == values.shape[0]), 'One more value needed; Eq. (12) in PPO paper requires V(s_{t+1}) for delta_t' advantages = [] gae = 0.0 for t in reversed(range(len(rewards))): value_diff = (((discount * values[(t + 1)]) * terminal_masks[t]) - values[t]) delta = (rewards[t] + value_diff) gae = (delta + (((discount * gae_param) * terminal_masks[t]) * gae)) advantages.append(gae) advantages = advantages[::(- 1)] return jnp.array(advantages)
@jax.jit @functools.partial(jax.vmap, in_axes=(1, 1, 1, None, None), out_axes=1) def gae_advantages(rewards: np.ndarray, terminal_masks: np.ndarray, values: np.ndarray, discount: float, gae_param: float): 'Use Generalized Advantage Estimation (GAE) to compute advantages.\n\n As defined by eqs. (11-12) in PPO paper arXiv: 1707.06347. Implementation uses\n key observation that A_{t} = delta_t + gamma*lambda*A_{t+1}.\n\n Args:\n rewards: array shaped (actor_steps, num_agents), rewards from the game\n terminal_masks: array shaped (actor_steps, num_agents), zeros for terminal\n and ones for non-terminal states\n values: array shaped (actor_steps, num_agents), values estimated by critic\n discount: RL discount usually denoted with gamma\n gae_param: GAE parameter usually denoted with lambda\n\n Returns:\n advantages: calculated advantages shaped (actor_steps, num_agents)\n ' assert ((rewards.shape[0] + 1) == values.shape[0]), 'One more value needed; Eq. (12) in PPO paper requires V(s_{t+1}) for delta_t' advantages = [] gae = 0.0 for t in reversed(range(len(rewards))): value_diff = (((discount * values[(t + 1)]) * terminal_masks[t]) - values[t]) delta = (rewards[t] + value_diff) gae = (delta + (((discount * gae_param) * terminal_masks[t]) * gae)) advantages.append(gae) advantages = advantages[::(- 1)] return jnp.array(advantages)<|docstring|>Use Generalized Advantage Estimation (GAE) to compute advantages. As defined by eqs. (11-12) in PPO paper arXiv: 1707.06347. Implementation uses key observation that A_{t} = delta_t + gamma*lambda*A_{t+1}. 
Args: rewards: array shaped (actor_steps, num_agents), rewards from the game terminal_masks: array shaped (actor_steps, num_agents), zeros for terminal and ones for non-terminal states values: array shaped (actor_steps, num_agents), values estimated by critic discount: RL discount usually denoted with gamma gae_param: GAE parameter usually denoted with lambda Returns: advantages: calculated advantages shaped (actor_steps, num_agents)<|endoftext|>
ab8a07bf9656fba073f3e59d256726fc5c08849187e67f8555122968701a7ac2
@functools.partial(jax.jit, static_argnums=1) def loss_fn(params: flax.core.frozen_dict.FrozenDict, module: models.ActorCritic, minibatch: Tuple, clip_param: float, vf_coeff: float, entropy_coeff: float): 'Evaluate the loss function.\n\n Compute loss as a sum of three components: the negative of the PPO clipped\n surrogate objective, the value function loss and the negative of the entropy\n bonus.\n\n Args:\n params: the parameters of the actor-critic model\n module: the actor-critic model\n minibatch: Tuple of five elements forming one experience batch:\n states: shape (batch_size, 84, 84, 4)\n actions: shape (batch_size, 84, 84, 4)\n old_log_probs: shape (batch_size,)\n returns: shape (batch_size,)\n advantages: shape (batch_size,)\n clip_param: the PPO clipping parameter used to clamp ratios in loss function\n vf_coeff: weighs value function loss in total loss\n entropy_coeff: weighs entropy bonus in the total loss\n\n Returns:\n loss: the PPO loss, scalar quantity\n ' (states, actions, old_log_probs, returns, advantages) = minibatch (log_probs, values) = agent.policy_action(params, module, states) values = values[(:, 0)] probs = jnp.exp(log_probs) value_loss = jnp.mean(jnp.square((returns - values)), axis=0) entropy = jnp.sum(((- probs) * log_probs), axis=1).mean() log_probs_act_taken = jax.vmap((lambda lp, a: lp[a]))(log_probs, actions) ratios = jnp.exp((log_probs_act_taken - old_log_probs)) advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-08)) PG_loss = (ratios * advantages) clipped_loss = (advantages * jax.lax.clamp((1.0 - clip_param), ratios, (1.0 + clip_param))) PPO_loss = (- jnp.mean(jnp.minimum(PG_loss, clipped_loss), axis=0)) return ((PPO_loss + (vf_coeff * value_loss)) - (entropy_coeff * entropy))
Evaluate the loss function. Compute loss as a sum of three components: the negative of the PPO clipped surrogate objective, the value function loss and the negative of the entropy bonus. Args: params: the parameters of the actor-critic model module: the actor-critic model minibatch: Tuple of five elements forming one experience batch: states: shape (batch_size, 84, 84, 4) actions: shape (batch_size, 84, 84, 4) old_log_probs: shape (batch_size,) returns: shape (batch_size,) advantages: shape (batch_size,) clip_param: the PPO clipping parameter used to clamp ratios in loss function vf_coeff: weighs value function loss in total loss entropy_coeff: weighs entropy bonus in the total loss Returns: loss: the PPO loss, scalar quantity
examples/ppo/ppo_lib.py
loss_fn
cccntu/flax
4
python
@functools.partial(jax.jit, static_argnums=1) def loss_fn(params: flax.core.frozen_dict.FrozenDict, module: models.ActorCritic, minibatch: Tuple, clip_param: float, vf_coeff: float, entropy_coeff: float): 'Evaluate the loss function.\n\n Compute loss as a sum of three components: the negative of the PPO clipped\n surrogate objective, the value function loss and the negative of the entropy\n bonus.\n\n Args:\n params: the parameters of the actor-critic model\n module: the actor-critic model\n minibatch: Tuple of five elements forming one experience batch:\n states: shape (batch_size, 84, 84, 4)\n actions: shape (batch_size, 84, 84, 4)\n old_log_probs: shape (batch_size,)\n returns: shape (batch_size,)\n advantages: shape (batch_size,)\n clip_param: the PPO clipping parameter used to clamp ratios in loss function\n vf_coeff: weighs value function loss in total loss\n entropy_coeff: weighs entropy bonus in the total loss\n\n Returns:\n loss: the PPO loss, scalar quantity\n ' (states, actions, old_log_probs, returns, advantages) = minibatch (log_probs, values) = agent.policy_action(params, module, states) values = values[(:, 0)] probs = jnp.exp(log_probs) value_loss = jnp.mean(jnp.square((returns - values)), axis=0) entropy = jnp.sum(((- probs) * log_probs), axis=1).mean() log_probs_act_taken = jax.vmap((lambda lp, a: lp[a]))(log_probs, actions) ratios = jnp.exp((log_probs_act_taken - old_log_probs)) advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-08)) PG_loss = (ratios * advantages) clipped_loss = (advantages * jax.lax.clamp((1.0 - clip_param), ratios, (1.0 + clip_param))) PPO_loss = (- jnp.mean(jnp.minimum(PG_loss, clipped_loss), axis=0)) return ((PPO_loss + (vf_coeff * value_loss)) - (entropy_coeff * entropy))
@functools.partial(jax.jit, static_argnums=1) def loss_fn(params: flax.core.frozen_dict.FrozenDict, module: models.ActorCritic, minibatch: Tuple, clip_param: float, vf_coeff: float, entropy_coeff: float): 'Evaluate the loss function.\n\n Compute loss as a sum of three components: the negative of the PPO clipped\n surrogate objective, the value function loss and the negative of the entropy\n bonus.\n\n Args:\n params: the parameters of the actor-critic model\n module: the actor-critic model\n minibatch: Tuple of five elements forming one experience batch:\n states: shape (batch_size, 84, 84, 4)\n actions: shape (batch_size, 84, 84, 4)\n old_log_probs: shape (batch_size,)\n returns: shape (batch_size,)\n advantages: shape (batch_size,)\n clip_param: the PPO clipping parameter used to clamp ratios in loss function\n vf_coeff: weighs value function loss in total loss\n entropy_coeff: weighs entropy bonus in the total loss\n\n Returns:\n loss: the PPO loss, scalar quantity\n ' (states, actions, old_log_probs, returns, advantages) = minibatch (log_probs, values) = agent.policy_action(params, module, states) values = values[(:, 0)] probs = jnp.exp(log_probs) value_loss = jnp.mean(jnp.square((returns - values)), axis=0) entropy = jnp.sum(((- probs) * log_probs), axis=1).mean() log_probs_act_taken = jax.vmap((lambda lp, a: lp[a]))(log_probs, actions) ratios = jnp.exp((log_probs_act_taken - old_log_probs)) advantages = ((advantages - advantages.mean()) / (advantages.std() + 1e-08)) PG_loss = (ratios * advantages) clipped_loss = (advantages * jax.lax.clamp((1.0 - clip_param), ratios, (1.0 + clip_param))) PPO_loss = (- jnp.mean(jnp.minimum(PG_loss, clipped_loss), axis=0)) return ((PPO_loss + (vf_coeff * value_loss)) - (entropy_coeff * entropy))<|docstring|>Evaluate the loss function. Compute loss as a sum of three components: the negative of the PPO clipped surrogate objective, the value function loss and the negative of the entropy bonus. 
Args: params: the parameters of the actor-critic model module: the actor-critic model minibatch: Tuple of five elements forming one experience batch: states: shape (batch_size, 84, 84, 4) actions: shape (batch_size, 84, 84, 4) old_log_probs: shape (batch_size,) returns: shape (batch_size,) advantages: shape (batch_size,) clip_param: the PPO clipping parameter used to clamp ratios in loss function vf_coeff: weighs value function loss in total loss entropy_coeff: weighs entropy bonus in the total loss Returns: loss: the PPO loss, scalar quantity<|endoftext|>
a64b285f52ae5c71b160a8ee0bf406bf3dc8abb208855aee0c0039d91bc45202
@functools.partial(jax.jit, static_argnums=(0, 7)) def train_step(module: models.ActorCritic, optimizer: flax.optim.base.Optimizer, trajectories: Tuple, clip_param: float, vf_coeff: float, entropy_coeff: float, lr: float, batch_size: int): 'Compilable train step.\n\n Runs an entire epoch of training (i.e. the loop over minibatches within\n an epoch is included here for performance reasons).\n\n Args:\n module: the actor-critic model\n optimizer: optimizer for the actor-critic model\n trajectories: Tuple of the following five elements forming the experience:\n states: shape (steps_per_agent*num_agents, 84, 84, 4)\n actions: shape (steps_per_agent*num_agents, 84, 84, 4)\n old_log_probs: shape (steps_per_agent*num_agents, )\n returns: shape (steps_per_agent*num_agents, )\n advantages: (steps_per_agent*num_agents, )\n clip_param: the PPO clipping parameter used to clamp ratios in loss function\n vf_coeff: weighs value function loss in total loss\n entropy_coeff: weighs entropy bonus in the total loss\n lr: learning rate, varies between optimization steps\n if decaying_lr_and_clip_param is set to true\n batch_size: the minibatch size, static argument\n\n Returns:\n optimizer: new optimizer after the parameters update\n loss: loss summed over training steps\n ' iterations = (trajectories[0].shape[0] // batch_size) trajectories = jax.tree_map((lambda x: x.reshape(((iterations, batch_size) + x.shape[1:]))), trajectories) loss = 0.0 for batch in zip(*trajectories): grad_fn = jax.value_and_grad(loss_fn) (l, grad) = grad_fn(optimizer.target, module, batch, clip_param, vf_coeff, entropy_coeff) loss += l optimizer = optimizer.apply_gradient(grad, learning_rate=lr) return (optimizer, loss)
Compilable train step. Runs an entire epoch of training (i.e. the loop over minibatches within an epoch is included here for performance reasons). Args: module: the actor-critic model optimizer: optimizer for the actor-critic model trajectories: Tuple of the following five elements forming the experience: states: shape (steps_per_agent*num_agents, 84, 84, 4) actions: shape (steps_per_agent*num_agents, 84, 84, 4) old_log_probs: shape (steps_per_agent*num_agents, ) returns: shape (steps_per_agent*num_agents, ) advantages: (steps_per_agent*num_agents, ) clip_param: the PPO clipping parameter used to clamp ratios in loss function vf_coeff: weighs value function loss in total loss entropy_coeff: weighs entropy bonus in the total loss lr: learning rate, varies between optimization steps if decaying_lr_and_clip_param is set to true batch_size: the minibatch size, static argument Returns: optimizer: new optimizer after the parameters update loss: loss summed over training steps
examples/ppo/ppo_lib.py
train_step
cccntu/flax
4
python
@functools.partial(jax.jit, static_argnums=(0, 7)) def train_step(module: models.ActorCritic, optimizer: flax.optim.base.Optimizer, trajectories: Tuple, clip_param: float, vf_coeff: float, entropy_coeff: float, lr: float, batch_size: int): 'Compilable train step.\n\n Runs an entire epoch of training (i.e. the loop over minibatches within\n an epoch is included here for performance reasons).\n\n Args:\n module: the actor-critic model\n optimizer: optimizer for the actor-critic model\n trajectories: Tuple of the following five elements forming the experience:\n states: shape (steps_per_agent*num_agents, 84, 84, 4)\n actions: shape (steps_per_agent*num_agents, 84, 84, 4)\n old_log_probs: shape (steps_per_agent*num_agents, )\n returns: shape (steps_per_agent*num_agents, )\n advantages: (steps_per_agent*num_agents, )\n clip_param: the PPO clipping parameter used to clamp ratios in loss function\n vf_coeff: weighs value function loss in total loss\n entropy_coeff: weighs entropy bonus in the total loss\n lr: learning rate, varies between optimization steps\n if decaying_lr_and_clip_param is set to true\n batch_size: the minibatch size, static argument\n\n Returns:\n optimizer: new optimizer after the parameters update\n loss: loss summed over training steps\n ' iterations = (trajectories[0].shape[0] // batch_size) trajectories = jax.tree_map((lambda x: x.reshape(((iterations, batch_size) + x.shape[1:]))), trajectories) loss = 0.0 for batch in zip(*trajectories): grad_fn = jax.value_and_grad(loss_fn) (l, grad) = grad_fn(optimizer.target, module, batch, clip_param, vf_coeff, entropy_coeff) loss += l optimizer = optimizer.apply_gradient(grad, learning_rate=lr) return (optimizer, loss)
@functools.partial(jax.jit, static_argnums=(0, 7)) def train_step(module: models.ActorCritic, optimizer: flax.optim.base.Optimizer, trajectories: Tuple, clip_param: float, vf_coeff: float, entropy_coeff: float, lr: float, batch_size: int): 'Compilable train step.\n\n Runs an entire epoch of training (i.e. the loop over minibatches within\n an epoch is included here for performance reasons).\n\n Args:\n module: the actor-critic model\n optimizer: optimizer for the actor-critic model\n trajectories: Tuple of the following five elements forming the experience:\n states: shape (steps_per_agent*num_agents, 84, 84, 4)\n actions: shape (steps_per_agent*num_agents, 84, 84, 4)\n old_log_probs: shape (steps_per_agent*num_agents, )\n returns: shape (steps_per_agent*num_agents, )\n advantages: (steps_per_agent*num_agents, )\n clip_param: the PPO clipping parameter used to clamp ratios in loss function\n vf_coeff: weighs value function loss in total loss\n entropy_coeff: weighs entropy bonus in the total loss\n lr: learning rate, varies between optimization steps\n if decaying_lr_and_clip_param is set to true\n batch_size: the minibatch size, static argument\n\n Returns:\n optimizer: new optimizer after the parameters update\n loss: loss summed over training steps\n ' iterations = (trajectories[0].shape[0] // batch_size) trajectories = jax.tree_map((lambda x: x.reshape(((iterations, batch_size) + x.shape[1:]))), trajectories) loss = 0.0 for batch in zip(*trajectories): grad_fn = jax.value_and_grad(loss_fn) (l, grad) = grad_fn(optimizer.target, module, batch, clip_param, vf_coeff, entropy_coeff) loss += l optimizer = optimizer.apply_gradient(grad, learning_rate=lr) return (optimizer, loss)<|docstring|>Compilable train step. Runs an entire epoch of training (i.e. the loop over minibatches within an epoch is included here for performance reasons). 
Args: module: the actor-critic model optimizer: optimizer for the actor-critic model trajectories: Tuple of the following five elements forming the experience: states: shape (steps_per_agent*num_agents, 84, 84, 4) actions: shape (steps_per_agent*num_agents, 84, 84, 4) old_log_probs: shape (steps_per_agent*num_agents, ) returns: shape (steps_per_agent*num_agents, ) advantages: (steps_per_agent*num_agents, ) clip_param: the PPO clipping parameter used to clamp ratios in loss function vf_coeff: weighs value function loss in total loss entropy_coeff: weighs entropy bonus in the total loss lr: learning rate, varies between optimization steps if decaying_lr_and_clip_param is set to true batch_size: the minibatch size, static argument Returns: optimizer: new optimizer after the parameters update loss: loss summed over training steps<|endoftext|>
698d148d2bbb0de53bd9d65122609ae0f9d2d879a03091b6556130855c378d8c
def get_experience(params: flax.core.frozen_dict.FrozenDict, module: models.ActorCritic, simulators: List[agent.RemoteSimulator], steps_per_actor: int): 'Collect experience from agents.\n\n Runs `steps_per_actor` time steps of the game for each of the `simulators`.\n ' all_experience = [] for _ in range((steps_per_actor + 1)): states = [] for sim in simulators: state = sim.conn.recv() states.append(state) states = np.concatenate(states, axis=0) (log_probs, values) = agent.policy_action(params, module, states) (log_probs, values) = jax.device_get((log_probs, values)) probs = np.exp(np.array(log_probs)) for (i, sim) in enumerate(simulators): probabilities = probs[i] action = np.random.choice(probs.shape[1], p=probabilities) sim.conn.send(action) experiences = [] for (i, sim) in enumerate(simulators): (state, action, reward, done) = sim.conn.recv() value = values[(i, 0)] log_prob = log_probs[i][action] sample = agent.ExpTuple(state, action, reward, value, log_prob, done) experiences.append(sample) all_experience.append(experiences) return all_experience
Collect experience from agents. Runs `steps_per_actor` time steps of the game for each of the `simulators`.
examples/ppo/ppo_lib.py
get_experience
cccntu/flax
4
python
def get_experience(params: flax.core.frozen_dict.FrozenDict, module: models.ActorCritic, simulators: List[agent.RemoteSimulator], steps_per_actor: int): 'Collect experience from agents.\n\n Runs `steps_per_actor` time steps of the game for each of the `simulators`.\n ' all_experience = [] for _ in range((steps_per_actor + 1)): states = [] for sim in simulators: state = sim.conn.recv() states.append(state) states = np.concatenate(states, axis=0) (log_probs, values) = agent.policy_action(params, module, states) (log_probs, values) = jax.device_get((log_probs, values)) probs = np.exp(np.array(log_probs)) for (i, sim) in enumerate(simulators): probabilities = probs[i] action = np.random.choice(probs.shape[1], p=probabilities) sim.conn.send(action) experiences = [] for (i, sim) in enumerate(simulators): (state, action, reward, done) = sim.conn.recv() value = values[(i, 0)] log_prob = log_probs[i][action] sample = agent.ExpTuple(state, action, reward, value, log_prob, done) experiences.append(sample) all_experience.append(experiences) return all_experience
def get_experience(params: flax.core.frozen_dict.FrozenDict, module: models.ActorCritic, simulators: List[agent.RemoteSimulator], steps_per_actor: int): 'Collect experience from agents.\n\n Runs `steps_per_actor` time steps of the game for each of the `simulators`.\n ' all_experience = [] for _ in range((steps_per_actor + 1)): states = [] for sim in simulators: state = sim.conn.recv() states.append(state) states = np.concatenate(states, axis=0) (log_probs, values) = agent.policy_action(params, module, states) (log_probs, values) = jax.device_get((log_probs, values)) probs = np.exp(np.array(log_probs)) for (i, sim) in enumerate(simulators): probabilities = probs[i] action = np.random.choice(probs.shape[1], p=probabilities) sim.conn.send(action) experiences = [] for (i, sim) in enumerate(simulators): (state, action, reward, done) = sim.conn.recv() value = values[(i, 0)] log_prob = log_probs[i][action] sample = agent.ExpTuple(state, action, reward, value, log_prob, done) experiences.append(sample) all_experience.append(experiences) return all_experience<|docstring|>Collect experience from agents. Runs `steps_per_actor` time steps of the game for each of the `simulators`.<|endoftext|>
673336685f3d7f17d7f1b004094c41192e016f16b963f43fd8f281eef581d99c
def process_experience(experience: List[List[agent.ExpTuple]], actor_steps: int, num_agents: int, gamma: float, lambda_: float): 'Process experience for training, including advantage estimation.\n\n Args:\n experience: collected from agents in the form of nested lists/namedtuple\n actor_steps: number of steps each agent has completed\n num_agents: number of agents that collected experience\n gamma: dicount parameter\n lambda_: GAE parameter\n\n Returns:\n trajectories: trajectories readily accessible for `train_step()` function\n ' obs_shape = (84, 84, 4) exp_dims = (actor_steps, num_agents) values_dims = ((actor_steps + 1), num_agents) states = np.zeros((exp_dims + obs_shape), dtype=np.float32) actions = np.zeros(exp_dims, dtype=np.int32) rewards = np.zeros(exp_dims, dtype=np.float32) values = np.zeros(values_dims, dtype=np.float32) log_probs = np.zeros(exp_dims, dtype=np.float32) dones = np.zeros(exp_dims, dtype=np.float32) for t in range((len(experience) - 1)): for (agent_id, exp_agent) in enumerate(experience[t]): states[(t, agent_id, ...)] = exp_agent.state actions[(t, agent_id)] = exp_agent.action rewards[(t, agent_id)] = exp_agent.reward values[(t, agent_id)] = exp_agent.value log_probs[(t, agent_id)] = exp_agent.log_prob dones[(t, agent_id)] = float((not exp_agent.done)) for a in range(num_agents): values[((- 1), a)] = experience[(- 1)][a].value advantages = gae_advantages(rewards, dones, values, gamma, lambda_) returns = (advantages + values[(:(- 1), :)]) trajectories = (states, actions, log_probs, returns, advantages) trajectory_len = (num_agents * actor_steps) trajectories = tuple(map((lambda x: np.reshape(x, ((trajectory_len,) + x.shape[2:]))), trajectories)) return trajectories
Process experience for training, including advantage estimation. Args: experience: collected from agents in the form of nested lists/namedtuple actor_steps: number of steps each agent has completed num_agents: number of agents that collected experience gamma: dicount parameter lambda_: GAE parameter Returns: trajectories: trajectories readily accessible for `train_step()` function
examples/ppo/ppo_lib.py
process_experience
cccntu/flax
4
python
def process_experience(experience: List[List[agent.ExpTuple]], actor_steps: int, num_agents: int, gamma: float, lambda_: float): 'Process experience for training, including advantage estimation.\n\n Args:\n experience: collected from agents in the form of nested lists/namedtuple\n actor_steps: number of steps each agent has completed\n num_agents: number of agents that collected experience\n gamma: dicount parameter\n lambda_: GAE parameter\n\n Returns:\n trajectories: trajectories readily accessible for `train_step()` function\n ' obs_shape = (84, 84, 4) exp_dims = (actor_steps, num_agents) values_dims = ((actor_steps + 1), num_agents) states = np.zeros((exp_dims + obs_shape), dtype=np.float32) actions = np.zeros(exp_dims, dtype=np.int32) rewards = np.zeros(exp_dims, dtype=np.float32) values = np.zeros(values_dims, dtype=np.float32) log_probs = np.zeros(exp_dims, dtype=np.float32) dones = np.zeros(exp_dims, dtype=np.float32) for t in range((len(experience) - 1)): for (agent_id, exp_agent) in enumerate(experience[t]): states[(t, agent_id, ...)] = exp_agent.state actions[(t, agent_id)] = exp_agent.action rewards[(t, agent_id)] = exp_agent.reward values[(t, agent_id)] = exp_agent.value log_probs[(t, agent_id)] = exp_agent.log_prob dones[(t, agent_id)] = float((not exp_agent.done)) for a in range(num_agents): values[((- 1), a)] = experience[(- 1)][a].value advantages = gae_advantages(rewards, dones, values, gamma, lambda_) returns = (advantages + values[(:(- 1), :)]) trajectories = (states, actions, log_probs, returns, advantages) trajectory_len = (num_agents * actor_steps) trajectories = tuple(map((lambda x: np.reshape(x, ((trajectory_len,) + x.shape[2:]))), trajectories)) return trajectories
def process_experience(experience: List[List[agent.ExpTuple]], actor_steps: int, num_agents: int, gamma: float, lambda_: float): 'Process experience for training, including advantage estimation.\n\n Args:\n experience: collected from agents in the form of nested lists/namedtuple\n actor_steps: number of steps each agent has completed\n num_agents: number of agents that collected experience\n gamma: dicount parameter\n lambda_: GAE parameter\n\n Returns:\n trajectories: trajectories readily accessible for `train_step()` function\n ' obs_shape = (84, 84, 4) exp_dims = (actor_steps, num_agents) values_dims = ((actor_steps + 1), num_agents) states = np.zeros((exp_dims + obs_shape), dtype=np.float32) actions = np.zeros(exp_dims, dtype=np.int32) rewards = np.zeros(exp_dims, dtype=np.float32) values = np.zeros(values_dims, dtype=np.float32) log_probs = np.zeros(exp_dims, dtype=np.float32) dones = np.zeros(exp_dims, dtype=np.float32) for t in range((len(experience) - 1)): for (agent_id, exp_agent) in enumerate(experience[t]): states[(t, agent_id, ...)] = exp_agent.state actions[(t, agent_id)] = exp_agent.action rewards[(t, agent_id)] = exp_agent.reward values[(t, agent_id)] = exp_agent.value log_probs[(t, agent_id)] = exp_agent.log_prob dones[(t, agent_id)] = float((not exp_agent.done)) for a in range(num_agents): values[((- 1), a)] = experience[(- 1)][a].value advantages = gae_advantages(rewards, dones, values, gamma, lambda_) returns = (advantages + values[(:(- 1), :)]) trajectories = (states, actions, log_probs, returns, advantages) trajectory_len = (num_agents * actor_steps) trajectories = tuple(map((lambda x: np.reshape(x, ((trajectory_len,) + x.shape[2:]))), trajectories)) return trajectories<|docstring|>Process experience for training, including advantage estimation. 
Args: experience: collected from agents in the form of nested lists/namedtuple actor_steps: number of steps each agent has completed num_agents: number of agents that collected experience gamma: dicount parameter lambda_: GAE parameter Returns: trajectories: trajectories readily accessible for `train_step()` function<|endoftext|>
b16f579149023f40639b6c00cba65362c9390184b139f0fdf52f7e5a34c4bd05
def train(module: models.ActorCritic, optimizer: flax.optim.base.Optimizer, config: ml_collections.ConfigDict, model_dir: str): 'Main training loop.\n\n Args:\n module: the actor-critic model\n optimizer: optimizer for the actor-critic model\n config: object holding hyperparameters and the training information\n model_dir: path to dictionary where checkpoints and logging info are stored\n\n Returns:\n optimizer: the trained optimizer\n ' game = (config.game + 'NoFrameskip-v4') simulators = [agent.RemoteSimulator(game) for _ in range(config.num_agents)] summary_writer = tensorboard.SummaryWriter(model_dir) summary_writer.hparams(dict(config)) loop_steps = (config.total_frames // (config.num_agents * config.actor_steps)) log_frequency = 40 checkpoint_frequency = 500 for s in range(loop_steps): if ((s % log_frequency) == 0): score = test_episodes.policy_test(1, module, optimizer.target, game) frames = ((s * config.num_agents) * config.actor_steps) summary_writer.scalar('game_score', score, frames) print(f'''Step {s}: frames seen {frames} score {score} ''') if ((s % checkpoint_frequency) == 0): checkpoints.save_checkpoint(model_dir, optimizer, s) alpha = ((1.0 - (s / loop_steps)) if config.decaying_lr_and_clip_param else 1.0) all_experiences = get_experience(optimizer.target, module, simulators, config.actor_steps) trajectories = process_experience(all_experiences, config.actor_steps, config.num_agents, config.gamma, config.lambda_) lr = (config.learning_rate * alpha) clip_param = (config.clip_param * alpha) for e in range(config.num_epochs): permutation = np.random.permutation((config.num_agents * config.actor_steps)) trajectories = tuple(map((lambda x: x[permutation]), trajectories)) (optimizer, loss) = train_step(module, optimizer, trajectories, clip_param, config.vf_coeff, config.entropy_coeff, lr, config.batch_size) return optimizer
Main training loop. Args: module: the actor-critic model optimizer: optimizer for the actor-critic model config: object holding hyperparameters and the training information model_dir: path to dictionary where checkpoints and logging info are stored Returns: optimizer: the trained optimizer
examples/ppo/ppo_lib.py
train
cccntu/flax
4
python
def train(module: models.ActorCritic, optimizer: flax.optim.base.Optimizer, config: ml_collections.ConfigDict, model_dir: str): 'Main training loop.\n\n Args:\n module: the actor-critic model\n optimizer: optimizer for the actor-critic model\n config: object holding hyperparameters and the training information\n model_dir: path to dictionary where checkpoints and logging info are stored\n\n Returns:\n optimizer: the trained optimizer\n ' game = (config.game + 'NoFrameskip-v4') simulators = [agent.RemoteSimulator(game) for _ in range(config.num_agents)] summary_writer = tensorboard.SummaryWriter(model_dir) summary_writer.hparams(dict(config)) loop_steps = (config.total_frames // (config.num_agents * config.actor_steps)) log_frequency = 40 checkpoint_frequency = 500 for s in range(loop_steps): if ((s % log_frequency) == 0): score = test_episodes.policy_test(1, module, optimizer.target, game) frames = ((s * config.num_agents) * config.actor_steps) summary_writer.scalar('game_score', score, frames) print(f'Step {s}: frames seen {frames} score {score} ') if ((s % checkpoint_frequency) == 0): checkpoints.save_checkpoint(model_dir, optimizer, s) alpha = ((1.0 - (s / loop_steps)) if config.decaying_lr_and_clip_param else 1.0) all_experiences = get_experience(optimizer.target, module, simulators, config.actor_steps) trajectories = process_experience(all_experiences, config.actor_steps, config.num_agents, config.gamma, config.lambda_) lr = (config.learning_rate * alpha) clip_param = (config.clip_param * alpha) for e in range(config.num_epochs): permutation = np.random.permutation((config.num_agents * config.actor_steps)) trajectories = tuple(map((lambda x: x[permutation]), trajectories)) (optimizer, loss) = train_step(module, optimizer, trajectories, clip_param, config.vf_coeff, config.entropy_coeff, lr, config.batch_size) return optimizer
def train(module: models.ActorCritic, optimizer: flax.optim.base.Optimizer, config: ml_collections.ConfigDict, model_dir: str): 'Main training loop.\n\n Args:\n module: the actor-critic model\n optimizer: optimizer for the actor-critic model\n config: object holding hyperparameters and the training information\n model_dir: path to dictionary where checkpoints and logging info are stored\n\n Returns:\n optimizer: the trained optimizer\n ' game = (config.game + 'NoFrameskip-v4') simulators = [agent.RemoteSimulator(game) for _ in range(config.num_agents)] summary_writer = tensorboard.SummaryWriter(model_dir) summary_writer.hparams(dict(config)) loop_steps = (config.total_frames // (config.num_agents * config.actor_steps)) log_frequency = 40 checkpoint_frequency = 500 for s in range(loop_steps): if ((s % log_frequency) == 0): score = test_episodes.policy_test(1, module, optimizer.target, game) frames = ((s * config.num_agents) * config.actor_steps) summary_writer.scalar('game_score', score, frames) print(f'Step {s}: frames seen {frames} score {score} ') if ((s % checkpoint_frequency) == 0): checkpoints.save_checkpoint(model_dir, optimizer, s) alpha = ((1.0 - (s / loop_steps)) if config.decaying_lr_and_clip_param else 1.0) all_experiences = get_experience(optimizer.target, module, simulators, config.actor_steps) trajectories = process_experience(all_experiences, config.actor_steps, config.num_agents, config.gamma, config.lambda_) lr = (config.learning_rate * alpha) clip_param = (config.clip_param * alpha) for e in range(config.num_epochs): permutation = np.random.permutation((config.num_agents * config.actor_steps)) trajectories = tuple(map((lambda x: x[permutation]), trajectories)) (optimizer, loss) = train_step(module, optimizer, trajectories, clip_param, config.vf_coeff, config.entropy_coeff, lr, config.batch_size) return optimizer<|docstring|>Main training loop. 
Args: module: the actor-critic model optimizer: optimizer for the actor-critic model config: object holding hyperparameters and the training information model_dir: path to dictionary where checkpoints and logging info are stored Returns: optimizer: the trained optimizer<|endoftext|>
4acf066582fbb45f9e7a1bce90ab610b7d3c1c435583b7b72ac95663ebdc15e2
def unit_root(x, pvalue=0.05, noprint=False): 'test if input series has unit root using augmented dickey fuller' dftest = adfuller(x, autolag='AIC') if (not noprint): results = Series(dftest[0:4], index=['Test Statistic', 'p-value', 'Lags Used', 'Obs Used']) for (k, v) in dftest[4].items(): results[f'Critical Value ({k})'] = v print(results.to_frame().T.to_string(index=False)) return (dftest[1] > pvalue)
test if input series has unit root using augmented dickey fuller
examples/econometric_forecast.py
unit_root
terence-lim/investment-data-science
2
python
def unit_root(x, pvalue=0.05, noprint=False): dftest = adfuller(x, autolag='AIC') if (not noprint): results = Series(dftest[0:4], index=['Test Statistic', 'p-value', 'Lags Used', 'Obs Used']) for (k, v) in dftest[4].items(): results[f'Critical Value ({k})'] = v print(results.to_frame().T.to_string(index=False)) return (dftest[1] > pvalue)
def unit_root(x, pvalue=0.05, noprint=False): dftest = adfuller(x, autolag='AIC') if (not noprint): results = Series(dftest[0:4], index=['Test Statistic', 'p-value', 'Lags Used', 'Obs Used']) for (k, v) in dftest[4].items(): results[f'Critical Value ({k})'] = v print(results.to_frame().T.to_string(index=False)) return (dftest[1] > pvalue)<|docstring|>test if input series has unit root using augmented dickey fuller<|endoftext|>
9243bf1e32b189fcd838d261b7427f518a0b01845f91363d699ea2ac377dc9f4
def integration_order(df, noprint=True, max_order=5, pvalue=0.05): 'returns order of integration by iteratively testing for unit root' for i in range(max_order): if (not noprint): print(f'Augmented Dickey-Fuller unit root test of I({i}):') if (not unit_root(df, pvalue=pvalue, noprint=noprint)): return i df = df.diff().dropna()
returns order of integration by iteratively testing for unit root
examples/econometric_forecast.py
integration_order
terence-lim/investment-data-science
2
python
def integration_order(df, noprint=True, max_order=5, pvalue=0.05): for i in range(max_order): if (not noprint): print(f'Augmented Dickey-Fuller unit root test of I({i}):') if (not unit_root(df, pvalue=pvalue, noprint=noprint)): return i df = df.diff().dropna()
def integration_order(df, noprint=True, max_order=5, pvalue=0.05): for i in range(max_order): if (not noprint): print(f'Augmented Dickey-Fuller unit root test of I({i}):') if (not unit_root(df, pvalue=pvalue, noprint=noprint)): return i df = df.diff().dropna()<|docstring|>returns order of integration by iteratively testing for unit root<|endoftext|>
37b59d6673efbbfd755ded666c1e09ba038ab2f321832c508134d2f666821928
def __init__(self, params): '\n Initialise the environmental parametrs for the NILM experiment. For the \n hyper-parameters, it takes the default values defined in the config module \n and updates only the subset of values specified in params.\n\n :param params: Dictionnary with different values of hyper-parameters.\n :type params: dictionnary\n ' super().__init__() hparams = get_exp_parameters() hparams = vars(hparams.parse_args()) hparams.update(params) pl.seed_everything(hparams['seed']) self._data = None self.models = OrderedDict() self.data_loaders = OrderedDict() self.MODEL_NAME = hparams['model_name'] self.hparams = hparams self.run_id = OrderedDict() self.optuna_params = OrderedDict() self.best_trials = {} self.appliance_params = {}
Initialise the environmental parametrs for the NILM experiment. For the hyper-parameters, it takes the default values defined in the config module and updates only the subset of values specified in params. :param params: Dictionnary with different values of hyper-parameters. :type params: dictionnary
deep_nilmtk/disaggregate/nilm_experiment.py
__init__
reviwe/deep-nilmtk-v1
0
python
def __init__(self, params): '\n Initialise the environmental parametrs for the NILM experiment. For the \n hyper-parameters, it takes the default values defined in the config module \n and updates only the subset of values specified in params.\n\n :param params: Dictionnary with different values of hyper-parameters.\n :type params: dictionnary\n ' super().__init__() hparams = get_exp_parameters() hparams = vars(hparams.parse_args()) hparams.update(params) pl.seed_everything(hparams['seed']) self._data = None self.models = OrderedDict() self.data_loaders = OrderedDict() self.MODEL_NAME = hparams['model_name'] self.hparams = hparams self.run_id = OrderedDict() self.optuna_params = OrderedDict() self.best_trials = {} self.appliance_params = {}
def __init__(self, params): '\n Initialise the environmental parametrs for the NILM experiment. For the \n hyper-parameters, it takes the default values defined in the config module \n and updates only the subset of values specified in params.\n\n :param params: Dictionnary with different values of hyper-parameters.\n :type params: dictionnary\n ' super().__init__() hparams = get_exp_parameters() hparams = vars(hparams.parse_args()) hparams.update(params) pl.seed_everything(hparams['seed']) self._data = None self.models = OrderedDict() self.data_loaders = OrderedDict() self.MODEL_NAME = hparams['model_name'] self.hparams = hparams self.run_id = OrderedDict() self.optuna_params = OrderedDict() self.best_trials = {} self.appliance_params = {}<|docstring|>Initialise the environmental parametrs for the NILM experiment. For the hyper-parameters, it takes the default values defined in the config module and updates only the subset of values specified in params. :param params: Dictionnary with different values of hyper-parameters. :type params: dictionnary<|endoftext|>
c32a7df333ca00a92064e150782c27610c3591a0d3e9eb6b3df94bd9a3ecac48
def _prepare_data(self, mains, sub_main): "\n Performs data pre-processing and formating. By default, the default pre-processing \n method in used. Neverthless, custom pre-processing methdos are also possible\n to use and need only to be specified in the corresponding entry of the model\n within the config module within the extra_params. For example:\n \n NILM_MODELS = {\n ...\n 'NILMmodel': {\n 'model': modelClass,\n 'loader': dataLoaderClass,\n 'extra_params':{\n 'pre-process': preprocessingFunction\n }\n },\n ...\n }\n\n :param mains: aggregtae power consumption\n :type mains: List of pd.DataFrame\n :param sub_main: sub metered energy consumption\n :type sub_main: List of pd.DataFrame\n " if (self._data is None): preprocess_func = (NILM_MODELS[self.hparams['model_name']]['extra_params']['pre-process'] if ('pre-process' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else data_preprocessing) (mains, multi_appliance_meters, single_appliance_meters) = preprocess_func(mains, sub_main, self.hparams['feature_type'], self.hparams['alpha'], self.hparams['input_norm'], self.hparams['main_mu'], self.hparams['main_std'], self.hparams['q_filter'], (self.hparams['cutoff'] if ('cutoff' in self.hparams) else None)) self._data = ({'features': mains, 'targets': multi_appliance_meters} if self.hparams['multi_appliance'] else {'features': mains, 'targets': single_appliance_meters})
Performs data pre-processing and formating. By default, the default pre-processing method in used. Neverthless, custom pre-processing methdos are also possible to use and need only to be specified in the corresponding entry of the model within the config module within the extra_params. For example: NILM_MODELS = { ... 'NILMmodel': { 'model': modelClass, 'loader': dataLoaderClass, 'extra_params':{ 'pre-process': preprocessingFunction } }, ... } :param mains: aggregtae power consumption :type mains: List of pd.DataFrame :param sub_main: sub metered energy consumption :type sub_main: List of pd.DataFrame
deep_nilmtk/disaggregate/nilm_experiment.py
_prepare_data
reviwe/deep-nilmtk-v1
0
python
def _prepare_data(self, mains, sub_main): "\n Performs data pre-processing and formating. By default, the default pre-processing \n method in used. Neverthless, custom pre-processing methdos are also possible\n to use and need only to be specified in the corresponding entry of the model\n within the config module within the extra_params. For example:\n \n NILM_MODELS = {\n ...\n 'NILMmodel': {\n 'model': modelClass,\n 'loader': dataLoaderClass,\n 'extra_params':{\n 'pre-process': preprocessingFunction\n }\n },\n ...\n }\n\n :param mains: aggregtae power consumption\n :type mains: List of pd.DataFrame\n :param sub_main: sub metered energy consumption\n :type sub_main: List of pd.DataFrame\n " if (self._data is None): preprocess_func = (NILM_MODELS[self.hparams['model_name']]['extra_params']['pre-process'] if ('pre-process' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else data_preprocessing) (mains, multi_appliance_meters, single_appliance_meters) = preprocess_func(mains, sub_main, self.hparams['feature_type'], self.hparams['alpha'], self.hparams['input_norm'], self.hparams['main_mu'], self.hparams['main_std'], self.hparams['q_filter'], (self.hparams['cutoff'] if ('cutoff' in self.hparams) else None)) self._data = ({'features': mains, 'targets': multi_appliance_meters} if self.hparams['multi_appliance'] else {'features': mains, 'targets': single_appliance_meters})
def _prepare_data(self, mains, sub_main): "\n Performs data pre-processing and formating. By default, the default pre-processing \n method in used. Neverthless, custom pre-processing methdos are also possible\n to use and need only to be specified in the corresponding entry of the model\n within the config module within the extra_params. For example:\n \n NILM_MODELS = {\n ...\n 'NILMmodel': {\n 'model': modelClass,\n 'loader': dataLoaderClass,\n 'extra_params':{\n 'pre-process': preprocessingFunction\n }\n },\n ...\n }\n\n :param mains: aggregtae power consumption\n :type mains: List of pd.DataFrame\n :param sub_main: sub metered energy consumption\n :type sub_main: List of pd.DataFrame\n " if (self._data is None): preprocess_func = (NILM_MODELS[self.hparams['model_name']]['extra_params']['pre-process'] if ('pre-process' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else data_preprocessing) (mains, multi_appliance_meters, single_appliance_meters) = preprocess_func(mains, sub_main, self.hparams['feature_type'], self.hparams['alpha'], self.hparams['input_norm'], self.hparams['main_mu'], self.hparams['main_std'], self.hparams['q_filter'], (self.hparams['cutoff'] if ('cutoff' in self.hparams) else None)) self._data = ({'features': mains, 'targets': multi_appliance_meters} if self.hparams['multi_appliance'] else {'features': mains, 'targets': single_appliance_meters})<|docstring|>Performs data pre-processing and formating. By default, the default pre-processing method in used. Neverthless, custom pre-processing methdos are also possible to use and need only to be specified in the corresponding entry of the model within the config module within the extra_params. For example: NILM_MODELS = { ... 'NILMmodel': { 'model': modelClass, 'loader': dataLoaderClass, 'extra_params':{ 'pre-process': preprocessingFunction } }, ... 
} :param mains: aggregtae power consumption :type mains: List of pd.DataFrame :param sub_main: sub metered energy consumption :type sub_main: List of pd.DataFrame<|endoftext|>
786e3be9742daa34d4e4dd1f75fba70e7d4cbb42bf9f1d0554ef7205270cc447
def partial_fit(self, mains, sub_main, do_preprocessing=True, **load_kwargs): " Trains the model for appliances according to the model name specified \n in the experiment's definition. It starts with the data pre-processing and \n formatting and then train the model based on the type of the model(single \n or multi-task).\n\n\n :param mains: Aggregate power measurements.\n :type mains: Liste of pd.DataFrame\n :param sub_main: Appliances power measurements.\n :type sub_main: Liste of pd.DataFrame\n :param do_preprocessing: Performs pre-processing or not. Defaults to True., defaults to True\n :type do_preprocessing: bool, optional\n " if (self._data is None): if do_preprocessing: self._prepare_data(mains, sub_main) logs = Path(self.hparams['logs_path']) results = Path(self.hparams['results_path']) figures = Path(self.hparams['figure_path']) logs.mkdir(parents=True, exist_ok=True) logs.mkdir(parents=True, exist_ok=True) results.mkdir(parents=True, exist_ok=True) figures.mkdir(parents=True, exist_ok=True) self.hparams['appliances'] = [app_name for (app_name, data) in sub_main] if (not self.hparams['multi_appliance']): self.single_appliance_fit() else: self.multi_appliance_fit()
Trains the model for appliances according to the model name specified in the experiment's definition. It starts with the data pre-processing and formatting and then train the model based on the type of the model(single or multi-task). :param mains: Aggregate power measurements. :type mains: Liste of pd.DataFrame :param sub_main: Appliances power measurements. :type sub_main: Liste of pd.DataFrame :param do_preprocessing: Performs pre-processing or not. Defaults to True., defaults to True :type do_preprocessing: bool, optional
deep_nilmtk/disaggregate/nilm_experiment.py
partial_fit
reviwe/deep-nilmtk-v1
0
python
def partial_fit(self, mains, sub_main, do_preprocessing=True, **load_kwargs): " Trains the model for appliances according to the model name specified \n in the experiment's definition. It starts with the data pre-processing and \n formatting and then train the model based on the type of the model(single \n or multi-task).\n\n\n :param mains: Aggregate power measurements.\n :type mains: Liste of pd.DataFrame\n :param sub_main: Appliances power measurements.\n :type sub_main: Liste of pd.DataFrame\n :param do_preprocessing: Performs pre-processing or not. Defaults to True., defaults to True\n :type do_preprocessing: bool, optional\n " if (self._data is None): if do_preprocessing: self._prepare_data(mains, sub_main) logs = Path(self.hparams['logs_path']) results = Path(self.hparams['results_path']) figures = Path(self.hparams['figure_path']) logs.mkdir(parents=True, exist_ok=True) logs.mkdir(parents=True, exist_ok=True) results.mkdir(parents=True, exist_ok=True) figures.mkdir(parents=True, exist_ok=True) self.hparams['appliances'] = [app_name for (app_name, data) in sub_main] if (not self.hparams['multi_appliance']): self.single_appliance_fit() else: self.multi_appliance_fit()
def partial_fit(self, mains, sub_main, do_preprocessing=True, **load_kwargs): " Trains the model for appliances according to the model name specified \n in the experiment's definition. It starts with the data pre-processing and \n formatting and then train the model based on the type of the model(single \n or multi-task).\n\n\n :param mains: Aggregate power measurements.\n :type mains: Liste of pd.DataFrame\n :param sub_main: Appliances power measurements.\n :type sub_main: Liste of pd.DataFrame\n :param do_preprocessing: Performs pre-processing or not. Defaults to True., defaults to True\n :type do_preprocessing: bool, optional\n " if (self._data is None): if do_preprocessing: self._prepare_data(mains, sub_main) logs = Path(self.hparams['logs_path']) results = Path(self.hparams['results_path']) figures = Path(self.hparams['figure_path']) logs.mkdir(parents=True, exist_ok=True) logs.mkdir(parents=True, exist_ok=True) results.mkdir(parents=True, exist_ok=True) figures.mkdir(parents=True, exist_ok=True) self.hparams['appliances'] = [app_name for (app_name, data) in sub_main] if (not self.hparams['multi_appliance']): self.single_appliance_fit() else: self.multi_appliance_fit()<|docstring|>Trains the model for appliances according to the model name specified in the experiment's definition. It starts with the data pre-processing and formatting and then train the model based on the type of the model(single or multi-task). :param mains: Aggregate power measurements. :type mains: Liste of pd.DataFrame :param sub_main: Appliances power measurements. :type sub_main: Liste of pd.DataFrame :param do_preprocessing: Performs pre-processing or not. Defaults to True., defaults to True :type do_preprocessing: bool, optional<|endoftext|>
db6a1cc9a8ad664754a038ddef6adb35fb26e00b7d71c28b65a9ce69db268075
def disaggregate_chunk(self, test_main_list, do_preprocessing=True): '\n Uses trained models to disaggregate the test_main_list. It is compatible with both single and multi-appliance models. \n\n :param test_main_list: Aggregate power measurements.\n :type test_main_list: Liste of pd.DataFrame\n :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True\n :type do_preprocessing: bool, optional\n :return: Appliances power measurements.\n :rtype: list of pd.DataFrame\n ' if self.hparams['multi_appliance']: test_predictions = self.multi_appliance_disaggregate(test_main_list, do_preprocessing=do_preprocessing) return test_predictions else: test_predictions = self.single_appliance_disaggregate(test_main_list, do_preprocessing=do_preprocessing) return test_predictions
Uses trained models to disaggregate the test_main_list. It is compatible with both single and multi-appliance models. :param test_main_list: Aggregate power measurements. :type test_main_list: Liste of pd.DataFrame :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True :type do_preprocessing: bool, optional :return: Appliances power measurements. :rtype: list of pd.DataFrame
deep_nilmtk/disaggregate/nilm_experiment.py
disaggregate_chunk
reviwe/deep-nilmtk-v1
0
python
def disaggregate_chunk(self, test_main_list, do_preprocessing=True): '\n Uses trained models to disaggregate the test_main_list. It is compatible with both single and multi-appliance models. \n\n :param test_main_list: Aggregate power measurements.\n :type test_main_list: Liste of pd.DataFrame\n :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True\n :type do_preprocessing: bool, optional\n :return: Appliances power measurements.\n :rtype: list of pd.DataFrame\n ' if self.hparams['multi_appliance']: test_predictions = self.multi_appliance_disaggregate(test_main_list, do_preprocessing=do_preprocessing) return test_predictions else: test_predictions = self.single_appliance_disaggregate(test_main_list, do_preprocessing=do_preprocessing) return test_predictions
def disaggregate_chunk(self, test_main_list, do_preprocessing=True): '\n Uses trained models to disaggregate the test_main_list. It is compatible with both single and multi-appliance models. \n\n :param test_main_list: Aggregate power measurements.\n :type test_main_list: Liste of pd.DataFrame\n :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True\n :type do_preprocessing: bool, optional\n :return: Appliances power measurements.\n :rtype: list of pd.DataFrame\n ' if self.hparams['multi_appliance']: test_predictions = self.multi_appliance_disaggregate(test_main_list, do_preprocessing=do_preprocessing) return test_predictions else: test_predictions = self.single_appliance_disaggregate(test_main_list, do_preprocessing=do_preprocessing) return test_predictions<|docstring|>Uses trained models to disaggregate the test_main_list. It is compatible with both single and multi-appliance models. :param test_main_list: Aggregate power measurements. :type test_main_list: Liste of pd.DataFrame :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True :type do_preprocessing: bool, optional :return: Appliances power measurements. :rtype: list of pd.DataFrame<|endoftext|>
dced02b4e7d13a3bce04a3780c9983e5a88e047a278af86437bb790caf8d48a3
def single_appliance_disaggregate(self, test_main_list, model=None, do_preprocessing=True): "\n Perfroms load disaggregtaion for single appliance models. If Optuna was used during the \n training phase, it disaggregtaes the test_main_list using only the best trial. \n If cross-validation is used during training, it returns the average of predictions \n cross all folds for each applaince. In this later case, the predictions for each fold \n are also logged in the results folder under the name \n ['model_name']_[appliance_name]_all_folds_predictions.p. \n Alternatively, when both Optuna and cross-validation are used, it returns the average predictions \n of all folds for only the best trial.\n\n :param test_main_list: Aggregate power measurements\n :type test_main_list: liste of pd.DataFrame\n :param model: Pre-trained appliance's models. Defaults to None.\n :type model: dict, optional\n :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True\n :type do_preprocessing: bool, optional\n :return: estimated power consumption of the considered appliances.\n :rtype: liste of dict\n " if (model is not None): self.models = model if do_preprocessing: test_main_list = data_preprocessing(test_main_list, None, self.hparams['feature_type'], self.hparams['alpha'], self.hparams['input_norm'], self.hparams['main_mu'], self.hparams['main_std'], self.hparams['q_filter']) test_predictions = [] test_results = [] for test_main in test_main_list: test_main = test_main.values disggregation_dict = {} result_dict = {} for appliance in self.models: dataloader = self.data_loaders[appliance] model = self.models[appliance] data = dataloader(inputs=test_main, targets=None, params=self.hparams) test_loader = torch.utils.data.DataLoader(data, self.hparams['batch_size'], collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else 
None), shuffle=False, num_workers=self.hparams['num_workers']) exp_name = (self.hparams['checkpoints_path'] + f'{self.exp_name}_{appliance}') if self.hparams['use_optuna']: exp_name += f'/trial_{self.best_trials[appliance]}/' if (self.hparams['kfolds'] > 1): app_result_cross_fold = [] dump_results = {} for fold in model: checkpoint_path = get_latest_checkpoint((exp_name + f'/{fold}')) chechpoint = torch.load(checkpoint_path) model_fold = model[fold] model_fold.load_state_dict(chechpoint['state_dict']) model_fold.eval() network = model_fold.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params[appliance]['mean'] network.std = self.appliance_params[appliance]['std'] elif (self.hparams['target_norm'] == 'min-max'): network.min = self.appliance_params[appliance]['min'] network.max = self.appliance_params[appliance]['max'] results = network.predict(model_fold, test_loader) df = results['pred'].cpu().numpy().flatten() app_result_cross_fold.append(df) dump_results[fold] = df dump_results['mean_preditions'] = pd.Series(np.mean(np.array(app_result_cross_fold), axis=0)) dump_results['std_predictions'] = pd.Series(np.std(np.array(app_result_cross_fold), axis=0)) dump_results['min_predictions'] = pd.Series(np.min(np.array(app_result_cross_fold), axis=0)) dump_results['max_predictions'] = pd.Series(np.max(np.array(app_result_cross_fold), axis=0)) pickle.dump(dump_results, open(f"{self.hparams['results_path']}/{self.hparams['model_name']}_{appliance}_all_folds_predictions.p", 'wb')) df = pd.Series(np.mean(np.array(app_result_cross_fold), axis=0)) else: checkpoint_path = get_latest_checkpoint(exp_name) chechpoint = torch.load(checkpoint_path) model.load_state_dict(chechpoint['state_dict']) model.eval() network = model.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params[appliance]['mean'] network.std = self.appliance_params[appliance]['std'] elif (self.hparams['target_norm'] == 'min-max'): 
network.min = self.appliance_params[appliance]['min'] network.max = self.appliance_params[appliance]['max'] results = network.predict(model, test_loader) df = pd.Series(results['pred'].cpu().numpy().flatten()) disggregation_dict[appliance] = df result_dict[appliance] = results appliance_results = {} for key in results: appliance_results[key] = pd.Series(results[key].cpu().numpy().flatten()) appliance_results = pd.DataFrame(appliance_results) os.makedirs((self.hparams['results_path'] + f'/{appliance}'), exist_ok=True) appliance_results.to_csv((self.hparams['results_path'] + f'/{appliance}/{self.exp_name}.csv'), index=False) mlflow.set_experiment(appliance) if self.hparams['log_artificat']: with mlflow.start_run(self.run_id[appliance]): mlflow.log_artifacts((self.hparams['results_path'] + f'/{appliance}'), artifact_path='test_results') results = pd.DataFrame(disggregation_dict, dtype='float32') test_predictions.append(results) test_results.append(result_dict) np.save((self.hparams['results_path'] + f'{self.exp_name}.npy'), test_results) return test_predictions
Perfroms load disaggregtaion for single appliance models. If Optuna was used during the training phase, it disaggregtaes the test_main_list using only the best trial. If cross-validation is used during training, it returns the average of predictions cross all folds for each applaince. In this later case, the predictions for each fold are also logged in the results folder under the name ['model_name']_[appliance_name]_all_folds_predictions.p. Alternatively, when both Optuna and cross-validation are used, it returns the average predictions of all folds for only the best trial. :param test_main_list: Aggregate power measurements :type test_main_list: liste of pd.DataFrame :param model: Pre-trained appliance's models. Defaults to None. :type model: dict, optional :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True :type do_preprocessing: bool, optional :return: estimated power consumption of the considered appliances. :rtype: liste of dict
deep_nilmtk/disaggregate/nilm_experiment.py
single_appliance_disaggregate
reviwe/deep-nilmtk-v1
0
python
def single_appliance_disaggregate(self, test_main_list, model=None, do_preprocessing=True): "\n Perfroms load disaggregtaion for single appliance models. If Optuna was used during the \n training phase, it disaggregtaes the test_main_list using only the best trial. \n If cross-validation is used during training, it returns the average of predictions \n cross all folds for each applaince. In this later case, the predictions for each fold \n are also logged in the results folder under the name \n ['model_name']_[appliance_name]_all_folds_predictions.p. \n Alternatively, when both Optuna and cross-validation are used, it returns the average predictions \n of all folds for only the best trial.\n\n :param test_main_list: Aggregate power measurements\n :type test_main_list: liste of pd.DataFrame\n :param model: Pre-trained appliance's models. Defaults to None.\n :type model: dict, optional\n :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True\n :type do_preprocessing: bool, optional\n :return: estimated power consumption of the considered appliances.\n :rtype: liste of dict\n " if (model is not None): self.models = model if do_preprocessing: test_main_list = data_preprocessing(test_main_list, None, self.hparams['feature_type'], self.hparams['alpha'], self.hparams['input_norm'], self.hparams['main_mu'], self.hparams['main_std'], self.hparams['q_filter']) test_predictions = [] test_results = [] for test_main in test_main_list: test_main = test_main.values disggregation_dict = {} result_dict = {} for appliance in self.models: dataloader = self.data_loaders[appliance] model = self.models[appliance] data = dataloader(inputs=test_main, targets=None, params=self.hparams) test_loader = torch.utils.data.DataLoader(data, self.hparams['batch_size'], collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else 
None), shuffle=False, num_workers=self.hparams['num_workers']) exp_name = (self.hparams['checkpoints_path'] + f'{self.exp_name}_{appliance}') if self.hparams['use_optuna']: exp_name += f'/trial_{self.best_trials[appliance]}/' if (self.hparams['kfolds'] > 1): app_result_cross_fold = [] dump_results = {} for fold in model: checkpoint_path = get_latest_checkpoint((exp_name + f'/{fold}')) chechpoint = torch.load(checkpoint_path) model_fold = model[fold] model_fold.load_state_dict(chechpoint['state_dict']) model_fold.eval() network = model_fold.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params[appliance]['mean'] network.std = self.appliance_params[appliance]['std'] elif (self.hparams['target_norm'] == 'min-max'): network.min = self.appliance_params[appliance]['min'] network.max = self.appliance_params[appliance]['max'] results = network.predict(model_fold, test_loader) df = results['pred'].cpu().numpy().flatten() app_result_cross_fold.append(df) dump_results[fold] = df dump_results['mean_preditions'] = pd.Series(np.mean(np.array(app_result_cross_fold), axis=0)) dump_results['std_predictions'] = pd.Series(np.std(np.array(app_result_cross_fold), axis=0)) dump_results['min_predictions'] = pd.Series(np.min(np.array(app_result_cross_fold), axis=0)) dump_results['max_predictions'] = pd.Series(np.max(np.array(app_result_cross_fold), axis=0)) pickle.dump(dump_results, open(f"{self.hparams['results_path']}/{self.hparams['model_name']}_{appliance}_all_folds_predictions.p", 'wb')) df = pd.Series(np.mean(np.array(app_result_cross_fold), axis=0)) else: checkpoint_path = get_latest_checkpoint(exp_name) chechpoint = torch.load(checkpoint_path) model.load_state_dict(chechpoint['state_dict']) model.eval() network = model.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params[appliance]['mean'] network.std = self.appliance_params[appliance]['std'] elif (self.hparams['target_norm'] == 'min-max'): 
network.min = self.appliance_params[appliance]['min'] network.max = self.appliance_params[appliance]['max'] results = network.predict(model, test_loader) df = pd.Series(results['pred'].cpu().numpy().flatten()) disggregation_dict[appliance] = df result_dict[appliance] = results appliance_results = {} for key in results: appliance_results[key] = pd.Series(results[key].cpu().numpy().flatten()) appliance_results = pd.DataFrame(appliance_results) os.makedirs((self.hparams['results_path'] + f'/{appliance}'), exist_ok=True) appliance_results.to_csv((self.hparams['results_path'] + f'/{appliance}/{self.exp_name}.csv'), index=False) mlflow.set_experiment(appliance) if self.hparams['log_artificat']: with mlflow.start_run(self.run_id[appliance]): mlflow.log_artifacts((self.hparams['results_path'] + f'/{appliance}'), artifact_path='test_results') results = pd.DataFrame(disggregation_dict, dtype='float32') test_predictions.append(results) test_results.append(result_dict) np.save((self.hparams['results_path'] + f'{self.exp_name}.npy'), test_results) return test_predictions
def single_appliance_disaggregate(self, test_main_list, model=None, do_preprocessing=True): "\n Perfroms load disaggregtaion for single appliance models. If Optuna was used during the \n training phase, it disaggregtaes the test_main_list using only the best trial. \n If cross-validation is used during training, it returns the average of predictions \n cross all folds for each applaince. In this later case, the predictions for each fold \n are also logged in the results folder under the name \n ['model_name']_[appliance_name]_all_folds_predictions.p. \n Alternatively, when both Optuna and cross-validation are used, it returns the average predictions \n of all folds for only the best trial.\n\n :param test_main_list: Aggregate power measurements\n :type test_main_list: liste of pd.DataFrame\n :param model: Pre-trained appliance's models. Defaults to None.\n :type model: dict, optional\n :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True\n :type do_preprocessing: bool, optional\n :return: estimated power consumption of the considered appliances.\n :rtype: liste of dict\n " if (model is not None): self.models = model if do_preprocessing: test_main_list = data_preprocessing(test_main_list, None, self.hparams['feature_type'], self.hparams['alpha'], self.hparams['input_norm'], self.hparams['main_mu'], self.hparams['main_std'], self.hparams['q_filter']) test_predictions = [] test_results = [] for test_main in test_main_list: test_main = test_main.values disggregation_dict = {} result_dict = {} for appliance in self.models: dataloader = self.data_loaders[appliance] model = self.models[appliance] data = dataloader(inputs=test_main, targets=None, params=self.hparams) test_loader = torch.utils.data.DataLoader(data, self.hparams['batch_size'], collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else 
None), shuffle=False, num_workers=self.hparams['num_workers']) exp_name = (self.hparams['checkpoints_path'] + f'{self.exp_name}_{appliance}') if self.hparams['use_optuna']: exp_name += f'/trial_{self.best_trials[appliance]}/' if (self.hparams['kfolds'] > 1): app_result_cross_fold = [] dump_results = {} for fold in model: checkpoint_path = get_latest_checkpoint((exp_name + f'/{fold}')) chechpoint = torch.load(checkpoint_path) model_fold = model[fold] model_fold.load_state_dict(chechpoint['state_dict']) model_fold.eval() network = model_fold.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params[appliance]['mean'] network.std = self.appliance_params[appliance]['std'] elif (self.hparams['target_norm'] == 'min-max'): network.min = self.appliance_params[appliance]['min'] network.max = self.appliance_params[appliance]['max'] results = network.predict(model_fold, test_loader) df = results['pred'].cpu().numpy().flatten() app_result_cross_fold.append(df) dump_results[fold] = df dump_results['mean_preditions'] = pd.Series(np.mean(np.array(app_result_cross_fold), axis=0)) dump_results['std_predictions'] = pd.Series(np.std(np.array(app_result_cross_fold), axis=0)) dump_results['min_predictions'] = pd.Series(np.min(np.array(app_result_cross_fold), axis=0)) dump_results['max_predictions'] = pd.Series(np.max(np.array(app_result_cross_fold), axis=0)) pickle.dump(dump_results, open(f"{self.hparams['results_path']}/{self.hparams['model_name']}_{appliance}_all_folds_predictions.p", 'wb')) df = pd.Series(np.mean(np.array(app_result_cross_fold), axis=0)) else: checkpoint_path = get_latest_checkpoint(exp_name) chechpoint = torch.load(checkpoint_path) model.load_state_dict(chechpoint['state_dict']) model.eval() network = model.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params[appliance]['mean'] network.std = self.appliance_params[appliance]['std'] elif (self.hparams['target_norm'] == 'min-max'): 
network.min = self.appliance_params[appliance]['min'] network.max = self.appliance_params[appliance]['max'] results = network.predict(model, test_loader) df = pd.Series(results['pred'].cpu().numpy().flatten()) disggregation_dict[appliance] = df result_dict[appliance] = results appliance_results = {} for key in results: appliance_results[key] = pd.Series(results[key].cpu().numpy().flatten()) appliance_results = pd.DataFrame(appliance_results) os.makedirs((self.hparams['results_path'] + f'/{appliance}'), exist_ok=True) appliance_results.to_csv((self.hparams['results_path'] + f'/{appliance}/{self.exp_name}.csv'), index=False) mlflow.set_experiment(appliance) if self.hparams['log_artificat']: with mlflow.start_run(self.run_id[appliance]): mlflow.log_artifacts((self.hparams['results_path'] + f'/{appliance}'), artifact_path='test_results') results = pd.DataFrame(disggregation_dict, dtype='float32') test_predictions.append(results) test_results.append(result_dict) np.save((self.hparams['results_path'] + f'{self.exp_name}.npy'), test_results) return test_predictions<|docstring|>Perfroms load disaggregtaion for single appliance models. If Optuna was used during the training phase, it disaggregtaes the test_main_list using only the best trial. If cross-validation is used during training, it returns the average of predictions cross all folds for each applaince. In this later case, the predictions for each fold are also logged in the results folder under the name ['model_name']_[appliance_name]_all_folds_predictions.p. Alternatively, when both Optuna and cross-validation are used, it returns the average predictions of all folds for only the best trial. :param test_main_list: Aggregate power measurements :type test_main_list: liste of pd.DataFrame :param model: Pre-trained appliance's models. Defaults to None. 
:type model: dict, optional :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True :type do_preprocessing: bool, optional :return: estimated power consumption of the considered appliances. :rtype: liste of dict<|endoftext|>
80aaf9c41d2a71a400abb43113d83d2af3fe4915e6e28396ff79e4abebd283f8
def objective(self, trial, train_loader=None, val_loader=None, fold_idx=None): 'The objective function to be used with optuna. This function requires the model under study to \n implement a static function called suggest_hparams() [see the model documentation for more informations]\n\n :param trial: Optuna.trial\n :param train_loader: training dataLoader for the current experiment. Defaults to None.\n :type train_loader: DataLoader, optional\n :param val_loader: validation dataLoader for the current experiment. Defaults to None.\n :type val_loader: DataLoader, optional\n :param fold_idx: Number of the fold of cross-validation is used. Defaults to None.\n :type fold_idx: int, optional\n :raises Exception: In case the model does not suggest any parameters.\n :return: The best validation loss aschieved\n :rtype: float\n ' best_val_loss = float('Inf') mlflow.set_experiment(f"{self.optuna_params['appliance_name']}") with mlflow.start_run(): suggested_params_func = NILM_MODELS[self.hparams['model_name']]['model'].suggest_hparams if callable(suggested_params_func): suggested_params = NILM_MODELS[self.hparams['model_name']]['model'].suggest_hparams(None, trial) self.hparams.update(suggested_params) mlflow.log_params(suggested_params) else: raise Exception('\n No params to optimise by optuna\n A static function inside the NILM model should provide\n a dictionnary of params suggested by optuna\n see documentation for more details\n ') print('First model training for', self.optuna_params['appliance_name']) (net, dataloader) = self.get_net_and_loaders() if ((train_loader is None) or (val_loader is None)): self.data_loaders[self.optuna_params['appliance_name']] = dataloader dataloader = self.data_loaders[self.optuna_params['appliance_name']] data = dataloader(inputs=self._data['features'], targets=self.optuna_params['power']) (train_data, val_data) = torch.utils.data.random_split(data, [int((data.len * (1 - 0.15))), (data.len - int((data.len * (1 - 0.15))))], 
generator=torch.Generator().manual_seed(3407)) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) if (fold_idx is None): self.models[self.optuna_params['appliance_name']] = pilModel(net, self.hparams) (best_val_loss, path) = self.train_model(self.optuna_params['appliance_name'], train_loader, val_loader, self.optuna_params['exp_name'], (data.mean if (self.hparams['target_norm'] == 'z-norm') else None), (data.std if (self.hparams['target_norm'] == 'z-norm') else None), trial_idx=trial.number) else: self.models[self.optuna_params['appliance_name']][f'fold_{fold_idx}'] = pilModel(net, self.hparams) (best_val_loss, path) = self.train_model(self.optuna_params['appliance_name'], train_loader, val_loader, self.optuna_params['exp_name'], (data.mean if (self.hparams['target_norm'] == 'z-norm') else None), (data.std if (self.hparams['target_norm'] == 'z-norm') else None), trial_idx=trial.number, fold_idx=fold_idx, model=self.models[self.optuna_params['appliance_name']][f'fold_{fold_idx}']) trial.set_user_attr(key='best_run_id', value=mlflow.active_run().info.run_id) trial.set_user_attr(key='trial_ID', value=trial.number) trial.set_user_attr(key='path', value=path) return best_val_loss
The objective function to be used with optuna. This function requires the model under study to implement a static function called suggest_hparams() [see the model documentation for more informations] :param trial: Optuna.trial :param train_loader: training dataLoader for the current experiment. Defaults to None. :type train_loader: DataLoader, optional :param val_loader: validation dataLoader for the current experiment. Defaults to None. :type val_loader: DataLoader, optional :param fold_idx: Number of the fold of cross-validation is used. Defaults to None. :type fold_idx: int, optional :raises Exception: In case the model does not suggest any parameters. :return: The best validation loss aschieved :rtype: float
deep_nilmtk/disaggregate/nilm_experiment.py
objective
reviwe/deep-nilmtk-v1
0
python
def objective(self, trial, train_loader=None, val_loader=None, fold_idx=None): 'The objective function to be used with optuna. This function requires the model under study to \n implement a static function called suggest_hparams() [see the model documentation for more informations]\n\n :param trial: Optuna.trial\n :param train_loader: training dataLoader for the current experiment. Defaults to None.\n :type train_loader: DataLoader, optional\n :param val_loader: validation dataLoader for the current experiment. Defaults to None.\n :type val_loader: DataLoader, optional\n :param fold_idx: Number of the fold of cross-validation is used. Defaults to None.\n :type fold_idx: int, optional\n :raises Exception: In case the model does not suggest any parameters.\n :return: The best validation loss aschieved\n :rtype: float\n ' best_val_loss = float('Inf') mlflow.set_experiment(f"{self.optuna_params['appliance_name']}") with mlflow.start_run(): suggested_params_func = NILM_MODELS[self.hparams['model_name']]['model'].suggest_hparams if callable(suggested_params_func): suggested_params = NILM_MODELS[self.hparams['model_name']]['model'].suggest_hparams(None, trial) self.hparams.update(suggested_params) mlflow.log_params(suggested_params) else: raise Exception('\n No params to optimise by optuna\n A static function inside the NILM model should provide\n a dictionnary of params suggested by optuna\n see documentation for more details\n ') print('First model training for', self.optuna_params['appliance_name']) (net, dataloader) = self.get_net_and_loaders() if ((train_loader is None) or (val_loader is None)): self.data_loaders[self.optuna_params['appliance_name']] = dataloader dataloader = self.data_loaders[self.optuna_params['appliance_name']] data = dataloader(inputs=self._data['features'], targets=self.optuna_params['power']) (train_data, val_data) = torch.utils.data.random_split(data, [int((data.len * (1 - 0.15))), (data.len - int((data.len * (1 - 0.15))))], 
generator=torch.Generator().manual_seed(3407)) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) if (fold_idx is None): self.models[self.optuna_params['appliance_name']] = pilModel(net, self.hparams) (best_val_loss, path) = self.train_model(self.optuna_params['appliance_name'], train_loader, val_loader, self.optuna_params['exp_name'], (data.mean if (self.hparams['target_norm'] == 'z-norm') else None), (data.std if (self.hparams['target_norm'] == 'z-norm') else None), trial_idx=trial.number) else: self.models[self.optuna_params['appliance_name']][f'fold_{fold_idx}'] = pilModel(net, self.hparams) (best_val_loss, path) = self.train_model(self.optuna_params['appliance_name'], train_loader, val_loader, self.optuna_params['exp_name'], (data.mean if (self.hparams['target_norm'] == 'z-norm') else None), (data.std if (self.hparams['target_norm'] == 'z-norm') else None), trial_idx=trial.number, fold_idx=fold_idx, model=self.models[self.optuna_params['appliance_name']][f'fold_{fold_idx}']) trial.set_user_attr(key='best_run_id', value=mlflow.active_run().info.run_id) trial.set_user_attr(key='trial_ID', value=trial.number) trial.set_user_attr(key='path', value=path) return best_val_loss
def objective(self, trial, train_loader=None, val_loader=None, fold_idx=None): 'The objective function to be used with optuna. This function requires the model under study to \n implement a static function called suggest_hparams() [see the model documentation for more informations]\n\n :param trial: Optuna.trial\n :param train_loader: training dataLoader for the current experiment. Defaults to None.\n :type train_loader: DataLoader, optional\n :param val_loader: validation dataLoader for the current experiment. Defaults to None.\n :type val_loader: DataLoader, optional\n :param fold_idx: Number of the fold of cross-validation is used. Defaults to None.\n :type fold_idx: int, optional\n :raises Exception: In case the model does not suggest any parameters.\n :return: The best validation loss aschieved\n :rtype: float\n ' best_val_loss = float('Inf') mlflow.set_experiment(f"{self.optuna_params['appliance_name']}") with mlflow.start_run(): suggested_params_func = NILM_MODELS[self.hparams['model_name']]['model'].suggest_hparams if callable(suggested_params_func): suggested_params = NILM_MODELS[self.hparams['model_name']]['model'].suggest_hparams(None, trial) self.hparams.update(suggested_params) mlflow.log_params(suggested_params) else: raise Exception('\n No params to optimise by optuna\n A static function inside the NILM model should provide\n a dictionnary of params suggested by optuna\n see documentation for more details\n ') print('First model training for', self.optuna_params['appliance_name']) (net, dataloader) = self.get_net_and_loaders() if ((train_loader is None) or (val_loader is None)): self.data_loaders[self.optuna_params['appliance_name']] = dataloader dataloader = self.data_loaders[self.optuna_params['appliance_name']] data = dataloader(inputs=self._data['features'], targets=self.optuna_params['power']) (train_data, val_data) = torch.utils.data.random_split(data, [int((data.len * (1 - 0.15))), (data.len - int((data.len * (1 - 0.15))))], 
generator=torch.Generator().manual_seed(3407)) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) if (fold_idx is None): self.models[self.optuna_params['appliance_name']] = pilModel(net, self.hparams) (best_val_loss, path) = self.train_model(self.optuna_params['appliance_name'], train_loader, val_loader, self.optuna_params['exp_name'], (data.mean if (self.hparams['target_norm'] == 'z-norm') else None), (data.std if (self.hparams['target_norm'] == 'z-norm') else None), trial_idx=trial.number) else: self.models[self.optuna_params['appliance_name']][f'fold_{fold_idx}'] = pilModel(net, self.hparams) (best_val_loss, path) = self.train_model(self.optuna_params['appliance_name'], train_loader, val_loader, self.optuna_params['exp_name'], (data.mean if (self.hparams['target_norm'] == 'z-norm') else None), (data.std if (self.hparams['target_norm'] == 'z-norm') else None), trial_idx=trial.number, fold_idx=fold_idx, model=self.models[self.optuna_params['appliance_name']][f'fold_{fold_idx}']) trial.set_user_attr(key='best_run_id', value=mlflow.active_run().info.run_id) trial.set_user_attr(key='trial_ID', value=trial.number) trial.set_user_attr(key='path', value=path) return best_val_loss<|docstring|>The objective function to be used with optuna. 
This function requires the model under study to implement a static function called suggest_hparams() [see the model documentation for more informations] :param trial: Optuna.trial :param train_loader: training dataLoader for the current experiment. Defaults to None. :type train_loader: DataLoader, optional :param val_loader: validation dataLoader for the current experiment. Defaults to None. :type val_loader: DataLoader, optional :param fold_idx: Number of the fold of cross-validation is used. Defaults to None. :type fold_idx: int, optional :raises Exception: In case the model does not suggest any parameters. :return: The best validation loss aschieved :rtype: float<|endoftext|>
7cd969ed06d02c8e8c6e0fa8cf6e8683263ba4946eb324c3d0748fc2340676b9
def objective_cv(self, trial):
    """The objective function for Optuna when cross-validation is also used.

    :param trial: An optuna trial
    :type trial: Optuna.Trial
    :return: average of best loss validations for considered folds
    :rtype: float
    """
    # Chronological splits: TimeSeriesSplit never shuffles, so later folds
    # always validate on data that comes after their training window.
    splitter = TimeSeriesSplit(
        n_splits=self.hparams['kfolds'],
        test_size=self.hparams['test_size'],
        gap=self.hparams['gap'],
    )
    appliance = self.optuna_params['appliance_name']
    # One sub-model per fold will be registered under this appliance entry.
    self.models[appliance] = {}
    _, loader_factory = self.get_net_and_loaders()
    self.data_loaders[appliance] = loader_factory
    dataset = self.data_loaders[appliance](
        inputs=self._data['features'],
        targets=self.optuna_params['power'],
    )
    # Model-specific collate functions, when the model declares any.
    extra = NILM_MODELS[self.hparams['model_name']]['extra_params']
    fold_losses = []
    for fold_idx, (train_idx, valid_idx) in enumerate(splitter.split(range(len(dataset)))):
        print(f'started training for the fold {fold_idx}.')
        train_subset = torch.utils.data.Subset(dataset, train_idx)
        valid_subset = torch.utils.data.Subset(dataset, valid_idx)
        train_loader = torch.utils.data.DataLoader(
            train_subset,
            self.hparams['batch_size'],
            shuffle=True,
            collate_fn=extra['collate_fns'](self.hparams, sample=True) if 'collate_fns' in extra else None,
            num_workers=self.hparams['num_workers'],
        )
        val_loader = torch.utils.data.DataLoader(
            valid_subset,
            self.hparams['batch_size'],
            shuffle=False,
            collate_fn=extra['collate_fns'](self.hparams, sample=False) if 'collate_fns' in extra else None,
            num_workers=self.hparams['num_workers'],
        )
        fold_losses.append(self.objective(trial, train_loader, val_loader, fold_idx))
    return np.mean(fold_losses)
The objective function for Optuna when cross-validation is also used :param trial: An optuna trial :type trial: Optuna.Trial :return: average of best loss validations for considered folds :rtype: float
deep_nilmtk/disaggregate/nilm_experiment.py
objective_cv
reviwe/deep-nilmtk-v1
0
python
def objective_cv(self, trial): 'The objective function for Optuna when cross-validation is also used\n\n :param trial: An optuna trial\n :type trial: Optuna.Trial\n :return: average of best loss validations for considered folds\n :rtype: float\n ' fold = TimeSeriesSplit(n_splits=self.hparams['kfolds'], test_size=self.hparams['test_size'], gap=self.hparams['gap']) scores = [] (_, dataloader) = self.get_net_and_loaders() self.models[self.optuna_params['appliance_name']] = {} self.data_loaders[self.optuna_params['appliance_name']] = dataloader dataloader = self.data_loaders[self.optuna_params['appliance_name']] dataset = dataloader(inputs=self._data['features'], targets=self.optuna_params['power']) for (fold_idx, (train_idx, valid_idx)) in enumerate(fold.split(range(len(dataset)))): print(f'started training for the fold {fold_idx}.') train_data = torch.utils.data.Subset(dataset, train_idx) val_data = torch.utils.data.Subset(dataset, valid_idx) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mae_loss = self.objective(trial, train_loader, val_loader, fold_idx) scores.append(mae_loss) return np.mean(scores)
def objective_cv(self, trial): 'The objective function for Optuna when cross-validation is also used\n\n :param trial: An optuna trial\n :type trial: Optuna.Trial\n :return: average of best loss validations for considered folds\n :rtype: float\n ' fold = TimeSeriesSplit(n_splits=self.hparams['kfolds'], test_size=self.hparams['test_size'], gap=self.hparams['gap']) scores = [] (_, dataloader) = self.get_net_and_loaders() self.models[self.optuna_params['appliance_name']] = {} self.data_loaders[self.optuna_params['appliance_name']] = dataloader dataloader = self.data_loaders[self.optuna_params['appliance_name']] dataset = dataloader(inputs=self._data['features'], targets=self.optuna_params['power']) for (fold_idx, (train_idx, valid_idx)) in enumerate(fold.split(range(len(dataset)))): print(f'started training for the fold {fold_idx}.') train_data = torch.utils.data.Subset(dataset, train_idx) val_data = torch.utils.data.Subset(dataset, valid_idx) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mae_loss = self.objective(trial, train_loader, val_loader, fold_idx) scores.append(mae_loss) return np.mean(scores)<|docstring|>The objective function for Optuna when cross-validation is also used :param trial: An optuna trial :type trial: Optuna.Trial :return: average of best loss validations for considered folds :rtype: float<|endoftext|>
3fc66de6c80bf603a92bbbb6b5ca54a49adc552d5f833f7400c3f304f49d23d5
def get_net_and_loaders(self):
    """Instantiate the configured model and its dataset factory.

    :return: (model, dataloader)
    :rtype: tuple(nn.Module, torch.utils.data.Dataset)
    """
    # Resolve the registry entry once; it provides both the network class
    # and the matching dataset loader.
    entry = NILM_MODELS[self.hparams['model_name']]
    network = entry['model'](self.hparams)
    # The loader is returned partially applied so callers only supply
    # inputs/targets.
    loader_factory = partial(entry['loader'], params=self.hparams)
    return network, loader_factory
Returns an instance of the specified model and the correspanding dataloader :return: (model , dataloader) :rtype: tuple(nn.Module, torch.utils.data.Dataset)
deep_nilmtk/disaggregate/nilm_experiment.py
get_net_and_loaders
reviwe/deep-nilmtk-v1
0
python
def get_net_and_loaders(self): 'Returns an instance of the specified model and the correspanding dataloader\n\n :return: (model , dataloader)\n :rtype: tuple(nn.Module, torch.utils.data.Dataset)\n ' net = NILM_MODELS[self.hparams['model_name']]['model'](self.hparams) data = partial(NILM_MODELS[self.hparams['model_name']]['loader'], params=self.hparams) return (net, data)
def get_net_and_loaders(self): 'Returns an instance of the specified model and the correspanding dataloader\n\n :return: (model , dataloader)\n :rtype: tuple(nn.Module, torch.utils.data.Dataset)\n ' net = NILM_MODELS[self.hparams['model_name']]['model'](self.hparams) data = partial(NILM_MODELS[self.hparams['model_name']]['loader'], params=self.hparams) return (net, data)<|docstring|>Returns an instance of the specified model and the correspanding dataloader :return: (model , dataloader) :rtype: tuple(nn.Module, torch.utils.data.Dataset)<|endoftext|>
6712f1c4d321ce1cb297afff5f7eb859240a29524adac8848b58ffda99811061
def save_best_model(self, study, trial):
    """Optuna callback that records the identity of the best trial so far.

    When the just-finished trial is the study's current best, its number,
    MLflow run id and checkpoint path are copied onto the study's user
    attributes so they survive after optimisation ends.

    :param study: Optuna study
    :param trial: Optuna trial
    """
    # Only the best trial is worth remembering; bail out early otherwise.
    if study.best_trial.number != trial.number:
        return
    study.set_user_attr(key='trial_ID', value=trial.number)
    study.set_user_attr(key='best_run_id', value=trial.user_attrs['best_run_id'])
    study.set_user_attr(key='path', value=trial.user_attrs['path'])
Keeps track of the trial giving best results :param study: Optuna study :param trial: Optuna trial
deep_nilmtk/disaggregate/nilm_experiment.py
save_best_model
reviwe/deep-nilmtk-v1
0
python
def save_best_model(self, study, trial): 'Keeps track of the trial giving best results\n\n :param study: Optuna study\n :param trial: Optuna trial\n ' if (study.best_trial.number == trial.number): study.set_user_attr(key='trial_ID', value=trial.number) study.set_user_attr(key='best_run_id', value=trial.user_attrs['best_run_id']) study.set_user_attr(key='path', value=trial.user_attrs['path'])
def save_best_model(self, study, trial): 'Keeps track of the trial giving best results\n\n :param study: Optuna study\n :param trial: Optuna trial\n ' if (study.best_trial.number == trial.number): study.set_user_attr(key='trial_ID', value=trial.number) study.set_user_attr(key='best_run_id', value=trial.user_attrs['best_run_id']) study.set_user_attr(key='path', value=trial.user_attrs['path'])<|docstring|>Keeps track of the trial giving best results :param study: Optuna study :param trial: Optuna trial<|endoftext|>
e1d8872ad361929ab047025470236ff401d506c668c6982ba33b940e3ee0ffd8
def single_appliance_fit(self):
    """Train one model per appliance, optionally with hyper-parameter
    optimisation (Optuna) and/or time-series cross-validation.

    Checkpoints for each appliance are saved in a dedicated sub-directory
    of the configured checkpoints path; the original path is restored once
    all appliances have been processed.
    """

    def _collate(sample):
        # Some models ship a custom collate function ('collate_fns' entry);
        # otherwise fall back to the DataLoader default (None).
        extra = NILM_MODELS[self.hparams['model_name']]['extra_params']
        return extra['collate_fns'](self.hparams, sample=sample) if 'collate_fns' in extra else None

    self.exp_name = f"{self.hparams['model_name']}_{self.hparams['data']}_single_appliance_{self.hparams['experiment_label']}"
    original_checkpoint = self.hparams['checkpoints_path']
    for appliance_name, power in self._data['targets']:
        exp_name = f'{self.exp_name}_{appliance_name}'
        checkpoints = Path(original_checkpoint + f'{exp_name}')
        checkpoints.mkdir(parents=True, exist_ok=True)
        # Redirect checkpoints to the per-appliance directory while this
        # appliance is being trained.
        self.hparams.update({'checkpoints_path': original_checkpoint + f'{exp_name}',
                             'appliances': [appliance_name]})
        print(f'fit model for {exp_name}')
        if self.hparams['use_optuna']:
            study = optuna.create_study(study_name=exp_name, direction='minimize')
            self.optuna_params = {'power': power, 'appliance_name': appliance_name, 'exp_name': exp_name}
            if self.hparams['kfolds'] <= 1:
                study.optimize(self.objective, n_trials=self.hparams['n_trials'],
                               callbacks=[self.save_best_model])
                # Reload the best trial's weights into a fresh model.
                app_model, _ = self.get_net_and_loaders()
                checkpoint = torch.load(study.user_attrs['path'])
                model = pilModel(app_model, self.hparams)
                model.hparams['checkpoint_path'] = study.user_attrs['path']
                model.load_state_dict(checkpoint['state_dict'])
                model.eval()
                self.models[appliance_name] = model
            else:
                study.optimize(self.objective_cv, n_trials=self.hparams['n_trials'],
                               callbacks=[self.save_best_model])
            # Plotting is best-effort (plotly/kaleido may be unavailable);
            # never let it abort training. Was a bare `except:` which would
            # also have swallowed KeyboardInterrupt/SystemExit.
            try:
                fig1 = optuna.visualization.plot_param_importances(study)
                fig2 = optuna.visualization.plot_parallel_coordinate(study)
                fig2.write_image(self.hparams['checkpoints_path'] + '/_parallel_coordinate.pdf')
                fig1.write_image(self.hparams['checkpoints_path'] + '/_param_importance.pdf')
            except Exception:
                pass
            # Persist the full study for later inspection.
            results_df = study.trials_dataframe()
            results_df.to_csv(f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_{appliance_name}.csv")
            joblib.dump(study, f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_{appliance_name}.pkl")
            self.best_trials[appliance_name] = study.best_trial.number
            self.run_id[appliance_name] = study.user_attrs['best_run_id']
        elif self.hparams['kfolds'] > 1:
            # Plain time-series cross-validation without Optuna: one model
            # per fold, stored under self.models[appliance_name][f'fold_{i}'].
            self.models[appliance_name] = {}
            _, dataloader = self.get_net_and_loaders()
            self.data_loaders[appliance_name] = dataloader
            dataset = dataloader(inputs=self._data['features'], targets=power)
            fold = TimeSeriesSplit(n_splits=self.hparams['kfolds'],
                                   test_size=self.hparams['test_size'],
                                   gap=self.hparams['gap'])
            scores = []
            for fold_idx, (train_idx, valid_idx) in enumerate(fold.split(range(len(dataset)))):
                print(f'started training for the fold {fold_idx}.')
                app_model, _ = self.get_net_and_loaders()
                self.models[appliance_name][f'fold_{fold_idx}'] = pilModel(app_model, self.hparams)
                train_data = torch.utils.data.Subset(dataset, train_idx)
                val_data = torch.utils.data.Subset(dataset, valid_idx)
                train_loader = torch.utils.data.DataLoader(
                    train_data, self.hparams['batch_size'], shuffle=True,
                    collate_fn=_collate(True), num_workers=self.hparams['num_workers'])
                val_loader = torch.utils.data.DataLoader(
                    val_data, self.hparams['batch_size'], shuffle=False,
                    collate_fn=_collate(False), num_workers=self.hparams['num_workers'])
                mlflow.set_experiment(f'{appliance_name}')
                with mlflow.start_run(run_name=self.hparams['model_name']):
                    self.run_id[appliance_name] = mlflow.active_run().info.run_id
                    mlflow.log_params(self.hparams)
                    mae_loss = self.train_model(
                        appliance_name, train_loader, val_loader, exp_name,
                        dataset.mean if self.hparams['target_norm'] == 'z-norm' else None,
                        dataset.std if self.hparams['target_norm'] == 'z-norm' else None,
                        fold_idx=fold_idx,
                        model=self.models[appliance_name][f'fold_{fold_idx}'])
                    scores.append(mae_loss)
        else:
            # Simple hold-out training; reuse an existing model/loader when
            # this appliance was trained before (incremental retraining).
            if appliance_name not in self.models:
                print('First model training for', appliance_name)
                net, dataloader = self.get_net_and_loaders()
                self.models[appliance_name] = pilModel(net, self.hparams)
                self.data_loaders[appliance_name] = dataloader
            else:
                print('Started Retraining model for', appliance_name)
                dataloader = self.data_loaders[appliance_name]
            data = dataloader(inputs=self._data['features'], targets=power)
            # Deterministic 85/15 train/validation split.
            n_train = int(data.len * (1 - 0.15))
            train_data, val_data = torch.utils.data.random_split(
                data, [n_train, data.len - n_train],
                generator=torch.Generator().manual_seed(3407))
            train_loader = torch.utils.data.DataLoader(
                train_data, self.hparams['batch_size'], shuffle=True,
                collate_fn=_collate(True), num_workers=self.hparams['num_workers'])
            val_loader = torch.utils.data.DataLoader(
                val_data, self.hparams['batch_size'], shuffle=False,
                collate_fn=_collate(False), num_workers=self.hparams['num_workers'])
            mlflow.set_experiment(f'{appliance_name}')
            with mlflow.start_run():
                self.run_id[appliance_name] = mlflow.active_run().info.run_id
                mlflow.log_params(self.hparams)
                self.train_model(
                    appliance_name, train_loader, val_loader, exp_name,
                    data.mean if self.hparams['target_norm'] == 'z-norm' else 0,
                    data.std if self.hparams['target_norm'] == 'z-norm' else 1)
    # Restore the shared checkpoints path for subsequent calls.
    self.hparams.update({'checkpoints_path': original_checkpoint})
Train the specified models for each appliance separately taking into consideration the use of cross-validation and hyper-parameters optimisation. The checkpoints for each model are saved in the correspondng path.
deep_nilmtk/disaggregate/nilm_experiment.py
single_appliance_fit
reviwe/deep-nilmtk-v1
0
python
def single_appliance_fit(self): '\n Train the specified models for each appliance separately taking into consideration\n the use of cross-validation and hyper-parameters optimisation. The checkpoints for \n each model are saved in the correspondng path.\n ' self.exp_name = f"{self.hparams['model_name']}_{self.hparams['data']}_single_appliance_{self.hparams['experiment_label']}" original_checkpoint = self.hparams['checkpoints_path'] for (appliance_name, power) in self._data['targets']: exp_name = f'{self.exp_name}_{appliance_name}' checkpoints = Path((original_checkpoint + f'{exp_name}')) checkpoints.mkdir(parents=True, exist_ok=True) new_params = {'checkpoints_path': (original_checkpoint + f'{exp_name}'), 'appliances': [appliance_name]} self.hparams.update(new_params) print(f'fit model for {exp_name}') if self.hparams['use_optuna']: study = optuna.create_study(study_name=exp_name, direction='minimize') self.optuna_params = {'power': power, 'appliance_name': appliance_name, 'exp_name': exp_name} if (self.hparams['kfolds'] <= 1): study.optimize(self.objective, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) (app_model, _) = self.get_net_and_loaders() chechpoint = torch.load(study.user_attrs['path']) model = pilModel(app_model, self.hparams) model.hparams['checkpoint_path'] = study.user_attrs['path'] model.load_state_dict(chechpoint['state_dict']) model.eval() self.models[appliance_name] = model else: study.optimize(self.objective_cv, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) try: fig1 = optuna.visualization.plot_param_importances(study) fig2 = optuna.visualization.plot_parallel_coordinate(study) fig2.write_image((self.hparams['checkpoints_path'] + '/_parallel_coordinate.pdf')) fig1.write_image((self.hparams['checkpoints_path'] + '/_param_importance.pdf')) except: pass results_df = study.trials_dataframe() results_df.to_csv(f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_{appliance_name}.csv") 
joblib.dump(study, f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_{appliance_name}.pkl") self.best_trials[appliance_name] = study.best_trial.number (app_model, _) = self.get_net_and_loaders() self.run_id[appliance_name] = study.user_attrs['best_run_id'] elif (self.hparams['kfolds'] > 1): self.models[appliance_name] = {} (_, dataloader) = self.get_net_and_loaders() self.data_loaders[appliance_name] = dataloader dataset = dataloader(inputs=self._data['features'], targets=power) fold = TimeSeriesSplit(n_splits=self.hparams['kfolds'], test_size=self.hparams['test_size'], gap=self.hparams['gap']) scores = [] for (fold_idx, (train_idx, valid_idx)) in enumerate(fold.split(range(len(dataset)))): print(f'started training for the fold {fold_idx}.') (app_model, _) = self.get_net_and_loaders() self.models[appliance_name][f'fold_{fold_idx}'] = pilModel(app_model, self.hparams) train_data = torch.utils.data.Subset(dataset, train_idx) val_data = torch.utils.data.Subset(dataset, valid_idx) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'{appliance_name}') with mlflow.start_run(run_name=self.hparams['model_name']): self.run_id[appliance_name] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) mae_loss = self.train_model(appliance_name, train_loader, val_loader, exp_name, (dataset.mean if 
(self.hparams['target_norm'] == 'z-norm') else None), (dataset.std if (self.hparams['target_norm'] == 'z-norm') else None), fold_idx=fold_idx, model=self.models[appliance_name][f'fold_{fold_idx}']) scores.append(mae_loss) else: if (appliance_name not in self.models): print('First model training for', appliance_name) (net, dataloader) = self.get_net_and_loaders() self.models[appliance_name] = pilModel(net, self.hparams) self.data_loaders[appliance_name] = dataloader else: print('Started Retraining model for', appliance_name) dataloader = self.data_loaders[appliance_name] data = dataloader(inputs=self._data['features'], targets=power) (train_data, val_data) = torch.utils.data.random_split(data, [int((data.len * (1 - 0.15))), (data.len - int((data.len * (1 - 0.15))))], generator=torch.Generator().manual_seed(3407)) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'{appliance_name}') with mlflow.start_run(): self.run_id[appliance_name] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) self.train_model(appliance_name, train_loader, val_loader, exp_name, (data.mean if (self.hparams['target_norm'] == 'z-norm') else 0), (data.std if (self.hparams['target_norm'] == 'z-norm') else 1)) new_params = {'checkpoints_path': original_checkpoint} self.hparams.update(new_params)
def single_appliance_fit(self): '\n Train the specified models for each appliance separately taking into consideration\n the use of cross-validation and hyper-parameters optimisation. The checkpoints for \n each model are saved in the correspondng path.\n ' self.exp_name = f"{self.hparams['model_name']}_{self.hparams['data']}_single_appliance_{self.hparams['experiment_label']}" original_checkpoint = self.hparams['checkpoints_path'] for (appliance_name, power) in self._data['targets']: exp_name = f'{self.exp_name}_{appliance_name}' checkpoints = Path((original_checkpoint + f'{exp_name}')) checkpoints.mkdir(parents=True, exist_ok=True) new_params = {'checkpoints_path': (original_checkpoint + f'{exp_name}'), 'appliances': [appliance_name]} self.hparams.update(new_params) print(f'fit model for {exp_name}') if self.hparams['use_optuna']: study = optuna.create_study(study_name=exp_name, direction='minimize') self.optuna_params = {'power': power, 'appliance_name': appliance_name, 'exp_name': exp_name} if (self.hparams['kfolds'] <= 1): study.optimize(self.objective, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) (app_model, _) = self.get_net_and_loaders() chechpoint = torch.load(study.user_attrs['path']) model = pilModel(app_model, self.hparams) model.hparams['checkpoint_path'] = study.user_attrs['path'] model.load_state_dict(chechpoint['state_dict']) model.eval() self.models[appliance_name] = model else: study.optimize(self.objective_cv, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) try: fig1 = optuna.visualization.plot_param_importances(study) fig2 = optuna.visualization.plot_parallel_coordinate(study) fig2.write_image((self.hparams['checkpoints_path'] + '/_parallel_coordinate.pdf')) fig1.write_image((self.hparams['checkpoints_path'] + '/_param_importance.pdf')) except: pass results_df = study.trials_dataframe() results_df.to_csv(f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_{appliance_name}.csv") 
joblib.dump(study, f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_{appliance_name}.pkl") self.best_trials[appliance_name] = study.best_trial.number (app_model, _) = self.get_net_and_loaders() self.run_id[appliance_name] = study.user_attrs['best_run_id'] elif (self.hparams['kfolds'] > 1): self.models[appliance_name] = {} (_, dataloader) = self.get_net_and_loaders() self.data_loaders[appliance_name] = dataloader dataset = dataloader(inputs=self._data['features'], targets=power) fold = TimeSeriesSplit(n_splits=self.hparams['kfolds'], test_size=self.hparams['test_size'], gap=self.hparams['gap']) scores = [] for (fold_idx, (train_idx, valid_idx)) in enumerate(fold.split(range(len(dataset)))): print(f'started training for the fold {fold_idx}.') (app_model, _) = self.get_net_and_loaders() self.models[appliance_name][f'fold_{fold_idx}'] = pilModel(app_model, self.hparams) train_data = torch.utils.data.Subset(dataset, train_idx) val_data = torch.utils.data.Subset(dataset, valid_idx) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'{appliance_name}') with mlflow.start_run(run_name=self.hparams['model_name']): self.run_id[appliance_name] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) mae_loss = self.train_model(appliance_name, train_loader, val_loader, exp_name, (dataset.mean if 
(self.hparams['target_norm'] == 'z-norm') else None), (dataset.std if (self.hparams['target_norm'] == 'z-norm') else None), fold_idx=fold_idx, model=self.models[appliance_name][f'fold_{fold_idx}']) scores.append(mae_loss) else: if (appliance_name not in self.models): print('First model training for', appliance_name) (net, dataloader) = self.get_net_and_loaders() self.models[appliance_name] = pilModel(net, self.hparams) self.data_loaders[appliance_name] = dataloader else: print('Started Retraining model for', appliance_name) dataloader = self.data_loaders[appliance_name] data = dataloader(inputs=self._data['features'], targets=power) (train_data, val_data) = torch.utils.data.random_split(data, [int((data.len * (1 - 0.15))), (data.len - int((data.len * (1 - 0.15))))], generator=torch.Generator().manual_seed(3407)) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'{appliance_name}') with mlflow.start_run(): self.run_id[appliance_name] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) self.train_model(appliance_name, train_loader, val_loader, exp_name, (data.mean if (self.hparams['target_norm'] == 'z-norm') else 0), (data.std if (self.hparams['target_norm'] == 'z-norm') else 1)) new_params = {'checkpoints_path': original_checkpoint} self.hparams.update(new_params)<|docstring|>Train the specified models for each 
appliance separately taking into consideration the use of cross-validation and hyper-parameters optimisation. The checkpoints for each model are saved in the correspondng path.<|endoftext|>
fb609dd973cc13146630306c7a264b6131bd0935caea9db9735b3af77a3fc8cd
def train_model(self, appliance_name, train_loader, val_loader, exp_name, mean=None, std=None, trial_idx=None, fold_idx=None, model=None):
    """Train a single PyTorch model with pytorch-lightning.

    :param appliance_name: Name of the appliance to be modeled
    :type appliance_name: str
    :param train_loader: training dataLoader for the current appliance
    :type train_loader: DataLoader
    :param val_loader: validation dataLoader for the current appliance
    :type val_loader: DataLoader
    :param exp_name: the name of the experiment
    :type exp_name: str
    :param mean: mean value of the target appliance power. Defaults to None.
    :type mean: float, optional
    :param std: std value of the target appliance power. Defaults to None.
    :type std: float, optional
    :param trial_idx: ID of the current optuna trial if optuna is used. Defaults to None.
    :type trial_idx: int, optional
    :param fold_idx: the number of the fold if CV is used. Defaults to None.
    :type fold_idx: int, optional
    :param model: Lightning model of the current appliance. Defaults to None.
    :return: when Optuna is used, the best validation loss and the path to
        the best checkpoint; otherwise no value is returned.
    :rtype: tuple(float, str)
    """
    # Build a "/trial_i/fold_j" suffix shared by the checkpoint directory
    # and the logger version so artifacts of different runs never collide.
    suffix = ''
    if trial_idx is not None:
        suffix += f'/trial_{trial_idx}'
    if fold_idx is not None:
        suffix += f'/fold_{fold_idx}'
    chkpt_dir = self.hparams['checkpoints_path'] + suffix
    latest_ckpt = get_latest_checkpoint(chkpt_dir)
    if model is None:
        model = self.models[appliance_name]
    if self.hparams['target_norm'] == 'z-norm':
        # Remember the normalisation statistics so predictions can be
        # de-normalised later.
        self.appliance_params[appliance_name] = {'mean': mean, 'std': std}
    ckpt_cb = pl.callbacks.model_checkpoint.ModelCheckpoint(
        dirpath=chkpt_dir, monitor='val_mae', mode='min', save_top_k=1)
    stop_cb = pl.callbacks.EarlyStopping(
        monitor='val_mae', min_delta=0.0001,
        patience=self.hparams['patience_check'], mode='min')
    logger = DictLogger(self.hparams['logs_path'], name=exp_name,
                        version='single_appliance_experiment' + suffix)
    trainer = pl.Trainer(
        logger=logger,
        gradient_clip_val=self.hparams['clip_value'],
        max_epochs=self.hparams['max_nb_epochs'],
        callbacks=[stop_cb, ckpt_cb],
        gpus=-1 if torch.cuda.is_available() else None,
        # Optuna trials always start fresh; otherwise resume from the
        # latest checkpoint in this directory (None when there is none).
        resume_from_checkpoint=None if self.hparams['use_optuna'] else latest_ckpt)
    if self.hparams['train']:
        trainer.fit(model, train_loader, val_loader)
    if len(logger.metrics) >= 2 and self.hparams['use_optuna']:
        return logger.metrics[-2]['val_loss'], ckpt_cb.best_model_path
Trains a single PyTorch model. :param appliance_name: Name of teh appliance to be modeled :type appliance_name: str :param train_loader: training dataLoader for the current appliance :type train_loader: DataLoader :param val_loader: validation dataLoader for the current appliance :type val_loader: DataLoader :param exp_name: the name of the experiment :type exp_name: str :param mean: mean value of the target appliance power. Defaults to None. :type mean: float, optional :param std: std value of the target applaince power. Defaults to None. :type std: float, optional :param trial_idx: ID of the current optuna trial if optuna is used. Defaults to None. :type trial_idx: int, optional :param fold_idx: the number of the fold if CV is used. Defaults to None. :type fold_idx: int, optional :param model: Lightning model of the current appliance. Defaults to None. :return: in the case of using Optuna, it return the best validation loss and the path to the best checkpoint. :rtype: tuple(int, str)
deep_nilmtk/disaggregate/nilm_experiment.py
train_model
reviwe/deep-nilmtk-v1
0
python
def train_model(self, appliance_name, train_loader, val_loader, exp_name, mean=None, std=None, trial_idx=None, fold_idx=None, model=None): 'Trains a single PyTorch model.\n\n :param appliance_name: Name of teh appliance to be modeled\n :type appliance_name: str\n :param train_loader: training dataLoader for the current appliance\n :type train_loader: DataLoader\n :param val_loader: validation dataLoader for the current appliance\n :type val_loader: DataLoader\n :param exp_name: the name of the experiment\n :type exp_name: str\n :param mean: mean value of the target appliance power. Defaults to None.\n :type mean: float, optional\n :param std: std value of the target applaince power. Defaults to None.\n :type std: float, optional\n :param trial_idx: ID of the current optuna trial if optuna is used. Defaults to None.\n :type trial_idx: int, optional\n :param fold_idx: the number of the fold if CV is used. Defaults to None.\n :type fold_idx: int, optional\n :param model: Lightning model of the current appliance. 
Defaults to None.\n :return: in the case of using Optuna, it return the best validation loss and the path to the best checkpoint.\n :rtype: tuple(int, str)\n ' chkpt_path = self.hparams['checkpoints_path'] version = if (trial_idx is not None): chkpt_path += f'/trial_{trial_idx}' version += f'/trial_{trial_idx}' if (fold_idx is not None): chkpt_path += f'/fold_{fold_idx}' version += f'/fold_{fold_idx}' best_checkpoint = get_latest_checkpoint(chkpt_path) model = (model if (model is not None) else self.models[appliance_name]) if (self.hparams['target_norm'] == 'z-norm'): self.appliance_params[appliance_name] = {'mean': mean, 'std': std} checkpoint_callback = pl.callbacks.model_checkpoint.ModelCheckpoint(dirpath=chkpt_path, monitor='val_mae', mode='min', save_top_k=1) early_stop_callback = pl.callbacks.EarlyStopping(monitor='val_mae', min_delta=0.0001, patience=self.hparams['patience_check'], mode='min') logger = DictLogger(self.hparams['logs_path'], name=exp_name, version=(('single_appliance_experiment' + version) if (version != ) else 'single_appliance_experiment')) trainer = pl.Trainer(logger=logger, gradient_clip_val=self.hparams['clip_value'], max_epochs=self.hparams['max_nb_epochs'], callbacks=[early_stop_callback, checkpoint_callback], gpus=((- 1) if torch.cuda.is_available() else None), resume_from_checkpoint=(best_checkpoint if (not self.hparams['use_optuna']) else None)) if self.hparams['train']: trainer.fit(model, train_loader, val_loader) if (len(logger.metrics) >= 2): if self.hparams['use_optuna']: return (logger.metrics[(- 2)]['val_loss'], checkpoint_callback.best_model_path)
def train_model(self, appliance_name, train_loader, val_loader, exp_name, mean=None, std=None, trial_idx=None, fold_idx=None, model=None): 'Trains a single PyTorch model.\n\n :param appliance_name: Name of teh appliance to be modeled\n :type appliance_name: str\n :param train_loader: training dataLoader for the current appliance\n :type train_loader: DataLoader\n :param val_loader: validation dataLoader for the current appliance\n :type val_loader: DataLoader\n :param exp_name: the name of the experiment\n :type exp_name: str\n :param mean: mean value of the target appliance power. Defaults to None.\n :type mean: float, optional\n :param std: std value of the target applaince power. Defaults to None.\n :type std: float, optional\n :param trial_idx: ID of the current optuna trial if optuna is used. Defaults to None.\n :type trial_idx: int, optional\n :param fold_idx: the number of the fold if CV is used. Defaults to None.\n :type fold_idx: int, optional\n :param model: Lightning model of the current appliance. 
Defaults to None.\n :return: in the case of using Optuna, it return the best validation loss and the path to the best checkpoint.\n :rtype: tuple(int, str)\n ' chkpt_path = self.hparams['checkpoints_path'] version = if (trial_idx is not None): chkpt_path += f'/trial_{trial_idx}' version += f'/trial_{trial_idx}' if (fold_idx is not None): chkpt_path += f'/fold_{fold_idx}' version += f'/fold_{fold_idx}' best_checkpoint = get_latest_checkpoint(chkpt_path) model = (model if (model is not None) else self.models[appliance_name]) if (self.hparams['target_norm'] == 'z-norm'): self.appliance_params[appliance_name] = {'mean': mean, 'std': std} checkpoint_callback = pl.callbacks.model_checkpoint.ModelCheckpoint(dirpath=chkpt_path, monitor='val_mae', mode='min', save_top_k=1) early_stop_callback = pl.callbacks.EarlyStopping(monitor='val_mae', min_delta=0.0001, patience=self.hparams['patience_check'], mode='min') logger = DictLogger(self.hparams['logs_path'], name=exp_name, version=(('single_appliance_experiment' + version) if (version != ) else 'single_appliance_experiment')) trainer = pl.Trainer(logger=logger, gradient_clip_val=self.hparams['clip_value'], max_epochs=self.hparams['max_nb_epochs'], callbacks=[early_stop_callback, checkpoint_callback], gpus=((- 1) if torch.cuda.is_available() else None), resume_from_checkpoint=(best_checkpoint if (not self.hparams['use_optuna']) else None)) if self.hparams['train']: trainer.fit(model, train_loader, val_loader) if (len(logger.metrics) >= 2): if self.hparams['use_optuna']: return (logger.metrics[(- 2)]['val_loss'], checkpoint_callback.best_model_path)<|docstring|>Trains a single PyTorch model. 
:param appliance_name: Name of teh appliance to be modeled :type appliance_name: str :param train_loader: training dataLoader for the current appliance :type train_loader: DataLoader :param val_loader: validation dataLoader for the current appliance :type val_loader: DataLoader :param exp_name: the name of the experiment :type exp_name: str :param mean: mean value of the target appliance power. Defaults to None. :type mean: float, optional :param std: std value of the target applaince power. Defaults to None. :type std: float, optional :param trial_idx: ID of the current optuna trial if optuna is used. Defaults to None. :type trial_idx: int, optional :param fold_idx: the number of the fold if CV is used. Defaults to None. :type fold_idx: int, optional :param model: Lightning model of the current appliance. Defaults to None. :return: in the case of using Optuna, it return the best validation loss and the path to the best checkpoint. :rtype: tuple(int, str)<|endoftext|>
60e7f3a6ff77286be77987327ec2d86ed0e2c20dc70deb08771519a8a4287e9e
def multi_appliance_disaggregate(self, test_main_list, model=None, do_preprocessing=True): "\n Perfroms load disaggregtaion for single appliance models. If Optuna was used during the \n training phase, it disaggregtaes the test_main_list using only the best trial. \n If cross-validation is used during training, it returns the average of predictions \n cross all folds for each applaince. In this later case, the predictions for each fold \n are also logged in the results folder under the name \n ['model_name']_[appliance_name]_all_folds_predictions.p. \n Alternatively, when both Optuna and cross-validation are used, it returns the average predictions \n of all folds for only the best trial.\n\n :param test_main_list: Aggregate power measurements\n :type test_main_list: liste of pd.DataFrame\n :param model: Pre-trained appliance's models. Defaults to None.\n :type model: dict, optional\n :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True\n :type do_preprocessing: bool, optional\n :return: estimated power consumption of the considered appliances.\n :rtype: liste of dict\n " if (model is not None): self.models = model if do_preprocessing: test_main_list = data_preprocessing(test_main_list, None, self.hparams['feature_type'], self.hparams['alpha'], self.hparams['input_norm'], self.hparams['main_mu'], self.hparams['main_std'], self.hparams['q_filter']) test_predictions = [] test_results = [] for test_main in test_main_list: test_main = test_main.values disggregation_dict = {} result_dict = {} dataloader = self.data_loaders['Multi-appliance'] model = self.models['Multi-appliance'] data = dataloader(inputs=test_main, targets=None, params=self.hparams) test_loader = torch.utils.data.DataLoader(data, self.hparams['batch_size'], collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), 
shuffle=False, num_workers=self.hparams['num_workers']) exp_name = (self.hparams['checkpoints_path'] + f'{self.exp_name}_Multi-appliance') if self.hparams['use_optuna']: exp_name += f"/trial_{self.best_trials['Multi-appliance']}/" if (self.hparams['kfolds'] > 1): app_result_cross_fold = [] dump_results = {} for fold in model: checkpoint_path = get_latest_checkpoint((exp_name + f'/{fold}')) chechpoint = torch.load(checkpoint_path) model_fold = model[fold] model_fold.load_state_dict(chechpoint['state_dict']) model_fold.eval() network = model_fold.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params['Multi-appliance']['mean'] network.std = self.appliance_params['Multi-appliance']['std'] elif (self.hparams['target_norm'] == 'min-max'): network.min = self.appliance_params['Multi-appliance']['min'] network.max = self.appliance_params['Multi-appliance']['max'] results = network.predict(model_fold, test_loader) df = results['pred'].cpu().numpy() app_result_cross_fold.append(df) dump_results[fold] = df dump_results['mean_preditions'] = pd.Series(np.mean(np.array(app_result_cross_fold), axis=0)) dump_results['std_predictions'] = pd.Series(np.std(np.array(app_result_cross_fold), axis=0)) dump_results['min_predictions'] = pd.Series(np.min(np.array(app_result_cross_fold), axis=0)) dump_results['max_predictions'] = pd.Series(np.max(np.array(app_result_cross_fold), axis=0)) pickle.dump(dump_results, open(f"{self.hparams['results_path']}/{self.hparams['model_name']}_{appliance}_all_folds_predictions.p", 'wb')) df = np.mean(np.array(app_result_cross_fold), axis=0) else: checkpoint_path = get_latest_checkpoint(exp_name) chechpoint = torch.load(checkpoint_path) model.load_state_dict(chechpoint['state_dict']) model.eval() network = model.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params['Multi-appliance']['mean'] network.std = self.appliance_params['Multi-appliance']['std'] elif 
(self.hparams['target_norm'] == 'min-max'): network.min = self.appliance_params['Multi-appliance']['min'] network.max = self.appliance_params['Multi-appliance']['max'] results = network.predict(model, test_loader) df = results['pred'].cpu().numpy() disggregation_dict = {appliance: df[(:, i)].flatten() for (i, appliance) in enumerate(self.hparams['appliances'])} result_dict = {appliance: {key: results[key][(:, i)].flatten() for key in results} for (i, appliance) in enumerate(self.hparams['appliances'])} results = pd.DataFrame(disggregation_dict, dtype='float32') test_predictions.append(results) test_results.append(result_dict) return test_predictions
Perfroms load disaggregtaion for single appliance models. If Optuna was used during the training phase, it disaggregtaes the test_main_list using only the best trial. If cross-validation is used during training, it returns the average of predictions cross all folds for each applaince. In this later case, the predictions for each fold are also logged in the results folder under the name ['model_name']_[appliance_name]_all_folds_predictions.p. Alternatively, when both Optuna and cross-validation are used, it returns the average predictions of all folds for only the best trial. :param test_main_list: Aggregate power measurements :type test_main_list: liste of pd.DataFrame :param model: Pre-trained appliance's models. Defaults to None. :type model: dict, optional :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True :type do_preprocessing: bool, optional :return: estimated power consumption of the considered appliances. :rtype: liste of dict
deep_nilmtk/disaggregate/nilm_experiment.py
multi_appliance_disaggregate
reviwe/deep-nilmtk-v1
0
python
def multi_appliance_disaggregate(self, test_main_list, model=None, do_preprocessing=True): "\n Perfroms load disaggregtaion for single appliance models. If Optuna was used during the \n training phase, it disaggregtaes the test_main_list using only the best trial. \n If cross-validation is used during training, it returns the average of predictions \n cross all folds for each applaince. In this later case, the predictions for each fold \n are also logged in the results folder under the name \n ['model_name']_[appliance_name]_all_folds_predictions.p. \n Alternatively, when both Optuna and cross-validation are used, it returns the average predictions \n of all folds for only the best trial.\n\n :param test_main_list: Aggregate power measurements\n :type test_main_list: liste of pd.DataFrame\n :param model: Pre-trained appliance's models. Defaults to None.\n :type model: dict, optional\n :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True\n :type do_preprocessing: bool, optional\n :return: estimated power consumption of the considered appliances.\n :rtype: liste of dict\n " if (model is not None): self.models = model if do_preprocessing: test_main_list = data_preprocessing(test_main_list, None, self.hparams['feature_type'], self.hparams['alpha'], self.hparams['input_norm'], self.hparams['main_mu'], self.hparams['main_std'], self.hparams['q_filter']) test_predictions = [] test_results = [] for test_main in test_main_list: test_main = test_main.values disggregation_dict = {} result_dict = {} dataloader = self.data_loaders['Multi-appliance'] model = self.models['Multi-appliance'] data = dataloader(inputs=test_main, targets=None, params=self.hparams) test_loader = torch.utils.data.DataLoader(data, self.hparams['batch_size'], collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), 
shuffle=False, num_workers=self.hparams['num_workers']) exp_name = (self.hparams['checkpoints_path'] + f'{self.exp_name}_Multi-appliance') if self.hparams['use_optuna']: exp_name += f"/trial_{self.best_trials['Multi-appliance']}/" if (self.hparams['kfolds'] > 1): app_result_cross_fold = [] dump_results = {} for fold in model: checkpoint_path = get_latest_checkpoint((exp_name + f'/{fold}')) chechpoint = torch.load(checkpoint_path) model_fold = model[fold] model_fold.load_state_dict(chechpoint['state_dict']) model_fold.eval() network = model_fold.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params['Multi-appliance']['mean'] network.std = self.appliance_params['Multi-appliance']['std'] elif (self.hparams['target_norm'] == 'min-max'): network.min = self.appliance_params['Multi-appliance']['min'] network.max = self.appliance_params['Multi-appliance']['max'] results = network.predict(model_fold, test_loader) df = results['pred'].cpu().numpy() app_result_cross_fold.append(df) dump_results[fold] = df dump_results['mean_preditions'] = pd.Series(np.mean(np.array(app_result_cross_fold), axis=0)) dump_results['std_predictions'] = pd.Series(np.std(np.array(app_result_cross_fold), axis=0)) dump_results['min_predictions'] = pd.Series(np.min(np.array(app_result_cross_fold), axis=0)) dump_results['max_predictions'] = pd.Series(np.max(np.array(app_result_cross_fold), axis=0)) pickle.dump(dump_results, open(f"{self.hparams['results_path']}/{self.hparams['model_name']}_{appliance}_all_folds_predictions.p", 'wb')) df = np.mean(np.array(app_result_cross_fold), axis=0) else: checkpoint_path = get_latest_checkpoint(exp_name) chechpoint = torch.load(checkpoint_path) model.load_state_dict(chechpoint['state_dict']) model.eval() network = model.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params['Multi-appliance']['mean'] network.std = self.appliance_params['Multi-appliance']['std'] elif 
(self.hparams['target_norm'] == 'min-max'): network.min = self.appliance_params['Multi-appliance']['min'] network.max = self.appliance_params['Multi-appliance']['max'] results = network.predict(model, test_loader) df = results['pred'].cpu().numpy() disggregation_dict = {appliance: df[(:, i)].flatten() for (i, appliance) in enumerate(self.hparams['appliances'])} result_dict = {appliance: {key: results[key][(:, i)].flatten() for key in results} for (i, appliance) in enumerate(self.hparams['appliances'])} results = pd.DataFrame(disggregation_dict, dtype='float32') test_predictions.append(results) test_results.append(result_dict) return test_predictions
def multi_appliance_disaggregate(self, test_main_list, model=None, do_preprocessing=True): "\n Perfroms load disaggregtaion for single appliance models. If Optuna was used during the \n training phase, it disaggregtaes the test_main_list using only the best trial. \n If cross-validation is used during training, it returns the average of predictions \n cross all folds for each applaince. In this later case, the predictions for each fold \n are also logged in the results folder under the name \n ['model_name']_[appliance_name]_all_folds_predictions.p. \n Alternatively, when both Optuna and cross-validation are used, it returns the average predictions \n of all folds for only the best trial.\n\n :param test_main_list: Aggregate power measurements\n :type test_main_list: liste of pd.DataFrame\n :param model: Pre-trained appliance's models. Defaults to None.\n :type model: dict, optional\n :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True\n :type do_preprocessing: bool, optional\n :return: estimated power consumption of the considered appliances.\n :rtype: liste of dict\n " if (model is not None): self.models = model if do_preprocessing: test_main_list = data_preprocessing(test_main_list, None, self.hparams['feature_type'], self.hparams['alpha'], self.hparams['input_norm'], self.hparams['main_mu'], self.hparams['main_std'], self.hparams['q_filter']) test_predictions = [] test_results = [] for test_main in test_main_list: test_main = test_main.values disggregation_dict = {} result_dict = {} dataloader = self.data_loaders['Multi-appliance'] model = self.models['Multi-appliance'] data = dataloader(inputs=test_main, targets=None, params=self.hparams) test_loader = torch.utils.data.DataLoader(data, self.hparams['batch_size'], collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), 
shuffle=False, num_workers=self.hparams['num_workers']) exp_name = (self.hparams['checkpoints_path'] + f'{self.exp_name}_Multi-appliance') if self.hparams['use_optuna']: exp_name += f"/trial_{self.best_trials['Multi-appliance']}/" if (self.hparams['kfolds'] > 1): app_result_cross_fold = [] dump_results = {} for fold in model: checkpoint_path = get_latest_checkpoint((exp_name + f'/{fold}')) chechpoint = torch.load(checkpoint_path) model_fold = model[fold] model_fold.load_state_dict(chechpoint['state_dict']) model_fold.eval() network = model_fold.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params['Multi-appliance']['mean'] network.std = self.appliance_params['Multi-appliance']['std'] elif (self.hparams['target_norm'] == 'min-max'): network.min = self.appliance_params['Multi-appliance']['min'] network.max = self.appliance_params['Multi-appliance']['max'] results = network.predict(model_fold, test_loader) df = results['pred'].cpu().numpy() app_result_cross_fold.append(df) dump_results[fold] = df dump_results['mean_preditions'] = pd.Series(np.mean(np.array(app_result_cross_fold), axis=0)) dump_results['std_predictions'] = pd.Series(np.std(np.array(app_result_cross_fold), axis=0)) dump_results['min_predictions'] = pd.Series(np.min(np.array(app_result_cross_fold), axis=0)) dump_results['max_predictions'] = pd.Series(np.max(np.array(app_result_cross_fold), axis=0)) pickle.dump(dump_results, open(f"{self.hparams['results_path']}/{self.hparams['model_name']}_{appliance}_all_folds_predictions.p", 'wb')) df = np.mean(np.array(app_result_cross_fold), axis=0) else: checkpoint_path = get_latest_checkpoint(exp_name) chechpoint = torch.load(checkpoint_path) model.load_state_dict(chechpoint['state_dict']) model.eval() network = model.model.eval() if (self.hparams['target_norm'] == 'z-norm'): network.mean = self.appliance_params['Multi-appliance']['mean'] network.std = self.appliance_params['Multi-appliance']['std'] elif 
(self.hparams['target_norm'] == 'min-max'): network.min = self.appliance_params['Multi-appliance']['min'] network.max = self.appliance_params['Multi-appliance']['max'] results = network.predict(model, test_loader) df = results['pred'].cpu().numpy() disggregation_dict = {appliance: df[(:, i)].flatten() for (i, appliance) in enumerate(self.hparams['appliances'])} result_dict = {appliance: {key: results[key][(:, i)].flatten() for key in results} for (i, appliance) in enumerate(self.hparams['appliances'])} results = pd.DataFrame(disggregation_dict, dtype='float32') test_predictions.append(results) test_results.append(result_dict) return test_predictions<|docstring|>Perfroms load disaggregtaion for single appliance models. If Optuna was used during the training phase, it disaggregtaes the test_main_list using only the best trial. If cross-validation is used during training, it returns the average of predictions cross all folds for each applaince. In this later case, the predictions for each fold are also logged in the results folder under the name ['model_name']_[appliance_name]_all_folds_predictions.p. Alternatively, when both Optuna and cross-validation are used, it returns the average predictions of all folds for only the best trial. :param test_main_list: Aggregate power measurements :type test_main_list: liste of pd.DataFrame :param model: Pre-trained appliance's models. Defaults to None. :type model: dict, optional :param do_preprocessing: Specify if pre-processing need to be done or not, defaults to True :type do_preprocessing: bool, optional :return: estimated power consumption of the considered appliances. :rtype: liste of dict<|endoftext|>
b954b4599e5e4f227f0e9f0d920f7744e1038682b9362117339305d436fafeb8
def multi_appliance_fit(self): '\n Train the specified models for each appliance separately taking into consideration\n the use of cross-validation and hyper-parameters optimisation. The checkpoints for \n each model are saved in the correspondng path.\n ' self.exp_name = f"{self.hparams['model_name']}_{self.hparams['data']}_{self.hparams['experiment_label']}" original_checkpoint = self.hparams['checkpoints_path'] exp_name = f'{self.exp_name}_Multi-appliance' checkpoints = Path((original_checkpoint + f'{exp_name}')) checkpoints.mkdir(parents=True, exist_ok=True) new_params = {'checkpoints_path': (original_checkpoint + f'{exp_name}')} self.hparams.update(new_params) print(f'fit model for {exp_name}') if self.hparams['use_optuna']: study = optuna.create_study(study_name=exp_name, direction='minimize') self.optuna_params = {'power': self._data['targets'], 'appliance_name': 'Multi-appliance', 'exp_name': exp_name} if (self.hparams['kfolds'] <= 1): study.optimize(self.objective, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) (app_model, _) = self.get_net_and_loaders() chechpoint = torch.load(study.user_attrs['path']) model = pilModel(app_model, self.hparams) model.hparams['checkpoint_path'] = study.user_attrs['path'] model.load_state_dict(chechpoint['state_dict']) model.eval() self.models['Multi-appliance'] = model else: study.optimize(self.objective_cv, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) try: fig1 = optuna.visualization.plot_param_importances(study) fig2 = optuna.visualization.plot_parallel_coordinate(study) fig2.write_image((self.hparams['checkpoints_path'] + '/_parallel_coordinate.pdf')) fig1.write_image((self.hparams['checkpoints_path'] + '/_param_importance.pdf')) except: pass results_df = study.trials_dataframe() results_df.to_csv(f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_Multi-appliance.csv") joblib.dump(study, 
f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_Multi-appliance.pkl") self.best_trials['Multi-appliance'] = study.best_trial.number (app_model, _) = self.get_net_and_loaders() self.run_id['Multi-appliance'] = study.user_attrs['best_run_id'] elif (self.hparams['kfolds'] > 1): self.models['Multi-appliance'] = {} (_, dataloader) = self.get_net_and_loaders() self.data_loaders['Multi-appliance'] = dataloader dataset = dataloader(inputs=self._data['features'], targets=self._data['targets']) fold = TimeSeriesSplit(n_splits=self.hparams['kfolds'], test_size=self.hparams['test_size'], gap=self.hparams['gap']) scores = [] for (fold_idx, (train_idx, valid_idx)) in enumerate(fold.split(range(len(dataset)))): print(f'started training for the fold {fold_idx}.') (app_model, _) = self.get_net_and_loaders() self.models['Multi-appliance'][f'fold_{fold_idx}'] = pilModel(app_model, self.hparams) train_data = torch.utils.data.Subset(dataset, train_idx) val_data = torch.utils.data.Subset(dataset, valid_idx) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'Multi-appliance') with mlflow.start_run(run_name=self.hparams['model_name']): self.run_id['Multi-appliance'] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) mae_loss = self.train_model('Multi-appliance', train_loader, val_loader, exp_name, (dataset.mean if 
(self.hparams['target_norm'] == 'z-norm') else None), (dataset.std if (self.hparams['target_norm'] == 'z-norm') else None), fold_idx=fold_idx, model=self.models['Multi-appliance'][f'fold_{fold_idx}']) scores.append(mae_loss) else: if ('Multi-appliance' not in self.models): print('First model training for Multi-appliance model') (net, dataloader) = self.get_net_and_loaders() self.models['Multi-appliance'] = pilModel(net, self.hparams) self.data_loaders['Multi-appliance'] = dataloader else: print('Started Retraining Muti-appliance model') dataloader = self.data_loaders['Multi-appliance'] data = dataloader(inputs=self._data['features'], targets=self._data['targets']) (train_data, val_data) = torch.utils.data.random_split(data, [int((data.len * (1 - 0.15))), (data.len - int((data.len * (1 - 0.15))))], generator=torch.Generator().manual_seed(3407)) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'Multi-appliance') with mlflow.start_run(): self.run_id['Multi-appliance'] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) self.train_model('Multi-appliance', train_loader, val_loader, exp_name, (data.mean if (self.hparams['target_norm'] == 'z-norm') else 0), (data.std if (self.hparams['target_norm'] == 'z-norm') else 1)) new_params = {'checkpoints_path': original_checkpoint} self.hparams.update(new_params)
Train the specified models for each appliance separately taking into consideration the use of cross-validation and hyper-parameters optimisation. The checkpoints for each model are saved in the correspondng path.
deep_nilmtk/disaggregate/nilm_experiment.py
multi_appliance_fit
reviwe/deep-nilmtk-v1
0
python
def multi_appliance_fit(self): '\n Train the specified models for each appliance separately taking into consideration\n the use of cross-validation and hyper-parameters optimisation. The checkpoints for \n each model are saved in the correspondng path.\n ' self.exp_name = f"{self.hparams['model_name']}_{self.hparams['data']}_{self.hparams['experiment_label']}" original_checkpoint = self.hparams['checkpoints_path'] exp_name = f'{self.exp_name}_Multi-appliance' checkpoints = Path((original_checkpoint + f'{exp_name}')) checkpoints.mkdir(parents=True, exist_ok=True) new_params = {'checkpoints_path': (original_checkpoint + f'{exp_name}')} self.hparams.update(new_params) print(f'fit model for {exp_name}') if self.hparams['use_optuna']: study = optuna.create_study(study_name=exp_name, direction='minimize') self.optuna_params = {'power': self._data['targets'], 'appliance_name': 'Multi-appliance', 'exp_name': exp_name} if (self.hparams['kfolds'] <= 1): study.optimize(self.objective, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) (app_model, _) = self.get_net_and_loaders() chechpoint = torch.load(study.user_attrs['path']) model = pilModel(app_model, self.hparams) model.hparams['checkpoint_path'] = study.user_attrs['path'] model.load_state_dict(chechpoint['state_dict']) model.eval() self.models['Multi-appliance'] = model else: study.optimize(self.objective_cv, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) try: fig1 = optuna.visualization.plot_param_importances(study) fig2 = optuna.visualization.plot_parallel_coordinate(study) fig2.write_image((self.hparams['checkpoints_path'] + '/_parallel_coordinate.pdf')) fig1.write_image((self.hparams['checkpoints_path'] + '/_param_importance.pdf')) except: pass results_df = study.trials_dataframe() results_df.to_csv(f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_Multi-appliance.csv") joblib.dump(study, 
f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_Multi-appliance.pkl") self.best_trials['Multi-appliance'] = study.best_trial.number (app_model, _) = self.get_net_and_loaders() self.run_id['Multi-appliance'] = study.user_attrs['best_run_id'] elif (self.hparams['kfolds'] > 1): self.models['Multi-appliance'] = {} (_, dataloader) = self.get_net_and_loaders() self.data_loaders['Multi-appliance'] = dataloader dataset = dataloader(inputs=self._data['features'], targets=self._data['targets']) fold = TimeSeriesSplit(n_splits=self.hparams['kfolds'], test_size=self.hparams['test_size'], gap=self.hparams['gap']) scores = [] for (fold_idx, (train_idx, valid_idx)) in enumerate(fold.split(range(len(dataset)))): print(f'started training for the fold {fold_idx}.') (app_model, _) = self.get_net_and_loaders() self.models['Multi-appliance'][f'fold_{fold_idx}'] = pilModel(app_model, self.hparams) train_data = torch.utils.data.Subset(dataset, train_idx) val_data = torch.utils.data.Subset(dataset, valid_idx) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'Multi-appliance') with mlflow.start_run(run_name=self.hparams['model_name']): self.run_id['Multi-appliance'] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) mae_loss = self.train_model('Multi-appliance', train_loader, val_loader, exp_name, (dataset.mean if 
(self.hparams['target_norm'] == 'z-norm') else None), (dataset.std if (self.hparams['target_norm'] == 'z-norm') else None), fold_idx=fold_idx, model=self.models['Multi-appliance'][f'fold_{fold_idx}']) scores.append(mae_loss) else: if ('Multi-appliance' not in self.models): print('First model training for Multi-appliance model') (net, dataloader) = self.get_net_and_loaders() self.models['Multi-appliance'] = pilModel(net, self.hparams) self.data_loaders['Multi-appliance'] = dataloader else: print('Started Retraining Muti-appliance model') dataloader = self.data_loaders['Multi-appliance'] data = dataloader(inputs=self._data['features'], targets=self._data['targets']) (train_data, val_data) = torch.utils.data.random_split(data, [int((data.len * (1 - 0.15))), (data.len - int((data.len * (1 - 0.15))))], generator=torch.Generator().manual_seed(3407)) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'Multi-appliance') with mlflow.start_run(): self.run_id['Multi-appliance'] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) self.train_model('Multi-appliance', train_loader, val_loader, exp_name, (data.mean if (self.hparams['target_norm'] == 'z-norm') else 0), (data.std if (self.hparams['target_norm'] == 'z-norm') else 1)) new_params = {'checkpoints_path': original_checkpoint} self.hparams.update(new_params)
def multi_appliance_fit(self): '\n Train the specified models for each appliance separately taking into consideration\n the use of cross-validation and hyper-parameters optimisation. The checkpoints for \n each model are saved in the correspondng path.\n ' self.exp_name = f"{self.hparams['model_name']}_{self.hparams['data']}_{self.hparams['experiment_label']}" original_checkpoint = self.hparams['checkpoints_path'] exp_name = f'{self.exp_name}_Multi-appliance' checkpoints = Path((original_checkpoint + f'{exp_name}')) checkpoints.mkdir(parents=True, exist_ok=True) new_params = {'checkpoints_path': (original_checkpoint + f'{exp_name}')} self.hparams.update(new_params) print(f'fit model for {exp_name}') if self.hparams['use_optuna']: study = optuna.create_study(study_name=exp_name, direction='minimize') self.optuna_params = {'power': self._data['targets'], 'appliance_name': 'Multi-appliance', 'exp_name': exp_name} if (self.hparams['kfolds'] <= 1): study.optimize(self.objective, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) (app_model, _) = self.get_net_and_loaders() chechpoint = torch.load(study.user_attrs['path']) model = pilModel(app_model, self.hparams) model.hparams['checkpoint_path'] = study.user_attrs['path'] model.load_state_dict(chechpoint['state_dict']) model.eval() self.models['Multi-appliance'] = model else: study.optimize(self.objective_cv, n_trials=self.hparams['n_trials'], callbacks=[self.save_best_model]) try: fig1 = optuna.visualization.plot_param_importances(study) fig2 = optuna.visualization.plot_parallel_coordinate(study) fig2.write_image((self.hparams['checkpoints_path'] + '/_parallel_coordinate.pdf')) fig1.write_image((self.hparams['checkpoints_path'] + '/_param_importance.pdf')) except: pass results_df = study.trials_dataframe() results_df.to_csv(f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_Multi-appliance.csv") joblib.dump(study, 
f"{self.hparams['checkpoints_path']}/Seq2Point_Study_{exp_name}_Multi-appliance.pkl") self.best_trials['Multi-appliance'] = study.best_trial.number (app_model, _) = self.get_net_and_loaders() self.run_id['Multi-appliance'] = study.user_attrs['best_run_id'] elif (self.hparams['kfolds'] > 1): self.models['Multi-appliance'] = {} (_, dataloader) = self.get_net_and_loaders() self.data_loaders['Multi-appliance'] = dataloader dataset = dataloader(inputs=self._data['features'], targets=self._data['targets']) fold = TimeSeriesSplit(n_splits=self.hparams['kfolds'], test_size=self.hparams['test_size'], gap=self.hparams['gap']) scores = [] for (fold_idx, (train_idx, valid_idx)) in enumerate(fold.split(range(len(dataset)))): print(f'started training for the fold {fold_idx}.') (app_model, _) = self.get_net_and_loaders() self.models['Multi-appliance'][f'fold_{fold_idx}'] = pilModel(app_model, self.hparams) train_data = torch.utils.data.Subset(dataset, train_idx) val_data = torch.utils.data.Subset(dataset, valid_idx) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'Multi-appliance') with mlflow.start_run(run_name=self.hparams['model_name']): self.run_id['Multi-appliance'] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) mae_loss = self.train_model('Multi-appliance', train_loader, val_loader, exp_name, (dataset.mean if 
(self.hparams['target_norm'] == 'z-norm') else None), (dataset.std if (self.hparams['target_norm'] == 'z-norm') else None), fold_idx=fold_idx, model=self.models['Multi-appliance'][f'fold_{fold_idx}']) scores.append(mae_loss) else: if ('Multi-appliance' not in self.models): print('First model training for Multi-appliance model') (net, dataloader) = self.get_net_and_loaders() self.models['Multi-appliance'] = pilModel(net, self.hparams) self.data_loaders['Multi-appliance'] = dataloader else: print('Started Retraining Muti-appliance model') dataloader = self.data_loaders['Multi-appliance'] data = dataloader(inputs=self._data['features'], targets=self._data['targets']) (train_data, val_data) = torch.utils.data.random_split(data, [int((data.len * (1 - 0.15))), (data.len - int((data.len * (1 - 0.15))))], generator=torch.Generator().manual_seed(3407)) train_loader = torch.utils.data.DataLoader(train_data, self.hparams['batch_size'], shuffle=True, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=True) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) val_loader = torch.utils.data.DataLoader(val_data, self.hparams['batch_size'], shuffle=False, collate_fn=(NILM_MODELS[self.hparams['model_name']]['extra_params']['collate_fns'](self.hparams, sample=False) if ('collate_fns' in NILM_MODELS[self.hparams['model_name']]['extra_params']) else None), num_workers=self.hparams['num_workers']) mlflow.set_experiment(f'Multi-appliance') with mlflow.start_run(): self.run_id['Multi-appliance'] = mlflow.active_run().info.run_id mlflow.log_params(self.hparams) self.train_model('Multi-appliance', train_loader, val_loader, exp_name, (data.mean if (self.hparams['target_norm'] == 'z-norm') else 0), (data.std if (self.hparams['target_norm'] == 'z-norm') else 1)) new_params = {'checkpoints_path': original_checkpoint} self.hparams.update(new_params)<|docstring|>Train 
the specified models for each appliance separately taking into consideration the use of cross-validation and hyper-parameters optimisation. The checkpoints for each model are saved in the correspondng path.<|endoftext|>
6f713870ca44fdd7c64b8609185febcb535533540be9bbcd9e72f51aa65436e5
def parse_int(string): '\n Finds the first integer in a string without casting it.\n :param string:\n :return:\n ' matches = re.findall('(\\d+)', string) if matches: return matches[0] else: return None
Finds the first integer in a string without casting it. :param string: :return:
scraper/scraper/loaders.py
parse_int
viktorfa/F033583-project
2
python
def parse_int(string): '\n Finds the first integer in a string without casting it.\n :param string:\n :return:\n ' matches = re.findall('(\\d+)', string) if matches: return matches[0] else: return None
def parse_int(string): '\n Finds the first integer in a string without casting it.\n :param string:\n :return:\n ' matches = re.findall('(\\d+)', string) if matches: return matches[0] else: return None<|docstring|>Finds the first integer in a string without casting it. :param string: :return:<|endoftext|>
a32fb8a52de54b39ed2597ed14f8b01b7faa022cb22787cc39655f442af3d985
def parse_float(string): '\n Finds the first float in a string without casting it.\n :param string:\n :return:\n ' matches = re.findall('(\\d+\\.\\d+)', string) if matches: return matches[0] else: return None
Finds the first float in a string without casting it. :param string: :return:
scraper/scraper/loaders.py
parse_float
viktorfa/F033583-project
2
python
def parse_float(string): '\n Finds the first float in a string without casting it.\n :param string:\n :return:\n ' matches = re.findall('(\\d+\\.\\d+)', string) if matches: return matches[0] else: return None
def parse_float(string): '\n Finds the first float in a string without casting it.\n :param string:\n :return:\n ' matches = re.findall('(\\d+\\.\\d+)', string) if matches: return matches[0] else: return None<|docstring|>Finds the first float in a string without casting it. :param string: :return:<|endoftext|>
125f69b68d239b9a2af7179fcead7f26b7802f23b7ffc8c8451309db9f5c223c
@property def url_format(self): 'Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017' return '{}-{}-{}'.format(self.month, self.day, self.year)
Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017
tools/util_date.py
url_format
osvenskan/data_rescue_D62CD1E5
0
python
@property def url_format(self): return '{}-{}-{}'.format(self.month, self.day, self.year)
@property def url_format(self): return '{}-{}-{}'.format(self.month, self.day, self.year)<|docstring|>Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017<|endoftext|>
125f69b68d239b9a2af7179fcead7f26b7802f23b7ffc8c8451309db9f5c223c
@property def url_format(self): 'Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017' return '{}-{}-{}'.format(self.month, self.day, self.year)
Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017
tools/util_date.py
url_format
osvenskan/data_rescue_D62CD1E5
0
python
@property def url_format(self): return '{}-{}-{}'.format(self.month, self.day, self.year)
@property def url_format(self): return '{}-{}-{}'.format(self.month, self.day, self.year)<|docstring|>Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017<|endoftext|>
9b9c69f3faa45ab63c624a7974fe1ae222622f9967ec39ca296a2f24c2f0bfc6
@property def url_format(self): 'Return e.g. Tue Feb 14 2017' return self.strftime('%a %b %d %Y')
Return e.g. Tue Feb 14 2017
tools/util_date.py
url_format
osvenskan/data_rescue_D62CD1E5
0
python
@property def url_format(self): return self.strftime('%a %b %d %Y')
@property def url_format(self): return self.strftime('%a %b %d %Y')<|docstring|>Return e.g. Tue Feb 14 2017<|endoftext|>
bb8dd71d7b0d5ffa5600042a14e2889f088c3612eb76b730363caf8685550129
@property def url_format_for_geographic_type(self): 'Return e.g. Tue Feb 14 2017' return self.strftime('%a %b %d %Y')
Return e.g. Tue Feb 14 2017
tools/util_date.py
url_format_for_geographic_type
osvenskan/data_rescue_D62CD1E5
0
python
@property def url_format_for_geographic_type(self): return self.strftime('%a %b %d %Y')
@property def url_format_for_geographic_type(self): return self.strftime('%a %b %d %Y')<|docstring|>Return e.g. Tue Feb 14 2017<|endoftext|>
033f366a11599207fead6ff1ddfd9e36907b8620662114dc80d1f58703719538
@property def url_format_for_attribute(self): 'Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017' return '{}-{}-{}'.format(self.month, self.day, self.year)
Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017
tools/util_date.py
url_format_for_attribute
osvenskan/data_rescue_D62CD1E5
0
python
@property def url_format_for_attribute(self): return '{}-{}-{}'.format(self.month, self.day, self.year)
@property def url_format_for_attribute(self): return '{}-{}-{}'.format(self.month, self.day, self.year)<|docstring|>Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017<|endoftext|>
bb8dd71d7b0d5ffa5600042a14e2889f088c3612eb76b730363caf8685550129
@property def url_format_for_geographic_type(self): 'Return e.g. Tue Feb 14 2017' return self.strftime('%a %b %d %Y')
Return e.g. Tue Feb 14 2017
tools/util_date.py
url_format_for_geographic_type
osvenskan/data_rescue_D62CD1E5
0
python
@property def url_format_for_geographic_type(self): return self.strftime('%a %b %d %Y')
@property def url_format_for_geographic_type(self): return self.strftime('%a %b %d %Y')<|docstring|>Return e.g. Tue Feb 14 2017<|endoftext|>
033f366a11599207fead6ff1ddfd9e36907b8620662114dc80d1f58703719538
@property def url_format_for_attribute(self): 'Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017' return '{}-{}-{}'.format(self.month, self.day, self.year)
Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017
tools/util_date.py
url_format_for_attribute
osvenskan/data_rescue_D62CD1E5
0
python
@property def url_format_for_attribute(self): return '{}-{}-{}'.format(self.month, self.day, self.year)
@property def url_format_for_attribute(self): return '{}-{}-{}'.format(self.month, self.day, self.year)<|docstring|>Return m-d-yyyy, e.g. 2-14-2017 for Feb 14th 2017<|endoftext|>
a129211f82a3ee9841c1499eb3c91652bcd064d6878852523fa368f1dcafb448
def get_language(query: str) -> str: 'Tries to work out the highlight.js language of a given file name or\n shebang. Returns an empty string if none match.\n ' query = query.lower() for language in LANGUAGES: if query.endswith(language): return language return ''
Tries to work out the highlight.js language of a given file name or shebang. Returns an empty string if none match.
discordjspy/addons/jishaku/hljs.py
get_language
Gelbpunkt/discord.jspy
5
python
def get_language(query: str) -> str: 'Tries to work out the highlight.js language of a given file name or\n shebang. Returns an empty string if none match.\n ' query = query.lower() for language in LANGUAGES: if query.endswith(language): return language return
def get_language(query: str) -> str: 'Tries to work out the highlight.js language of a given file name or\n shebang. Returns an empty string if none match.\n ' query = query.lower() for language in LANGUAGES: if query.endswith(language): return language return <|docstring|>Tries to work out the highlight.js language of a given file name or shebang. Returns an empty string if none match.<|endoftext|>
eda9689e73dfc9ba66d6b44f57c675e7616cbc892162ed72291467b6b9406a2b
def notebook_node_from_string_list(string_list): '\n Reads a notebook from a string list and returns the NotebookNode\n object.\n\n :param string_list: The notebook file contents as list of strings\n (linewise).\n :return: The notebook as NotebookNode.\n ' return nbformat.reads(''.join(string_list), nbformat.NO_CONVERT)
Reads a notebook from a string list and returns the NotebookNode object. :param string_list: The notebook file contents as list of strings (linewise). :return: The notebook as NotebookNode.
venv/lib/python3.5/site-packages/bears/python/PEP8NotebookBear.py
notebook_node_from_string_list
prashant0598/CoffeeApp
0
python
def notebook_node_from_string_list(string_list): '\n Reads a notebook from a string list and returns the NotebookNode\n object.\n\n :param string_list: The notebook file contents as list of strings\n (linewise).\n :return: The notebook as NotebookNode.\n ' return nbformat.reads(.join(string_list), nbformat.NO_CONVERT)
def notebook_node_from_string_list(string_list): '\n Reads a notebook from a string list and returns the NotebookNode\n object.\n\n :param string_list: The notebook file contents as list of strings\n (linewise).\n :return: The notebook as NotebookNode.\n ' return nbformat.reads(.join(string_list), nbformat.NO_CONVERT)<|docstring|>Reads a notebook from a string list and returns the NotebookNode object. :param string_list: The notebook file contents as list of strings (linewise). :return: The notebook as NotebookNode.<|endoftext|>
77b7e08b4c4de40f6b138038232ee877ab2a8f3b1f86f9a37f3da930ee198a79
def notebook_node_to_string_list(notebook_node): '\n Writes a NotebookNode to a list of strings.\n\n :param notebook_node: The notebook as NotebookNode to write.\n :return: The notebook as list of strings (linewise).\n ' return nbformat.writes(notebook_node, nbformat.NO_CONVERT).splitlines(True)
Writes a NotebookNode to a list of strings. :param notebook_node: The notebook as NotebookNode to write. :return: The notebook as list of strings (linewise).
venv/lib/python3.5/site-packages/bears/python/PEP8NotebookBear.py
notebook_node_to_string_list
prashant0598/CoffeeApp
0
python
def notebook_node_to_string_list(notebook_node): '\n Writes a NotebookNode to a list of strings.\n\n :param notebook_node: The notebook as NotebookNode to write.\n :return: The notebook as list of strings (linewise).\n ' return nbformat.writes(notebook_node, nbformat.NO_CONVERT).splitlines(True)
def notebook_node_to_string_list(notebook_node): '\n Writes a NotebookNode to a list of strings.\n\n :param notebook_node: The notebook as NotebookNode to write.\n :return: The notebook as list of strings (linewise).\n ' return nbformat.writes(notebook_node, nbformat.NO_CONVERT).splitlines(True)<|docstring|>Writes a NotebookNode to a list of strings. :param notebook_node: The notebook as NotebookNode to write. :return: The notebook as list of strings (linewise).<|endoftext|>
57e0ea8dd5ae06f82fd743be968ab1697128d09c07dc5c2fc52fa4213e0477b4
def autopep8_fix_code_cell(source, options=None, apply_config=None): "\n Applies autopep8.fix_code and takes care of newline characters.\n\n autopep8.fix_code automatically adds a final newline at the end,\n e.g. ``autopep8.fix_code('a=1')`` yields 'a = 1\\n'.\n Note that this is not related to the 'W292' flag, i.e.\n ``autopep8.fix_code('a=1', options=dict(ignore=('W292',)))`` gives\n the same result.\n For notebook code cells, this behaviour does not make sense, hence\n newline is removed if ``source`` does not end with one.\n " source_corrected = autopep8.fix_code(source, apply_config=apply_config, options=options) if (not source.endswith('\n')): return source_corrected[:(- 1)] return source_corrected
Applies autopep8.fix_code and takes care of newline characters. autopep8.fix_code automatically adds a final newline at the end, e.g. ``autopep8.fix_code('a=1')`` yields 'a = 1\n'. Note that this is not related to the 'W292' flag, i.e. ``autopep8.fix_code('a=1', options=dict(ignore=('W292',)))`` gives the same result. For notebook code cells, this behaviour does not make sense, hence newline is removed if ``source`` does not end with one.
venv/lib/python3.5/site-packages/bears/python/PEP8NotebookBear.py
autopep8_fix_code_cell
prashant0598/CoffeeApp
0
python
def autopep8_fix_code_cell(source, options=None, apply_config=None): "\n Applies autopep8.fix_code and takes care of newline characters.\n\n autopep8.fix_code automatically adds a final newline at the end,\n e.g. ``autopep8.fix_code('a=1')`` yields 'a = 1\\n'.\n Note that this is not related to the 'W292' flag, i.e.\n ``autopep8.fix_code('a=1', options=dict(ignore=('W292',)))`` gives\n the same result.\n For notebook code cells, this behaviour does not make sense, hence\n newline is removed if ``source`` does not end with one.\n " source_corrected = autopep8.fix_code(source, apply_config=apply_config, options=options) if (not source.endswith('\n')): return source_corrected[:(- 1)] return source_corrected
def autopep8_fix_code_cell(source, options=None, apply_config=None): "\n Applies autopep8.fix_code and takes care of newline characters.\n\n autopep8.fix_code automatically adds a final newline at the end,\n e.g. ``autopep8.fix_code('a=1')`` yields 'a = 1\\n'.\n Note that this is not related to the 'W292' flag, i.e.\n ``autopep8.fix_code('a=1', options=dict(ignore=('W292',)))`` gives\n the same result.\n For notebook code cells, this behaviour does not make sense, hence\n newline is removed if ``source`` does not end with one.\n " source_corrected = autopep8.fix_code(source, apply_config=apply_config, options=options) if (not source.endswith('\n')): return source_corrected[:(- 1)] return source_corrected<|docstring|>Applies autopep8.fix_code and takes care of newline characters. autopep8.fix_code automatically adds a final newline at the end, e.g. ``autopep8.fix_code('a=1')`` yields 'a = 1\n'. Note that this is not related to the 'W292' flag, i.e. ``autopep8.fix_code('a=1', options=dict(ignore=('W292',)))`` gives the same result. For notebook code cells, this behaviour does not make sense, hence newline is removed if ``source`` does not end with one.<|endoftext|>
2d8dc63a0480f1633aaac830535d86dfd7584a6e08a6222fbe46f13c1f0ca6c3
def run(self, filename, file, max_line_length: int=79, indent_size: int=SpacingHelper.DEFAULT_TAB_WIDTH, pep_ignore: typed_list(str)=(), pep_select: typed_list(str)=(), local_pep8_config: bool=False): '\n Detects and fixes PEP8 incompliant code in Jupyter Notebooks. This bear\n will not change functionality of the code in any way.\n\n :param max_line_length: Maximum number of characters for a line.\n :param indent_size: Number of spaces per indent level.\n :param pep_ignore: A list of errors/warnings to ignore.\n :param pep_select: A list of errors/warnings to exclusively\n apply.\n :param local_pep8_config: Set to true if autopep8 should use a config\n file as if run normally from this directory.\n ' options = {'ignore': pep_ignore, 'select': pep_select, 'max_line_length': max_line_length, 'indent_size': indent_size} notebook_node = notebook_node_from_string_list(file) cells = notebook_node['cells'] for cell in cells: if (cell['cell_type'] != 'code'): continue cell['source'] = autopep8_fix_code_cell(cell['source'], local_pep8_config, options) corrected = notebook_node_to_string_list(notebook_node) if (file[(- 1)].endswith('\n') and (not corrected[(- 1)].endswith('\n'))): corrected[(- 1)] += '\n' diffs = Diff.from_string_arrays(file, corrected).split_diff() for diff in diffs: (yield Result(self, 'The code does not comply to PEP8.', affected_code=(diff.range(filename),), diffs={filename: diff}))
Detects and fixes PEP8 incompliant code in Jupyter Notebooks. This bear will not change functionality of the code in any way. :param max_line_length: Maximum number of characters for a line. :param indent_size: Number of spaces per indent level. :param pep_ignore: A list of errors/warnings to ignore. :param pep_select: A list of errors/warnings to exclusively apply. :param local_pep8_config: Set to true if autopep8 should use a config file as if run normally from this directory.
venv/lib/python3.5/site-packages/bears/python/PEP8NotebookBear.py
run
prashant0598/CoffeeApp
0
python
def run(self, filename, file, max_line_length: int=79, indent_size: int=SpacingHelper.DEFAULT_TAB_WIDTH, pep_ignore: typed_list(str)=(), pep_select: typed_list(str)=(), local_pep8_config: bool=False): '\n Detects and fixes PEP8 incompliant code in Jupyter Notebooks. This bear\n will not change functionality of the code in any way.\n\n :param max_line_length: Maximum number of characters for a line.\n :param indent_size: Number of spaces per indent level.\n :param pep_ignore: A list of errors/warnings to ignore.\n :param pep_select: A list of errors/warnings to exclusively\n apply.\n :param local_pep8_config: Set to true if autopep8 should use a config\n file as if run normally from this directory.\n ' options = {'ignore': pep_ignore, 'select': pep_select, 'max_line_length': max_line_length, 'indent_size': indent_size} notebook_node = notebook_node_from_string_list(file) cells = notebook_node['cells'] for cell in cells: if (cell['cell_type'] != 'code'): continue cell['source'] = autopep8_fix_code_cell(cell['source'], local_pep8_config, options) corrected = notebook_node_to_string_list(notebook_node) if (file[(- 1)].endswith('\n') and (not corrected[(- 1)].endswith('\n'))): corrected[(- 1)] += '\n' diffs = Diff.from_string_arrays(file, corrected).split_diff() for diff in diffs: (yield Result(self, 'The code does not comply to PEP8.', affected_code=(diff.range(filename),), diffs={filename: diff}))
def run(self, filename, file, max_line_length: int=79, indent_size: int=SpacingHelper.DEFAULT_TAB_WIDTH, pep_ignore: typed_list(str)=(), pep_select: typed_list(str)=(), local_pep8_config: bool=False): '\n Detects and fixes PEP8 incompliant code in Jupyter Notebooks. This bear\n will not change functionality of the code in any way.\n\n :param max_line_length: Maximum number of characters for a line.\n :param indent_size: Number of spaces per indent level.\n :param pep_ignore: A list of errors/warnings to ignore.\n :param pep_select: A list of errors/warnings to exclusively\n apply.\n :param local_pep8_config: Set to true if autopep8 should use a config\n file as if run normally from this directory.\n ' options = {'ignore': pep_ignore, 'select': pep_select, 'max_line_length': max_line_length, 'indent_size': indent_size} notebook_node = notebook_node_from_string_list(file) cells = notebook_node['cells'] for cell in cells: if (cell['cell_type'] != 'code'): continue cell['source'] = autopep8_fix_code_cell(cell['source'], local_pep8_config, options) corrected = notebook_node_to_string_list(notebook_node) if (file[(- 1)].endswith('\n') and (not corrected[(- 1)].endswith('\n'))): corrected[(- 1)] += '\n' diffs = Diff.from_string_arrays(file, corrected).split_diff() for diff in diffs: (yield Result(self, 'The code does not comply to PEP8.', affected_code=(diff.range(filename),), diffs={filename: diff}))<|docstring|>Detects and fixes PEP8 incompliant code in Jupyter Notebooks. This bear will not change functionality of the code in any way. :param max_line_length: Maximum number of characters for a line. :param indent_size: Number of spaces per indent level. :param pep_ignore: A list of errors/warnings to ignore. :param pep_select: A list of errors/warnings to exclusively apply. :param local_pep8_config: Set to true if autopep8 should use a config file as if run normally from this directory.<|endoftext|>
affe7a6e9615ddd89985087426ce951021b6a1c089646f4b050d5e368998b218
def run_file(file_path: Path, is_console_app: bool, args: str): ' Decide, if a file should be opened or executed and call the appropriate method ' if (not file_path.is_file()): return if is_file_executable(file_path): execute_app(file_path, is_console_app, args) else: open_file(file_path)
Decide, if a file should be opened or executed and call the appropriate method
src/conan_app_launcher/components/file_runner.py
run_file
goszpeti/conan_app_launcher
5
python
def run_file(file_path: Path, is_console_app: bool, args: str): ' ' if (not file_path.is_file()): return if is_file_executable(file_path): execute_app(file_path, is_console_app, args) else: open_file(file_path)
def run_file(file_path: Path, is_console_app: bool, args: str): ' ' if (not file_path.is_file()): return if is_file_executable(file_path): execute_app(file_path, is_console_app, args) else: open_file(file_path)<|docstring|>Decide, if a file should be opened or executed and call the appropriate method<|endoftext|>
b56c91fdfd6d5ae93dea84eb754007216edf6ab27bd17a4c2b6fc792b5ef825b
def execute_app(executable: Path, is_console_app: bool, args: str) -> int: '\n Executes an application with args and optionally spawns a new shell\n as specified in the app entry.\n Returns the pid of the new process.\n ' if executable.absolute().is_file(): cmd = [str(executable)] if (platform.system() == 'Windows'): creationflags = 0 if is_console_app: creationflags = subprocess.CREATE_NEW_CONSOLE if args: cmd += args.strip().split(' ') proc = subprocess.Popen(cmd, creationflags=creationflags) elif (platform.system() == 'Linux'): if is_console_app: cmd = ['x-terminal-emulator', '-e', str(executable)] if args: cmd += args.strip().split(' ') proc = subprocess.Popen(cmd) return proc.pid Logger().warning(f'No executable {str(executable)} to start.') return 0
Executes an application with args and optionally spawns a new shell as specified in the app entry. Returns the pid of the new process.
src/conan_app_launcher/components/file_runner.py
execute_app
goszpeti/conan_app_launcher
5
python
def execute_app(executable: Path, is_console_app: bool, args: str) -> int: '\n Executes an application with args and optionally spawns a new shell\n as specified in the app entry.\n Returns the pid of the new process.\n ' if executable.absolute().is_file(): cmd = [str(executable)] if (platform.system() == 'Windows'): creationflags = 0 if is_console_app: creationflags = subprocess.CREATE_NEW_CONSOLE if args: cmd += args.strip().split(' ') proc = subprocess.Popen(cmd, creationflags=creationflags) elif (platform.system() == 'Linux'): if is_console_app: cmd = ['x-terminal-emulator', '-e', str(executable)] if args: cmd += args.strip().split(' ') proc = subprocess.Popen(cmd) return proc.pid Logger().warning(f'No executable {str(executable)} to start.') return 0
def execute_app(executable: Path, is_console_app: bool, args: str) -> int: '\n Executes an application with args and optionally spawns a new shell\n as specified in the app entry.\n Returns the pid of the new process.\n ' if executable.absolute().is_file(): cmd = [str(executable)] if (platform.system() == 'Windows'): creationflags = 0 if is_console_app: creationflags = subprocess.CREATE_NEW_CONSOLE if args: cmd += args.strip().split(' ') proc = subprocess.Popen(cmd, creationflags=creationflags) elif (platform.system() == 'Linux'): if is_console_app: cmd = ['x-terminal-emulator', '-e', str(executable)] if args: cmd += args.strip().split(' ') proc = subprocess.Popen(cmd) return proc.pid Logger().warning(f'No executable {str(executable)} to start.') return 0<|docstring|>Executes an application with args and optionally spawns a new shell as specified in the app entry. Returns the pid of the new process.<|endoftext|>
0ade625e82d498e531884f6db2598e4bd8a7fd99479bdb1f334d8ad2729ad0cd
def open_file(file: Path): ' Open files with their assocoiated programs ' if file.absolute().is_file(): if (platform.system() == 'Windows'): os.startfile(str(file)) elif (platform.system() == 'Linux'): subprocess.call(('xdg-open', str(file)))
Open files with their assocoiated programs
src/conan_app_launcher/components/file_runner.py
open_file
goszpeti/conan_app_launcher
5
python
def open_file(file: Path): ' ' if file.absolute().is_file(): if (platform.system() == 'Windows'): os.startfile(str(file)) elif (platform.system() == 'Linux'): subprocess.call(('xdg-open', str(file)))
def open_file(file: Path): ' ' if file.absolute().is_file(): if (platform.system() == 'Windows'): os.startfile(str(file)) elif (platform.system() == 'Linux'): subprocess.call(('xdg-open', str(file)))<|docstring|>Open files with their assocoiated programs<|endoftext|>
62b52dcd07afd3f86b2707683ddbbaa54bbcfee89f3ee468316292a14086d520
def __users_me(self, **kwargs): 'users_me # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.users_me(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n {str: (bool, date, datetime, dict, float, int, list, str, none_type)}\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') return self.call_with_http_info(**kwargs)
users_me # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.users_me(async_req=True) >>> result = thread.get() Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: {str: (bool, date, datetime, dict, float, int, list, str, none_type)} If the method is called asynchronously, returns the request thread.
src/gretel_client/rest/api/users_api.py
__users_me
gretelai/gretel-python-client
23
python
def __users_me(self, **kwargs): 'users_me # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.users_me(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n {str: (bool, date, datetime, dict, float, int, list, str, none_type)}\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') return self.call_with_http_info(**kwargs)
def __users_me(self, **kwargs): 'users_me # noqa: E501\n\n This method makes a synchronous HTTP request by default. To make an\n asynchronous HTTP request, please pass async_req=True\n\n >>> thread = api.users_me(async_req=True)\n >>> result = thread.get()\n\n\n Keyword Args:\n _return_http_data_only (bool): response data without head status\n code and headers. Default is True.\n _preload_content (bool): if False, the urllib3.HTTPResponse object\n will be returned without reading/decoding response data.\n Default is True.\n _request_timeout (float/tuple): timeout setting for this request. If one\n number provided, it will be total request timeout. It can also\n be a pair (tuple) of (connection, read) timeouts.\n Default is None.\n _check_input_type (bool): specifies if type checking\n should be done one the data sent to the server.\n Default is True.\n _check_return_type (bool): specifies if type checking\n should be done one the data received from the server.\n Default is True.\n _host_index (int/None): specifies the index of the server\n that we want to use.\n Default is read from the configuration.\n async_req (bool): execute request asynchronously\n\n Returns:\n {str: (bool, date, datetime, dict, float, int, list, str, none_type)}\n If the method is called asynchronously, returns the request\n thread.\n ' kwargs['async_req'] = kwargs.get('async_req', False) kwargs['_return_http_data_only'] = kwargs.get('_return_http_data_only', True) kwargs['_preload_content'] = kwargs.get('_preload_content', True) kwargs['_request_timeout'] = kwargs.get('_request_timeout', None) kwargs['_check_input_type'] = kwargs.get('_check_input_type', True) kwargs['_check_return_type'] = kwargs.get('_check_return_type', True) kwargs['_host_index'] = kwargs.get('_host_index') return self.call_with_http_info(**kwargs)<|docstring|>users_me # noqa: E501 This method makes a synchronous HTTP request by default. 
To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.users_me(async_req=True) >>> result = thread.get() Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: {str: (bool, date, datetime, dict, float, int, list, str, none_type)} If the method is called asynchronously, returns the request thread.<|endoftext|>
46a06ed5aaf6d824302dcaea2be3529740c550983f65c1bbe17d92b2ae17a8d5
def _start(func: Callable, scheduler: Optional[Scheduler]=None) -> Observable: "Invokes the specified function asynchronously on the specified\n scheduler, surfacing the result through an observable sequence.\n\n Example:\n >>> res = rx3.start(lambda: pprint('hello'))\n >>> res = rx3.start(lambda: pprint('hello'), rx3.Scheduler.timeout)\n\n Args:\n func: Function to run asynchronously.\n scheduler: [Optional] Scheduler to run the function on. If\n not specified, defaults to Scheduler.timeout.\n\n Remarks:\n The function is called immediately, not during the subscription\n of the resulting sequence. Multiple subscriptions to the\n resulting sequence can observe the function's result.\n\n Returns:\n An observable sequence exposing the function's result value,\n or an exception.\n " return to_async(func, scheduler)()
Invokes the specified function asynchronously on the specified scheduler, surfacing the result through an observable sequence. Example: >>> res = rx3.start(lambda: pprint('hello')) >>> res = rx3.start(lambda: pprint('hello'), rx3.Scheduler.timeout) Args: func: Function to run asynchronously. scheduler: [Optional] Scheduler to run the function on. If not specified, defaults to Scheduler.timeout. Remarks: The function is called immediately, not during the subscription of the resulting sequence. Multiple subscriptions to the resulting sequence can observe the function's result. Returns: An observable sequence exposing the function's result value, or an exception.
rx3/core/observable/start.py
_start
samiur/RxPY
0
python
def _start(func: Callable, scheduler: Optional[Scheduler]=None) -> Observable: "Invokes the specified function asynchronously on the specified\n scheduler, surfacing the result through an observable sequence.\n\n Example:\n >>> res = rx3.start(lambda: pprint('hello'))\n >>> res = rx3.start(lambda: pprint('hello'), rx3.Scheduler.timeout)\n\n Args:\n func: Function to run asynchronously.\n scheduler: [Optional] Scheduler to run the function on. If\n not specified, defaults to Scheduler.timeout.\n\n Remarks:\n The function is called immediately, not during the subscription\n of the resulting sequence. Multiple subscriptions to the\n resulting sequence can observe the function's result.\n\n Returns:\n An observable sequence exposing the function's result value,\n or an exception.\n " return to_async(func, scheduler)()
def _start(func: Callable, scheduler: Optional[Scheduler]=None) -> Observable: "Invokes the specified function asynchronously on the specified\n scheduler, surfacing the result through an observable sequence.\n\n Example:\n >>> res = rx3.start(lambda: pprint('hello'))\n >>> res = rx3.start(lambda: pprint('hello'), rx3.Scheduler.timeout)\n\n Args:\n func: Function to run asynchronously.\n scheduler: [Optional] Scheduler to run the function on. If\n not specified, defaults to Scheduler.timeout.\n\n Remarks:\n The function is called immediately, not during the subscription\n of the resulting sequence. Multiple subscriptions to the\n resulting sequence can observe the function's result.\n\n Returns:\n An observable sequence exposing the function's result value,\n or an exception.\n " return to_async(func, scheduler)()<|docstring|>Invokes the specified function asynchronously on the specified scheduler, surfacing the result through an observable sequence. Example: >>> res = rx3.start(lambda: pprint('hello')) >>> res = rx3.start(lambda: pprint('hello'), rx3.Scheduler.timeout) Args: func: Function to run asynchronously. scheduler: [Optional] Scheduler to run the function on. If not specified, defaults to Scheduler.timeout. Remarks: The function is called immediately, not during the subscription of the resulting sequence. Multiple subscriptions to the resulting sequence can observe the function's result. Returns: An observable sequence exposing the function's result value, or an exception.<|endoftext|>
ed15beb46b7920ed4dbd3f1fe6a06324ec1044dc0af8b38411ce876a5b99a1f2
def get_active_announcements(): 'Get the active announcements.' now = arrow.utcnow().to(current_app.config['TIME_ZONE']).naive return Announcement.query.current.filter((Announcement.active == True), (Announcement.published < now)).order_by(db.desc(Announcement.published)).all()
Get the active announcements.
pygotham/news/__init__.py
get_active_announcements
PyGotham/pygotham
19
python
def get_active_announcements(): now = arrow.utcnow().to(current_app.config['TIME_ZONE']).naive return Announcement.query.current.filter((Announcement.active == True), (Announcement.published < now)).order_by(db.desc(Announcement.published)).all()
def get_active_announcements(): now = arrow.utcnow().to(current_app.config['TIME_ZONE']).naive return Announcement.query.current.filter((Announcement.active == True), (Announcement.published < now)).order_by(db.desc(Announcement.published)).all()<|docstring|>Get the active announcements.<|endoftext|>
ed0c8069cb1d8d32651516fba5b0c6ab2ed07d40ef684b0832f39ac8431e43ca
def get_active_call_to_action(): 'Return the active call to action.' now = arrow.utcnow().to(current_app.config['TIME_ZONE']).naive return CallToAction.query.current.filter((CallToAction.active == True), (CallToAction.begins < now), db.or_((CallToAction.ends > now), (CallToAction.ends == None))).order_by(CallToAction.begins, db.desc(CallToAction.ends)).first()
Return the active call to action.
pygotham/news/__init__.py
get_active_call_to_action
PyGotham/pygotham
19
python
def get_active_call_to_action(): now = arrow.utcnow().to(current_app.config['TIME_ZONE']).naive return CallToAction.query.current.filter((CallToAction.active == True), (CallToAction.begins < now), db.or_((CallToAction.ends > now), (CallToAction.ends == None))).order_by(CallToAction.begins, db.desc(CallToAction.ends)).first()
def get_active_call_to_action(): now = arrow.utcnow().to(current_app.config['TIME_ZONE']).naive return CallToAction.query.current.filter((CallToAction.active == True), (CallToAction.begins < now), db.or_((CallToAction.ends > now), (CallToAction.ends == None))).order_by(CallToAction.begins, db.desc(CallToAction.ends)).first()<|docstring|>Return the active call to action.<|endoftext|>
befa8a1763a1e3a99308ba5b2b8d06198ee11c1f8b844a49f2341fe34b08f4a3
def wjx_sump(url, filename): "filename = './t.xlsx' 本地正确答案所在名称\n " imp_data = xlsx_get.get_asw(filename, 1) net_hold = webdriver.Chrome('chromedriver.exe') net_hold.get(url) time.sleep(2) for i in range(1, (len(imp_data) + 1)): q_id = ('div' + str(i)) question_hold = net_hold.find_element_by_id(q_id) t_as = imp_data[i] try: q_type = question_hold.find_element_by_class_name('qtypetip').get_attribute('textContent') q_type = re.sub('[^\\u4e00-\\u9fa5]', '', q_type) if (q_type == ''): raise Exception('抛出一个异常') except Exception: q_type = '单选题' print(q_id, q_type) q_ui = question_hold.find_element_by_xpath('.//ul') list_li = q_ui.find_elements_by_xpath('./li') li_len = list_li.__len__() if (q_type == '多选题'): asw_list = mul_as(li_len, t_as) for li_i in range(1, (li_len + 1)): if (asw_list[(li_i - 1)] == 1): q_as_id = './/li[{}]'.format(li_i) q_as_hold = question_hold.find_element_by_xpath(q_as_id) q_as_hold.click() time.sleep(0.01) del asw_list elif (q_type == '单选题'): asw_o = sig_as(li_len, t_as[0], i) q_as_id = './/li[{}]'.format(asw_o) print() q_as_hold = question_hold.find_element_by_xpath(q_as_id) q_as_hold.click() del asw_o del question_hold del q_ui print('结束...') time.sleep((0.5 * random.random())) time.sleep(0.5) sump_hold = net_hold.find_element_by_xpath('//*[@id="submit_table"]/tbody/tr/td[1]') sump_hold.click() time.sleep(3) net_hold.quit()
filename = './t.xlsx' 本地正确答案所在名称
selenuim/wjx_obj.py
wjx_sump
yinboliu-git/-
0
python
def wjx_sump(url, filename): "\n " imp_data = xlsx_get.get_asw(filename, 1) net_hold = webdriver.Chrome('chromedriver.exe') net_hold.get(url) time.sleep(2) for i in range(1, (len(imp_data) + 1)): q_id = ('div' + str(i)) question_hold = net_hold.find_element_by_id(q_id) t_as = imp_data[i] try: q_type = question_hold.find_element_by_class_name('qtypetip').get_attribute('textContent') q_type = re.sub('[^\\u4e00-\\u9fa5]', , q_type) if (q_type == ): raise Exception('抛出一个异常') except Exception: q_type = '单选题' print(q_id, q_type) q_ui = question_hold.find_element_by_xpath('.//ul') list_li = q_ui.find_elements_by_xpath('./li') li_len = list_li.__len__() if (q_type == '多选题'): asw_list = mul_as(li_len, t_as) for li_i in range(1, (li_len + 1)): if (asw_list[(li_i - 1)] == 1): q_as_id = './/li[{}]'.format(li_i) q_as_hold = question_hold.find_element_by_xpath(q_as_id) q_as_hold.click() time.sleep(0.01) del asw_list elif (q_type == '单选题'): asw_o = sig_as(li_len, t_as[0], i) q_as_id = './/li[{}]'.format(asw_o) print() q_as_hold = question_hold.find_element_by_xpath(q_as_id) q_as_hold.click() del asw_o del question_hold del q_ui print('结束...') time.sleep((0.5 * random.random())) time.sleep(0.5) sump_hold = net_hold.find_element_by_xpath('//*[@id="submit_table"]/tbody/tr/td[1]') sump_hold.click() time.sleep(3) net_hold.quit()
def wjx_sump(url, filename): "\n " imp_data = xlsx_get.get_asw(filename, 1) net_hold = webdriver.Chrome('chromedriver.exe') net_hold.get(url) time.sleep(2) for i in range(1, (len(imp_data) + 1)): q_id = ('div' + str(i)) question_hold = net_hold.find_element_by_id(q_id) t_as = imp_data[i] try: q_type = question_hold.find_element_by_class_name('qtypetip').get_attribute('textContent') q_type = re.sub('[^\\u4e00-\\u9fa5]', , q_type) if (q_type == ): raise Exception('抛出一个异常') except Exception: q_type = '单选题' print(q_id, q_type) q_ui = question_hold.find_element_by_xpath('.//ul') list_li = q_ui.find_elements_by_xpath('./li') li_len = list_li.__len__() if (q_type == '多选题'): asw_list = mul_as(li_len, t_as) for li_i in range(1, (li_len + 1)): if (asw_list[(li_i - 1)] == 1): q_as_id = './/li[{}]'.format(li_i) q_as_hold = question_hold.find_element_by_xpath(q_as_id) q_as_hold.click() time.sleep(0.01) del asw_list elif (q_type == '单选题'): asw_o = sig_as(li_len, t_as[0], i) q_as_id = './/li[{}]'.format(asw_o) print() q_as_hold = question_hold.find_element_by_xpath(q_as_id) q_as_hold.click() del asw_o del question_hold del q_ui print('结束...') time.sleep((0.5 * random.random())) time.sleep(0.5) sump_hold = net_hold.find_element_by_xpath('//*[@id="submit_table"]/tbody/tr/td[1]') sump_hold.click() time.sleep(3) net_hold.quit()<|docstring|>filename = './t.xlsx' 本地正确答案所在名称<|endoftext|>
1e98bcc545e30d906c332a132a1a5a21fce7d0fef2130f0112d39e95c21b0345
def update_graph_memory(): "Use Q_G(φ(s), a) ← r + γ max_{a'}( Q_G (φ(s'), a')) )\n r: reward\n γ: discount \n φ: state vector. Each state is some s in S. \n "
Use Q_G(φ(s), a) ← r + γ max_{a'}( Q_G (φ(s'), a')) ) r: reward γ: discount φ: state vector. Each state is some s in S.
rl_memory/erik/old/value_prop_associative.py
update_graph_memory
eskalnes/RL_memory
0
python
def update_graph_memory(): "Use Q_G(φ(s), a) ← r + γ max_{a'}( Q_G (φ(s'), a')) )\n r: reward\n γ: discount \n φ: state vector. Each state is some s in S. \n "
def update_graph_memory(): "Use Q_G(φ(s), a) ← r + γ max_{a'}( Q_G (φ(s'), a')) )\n r: reward\n γ: discount \n φ: state vector. Each state is some s in S. \n "<|docstring|>Use Q_G(φ(s), a) ← r + γ max_{a'}( Q_G (φ(s'), a')) ) r: reward γ: discount φ: state vector. Each state is some s in S.<|endoftext|>
0b8cbc74fe6e202c15f48ac414b6086ab175ef0adbb65e4535d21dcddc49c4d1
def prepare_parser(): " Parse the command line arguments.\n\n Arguments:\n --input: Name of the input data folder, defaults to 'data'.\n --output: Name of the output file, defaults to 'output.txt'.\n Returns:\n The parser with all the arguments.\n " parser = argparse.ArgumentParser(description='Collect and convert JSON data to CSV file.') parser.add_argument('-i', '--input_data_folder', default='data', help='Name of the input data folder.') parser.add_argument('-o', '--output', default='output.csv', help='Name of the output file.') return parser
Parse the command line arguments. Arguments: --input: Name of the input data folder, defaults to 'data'. --output: Name of the output file, defaults to 'output.txt'. Returns: The parser with all the arguments.
json-to-csv-converter/converter.py
prepare_parser
vkaracic/review-parser
1
python
def prepare_parser(): " Parse the command line arguments.\n\n Arguments:\n --input: Name of the input data folder, defaults to 'data'.\n --output: Name of the output file, defaults to 'output.txt'.\n Returns:\n The parser with all the arguments.\n " parser = argparse.ArgumentParser(description='Collect and convert JSON data to CSV file.') parser.add_argument('-i', '--input_data_folder', default='data', help='Name of the input data folder.') parser.add_argument('-o', '--output', default='output.csv', help='Name of the output file.') return parser
def prepare_parser(): " Parse the command line arguments.\n\n Arguments:\n --input: Name of the input data folder, defaults to 'data'.\n --output: Name of the output file, defaults to 'output.txt'.\n Returns:\n The parser with all the arguments.\n " parser = argparse.ArgumentParser(description='Collect and convert JSON data to CSV file.') parser.add_argument('-i', '--input_data_folder', default='data', help='Name of the input data folder.') parser.add_argument('-o', '--output', default='output.csv', help='Name of the output file.') return parser<|docstring|>Parse the command line arguments. Arguments: --input: Name of the input data folder, defaults to 'data'. --output: Name of the output file, defaults to 'output.txt'. Returns: The parser with all the arguments.<|endoftext|>
064f92113f089a0e85dea0b8e80e4b3fa9d2687a68286cb82c29be814953ed8a
def main(): ' Reads JSON files from input data folder and saves as CSV with labeled sentiments.\n Labels are 1 for positive, 0 for negative.\n ' POSITIVE = 1 NEGATIVE = 0 parser = prepare_parser() args = parser.parse_args() csv_file = csv.writer(open(args.output, 'w')) csv_file.writerow(['review', 'sentiment']) for filename in os.listdir(args.input_data_folder): data = json.load(open(((args.input_data_folder + '/') + filename))) for entry in data: if entry['positive']: csv_file.writerow([entry['positive'].encode('utf8'), POSITIVE]) if entry['negative']: csv_file.writerow([entry['negative'].encode('utf8'), NEGATIVE])
Reads JSON files from input data folder and saves as CSV with labeled sentiments. Labels are 1 for positive, 0 for negative.
json-to-csv-converter/converter.py
main
vkaracic/review-parser
1
python
def main(): ' Reads JSON files from input data folder and saves as CSV with labeled sentiments.\n Labels are 1 for positive, 0 for negative.\n ' POSITIVE = 1 NEGATIVE = 0 parser = prepare_parser() args = parser.parse_args() csv_file = csv.writer(open(args.output, 'w')) csv_file.writerow(['review', 'sentiment']) for filename in os.listdir(args.input_data_folder): data = json.load(open(((args.input_data_folder + '/') + filename))) for entry in data: if entry['positive']: csv_file.writerow([entry['positive'].encode('utf8'), POSITIVE]) if entry['negative']: csv_file.writerow([entry['negative'].encode('utf8'), NEGATIVE])
def main(): ' Reads JSON files from input data folder and saves as CSV with labeled sentiments.\n Labels are 1 for positive, 0 for negative.\n ' POSITIVE = 1 NEGATIVE = 0 parser = prepare_parser() args = parser.parse_args() csv_file = csv.writer(open(args.output, 'w')) csv_file.writerow(['review', 'sentiment']) for filename in os.listdir(args.input_data_folder): data = json.load(open(((args.input_data_folder + '/') + filename))) for entry in data: if entry['positive']: csv_file.writerow([entry['positive'].encode('utf8'), POSITIVE]) if entry['negative']: csv_file.writerow([entry['negative'].encode('utf8'), NEGATIVE])<|docstring|>Reads JSON files from input data folder and saves as CSV with labeled sentiments. Labels are 1 for positive, 0 for negative.<|endoftext|>
07dc1d0a22698748efa073f0d0f2313e4c051049b51353d825893fdeb6a89172
def test_run_charclassml(self): 'test run_charclassml' config = attr.assoc(run_charclassml.CONFIG_DEFAULT) config.nn_opt['max_epochs'] = 1 config.train.idxs = [1] config.train.do_balance = True config.train.balance_size = 64 config.dev.idxs = [2] config.test.idxs = [3] self._test_training_module(run_charclassml, 'charclass', config)
test run_charclassml
integration/tests_train.py
test_run_charclassml
bdzimmer/handwriting
2
python
def test_run_charclassml(self): config = attr.assoc(run_charclassml.CONFIG_DEFAULT) config.nn_opt['max_epochs'] = 1 config.train.idxs = [1] config.train.do_balance = True config.train.balance_size = 64 config.dev.idxs = [2] config.test.idxs = [3] self._test_training_module(run_charclassml, 'charclass', config)
def test_run_charclassml(self): config = attr.assoc(run_charclassml.CONFIG_DEFAULT) config.nn_opt['max_epochs'] = 1 config.train.idxs = [1] config.train.do_balance = True config.train.balance_size = 64 config.dev.idxs = [2] config.test.idxs = [3] self._test_training_module(run_charclassml, 'charclass', config)<|docstring|>test run_charclassml<|endoftext|>
f66e23a6f60a064734d348b954437eb6ed91bd48aa0f8051e863a3a434fa1f5d
def test_run_charposml(self): 'test run_charposml' run_charposml.VERBOSE = True config = attr.assoc(run_charposml.CONFIG_DEFAULT) config.train.idxs = [1] config.nn_opt['max_epochs'] = 1 config.train.do_balance = True config.train.balance_size = 64 config.dev.idxs = [2] config.test.idxs = [3] self._test_training_module(run_charposml, 'charpos', config)
test run_charposml
integration/tests_train.py
test_run_charposml
bdzimmer/handwriting
2
python
def test_run_charposml(self): run_charposml.VERBOSE = True config = attr.assoc(run_charposml.CONFIG_DEFAULT) config.train.idxs = [1] config.nn_opt['max_epochs'] = 1 config.train.do_balance = True config.train.balance_size = 64 config.dev.idxs = [2] config.test.idxs = [3] self._test_training_module(run_charposml, 'charpos', config)
def test_run_charposml(self): run_charposml.VERBOSE = True config = attr.assoc(run_charposml.CONFIG_DEFAULT) config.train.idxs = [1] config.nn_opt['max_epochs'] = 1 config.train.do_balance = True config.train.balance_size = 64 config.dev.idxs = [2] config.test.idxs = [3] self._test_training_module(run_charposml, 'charpos', config)<|docstring|>test run_charposml<|endoftext|>
3148bb084c3474ec1faa5bba6358cde6d49bbb1326819c94e34e07a04f521c41
def _test_training_module(self, module, sub_dirname, config): 'helper method: run one of the training modules with config,\n testing that it produces output files' work_dirname = os.path.join('integration', sub_dirname) if os.path.exists(work_dirname): shutil.rmtree(work_dirname) os.makedirs(work_dirname) config_filename = os.path.join(work_dirname, 'config.json') cf.save(config, config_filename) model_filename = os.path.join(work_dirname, 'model.pkl') args = ['', 'train', config_filename, model_filename] module.main(args) self.assertTrue(os.path.exists(model_filename)) self.assertTrue(os.path.exists((model_filename + '.log.txt')))
helper method: run one of the training modules with config, testing that it produces output files
integration/tests_train.py
_test_training_module
bdzimmer/handwriting
2
python
def _test_training_module(self, module, sub_dirname, config): 'helper method: run one of the training modules with config,\n testing that it produces output files' work_dirname = os.path.join('integration', sub_dirname) if os.path.exists(work_dirname): shutil.rmtree(work_dirname) os.makedirs(work_dirname) config_filename = os.path.join(work_dirname, 'config.json') cf.save(config, config_filename) model_filename = os.path.join(work_dirname, 'model.pkl') args = [, 'train', config_filename, model_filename] module.main(args) self.assertTrue(os.path.exists(model_filename)) self.assertTrue(os.path.exists((model_filename + '.log.txt')))
def _test_training_module(self, module, sub_dirname, config): 'helper method: run one of the training modules with config,\n testing that it produces output files' work_dirname = os.path.join('integration', sub_dirname) if os.path.exists(work_dirname): shutil.rmtree(work_dirname) os.makedirs(work_dirname) config_filename = os.path.join(work_dirname, 'config.json') cf.save(config, config_filename) model_filename = os.path.join(work_dirname, 'model.pkl') args = [, 'train', config_filename, model_filename] module.main(args) self.assertTrue(os.path.exists(model_filename)) self.assertTrue(os.path.exists((model_filename + '.log.txt')))<|docstring|>helper method: run one of the training modules with config, testing that it produces output files<|endoftext|>
9defd67dc3607bdbdbad802492f17145c1407993e2ca38ee339b1f60bea4ac1d
def run_model_training(): 'Run model training using tensorflow\n\n # TODO Refactor the function\n Args:\n\n Output:\n None' with open('params.yaml', 'r') as fd: params = yaml.safe_load(fd) SEED = params['seed'] GCP_BUCKET = params['gcp_bucket'] TEST_SIZE = params['test_size'] EPOCHS = params['train']['epochs'] BATCH_SIZE = params['train']['batch_size'] ACTIVATION = params['train']['activation'] LAYERS = params['train']['fc_layers'] EVAL_BATCH_SIZE = params['eval']['batch_size'] df = pd.read_csv('Reduced_Features.csv') features_df = df.drop(['Label'], axis=1) labels_df = df['Label'] (X_train, X_test, y_train, y_test) = train_test_split(features_df, labels_df, test_size=TEST_SIZE, random_state=SEED) train_data = tf.data.Dataset.from_tensor_slices((X_train, y_train)) test_data = tf.data.Dataset.from_tensor_slices((X_test, y_test)) train_dataset = train_data.batch(BATCH_SIZE) test_dataset = test_data.batch(BATCH_SIZE) fc_layers = [] for x in LAYERS: fc_layers.append(tf.keras.layers.Dense(x, activation=ACTIVATION)) model = tf.keras.Sequential((fc_layers + [tf.keras.layers.Dense(14, activation='softmax')])) checkpoint_path = os.path.join('gs://', GCP_BUCKET, 'feat-sel-check', 'save_at_{epoch}') tensorboard_path = os.path.join('gs://', GCP_BUCKET, 'logs', datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) callbacks = [tf.keras.callbacks.ModelCheckpoint(checkpoint_path)] model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) model.fit(train_dataset, epochs=EPOCHS) MODEL_PATH = '../2_Training_Workflow/keras-model' SAVE_PATH = MODEL_PATH model.save(SAVE_PATH) model = tf.keras.models.load_model(SAVE_PATH) print(model.evaluate(test_dataset, batch_size=EVAL_BATCH_SIZE)) print('Done evaluating the model') return MODEL_PATH
Run model training using tensorflow # TODO Refactor the function Args: Output: None
src/Model_Training/Model_Training.py
run_model_training
Roger-Parkinson-EHP/ML_Workflow_Demo
0
python
def run_model_training(): 'Run model training using tensorflow\n\n # TODO Refactor the function\n Args:\n\n Output:\n None' with open('params.yaml', 'r') as fd: params = yaml.safe_load(fd) SEED = params['seed'] GCP_BUCKET = params['gcp_bucket'] TEST_SIZE = params['test_size'] EPOCHS = params['train']['epochs'] BATCH_SIZE = params['train']['batch_size'] ACTIVATION = params['train']['activation'] LAYERS = params['train']['fc_layers'] EVAL_BATCH_SIZE = params['eval']['batch_size'] df = pd.read_csv('Reduced_Features.csv') features_df = df.drop(['Label'], axis=1) labels_df = df['Label'] (X_train, X_test, y_train, y_test) = train_test_split(features_df, labels_df, test_size=TEST_SIZE, random_state=SEED) train_data = tf.data.Dataset.from_tensor_slices((X_train, y_train)) test_data = tf.data.Dataset.from_tensor_slices((X_test, y_test)) train_dataset = train_data.batch(BATCH_SIZE) test_dataset = test_data.batch(BATCH_SIZE) fc_layers = [] for x in LAYERS: fc_layers.append(tf.keras.layers.Dense(x, activation=ACTIVATION)) model = tf.keras.Sequential((fc_layers + [tf.keras.layers.Dense(14, activation='softmax')])) checkpoint_path = os.path.join('gs://', GCP_BUCKET, 'feat-sel-check', 'save_at_{epoch}') tensorboard_path = os.path.join('gs://', GCP_BUCKET, 'logs', datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) callbacks = [tf.keras.callbacks.ModelCheckpoint(checkpoint_path)] model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) model.fit(train_dataset, epochs=EPOCHS) MODEL_PATH = '../2_Training_Workflow/keras-model' SAVE_PATH = MODEL_PATH model.save(SAVE_PATH) model = tf.keras.models.load_model(SAVE_PATH) print(model.evaluate(test_dataset, batch_size=EVAL_BATCH_SIZE)) print('Done evaluating the model') return MODEL_PATH
def run_model_training(): 'Run model training using tensorflow\n\n # TODO Refactor the function\n Args:\n\n Output:\n None' with open('params.yaml', 'r') as fd: params = yaml.safe_load(fd) SEED = params['seed'] GCP_BUCKET = params['gcp_bucket'] TEST_SIZE = params['test_size'] EPOCHS = params['train']['epochs'] BATCH_SIZE = params['train']['batch_size'] ACTIVATION = params['train']['activation'] LAYERS = params['train']['fc_layers'] EVAL_BATCH_SIZE = params['eval']['batch_size'] df = pd.read_csv('Reduced_Features.csv') features_df = df.drop(['Label'], axis=1) labels_df = df['Label'] (X_train, X_test, y_train, y_test) = train_test_split(features_df, labels_df, test_size=TEST_SIZE, random_state=SEED) train_data = tf.data.Dataset.from_tensor_slices((X_train, y_train)) test_data = tf.data.Dataset.from_tensor_slices((X_test, y_test)) train_dataset = train_data.batch(BATCH_SIZE) test_dataset = test_data.batch(BATCH_SIZE) fc_layers = [] for x in LAYERS: fc_layers.append(tf.keras.layers.Dense(x, activation=ACTIVATION)) model = tf.keras.Sequential((fc_layers + [tf.keras.layers.Dense(14, activation='softmax')])) checkpoint_path = os.path.join('gs://', GCP_BUCKET, 'feat-sel-check', 'save_at_{epoch}') tensorboard_path = os.path.join('gs://', GCP_BUCKET, 'logs', datetime.datetime.now().strftime('%Y%m%d-%H%M%S')) callbacks = [tf.keras.callbacks.ModelCheckpoint(checkpoint_path)] model.compile(loss='sparse_categorical_crossentropy', optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy']) model.fit(train_dataset, epochs=EPOCHS) MODEL_PATH = '../2_Training_Workflow/keras-model' SAVE_PATH = MODEL_PATH model.save(SAVE_PATH) model = tf.keras.models.load_model(SAVE_PATH) print(model.evaluate(test_dataset, batch_size=EVAL_BATCH_SIZE)) print('Done evaluating the model') return MODEL_PATH<|docstring|>Run model training using tensorflow # TODO Refactor the function Args: Output: None<|endoftext|>
29ba05b6db315ce65f0cef813cba1957f54797b37e452547af11167095e117a0
@pytest.fixture() def bomb(): 'A bomb context appropriate for proper testing of all wire sequences\n cases.\n ' bomb = Bomb() return bomb
A bomb context appropriate for proper testing of all wire sequences cases.
tests/test_wire_sequences.py
bomb
MartinHarding/ktaned
1
python
@pytest.fixture() def bomb(): 'A bomb context appropriate for proper testing of all wire sequences\n cases.\n ' bomb = Bomb() return bomb
@pytest.fixture() def bomb(): 'A bomb context appropriate for proper testing of all wire sequences\n cases.\n ' bomb = Bomb() return bomb<|docstring|>A bomb context appropriate for proper testing of all wire sequences cases.<|endoftext|>
bbb9a208502d9abdfc936cfc91a6f5ab9d43790fec9be1f21cc86d0df2c61e83
@pytest.fixture() def cuts(): 'Hand crafted validated result sets for cuts on each wire sequence. The\n named key represents the color of the wire, the index of the list\n represents how many times that wire color has appeared, and the list of\n values represent you should cut the wire if it is connected to that letter.\n\n e.g. "third time I\'ve seen a red wire, it\'s connected to letter c" would\n map to cuts[\'red\'][3], which contains [\'a\', \'c\'], so should be cut.\n ' cuts = {'red': [['c'], ['b'], ['a'], ['a', 'c'], ['b'], ['a', 'c'], ['a', 'b', 'c'], ['a', 'b'], ['b']], 'blue': [['b'], ['a', 'c'], ['b'], ['a'], ['b'], ['b', 'c'], ['c'], ['a', 'c'], ['a']], 'black': [['a', 'b', 'c'], ['a', 'c'], ['b'], ['a', 'c'], ['b'], ['b', 'c'], ['a', 'b'], ['c'], ['c']]} return cuts
Hand crafted validated result sets for cuts on each wire sequence. The named key represents the color of the wire, the index of the list represents how many times that wire color has appeared, and the list of values represent you should cut the wire if it is connected to that letter. e.g. "third time I've seen a red wire, it's connected to letter c" would map to cuts['red'][3], which contains ['a', 'c'], so should be cut.
tests/test_wire_sequences.py
cuts
MartinHarding/ktaned
1
python
@pytest.fixture() def cuts(): 'Hand crafted validated result sets for cuts on each wire sequence. The\n named key represents the color of the wire, the index of the list\n represents how many times that wire color has appeared, and the list of\n values represent you should cut the wire if it is connected to that letter.\n\n e.g. "third time I\'ve seen a red wire, it\'s connected to letter c" would\n map to cuts[\'red\'][3], which contains [\'a\', \'c\'], so should be cut.\n ' cuts = {'red': [['c'], ['b'], ['a'], ['a', 'c'], ['b'], ['a', 'c'], ['a', 'b', 'c'], ['a', 'b'], ['b']], 'blue': [['b'], ['a', 'c'], ['b'], ['a'], ['b'], ['b', 'c'], ['c'], ['a', 'c'], ['a']], 'black': [['a', 'b', 'c'], ['a', 'c'], ['b'], ['a', 'c'], ['b'], ['b', 'c'], ['a', 'b'], ['c'], ['c']]} return cuts
@pytest.fixture() def cuts(): 'Hand crafted validated result sets for cuts on each wire sequence. The\n named key represents the color of the wire, the index of the list\n represents how many times that wire color has appeared, and the list of\n values represent you should cut the wire if it is connected to that letter.\n\n e.g. "third time I\'ve seen a red wire, it\'s connected to letter c" would\n map to cuts[\'red\'][3], which contains [\'a\', \'c\'], so should be cut.\n ' cuts = {'red': [['c'], ['b'], ['a'], ['a', 'c'], ['b'], ['a', 'c'], ['a', 'b', 'c'], ['a', 'b'], ['b']], 'blue': [['b'], ['a', 'c'], ['b'], ['a'], ['b'], ['b', 'c'], ['c'], ['a', 'c'], ['a']], 'black': [['a', 'b', 'c'], ['a', 'c'], ['b'], ['a', 'c'], ['b'], ['b', 'c'], ['a', 'b'], ['c'], ['c']]} return cuts<|docstring|>Hand crafted validated result sets for cuts on each wire sequence. The named key represents the color of the wire, the index of the list represents how many times that wire color has appeared, and the list of values represent you should cut the wire if it is connected to that letter. e.g. "third time I've seen a red wire, it's connected to letter c" would map to cuts['red'][3], which contains ['a', 'c'], so should be cut.<|endoftext|>
ab82abc87eb7d5a5557bd3a39bfac95fe4e0e38b397dc422d353cd6c138ea791
def test_add_wire_invalid_color(bomb): 'Test adding a wire to a sequence with an invalid color.' wire_sequences = WireSequences(bomb) expected_exception = 'Color (chartreuse) must be one of {}'.format(wire_sequences.valid_colors) with pytest.raises(Exception, message=expected_exception): wire_sequences.add_wire('chartreuse', 'c')
Test adding a wire to a sequence with an invalid color.
tests/test_wire_sequences.py
test_add_wire_invalid_color
MartinHarding/ktaned
1
python
def test_add_wire_invalid_color(bomb): wire_sequences = WireSequences(bomb) expected_exception = 'Color (chartreuse) must be one of {}'.format(wire_sequences.valid_colors) with pytest.raises(Exception, message=expected_exception): wire_sequences.add_wire('chartreuse', 'c')
def test_add_wire_invalid_color(bomb): wire_sequences = WireSequences(bomb) expected_exception = 'Color (chartreuse) must be one of {}'.format(wire_sequences.valid_colors) with pytest.raises(Exception, message=expected_exception): wire_sequences.add_wire('chartreuse', 'c')<|docstring|>Test adding a wire to a sequence with an invalid color.<|endoftext|>
db3e86a4a59bb3b230bce0cb4825b69f5d9ef3bd3f1e31c1b04acd7312a09386
def test_add_wire_invalid_letter(bomb): 'Test adding a wire to a sequence connected to an invalid letter.' wire_sequences = WireSequences(bomb) expected_exception = 'Letter (d) must be one of {}'.format(wire_sequences.valid_letters) with pytest.raises(Exception, message=expected_exception): wire_sequences.add_wire('red', 'd')
Test adding a wire to a sequence connected to an invalid letter.
tests/test_wire_sequences.py
test_add_wire_invalid_letter
MartinHarding/ktaned
1
python
def test_add_wire_invalid_letter(bomb): wire_sequences = WireSequences(bomb) expected_exception = 'Letter (d) must be one of {}'.format(wire_sequences.valid_letters) with pytest.raises(Exception, message=expected_exception): wire_sequences.add_wire('red', 'd')
def test_add_wire_invalid_letter(bomb): wire_sequences = WireSequences(bomb) expected_exception = 'Letter (d) must be one of {}'.format(wire_sequences.valid_letters) with pytest.raises(Exception, message=expected_exception): wire_sequences.add_wire('red', 'd')<|docstring|>Test adding a wire to a sequence connected to an invalid letter.<|endoftext|>
83c2fcce74263bb01577f68f1e4c189c38ca8882af6ae773ae8659d2d6b06b1d
def test_add_wire_by_color_letter(bomb, cuts): 'Test adding wire by color and letter (iterates through every color,\n letter, and appearance combination).\n ' for color in ['red', 'blue', 'black']: for letter in ['a', 'b', 'c']: wire_sequences = WireSequences(bomb) for cut in cuts[color]: actual = wire_sequences.add_wire(color, letter) expected = bool((letter in cut)) assert (actual == expected)
Test adding wire by color and letter (iterates through every color, letter, and appearance combination).
tests/test_wire_sequences.py
test_add_wire_by_color_letter
MartinHarding/ktaned
1
python
def test_add_wire_by_color_letter(bomb, cuts): 'Test adding wire by color and letter (iterates through every color,\n letter, and appearance combination).\n ' for color in ['red', 'blue', 'black']: for letter in ['a', 'b', 'c']: wire_sequences = WireSequences(bomb) for cut in cuts[color]: actual = wire_sequences.add_wire(color, letter) expected = bool((letter in cut)) assert (actual == expected)
def test_add_wire_by_color_letter(bomb, cuts): 'Test adding wire by color and letter (iterates through every color,\n letter, and appearance combination).\n ' for color in ['red', 'blue', 'black']: for letter in ['a', 'b', 'c']: wire_sequences = WireSequences(bomb) for cut in cuts[color]: actual = wire_sequences.add_wire(color, letter) expected = bool((letter in cut)) assert (actual == expected)<|docstring|>Test adding wire by color and letter (iterates through every color, letter, and appearance combination).<|endoftext|>
93aa0358eb04d9df0ca7f8069c8239debfcf716ee675de635aadb3b7f3f7cbeb
def test_add_wire_mixed(bomb): 'Test some random wire sequences.' wire_sequences = WireSequences(bomb) wires = [('red', 'c', True), ('blue', 'a', False), ('black', 'b', True), ('blue', 'a', True), ('red', 'c', False), ('black', 'b', False), ('red', 'a', True), ('blue', 'c', False), ('black', 'b', True)] for wire in wires: (color, letter, cut) = wire actual = wire_sequences.add_wire(color, letter) expected = cut assert (actual == expected)
Test some random wire sequences.
tests/test_wire_sequences.py
test_add_wire_mixed
MartinHarding/ktaned
1
python
def test_add_wire_mixed(bomb): wire_sequences = WireSequences(bomb) wires = [('red', 'c', True), ('blue', 'a', False), ('black', 'b', True), ('blue', 'a', True), ('red', 'c', False), ('black', 'b', False), ('red', 'a', True), ('blue', 'c', False), ('black', 'b', True)] for wire in wires: (color, letter, cut) = wire actual = wire_sequences.add_wire(color, letter) expected = cut assert (actual == expected)
def test_add_wire_mixed(bomb): wire_sequences = WireSequences(bomb) wires = [('red', 'c', True), ('blue', 'a', False), ('black', 'b', True), ('blue', 'a', True), ('red', 'c', False), ('black', 'b', False), ('red', 'a', True), ('blue', 'c', False), ('black', 'b', True)] for wire in wires: (color, letter, cut) = wire actual = wire_sequences.add_wire(color, letter) expected = cut assert (actual == expected)<|docstring|>Test some random wire sequences.<|endoftext|>
c5da5dc5ab7083bf49e69e9f8c61cd3f7f28f32727415fb13d886a137e534014
def math2html(formula): 'Convert some TeX math to HTML.' factory = FormulaFactory() whole = factory.parseformula(formula) FormulaProcessor().process(whole) whole.process() return ''.join(whole.gethtml())
Convert some TeX math to HTML.
Lib/site-packages/docutils/utils/math/math2html.py
math2html
edupyter/EDUPYTER
2
python
def math2html(formula): factory = FormulaFactory() whole = factory.parseformula(formula) FormulaProcessor().process(whole) whole.process() return .join(whole.gethtml())
def math2html(formula): factory = FormulaFactory() whole = factory.parseformula(formula) FormulaProcessor().process(whole) whole.process() return .join(whole.gethtml())<|docstring|>Convert some TeX math to HTML.<|endoftext|>
f24277821d9a641dca613cb77393656b4e278d85aa264faa7b7c5b89721af99f
def main(): 'Main function, called if invoked from the command line' args = sys.argv Options().parseoptions(args) if (len(args) != 1): Trace.error('Usage: math2html.py escaped_string') exit() result = math2html(args[0]) Trace.message(result)
Main function, called if invoked from the command line
Lib/site-packages/docutils/utils/math/math2html.py
main
edupyter/EDUPYTER
2
python
def main(): args = sys.argv Options().parseoptions(args) if (len(args) != 1): Trace.error('Usage: math2html.py escaped_string') exit() result = math2html(args[0]) Trace.message(result)
def main(): args = sys.argv Options().parseoptions(args) if (len(args) != 1): Trace.error('Usage: math2html.py escaped_string') exit() result = math2html(args[0]) Trace.message(result)<|docstring|>Main function, called if invoked from the command line<|endoftext|>
ef97645dcd8b9754745b191c29b29d446897f222c55125ce90ae0328d990d0ed
def debug(cls, message): 'Show a debug message' if ((not Trace.debugmode) or Trace.quietmode): return Trace.show(message, sys.stdout)
Show a debug message
Lib/site-packages/docutils/utils/math/math2html.py
debug
edupyter/EDUPYTER
2
python
def debug(cls, message): if ((not Trace.debugmode) or Trace.quietmode): return Trace.show(message, sys.stdout)
def debug(cls, message): if ((not Trace.debugmode) or Trace.quietmode): return Trace.show(message, sys.stdout)<|docstring|>Show a debug message<|endoftext|>
0e5b9c016a9d4f7d24161573c57eb2608094dceea90401b145ce96a7fc1eaf47
def message(cls, message): 'Show a trace message' if Trace.quietmode: return if (Trace.prefix and Trace.showlinesmode): message = (Trace.prefix + message) Trace.show(message, sys.stdout)
Show a trace message
Lib/site-packages/docutils/utils/math/math2html.py
message
edupyter/EDUPYTER
2
python
def message(cls, message): if Trace.quietmode: return if (Trace.prefix and Trace.showlinesmode): message = (Trace.prefix + message) Trace.show(message, sys.stdout)
def message(cls, message): if Trace.quietmode: return if (Trace.prefix and Trace.showlinesmode): message = (Trace.prefix + message) Trace.show(message, sys.stdout)<|docstring|>Show a trace message<|endoftext|>
bef16f7e7e2a238f039c0b9a1872d712ce6154c8f4e005f6ed6701a357cf8eb6
def error(cls, message): 'Show an error message' message = ('* ' + message) if (Trace.prefix and Trace.showlinesmode): message = (Trace.prefix + message) Trace.show(message, sys.stderr)
Show an error message
Lib/site-packages/docutils/utils/math/math2html.py
error
edupyter/EDUPYTER
2
python
def error(cls, message): message = ('* ' + message) if (Trace.prefix and Trace.showlinesmode): message = (Trace.prefix + message) Trace.show(message, sys.stderr)
def error(cls, message): message = ('* ' + message) if (Trace.prefix and Trace.showlinesmode): message = (Trace.prefix + message) Trace.show(message, sys.stderr)<|docstring|>Show an error message<|endoftext|>
dbf8bf09f41eff2710ce880753a36053194174db62974597ab84034850c991f9
def fatal(cls, message): 'Show an error message and terminate' Trace.error(('FATAL: ' + message)) exit((- 1))
Show an error message and terminate
Lib/site-packages/docutils/utils/math/math2html.py
fatal
edupyter/EDUPYTER
2
python
def fatal(cls, message): Trace.error(('FATAL: ' + message)) exit((- 1))
def fatal(cls, message): Trace.error(('FATAL: ' + message)) exit((- 1))<|docstring|>Show an error message and terminate<|endoftext|>
c431322ea34926e8df5ebf04932cc8c4266bf6259dd9329a42264217c1087b5a
def show(cls, message, channel): 'Show a message out of a channel' if (sys.version_info < (3, 0)): message = message.encode('utf-8') channel.write((message + '\n'))
Show a message out of a channel
Lib/site-packages/docutils/utils/math/math2html.py
show
edupyter/EDUPYTER
2
python
def show(cls, message, channel): if (sys.version_info < (3, 0)): message = message.encode('utf-8') channel.write((message + '\n'))
def show(cls, message, channel): if (sys.version_info < (3, 0)): message = message.encode('utf-8') channel.write((message + '\n'))<|docstring|>Show a message out of a channel<|endoftext|>
dcea4c5623947b569e40a349e26151c43d72a7aa0347c2edf19299f215dc75c3
def parseoptions(self, args): 'Parse command line options' if (len(args) == 0): return None while ((len(args) > 0) and args[0].startswith('--')): (key, value) = self.readoption(args) if (not key): return (('Option ' + value) + ' not recognized') if (not value): return (('Option ' + key) + ' needs a value') setattr(self.options, key, value) return None
Parse command line options
Lib/site-packages/docutils/utils/math/math2html.py
parseoptions
edupyter/EDUPYTER
2
python
def parseoptions(self, args): if (len(args) == 0): return None while ((len(args) > 0) and args[0].startswith('--')): (key, value) = self.readoption(args) if (not key): return (('Option ' + value) + ' not recognized') if (not value): return (('Option ' + key) + ' needs a value') setattr(self.options, key, value) return None
def parseoptions(self, args): if (len(args) == 0): return None while ((len(args) > 0) and args[0].startswith('--')): (key, value) = self.readoption(args) if (not key): return (('Option ' + value) + ' not recognized') if (not value): return (('Option ' + key) + ' needs a value') setattr(self.options, key, value) return None<|docstring|>Parse command line options<|endoftext|>
45d75e57d485dd7447afc78e8b20ea35a382ceffa6d4efa8fcde46b0e6ccd513
def readoption(self, args): 'Read the key and value for an option' arg = args[0][2:] del args[0] if ('=' in arg): key = self.readequalskey(arg, args) else: key = arg.replace('-', '') if (not hasattr(self.options, key)): return (None, key) current = getattr(self.options, key) if isinstance(current, bool): return (key, True) if (len(args) == 0): return (key, None) if args[0].startswith('"'): initial = args[0] del args[0] return (key, self.readquoted(args, initial)) value = args[0].decode('utf-8') del args[0] if isinstance(current, list): current.append(value) return (key, current) return (key, value)
Read the key and value for an option
Lib/site-packages/docutils/utils/math/math2html.py
readoption
edupyter/EDUPYTER
2
python
def readoption(self, args): arg = args[0][2:] del args[0] if ('=' in arg): key = self.readequalskey(arg, args) else: key = arg.replace('-', ) if (not hasattr(self.options, key)): return (None, key) current = getattr(self.options, key) if isinstance(current, bool): return (key, True) if (len(args) == 0): return (key, None) if args[0].startswith('"'): initial = args[0] del args[0] return (key, self.readquoted(args, initial)) value = args[0].decode('utf-8') del args[0] if isinstance(current, list): current.append(value) return (key, current) return (key, value)
def readoption(self, args): arg = args[0][2:] del args[0] if ('=' in arg): key = self.readequalskey(arg, args) else: key = arg.replace('-', ) if (not hasattr(self.options, key)): return (None, key) current = getattr(self.options, key) if isinstance(current, bool): return (key, True) if (len(args) == 0): return (key, None) if args[0].startswith('"'): initial = args[0] del args[0] return (key, self.readquoted(args, initial)) value = args[0].decode('utf-8') del args[0] if isinstance(current, list): current.append(value) return (key, current) return (key, value)<|docstring|>Read the key and value for an option<|endoftext|>
0611334680e2831ca78bac6cf5ecede2ed60718411e5faf01ebdb08e208237f9
def readquoted(self, args, initial): 'Read a value between quotes' Trace.error('Oops') value = initial[1:] while ((len(args) > 0) and (not args[0].endswith('"')) and (not args[0].startswith('--'))): Trace.error(('Appending ' + args[0])) value += (' ' + args[0]) del args[0] if ((len(args) == 0) or args[0].startswith('--')): return None value += (' ' + args[0:(- 1)]) return value
Read a value between quotes
Lib/site-packages/docutils/utils/math/math2html.py
readquoted
edupyter/EDUPYTER
2
python
def readquoted(self, args, initial): Trace.error('Oops') value = initial[1:] while ((len(args) > 0) and (not args[0].endswith('"')) and (not args[0].startswith('--'))): Trace.error(('Appending ' + args[0])) value += (' ' + args[0]) del args[0] if ((len(args) == 0) or args[0].startswith('--')): return None value += (' ' + args[0:(- 1)]) return value
def readquoted(self, args, initial): Trace.error('Oops') value = initial[1:] while ((len(args) > 0) and (not args[0].endswith('"')) and (not args[0].startswith('--'))): Trace.error(('Appending ' + args[0])) value += (' ' + args[0]) del args[0] if ((len(args) == 0) or args[0].startswith('--')): return None value += (' ' + args[0:(- 1)]) return value<|docstring|>Read a value between quotes<|endoftext|>
704dc2e0b83ad9676bee39717bce95598aa6e4febfac007fedbca710633d9213
def readequalskey(self, arg, args): 'Read a key using equals' split = arg.split('=', 1) key = split[0] value = split[1] args.insert(0, value) return key
Read a key using equals
Lib/site-packages/docutils/utils/math/math2html.py
readequalskey
edupyter/EDUPYTER
2
python
def readequalskey(self, arg, args): split = arg.split('=', 1) key = split[0] value = split[1] args.insert(0, value) return key
def readequalskey(self, arg, args): split = arg.split('=', 1) key = split[0] value = split[1] args.insert(0, value) return key<|docstring|>Read a key using equals<|endoftext|>
555dd0f334ffa8dfa424532c308beb63f5de450331120db7e869872ec41c3ceb
def parseoptions(self, args): 'Parse command line options' Options.location = args[0] del args[0] parser = CommandLineParser(Options) result = parser.parseoptions(args) if result: Trace.error(result) self.usage() self.processoptions()
Parse command line options
Lib/site-packages/docutils/utils/math/math2html.py
parseoptions
edupyter/EDUPYTER
2
python
def parseoptions(self, args): Options.location = args[0] del args[0] parser = CommandLineParser(Options) result = parser.parseoptions(args) if result: Trace.error(result) self.usage() self.processoptions()
def parseoptions(self, args): Options.location = args[0] del args[0] parser = CommandLineParser(Options) result = parser.parseoptions(args) if result: Trace.error(result) self.usage() self.processoptions()<|docstring|>Parse command line options<|endoftext|>
8dbf1e5fd2b592ff7f8c3007c226577afadc42856ce7e08c13ee402e7152b0a1
def processoptions(self): 'Process all options parsed.' if Options.help: self.usage() if Options.version: self.showversion() for param in dir(Trace): if param.endswith('mode'): setattr(Trace, param, getattr(self, param[:(- 4)]))
Process all options parsed.
Lib/site-packages/docutils/utils/math/math2html.py
processoptions
edupyter/EDUPYTER
2
python
def processoptions(self): if Options.help: self.usage() if Options.version: self.showversion() for param in dir(Trace): if param.endswith('mode'): setattr(Trace, param, getattr(self, param[:(- 4)]))
def processoptions(self): if Options.help: self.usage() if Options.version: self.showversion() for param in dir(Trace): if param.endswith('mode'): setattr(Trace, param, getattr(self, param[:(- 4)]))<|docstring|>Process all options parsed.<|endoftext|>
05cd9ed5aa02f2aaad953f2f642b2dd43995bca3450f9459d24b3df0f384474f
def usage(self): 'Show correct usage' Trace.error((('Usage: ' + os.path.basename(Options.location)) + ' [options] "input string"')) Trace.error('Convert input string with LaTeX math to MathML') self.showoptions()
Show correct usage
Lib/site-packages/docutils/utils/math/math2html.py
usage
edupyter/EDUPYTER
2
python
def usage(self): Trace.error((('Usage: ' + os.path.basename(Options.location)) + ' [options] "input string"')) Trace.error('Convert input string with LaTeX math to MathML') self.showoptions()
def usage(self): Trace.error((('Usage: ' + os.path.basename(Options.location)) + ' [options] "input string"')) Trace.error('Convert input string with LaTeX math to MathML') self.showoptions()<|docstring|>Show correct usage<|endoftext|>
634f867c80fdbdd3a242bbc5a23e3700f19ef06866f0a7f260ba14e6096055c2
def showoptions(self): 'Show all possible options' Trace.error(' --help: show this online help') Trace.error(' --quiet: disables all runtime messages') Trace.error(' --debug: enable debugging messages (for developers)') Trace.error(' --version: show version number and release date') Trace.error(' --simplemath: do not generate fancy math constructions') sys.exit()
Show all possible options
Lib/site-packages/docutils/utils/math/math2html.py
showoptions
edupyter/EDUPYTER
2
python
def showoptions(self): Trace.error(' --help: show this online help') Trace.error(' --quiet: disables all runtime messages') Trace.error(' --debug: enable debugging messages (for developers)') Trace.error(' --version: show version number and release date') Trace.error(' --simplemath: do not generate fancy math constructions') sys.exit()
def showoptions(self): Trace.error(' --help: show this online help') Trace.error(' --quiet: disables all runtime messages') Trace.error(' --debug: enable debugging messages (for developers)') Trace.error(' --version: show version number and release date') Trace.error(' --simplemath: do not generate fancy math constructions') sys.exit()<|docstring|>Show all possible options<|endoftext|>
534b16bddca9d5a4370725e1df28baaf36a3e4a69fd8a6403a914eb26ced9b08
def showversion(self): 'Return the current eLyXer version string' Trace.error(('math2html ' + __version__)) sys.exit()
Return the current eLyXer version string
Lib/site-packages/docutils/utils/math/math2html.py
showversion
edupyter/EDUPYTER
2
python
def showversion(self): Trace.error(('math2html ' + __version__)) sys.exit()
def showversion(self): Trace.error(('math2html ' + __version__)) sys.exit()<|docstring|>Return the current eLyXer version string<|endoftext|>
b8961ad1e8d09d389289fbadebff6279b43fe90e621097a8107e241aaa64ce7d
def clone(cls, original): 'Return an exact copy of an object.' 'The original object must have an empty constructor.' return cls.create(original.__class__)
Return an exact copy of an object.
Lib/site-packages/docutils/utils/math/math2html.py
clone
edupyter/EDUPYTER
2
python
def clone(cls, original): 'The original object must have an empty constructor.' return cls.create(original.__class__)
def clone(cls, original): 'The original object must have an empty constructor.' return cls.create(original.__class__)<|docstring|>Return an exact copy of an object.<|endoftext|>