hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c493e1bf2ed370836c63e29a5f7c2abab7be087 | 1,982 | py | Python | azure/mgmt/network/v2016_09_01/models/vpn_client_configuration.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 1 | 2022-01-25T22:52:58.000Z | 2022-01-25T22:52:58.000Z | azure/mgmt/network/v2016_09_01/models/vpn_client_configuration.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | azure/mgmt/network/v2016_09_01/models/vpn_client_configuration.py | EnjoyLifeFund/Debian_py36_packages | 1985d4c73fabd5f08f54b922e73a9306e09c77a5 | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class VpnClientConfiguration(Model):
    """P2S (point-to-site) VPN client configuration.

    :param vpn_client_address_pool: Address space reserved for P2S VPN
     clients (~azure.mgmt.network.v2016_09_01.models.AddressSpace).
    :param vpn_client_root_certificates: Root certificates installed on the
     virtual network gateway
     (list[~azure.mgmt.network.v2016_09_01.models.VpnClientRootCertificate]).
    :param vpn_client_revoked_certificates: Client certificates revoked on
     the virtual network gateway
     (list[~azure.mgmt.network.v2016_09_01.models.VpnClientRevokedCertificate]).
    """

    # Maps each Python attribute to its wire name and msrest type tag.
    _attribute_map = {
        'vpn_client_address_pool': {
            'key': 'vpnClientAddressPool',
            'type': 'AddressSpace'},
        'vpn_client_root_certificates': {
            'key': 'vpnClientRootCertificates',
            'type': '[VpnClientRootCertificate]'},
        'vpn_client_revoked_certificates': {
            'key': 'vpnClientRevokedCertificates',
            'type': '[VpnClientRevokedCertificate]'},
    }

    def __init__(self, vpn_client_address_pool=None,
                 vpn_client_root_certificates=None,
                 vpn_client_revoked_certificates=None):
        self.vpn_client_address_pool = vpn_client_address_pool
        self.vpn_client_root_certificates = vpn_client_root_certificates
        self.vpn_client_revoked_certificates = vpn_client_revoked_certificates
| 47.190476 | 126 | 0.717962 |
from msrest.serialization import Model
class VpnClientConfiguration(Model):
_attribute_map = {
'vpn_client_address_pool': {'key': 'vpnClientAddressPool', 'type': 'AddressSpace'},
'vpn_client_root_certificates': {'key': 'vpnClientRootCertificates', 'type': '[VpnClientRootCertificate]'},
'vpn_client_revoked_certificates': {'key': 'vpnClientRevokedCertificates', 'type': '[VpnClientRevokedCertificate]'},
}
def __init__(self, vpn_client_address_pool=None, vpn_client_root_certificates=None, vpn_client_revoked_certificates=None):
self.vpn_client_address_pool = vpn_client_address_pool
self.vpn_client_root_certificates = vpn_client_root_certificates
self.vpn_client_revoked_certificates = vpn_client_revoked_certificates
| true | true |
1c493e8807b0e5346571eaaacbb826cbf365e77c | 565 | py | Python | tracker/user.py | k4t0mono/bridge-chat | 49f70e270002b1cb91363b2a0b3acce2a56fee16 | [
"BSD-2-Clause"
] | null | null | null | tracker/user.py | k4t0mono/bridge-chat | 49f70e270002b1cb91363b2a0b3acce2a56fee16 | [
"BSD-2-Clause"
] | null | null | null | tracker/user.py | k4t0mono/bridge-chat | 49f70e270002b1cb91363b2a0b3acce2a56fee16 | [
"BSD-2-Clause"
] | null | null | null | import jwt
import time
import os
class User():
    """A tracker user identified by a unique login.

    Every auth token generated during this process's lifetime is kept in
    ``self.tokens``.
    """

    def __init__(self, login):
        # login: unique user name, used as the JWT subject.
        self.login = login
        self.tokens = []

    def gen_token(self):
        """Create, record and return a signed auth JWT valid for 24 hours.

        The HS512 signing secret is read from the BRIDGECHAT_SECRET
        environment variable; raises KeyError if it is unset.
        """
        # Whole seconds now + 24h. (The previous str(time.time())[:-8]
        # trick silently produced wrong values whenever the float repr did
        # not have exactly 7 fractional digits.)
        end = int(time.time()) + 86400
        d = { 'login': self.login, 'type': 'auth', 'time': end }
        t = jwt.encode(d, os.environ['BRIDGECHAT_SECRET'], algorithm='HS512')
        # PyJWT < 2.0 returns bytes, >= 2.0 returns str; normalise to str.
        if isinstance(t, bytes):
            t = t.decode('utf-8')
        self.tokens.append(t)
        return t

    def __repr__(self):
        s = '<User login=\'{}\' tokens={}>'.format(self.login, len(self.tokens))
        return s
| 23.541667 | 80 | 0.534513 | import jwt
import time
import os
class User():
def __init__(self, login):
self.login = login
self.tokens = []
def gen_token(self):
end = int(str(time.time())[:-8]) + 86400
d = { 'login': self.login, 'type': 'auth', 'time': end }
t = jwt.encode(d, os.environ['BRIDGECHAT_SECRET'], algorithm='HS512')
t = t.decode('utf-8')
self.tokens.append(t)
return t
def __repr__(self):
s = '<User login=\'{}\' tokens={}>'.format(self.login, len(self.tokens))
return s
| true | true |
1c493e966b7d54c69854c811b65ceb355625b0b4 | 347 | py | Python | Python3/0009-Palindrome-Number/soln.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0009-Palindrome-Number/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0009-Palindrome-Number/soln.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | class Solution:
def isPalindrome(self, x):
"""
:type x: int
:rtype: bool
"""
# solve it without converting the integer to a string
if x < 0:
return False
r = 0
origin = x
while x:
r = r * 10 + x % 10
x //= 10
return r == origin | 23.133333 | 61 | 0.420749 | class Solution:
def isPalindrome(self, x):
if x < 0:
return False
r = 0
origin = x
while x:
r = r * 10 + x % 10
x //= 10
return r == origin | true | true |
1c493ee7f94ba470d37424f4171f5c35c2ec9d91 | 15,082 | py | Python | vspk/v6/nufirewallacl.py | axxyhtrx/vspk-python | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 | [
"BSD-3-Clause"
] | 19 | 2016-03-07T12:34:22.000Z | 2020-06-11T11:09:02.000Z | vspk/v6/nufirewallacl.py | axxyhtrx/vspk-python | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 | [
"BSD-3-Clause"
] | 40 | 2016-06-13T15:36:54.000Z | 2020-11-10T18:14:43.000Z | vspk/v6/nufirewallacl.py | axxyhtrx/vspk-python | 4495882c6bcbb1ef51b14b9f4dc7efe46476ff50 | [
"BSD-3-Clause"
] | 15 | 2016-06-10T22:06:01.000Z | 2020-12-15T18:37:42.000Z | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUFirewallRulesFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUDomainsFetcher
from bambou import NURESTObject
class NUFirewallAcl(NURESTObject):
    """A firewall ACL (access control list) object of the VSD REST API.

    Instances can be populated attribute-by-attribute through keyword
    arguments, or from a Python dictionary passed as ``data``.

    Examples:
        >>> firewallacl = NUFirewallAcl(id=u'xxxx-xxx-xxx-xxx', name=u'FirewallAcl')
        >>> firewallacl = NUFirewallAcl(data=my_dict)
    """

    __rest_name__ = "firewallacl"
    __resource_name__ = "firewallacls"

    ## Constants
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"

    def __init__(self, **kwargs):
        """Initialize a FirewallAcl instance.

        All keyword arguments are forwarded to ``_compute_args`` so callers
        can set any exposed attribute directly or load the whole object via
        the special ``data`` argument.
        """
        super(NUFirewallAcl, self).__init__()

        # Every exposed attribute is backed by a private slot, unset at first.
        for attr in ("name", "last_updated_by", "last_updated_date", "active",
                     "default_allow_ip", "default_allow_non_ip", "description",
                     "embedded_metadata", "entity_scope", "creation_date",
                     "rule_ids", "auto_generate_priority", "owner",
                     "external_id"):
            setattr(self, "_" + attr, None)

        # Declare each attribute's REST wire name, type and constraints.
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="active", remote_name="active", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="default_allow_ip", remote_name="defaultAllowIP", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="default_allow_non_ip", remote_name="defaultAllowNonIP", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="rule_ids", remote_name="ruleIds", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="auto_generate_priority", remote_name="autoGeneratePriority", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers for the child collections reachable from this ACL.
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.firewall_rules = NUFirewallRulesFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.domains = NUDomainsFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self._compute_args(**kwargs)

    # -- Properties ------------------------------------------------------

    @property
    def name(self):
        """The name of the entity."""
        return self._name

    @name.setter
    def name(self, value):
        self._name = value

    @property
    def last_updated_by(self):
        """ID of the user who last updated the object (`lastUpdatedBy`)."""
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        self._last_updated_by = value

    @property
    def last_updated_date(self):
        """Time stamp when this object was last updated (`lastUpdatedDate`)."""
        return self._last_updated_date

    @last_updated_date.setter
    def last_updated_date(self, value):
        self._last_updated_date = value

    @property
    def active(self):
        """If enabled, this ACL or QOS entry is active."""
        return self._active

    @active.setter
    def active(self, value):
        self._active = value

    @property
    def default_allow_ip(self):
        """If enabled, a default Allow-All ACL is appended as the last entry (`defaultAllowIP`)."""
        return self._default_allow_ip

    @default_allow_ip.setter
    def default_allow_ip(self, value):
        self._default_allow_ip = value

    @property
    def default_allow_non_ip(self):
        """If enabled, non-IP traffic will be dropped (`defaultAllowNonIP`)."""
        return self._default_allow_non_ip

    @default_allow_non_ip.setter
    def default_allow_non_ip(self, value):
        self._default_allow_non_ip = value

    @property
    def description(self):
        """A description of the entity."""
        return self._description

    @description.setter
    def description(self, value):
        self._description = value

    @property
    def embedded_metadata(self):
        """Metadata objects embedded in the API response when the embedded-metadata flag is set (`embeddedMetadata`)."""
        return self._embedded_metadata

    @embedded_metadata.setter
    def embedded_metadata(self, value):
        self._embedded_metadata = value

    @property
    def entity_scope(self):
        """Scope of the entity, ENTERPRISE or GLOBAL (`entityScope`)."""
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        self._entity_scope = value

    @property
    def creation_date(self):
        """Time stamp when this object was created (`creationDate`)."""
        return self._creation_date

    @creation_date.setter
    def creation_date(self, value):
        self._creation_date = value

    @property
    def rule_ids(self):
        """Firewall rules associated with this firewall ACL (`ruleIds`)."""
        return self._rule_ids

    @rule_ids.setter
    def rule_ids(self, value):
        self._rule_ids = value

    @property
    def auto_generate_priority(self):
        """If enabled, entry priorities are randomly generated within the allowed range (`autoGeneratePriority`)."""
        return self._auto_generate_priority

    @auto_generate_priority.setter
    def auto_generate_priority(self, value):
        self._auto_generate_priority = value

    @property
    def owner(self):
        """Identifies the user that created this object."""
        return self._owner

    @owner.setter
    def owner(self, value):
        self._owner = value

    @property
    def external_id(self):
        """External object ID, used for integration with third-party systems (`externalID`)."""
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        self._external_id = value
| 30.164 | 296 | 0.604628 |
from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUFirewallRulesFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUDomainsFetcher
from bambou import NURESTObject
class NUFirewallAcl(NURESTObject):
__rest_name__ = "firewallacl"
__resource_name__ = "firewallacls"
ONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
def __init__(self, **kwargs):
super(NUFirewallAcl, self).__init__()
self._name = None
self._last_updated_by = None
self._last_updated_date = None
self._active = None
self._default_allow_ip = None
self._default_allow_non_ip = None
self._description = None
self._embedded_metadata = None
self._entity_scope = None
self._creation_date = None
self._rule_ids = None
self._auto_generate_priority = None
self._owner = None
self._external_id = None
self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="active", remote_name="active", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_ip", remote_name="defaultAllowIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="default_allow_non_ip", remote_name="defaultAllowNonIP", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="rule_ids", remote_name="ruleIds", attribute_type=list, is_required=False, is_unique=False)
self.expose_attribute(local_name="auto_generate_priority", remote_name="autoGeneratePriority", attribute_type=bool, is_required=False, is_unique=False)
self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.firewall_rules = NUFirewallRulesFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
self.domains = NUDomainsFetcher.fetcher_with_object(parent_object=self, relationship="child")
self._compute_args(**kwargs)
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def last_updated_by(self):
return self._last_updated_by
@last_updated_by.setter
def last_updated_by(self, value):
self._last_updated_by = value
@property
def last_updated_date(self):
return self._last_updated_date
@last_updated_date.setter
def last_updated_date(self, value):
self._last_updated_date = value
@property
def active(self):
return self._active
@active.setter
def active(self, value):
self._active = value
@property
def default_allow_ip(self):
return self._default_allow_ip
@default_allow_ip.setter
def default_allow_ip(self, value):
self._default_allow_ip = value
@property
def default_allow_non_ip(self):
return self._default_allow_non_ip
@default_allow_non_ip.setter
def default_allow_non_ip(self, value):
self._default_allow_non_ip = value
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
@property
def embedded_metadata(self):
return self._embedded_metadata
@embedded_metadata.setter
def embedded_metadata(self, value):
self._embedded_metadata = value
@property
def entity_scope(self):
return self._entity_scope
@entity_scope.setter
def entity_scope(self, value):
self._entity_scope = value
@property
def creation_date(self):
return self._creation_date
@creation_date.setter
def creation_date(self, value):
self._creation_date = value
@property
def rule_ids(self):
return self._rule_ids
@rule_ids.setter
def rule_ids(self, value):
self._rule_ids = value
@property
def auto_generate_priority(self):
return self._auto_generate_priority
@auto_generate_priority.setter
def auto_generate_priority(self, value):
self._auto_generate_priority = value
@property
def owner(self):
return self._owner
@owner.setter
def owner(self, value):
self._owner = value
@property
def external_id(self):
return self._external_id
@external_id.setter
def external_id(self, value):
self._external_id = value
| true | true |
1c493ef011476abc24d0368d37585b1c67c3570d | 1,548 | py | Python | umusicfy/user_profile/urls.py | CarlosMart626/umusicfy | 97e2166fe26d1fbe36df6bea435044ef3d367edf | [
"Apache-2.0"
] | null | null | null | umusicfy/user_profile/urls.py | CarlosMart626/umusicfy | 97e2166fe26d1fbe36df6bea435044ef3d367edf | [
"Apache-2.0"
] | 8 | 2020-06-05T18:08:05.000Z | 2022-01-13T00:44:30.000Z | umusicfy/user_profile/urls.py | CarlosMart626/umusicfy | 97e2166fe26d1fbe36df6bea435044ef3d367edf | [
"Apache-2.0"
] | null | null | null | from django.contrib.auth.decorators import login_required
from django.conf.urls import url
# Import Class Based Views
from .views import UserProfileView, UpdateUserProfileView, UpdateUserPasswordView, \
UserProfileDetailView, PlaylistDetailView, PlaylistCreateView, FollowUserProfileView, \
FollowPlaylistView, PlayListListView, AddToPlaylistView
# URL routes for the user-profile app. Every view is wrapped in
# login_required, so all of these pages require an authenticated session.
urlpatterns = [
    url(r'^$', login_required(UserProfileView.as_view()), name='user_profile'),
    url(r'^(?P<username>[\w-]+)/playlist/$', login_required(PlayListListView.as_view()), name='user_all_playlists'),
    url(r'^password/$', login_required(UpdateUserPasswordView.as_view()), name='user_change_password'),
    url(r'^update/$', login_required(UpdateUserProfileView.as_view()), name='user_update_profile'),
    url(r'^create-playlist/$', login_required(PlaylistCreateView.as_view()), name='user_create_playlist'),
    url(r'^add-song/(?P<playlist_id>[\w-]+)/(?P<song_id>[\w-]+)/$', login_required(AddToPlaylistView.as_view()),
        name='add_song_playlist'),
    url(r'^(?P<pk>[0-9]+)/$', login_required(UserProfileDetailView.as_view()), name='visit_user_profile'),
    # NOTE(review): the three routes named 'visit_user_profile' share one
    # name; reverse('visit_user_profile') resolves only to the last one —
    # confirm whether the follow routes should get distinct names.
    url(r'^follow-user/(?P<user_id>[0-9]+)/$', login_required(FollowUserProfileView.as_view()),
        name='visit_user_profile'),
    url(r'^follow-playlist/(?P<playlist_id>[0-9]+)/$', login_required(FollowPlaylistView.as_view()),
        name='visit_user_profile'),
    # Two-segment catch-all kept LAST: listed earlier it also matched
    # 'follow-user/<id>/' and 'follow-playlist/<id>/' (both segments fit
    # [\w-]+), which made those routes unreachable.
    url(r'^(?P<username>[\w-]+)/(?P<playlist_slug>[\w-]+)/$', login_required(PlaylistDetailView.as_view()),
        name='user_playlist'),
]
| 57.333333 | 116 | 0.720284 | from django.contrib.auth.decorators import login_required
from django.conf.urls import url
from .views import UserProfileView, UpdateUserProfileView, UpdateUserPasswordView, \
UserProfileDetailView, PlaylistDetailView, PlaylistCreateView, FollowUserProfileView, \
FollowPlaylistView, PlayListListView, AddToPlaylistView
urlpatterns = [
url(r'^$', login_required(UserProfileView.as_view()), name='user_profile'),
url(r'^(?P<username>[\w-]+)/playlist/$', login_required(PlayListListView.as_view()), name='user_all_playlists'),
url(r'^password/$', login_required(UpdateUserPasswordView.as_view()), name='user_change_password'),
url(r'^update/$', login_required(UpdateUserProfileView.as_view()), name='user_update_profile'),
url(r'^create-playlist/$', login_required(PlaylistCreateView.as_view()), name='user_create_playlist'),
url(r'^(?P<username>[\w-]+)/(?P<playlist_slug>[\w-]+)/$', login_required(PlaylistDetailView.as_view()),
name='user_playlist'),
url(r'^add-song/(?P<playlist_id>[\w-]+)/(?P<song_id>[\w-]+)/$', login_required(AddToPlaylistView.as_view()),
name='add_song_playlist'),
url(r'^(?P<pk>[0-9]+)/$', login_required(UserProfileDetailView.as_view()), name='visit_user_profile'),
url(r'^follow-user/(?P<user_id>[0-9]+)/$', login_required(FollowUserProfileView.as_view()),
name='visit_user_profile'),
url(r'^follow-playlist/(?P<playlist_id>[0-9]+)/$', login_required(FollowPlaylistView.as_view()),
name='visit_user_profile'),
]
| true | true |
1c49402d0cfc1395f249ead6417ae479d1dbeb4c | 3,495 | py | Python | scripts/compute_scores.py | JannikWirtz/importance-sampling-diagnostics | 9c9cab2ac91081ae2b64f99891504155057c09e3 | [
"MIT"
] | 289 | 2017-08-03T17:30:12.000Z | 2022-03-30T12:04:21.000Z | scripts/compute_scores.py | JannikWirtz/importance-sampling-diagnostics | 9c9cab2ac91081ae2b64f99891504155057c09e3 | [
"MIT"
] | 34 | 2017-08-03T21:47:49.000Z | 2021-06-16T17:59:45.000Z | scripts/compute_scores.py | JannikWirtz/importance-sampling-diagnostics | 9c9cab2ac91081ae2b64f99891504155057c09e3 | [
"MIT"
] | 58 | 2017-08-06T01:10:24.000Z | 2022-03-07T00:30:24.000Z | #!/usr/bin/env python
#
# Copyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/
# Written by Angelos Katharopoulos <angelos.katharopoulos@idiap.ch>
#
import argparse
import numpy as np
from importance_sampling import models
from importance_sampling.datasets import CIFAR10, CIFAR100, MNIST, \
OntheflyAugmentedImages, PennTreeBank
from importance_sampling.model_wrappers import OracleWrapper
from importance_sampling.utils.functional import compose, partial, ___
def load_dataset(dataset):
    """Instantiate and return the dataset selected by name.

    Supported names: "mnist", "cifar10", "cifar100", "cifar10-augmented",
    "cifar100-augmented", "ptb". Raises KeyError for an unknown name.
    """
    # The two augmented CIFAR variants previously duplicated this config
    # verbatim; keep one copy and hand each consumer its own dict() so a
    # consumer mutating it cannot affect the other.
    augmentation = dict(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=False
    )
    datasets = {
        "mnist": MNIST,
        "cifar10": CIFAR10,
        "cifar100": CIFAR100,
        "cifar10-augmented": compose(
            partial(OntheflyAugmentedImages, ___, dict(augmentation)),
            CIFAR10
        ),
        "cifar100-augmented": compose(
            partial(OntheflyAugmentedImages, ___, dict(augmentation)),
            CIFAR100
        ),
        # PTB is a sequence dataset; 20 is the sequence length.
        "ptb": partial(PennTreeBank, 20)
    }

    return datasets[dataset]()
def main(argv):
    """Score every training sample of a dataset with a trained model.

    Loads the requested model architecture and weights, then streams the
    chosen score ("loss" or "gnorm") for each training sample to stdout,
    one value per line.
    """
    parser = argparse.ArgumentParser(
        description="Plot the loss distribution of a model and dataset pair"
    )
    parser.add_argument(
        "model",
        choices=[
            "small_cnn", "cnn", "lstm_lm", "lstm_lm2", "lstm_lm3",
            "small_cnn_sq"
        ],
        help="Choose the type of the model"
    )
    parser.add_argument(
        "weights",
        help="The file containing the model weights"
    )
    parser.add_argument(
        "dataset",
        choices=[
            "mnist", "cifar10", "cifar100", "cifar10-augmented",
            "cifar100-augmented", "ptb"
        ],
        help="Choose the dataset to compute the loss"
    )
    parser.add_argument(
        "--score",
        choices=["gnorm", "loss"],
        default="loss",
        help="Choose a score to plot"
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=128,
        help="The batch size for computing the loss"
    )
    parser.add_argument(
        "--random_seed",
        type=int,
        default=0,
        help="A seed for the PRNG (mainly used for dataset generation)"
    )
    args = parser.parse_args(argv)

    # Seed before dataset construction so augmented datasets are reproducible.
    np.random.seed(args.random_seed)

    dataset = load_dataset(args.dataset)
    network = models.get(args.model)(dataset.shape, dataset.output_size)
    model = OracleWrapper(network, score=args.score)
    model.model.load_weights(args.weights)

    # Score the training set in batches and print one score per line.
    for i in range(0, dataset.train_size, args.batch_size):
        idxs = slice(i, i+args.batch_size)
        for s in model.score_batch(*dataset.train_data(idxs)):
            # print(s) with a single argument is valid on both Python 2
            # and Python 3 (the old `print s` statement was Py2-only).
            print(s)
# Script entry point: forward the CLI arguments (without the program name).
if __name__ == "__main__":
    import sys
    main(sys.argv[1:])
| 28.414634 | 76 | 0.595136 |
import argparse
import numpy as np
from importance_sampling import models
from importance_sampling.datasets import CIFAR10, CIFAR100, MNIST, \
OntheflyAugmentedImages, PennTreeBank
from importance_sampling.model_wrappers import OracleWrapper
from importance_sampling.utils.functional import compose, partial, ___
def load_dataset(dataset):
    """Instantiate and return the dataset registered under `dataset`.

    Raises KeyError for an unknown dataset name.
    """
    def augmented(base):
        # Wrap a dataset class so its images are augmented on the fly with
        # the standard CIFAR augmentation (small shifts + horizontal flips)
        return compose(
            partial(OntheflyAugmentedImages, ___, dict(
                featurewise_center=False,
                samplewise_center=False,
                featurewise_std_normalization=False,
                samplewise_std_normalization=False,
                zca_whitening=False,
                rotation_range=0,
                width_shift_range=0.1,
                height_shift_range=0.1,
                horizontal_flip=True,
                vertical_flip=False
            )),
            base
        )

    factories = {
        "mnist": MNIST,
        "cifar10": CIFAR10,
        "cifar100": CIFAR100,
        "cifar10-augmented": augmented(CIFAR10),
        "cifar100-augmented": augmented(CIFAR100),
        "ptb": partial(PennTreeBank, 20)
    }

    return factories[dataset]()
def main(argv):
    """Score every training example of a dataset with a trained model and
    print one score (loss or gradient norm) per line to stdout."""
    parser = argparse.ArgumentParser(
        description="Plot the loss distribution of a model and dataset pair"
    )
    parser.add_argument(
        "model",
        choices=[
            "small_cnn", "cnn", "lstm_lm", "lstm_lm2", "lstm_lm3",
            "small_cnn_sq"
        ],
        help="Choose the type of the model"
    )
    parser.add_argument(
        "weights",
        help="The file containing the model weights"
    )
    parser.add_argument(
        "dataset",
        choices=[
            "mnist", "cifar10", "cifar100", "cifar10-augmented",
            "cifar100-augmented", "ptb"
        ],
        help="Choose the dataset to compute the loss"
    )
    parser.add_argument(
        "--score",
        choices=["gnorm", "loss"],
        default="loss",
        help="Choose a score to plot"
    )
    parser.add_argument(
        "--batch_size",
        type=int,
        default=128,
        help="The batch size for computing the loss"
    )
    parser.add_argument(
        "--random_seed",
        type=int,
        default=0,
        help="A seed for the PRNG (mainly used for dataset generation)"
    )
    args = parser.parse_args(argv)

    np.random.seed(args.random_seed)
    dataset = load_dataset(args.dataset)
    network = models.get(args.model)(dataset.shape, dataset.output_size)
    model = OracleWrapper(network, score=args.score)
    model.model.load_weights(args.weights)

    # Stream the scores batch by batch so memory use stays constant
    for i in range(0, dataset.train_size, args.batch_size):
        idxs = slice(i, i + args.batch_size)
        for s in model.score_batch(*dataset.train_data(idxs)):
            # print(s) behaves identically on Python 2 and 3 for a single
            # argument; the old "print s" statement was Python-2-only syntax
            print(s)
if __name__ == "__main__":
    # Script entry point: forward command-line arguments (sans program name)
    import sys
    main(sys.argv[1:])
| false | true |
1c49404f0513b7d760f2819862a1b1a1b9b0b8f1 | 48,784 | py | Python | preproc/preproc_wifi.py | metehancekic/wireless-fingerprinting | 41872761260b3fc26f33acec983220e8b4d9f42f | [
"MIT"
] | 12 | 2020-03-05T12:24:37.000Z | 2022-01-07T15:10:37.000Z | preproc/preproc_wifi.py | metehancekic/wireless-fingerprinting | 41872761260b3fc26f33acec983220e8b4d9f42f | [
"MIT"
] | 5 | 2020-06-29T02:17:14.000Z | 2021-06-24T22:22:23.000Z | preproc/preproc_wifi.py | metehancekic/wireless-fingerprinting | 41872761260b3fc26f33acec983220e8b4d9f42f | [
"MIT"
] | 5 | 2020-11-01T17:49:46.000Z | 2022-03-05T02:52:11.000Z | '''
Contains code for fractionally spaced equalization, preamble detection
Also includes a modified version of Teledyne's data read and preprocessing code
'''
import numpy as np
import os
import json
import csv
import math
import fractions
import resampy
from tqdm import tqdm, trange
import matplotlib
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft, fftshift, ifftshift
import ipdb
from sklearn.preprocessing import normalize
def preprocess_wifi(data_dict, sample_duration, sample_rate, preprocess_type=1, progress=True):
    '''
    Preprocess every captured WiFi signal in data_dict in place.

    preprocess_type selects the pipeline (frame detection is only attempted
    on 200 Msps captures; other rates always get the type-1 treatment):
        1 - crop to sample_duration, RMS-normalize, shift to baseband, resample
        2 - detect frame start, crop, RMS-normalize, resample
        3 - detect frame start, fractionally equalize the preamble, RMS-normalize

    Args:
        data_dict: dict of parallel per-signal lists ('signal',
            'capture_sample_rate', 'freq_lower_edge', 'freq_upper_edge',
            'capture_frequency', ...); modified in place
        sample_duration: length (seconds) of signal to keep
        sample_rate: desired output sampling rate (Hz)
        preprocess_type: pipeline selector, see above
        progress: wrap the iteration in a tqdm progress bar

    Returns:
        The same data_dict with preprocessed signals and updated metadata.
    '''
    signal_indices = range(len(data_dict['data_file']))
    if progress is True:
        signal_indices = tqdm(signal_indices)
    flag = 0  # count of signals where frame detection failed
    for i in signal_indices:
        signal = data_dict['signal'][i]
        orig_sample_rate = data_dict['capture_sample_rate'][i]

        start_index = 0
        end_index = math.ceil(sample_duration * orig_sample_rate)

        # np.int was removed in NumPy 1.24; the builtin int() is equivalent
        if orig_sample_rate == int(200e6):
            if (preprocess_type == 2) or (preprocess_type == 3):
                lowFreq = data_dict['freq_lower_edge'][i]
                upFreq = data_dict['freq_upper_edge'][i]
                Fc = data_dict['capture_frequency'][i]
                signal, flag_i = detect_frame(signal, lowFreq, upFreq, Fc, verbose=False)
                flag = flag + flag_i
                if preprocess_type == 3:
                    signal = frac_eq_preamble(signal)

        start_index = int(start_index)
        end_index = int(end_index)
        if (preprocess_type == 1) or (preprocess_type == 2) or (orig_sample_rate != int(200e6)):
            signal = signal[start_index:end_index]  # extract needed section of signal

        with np.errstate(all='raise'):
            try:
                signal = signal / rms(signal)  # normalize signal
            except FloatingPointError:
                # Zero (or denormal) signal: fall back to a constant
                # unit-energy signal instead of propagating NaNs
                try:
                    signal += 1.0/np.sqrt(2*signal.size) + 1.0/np.sqrt(2*signal.size)*1j
                except FloatingPointError:
                    signal_size = end_index - start_index
                    signal = np.ones([signal_size]) * (1.0 + 1.0*1j)/np.sqrt(2*signal_size)

        if (preprocess_type == 1) or (orig_sample_rate != int(200e6)):
            freq_shift = (data_dict['freq_upper_edge'][i] +
                          data_dict['freq_lower_edge'][i])/2 - data_dict['capture_frequency'][i]
            # baseband signal w.r.t. center frequency
            signal = shift_frequency(signal, freq_shift, orig_sample_rate)
            # filter and downsample signal
            signal = resample(signal, orig_sample_rate, sample_rate)
        if (preprocess_type == 2):
            signal = resample(signal, orig_sample_rate, sample_rate)

        data_dict['signal'][i] = signal
        data_dict['center_frequency'][i] = (
            data_dict['freq_upper_edge'][i] + data_dict['freq_lower_edge'][i])/2.
        data_dict['sample_rate'][i] = sample_rate
    if (preprocess_type == 2) or (preprocess_type == 3):
        print('Successful frame detection on {:.2f}% of signals'.format(
            100.0-flag*100.0/len(data_dict['data_file'])))
    return data_dict
def frac_eq_preamble(rx, verbose=False):
    '''
    Fractionally spaced (T/10) least-squares equalization of the 802.11
    preamble: fit an FIR equalizer so that length-L windows of the 10x
    oversampled receive signal map onto the known transmitted preamble
    symbols, then return the 320 equalized preamble symbols.
    https://ieeexplore.ieee.org/document/489269
    '''
    # Known frequency-domain STF (64 subcarriers) -> time domain
    Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
                                     1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
    stf_64 = ifft(ifftshift(Stf_64))

    # Known frequency-domain LTF -> time domain
    Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
                    1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
    ltf = ifft(ifftshift(Ltf))

    # Ideal transmitted preamble: STF (with cyclic prefix) followed by the
    # LTF (with its cyclic prefix) -- 320 symbols total
    tx = np.concatenate((stf_64[:-32], stf_64, stf_64, ltf[-32:], ltf, ltf))

    n_taps = 160   # equalizer length L
    n_syms = 320   # number of preamble symbols N
    rx_col = rx.reshape([-1, 1])

    # Accumulate the normal equations  corr @ coeffs = xcorr  over one
    # length-L window per preamble symbol (windows hop by 10 samples)
    corr = np.zeros([n_taps, n_taps]) + 0j
    xcorr = np.zeros([n_taps, 1]) + 0j
    for sym in range(n_syms):
        off = 10 * sym
        win = rx_col[off:off + n_taps]
        corr += win.dot(win.conj().T)
        xcorr += win * tx[sym].conj()

    coeffs, _, _, _ = np.linalg.lstsq(corr, xcorr)

    # Apply the equalizer window-by-window to produce the symbol estimates
    eq = np.zeros([n_syms, 1]) + 0j
    for sym in range(n_syms):
        off = 10 * sym
        eq[sym] = rx_col[off:off + n_taps].T.dot(coeffs.conj())

    return eq.flatten()
def detect_frame(complex_signal, lowFreq, upFreq, Fc, verbose=False):
    '''
    Coarse WiFi frame detection on a 200 Msps capture.

    Steps:
      1. Zero all out-of-band FFT bins and re-center the occupied band at DC.
      2. Find the frame start from the lag-160 normalized autocorrelation of
         the (10x oversampled) short training field.

    Args:
        complex_signal: complex capture sampled at 200 MHz
        lowFreq, upFreq: occupied band edges (Hz)
        Fc: capture center frequency (Hz)
        verbose: print detection diagnostics

    Returns:
        (signal, flag): `signal` starts at the detected frame start; `flag`
        is 1 when detection failed and the guard-band fallback was used.
    '''
    Fs = 200e6
    flag = 0

    # ----------------------------------------------------
    # Filter out-of-band noise
    # ----------------------------------------------------
    N = complex_signal.shape[0]
    if N % 2 != 0:
        # Drop one sample so the FFT length is even
        complex_signal = complex_signal[:-1]
        N -= 1
    # np.int was removed in NumPy 1.24; builtin int() truncates identically
    low_ind = int((lowFreq-Fc)*(N/Fs) + N/2)
    up_ind = int((upFreq-Fc)*(N/Fs) + N/2)
    lag = int((-Fc + (lowFreq+upFreq)/2)*(N/Fs) + N/2) - int(N/2)
    X = fftshift(fft(complex_signal))
    X[:low_ind] = 0 + 0j
    X[up_ind:] = 0 + 0j
    X = np.roll(X, -lag)  # shift the occupied band to baseband
    complex_signal = ifft(ifftshift(X))

    # ----------------------------------------------------
    # Coarse frame detection (using STF)
    # ----------------------------------------------------
    guard_band_upsamp = int(2e-6*Fs)  # 2 usec
    n_win = 1600-160
    lag = 160
    search_length_stf_upsamp = min(2*guard_band_upsamp+1, int(complex_signal.size))
    a = np.zeros(search_length_stf_upsamp)+0j
    p = np.zeros(search_length_stf_upsamp)
    for n in range(search_length_stf_upsamp):
        sig1 = complex_signal[n:n+n_win].reshape(1, -1)
        sig2 = complex_signal[n+lag:n+n_win+lag].conj().reshape(1, -1)
        a[n] = sig1.dot(sig2.T)
        # Normalize by the geometric mean of the two window energies
        p[n] = np.sqrt(np.sum(np.abs(sig1)**2)*np.sum(np.abs(sig2)**2))
    autocorr_stf_upsamp = np.abs(a)/p
    frame_start_autocorr_upsamp = np.argmax(autocorr_stf_upsamp)

    # ----------------------------------------------------
    # Guard band sanity check
    # ----------------------------------------------------
    if frame_start_autocorr_upsamp <= 2*guard_band_upsamp:
        if verbose is True:
            print('Autocorr prediction = {}'.format(frame_start_autocorr_upsamp))
    else:
        if verbose is True:
            print('Autocorr detection failed\n Prediction = {}'.format(frame_start_autocorr_upsamp))
        # Fall back to the nominal guard-band start and flag the failure
        frame_start_autocorr_upsamp = guard_band_upsamp
        flag = 1

    return complex_signal[frame_start_autocorr_upsamp:], flag
def offset_compensate_preamble(preamble_in, fs=200e6, verbose=False, option=1):
    """
    Estimate and remove the carrier frequency offset (CFO) from a preamble.

    The coarse estimate uses the lag-L periodicity of the short training
    field, the fine estimate the lag-N periodicity of the long training
    field:
        df = (1/lag) * arg( sum_n s[n]* s[n+lag] ),   s[n] <- s[n] e^{-j n df}

    Args:
        preamble_in: complex preamble; 3200 samples at fs=200e6 or
            320 samples at fs=20e6
        fs: sampling frequency (200e6 or 20e6)
        verbose: unused, kept for interface compatibility
        option: 1 -> return the compensated preamble only;
                2 -> also return np.array([df_coarse, df_fine])

    Returns:
        preamble (option 1) or (preamble, freq_offset) (option 2)
    """
    preamble = preamble_in.copy()
    if fs == 200e6:
        if preamble.size != 3200:
            raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
        n_short = 1600  # Length of short preamble
        n_long = 1600   # Length of long preamble
        L = 160  # length of single short sequence
        N = 640  # length of single long sequence

        # Coarse estimation over the short training field
        sig3 = preamble[: n_short-L].conj().copy()
        sig4 = preamble[L: n_short].copy()
        df1 = 1./L * np.angle(sig3.dot(sig4.T))
        preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()

        # Fine estimation over the long training field. The plain 1-D dot
        # product yields a scalar df2; the old reshape(1, -1) form produced
        # a length-1 array, which made np.array([df1, df2]) an inhomogeneous
        # sequence that raises on NumPy >= 1.24.
        sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()
        sig6 = preamble[n_short + N+2*L: n_short + n_long].copy()
        df2 = 1./N * np.angle(sig5.dot(sig6))
        preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()

        freq_offset = np.array([df1, df2])
    elif fs == 20e6:
        if preamble.size != 320:
            raise Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))
        n_short = 160  # Length of short preamble
        n_long = 160   # Length of long preamble
        L = 16  # length of single short sequence
        N = 64  # length of single long sequence

        # Coarse estimation (second half of the STF only).
        # np.int was removed in NumPy 1.24; integer // is equivalent here.
        sig3 = preamble[n_short//2:n_short-L].conj().copy()
        sig4 = preamble[n_short//2+L:n_short].copy()
        df1 = 1./L * np.angle(sig3.dot(sig4.T))
        preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()

        # Fine estimation (scalar df2, see note above)
        sig5 = preamble[n_short+32:n_short+32+N].conj().copy()
        sig6 = preamble[n_short+N+32:n_short+n_long].copy()
        df2 = 1./N * np.angle(sig5.dot(sig6))
        preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()

        freq_offset = np.array([df1, df2])
    else:
        # Unsupported rates pass through uncompensated (previously this
        # left freq_offset undefined and option=2 raised a NameError)
        freq_offset = np.array([0., 0.])

    if option == 1:
        return preamble
    elif option == 2:
        return preamble, freq_offset
    else:
        raise NotImplementedError
def get_residuals_preamble(preamble_in, fs, method='subtraction', channel_method='frequency', verbose=False, label=''):
    """
    Reconstruct the preamble from its estimated CFO and channel, and return
    the residual between the measured preamble and the reconstruction.

    The residual is whatever the channel/CFO model cannot explain, i.e. the
    transmitter-specific signature used for fingerprinting.

    Args:
        preamble_in: complex preamble, 3200 samples (fs must be 200e6)
        fs: sampling frequency; only 200e6 is handled
        method: 'subtraction' -> measured - reconstructed;
                'division'    -> measured / (reconstructed + 0.001)
        channel_method: 'frequency' applies the channel per subcarrier;
                'time' convolves the ideal preamble with the impulse response
        verbose, label: unused, kept for interface compatibility

    Returns:
        (residuals, preamble_constructed)
    """
    preamble = preamble_in.copy()
    preamble_orig = preamble_in.copy()
    if fs == 200e6:
        if preamble.size != 3200:
            raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
        n_short = 1600
        n_long = 1600
        L = 160
        N = 640

        # ----------------------------------------------------
        # Frequency offset estimation and correction
        # ----------------------------------------------------
        # Coarse estimation from the lag-L periodicity of the STF
        sig3 = preamble[: n_short-L].conj().copy()
        sig4 = preamble[L: n_short].copy()
        df1 = 1./L * np.angle(sig3.dot(sig4.T))
        preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
        # Fine estimation from the lag-N periodicity of the LTF. The 1-D dot
        # product yields a scalar df2 (the old reshape(1, -1) form produced
        # a length-1 array; same value after broadcasting).
        sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()
        sig6 = preamble[n_short + N+2*L: n_short + n_long].copy()
        df2 = 1./N * np.angle(sig5.dot(sig6))
        preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
        # Total CFO rotation, re-applied when reconstructing the preamble
        cfo_total = np.multiply(np.exp(1j*np.arange(0, preamble.size)*df1).flatten(),
                                np.exp(1j*np.arange(0, preamble.size)*df2).flatten())

        # ------------------------------------------------------------------------
        # LTI channel estimation (with delay spread <= length of cyclic prefix)
        # ------------------------------------------------------------------------
        Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
                                         1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
        Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
                        1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        # np.int was removed in NumPy 1.24; builtin int() truncates identically
        Ltf1_rx = fftshift(
            fft(preamble[n_short+int(n_long/5):n_short+int(n_long/5 + n_long*2/5)]))
        Ltf2_rx = fftshift(fft(preamble[n_short+int(n_long/5 + n_long*2/5):n_short+n_long]))
        Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2

        ind_all = np.arange(-32, 32) + (N//2)
        H_hat = np.zeros((N)) + 1j*np.zeros((N))
        # Zero-padded reference LTF at the oversampled (10x) rate
        Ltf_interpolated = np.concatenate(
            (np.zeros(32*9) + 1j * np.zeros(32*9), Ltf, np.zeros(32*9) + 1j * np.zeros(32*9)))
        H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf  # valid because Ltf is 0/+1/-1
        h_hat = np.roll(ifft(ifftshift(H_hat)), -N//2)

        if channel_method == 'time':
            # Convolve the ideal preamble with the impulse-response estimate
            ltf_interpolated = ifft(ifftshift(Ltf_interpolated))
            ltf_total = np.concatenate(
                (ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))
            Stf_64_interpolated = np.concatenate(
                (np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))
            stf_64_interpolated = ifft(ifftshift(Stf_64_interpolated))
            stf_total = np.concatenate(
                (stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))
            preamble_constructed = cfo_total * (np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[
                N//2-1:-N//2])/rms(np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[N//2-1:-N//2])
        elif channel_method == 'frequency':
            # Apply the channel estimate per subcarrier in the frequency domain
            ltf_interpolated = ifft(ifftshift(H_hat * Ltf_interpolated))
            ltf_total = np.concatenate(
                (ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))
            Stf_64_interpolated = np.concatenate(
                (np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))
            stf_64_interpolated = ifft(ifftshift(H_hat * Stf_64_interpolated))
            stf_total = np.concatenate(
                (stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))
            preamble_constructed = cfo_total * np.concatenate((stf_total, ltf_total))

        if method == 'division':
            # Small bias avoids division by near-zero reconstruction samples
            residuals = preamble_orig/(preamble_constructed+0.001)
        elif method == 'subtraction':
            residuals = preamble_orig - preamble_constructed

    return residuals, preamble_constructed
def basic_equalize_preamble(preamble_in, fs, verbose=False, label=''):
    """
    Strip the effect of the wireless channel from a WiFi preamble.

    1. Estimate the LTI channel per subcarrier from the two long training
       symbols (delay spread assumed <= cyclic-prefix length).
    2. Divide the STF/LTF symbols by the channel estimate and rebuild the
       time-domain preamble.

    Args:
        preamble_in: complex preamble; 3200 samples at fs=200e6 or
            320 samples at fs=20e6
        fs: sampling frequency (200e6 or 20e6)
        verbose: plot channel estimates and equalized constellations
            (requires matplotlib)
        label: suffix appended to plot titles (200e6 branch only)

    Returns:
        preamble_eq - preamble with the channel stripped out
    """
    preamble = preamble_in.copy()
    if fs == 200e6:
        if preamble.size != 3200:
            raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
        n_short = 1600
        n_long = 1600
        L = 160
        N = 640

        # ------------------------------------------------------------------------
        # LTI channel estimation from the two long training symbols
        # ------------------------------------------------------------------------
        Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
                                         1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
        Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
                        1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        # np.int was removed in NumPy 1.24; builtin int() truncates identically
        Ltf1_rx = fftshift(
            fft(preamble[n_short+int(n_long/5):n_short+int(n_long/5 + n_long*2/5)]))
        Ltf2_rx = fftshift(fft(preamble[n_short+int(n_long/5 + n_long*2/5):n_short+n_long]))
        Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2

        ind_all = np.arange(-32, 32) + (N//2)
        H_hat = np.zeros((N)) + 1j*np.zeros((N))
        H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf  # valid because Ltf is 0/+1/-1

        if verbose is True:
            freq = np.arange(-32, 32)
            H_hat_coarse = H_hat[ind_all]
            h_hat_coarse = ifft(ifftshift(H_hat_coarse))

            plt.figure(figsize=[10, 3])
            plt.subplot(1, 2, 1)
            plt.stem(freq, np.abs(H_hat_coarse))
            plt.grid(True)
            plt.title('Magnitude')
            plt.xlabel('Frequency bin')
            plt.subplot(1, 2, 2)
            plt.stem(freq, np.angle(H_hat_coarse))
            plt.title('Phase')
            plt.xlabel('Frequency bin')
            plt.suptitle('Coarse estimation'+label)
            plt.grid(True)
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])

            plt.figure(figsize=[10, 3])
            plt.subplot(1, 2, 1)
            plt.stem(np.abs(h_hat_coarse))
            plt.title('Magnitude')
            plt.xlabel('Time (in samples)')
            plt.grid(True)
            plt.subplot(1, 2, 2)
            plt.stem(np.angle(h_hat_coarse))
            plt.title('Phase')
            plt.xlabel('Time (in samples)')
            plt.grid(True)
            plt.suptitle('Coarse estimation'+label)
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
            plt.show()

        # ----------------------------------------------------
        # Preamble equalization
        # ----------------------------------------------------
        ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + (N//2)
        ind_null = np.concatenate(
            (np.array([0]), np.arange(-(N//2), -32), np.arange(32, (N//2)))) + (N//2)
        mask_data_pilots = np.ones(N)
        mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
        ind_all_all = np.arange(-(N//2), (N//2)) + N//2
        ind_data_pilots = ind_all_all[mask_data_pilots == 1]

        Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))
        Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))
        Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))
        Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))
        # Small bias avoids division by near-zero channel estimates
        Stf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
        Stf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
        Ltf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
        Ltf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
        Stf_1_eq[ind_guard] = 0
        Stf_2_eq[ind_guard] = 0
        Ltf_1_eq[ind_guard] = 0
        Ltf_2_eq[ind_guard] = 0
        Stf_1_eq[ind_null] = 0
        Stf_2_eq[ind_null] = 0
        Ltf_1_eq[ind_null] = 0
        Ltf_2_eq[ind_null] = 0

        if verbose is True:
            Stf_1_eq_down = Stf_1_eq[ind_all]
            Stf_2_eq_down = Stf_2_eq[ind_all]
            Ltf_1_eq_down = Ltf_1_eq[ind_all]
            Ltf_2_eq_down = Ltf_2_eq[ind_all]

            plt.figure(figsize=[13, 4.8])
            plt.subplot(1, 3, 1)
            plt.scatter(Stf_1_eq_down.real, Stf_1_eq_down.imag)
            plt.title('Equalized STF - 1')
            plt.subplot(1, 3, 2)
            plt.scatter(Stf_2_eq_down.real, Stf_2_eq_down.imag)
            plt.title('Equalized STF - 2')
            plt.subplot(1, 3, 3)
            plt.scatter(Stf_64.real, Stf_64.imag)
            plt.title('Actual STF')
            plt.suptitle('Signal constellations')
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])

            plt.figure(figsize=[13, 4.8])
            plt.subplot(1, 3, 1)
            plt.scatter(Ltf_1_eq_down.real, Ltf_1_eq_down.imag)
            plt.title('Equalized LTF - 1')
            plt.subplot(1, 3, 2)
            plt.scatter(Ltf_2_eq_down.real, Ltf_2_eq_down.imag)
            plt.title('Equalized LTF - 2')
            plt.subplot(1, 3, 3)
            plt.scatter(Ltf.real, Ltf.imag)
            plt.title('Actual LTF')
            plt.suptitle('Signal constellations')
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
            plt.show()

        stf_1_eq = ifft(ifftshift(Stf_1_eq))
        stf_2_eq = ifft(ifftshift(Stf_2_eq))
        ltf_1_eq = ifft(ifftshift(Ltf_1_eq))
        ltf_2_eq = ifft(ifftshift(Ltf_2_eq))
        # Rebuild the time-domain preamble including cyclic prefixes
        preamble_eq = np.concatenate(
            (stf_1_eq[-(N//4):], stf_1_eq, stf_2_eq[-(N//4):], stf_2_eq, ltf_1_eq[-(N//2):], ltf_1_eq, ltf_2_eq))
    elif fs == 20e6:
        if preamble.size != 320:
            raise Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))
        n_short = 160
        n_long = 160

        # ------------------------------------------------------------------------
        # LTI channel estimation (with delay spread <= length of cyclic prefix)
        # ------------------------------------------------------------------------
        Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
                                         1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
        Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
                        1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
        L = 16
        N = 64
        Ltf1_rx = fftshift(
            fft(preamble[n_short+int(n_long/5):n_short+int(n_long/5 + n_long*2/5)]))
        Ltf2_rx = fftshift(fft(preamble[n_short+int(n_long/5 + n_long*2/5):n_short+n_long]))
        Ltf_mid_rx = fftshift(
            fft(preamble[n_short + 2*L - int(L/2):n_short + 2*L+N - int(L/2)]))
        Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2

        # Time-domain least-squares estimate of taps 0..L from the
        # CP-centered LTF window
        AA = np.zeros((N, N)) + 0j
        for m in range(N):
            for n in range(L+1):
                AA[m, n] = Ltf[m] * np.exp(-1j*2*np.pi*m*n/N)
        A = AA[:, :L+1] * np.exp(1j*np.pi*np.arange(L+1)).reshape(1, -1)
        ind_all = np.arange(-32, 32) + 32
        ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32
        ind_null = np.array([0]) + 32
        mask_data_pilots = np.ones(64)
        mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
        ind_data_pilots = ind_all[mask_data_pilots == 1]
        h_hat_small, residuals, rank, singular_values = np.linalg.lstsq(
            A[ind_data_pilots, :], Ltf_mid_rx[ind_data_pilots], rcond=None)
        h_hat = np.zeros(N)+0j
        h_hat[:L+1] = h_hat_small
        H_hat = fftshift(fft(h_hat))
        # NOTE(review): the least-squares estimate above is immediately
        # overridden by the simple per-subcarrier estimate below; it is kept
        # only for the verbose diagnostic plots
        H_hat = Ltf_avg_rx*Ltf

        if verbose is True:
            freq = np.arange(-32, 32)
            H_hat_coarse = Ltf_mid_rx*Ltf
            h_hat_coarse = ifft(ifftshift(H_hat_coarse))

            plt.figure(figsize=[10, 3])
            plt.subplot(1, 2, 1)
            plt.stem(freq, np.abs(H_hat_coarse))
            plt.grid(True)
            plt.title('Magnitude')
            plt.xlabel('Frequency bin')
            plt.subplot(1, 2, 2)
            plt.stem(freq, np.angle(H_hat_coarse))
            plt.title('Phase')
            plt.xlabel('Frequency bin')
            plt.suptitle('Coarse estimation')
            plt.grid(True)
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])

            plt.figure(figsize=[10, 3])
            plt.subplot(1, 2, 1)
            plt.stem(np.abs(h_hat_coarse))
            plt.title('Magnitude')
            plt.xlabel('Time (in samples)')
            plt.grid(True)
            plt.subplot(1, 2, 2)
            plt.stem(np.angle(h_hat_coarse))
            plt.title('Phase')
            plt.xlabel('Time (in samples)')
            plt.grid(True)
            plt.suptitle('Coarse estimation')
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])

            plt.figure(figsize=[10, 3])
            plt.subplot(1, 2, 1)
            plt.stem(freq, np.abs(H_hat))
            plt.grid(True)
            plt.title('Magnitude')
            plt.xlabel('Frequency bin')
            plt.subplot(1, 2, 2)
            plt.stem(freq, np.angle(H_hat))
            plt.title('Phase')
            plt.xlabel('Frequency bin')
            plt.suptitle('Frequency domain least squares estimation')
            plt.grid(True)
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])

            plt.figure(figsize=[10, 3])
            plt.subplot(1, 2, 1)
            plt.stem(np.abs(h_hat))
            plt.title('Magnitude')
            plt.xlabel('Time (in samples)')
            plt.grid(True)
            plt.subplot(1, 2, 2)
            plt.stem(np.angle(h_hat))
            plt.title('Phase')
            plt.xlabel('Time (in samples)')
            plt.grid(True)
            plt.suptitle('Frequency domain least squares estimation')
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])

        # ----------------------------------------------------
        # Preamble equalization
        # ----------------------------------------------------
        ind_all = np.arange(-32, 32) + 32
        ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32
        ind_null = np.array([0]) + 32
        mask_data_pilots = np.ones(64)
        mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
        ind_data_pilots = ind_all[mask_data_pilots == 1]

        Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))
        Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))
        Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))
        Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))
        Stf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
        Stf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
        Ltf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
        Ltf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
        Stf_1_eq[ind_guard] = 0
        Stf_2_eq[ind_guard] = 0
        Ltf_1_eq[ind_guard] = 0
        Ltf_2_eq[ind_guard] = 0
        Stf_1_eq[ind_null] = 0
        Stf_2_eq[ind_null] = 0
        Ltf_1_eq[ind_null] = 0
        Ltf_2_eq[ind_null] = 0

        if verbose is True:
            plt.figure(figsize=[13, 4.8])
            plt.subplot(1, 3, 1)
            plt.scatter(Stf_1_eq.real, Stf_1_eq.imag)
            plt.title('Equalized STF - 1')
            plt.subplot(1, 3, 2)
            plt.scatter(Stf_2_eq.real, Stf_2_eq.imag)
            plt.title('Equalized STF - 2')
            plt.subplot(1, 3, 3)
            plt.scatter(Stf_64.real, Stf_64.imag)
            plt.title('Actual STF')
            plt.suptitle('Signal constellations')
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])

            plt.figure(figsize=[13, 4.8])
            plt.subplot(1, 3, 1)
            plt.scatter(Ltf_1_eq.real, Ltf_1_eq.imag)
            plt.title('Equalized LTF - 1')
            plt.subplot(1, 3, 2)
            plt.scatter(Ltf_2_eq.real, Ltf_2_eq.imag)
            plt.title('Equalized LTF - 2')
            plt.subplot(1, 3, 3)
            plt.scatter(Ltf.real, Ltf.imag)
            plt.title('Actual LTF')
            plt.suptitle('Signal constellations')
            plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
            plt.show()

        stf_1_eq = ifft(ifftshift(Stf_1_eq))
        stf_2_eq = ifft(ifftshift(Stf_2_eq))
        ltf_1_eq = ifft(ifftshift(Ltf_1_eq))
        ltf_2_eq = ifft(ifftshift(Ltf_2_eq))
        # Rebuild the time-domain preamble including cyclic prefixes
        preamble_eq = np.concatenate(
            (stf_1_eq[-32:], stf_1_eq, stf_2_eq, ltf_1_eq[-32:], ltf_1_eq, ltf_2_eq))
    return preamble_eq
def rms(x):
    """Root-mean-square value of a (possibly complex) signal.

    Computed as sqrt(mean(x * conj(x))); for complex input the result is a
    complex scalar whose imaginary part is numerically zero.
    """
    mean_power = np.mean(np.multiply(x, np.conj(x)))
    return np.sqrt(mean_power)
def shift_frequency(vector, freq_shift, fs):
    """Shift the center frequency of a complex time-series signal.

    vector: complex time-series samples
    freq_shift: frequency shift amount in Hz
    fs: sampling frequency of the signal in Hz

    The 1/sqrt(2) factor keeps the power of the frequency-downconverted
    signal equal to the power of its passband counterpart.
    """
    t = np.arange(0, np.size(vector)) / fs  # time axis in seconds
    carrier = np.exp(-1j * 2 * np.pi * freq_shift * t) / np.sqrt(2)
    return vector * carrier  # baseband signal
def resample(vector, fs, dfs):
    """Resample `vector` from sample rate `fs` to desired rate `dfs`.

    Both rates are rounded to integers.  The signal is first upsampled to
    their least common multiple (only when that exceeds the original rate)
    and then brought down to the target rate, using resampy's
    Kaiser-windowed filter in both stages.
    """
    rate_in = int(round(fs))
    rate_out = int(round(dfs))
    rate_common = lcm(rate_in, rate_out)  # common sampling frequency
    if rate_common > rate_in:
        # Upsample from the input rate to the common rate
        vector = resampy.resample(vector, rate_in, rate_common, filter='kaiser_best')
    # Downsample from the common rate to the desired rate
    return resampy.resample(vector, rate_common, rate_out, filter='kaiser_best')
def lcm(a, b):
    """Least common multiple of integers a and b (0 if either is 0).

    Uses math.gcd: fractions.gcd was deprecated in Python 3.5 and removed
    in 3.9.  Integer floor division replaces the original
    ``int(b / gcd)``, whose float intermediate loses precision for large
    sample-rate values.
    """
    if not (a and b):
        return 0
    return a * (b // math.gcd(a, b))
def get_sliding_window(x, window_size=10, stride=1, fs=200e6, fs_natural=20e6):
    """Slice x (signals, samples, channels) into overlapping windows.

    window_size and stride are expressed in samples at fs_natural and are
    scaled by fs / fs_natural to samples at the capture rate fs.  Windows
    cut from every input signal are stacked along axis 0 of the result.
    """
    num_samples = x.shape[1]
    # np.int was removed in NumPy 1.24; plain int() is the portable spelling.
    window_size_samples = int(window_size * (fs / fs_natural))
    stride_samples = int(stride * (fs / fs_natural))
    # Collect windows and concatenate once at the end instead of
    # re-allocating the accumulator every iteration (O(n) vs O(n^2)).
    windows = []
    for i in tqdm(np.arange(0, num_samples - window_size_samples + stride_samples, stride_samples)):
        windows.append(x[:, i:i + window_size_samples, :])
    return np.concatenate(windows, axis=0)
def read_wifi(files, base_data_directory, device_map, progress=True):
    '''
    Read wifi capture data from the data directory.

    files: dict from parse_input_files(), whose 'csv_objects' entry maps
        each .sigmf-data file path to the list of signal reference numbers
        to extract from it.
    base_data_directory: root directory the relative file paths hang off.
    device_map: reverse map from device name to integer label [0, N-1].
    progress: when True, wrap the file iteration in a tqdm progress bar.

    Returns a dict of per-field dicts, each keyed by a running signal index.
    '''
    csv = files['csv_objects'].items()
    if progress is True:
        csv = tqdm(csv)
    data_dict = dict(signal={}, device_key={},  # Complex signal and device label [0, N-1] from device_map
                     sample_rate={}, capture_sample_rate={}, capture_frequency={}, capture_hw={},
                     center_frequency={}, freq_lower_edge={}, freq_upper_edge={},
                     reference_number={}, data_file={}, sample_start={}, sample_count={},
                     device_type={}, device_id={}, device_manufacturer={}
                     )
    signal_index = 0
    for file, signal_list in csv:
        # Example:
        #     file = 'adsb_gfi_3_dataset/10_sigmf_files_dataset/A-23937.sigmf-data'
        #     signal_list = ['A-23937-34', 'A-23937-54']
        # Strip leading (back)slashes so os.path.join does not treat the
        # path as absolute and discard base_data_directory.
        while file[0] == '/' or file[0] == '\\':
            file = file[1:]
        data_file = os.path.join(base_data_directory, file)
        metadata_file = data_file.replace('sigmf-data', 'sigmf-meta')
        # Context manager closes the metadata file promptly; the original
        # json.load(open(...)) leaked the file handle.
        with open(metadata_file) as meta_fp:
            all_signals = json.load(meta_fp)
        capture = dict(capture_sample_rate=all_signals['global']['core:sample_rate'],
                       sample_rate=all_signals['global']['core:sample_rate'],
                       capture_hw=all_signals['global']['core:hw'],
                       capture_frequency=all_signals['capture'][0]['core:frequency'],
                       data_file=data_file)
        for signal_name in signal_list:
            # Capture-level fields are replicated onto every signal row.
            for key, value in capture.items():
                data_dict[key][signal_index] = value
            capture_properties = all_signals['capture']
            signal_properties = get_json_signal(
                all_signals['annotations'], capture_properties[0], signal_name, type='wifi')
            for key, value in signal_properties.items():
                data_dict[key][signal_index] = value
            device_id = signal_properties['device_id']
            data_dict['device_key'][signal_index] = device_map[device_id]
            filename = data_dict['data_file'][signal_index]
            start_sample = data_dict['sample_start'][signal_index]
            sample_count = data_dict['sample_count'][signal_index]
            data, buffer_start, buffer_end = read_sample(
                filename, start_sample, sample_count, desired_buffer=0)
            data_dict['signal'][signal_index] = data
            data_dict['center_frequency'][signal_index] = data_dict['capture_frequency'][signal_index]
            signal_index = signal_index + 1
    return data_dict
def parse_input_files(input_csv, devices_csv):
    '''
    Parse the wifi dataset description CSVs.

    devices_csv: one device name per row; row order defines the label index.
    input_csv: each row is a data-file name followed by its signal names.

    Returns a dict with:
        device_list - device names, in file order
        device_map  - reverse map from device name to its index
        csv_objects - map from data-file name to its list of signal names
    '''
    device_list = []
    device_map = {}
    csv_objects = {}
    with open(devices_csv) as devices_csv_file:
        for index, row in enumerate(csv.reader(devices_csv_file, delimiter=',')):
            device_list.append(row[0])
            device_map[row[0]] = index
    with open(input_csv) as input_csv_file:
        for row in csv.reader(input_csv_file, delimiter=','):
            csv_objects[row[0]] = row[1:]
    return {'device_list': device_list,
            'device_map': device_map,
            'csv_objects': csv_objects}
def get_json_signal(json_annotations, capture, signal_id, type=None):
    '''
    Find the annotation for signal_id and return its property dict.

    json_annotations: list of sigmf annotation dicts.
    capture: capture-level dict (supplies the band edges for ADS-B).
    signal_id: 'capture_details:signal_reference_number' value to look for.
    type: signal type override; when None it is taken from the first element
        of the 'rfml:label' entry, or set to "unknown" if there is no label.

    Returns the property dict for "wifi" / "ADS-B" signals, or None when the
    signal is not found or its type is not recognized.
    '''
    for signal in json_annotations:
        if signal != {} and signal['capture_details:signal_reference_number'] == signal_id:
            if 'rfml:label' in signal:
                signal_label = signal['rfml:label']
                if type is None:
                    type = signal_label[0]
            else:
                # Placeholder label.  The original wrote
                # tuple(None, None, None), which is a TypeError at runtime
                # (tuple() takes a single iterable argument).
                signal_label = (None, None, None)
                if type is None:
                    type = "unknown"
            if type == "wifi":
                return {'freq_lower_edge': signal['core:freq_lower_edge'],
                        'freq_upper_edge': signal['core:freq_upper_edge'],
                        'sample_start': signal['core:sample_start'],
                        'sample_count': signal['core:sample_count'],
                        'device_type': signal_label[0],
                        'device_manufacturer': signal_label[1],
                        'device_id': signal_label[2]}
            elif type == "ADS-B":
                return{'snr': signal['capture_details:SNRdB'],
                      'reference_number': signal['capture_details:signal_reference_number'],
                      'freq_lower_edge': capture['core:freq_lower_edge'],
                      'freq_upper_edge': capture['core:freq_upper_edge'],
                      'sample_start': signal['core:sample_start'],
                      'sample_count': signal['core:sample_count'],
                      'device_type': signal_label[0],
                      'device_id': signal_label[1]}
            else:
                print('Unknown signal type', type)
                return None
    return None
def read_sample(filename, start_sample, sample_count, desired_buffer):
    '''
    Read interleaved 16-bit I/Q samples from a binary capture file.

    Up to desired_buffer extra samples are read on each side of the
    requested [start_sample, start_sample + sample_count) range; the
    leading buffer is clipped at the start of the file and the trailing
    buffer shrinks if the read hits EOF.

    Returns (complex_samples, buffer_start, buffer_end), where the buffer
    values report how many extra samples were actually read on each side.
    '''
    buffer_start = min(desired_buffer, start_sample)
    buffer_end = desired_buffer
    sample_count += (buffer_start + buffer_end)
    with open(filename, "rb") as f:
        # 4 bytes per complex sample: two interleaved int16s (I then Q).
        f.seek((start_sample - buffer_start) * 4)
        raw = np.fromfile(f, dtype='int16', count=2*sample_count)
    samples_read = int(raw.size / 2)
    # Shrink the trailing buffer by however many samples EOF cut off.
    buffer_end -= (sample_count - samples_read)
    # De-interleave into (samples, 2): column 0 is real, column 1 is imag.
    iq = raw.reshape([samples_read, 2])
    return iq[:, 0] + 1j*iq[:, 1], buffer_start, buffer_end
| 40.755221 | 174 | 0.55426 |
import numpy as np
import os
import json
import csv
import math
import fractions
import resampy
from tqdm import tqdm, trange
import matplotlib
import matplotlib.pyplot as plt
from scipy.fftpack import fft, ifft, fftshift, ifftshift
import ipdb
from sklearn.preprocessing import normalize
def preprocess_wifi(data_dict, sample_duration, sample_rate, preprocess_type=1, progress=True):
signal_indices = range(len(data_dict['data_file']))
if progress is True:
signal_indices = tqdm(signal_indices)
flag = 0
for i in signal_indices:
signal = data_dict['signal'][i]
orig_sample_rate = data_dict['capture_sample_rate'][i]
start_index = 0
end_index = math.ceil(sample_duration * orig_sample_rate)
if orig_sample_rate == np.int(200e6):
if (preprocess_type == 2) or (preprocess_type == 3):
lowFreq = data_dict['freq_lower_edge'][i]
upFreq = data_dict['freq_upper_edge'][i]
Fc = data_dict['capture_frequency'][i]
signal, flag_i = detect_frame(signal, lowFreq, upFreq, Fc, verbose=False)
flag = flag + flag_i
if preprocess_type == 3:
signal = frac_eq_preamble(signal)
start_index = np.int(start_index)
end_index = np.int(end_index)
if (preprocess_type == 1) or (preprocess_type == 2) or (orig_sample_rate != np.int(200e6)):
signal = signal[start_index:end_index]
with np.errstate(all='raise'):
try:
signal = signal / rms(signal)
except FloatingPointError:
try:
signal += 1.0/np.sqrt(2*signal.size) + 1.0/np.sqrt(2*signal.size)*1j
except FloatingPointError:
signal_size = end_index - start_index
signal = np.ones([signal_size]) * (1.0 + 1.0*1j)/np.sqrt(2*signal_size)
if (preprocess_type == 1) or (orig_sample_rate != np.int(200e6)):
freq_shift = (data_dict['freq_upper_edge'][i] +
data_dict['freq_lower_edge'][i])/2 - data_dict['capture_frequency'][i]
signal = shift_frequency(signal, freq_shift, orig_sample_rate)
signal = resample(signal, orig_sample_rate, sample_rate)
if (preprocess_type == 2):
signal = resample(signal, orig_sample_rate, sample_rate)
data_dict['signal'][i] = signal
data_dict['center_frequency'][i] = (
data_dict['freq_upper_edge'][i] + data_dict['freq_lower_edge'][i])/2.
data_dict['sample_rate'][i] = sample_rate
if (preprocess_type == 2) or (preprocess_type == 3):
print('Successful frame detection on {:.2f}% of signals'.format(
100.0-flag*100.0/len(data_dict['data_file'])))
return data_dict
def frac_eq_preamble(rx, verbose=False):
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
stf_64 = ifft(ifftshift(Stf_64))
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
ltf = ifft(ifftshift(Ltf))
tx = np.concatenate((stf_64[:-32], stf_64, stf_64, ltf[-32:], ltf, ltf))
L = 160
N = 320
rx = rx.reshape([-1, 1])
R = np.zeros([L, L]) + 0j
p = np.zeros([L, 1]) + 0j
for i in range(N):
j = 10*i
R += rx[j:j+L].dot(rx[j:j+L].conj().T)
p += rx[j:j+L] * tx[i].conj()
c, residuals, rank, sing = np.linalg.lstsq(R, p)
signal_eq = np.zeros([N, 1]) + 0j
for i in range(N):
j = 10*i
signal_eq[i] = rx[j:j+L].T.dot(c.conj())
return signal_eq.flatten()
def detect_frame(complex_signal, lowFreq, upFreq, Fc, verbose=False):
Fs = 200e6
flag = 0
N = complex_signal.shape[0]
if N % 2 != 0:
complex_signal = complex_signal[:-1]
N -= 1
low_ind = np.int((lowFreq-Fc)*(N/Fs) + N/2)
up_ind = np.int((upFreq-Fc)*(N/Fs) + N/2)
lag = np.int((-Fc + (lowFreq+upFreq)/2)*(N/Fs) + N/2) - np.int(N/2)
X = fftshift(fft(complex_signal))
X[:low_ind] = 0 + 0j
X[up_ind:] = 0 + 0j
X = np.roll(X, -lag)
complex_signal = ifft(ifftshift(X))
guard_band_upsamp = np.int(2e-6*Fs)
n_win = 1600-160
lag = 160
search_length_stf_upsamp = min(2*guard_band_upsamp+1, np.int(complex_signal.size))
autocorr_stf_upsamp = np.zeros(search_length_stf_upsamp)
a = np.zeros(search_length_stf_upsamp)+0j
p = np.zeros(search_length_stf_upsamp)
for n in range(search_length_stf_upsamp):
sig1 = complex_signal[n:n+n_win].reshape(1, -1)
sig2 = complex_signal[n+lag:n+n_win+lag].conj().reshape(1, -1)
a[n] = sig1.dot(sig2.T)
p[n] = np.sqrt(np.sum(np.abs(sig1)**2)*np.sum(np.abs(sig2)**2))
autocorr_stf_upsamp = np.abs(a)/p
frame_start_autocorr_upsamp = np.argmax(autocorr_stf_upsamp)
n_short_upsamp = 1600
if frame_start_autocorr_upsamp <= 2*guard_band_upsamp:
if verbose == True:
print('Autocorr prediction = {}'.format(frame_start_autocorr_upsamp))
else:
if verbose == True:
print('Autocorr detection failed\n Prediction = {}'.format(frame_start_autocorr_upsamp))
frame_start_autocorr_upsamp = guard_band_upsamp
flag = 1
return complex_signal[frame_start_autocorr_upsamp:], flag
def offset_compensate_preamble(preamble_in, fs=200e6, verbose=False, option=1):
preamble = preamble_in.copy()
if fs == 200e6:
if preamble.size != 3200:
raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
n_short = 1600
n_long = 1600
L = 160
N = 640
sig3 = preamble[: n_short-L].conj().copy()
sig4 = preamble[L: n_short].copy()
df1 = 1./L * np.angle(sig3.dot(sig4.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()
sig6 = preamble[n_short + N+2*L: n_short + n_long].reshape(1, -1).copy()
df2 = 1./N * np.angle(sig5.dot(sig6.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
freq_offset = np.array([df1, df2])
elif fs == 20e6:
if preamble.size != 320:
raise Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))
n_short = 160
n_long = 160
L = 16
N = 64
sig3 = preamble[np.int(n_short/2):n_short-L].conj().copy()
sig4 = preamble[np.int(n_short/2)+L:n_short].copy()
df1 = 1./L * np.angle(sig3.dot(sig4.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
sig5 = preamble[n_short+32:n_short+32+N].conj().copy()
sig6 = preamble[n_short+N+32:n_short+n_long].reshape(1, -1).copy()
df2 = 1./N * np.angle(sig5.dot(sig6.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
freq_offset = np.array([df1, df2])
if option == 1:
return preamble
elif option == 2:
return preamble, freq_offset
else:
raise NotImplementedError
def get_residuals_preamble(preamble_in, fs, method='subtraction', channel_method='frequency', verbose=False, label=''):
preamble = preamble_in.copy()
preamble_orig = preamble_in.copy()
if fs == 200e6:
if preamble.size != 3200:
raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
n_short = 1600
n_long = 1600
L = 160
N = 640
sig3 = preamble[: n_short-L].conj().copy()
sig4 = preamble[L: n_short].copy()
df1 = 1./L * np.angle(sig3.dot(sig4.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df1).flatten()
sig5 = preamble[n_short + 2*L: n_short + 2*L + N].conj().copy()
sig6 = preamble[n_short + N+2*L: n_short + n_long].reshape(1, -1).copy()
df2 = 1./N * np.angle(sig5.dot(sig6.T))
preamble *= np.exp(-1j*np.arange(0, preamble.size)*df2).flatten()
freq_offset = np.array([df1, df2])
cfo_total = np.multiply(np.exp(1j*np.arange(0, preamble.size)*df1).flatten(),
np.exp(1j*np.arange(0, preamble.size)*df2).flatten())
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
Ltf1_rx = fftshift(
fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))
Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))
Ltf_mid_rx = fftshift(
fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - np.int(L/2)]))
Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2
ind_all = np.arange(-32, 32) + (N//2)
H_hat = np.zeros((N)) + 1j*np.zeros((N))
Ltf_interpolated = np.concatenate(
(np.zeros(32*9) + 1j * np.zeros(32*9), Ltf, np.zeros(32*9) + 1j * np.zeros(32*9)))
H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf
h_hat = np.roll(ifft(ifftshift(H_hat)), -N//2)
if channel_method == 'time':
ltf_interpolated = ifft(ifftshift(Ltf_interpolated))
ltf_total = np.concatenate(
(ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))
Stf_64_interpolated = np.concatenate(
(np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))
stf_64_interpolated = ifft(ifftshift(Stf_64_interpolated))
stf_total = np.concatenate(
(stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))
preamble_constructed = cfo_total * (np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[
N//2-1:-N//2])/rms(np.convolve(np.concatenate((stf_total, ltf_total)), h_hat)[N//2-1:-N//2])
elif channel_method == 'frequency':
ltf_interpolated = ifft(ifftshift(H_hat * Ltf_interpolated))
ltf_total = np.concatenate(
(ltf_interpolated[-N//2:], ltf_interpolated, ltf_interpolated))
Stf_64_interpolated = np.concatenate(
(np.zeros(32*9) + 1j * np.zeros(32*9), Stf_64, np.zeros(32*9) + 1j * np.zeros(32*9)))
stf_64_interpolated = ifft(ifftshift(H_hat * Stf_64_interpolated))
stf_total = np.concatenate(
(stf_64_interpolated[-N//2:], stf_64_interpolated, stf_64_interpolated))
preamble_constructed = cfo_total * np.concatenate((stf_total, ltf_total))
if method == 'division':
residuals = preamble_orig/(preamble_constructed+0.001)
elif method == 'subtraction':
residuals = preamble_orig - preamble_constructed
return residuals, preamble_constructed
def basic_equalize_preamble(preamble_in, fs, verbose=False, label=''):
preamble = preamble_in.copy()
if fs == 200e6:
if preamble.size != 3200:
raise Exception('Size of preamble is {}, but it should be 3200.'.format(preamble.size))
n_short = 1600
n_long = 1600
L = 160
N = 640
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
Ltf1_rx = fftshift(
fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))
Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))
Ltf_mid_rx = fftshift(
fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - np.int(L/2)]))
Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2
(-32, 32) + (N//2)
H_hat = np.zeros((N)) + 1j*np.zeros((N))
H_hat[ind_all] = Ltf_avg_rx[ind_all]*Ltf
if verbose is True:
freq = np.arange(-32, 32)
H_hat_coarse = H_hat[ind_all]
h_hat_coarse = ifft(ifftshift(H_hat_coarse))
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(freq, np.abs(H_hat_coarse))
plt.grid(True)
plt.title('Magnitude')
plt.xlabel('Frequency bin')
plt.subplot(1, 2, 2)
plt.stem(freq, np.angle(H_hat_coarse))
plt.title('Phase')
plt.xlabel('Frequency bin')
plt.suptitle('Coarse estimation'+label)
plt.grid(True)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(np.abs(h_hat_coarse))
plt.title('Magnitude')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.subplot(1, 2, 2)
plt.stem(np.angle(h_hat_coarse))
plt.title('Phase')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.suptitle('Coarse estimation'+label)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.show()
ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + (N//2)
ind_null = np.concatenate(
(np.array([0]), np.arange(-(N//2), -32), np.arange(32, (N//2)))) + (N//2)
ind_pilots = np.array([-21, -7, 7, 21]) + (N//2)
mask_data = np.ones(N)
mask_data_pilots = np.ones(N)
mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0
mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
ind_all_all = np.arange(-(N//2), (N//2)) + N//2
ind_data = ind_all_all[mask_data == 1]
ind_data_pilots = ind_all_all[mask_data_pilots == 1]
Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))
Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))
Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))
Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))
Stf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Stf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Ltf_1_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Ltf_2_eq[ind_data_pilots] /= (H_hat[ind_data_pilots]+0.001)
Stf_1_eq[ind_guard] = 0
Stf_2_eq[ind_guard] = 0
Ltf_1_eq[ind_guard] = 0
Ltf_2_eq[ind_guard] = 0
Stf_1_eq[ind_null] = 0
Stf_2_eq[ind_null] = 0
Ltf_1_eq[ind_null] = 0
Ltf_2_eq[ind_null] = 0
if verbose is True:
Stf_1_eq_down = Stf_1_eq[ind_all]
Stf_2_eq_down = Stf_2_eq[ind_all]
Ltf_1_eq_down = Ltf_1_eq[ind_all]
Ltf_2_eq_down = Ltf_2_eq[ind_all]
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Stf_1_eq_down.real, Stf_1_eq_down.imag)
plt.title('Equalized STF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Stf_2_eq_down.real, Stf_2_eq_down.imag)
plt.title('Equalized STF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Stf_64.real, Stf_64.imag)
plt.title('Actual STF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Ltf_1_eq_down.real, Ltf_1_eq_down.imag)
plt.title('Equalized LTF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Ltf_2_eq_down.real, Ltf_2_eq_down.imag)
plt.title('Equalized LTF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Ltf.real, Ltf.imag)
plt.title('Actual LTF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.show()
stf_1_eq = ifft(ifftshift(Stf_1_eq))
stf_2_eq = ifft(ifftshift(Stf_2_eq))
ltf_1_eq = ifft(ifftshift(Ltf_1_eq))
ltf_2_eq = ifft(ifftshift(Ltf_2_eq))
preamble_eq = np.concatenate(
(stf_1_eq[-(N//4):], stf_1_eq, stf_2_eq[-(N//4):], stf_2_eq, ltf_1_eq[-(N//2):], ltf_1_eq, ltf_2_eq))
== 20e6:
if preamble.size != 320:
raise Exception('Size of preamble is {}, but it should be 320.'.format(preamble.size))
n_short = 160
n_long = 160
Stf_64 = np.sqrt(13/6)*np.array([0, 0, 0, 0, 0, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0,
1+1j, 0, 0, 0, 0, 0, 0, 0, -1-1j, 0, 0, 0, -1-1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 1+1j, 0, 0, 0, 0, 0, 0, 0])
Ltf = np.array([0, 0, 0, 0, 0, 0, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, 1, 1,
1, 0, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
L = 16
N = 64
Ltf1_rx = fftshift(
fft(preamble[n_short+np.int(n_long/5):n_short+np.int(n_long/5 + n_long*2/5)]))
Ltf2_rx = fftshift(fft(preamble[n_short+np.int(n_long/5 + n_long*2/5):n_short+n_long]))
Ltf_mid_rx = fftshift(
fft(preamble[n_short + 2*L - np.int(L/2):n_short + 2*L+N - np.int(L/2)]))
Ltf_avg_rx = (Ltf1_rx + Ltf2_rx)/2
AA = np.zeros((N, N)) + 0j
for m in range(N):
for n in range(L+1):
AA[m, n] = Ltf[m] * np.exp(-1j*2*np.pi*m*n/N)
A = AA[:, :L+1] * np.exp(1j*np.pi*np.arange(L+1)).reshape(1, -1)
ind_all = np.arange(-32, 32) + 32
ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32
ind_null = np.array([0]) + 32
mask_data_pilots = np.ones(64)
mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
ind_data_pilots = ind_all[mask_data_pilots == 1]
h_hat_small, residuals, rank, singular_values = np.linalg.lstsq(
A[ind_data_pilots, :], Ltf_mid_rx[ind_data_pilots], rcond=None)
h_hat = np.zeros(N)+0j
h_hat[:L+1] = h_hat_small
H_hat = fftshift(fft(h_hat))
H_hat = Ltf_avg_rx*Ltf
if verbose is True:
freq = np.arange(-32, 32)
H_hat_coarse = Ltf_mid_rx*Ltf
h_hat_coarse = ifft(ifftshift(H_hat_coarse))
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(freq, np.abs(H_hat_coarse))
plt.grid(True)
plt.title('Magnitude')
plt.xlabel('Frequency bin')
plt.subplot(1, 2, 2)
plt.stem(freq, np.angle(H_hat_coarse))
plt.title('Phase')
plt.xlabel('Frequency bin')
plt.suptitle('Coarse estimation')
plt.grid(True)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(np.abs(h_hat_coarse))
plt.title('Magnitude')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.subplot(1, 2, 2)
plt.stem(np.angle(h_hat_coarse))
plt.title('Phase')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.suptitle('Coarse estimation')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(freq, np.abs(H_hat))
plt.grid(True)
plt.title('Magnitude')
plt.xlabel('Frequency bin')
plt.subplot(1, 2, 2)
plt.stem(freq, np.angle(H_hat))
plt.title('Phase')
plt.xlabel('Frequency bin')
plt.suptitle('Frequency domain least squares estimation')
plt.grid(True)
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])
plt.figure(figsize=[10, 3])
plt.subplot(1, 2, 1)
plt.stem(np.abs(h_hat))
plt.title('Magnitude')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.subplot(1, 2, 2)
plt.stem(np.angle(h_hat))
plt.title('Phase')
plt.xlabel('Time (in samples)')
plt.grid(True)
plt.suptitle('Frequency domain least squares estimation')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.9])
ind_all = np.arange(-32, 32) + 32
ind_guard = np.concatenate((np.arange(-32, -26), np.arange(27, 32))) + 32
ind_null = np.array([0]) + 32
ind_pilots = np.array([-21, -7, 7, 21]) + 32
mask_data = np.ones(64)
mask_data_pilots = np.ones(64)
mask_data[list(np.concatenate((ind_guard, ind_null, ind_pilots)))] = 0
mask_data_pilots[list(np.concatenate((ind_guard, ind_null)))] = 0
ind_data = ind_all[mask_data == 1]
ind_data_pilots = ind_all[mask_data_pilots == 1]
Stf_1_eq = fftshift(fft(preamble[n_short-2*N:n_short-N]))
Stf_2_eq = fftshift(fft(preamble[n_short-N:n_short]))
Ltf_1_eq = fftshift(fft(preamble[n_short+n_long-2*N:n_short+n_long-N]))
Ltf_2_eq = fftshift(fft(preamble[n_short+n_long-N:n_short+n_long]))
Stf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Stf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Ltf_1_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Ltf_2_eq[ind_data_pilots] /= H_hat[ind_data_pilots]
Stf_1_eq[ind_guard] = 0
Stf_2_eq[ind_guard] = 0
Ltf_1_eq[ind_guard] = 0
Ltf_2_eq[ind_guard] = 0
Stf_1_eq[ind_null] = 0
Stf_2_eq[ind_null] = 0
Ltf_1_eq[ind_null] = 0
Ltf_2_eq[ind_null] = 0
if verbose is True:
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Stf_1_eq.real, Stf_1_eq.imag)
plt.title('Equalized STF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Stf_2_eq.real, Stf_2_eq.imag)
plt.title('Equalized STF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Stf_64.real, Stf_64.imag)
plt.title('Actual STF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.figure(figsize=[13, 4.8])
plt.subplot(1, 3, 1)
plt.scatter(Ltf_1_eq.real, Ltf_1_eq.imag)
plt.title('Equalized LTF - 1')
plt.subplot(1, 3, 2)
plt.scatter(Ltf_2_eq.real, Ltf_2_eq.imag)
plt.title('Equalized LTF - 2')
plt.subplot(1, 3, 3)
plt.scatter(Ltf.real, Ltf.imag)
plt.title('Actual LTF')
plt.suptitle('Signal constellations')
plt.tight_layout(rect=[0.01, 0.03, 0.98, 0.93])
plt.show()
stf_1_eq = ifft(ifftshift(Stf_1_eq))
stf_2_eq = ifft(ifftshift(Stf_2_eq))
ltf_1_eq = ifft(ifftshift(Ltf_1_eq))
ltf_2_eq = ifft(ifftshift(Ltf_2_eq))
preamble_eq = np.concatenate(
(stf_1_eq[-32:], stf_1_eq, stf_2_eq, ltf_1_eq[-32:], ltf_1_eq, ltf_2_eq))
reamble_eq
def rms(x):
return np.sqrt(np.mean(x * np.conjugate(x)))
def shift_frequency(vector, freq_shift, fs):
t = np.arange(0, np.size(vector)) / fs
modulation = np.exp(-1j * 2 * np.pi * freq_shift * t) / np.sqrt(2)
return vector * modulation
def resample(vector, fs, dfs):
fs = int(round(fs))
dfs = int(round(dfs))
cfs = lcm(fs, dfs)
if cfs > fs:
vector = resampy.resample(vector, fs, cfs, filter='kaiser_best')
return resampy.resample(vector, cfs, dfs, filter='kaiser_best')
def lcm(a, b):
return a * int(b / fractions.gcd(a, b)) if a and b else 0
def get_sliding_window(x, window_size=10, stride=1, fs=200e6, fs_natural=20e6):
shape_ = x.shape
window_size_samples = np.int(window_size * (fs/fs_natural))
stride_samples = np.int(stride * (fs/fs_natural))
for i in tqdm(np.arange(0, shape_[1] - window_size_samples + stride_samples, stride_samples)):
if i == 0:
y = x[:, i:i + window_size_samples, :].copy()
else:
y = np.concatenate((y, x[:, i:i + window_size_samples, :]), axis=0)
return y
def read_wifi(files, base_data_directory, device_map, progress=True):
csv = files['csv_objects'].items()
if progress is True:
csv = tqdm(csv)
data_dict = dict(signal={}, device_key={},
sample_rate={}, capture_sample_rate={}, capture_frequency={}, capture_hw={},
center_frequency={}, freq_lower_edge={}, freq_upper_edge={},
reference_number={}, data_file={}, sample_start={}, sample_count={},
device_type={}, device_id={}, device_manufacturer={}
)
signal_index = 0
for file, signal_list in csv:
while file[0] == '/' or file[0] == '\\':
file = file[1:]
data_file = os.path.join(base_data_directory, file)
metadata_file = data_file.replace('sigmf-data', 'sigmf-meta')
all_signals = json.load(open(metadata_file))
capture = dict(capture_sample_rate=all_signals['global']['core:sample_rate'],
sample_rate=all_signals['global']['core:sample_rate'],
capture_hw=all_signals['global']['core:hw'],
capture_frequency=all_signals['capture'][0]['core:frequency'],
data_file=data_file)
for signal_name in signal_list:
for key, value in capture.items():
data_dict[key][signal_index] = value
capture_properties = all_signals['capture']
signal_properties = get_json_signal(
all_signals['annotations'], capture_properties[0], signal_name, type='wifi')
for key, value in signal_properties.items():
data_dict[key][signal_index] = value
device_id = signal_properties['device_id']
data_dict['device_key'][signal_index] = device_map[device_id]
filename = data_dict['data_file'][signal_index]
start_sample = data_dict['sample_start'][signal_index]
sample_count = data_dict['sample_count'][signal_index]
data, buffer_start, buffer_end = read_sample(
filename, start_sample, sample_count, desired_buffer=0)
data_dict['signal'][signal_index] = data
data_dict['center_frequency'][signal_index] = data_dict['capture_frequency'][signal_index]
signal_index = signal_index + 1
return data_dict
def parse_input_files(input_csv, devices_csv):
device_list = []
device_map = {}
csv_objects = {}
with open(devices_csv) as devices_csv_file:
devices_reader = csv.reader(devices_csv_file, delimiter=',')
for device in devices_reader:
device_list.append(device[0])
for i, device in enumerate(device_list):
device_map[device] = i
with open(input_csv) as input_csv_file:
input_reader = csv.reader(input_csv_file, delimiter=',')
for row in input_reader:
csv_objects[row[0]] = row[1:]
return {'device_list': device_list,
'device_map': device_map,
'csv_objects': csv_objects}
def get_json_signal(json_annotations, capture, signal_id, type=None):
for signal in json_annotations:
if signal != {} and signal['capture_details:signal_reference_number'] == signal_id:
if 'rfml:label' in signal:
signal_label = signal['rfml:label']
if type is None:
type = signal_label[0]
else:
signal_label = tuple(None, None, None)
if type is None:
type = "unknown"
if type == "wifi":
return {'freq_lower_edge': signal['core:freq_lower_edge'],
'freq_upper_edge': signal['core:freq_upper_edge'],
'sample_start': signal['core:sample_start'],
'sample_count': signal['core:sample_count'],
'device_type': signal_label[0],
'device_manufacturer': signal_label[1],
'device_id': signal_label[2]}
elif type == "ADS-B":
return{'snr': signal['capture_details:SNRdB'],
'reference_number': signal['capture_details:signal_reference_number'],
'freq_lower_edge': capture['core:freq_lower_edge'],
'freq_upper_edge': capture['core:freq_upper_edge'],
'sample_start': signal['core:sample_start'],
'sample_count': signal['core:sample_count'],
'device_type': signal_label[0],
'device_id': signal_label[1]}
else:
print('Unknown signal type', type)
return None
return None
def read_sample(filename, start_sample, sample_count, desired_buffer):
    """Read interleaved int16 I/Q samples from a raw capture file.

    Reads *sample_count* complex samples starting at *start_sample*,
    padded on both sides by up to *desired_buffer* extra samples.

    Args:
        filename: path to the raw file of interleaved int16 I/Q pairs.
        start_sample: index of the first wanted complex sample.
        sample_count: number of wanted complex samples.
        desired_buffer: extra samples requested before and after.

    Returns:
        (samples, buffer_start, buffer_end): a 1-D complex array plus
        the number of buffer samples actually obtained on each side.
    """
    # The leading buffer cannot reach before the start of the file.
    pre = min(desired_buffer, start_sample)
    post = desired_buffer
    total = sample_count + pre + post
    with open(filename, "rb") as fh:
        # 4 bytes per complex sample: two int16 values (I then Q).
        fh.seek((start_sample - pre) * 4)
        raw = np.fromfile(fh, dtype='int16', count=2 * total)
    got = raw.size // 2
    # Shrink the trailing buffer if the file ended before 'total' samples.
    post -= total - got
    pairs = raw.reshape([got, 2])
    return pairs[:, 0] + 1j * pairs[:, 1], pre, post
| true | true |
1c4940a471a05633b194d7313df6009ea37014ef | 25,648 | py | Python | src/tests/api/test_permissions.py | tixl/tixl | 9f515a4b4e17a14d1990b29385475195438969be | [
"Apache-2.0"
] | null | null | null | src/tests/api/test_permissions.py | tixl/tixl | 9f515a4b4e17a14d1990b29385475195438969be | [
"Apache-2.0"
] | 8 | 2015-01-06T10:50:27.000Z | 2015-01-18T18:38:18.000Z | src/tests/api/test_permissions.py | tixl/tixl | 9f515a4b4e17a14d1990b29385475195438969be | [
"Apache-2.0"
] | null | null | null | #
# This file is part of pretix (Community Edition).
#
# Copyright (C) 2014-2020 Raphael Michel and contributors
# Copyright (C) 2020-2021 rami.io GmbH and contributors
#
# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation in version 3 of the License.
#
# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are
# applicable granting you additional permissions and placing additional restrictions on your usage of this software.
# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive
# this file, see <https://pretix.eu/about/en/license>.
#
# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License along with this program. If not, see
# <https://www.gnu.org/licenses/>.
#
# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of
# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.
#
# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A
# full history of changes and contributors is available at <https://github.com/pretix/pretix>.
#
# This file contains Apache-licensed contributions copyrighted by: Ture Gjørup
#
# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under the License.
import time
import pytest
from django.test import override_settings
from django.utils.timezone import now
from pretix.base.models import Organizer
# (required permission or None, URL suffix) pairs for read-only event
# sub-endpoints; None means any team token may GET the endpoint.
event_urls = [
    (None, ''),
    (None, 'categories/'),
    ('can_view_orders', 'invoices/'),
    (None, 'items/'),
    ('can_view_orders', 'orders/'),
    ('can_view_orders', 'orderpositions/'),
    (None, 'questions/'),
    (None, 'quotas/'),
    ('can_view_vouchers', 'vouchers/'),
    (None, 'subevents/'),
    (None, 'taxrules/'),
    ('can_view_orders', 'waitinglistentries/'),
    ('can_view_orders', 'checkinlists/'),
]
# (HTTP method, required permission or None, URL suffix below the event
# root, expected status code when the permission IS granted).
# 404 entries reference object ids that do not exist in the fixtures, so
# a granted request falls through to "not found"; 400 entries send an
# empty body to a write endpoint.
event_permission_sub_urls = [
    ('get', 'can_change_event_settings', 'settings/', 200),
    ('patch', 'can_change_event_settings', 'settings/', 200),
    ('get', 'can_view_orders', 'revokedsecrets/', 200),
    ('get', 'can_view_orders', 'revokedsecrets/1/', 404),
    ('get', 'can_view_orders', 'orders/', 200),
    ('get', 'can_view_orders', 'orderpositions/', 200),
    ('delete', 'can_change_orders', 'orderpositions/1/', 404),
    ('post', 'can_change_orders', 'orderpositions/1/price_calc/', 404),
    ('get', 'can_view_vouchers', 'vouchers/', 200),
    ('get', 'can_view_orders', 'invoices/', 200),
    ('get', 'can_view_orders', 'invoices/1/', 404),
    ('post', 'can_change_orders', 'invoices/1/regenerate/', 404),
    ('post', 'can_change_orders', 'invoices/1/reissue/', 404),
    ('get', 'can_view_orders', 'waitinglistentries/', 200),
    ('get', 'can_view_orders', 'waitinglistentries/1/', 404),
    ('post', 'can_change_orders', 'waitinglistentries/', 400),
    ('delete', 'can_change_orders', 'waitinglistentries/1/', 404),
    ('patch', 'can_change_orders', 'waitinglistentries/1/', 404),
    ('put', 'can_change_orders', 'waitinglistentries/1/', 404),
    ('post', 'can_change_orders', 'waitinglistentries/1/send_voucher/', 404),
    ('get', None, 'categories/', 200),
    ('get', None, 'items/', 200),
    ('get', None, 'questions/', 200),
    ('get', None, 'quotas/', 200),
    ('get', None, 'discounts/', 200),
    ('post', 'can_change_items', 'items/', 400),
    ('get', None, 'items/1/', 404),
    ('put', 'can_change_items', 'items/1/', 404),
    ('patch', 'can_change_items', 'items/1/', 404),
    ('delete', 'can_change_items', 'items/1/', 404),
    ('post', 'can_change_items', 'categories/', 400),
    ('get', None, 'categories/1/', 404),
    ('put', 'can_change_items', 'categories/1/', 404),
    ('patch', 'can_change_items', 'categories/1/', 404),
    ('delete', 'can_change_items', 'categories/1/', 404),
    ('post', 'can_change_items', 'discounts/', 400),
    ('get', None, 'discounts/1/', 404),
    ('put', 'can_change_items', 'discounts/1/', 404),
    ('patch', 'can_change_items', 'discounts/1/', 404),
    ('delete', 'can_change_items', 'discounts/1/', 404),
    ('post', 'can_change_items', 'items/1/variations/', 404),
    ('get', None, 'items/1/variations/', 404),
    ('get', None, 'items/1/variations/1/', 404),
    ('put', 'can_change_items', 'items/1/variations/1/', 404),
    ('patch', 'can_change_items', 'items/1/variations/1/', 404),
    ('delete', 'can_change_items', 'items/1/variations/1/', 404),
    ('get', None, 'items/1/addons/', 404),
    ('get', None, 'items/1/addons/1/', 404),
    ('post', 'can_change_items', 'items/1/addons/', 404),
    ('put', 'can_change_items', 'items/1/addons/1/', 404),
    ('patch', 'can_change_items', 'items/1/addons/1/', 404),
    ('delete', 'can_change_items', 'items/1/addons/1/', 404),
    ('get', None, 'subevents/', 200),
    ('get', None, 'subevents/1/', 404),
    ('get', None, 'taxrules/', 200),
    ('get', None, 'taxrules/1/', 404),
    ('post', 'can_change_event_settings', 'taxrules/', 400),
    ('put', 'can_change_event_settings', 'taxrules/1/', 404),
    ('patch', 'can_change_event_settings', 'taxrules/1/', 404),
    ('delete', 'can_change_event_settings', 'taxrules/1/', 404),
    ('get', 'can_change_event_settings', 'sendmail_rules/', 200),
    ('get', 'can_change_event_settings', 'sendmail_rules/1/', 404),
    ('post', 'can_change_event_settings', 'sendmail_rules/', 400),
    ('put', 'can_change_event_settings', 'sendmail_rules/1/', 404),
    ('patch', 'can_change_event_settings', 'sendmail_rules/1/', 404),
    ('delete', 'can_change_event_settings', 'sendmail_rules/1/', 404),
    ('get', 'can_view_vouchers', 'vouchers/', 200),
    ('get', 'can_view_vouchers', 'vouchers/1/', 404),
    ('post', 'can_change_vouchers', 'vouchers/', 201),
    ('put', 'can_change_vouchers', 'vouchers/1/', 404),
    ('patch', 'can_change_vouchers', 'vouchers/1/', 404),
    ('delete', 'can_change_vouchers', 'vouchers/1/', 404),
    ('get', None, 'quotas/', 200),
    ('get', None, 'quotas/1/', 404),
    ('post', 'can_change_items', 'quotas/', 400),
    ('put', 'can_change_items', 'quotas/1/', 404),
    ('patch', 'can_change_items', 'quotas/1/', 404),
    ('delete', 'can_change_items', 'quotas/1/', 404),
    ('get', None, 'questions/', 200),
    ('get', None, 'questions/1/', 404),
    ('post', 'can_change_items', 'questions/', 400),
    ('put', 'can_change_items', 'questions/1/', 404),
    ('patch', 'can_change_items', 'questions/1/', 404),
    ('delete', 'can_change_items', 'questions/1/', 404),
    ('get', None, 'questions/1/options/', 404),
    ('get', None, 'questions/1/options/1/', 404),
    ('put', 'can_change_items', 'questions/1/options/1/', 404),
    ('patch', 'can_change_items', 'questions/1/options/1/', 404),
    ('delete', 'can_change_items', 'questions/1/options/1/', 404),
    ('post', 'can_change_orders', 'orders/', 400),
    ('patch', 'can_change_orders', 'orders/ABC12/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/mark_paid/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/mark_pending/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/mark_expired/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/mark_canceled/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/approve/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/deny/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/extend/', 400),
    ('post', 'can_change_orders', 'orders/ABC12/create_invoice/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/resend_link/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/regenerate_secrets/', 404),
    ('get', 'can_view_orders', 'orders/ABC12/payments/', 404),
    ('get', 'can_view_orders', 'orders/ABC12/payments/1/', 404),
    ('get', 'can_view_orders', 'orders/ABC12/refunds/', 404),
    ('get', 'can_view_orders', 'orders/ABC12/refunds/1/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/payments/1/confirm/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/payments/1/refund/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/payments/1/cancel/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/refunds/1/cancel/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/refunds/1/process/', 404),
    ('post', 'can_change_orders', 'orders/ABC12/refunds/1/done/', 404),
    ('get', 'can_view_orders', 'checkinlists/', 200),
    ('post', 'can_change_orders', 'checkinlists/1/failed_checkins/', 400),
    ('post', 'can_change_event_settings', 'checkinlists/', 400),
    ('put', 'can_change_event_settings', 'checkinlists/1/', 404),
    ('patch', 'can_change_event_settings', 'checkinlists/1/', 404),
    ('delete', 'can_change_event_settings', 'checkinlists/1/', 404),
    ('get', 'can_view_orders', 'checkinlists/1/positions/', 404),
    ('post', 'can_change_orders', 'checkinlists/1/positions/3/redeem/', 404),
    ('post', 'can_create_events', 'clone/', 400),
    ('get', 'can_view_orders', 'cartpositions/', 200),
    ('get', 'can_view_orders', 'cartpositions/1/', 404),
    ('post', 'can_change_orders', 'cartpositions/', 400),
    ('delete', 'can_change_orders', 'cartpositions/1/', 404),
    ('post', 'can_view_orders', 'exporters/invoicedata/run/', 400),
    ('get', 'can_view_orders', 'exporters/invoicedata/download/bc3f9884-26ee-425b-8636-80613f84b6fa/3cb49ae6-eda3-4605-814e-099e23777b36/', 404),
]
# (HTTP method, required permission, URL suffix below the organizer
# root, expected status code when the permission IS granted).
# The '{team_id}' placeholder is filled in by the tests with the
# fixture team's primary key.
org_permission_sub_urls = [
    ('get', 'can_change_organizer_settings', 'settings/', 200),
    ('patch', 'can_change_organizer_settings', 'settings/', 200),
    ('get', 'can_change_organizer_settings', 'webhooks/', 200),
    ('post', 'can_change_organizer_settings', 'webhooks/', 400),
    ('get', 'can_change_organizer_settings', 'webhooks/1/', 404),
    ('put', 'can_change_organizer_settings', 'webhooks/1/', 404),
    ('patch', 'can_change_organizer_settings', 'webhooks/1/', 404),
    ('delete', 'can_change_organizer_settings', 'webhooks/1/', 404),
    ('get', 'can_manage_customers', 'customers/', 200),
    ('post', 'can_manage_customers', 'customers/', 201),
    ('get', 'can_manage_customers', 'customers/1/', 404),
    ('patch', 'can_manage_customers', 'customers/1/', 404),
    ('post', 'can_manage_customers', 'customers/1/anonymize/', 404),
    ('put', 'can_manage_customers', 'customers/1/', 404),
    ('delete', 'can_manage_customers', 'customers/1/', 404),
    ('get', 'can_manage_customers', 'memberships/', 200),
    ('post', 'can_manage_customers', 'memberships/', 400),
    ('get', 'can_manage_customers', 'memberships/1/', 404),
    ('patch', 'can_manage_customers', 'memberships/1/', 404),
    ('put', 'can_manage_customers', 'memberships/1/', 404),
    ('delete', 'can_manage_customers', 'memberships/1/', 404),
    ('get', 'can_change_organizer_settings', 'membershiptypes/', 200),
    ('post', 'can_change_organizer_settings', 'membershiptypes/', 400),
    ('get', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
    ('patch', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
    ('put', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
    ('delete', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
    ('get', 'can_manage_gift_cards', 'giftcards/', 200),
    ('post', 'can_manage_gift_cards', 'giftcards/', 400),
    ('get', 'can_manage_gift_cards', 'giftcards/1/', 404),
    ('put', 'can_manage_gift_cards', 'giftcards/1/', 404),
    ('patch', 'can_manage_gift_cards', 'giftcards/1/', 404),
    ('get', 'can_manage_gift_cards', 'giftcards/1/transactions/', 404),
    ('get', 'can_manage_gift_cards', 'giftcards/1/transactions/1/', 404),
    ('get', 'can_change_organizer_settings', 'devices/', 200),
    ('post', 'can_change_organizer_settings', 'devices/', 400),
    ('get', 'can_change_organizer_settings', 'devices/1/', 404),
    ('put', 'can_change_organizer_settings', 'devices/1/', 404),
    ('patch', 'can_change_organizer_settings', 'devices/1/', 404),
    ('get', 'can_change_teams', 'teams/', 200),
    ('post', 'can_change_teams', 'teams/', 400),
    ('get', 'can_change_teams', 'teams/{team_id}/', 200),
    ('put', 'can_change_teams', 'teams/{team_id}/', 400),
    ('patch', 'can_change_teams', 'teams/{team_id}/', 200),
    ('get', 'can_change_teams', 'teams/{team_id}/members/', 200),
    ('delete', 'can_change_teams', 'teams/{team_id}/members/2/', 404),
    ('get', 'can_change_teams', 'teams/{team_id}/invites/', 200),
    ('get', 'can_change_teams', 'teams/{team_id}/invites/2/', 404),
    ('delete', 'can_change_teams', 'teams/{team_id}/invites/2/', 404),
    ('post', 'can_change_teams', 'teams/{team_id}/invites/', 400),
    ('get', 'can_change_teams', 'teams/{team_id}/tokens/', 200),
    ('get', 'can_change_teams', 'teams/{team_id}/tokens/0/', 404),
    ('delete', 'can_change_teams', 'teams/{team_id}/tokens/0/', 404),
    ('post', 'can_change_teams', 'teams/{team_id}/tokens/', 400),
]
# (HTTP method, required permission, expected status code when granted)
# for the event root endpoint itself (create/update/delete an event).
event_permission_root_urls = [
    ('post', 'can_create_events', 400),
    ('put', 'can_change_event_settings', 400),
    ('patch', 'can_change_event_settings', 200),
    ('delete', 'can_change_event_settings', 204),
]
@pytest.fixture
def token_client(client, team):
    """API client authenticated with a team token whose team may view
    orders, view vouchers and change items."""
    for flag in ('can_view_orders', 'can_view_vouchers', 'can_change_items'):
        setattr(team, flag, True)
    team.save()
    token = team.tokens.create(name='Foo')
    client.credentials(HTTP_AUTHORIZATION='Token ' + token.token)
    return client
@pytest.mark.django_db
def test_organizer_allowed(token_client, organizer):
    """A token of the organizer's team may list the organizer's events."""
    resp = token_client.get(f'/api/v1/organizers/{organizer.slug}/events/')
    assert resp.status_code == 200
@pytest.mark.django_db
def test_organizer_not_allowed(token_client, organizer):
    """A token may not list the events of an unrelated organizer."""
    other = Organizer.objects.create(slug='o2', name='Organizer 2')
    resp = token_client.get(f'/api/v1/organizers/{other.slug}/events/')
    assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_allowed_device(device_client, organizer):
    """A device may not list the events of an unrelated organizer."""
    other = Organizer.objects.create(slug='o2', name='Organizer 2')
    resp = device_client.get(f'/api/v1/organizers/{other.slug}/events/')
    assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_existing(token_client, organizer):
    """An unknown organizer slug is answered with 403, not 404."""
    resp = token_client.get('/api/v1/organizers/o2/events/')
    assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events(token_client, team, organizer, event, url):
    """A team with access to all events may read every event sub-endpoint."""
    team.all_events = True
    team.save()
    resp = token_client.get(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{url[1]}')
    assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events_device(device_client, device, organizer, event, url):
    """A device gets 200 where it holds the required permission, else 403."""
    resp = device_client.get(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{url[1]}')
    granted = url[0] is None or url[0] in device.permission_set()
    assert resp.status_code == (200 if granted else 403)
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events(token_client, organizer, team, event, url):
    """A team limited to specific events may read endpoints of those events."""
    team.all_events = False
    team.save()
    team.limit_events.add(event)
    resp = token_client.get(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{url[1]}')
    assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events_device(device_client, organizer, device, event, url):
    """A device limited to specific events gets 200 only with the permission."""
    device.all_events = False
    device.save()
    device.limit_events.add(event)
    resp = device_client.get(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{url[1]}')
    granted = url[0] is None or url[0] in device.permission_set()
    assert resp.status_code == (200 if granted else 403)
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed(token_client, organizer, team, event, url):
    """A team without access to the event is denied on every sub-endpoint."""
    team.all_events = False
    team.save()
    resp = token_client.get(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{url[1]}')
    assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed_device(device_client, organizer, device, event, url):
    """A device without access to the event is denied on every sub-endpoint."""
    device.all_events = False
    device.save()
    resp = device_client.get(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{url[1]}')
    assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_existing(token_client, organizer, url, event):
    """Without event access granted, event sub-endpoints answer 403."""
    resp = token_client.get(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{url[1]}')
    assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_allowed(token_client, team, organizer, event, urlset):
    """With the required permission granted, each sub-endpoint returns its
    expected status code."""
    method, permission, suffix, expected = urlset
    team.all_events = True
    if permission:
        setattr(team, permission, True)
    team.save()
    resp = getattr(token_client, method)(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{suffix}')
    assert resp.status_code == expected
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
    """Without the required permission the API hides the endpoint (403/404)."""
    method, permission, suffix, expected = urlset
    if permission is None:
        # Endpoint needs no specific permission; deny by removing event access.
        team.all_events = False
    else:
        team.all_events = True
        setattr(team, permission, False)
    team.save()
    resp = getattr(token_client, method)(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{suffix}')
    if expected == 404:
        assert resp.status_code == 403
    else:
        assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_allowed(token_client, team, organizer, event, urlset):
    """With the required permission, event-root writes return their expected
    status code."""
    method, permission, expected = urlset
    team.all_events = True
    setattr(team, permission, True)
    team.save()
    if method == 'post':
        url = f'/api/v1/organizers/{organizer.slug}/events/'
    else:
        url = f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/'
    assert getattr(token_client, method)(url).status_code == expected
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_not_allowed(token_client, team, organizer, event, urlset):
    """Without the required permission, event-root writes are forbidden."""
    method, permission, expected = urlset
    team.all_events = True
    setattr(team, permission, False)
    team.save()
    if method == 'post':
        url = f'/api/v1/organizers/{organizer.slug}/events/'
    else:
        url = f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/'
    assert getattr(token_client, method)(url).status_code == 403
@pytest.mark.django_db
def test_log_out_after_absolute_timeout(user_client, team, organizer, event):
    """A short session is rejected 12 hours after login."""
    session = user_client.session
    session.update({
        'pretix_auth_long_session': False,
        'pretix_auth_login_time': int(time.time()) - 3600 * 12 - 60,
    })
    session.save()
    resp = user_client.get(f'/api/v1/organizers/{organizer.slug}/events/')
    assert resp.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_absolute_timeout(user_client, team, organizer, event):
    """A session just inside the 12-hour window is still accepted."""
    session = user_client.session
    session.update({
        'pretix_auth_long_session': True,
        'pretix_auth_login_time': int(time.time()) - 3600 * 12 + 60,
    })
    session.save()
    resp = user_client.get(f'/api/v1/organizers/{organizer.slug}/events/')
    assert resp.status_code == 200
@pytest.mark.django_db
@override_settings(PRETIX_LONG_SESSIONS=False)
def test_ignore_long_session_if_disabled_in_config(user_client, team, organizer, event):
    """The long-session flag is ignored when long sessions are disabled."""
    session = user_client.session
    session.update({
        'pretix_auth_long_session': True,
        'pretix_auth_login_time': int(time.time()) - 3600 * 12 - 60,
    })
    session.save()
    resp = user_client.get(f'/api/v1/organizers/{organizer.slug}/events/')
    assert resp.status_code == 403
@pytest.mark.django_db
def test_dont_logout_in_long_session(user_client, team, organizer, event):
    """A long session survives past the 12-hour absolute timeout."""
    session = user_client.session
    session.update({
        'pretix_auth_long_session': True,
        'pretix_auth_login_time': int(time.time()) - 3600 * 12 - 60,
    })
    session.save()
    resp = user_client.get(f'/api/v1/organizers/{organizer.slug}/events/')
    assert resp.status_code == 200
@pytest.mark.django_db
def test_log_out_after_relative_timeout(user_client, team, organizer, event):
    """A short session is rejected after 3 hours of inactivity."""
    session = user_client.session
    session.update({
        'pretix_auth_long_session': False,
        'pretix_auth_login_time': int(time.time()) - 3600 * 6,
        'pretix_auth_last_used': int(time.time()) - 3600 * 3 - 60,
    })
    session.save()
    resp = user_client.get(f'/api/v1/organizers/{organizer.slug}/events/')
    assert resp.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_relative_timeout(user_client, team, organizer, event):
    """A session used within the last 3 hours is still accepted."""
    session = user_client.session
    session.update({
        'pretix_auth_long_session': True,
        'pretix_auth_login_time': int(time.time()) - 3600 * 6,
        'pretix_auth_last_used': int(time.time()) - 3600 * 3 + 60,
    })
    session.save()
    resp = user_client.get(f'/api/v1/organizers/{organizer.slug}/events/')
    assert resp.status_code == 200
@pytest.mark.django_db
def test_dont_logout_by_relative_in_long_session(user_client, team, organizer, event):
    """The relative inactivity timeout does not apply to long sessions."""
    session = user_client.session
    session.update({
        'pretix_auth_long_session': True,
        'pretix_auth_login_time': int(time.time()) - 3600 * 5,
        'pretix_auth_last_used': int(time.time()) - 3600 * 3 - 60,
    })
    session.save()
    resp = user_client.get(f'/api/v1/organizers/{organizer.slug}/events/')
    assert resp.status_code == 200
@pytest.mark.django_db
def test_update_session_activity(user_client, team, organizer, event):
    """Each authenticated request refreshes the last-used timestamp."""
    before = int(time.time()) - 5
    session = user_client.session
    session.update({
        'pretix_auth_long_session': False,
        'pretix_auth_login_time': int(time.time()) - 3600 * 5,
        'pretix_auth_last_used': before,
    })
    session.save()
    resp = user_client.get(f'/api/v1/organizers/{organizer.slug}/events/')
    assert resp.status_code == 200
    assert user_client.session['pretix_auth_last_used'] > before
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_device_subresource_permission_check(device_client, device, organizer, event, urlset):
    """Devices get the expected status only for permissions they hold."""
    if urlset == ('get', 'can_change_event_settings', 'settings/', 200):
        return  # this particular combination is deliberately skipped
    method, permission, suffix, expected = urlset
    resp = getattr(device_client, method)(
        f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{suffix}')
    if permission is None or permission in device.permission_set():
        assert resp.status_code == expected
    elif expected == 404:
        assert resp.status_code == 403
    else:
        assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_allowed(token_client, team, organizer, event, urlset):
    """With the required organizer permission, each sub-endpoint returns its
    expected status code."""
    method, permission, suffix, expected = urlset
    team.all_events = True
    if permission:
        setattr(team, permission, True)
    team.save()
    resp = getattr(token_client, method)(
        f'/api/v1/organizers/{organizer.slug}/{suffix.format(team_id=team.pk)}')
    assert resp.status_code == expected
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
    """Without the organizer permission the API hides the endpoint (403/404)."""
    method, permission, suffix, expected = urlset
    if permission is None:
        team.all_events = False
    else:
        team.all_events = True
        setattr(team, permission, False)
    team.save()
    resp = getattr(token_client, method)(
        f'/api/v1/organizers/{organizer.slug}/{suffix.format(team_id=team.pk)}')
    if expected == 404:
        assert resp.status_code == 403
    else:
        assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_staff_requires_staff_session(user_client, organizer, team, event, url, user):
    """A staff user needs an active staff session before the API opens up."""
    team.delete()
    user.is_staff = True
    user.save()
    target = f'/api/v1/organizers/{organizer.slug}/events/{event.slug}/{url[1]}'
    assert user_client.get(target).status_code == 403
    user.staffsession_set.create(
        date_start=now(), session_key=user_client.session.session_key)
    assert user_client.get(target).status_code == 200
| 45.314488 | 145 | 0.681535 |
import time
import pytest
from django.test import override_settings
from django.utils.timezone import now
from pretix.base.models import Organizer
event_urls = [
(None, ''),
(None, 'categories/'),
('can_view_orders', 'invoices/'),
(None, 'items/'),
('can_view_orders', 'orders/'),
('can_view_orders', 'orderpositions/'),
(None, 'questions/'),
(None, 'quotas/'),
('can_view_vouchers', 'vouchers/'),
(None, 'subevents/'),
(None, 'taxrules/'),
('can_view_orders', 'waitinglistentries/'),
('can_view_orders', 'checkinlists/'),
]
event_permission_sub_urls = [
('get', 'can_change_event_settings', 'settings/', 200),
('patch', 'can_change_event_settings', 'settings/', 200),
('get', 'can_view_orders', 'revokedsecrets/', 200),
('get', 'can_view_orders', 'revokedsecrets/1/', 404),
('get', 'can_view_orders', 'orders/', 200),
('get', 'can_view_orders', 'orderpositions/', 200),
('delete', 'can_change_orders', 'orderpositions/1/', 404),
('post', 'can_change_orders', 'orderpositions/1/price_calc/', 404),
('get', 'can_view_vouchers', 'vouchers/', 200),
('get', 'can_view_orders', 'invoices/', 200),
('get', 'can_view_orders', 'invoices/1/', 404),
('post', 'can_change_orders', 'invoices/1/regenerate/', 404),
('post', 'can_change_orders', 'invoices/1/reissue/', 404),
('get', 'can_view_orders', 'waitinglistentries/', 200),
('get', 'can_view_orders', 'waitinglistentries/1/', 404),
('post', 'can_change_orders', 'waitinglistentries/', 400),
('delete', 'can_change_orders', 'waitinglistentries/1/', 404),
('patch', 'can_change_orders', 'waitinglistentries/1/', 404),
('put', 'can_change_orders', 'waitinglistentries/1/', 404),
('post', 'can_change_orders', 'waitinglistentries/1/send_voucher/', 404),
('get', None, 'categories/', 200),
('get', None, 'items/', 200),
('get', None, 'questions/', 200),
('get', None, 'quotas/', 200),
('get', None, 'discounts/', 200),
('post', 'can_change_items', 'items/', 400),
('get', None, 'items/1/', 404),
('put', 'can_change_items', 'items/1/', 404),
('patch', 'can_change_items', 'items/1/', 404),
('delete', 'can_change_items', 'items/1/', 404),
('post', 'can_change_items', 'categories/', 400),
('get', None, 'categories/1/', 404),
('put', 'can_change_items', 'categories/1/', 404),
('patch', 'can_change_items', 'categories/1/', 404),
('delete', 'can_change_items', 'categories/1/', 404),
('post', 'can_change_items', 'discounts/', 400),
('get', None, 'discounts/1/', 404),
('put', 'can_change_items', 'discounts/1/', 404),
('patch', 'can_change_items', 'discounts/1/', 404),
('delete', 'can_change_items', 'discounts/1/', 404),
('post', 'can_change_items', 'items/1/variations/', 404),
('get', None, 'items/1/variations/', 404),
('get', None, 'items/1/variations/1/', 404),
('put', 'can_change_items', 'items/1/variations/1/', 404),
('patch', 'can_change_items', 'items/1/variations/1/', 404),
('delete', 'can_change_items', 'items/1/variations/1/', 404),
('get', None, 'items/1/addons/', 404),
('get', None, 'items/1/addons/1/', 404),
('post', 'can_change_items', 'items/1/addons/', 404),
('put', 'can_change_items', 'items/1/addons/1/', 404),
('patch', 'can_change_items', 'items/1/addons/1/', 404),
('delete', 'can_change_items', 'items/1/addons/1/', 404),
('get', None, 'subevents/', 200),
('get', None, 'subevents/1/', 404),
('get', None, 'taxrules/', 200),
('get', None, 'taxrules/1/', 404),
('post', 'can_change_event_settings', 'taxrules/', 400),
('put', 'can_change_event_settings', 'taxrules/1/', 404),
('patch', 'can_change_event_settings', 'taxrules/1/', 404),
('delete', 'can_change_event_settings', 'taxrules/1/', 404),
('get', 'can_change_event_settings', 'sendmail_rules/', 200),
('get', 'can_change_event_settings', 'sendmail_rules/1/', 404),
('post', 'can_change_event_settings', 'sendmail_rules/', 400),
('put', 'can_change_event_settings', 'sendmail_rules/1/', 404),
('patch', 'can_change_event_settings', 'sendmail_rules/1/', 404),
('delete', 'can_change_event_settings', 'sendmail_rules/1/', 404),
('get', 'can_view_vouchers', 'vouchers/', 200),
('get', 'can_view_vouchers', 'vouchers/1/', 404),
('post', 'can_change_vouchers', 'vouchers/', 201),
('put', 'can_change_vouchers', 'vouchers/1/', 404),
('patch', 'can_change_vouchers', 'vouchers/1/', 404),
('delete', 'can_change_vouchers', 'vouchers/1/', 404),
('get', None, 'quotas/', 200),
('get', None, 'quotas/1/', 404),
('post', 'can_change_items', 'quotas/', 400),
('put', 'can_change_items', 'quotas/1/', 404),
('patch', 'can_change_items', 'quotas/1/', 404),
('delete', 'can_change_items', 'quotas/1/', 404),
('get', None, 'questions/', 200),
('get', None, 'questions/1/', 404),
('post', 'can_change_items', 'questions/', 400),
('put', 'can_change_items', 'questions/1/', 404),
('patch', 'can_change_items', 'questions/1/', 404),
('delete', 'can_change_items', 'questions/1/', 404),
('get', None, 'questions/1/options/', 404),
('get', None, 'questions/1/options/1/', 404),
('put', 'can_change_items', 'questions/1/options/1/', 404),
('patch', 'can_change_items', 'questions/1/options/1/', 404),
('delete', 'can_change_items', 'questions/1/options/1/', 404),
('post', 'can_change_orders', 'orders/', 400),
('patch', 'can_change_orders', 'orders/ABC12/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_paid/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_pending/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_expired/', 404),
('post', 'can_change_orders', 'orders/ABC12/mark_canceled/', 404),
('post', 'can_change_orders', 'orders/ABC12/approve/', 404),
('post', 'can_change_orders', 'orders/ABC12/deny/', 404),
('post', 'can_change_orders', 'orders/ABC12/extend/', 400),
('post', 'can_change_orders', 'orders/ABC12/create_invoice/', 404),
('post', 'can_change_orders', 'orders/ABC12/resend_link/', 404),
('post', 'can_change_orders', 'orders/ABC12/regenerate_secrets/', 404),
('get', 'can_view_orders', 'orders/ABC12/payments/', 404),
('get', 'can_view_orders', 'orders/ABC12/payments/1/', 404),
('get', 'can_view_orders', 'orders/ABC12/refunds/', 404),
('get', 'can_view_orders', 'orders/ABC12/refunds/1/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/confirm/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/refund/', 404),
('post', 'can_change_orders', 'orders/ABC12/payments/1/cancel/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/cancel/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/process/', 404),
('post', 'can_change_orders', 'orders/ABC12/refunds/1/done/', 404),
('get', 'can_view_orders', 'checkinlists/', 200),
('post', 'can_change_orders', 'checkinlists/1/failed_checkins/', 400),
('post', 'can_change_event_settings', 'checkinlists/', 400),
('put', 'can_change_event_settings', 'checkinlists/1/', 404),
('patch', 'can_change_event_settings', 'checkinlists/1/', 404),
('delete', 'can_change_event_settings', 'checkinlists/1/', 404),
('get', 'can_view_orders', 'checkinlists/1/positions/', 404),
('post', 'can_change_orders', 'checkinlists/1/positions/3/redeem/', 404),
('post', 'can_create_events', 'clone/', 400),
('get', 'can_view_orders', 'cartpositions/', 200),
('get', 'can_view_orders', 'cartpositions/1/', 404),
('post', 'can_change_orders', 'cartpositions/', 400),
('delete', 'can_change_orders', 'cartpositions/1/', 404),
('post', 'can_view_orders', 'exporters/invoicedata/run/', 400),
('get', 'can_view_orders', 'exporters/invoicedata/download/bc3f9884-26ee-425b-8636-80613f84b6fa/3cb49ae6-eda3-4605-814e-099e23777b36/', 404),
]
org_permission_sub_urls = [
('get', 'can_change_organizer_settings', 'settings/', 200),
('patch', 'can_change_organizer_settings', 'settings/', 200),
('get', 'can_change_organizer_settings', 'webhooks/', 200),
('post', 'can_change_organizer_settings', 'webhooks/', 400),
('get', 'can_change_organizer_settings', 'webhooks/1/', 404),
('put', 'can_change_organizer_settings', 'webhooks/1/', 404),
('patch', 'can_change_organizer_settings', 'webhooks/1/', 404),
('delete', 'can_change_organizer_settings', 'webhooks/1/', 404),
('get', 'can_manage_customers', 'customers/', 200),
('post', 'can_manage_customers', 'customers/', 201),
('get', 'can_manage_customers', 'customers/1/', 404),
('patch', 'can_manage_customers', 'customers/1/', 404),
('post', 'can_manage_customers', 'customers/1/anonymize/', 404),
('put', 'can_manage_customers', 'customers/1/', 404),
('delete', 'can_manage_customers', 'customers/1/', 404),
('get', 'can_manage_customers', 'memberships/', 200),
('post', 'can_manage_customers', 'memberships/', 400),
('get', 'can_manage_customers', 'memberships/1/', 404),
('patch', 'can_manage_customers', 'memberships/1/', 404),
('put', 'can_manage_customers', 'memberships/1/', 404),
('delete', 'can_manage_customers', 'memberships/1/', 404),
('get', 'can_change_organizer_settings', 'membershiptypes/', 200),
('post', 'can_change_organizer_settings', 'membershiptypes/', 400),
('get', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
('patch', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
('put', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
('delete', 'can_change_organizer_settings', 'membershiptypes/1/', 404),
('get', 'can_manage_gift_cards', 'giftcards/', 200),
('post', 'can_manage_gift_cards', 'giftcards/', 400),
('get', 'can_manage_gift_cards', 'giftcards/1/', 404),
('put', 'can_manage_gift_cards', 'giftcards/1/', 404),
('patch', 'can_manage_gift_cards', 'giftcards/1/', 404),
('get', 'can_manage_gift_cards', 'giftcards/1/transactions/', 404),
('get', 'can_manage_gift_cards', 'giftcards/1/transactions/1/', 404),
('get', 'can_change_organizer_settings', 'devices/', 200),
('post', 'can_change_organizer_settings', 'devices/', 400),
('get', 'can_change_organizer_settings', 'devices/1/', 404),
('put', 'can_change_organizer_settings', 'devices/1/', 404),
('patch', 'can_change_organizer_settings', 'devices/1/', 404),
('get', 'can_change_teams', 'teams/', 200),
('post', 'can_change_teams', 'teams/', 400),
('get', 'can_change_teams', 'teams/{team_id}/', 200),
('put', 'can_change_teams', 'teams/{team_id}/', 400),
('patch', 'can_change_teams', 'teams/{team_id}/', 200),
('get', 'can_change_teams', 'teams/{team_id}/members/', 200),
('delete', 'can_change_teams', 'teams/{team_id}/members/2/', 404),
('get', 'can_change_teams', 'teams/{team_id}/invites/', 200),
('get', 'can_change_teams', 'teams/{team_id}/invites/2/', 404),
('delete', 'can_change_teams', 'teams/{team_id}/invites/2/', 404),
('post', 'can_change_teams', 'teams/{team_id}/invites/', 400),
('get', 'can_change_teams', 'teams/{team_id}/tokens/', 200),
('get', 'can_change_teams', 'teams/{team_id}/tokens/0/', 404),
('delete', 'can_change_teams', 'teams/{team_id}/tokens/0/', 404),
('post', 'can_change_teams', 'teams/{team_id}/tokens/', 400),
]
event_permission_root_urls = [
('post', 'can_create_events', 400),
('put', 'can_change_event_settings', 400),
('patch', 'can_change_event_settings', 200),
('delete', 'can_change_event_settings', 204),
]
@pytest.fixture
def token_client(client, team):
    """API client authenticated via a freshly created team token.

    Grants the team a fixed set of view/change permissions before the
    token is issued, so permission-specific tests can toggle the rest.
    """
    for permission in ('can_view_orders', 'can_view_vouchers', 'can_change_items'):
        setattr(team, permission, True)
    team.save()
    token = team.tokens.create(name='Foo')
    client.credentials(HTTP_AUTHORIZATION='Token ' + token.token)
    return client
@pytest.mark.django_db
def test_organizer_allowed(token_client, organizer):
    """A team token can list the events of its own organizer."""
    resp = token_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
    assert resp.status_code == 200
@pytest.mark.django_db
def test_organizer_not_allowed(token_client, organizer):
    """A team token cannot list events of an unrelated organizer (403)."""
    o2 = Organizer.objects.create(slug='o2', name='Organizer 2')
    resp = token_client.get('/api/v1/organizers/{}/events/'.format(o2.slug))
    assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_allowed_device(device_client, organizer):
    """A device cannot list events of an unrelated organizer (403)."""
    o2 = Organizer.objects.create(slug='o2', name='Organizer 2')
    resp = device_client.get('/api/v1/organizers/{}/events/'.format(o2.slug))
    assert resp.status_code == 403
@pytest.mark.django_db
def test_organizer_not_existing(token_client, organizer):
    """A request for an unknown organizer slug is answered with 403."""
    resp = token_client.get('/api/v1/organizers/{}/events/'.format('o2'))
    assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events(token_client, team, organizer, event, url):
    """A team with all_events=True may GET every event endpoint."""
    team.all_events = True
    team.save()
    resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
    assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_all_events_device(device_client, device, organizer, event, url):
    """Devices may GET an event endpoint only if the required permission is in their set."""
    resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
    # url[0] is the permission required for this endpoint, None meaning "no permission needed"
    if url[0] is None or url[0] in device.permission_set():
        assert resp.status_code == 200
    else:
        assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events(token_client, organizer, team, event, url):
    """A team limited to specific events may GET an event it is limited to."""
    team.all_events = False
    team.save()
    team.limit_events.add(event)
    resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
    assert resp.status_code == 200
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_allowed_limit_events_device(device_client, organizer, device, event, url):
    """A device limited to specific events still needs the endpoint's permission."""
    device.all_events = False
    device.save()
    device.limit_events.add(event)
    resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
    if url[0] is None or url[0] in device.permission_set():
        assert resp.status_code == 200
    else:
        assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed(token_client, organizer, team, event, url):
    """A team without event access gets 403 on every event endpoint."""
    team.all_events = False
    team.save()
    resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
    assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_allowed_device(device_client, organizer, device, event, url):
    """A device without event access gets 403 on every event endpoint."""
    device.all_events = False
    device.save()
    resp = device_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
    assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_not_existing(token_client, organizer, url, event):
    """Endpoint access without an event grant yields 403.

    NOTE(review): despite the name, the ``event`` fixture does exist here;
    the 403 presumably stems from the team lacking a grant — confirm.
    """
    resp = token_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
    assert resp.status_code == 403
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_allowed(token_client, team, organizer, event, urlset):
    """With all_events and the required permission granted, each event
    sub-resource responds with its expected status (urlset[3])."""
    team.all_events = True
    # urlset = (http method, required permission or None, sub-url, expected status)
    if urlset[1]:
        setattr(team, urlset[1], True)
    team.save()
    resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
        organizer.slug, event.slug, urlset[2]))
    assert resp.status_code == urlset[3]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_token_event_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
    """Without the required permission (or without event access at all),
    event sub-resources respond 403, or 404 where the fixture would 404 anyway."""
    if urlset[1] is None:
        # No specific permission required -> revoke event access entirely.
        team.all_events = False
    else:
        team.all_events = True
        setattr(team, urlset[1], False)
    team.save()
    resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
        organizer.slug, event.slug, urlset[2]))
    if urlset[3] == 404:
        assert resp.status_code == 403
    else:
        assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_allowed(token_client, team, organizer, event, urlset):
    """Event root endpoints respond with the expected status when the
    required permission is granted (POST goes to the collection URL)."""
    team.all_events = True
    setattr(team, urlset[1], True)
    team.save()
    if urlset[0] == 'post':
        resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/'.format(organizer.slug))
    else:
        resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug))
    assert resp.status_code == urlset[2]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_root_urls)
def test_token_event_permission_not_allowed(token_client, team, organizer, event, urlset):
    """Event root endpoints respond 403 when the required permission is revoked."""
    team.all_events = True
    setattr(team, urlset[1], False)
    team.save()
    if urlset[0] == 'post':
        resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/'.format(organizer.slug))
    else:
        resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/events/{}/'.format(organizer.slug, event.slug))
    assert resp.status_code == 403
@pytest.mark.django_db
def test_log_out_after_absolute_timeout(user_client, team, organizer, event):
    """A short (non-long) session older than 12 hours is rejected with 403."""
    session = user_client.session
    session['pretix_auth_long_session'] = False
    session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
    session.save()
    response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
    assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_absolute_timeout(user_client, team, organizer, event):
    """A session younger than the 12-hour absolute limit is still accepted."""
    session = user_client.session
    session['pretix_auth_long_session'] = True
    session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 + 60
    session.save()
    response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
    assert response.status_code == 200
@pytest.mark.django_db
@override_settings(PRETIX_LONG_SESSIONS=False)
def test_ignore_long_session_if_disabled_in_config(user_client, team, organizer, event):
    """The long-session flag is ignored when PRETIX_LONG_SESSIONS is off."""
    session = user_client.session
    session['pretix_auth_long_session'] = True
    session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
    session.save()
    response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
    assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_in_long_session(user_client, team, organizer, event):
    """A long session survives the 12-hour absolute timeout."""
    session = user_client.session
    session['pretix_auth_long_session'] = True
    session['pretix_auth_login_time'] = int(time.time()) - 3600 * 12 - 60
    session.save()
    response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
    assert response.status_code == 200
@pytest.mark.django_db
def test_log_out_after_relative_timeout(user_client, team, organizer, event):
    """A short session idle for more than 3 hours is rejected with 403."""
    session = user_client.session
    session['pretix_auth_long_session'] = False
    session['pretix_auth_login_time'] = int(time.time()) - 3600 * 6
    session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 - 60
    session.save()
    response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
    assert response.status_code == 403
@pytest.mark.django_db
def test_dont_logout_before_relative_timeout(user_client, team, organizer, event):
    """A session idle for slightly less than 3 hours is still accepted."""
    session = user_client.session
    session['pretix_auth_long_session'] = True
    session['pretix_auth_login_time'] = int(time.time()) - 3600 * 6
    session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 + 60
    session.save()
    response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
    assert response.status_code == 200
@pytest.mark.django_db
def test_dont_logout_by_relative_in_long_session(user_client, team, organizer, event):
    """The 3-hour idle timeout does not apply to long sessions."""
    session = user_client.session
    session['pretix_auth_long_session'] = True
    session['pretix_auth_login_time'] = int(time.time()) - 3600 * 5
    session['pretix_auth_last_used'] = int(time.time()) - 3600 * 3 - 60
    session.save()
    response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
    assert response.status_code == 200
@pytest.mark.django_db
def test_update_session_activity(user_client, team, organizer, event):
    """A successful API request refreshes the pretix_auth_last_used timestamp."""
    t1 = int(time.time()) - 5
    session = user_client.session
    session['pretix_auth_long_session'] = False
    session['pretix_auth_login_time'] = int(time.time()) - 3600 * 5
    session['pretix_auth_last_used'] = t1
    session.save()
    response = user_client.get('/api/v1/organizers/{}/events/'.format(organizer.slug))
    assert response.status_code == 200
    assert user_client.session['pretix_auth_last_used'] > t1
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", event_permission_sub_urls)
def test_device_subresource_permission_check(device_client, device, organizer, event, urlset):
    """Devices may reach an event sub-resource only if its permission is in
    the device's permission set; otherwise 403 (or 404 for lookup URLs)."""
    # NOTE(review): this one case is skipped — presumably devices may always
    # read event settings regardless of permissions; confirm against the view.
    if urlset == ('get', 'can_change_event_settings', 'settings/', 200):
        return
    resp = getattr(device_client, urlset[0])('/api/v1/organizers/{}/events/{}/{}'.format(
        organizer.slug, event.slug, urlset[2]))
    if urlset[1] is None or urlset[1] in device.permission_set():
        assert resp.status_code == urlset[3]
    else:
        if urlset[3] == 404:
            assert resp.status_code == 403
        else:
            assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_allowed(token_client, team, organizer, event, urlset):
    """With the required permission granted, each organizer-level
    sub-resource responds with its expected status (urlset[3])."""
    team.all_events = True
    if urlset[1]:
        setattr(team, urlset[1], True)
    team.save()
    # urlset[2] may contain a {team_id} placeholder filled with the fixture team's pk.
    resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/{}'.format(
        organizer.slug, urlset[2].format(team_id=team.pk)))
    assert resp.status_code == urlset[3]
@pytest.mark.django_db
@pytest.mark.parametrize("urlset", org_permission_sub_urls)
def test_token_org_subresources_permission_not_allowed(token_client, team, organizer, event, urlset):
    """Without the required permission, organizer-level sub-resources
    respond 403, or 404 where the fixture would 404 anyway."""
    if urlset[1] is None:
        team.all_events = False
    else:
        team.all_events = True
        setattr(team, urlset[1], False)
    team.save()
    resp = getattr(token_client, urlset[0])('/api/v1/organizers/{}/{}'.format(
        organizer.slug, urlset[2].format(team_id=team.pk)))
    if urlset[3] == 404:
        assert resp.status_code == 403
    else:
        assert resp.status_code in (404, 403)
@pytest.mark.django_db
@pytest.mark.parametrize("url", event_urls)
def test_event_staff_requires_staff_session(user_client, organizer, team, event, url, user):
    """A staff user without teams needs an active staff session to read events."""
    team.delete()
    user.is_staff = True
    user.save()
    resp = user_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
    assert resp.status_code == 403
    # Opening a staff session for this browser session unlocks access.
    user.staffsession_set.create(date_start=now(), session_key=user_client.session.session_key)
    resp = user_client.get('/api/v1/organizers/{}/events/{}/{}'.format(organizer.slug, event.slug, url[1]))
    assert resp.status_code == 200
| true | true |
1c4940b8959cc53cd05290301b2d13364041c21b | 751 | py | Python | archive/migrations/0002_auto_20181215_2009.py | WarwickAnimeSoc/aniMango | f927c2bc6eb484561ab38172ebebee6f03c8b13b | [
"MIT"
] | null | null | null | archive/migrations/0002_auto_20181215_2009.py | WarwickAnimeSoc/aniMango | f927c2bc6eb484561ab38172ebebee6f03c8b13b | [
"MIT"
] | 6 | 2016-10-18T14:52:05.000Z | 2020-06-18T15:14:41.000Z | archive/migrations/0002_auto_20181215_2009.py | WarwickAnimeSoc/aniMango | f927c2bc6eb484561ab38172ebebee6f03c8b13b | [
"MIT"
] | 6 | 2020-02-07T17:37:37.000Z | 2021-01-15T00:01:43.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-12-15 20:09
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering two fields on ``archive.Item``."""
    # Depends only on the app's initial migration.
    dependencies = [
        ('archive', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='item',
            name='file',
            # Uploaded archive files land under MEDIA_ROOT/archive/.
            field=models.FileField(help_text=b'The file that should be uploaded', upload_to=b'archive/'),
        ),
        migrations.AlterField(
            model_name='item',
            name='type',
            # Two-character discriminator; bytes literals come from the
            # Python 2-era generator and must stay unchanged for migration state.
            field=models.CharField(choices=[(b'im', b'Image'), (b'vi', b'Video'), (b'tx', b'Text File'), (b'we', b'Website File')], default=b'tx', max_length=2),
        ),
    ]
| 28.884615 | 161 | 0.585885 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering two fields on ``archive.Item``."""
    # Depends only on the app's initial migration.
    dependencies = [
        ('archive', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='item',
            name='file',
            # Uploaded archive files land under MEDIA_ROOT/archive/.
            field=models.FileField(help_text=b'The file that should be uploaded', upload_to=b'archive/'),
        ),
        migrations.AlterField(
            model_name='item',
            name='type',
            # Two-character discriminator; bytes literals come from the
            # Python 2-era generator and must stay unchanged for migration state.
            field=models.CharField(choices=[(b'im', b'Image'), (b'vi', b'Video'), (b'tx', b'Text File'), (b'we', b'Website File')], default=b'tx', max_length=2),
        ),
    ]
| true | true |
1c4941197e11bced5ec610532458438235e3a434 | 664 | py | Python | src/oca_github_bot/tasks/delete_branch.py | tafaRU/oca-github-bot | 4ede8cf4e7ffb6aa0fd02aadcdd53edfb94b211a | [
"MIT"
] | null | null | null | src/oca_github_bot/tasks/delete_branch.py | tafaRU/oca-github-bot | 4ede8cf4e7ffb6aa0fd02aadcdd53edfb94b211a | [
"MIT"
] | 1 | 2019-05-28T10:15:24.000Z | 2019-05-28T10:15:24.000Z | src/oca_github_bot/tasks/delete_branch.py | tafaRU/oca-github-bot | 4ede8cf4e7ffb6aa0fd02aadcdd53edfb94b211a | [
"MIT"
] | 1 | 2019-06-18T15:17:53.000Z | 2019-06-18T15:17:53.000Z | # Copyright (c) ACSONE SA/NV 2018
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
from .. import github
from ..config import switchable
from ..github import gh_call
from ..queue import getLogger, task
_logger = getLogger(__name__)
@task()
@switchable()
def delete_branch(org, repo, branch, dry_run=False):
with github.repository(org, repo) as gh_repo:
gh_branch = gh_call(gh_repo.ref, f"heads/{branch}")
if dry_run:
_logger.info(f"DRY-RUN delete branch {branch} in {org}/{repo}")
else:
_logger.info(f"deleting branch {branch} in {org}/{repo}")
gh_call(gh_branch.delete)
| 30.181818 | 75 | 0.674699 |
from .. import github
from ..config import switchable
from ..github import gh_call
from ..queue import getLogger, task
_logger = getLogger(__name__)
@task()
@switchable()
def delete_branch(org, repo, branch, dry_run=False):
    """Delete ``branch`` from the GitHub repository ``org/repo``.

    Queued task (see the ``task`` decorator); can be disabled via
    ``switchable``. In dry-run mode only a log line is emitted.
    """
    with github.repository(org, repo) as gh_repo:
        # The ref is resolved even in dry-run mode, so a missing branch fails early.
        gh_branch = gh_call(gh_repo.ref, f"heads/{branch}")
        if dry_run:
            _logger.info(f"DRY-RUN delete branch {branch} in {org}/{repo}")
        else:
            _logger.info(f"deleting branch {branch} in {org}/{repo}")
            gh_call(gh_branch.delete)
| true | true |
1c49418810ea5ca5da0598ff490ca27f6dd4bd50 | 4,785 | py | Python | had/app/views/api/v1/persons/phone_api.py | eduardolujan/hexagonal_architecture_django | 8055927cb460bc40f3a2651c01a9d1da696177e8 | [
"BSD-3-Clause"
] | 6 | 2020-08-09T23:41:08.000Z | 2021-03-16T22:05:40.000Z | had/app/views/api/v1/persons/phone_api.py | eduardolujan/hexagonal_architecture_django | 8055927cb460bc40f3a2651c01a9d1da696177e8 | [
"BSD-3-Clause"
] | 1 | 2020-10-02T02:59:38.000Z | 2020-10-02T02:59:38.000Z | had/app/views/api/v1/persons/phone_api.py | eduardolujan/hexagonal_architecture_django | 8055927cb460bc40f3a2651c01a9d1da696177e8 | [
"BSD-3-Clause"
] | 2 | 2021-03-16T22:05:43.000Z | 2021-04-30T06:35:25.000Z | # -*- coding: utf-8 -*-
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from modules.shared.infrastructure.serializers.django.serializer_manager import (
SerializerManager as DjangoSerializerManager,
)
from modules.users.infrastructure.serializers.django import (
UserSerializer as DjangoUserSerializer,
GetUserSerializer as DjangoGetUserSerializer,
CreateUserSerializer as DjangoCreateUserSerializer,
)
from modules.shared.infrastructure.log import LoggerDecorator, PyLoggerService
from modules.shared.infrastructure.requests.django import Request as DjangoRequest
from modules.shared.infrastructure.responses.django import RestResponse as DjangoRestResponse
from modules.shared.infrastructure.persistence.django import UnitOfWork as DjangoUnitOfWork
from modules.shared.infrastructure.passwords.django import PasswordCreator as DjangoPasswordCreator
from modules.users.infrastructure.repository.django import (
UserRepository as DjangoUserRepository
)
from modules.users.application.api.v1 import GetUserApi, CreateUserApi, UpdateUserApi, DeleteUserApi
@LoggerDecorator(logger=PyLoggerService(file_path=__file__))
class UserApi(APIView):
    """CRUD endpoint for users.

    Each handler wires the Django infrastructure adapters (request/response
    wrappers, repository, serializer managers) into the matching
    application-layer use case and returns that use case's response.
    ``post`` and ``put`` previously duplicated this wiring line for line;
    it now lives in :meth:`_run_write_api`.
    """
    # authentication_classes = [SessionAuthentication, BasicAuthentication]
    # NOTE(review): AllowAny leaves every action unauthenticated — confirm intended.
    permission_classes = [AllowAny]

    def _run_write_api(self, request, api_class):
        """Build the adapters shared by POST and PUT and execute *api_class*.

        @param request: raw DRF request
        @param api_class: CreateUserApi or UpdateUserApi (same constructor signature)
        @return: the use case's response
        """
        request = DjangoRequest(request)
        response = DjangoRestResponse()
        user_repository = DjangoUserRepository()
        unit_of_work = DjangoUnitOfWork()
        password_creator = DjangoPasswordCreator()
        user_serializer_manager = DjangoSerializerManager(DjangoCreateUserSerializer)
        write_api = api_class(request,
                              response,
                              user_serializer_manager,
                              user_repository,
                              password_creator,
                              unit_of_work)
        return write_api()

    def get(self, request, _id: str = None):
        """Handle GET: delegate retrieval of user ``_id`` to ``GetUserApi``.

        @param request: raw DRF request
        @param _id: user id, forwarded as-is to the use case
        @return: the use case's response
        """
        request = DjangoRequest(request)
        response = DjangoRestResponse()
        user_repository = DjangoUserRepository()
        request_serializer_manager = DjangoSerializerManager(DjangoGetUserSerializer)
        response_serializer_manager = DjangoSerializerManager(DjangoUserSerializer)
        user_get_api = GetUserApi(request,
                                  response,
                                  user_repository,
                                  request_serializer_manager,
                                  response_serializer_manager)
        return user_get_api(_id)

    def post(self, request, _id: str = None):
        """Handle POST: create a user via ``CreateUserApi``."""
        return self._run_write_api(request, CreateUserApi)

    def put(self, request, _id: str = None):
        """Handle PUT: update a user via ``UpdateUserApi``."""
        return self._run_write_api(request, UpdateUserApi)

    def delete(self, request, _id):
        """Handle DELETE: remove user ``_id`` via ``DeleteUserApi``.

        @param request: raw DRF request
        @param _id: user id to delete
        @return: the use case's response
        """
        request = DjangoRequest(request)
        response = DjangoRestResponse()
        user_repository = DjangoUserRepository()
        request_serializer_manager = DjangoSerializerManager(DjangoGetUserSerializer)
        delete_user_api = DeleteUserApi(request,
                                        response,
                                        request_serializer_manager,
                                        user_repository)
        return delete_user_api(_id)
| 37.97619 | 100 | 0.614629 |
from rest_framework.views import APIView
from rest_framework.permissions import AllowAny
from modules.shared.infrastructure.serializers.django.serializer_manager import (
SerializerManager as DjangoSerializerManager,
)
from modules.users.infrastructure.serializers.django import (
UserSerializer as DjangoUserSerializer,
GetUserSerializer as DjangoGetUserSerializer,
CreateUserSerializer as DjangoCreateUserSerializer,
)
from modules.shared.infrastructure.log import LoggerDecorator, PyLoggerService
from modules.shared.infrastructure.requests.django import Request as DjangoRequest
from modules.shared.infrastructure.responses.django import RestResponse as DjangoRestResponse
from modules.shared.infrastructure.persistence.django import UnitOfWork as DjangoUnitOfWork
from modules.shared.infrastructure.passwords.django import PasswordCreator as DjangoPasswordCreator
from modules.users.infrastructure.repository.django import (
UserRepository as DjangoUserRepository
)
from modules.users.application.api.v1 import GetUserApi, CreateUserApi, UpdateUserApi, DeleteUserApi
@LoggerDecorator(logger=PyLoggerService(file_path=__file__))
class UserApi(APIView):
    """CRUD endpoint for users.

    Each handler wires the Django infrastructure adapters (request/response
    wrappers, repository, serializer managers) into the matching
    application-layer use case and returns that use case's response.
    """
    # NOTE(review): AllowAny leaves every action unauthenticated — confirm intended.
    permission_classes = [AllowAny]
    def get(self, request, _id: str = None):
        """Handle GET: delegate retrieval of user ``_id`` to ``GetUserApi``."""
        request = DjangoRequest(request)
        response = DjangoRestResponse()
        user_repository = DjangoUserRepository()
        request_serializer_manager = DjangoSerializerManager(DjangoGetUserSerializer)
        response_serializer_manager = DjangoSerializerManager(DjangoUserSerializer)
        user_get_api = GetUserApi(request,
                                  response,
                                  user_repository,
                                  request_serializer_manager,
                                  response_serializer_manager)
        response = user_get_api(_id)
        return response
    def post(self, request, _id: str = None):
        """Handle POST: create a user via ``CreateUserApi``."""
        request = DjangoRequest(request)
        response = DjangoRestResponse()
        user_repository = DjangoUserRepository()
        unit_of_work = DjangoUnitOfWork()
        password_creator = DjangoPasswordCreator()
        user_serializer_manager = DjangoSerializerManager(DjangoCreateUserSerializer)
        create_user_api = CreateUserApi(request,
                                        response,
                                        user_serializer_manager,
                                        user_repository,
                                        password_creator,
                                        unit_of_work)
        response = create_user_api()
        return response
    def put(self, request, _id: str = None):
        """Handle PUT: update a user via ``UpdateUserApi``."""
        request = DjangoRequest(request)
        response = DjangoRestResponse()
        user_repository = DjangoUserRepository()
        unit_of_work = DjangoUnitOfWork()
        password_creator = DjangoPasswordCreator()
        user_serializer_manager = DjangoSerializerManager(DjangoCreateUserSerializer)
        update_user_api = UpdateUserApi(request,
                                        response,
                                        user_serializer_manager,
                                        user_repository,
                                        password_creator,
                                        unit_of_work)
        response = update_user_api()
        return response
    def delete(self, request, _id):
        """Handle DELETE: remove user ``_id`` via ``DeleteUserApi``."""
        request = DjangoRequest(request)
        response = DjangoRestResponse()
        user_repository = DjangoUserRepository()
        request_serializer_manager = DjangoSerializerManager(DjangoGetUserSerializer)
        delete_user_api = DeleteUserApi(request,
                                        response,
                                        request_serializer_manager,
                                        user_repository)
        response = delete_user_api(_id)
        return response
| true | true |
1c4943683afb62f8fb1168cb730218c3287099c4 | 5,313 | py | Python | app/api/v2/tests/test_candidate.py | softMaina/political-v2 | 985e96ec0ff6cc866a26538ef7a69436de7e17d0 | [
"MIT"
] | 2 | 2019-03-17T08:11:13.000Z | 2019-11-14T06:08:50.000Z | app/api/v2/tests/test_candidate.py | softMaina/political-v2 | 985e96ec0ff6cc866a26538ef7a69436de7e17d0 | [
"MIT"
] | null | null | null | app/api/v2/tests/test_candidate.py | softMaina/political-v2 | 985e96ec0ff6cc866a26538ef7a69436de7e17d0 | [
"MIT"
] | null | null | null |
import json
from flask import current_app
from app.api.v2.tests import base_tests
from . import helper_functions
from app.api.v2.tests.helper_functions import convert_response_to_json
class TestCandidates(base_tests.TestBaseClass):
    """Tests for the /api/v2/candidates endpoints (auth + payload validation).

    The repeated party/office setup and candidate POST calls are factored
    into private helpers; each test keeps its original name and assertions.
    """

    def register_user(self):
        """Register the admin user; return the HTTP status code."""
        response = self.app_test_client.post('api/v2/auth/register', json=self.Admin)
        return response.status_code

    def log_user(self):
        """Sign the admin up (idempotent) and log in; return the auth token."""
        self.app_test_client.post('/api/v2/auth/signup', json=self.Admin)
        response = self.app_test_client.post('/api/v2/auth/login', json=self.admin_login)
        return convert_response_to_json(response)['token']

    # -- helpers ----------------------------------------------------------
    def _post_fixture(self, url, payload, token=None):
        """POST *payload* to *url*, adding an Authorization header only when *token* is given."""
        if token is None:
            return self.app_test_client.post(url, json=payload)
        return self.app_test_client.post(url, json=payload,
                                         headers=dict(Authorization=token))

    def _post_candidate(self, payload, token):
        """POST *payload* to the candidates endpoint with the given Authorization value."""
        return self.app_test_client.post('api/v2/candidates', json=payload,
                                         headers=dict(Authorization=token),
                                         content_type='application/json')

    # -- tests ------------------------------------------------------------
    def test_candidate(self):
        """A candidate is created (201) once its party and office exist."""
        token = self.log_user()
        self._post_fixture('api/v2/parties', self.PARTY, token)
        self._post_fixture('api/v2/offices', self.OFFICE, token)
        response = self._post_candidate(self.Candidate, token)
        self.assertEqual(response.status_code, 201)

    def test_candidate_no_token(self):
        """Registration without an auth token is rejected with 401."""
        self._post_fixture('api/v2/parties', self.PARTY)
        self._post_fixture('api/v2/offices', self.OFFICE)
        response = self._post_candidate(self.Candidate, "")
        self.assertEqual(response.status_code, 401)

    def test_candidate_invalid_token(self):
        """Registration with a malformed auth token is rejected with 403."""
        self._post_fixture('api/v2/parties', self.PARTY)
        self._post_fixture('api/v2/offices', self.OFFICE)
        response = self._post_candidate(self.Candidate, "invalid_token")
        self.assertEqual(response.status_code, 403)

    def test_candidate_with_no_office(self):
        """Registration fails (400) when the referenced office does not exist."""
        self._post_fixture('api/v2/parties', self.PARTY)
        response = self._post_candidate(self.Candidate, self.log_user())
        self.assertEqual(response.status_code, 400)

    def test_candidate_with_no_party(self):
        """Registration fails (400) when the referenced party does not exist."""
        self._post_fixture('api/v2/offices', self.OFFICE)
        response = self._post_candidate(self.Candidate, self.log_user())
        self.assertEqual(response.status_code, 400)

    def test_candidate_with_missing_keys(self):
        """Payloads missing required keys are rejected with 400."""
        token = self.log_user()
        self._post_fixture('api/v2/parties', self.PARTY, token)
        self._post_fixture('api/v2/offices', self.OFFICE, token)
        response = self._post_candidate({"office": 1}, token)
        self.assertEqual(response.status_code, 400)

    def test_candidate_with_string(self):
        """A non-integer office id is rejected with 400."""
        token = self.log_user()
        self._post_fixture('api/v2/parties', self.PARTY, token)
        self._post_fixture('api/v2/offices', self.OFFICE, token)
        response = self._post_candidate({"office": "one", "party": 1}, token)
        self.assertEqual(response.status_code, 400)

    def test_candidate_with_party_string(self):
        """A non-integer party id is rejected with 400."""
        token = self.log_user()
        self._post_fixture('api/v2/parties', self.PARTY, token)
        self._post_fixture('api/v2/offices', self.OFFICE, token)
        response = self._post_candidate({"office": 1, "party": "one"}, token)
        self.assertEqual(response.status_code, 400)
| 45.410256 | 165 | 0.680783 |
import json
from flask import current_app
from app.api.v2.tests import base_tests
from . import helper_functions
from app.api.v2.tests.helper_functions import convert_response_to_json
class TestCandidates(base_tests.TestBaseClass):
    """Tests for the /api/v2/candidates endpoints (auth + payload validation)."""
    def register_user(self):
        """Register the admin user; return the HTTP status code."""
        response = self.app_test_client.post('api/v2/auth/register', json=self.Admin)
        return response.status_code
    def log_user(self):
        """Sign the admin up (idempotent) and log in; return the auth token."""
        response1 = self.app_test_client.post('/api/v2/auth/signup',json=self.Admin)
        response = self.app_test_client.post('/api/v2/auth/login',json=self.admin_login)
        token = convert_response_to_json(response)['token']
        return token
    def test_candidate(self):
        """A candidate is created (201) once its party and office exist."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 201)
    def test_candidate_no_token(self):
        """Registration without an auth token is rejected with 401."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY)
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = ""), content_type='application/json')
        self.assertEqual(response.status_code,401)
    def test_candidate_invalid_token(self):
        """Registration with a malformed auth token is rejected with 403."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY)
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = "invalid_token"), content_type='application/json')
        self.assertEqual(response.status_code,403)
    def test_candidate_with_no_office(self):
        """Registration fails (400) when the referenced office does not exist."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code,400)
    def test_candidate_with_no_party(self):
        """Registration fails (400) when the referenced party does not exist."""
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code,400)
    def test_candidate_with_missing_keys(self):
        """Payloads missing required keys are rejected with 400."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json={
            "office":1
        }, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 400)
    def test_candidate_with_string(self):
        """A non-integer office id is rejected with 400."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json={
            "office":"one",
            "party":1
        }, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 400)
    def test_candidate_with_party_string(self):
        """A non-integer party id is rejected with 400."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json={
            "office":1,
            "party":"one"
        }, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 400)
| true | true |
1c49452cccd050c0ae0b8b5468b700cbb6d115c9 | 1,059 | py | Python | feature_importance_v2.py | terryli710/MPS_regression | d8f9c94ad315734ff9376a53e6be3f508b4da742 | [
"MIT"
] | null | null | null | feature_importance_v2.py | terryli710/MPS_regression | d8f9c94ad315734ff9376a53e6be3f508b4da742 | [
"MIT"
] | null | null | null | feature_importance_v2.py | terryli710/MPS_regression | d8f9c94ad315734ff9376a53e6be3f508b4da742 | [
"MIT"
] | null | null | null | ## Without filtering results with VIF, calculate the importance for all the features.
## Works for "first" and "structcoef"
from util_relaimpo import *
from util import loadNpy, loadCsv
def main(x_name, y_name, method, feature_names=None):
    """Load an X/Y dataset, bootstrap feature-importance estimates, and print them.

    Args:
        x_name: file name of the predictor matrix under ``data/X`` (``.npy``).
        y_name: file name of the response vector under ``data/Y`` (``.npy``).
        method: importance function (e.g. ``structcoef``) forwarded to
            ``bootstrapping``.
        feature_names: optional list of column names for the predictors; when
            omitted (or empty), default integer column labels are used.
            (Changed from a mutable ``[]`` default to ``None`` — behavior is
            identical because only truthiness is checked below.)
    """
    # INFO: dataset prefix (text before the first underscore) and method name.
    print("Dataset", x_name.split('_')[0])
    # __name__ is more robust than parsing repr(method), and prints the same
    # text for an ordinary function.
    print("Method", getattr(method, '__name__', str(method)))
    # load data
    X = loadNpy(['data', 'X', x_name])
    Y = loadNpy(['data', 'Y', y_name])
    # make dataframe (named columns only when feature names were supplied)
    if feature_names:
        xdf = pd.DataFrame(data=X, columns=feature_names)
    else:
        xdf = pd.DataFrame(data=X)
    print("bootstrapping ...")
    coef_boot = bootstrapping(xdf, Y, method)
    print(printBootResult(coef_boot, list(xdf.columns), list(xdf.columns)))
# Human-readable feature names from the feature-description CSV; shared by
# every dataset analyzed below.
feature_names = getFeatureNames(loadCsv(['data', 'X', 'feature_descriptions.csv']))

if __name__ == '__main__':
    # Run the structure-coefficient importance analysis once per dataset
    # prefix (HM, AF, NFL53) — angular-velocity features vs. MPSCC95 targets.
    main('HM_X_ang_vel.npy','HM_MPSCC95.npy', structcoef, feature_names)
    main('AF_X_ang_vel.npy', 'AF_MPSCC95.npy', structcoef, feature_names)
    main('NFL53_X_ang_vel.npy', 'NFL53_MPSCC95.npy', structcoef, feature_names)
| 40.730769 | 85 | 0.693107 |
print("Dataset", x_name.split('_')[0])
print("Method", str(method).split(' ')[1])
X = loadNpy(['data', 'X', x_name])
Y = loadNpy(['data', 'Y', y_name])
if feature_names: xdf = pd.DataFrame(data=X, columns=feature_names)
else: xdf = pd.DataFrame(data=X)
print("bootstrapping ...")
coef_boot = bootstrapping(xdf, Y, method)
print(printBootResult(coef_boot, list(xdf.columns), list(xdf.columns)))
feature_names = getFeatureNames(loadCsv(['data', 'X', 'feature_descriptions.csv']))
if __name__ == '__main__':
main('HM_X_ang_vel.npy','HM_MPSCC95.npy', structcoef, feature_names)
main('AF_X_ang_vel.npy', 'AF_MPSCC95.npy', structcoef, feature_names)
main('NFL53_X_ang_vel.npy', 'NFL53_MPSCC95.npy', structcoef, feature_names)
| true | true |
1c494530de31aff8b8204b0ef28d50b5b3cad91c | 113 | py | Python | tcex/sessions/__init__.py | brikardtc/tcex | 78680f055f4259e31f0b4989a5695604108d9fdd | [
"Apache-2.0"
] | null | null | null | tcex/sessions/__init__.py | brikardtc/tcex | 78680f055f4259e31f0b4989a5695604108d9fdd | [
"Apache-2.0"
] | null | null | null | tcex/sessions/__init__.py | brikardtc/tcex | 78680f055f4259e31f0b4989a5695604108d9fdd | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Session module for TcEx Framework"""
# flake8: noqa
from .tc_session import TcSession
| 22.6 | 39 | 0.690265 |
from .tc_session import TcSession
| true | true |
1c49454c8e0883c7cc820aae3666d057cd052c30 | 3,723 | py | Python | monai/data/synthetic.py | loftwah/MONAI | 37fb3e779121e6dc74127993df102fc91d9065f8 | [
"Apache-2.0"
] | 1 | 2020-04-23T13:05:29.000Z | 2020-04-23T13:05:29.000Z | monai/data/synthetic.py | tranduyquockhanh/MONAI | 37fb3e779121e6dc74127993df102fc91d9065f8 | [
"Apache-2.0"
] | null | null | null | monai/data/synthetic.py | tranduyquockhanh/MONAI | 37fb3e779121e6dc74127993df102fc91d9065f8 | [
"Apache-2.0"
] | 1 | 2021-09-20T12:10:01.000Z | 2021-09-20T12:10:01.000Z | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from monai.transforms.utils import rescale_array
def create_test_image_2d(width, height, num_objs=12, rad_max=30, noise_max=0.0, num_seg_classes=5, channel_dim=None):
    """
    Return a noisy 2D image with `num_obj` circles and a 2D mask image. The maximum radius of the circles is given as
    `rad_max`. The mask will have `num_seg_classes` number of classes for segmentations labeled sequentially from 1, plus a
    background class represented as 0. If `noise_max` is greater than 0 then noise will be added to the image taken from
    the uniform distribution on range `[0,noise_max)`. If `channel_dim` is None, will create an image without channel
    dimension, otherwise create an image with channel dimension as first dim or last dim.
    """
    image = np.zeros((width, height))

    # Stamp `num_objs` random circles into the image.
    for i in range(num_objs):
        x = np.random.randint(rad_max, width - rad_max)
        y = np.random.randint(rad_max, height - rad_max)
        rad = np.random.randint(5, rad_max)
        spy, spx = np.ogrid[-x:width - x, -y:height - y]
        circle = (spx * spx + spy * spy) <= rad * rad

        if num_seg_classes > 1:
            image[circle] = np.ceil(np.random.random() * num_seg_classes)
        else:
            image[circle] = np.random.random() * 0.5 + 0.5

    labels = np.ceil(image).astype(np.int32)

    norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
    noisyimage = rescale_array(np.maximum(image, norm))

    if channel_dim is not None:
        assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 2), 'invalid channel dim.'
        # BUG FIX: previously the conditional expression bound only to
        # `labels[None]` (operator precedence), so for channel_dim != 0 the
        # else-branch tuple was assigned to `labels` instead of being
        # unpacked. Parenthesize the if-branch tuple, matching the (correct)
        # 3D variant below.
        noisyimage, labels = (noisyimage[None], labels[None]) \
            if channel_dim == 0 else (noisyimage[..., None], labels[..., None])

    return noisyimage, labels
def create_test_image_3d(height, width, depth, num_objs=12, rad_max=30,
                         noise_max=0.0, num_seg_classes=5, channel_dim=None):
    """
    Return a noisy 3D image and segmentation.

    See also:
        :py:meth:`~create_test_image_2d`
    """
    image = np.zeros((width, height, depth))

    # Stamp `num_objs` random spheres into the volume.
    for _ in range(num_objs):
        cx = np.random.randint(rad_max, width - rad_max)
        cy = np.random.randint(rad_max, height - rad_max)
        cz = np.random.randint(rad_max, depth - rad_max)
        radius = np.random.randint(5, rad_max)
        gy, gx, gz = np.ogrid[-cx:width - cx, -cy:height - cy, -cz:depth - cz]
        sphere = (gx * gx + gy * gy + gz * gz) <= radius * radius

        if num_seg_classes > 1:
            image[sphere] = np.ceil(np.random.random() * num_seg_classes)
        else:
            image[sphere] = np.random.random() * 0.5 + 0.5

    # Integer class labels; background stays 0.
    labels = np.ceil(image).astype(np.int32)

    # Blend in uniform noise, then rescale to [0, 1].
    background = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
    noisyimage = rescale_array(np.maximum(image, background))

    if channel_dim is not None:
        assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 3), 'invalid channel dim.'
        if channel_dim == 0:
            noisyimage, labels = noisyimage[None], labels[None]
        else:
            noisyimage, labels = noisyimage[..., None], labels[..., None]

    return noisyimage, labels
| 43.290698 | 123 | 0.665055 |
import numpy as np
from monai.transforms.utils import rescale_array
def create_test_image_2d(width, height, num_objs=12, rad_max=30, noise_max=0.0, num_seg_classes=5, channel_dim=None):
image = np.zeros((width, height))
for i in range(num_objs):
x = np.random.randint(rad_max, width - rad_max)
y = np.random.randint(rad_max, height - rad_max)
rad = np.random.randint(5, rad_max)
spy, spx = np.ogrid[-x:width - x, -y:height - y]
circle = (spx * spx + spy * spy) <= rad * rad
if num_seg_classes > 1:
image[circle] = np.ceil(np.random.random() * num_seg_classes)
else:
image[circle] = np.random.random() * 0.5 + 0.5
labels = np.ceil(image).astype(np.int32)
norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
noisyimage = rescale_array(np.maximum(image, norm))
if channel_dim is not None:
assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 2), 'invalid channel dim.'
noisyimage, labels = noisyimage[None], labels[None] \
if channel_dim == 0 else (noisyimage[..., None], labels[..., None])
return noisyimage, labels
def create_test_image_3d(height, width, depth, num_objs=12, rad_max=30,
noise_max=0.0, num_seg_classes=5, channel_dim=None):
image = np.zeros((width, height, depth))
for i in range(num_objs):
x = np.random.randint(rad_max, width - rad_max)
y = np.random.randint(rad_max, height - rad_max)
z = np.random.randint(rad_max, depth - rad_max)
rad = np.random.randint(5, rad_max)
spy, spx, spz = np.ogrid[-x:width - x, -y:height - y, -z:depth - z]
circle = (spx * spx + spy * spy + spz * spz) <= rad * rad
if num_seg_classes > 1:
image[circle] = np.ceil(np.random.random() * num_seg_classes)
else:
image[circle] = np.random.random() * 0.5 + 0.5
labels = np.ceil(image).astype(np.int32)
norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
noisyimage = rescale_array(np.maximum(image, norm))
if channel_dim is not None:
assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 3), 'invalid channel dim.'
noisyimage, labels = (noisyimage[None], labels[None]) \
if channel_dim == 0 else (noisyimage[..., None], labels[..., None])
return noisyimage, labels
| true | true |
1c49454d298b0470f6d86b30368b4e5d57afc0e8 | 1,953 | py | Python | DataSource/TickData.py | dukechain2333/BossaNova | af9fa7abf060b2e070aa6469afa44fd2861d5a22 | [
"MIT"
] | 2 | 2020-10-15T12:48:01.000Z | 2021-09-11T01:44:28.000Z | DataSource/TickData.py | dukechain2333/BossaNova | af9fa7abf060b2e070aa6469afa44fd2861d5a22 | [
"MIT"
] | null | null | null | DataSource/TickData.py | dukechain2333/BossaNova | af9fa7abf060b2e070aa6469afa44fd2861d5a22 | [
"MIT"
] | null | null | null | # @author Duke Chain
# @File:TickData.py
# @createTime 2020/12/08 15:25:08
import threading
from DBOperate.CreateStockInfo import CreateStockInfo
from DBOperate.AddStockInfo import AddStockInfo
import akshare as ak
class TickData(threading.Thread):
    """
    Fetch tick-level (per-trade) data for one stock and write it to the database.

    Runs as a thread so several stocks can be downloaded concurrently.

    Args:
        ak: the akshare data-interface module.
        stockID: stock symbol to download (e.g. ``"sh601808"``).
        dateList: trade dates to fetch, as ``"YYYYMMDD"`` strings.
    """

    def __init__(self, ak, stockID, dateList):
        super().__init__()
        self.ak = ak
        self.stockID = stockID
        self.dateList = dateList

    def run(self):
        # Ensure the per-stock tick table exists before inserting rows.
        createInfo = CreateStockInfo(self.stockID, 'stock_info_tick', 't')
        createInfo.createTable()
        for date in self.dateList:
            # Tencent tick feed: one DataFrame of trades per trading day.
            data = self.ak.stock_zh_a_tick_tx(code=self.stockID, trade_date=date)
            for i in range(data.shape[0]):
                try:
                    # Column headers are Chinese strings from akshare:
                    # 成交时间 = trade time, 成交价格 = price,
                    # 价格变动 = price change, 成交量(手) = volume (lots).
                    print(date + ' ' + data['成交时间'][i])
                    trade_date = date + ' ' + data['成交时间'][i]
                    stock_price = data['成交价格'][i]
                    chg = data['价格变动'][i]
                    volume = data['成交量(手)'][i]
                except IndexError:
                    # No data for this date (e.g. non-trading day); message
                    # says "data does not exist, please change the date".
                    print(date, '数据不存在,请修改时间!')
                else:
                    addInfo = AddStockInfo(self.stockID, trade_date=trade_date, close_price=stock_price, chg=chg,
                                           volume=volume)
                    addInfo.addInfoTick()
# if __name__ == '__main__':
# dateList = ['20200907', '20200908', '20200909', '20200910', '20200911']
# thread1 = TickData(ak, "sh601808", dateList)
# thread2 = TickData(ak, "sh601811", dateList)
# thread3 = TickData(ak, "sh601858", dateList)
# thread4 = TickData(ak, "sh601878", dateList)
#
# thread1.start()
# thread2.start()
# thread3.start()
# thread4.start()
#
# thread1.join()
# thread2.join()
# thread3.join()
# thread4.join()
#
# print("ALL DONE!")
| 29.590909 | 113 | 0.550947 |
import threading
from DBOperate.CreateStockInfo import CreateStockInfo
from DBOperate.AddStockInfo import AddStockInfo
import akshare as ak
class TickData(threading.Thread):
def __init__(self, ak, stockID, dateList):
super().__init__()
self.ak = ak
self.stockID = stockID
self.dateList = dateList
def run(self):
createInfo = CreateStockInfo(self.stockID, 'stock_info_tick', 't')
createInfo.createTable()
for date in self.dateList:
data = self.ak.stock_zh_a_tick_tx(code=self.stockID, trade_date=date)
for i in range(data.shape[0]):
try:
print(date + ' ' + data['成交时间'][i])
trade_date = date + ' ' + data['成交时间'][i]
stock_price = data['成交价格'][i]
chg = data['价格变动'][i]
volume = data['成交量(手)'][i]
except IndexError:
print(date, '数据不存在,请修改时间!')
else:
addInfo = AddStockInfo(self.stockID, trade_date=trade_date, close_price=stock_price, chg=chg,
volume=volume)
addInfo.addInfoTick()
| true | true |
1c49456a4e965385fb2cd2b8f180a1dcc77558ad | 7,238 | py | Python | tools/train.py | tszssong/HRNet-Image-Classification | 6d8ee24aedf2e0b3134102c221a29fb9b0ce2e1b | [
"MIT"
] | null | null | null | tools/train.py | tszssong/HRNet-Image-Classification | 6d8ee24aedf2e0b3134102c221a29fb9b0ce2e1b | [
"MIT"
] | null | null | null | tools/train.py | tszssong/HRNet-Image-Classification | 6d8ee24aedf2e0b3134102c221a29fb9b0ce2e1b | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Ke Sun (sunk@mail.ustc.edu.cn)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import shutil
import sys
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import _init_paths
import models
from config import config
from config import update_config
from core.function import train
from core.function import validate
from utils.modelsummary import get_model_summary
from utils.utils import get_optimizer
from utils.utils import save_checkpoint
from utils.utils import create_logger
def parse_args():
    """Parse command-line options for training and merge them into `config`."""
    parser = argparse.ArgumentParser(description='Train classification network')

    parser.add_argument('--cfg', required=True, type=str,
                        help='experiment configure file name')
    parser.add_argument('--modelDir', type=str, default='',
                        help='model directory')
    parser.add_argument('--logDir', type=str, default='',
                        help='log directory')
    parser.add_argument('--dataDir', type=str, default='',
                        help='data directory')
    parser.add_argument('--testModel', type=str, default='',
                        help='testModel')

    args = parser.parse_args()
    # Overlay the parsed options onto the global experiment config in place.
    update_config(config, args)
    return args
def main():
    """Train an HRNet image classifier.

    Builds the model and ImageFolder data pipeline from the global `config`,
    optionally resumes from `checkpoint.pth.tar`, then runs the
    train/validate loop, saving a checkpoint every epoch and tracking the
    best validation score.
    """
    args = parse_args()

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'train')

    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))

    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED

    # Instantiate the model factory named in the config
    # (e.g. models.cls_hrnet.get_cls_net). `eval` on config text — the
    # config is trusted input here.
    model = eval('models.'+config.MODEL.NAME+'.get_cls_net')(
        config)

    # Log a parameter/FLOP summary using a dummy input of the configured size.
    dump_input = torch.rand(
        (1, 3, config.MODEL.IMAGE_SIZE[1], config.MODEL.IMAGE_SIZE[0])
    )
    logger.info(get_model_summary(model, dump_input))

    # copy model file (snapshot the model source next to the run's outputs)
    this_dir = os.path.dirname(__file__)
    models_dst_dir = os.path.join(final_output_dir, 'models')
    if os.path.exists(models_dst_dir):
        shutil.rmtree(models_dst_dir)
    shutil.copytree(os.path.join(this_dir, '../lib/models'), models_dst_dir)

    # TensorBoard writer plus step counters shared with train()/validate().
    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    gpus = list(config.GPUS)
    print("gpus:",gpus,type(gpus))
    DEVICE = torch.device("cuda:%d"%config.GPUS[0] if torch.cuda.is_available() else "cpu")
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()
    model = model.to(DEVICE)

    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss().cuda()

    optimizer = get_optimizer(config, model)

    best_perf = 0.0
    best_model = False
    last_epoch = config.TRAIN.BEGIN_EPOCH
    if config.TRAIN.RESUME:
        # Resume epoch counter, best score, weights and optimizer state from
        # the latest checkpoint in the output directory, if one exists.
        model_state_file = os.path.join(final_output_dir,
                                        'checkpoint.pth.tar')
        if os.path.isfile(model_state_file):
            checkpoint = torch.load(model_state_file)
            last_epoch = checkpoint['epoch']
            best_perf = checkpoint['perf']
            model.module.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint (epoch {})"
                        .format(checkpoint['epoch']))
            best_model = True

    # Step or multi-step LR decay depending on how LR_STEP is configured;
    # last_epoch-1 aligns the schedule when resuming mid-run.
    if isinstance(config.TRAIN.LR_STEP, list):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_epoch-1
        )
    else:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_epoch-1
        )

    # Data loading code
    traindir = os.path.join(config.DATASET.ROOT, config.DATASET.TRAIN_SET)
    valdir = os.path.join(config.DATASET.ROOT, config.DATASET.TEST_SET)
    # Presumably ImageNet channel statistics — confirm if training on
    # substantially different imagery.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(config.MODEL.IMAGE_SIZE[0]),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    )

    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE_PER_GPU*len(gpus),
        shuffle=True,
        num_workers=config.WORKERS,
        pin_memory=True
    )

    # Validation: resize so the center crop keeps the standard 0.875 ratio.
    valid_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(int(config.MODEL.IMAGE_SIZE[0] / 0.875)),
            transforms.CenterCrop(config.MODEL.IMAGE_SIZE[0]),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=config.TEST.BATCH_SIZE_PER_GPU*len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True
    )

    for epoch in range(last_epoch, config.TRAIN.END_EPOCH):
        # NOTE(review): scheduler is stepped before the optimizer each epoch,
        # which newer PyTorch releases warn about — kept as-is.
        lr_scheduler.step()
        # train for one epoch
        train(config, train_loader, model, DEVICE, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)
        # evaluate on validation set
        perf_indicator = validate(config, valid_loader, model, criterion,
                                  final_output_dir, tb_log_dir, writer_dict)

        if perf_indicator > best_perf:
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False

        # Always save the latest state; `best_model` flags the best-so-far copy.
        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint({
            'epoch': epoch + 1,
            'model': config.MODEL.NAME,
            'state_dict': model.module.state_dict(),
            'perf': perf_indicator,
            'optimizer': optimizer.state_dict(),
        }, best_model, final_output_dir, filename='checkpoint.pth.tar')

    final_model_state_file = os.path.join(final_output_dir,
                                          'final_state.pth.tar')
    logger.info('saving final model state to {}'.format(
        final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
if __name__ == '__main__':
main()
| 33.665116 | 91 | 0.613844 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import shutil
import sys
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import _init_paths
import models
from config import config
from config import update_config
from core.function import train
from core.function import validate
from utils.modelsummary import get_model_summary
from utils.utils import get_optimizer
from utils.utils import save_checkpoint
from utils.utils import create_logger
def parse_args():
parser = argparse.ArgumentParser(description='Train classification network')
parser.add_argument('--cfg',
help='experiment configure file name',
required=True,
type=str)
parser.add_argument('--modelDir',
help='model directory',
type=str,
default='')
parser.add_argument('--logDir',
help='log directory',
type=str,
default='')
parser.add_argument('--dataDir',
help='data directory',
type=str,
default='')
parser.add_argument('--testModel',
help='testModel',
type=str,
default='')
args = parser.parse_args()
update_config(config, args)
return args
def main():
args = parse_args()
logger, final_output_dir, tb_log_dir = create_logger(
config, args.cfg, 'train')
logger.info(pprint.pformat(args))
logger.info(pprint.pformat(config))
cudnn.benchmark = config.CUDNN.BENCHMARK
torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
torch.backends.cudnn.enabled = config.CUDNN.ENABLED
model = eval('models.'+config.MODEL.NAME+'.get_cls_net')(
config)
dump_input = torch.rand(
(1, 3, config.MODEL.IMAGE_SIZE[1], config.MODEL.IMAGE_SIZE[0])
)
logger.info(get_model_summary(model, dump_input))
this_dir = os.path.dirname(__file__)
models_dst_dir = os.path.join(final_output_dir, 'models')
if os.path.exists(models_dst_dir):
shutil.rmtree(models_dst_dir)
shutil.copytree(os.path.join(this_dir, '../lib/models'), models_dst_dir)
writer_dict = {
'writer': SummaryWriter(log_dir=tb_log_dir),
'train_global_steps': 0,
'valid_global_steps': 0,
}
gpus = list(config.GPUS)
print("gpus:",gpus,type(gpus))
DEVICE = torch.device("cuda:%d"%config.GPUS[0] if torch.cuda.is_available() else "cpu")
model = torch.nn.DataParallel(model, device_ids=gpus).cuda()
model = model.to(DEVICE)
criterion = torch.nn.CrossEntropyLoss().cuda()
optimizer = get_optimizer(config, model)
best_perf = 0.0
best_model = False
last_epoch = config.TRAIN.BEGIN_EPOCH
if config.TRAIN.RESUME:
model_state_file = os.path.join(final_output_dir,
'checkpoint.pth.tar')
if os.path.isfile(model_state_file):
checkpoint = torch.load(model_state_file)
last_epoch = checkpoint['epoch']
best_perf = checkpoint['perf']
model.module.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
logger.info("=> loaded checkpoint (epoch {})"
.format(checkpoint['epoch']))
best_model = True
if isinstance(config.TRAIN.LR_STEP, list):
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
last_epoch-1
)
else:
lr_scheduler = torch.optim.lr_scheduler.StepLR(
optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
last_epoch-1
)
traindir = os.path.join(config.DATASET.ROOT, config.DATASET.TRAIN_SET)
valdir = os.path.join(config.DATASET.ROOT, config.DATASET.TEST_SET)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(config.MODEL.IMAGE_SIZE[0]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=config.TRAIN.BATCH_SIZE_PER_GPU*len(gpus),
shuffle=True,
num_workers=config.WORKERS,
pin_memory=True
)
valid_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(int(config.MODEL.IMAGE_SIZE[0] / 0.875)),
transforms.CenterCrop(config.MODEL.IMAGE_SIZE[0]),
transforms.ToTensor(),
normalize,
])),
batch_size=config.TEST.BATCH_SIZE_PER_GPU*len(gpus),
shuffle=False,
num_workers=config.WORKERS,
pin_memory=True
)
for epoch in range(last_epoch, config.TRAIN.END_EPOCH):
lr_scheduler.step()
train(config, train_loader, model, DEVICE, criterion, optimizer, epoch,
final_output_dir, tb_log_dir, writer_dict)
perf_indicator = validate(config, valid_loader, model, criterion,
final_output_dir, tb_log_dir, writer_dict)
if perf_indicator > best_perf:
best_perf = perf_indicator
best_model = True
else:
best_model = False
logger.info('=> saving checkpoint to {}'.format(final_output_dir))
save_checkpoint({
'epoch': epoch + 1,
'model': config.MODEL.NAME,
'state_dict': model.module.state_dict(),
'perf': perf_indicator,
'optimizer': optimizer.state_dict(),
}, best_model, final_output_dir, filename='checkpoint.pth.tar')
final_model_state_file = os.path.join(final_output_dir,
'final_state.pth.tar')
logger.info('saving final model state to {}'.format(
final_model_state_file))
torch.save(model.module.state_dict(), final_model_state_file)
writer_dict['writer'].close()
if __name__ == '__main__':
main()
| true | true |
1c4945b72a6e9e9e1a10dfad2632125e558e165c | 860 | py | Python | migrations/0066_auto_20190820_1448.py | audaciouscode/PassiveDataKit-Django | ed1e00c436801b9f49a3e0e6657c2adb6b2ba3d4 | [
"Apache-2.0"
] | 5 | 2016-01-26T19:19:44.000Z | 2018-12-12T18:04:04.000Z | migrations/0066_auto_20190820_1448.py | audacious-software/PassiveDataKit-Django | da91a375c075ceec938f2c9bb6b011f9f019b024 | [
"Apache-2.0"
] | 6 | 2020-02-17T20:16:28.000Z | 2021-12-13T21:51:20.000Z | migrations/0066_auto_20190820_1448.py | audacious-software/PassiveDataKit-Django | da91a375c075ceec938f2c9bb6b011f9f019b024 | [
"Apache-2.0"
] | 4 | 2020-01-29T15:36:58.000Z | 2021-06-01T18:55:26.000Z | # pylint: skip-file
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-20 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add platform / platform_version / user_agent columns to DeviceIssue."""

    dependencies = [
        ('passive_data_kit', '0065_devicemodel_reference'),
    ]

    # The three new columns are identical nullable CharFields, so generate
    # the AddField operations from one template (same order as before).
    operations = [
        migrations.AddField(
            model_name='deviceissue',
            name=column_name,
            field=models.CharField(blank=True, max_length=1048576, null=True),
        )
        for column_name in ('platform', 'platform_version', 'user_agent')
    ]
| 26.875 | 78 | 0.6 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('passive_data_kit', '0065_devicemodel_reference'),
]
operations = [
migrations.AddField(
model_name='deviceissue',
name='platform',
field=models.CharField(blank=True, max_length=1048576, null=True),
),
migrations.AddField(
model_name='deviceissue',
name='platform_version',
field=models.CharField(blank=True, max_length=1048576, null=True),
),
migrations.AddField(
model_name='deviceissue',
name='user_agent',
field=models.CharField(blank=True, max_length=1048576, null=True),
),
]
| true | true |
1c4946a18d3acce164e58e7d5d801355d9aea016 | 3,169 | py | Python | tensorboard/tools/whitespace_hygiene_test.py | isabella232/tensorboard | 77cf61f74dd57e4f3a6256e3972335bbd82feb51 | [
"Apache-2.0"
] | null | null | null | tensorboard/tools/whitespace_hygiene_test.py | isabella232/tensorboard | 77cf61f74dd57e4f3a6256e3972335bbd82feb51 | [
"Apache-2.0"
] | 1 | 2021-02-24T00:55:12.000Z | 2021-02-24T00:55:12.000Z | tensorboard/tools/whitespace_hygiene_test.py | isabella232/tensorboard | 77cf61f74dd57e4f3a6256e3972335bbd82feb51 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Check for superfluous whitespace at ends of lines.
Keeps diffs clean and persnickety developers happy.
"""
import collections
import os
import subprocess
import sys
# Repo-relative paths exempted from the trailing-whitespace check.
exceptions = frozenset(
    [
        # End-of-line whitespace is semantic in patch files when a line
        # contains a single space.
        "third_party/mock_call_assertions.patch",
    ]
)

# One `git grep` hit: file path, 1-based line number, and the matched line.
Match = collections.namedtuple("Match", ("filename", "line_number", "line"))
def main():
    """Report trailing whitespace (and stale exception entries); exit nonzero on any."""
    chdir_to_repo_root()
    matches = git_grep(" *$")
    problems = [m for m in matches if m.filename not in exceptions]
    okay = True

    if problems:
        okay = False
        print("Superfluous trailing whitespace:")
        for problem in problems:
            # Trailing "$" marks the end of line so the whitespace is visible.
            print("%s:%d:%s$" % (problem.filename, problem.line_number, problem.line))
        print()

    # Exception entries with no remaining whitespace problems are stale.
    unused = exceptions - frozenset(m.filename for m in matches)
    if unused:
        okay = False
        print("Stale exceptions (no whitespace problems; prune exceptions list):")
        for filename in unused:
            print(filename)
        print()

    sys.exit(0 if okay else 1)
def git_grep(pattern):
    """Run `git grep` and collect matches.

    This function exits the process if `git grep` writes any stderr: for
    instance, if the provided pattern is an invalid regular expression.

    Args:
      pattern: `str`; a pattern argument to `git grep`.

    Returns:
      A list of `Match` values.
    """
    proc = subprocess.Popen(
        ["git", "grep", "-Izn", "--", pattern],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    (out, err) = proc.communicate()
    if err:
        # Python 2 compat: write raw bytes through .buffer when available.
        getattr(sys.stderr, "buffer", sys.stderr).write(err)
        sys.exit(1)

    matches = []
    # `-z` NUL-separates the path/line-number/text fields; splitting the
    # output on newlines assumes no newline characters in filenames.
    for raw_line in out.splitlines():
        (name_bytes, lineno_bytes, text_bytes) = raw_line.split(b"\0", 2)
        matches.append(
            Match(
                filename=name_bytes.decode("utf-8", errors="replace"),
                line_number=int(lineno_bytes),
                line=text_bytes.decode("utf-8", errors="replace"),
            )
        )
    return matches
def chdir_to_repo_root():
    """Change the working directory to the root of the enclosing Git repository."""
    toplevel = subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
    # Strip the trailing newline robustly instead of blindly dropping the
    # last byte (`[:-1]`), which would corrupt the path if the output ever
    # lacked a newline or ended in CRLF.
    os.chdir(toplevel.rstrip(b"\r\n"))
if __name__ == "__main__":
main()
| 29.342593 | 80 | 0.634269 |
import collections
import os
import subprocess
import sys
exceptions = frozenset(
[
"third_party/mock_call_assertions.patch",
]
)
Match = collections.namedtuple("Match", ("filename", "line_number", "line"))
def main():
chdir_to_repo_root()
matches = git_grep(" *$")
errors = [m for m in matches if m.filename not in exceptions]
okay = True
if errors:
print("Superfluous trailing whitespace:")
for error in errors:
print("%s:%d:%s$" % (error.filename, error.line_number, error.line))
print()
okay = False
stale_exceptions = exceptions - frozenset(m.filename for m in matches)
if stale_exceptions:
print(
"Stale exceptions (no whitespace problems; prune exceptions list):"
)
for filename in stale_exceptions:
print(filename)
print()
okay = False
sys.exit(0 if okay else 1)
def git_grep(pattern):
cmd = ["git", "grep", "-Izn", "--", pattern]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if stderr:
getattr(sys.stderr, "buffer", sys.stderr).write(
stderr
)
sys.exit(1)
result = []
for (
line
) in stdout.splitlines():
(filename_raw, line_number_raw, line_raw) = line.split(b"\0", 2)
match = Match(
filename=filename_raw.decode("utf-8", errors="replace"),
line_number=int(line_number_raw),
line=line_raw.decode("utf-8", errors="replace"),
)
result.append(match)
return result
def chdir_to_repo_root():
toplevel = subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
toplevel = toplevel[:-1]
os.chdir(toplevel)
if __name__ == "__main__":
main()
| true | true |
1c49472d0f5e80c89a16bc24af6c12fc4c561fcb | 2,362 | py | Python | src/third_party/beaengine/tests/0f3850.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | 1 | 2022-01-17T17:40:29.000Z | 2022-01-17T17:40:29.000Z | src/third_party/beaengine/tests/0f3850.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | null | null | null | src/third_party/beaengine/tests/0f3850.py | CrackerCat/rp | 5fe693c26d76b514efaedb4084f6e37d820db023 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
    """Disassembly tests for VPDPBUSD (EVEX.{128,256,512}.66.0F38.W0 50 /r)."""

    def test(self):
        # Same instruction at the three EVEX vector lengths; only the
        # register class (xmm/ymm/zmm) differs in the expected disassembly.
        cases = (
            ('EVEX.128.66.0F38.W0', 'xmm'),
            ('EVEX.256.66.0F38.W0', 'ymm'),
            ('EVEX.512.66.0F38.W0', 'zmm'),
        )
        for evex_spec, reg in cases:
            myEVEX = EVEX(evex_spec)
            myEVEX.vvvv = 0b1111
            Buffer = bytes.fromhex('{}500e'.format(myEVEX.prefix()))
            myDisasm = Disasm(Buffer)
            myDisasm.read()
            assert_equal(myDisasm.infos.Instruction.Opcode, 0x50)
            assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpdpbusd')
            assert_equal(
                myDisasm.repr(),
                'vpdpbusd {0}25, {0}16, {0}word ptr [r14]'.format(reg),
            )
| 40.033898 | 81 | 0.662151 |
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
myEVEX = EVEX('EVEX.128.66.0F38.W0')
myEVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}500e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x50)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpdpbusd')
assert_equal(myDisasm.repr(), 'vpdpbusd xmm25, xmm16, xmmword ptr [r14]')
myEVEX = EVEX('EVEX.256.66.0F38.W0')
myEVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}500e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x50)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpdpbusd')
assert_equal(myDisasm.repr(), 'vpdpbusd ymm25, ymm16, ymmword ptr [r14]')
myEVEX = EVEX('EVEX.512.66.0F38.W0')
myEVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}500e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x50)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpdpbusd')
assert_equal(myDisasm.repr(), 'vpdpbusd zmm25, zmm16, zmmword ptr [r14]')
| true | true |
1c49473fd7bb5ce515eff66c03f9cbc72d5e5171 | 719 | py | Python | assistant/configurations/theme.py | AmulyaParitosh/Virtual-Assistant | b1a0e6d8569a481558bd04c2d9295a6933536ed4 | [
"MIT"
] | null | null | null | assistant/configurations/theme.py | AmulyaParitosh/Virtual-Assistant | b1a0e6d8569a481558bd04c2d9295a6933536ed4 | [
"MIT"
] | null | null | null | assistant/configurations/theme.py | AmulyaParitosh/Virtual-Assistant | b1a0e6d8569a481558bd04c2d9295a6933536ed4 | [
"MIT"
] | null | null | null | import json
# Theme configuration loader: reads themes.json and exposes the attributes of
# the currently selected theme as module-level settings.

# Use a context manager + json.load so the file handle is closed
# deterministically (the original open(...).read() leaked the handle).
with open('assistant/configurations/themes.json') as _themes_file:
    information = json.load(_themes_file)

# Name of the active theme; must match a "name" entry in themes.json.
Theme = "Shizuka"

for theme in information["Themes"]:
    if theme["name"] == Theme:
        # Unpack the matching theme into the module-level settings the UI reads.
        name = theme["name"]
        voice = theme["voice"]
        art = theme["ascii"]
        bg_image = theme["bg_image"]
        label_bg_colour = theme["label_bg_colour"]
        scrolltext_bg_colour = theme["scrolltext_bg_colour"]
        button_colour = theme["button_colour"]
        fg_colour = theme["fg_colour"]
        base_font = theme["base_font"]
        title_font = theme["title_font"]


def get_themes():
    """Print the name of every theme defined in themes.json."""
    for theme in information["Themes"]:
        print(theme["name"])


if __name__ == "__main__":
    get_themes()
| 23.966667 | 77 | 0.628651 | import json
information = json.loads(open('assistant/configurations/themes.json').read())
Theme = "Shizuka"
for theme in information["Themes"]:
if theme["name"] == Theme:
name = theme["name"]
voice = theme["voice"]
art = theme["ascii"]
bg_image = theme["bg_image"]
label_bg_colour = theme["label_bg_colour"]
scrolltext_bg_colour = theme["scrolltext_bg_colour"]
button_colour = theme["button_colour"]
fg_colour = theme["fg_colour"]
base_font = theme["base_font"]
title_font = theme["title_font"]
def get_themes():
for theme in information["Themes"]:
print(theme["name"])
if __name__ == "__main__":
get_themes()
| true | true |
1c4947a8a1f80457570c9ffe5b8f4037ae19954e | 943 | py | Python | submission/damagereport/api/1/urls.py | simonprast/wopi-engine | b3f59782659c8be42f4064bce5281afd391833be | [
"BSD-Source-Code"
] | null | null | null | submission/damagereport/api/1/urls.py | simonprast/wopi-engine | b3f59782659c8be42f4064bce5281afd391833be | [
"BSD-Source-Code"
] | null | null | null | submission/damagereport/api/1/urls.py | simonprast/wopi-engine | b3f59782659c8be42f4064bce5281afd391833be | [
"BSD-Source-Code"
] | null | null | null | #
# Created on Wed Nov 18 2020
#
# Copyright (c) 2020 - Simon Prast
#
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import api_views
urlpatterns = [
    # POST - create new damage report (customer)
    path('submit/', api_views.SubmitDamageReport.as_view()),

    # POST - send message to a damage report (customer/admin)
    path('submit/<int:report>/', api_views.SendMessage.as_view()),

    # GET - show all own damage reports o/w/c (customer)
    # GET - show damage reports of related users (admin)
    path('show/', api_views.GetDamageReports.as_view()),

    # GET - show all damage reports o/w/c + denied (admin)
    path('show/all/', api_views.GetAllDamageReports.as_view()),

    # GET - show all messages of a specific damage report (customer/admin)
    path('show/<int:pk>/', api_views.GetDamageReportDetails.as_view()),
]

# Allow optional format suffixes (e.g. ".json") on every route above.
urlpatterns = format_suffix_patterns(urlpatterns)
| 29.46875 | 75 | 0.711559 |
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import api_views
urlpatterns = [
path('submit/', api_views.SubmitDamageReport.as_view()),
path('submit/<int:report>/', api_views.SendMessage.as_view()),
path('show/', api_views.GetDamageReports.as_view()),
path('show/all/', api_views.GetAllDamageReports.as_view()),
path('show/<int:pk>/', api_views.GetDamageReportDetails.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| true | true |
1c494833f793e0560e6b2f5a6c672a8f1d65c98c | 2,574 | py | Python | odd_tableau_adapter/mappers/sheets.py | opendatadiscovery/odd-tableau-adapter | dee69398ccdbed6acbc02a13c188f5ec1f26a7e1 | [
"Apache-2.0"
] | null | null | null | odd_tableau_adapter/mappers/sheets.py | opendatadiscovery/odd-tableau-adapter | dee69398ccdbed6acbc02a13c188f5ec1f26a7e1 | [
"Apache-2.0"
] | 1 | 2021-11-01T18:00:00.000Z | 2021-11-01T18:00:00.000Z | odd_tableau_adapter/mappers/sheets.py | opendatadiscovery/odd-tableau-adapter | dee69398ccdbed6acbc02a13c188f5ec1f26a7e1 | [
"Apache-2.0"
] | null | null | null | from copy import deepcopy
from datetime import datetime
import pytz
from odd_models.models import DataEntity, DataConsumer, DataEntityType
from oddrn_generator import TableauGenerator
from . import _TABLEAU_DATETIME_FORMAT, _data_consumer_metadata_schema_url, _data_consumer_metadata_excluded_keys
from .metadata import _append_metadata_extension
def map_sheet(oddrn_generator: TableauGenerator, sheets: list[dict]) -> list[DataEntity]:
    """Map Tableau sheet dicts onto ODD ``DataEntity`` dashboard records.

    :param oddrn_generator: generator whose workbook/sheet path components are
        updated in place for every sheet processed.
    :param sheets: sheet dicts as returned by the Tableau metadata API.
    :return: one DASHBOARD-typed ``DataEntity`` per input sheet.
    """
    data_entities: list[DataEntity] = []
    for sheet in sheets:
        oddrn_generator.set_oddrn_paths(workbooks=sheet['workbook']['name'], sheets=sheet['name'])
        # DataEntity
        data_entity: DataEntity = DataEntity(
            oddrn=oddrn_generator.get_oddrn_by_path("sheets"),
            name=sheet['name'],
            owner=sheet['workbook'].get('owner', {}).get('name'),
            metadata=[],
            type=DataEntityType.DASHBOARD,
        )
        data_entities.append(data_entity)
        _append_metadata_extension(data_entity.metadata, _data_consumer_metadata_schema_url, sheet,
                                   _data_consumer_metadata_excluded_keys)
        # Tableau timestamps are parsed with the API's fixed format and
        # normalized to UTC ISO-8601 strings.
        if sheet['createdAt'] is not None:
            data_entity.created_at = datetime.strptime(sheet['createdAt'], _TABLEAU_DATETIME_FORMAT) \
                .replace(tzinfo=pytz.utc) \
                .isoformat()
        if sheet['updatedAt'] is not None:
            data_entity.updated_at = datetime.strptime(sheet['updatedAt'], _TABLEAU_DATETIME_FORMAT) \
                .replace(tzinfo=pytz.utc) \
                .isoformat()
        else:
            # No explicit update time: fall back to the creation time when known.
            if sheet['createdAt'] is not None:
                data_entity.updated_at = data_entity.created_at
        # DataConsumer: inputs are the ODDRNs of every upstream table the
        # sheet's data-source fields reference.
        data_entity.data_consumer = DataConsumer(
            inputs=_map_datasource_fields_to_oddrns(
                oddrn_generator, sheet.get('datasourceFields', {})
            ),
            outputs=[],
        )
    return data_entities
def _map_datasource_fields_to_oddrns(oddrn_generator: TableauGenerator, datasource_fields: dict) -> list[str]:
oddrn_gen = deepcopy(oddrn_generator) # do not change previous oddrn
inputs_oddrns: set = set()
for field in datasource_fields:
for table in field['upstreamTables']:
oddrn_gen.set_oddrn_paths(
databases=table.get('database', {}).get('name', ''),
schemas=table['schema'] or None,
tables=table['name']
)
inputs_oddrns.add(oddrn_gen.get_oddrn_by_path("tables"))
return list(inputs_oddrns)
| 37.852941 | 113 | 0.653458 | from copy import deepcopy
from datetime import datetime
import pytz
from odd_models.models import DataEntity, DataConsumer, DataEntityType
from oddrn_generator import TableauGenerator
from . import _TABLEAU_DATETIME_FORMAT, _data_consumer_metadata_schema_url, _data_consumer_metadata_excluded_keys
from .metadata import _append_metadata_extension
def map_sheet(oddrn_generator: TableauGenerator, sheets: list[dict]) -> list[DataEntity]:
data_entities: list[DataEntity] = []
for sheet in sheets:
oddrn_generator.set_oddrn_paths(workbooks=sheet['workbook']['name'], sheets=sheet['name'])
data_entity: DataEntity = DataEntity(
oddrn=oddrn_generator.get_oddrn_by_path("sheets"),
name=sheet['name'],
owner=sheet['workbook'].get('owner', {}).get('name'),
metadata=[],
type=DataEntityType.DASHBOARD,
)
data_entities.append(data_entity)
_append_metadata_extension(data_entity.metadata, _data_consumer_metadata_schema_url, sheet,
_data_consumer_metadata_excluded_keys)
if sheet['createdAt'] is not None:
data_entity.created_at = datetime.strptime(sheet['createdAt'], _TABLEAU_DATETIME_FORMAT) \
.replace(tzinfo=pytz.utc) \
.isoformat()
if sheet['updatedAt'] is not None:
data_entity.updated_at = datetime.strptime(sheet['updatedAt'], _TABLEAU_DATETIME_FORMAT) \
.replace(tzinfo=pytz.utc) \
.isoformat()
else:
if sheet['createdAt'] is not None:
data_entity.updated_at = data_entity.created_at
data_entity.data_consumer = DataConsumer(
inputs=_map_datasource_fields_to_oddrns(
oddrn_generator, sheet.get('datasourceFields', {})
),
outputs=[],
)
return data_entities
def _map_datasource_fields_to_oddrns(oddrn_generator: TableauGenerator, datasource_fields: dict) -> list[str]:
oddrn_gen = deepcopy(oddrn_generator)
inputs_oddrns: set = set()
for field in datasource_fields:
for table in field['upstreamTables']:
oddrn_gen.set_oddrn_paths(
databases=table.get('database', {}).get('name', ''),
schemas=table['schema'] or None,
tables=table['name']
)
inputs_oddrns.add(oddrn_gen.get_oddrn_by_path("tables"))
return list(inputs_oddrns)
| true | true |
1c4949bbb3f9fca427ac14839fdc5b4b1b8faa8f | 7,434 | py | Python | tests/test_metadata.py | tskisner/sotodlib | 9b80171129ea312bc7a61ce5c37d6abfbb3d5be9 | [
"MIT"
] | null | null | null | tests/test_metadata.py | tskisner/sotodlib | 9b80171129ea312bc7a61ce5c37d6abfbb3d5be9 | [
"MIT"
] | null | null | null | tests/test_metadata.py | tskisner/sotodlib | 9b80171129ea312bc7a61ce5c37d6abfbb3d5be9 | [
"MIT"
] | null | null | null | # Copyright (c) 2020 Simons Observatory.
# Full license can be found in the top level "LICENSE" file.
"""Demonstrate construction of some simple metadata structures. This
includes HDF5 IO helper routines, and the ObsDb/DetDb resolution and
association system used in Context/SuperLoader.
"""
import unittest
import tempfile
from sotodlib.core import metadata
from sotodlib.io.metadata import ResultSetHdfLoader, write_dataset, _decode_array
import os
import h5py
class MetadataTest(unittest.TestCase):
    """Exercises ResultSet/HDF5 helper functions and the ManifestDb plus
    ObsDb/DetDb metadata resolution machinery against temporary files."""

    def setUp(self):
        # Scratch directory for the HDF5 / sqlite files each test writes.
        self.tempdir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.tempdir.cleanup()

    def test_000_support(self):
        """Test some numpy-HDF5 conversion support functions.
        """
        rs = metadata.ResultSet(keys=['a_string', 'a_float', 'a_bad_string', 'a_bad_float'])
        rs.rows.append(('hello', 1.2, 'yuck', 1.3))
        aru = rs.asarray(hdf_compat=True)
        # hdf_compat=True must force byte strings (numpy 'S' dtype kind).
        self.assertTrue(aru.dtype['a_string'].char == 'S')

        # Conversion code.  Keys mapped to None are dropped from the output.
        arx = _decode_array(aru, key_map={
            'a_string': 'another_string',
            'a_float': 'another_float',
            'a_bad_string': None,
            'a_bad_float': None,
        })
        self.assertCountEqual(arx.dtype.names, ['another_string', 'another_float'])
        self.assertEqual(arx['another_string'].dtype.char, 'U')

    def test_001_hdf(self):
        """Test metadata write/read to HDF5 datasets
        """
        hdf_fn = os.path.join(self.tempdir.name, '_test_000_hdf.h5')
        # The reason we're here today is that this things works but is
        # going to be removed.
        loader = ResultSetHdfLoader()
        test_obs_id = 'testobs_1234'
        # Create an hdf5 dataset which is a structured array with only the
        # 'timeconst' column, containing the single fixed value. Since there
        # are no columns with names prefixed by 'dets:' or 'obs:', this value
        # will be broadcast to all observations and detectors that access it.
        TGOOD = 1e-3
        rs = metadata.ResultSet(keys=['timeconst'])
        rs.append({'timeconst': TGOOD})
        with h5py.File(hdf_fn, 'a') as fout:
            # Simple one...
            write_dataset(rs, fout, 'timeconst_1ms', overwrite=True)
        # Simple look-up:
        req = {'filename': hdf_fn,
               'obs:obs_id': test_obs_id,
               'dataset': 'timeconst_1ms'}
        data = loader.from_loadspec(req)
        self.assertCountEqual(data['timeconst'], [TGOOD])

    def test_010_dbs(self):
        """Test metadata detdb/obsdb resolution system
        This tests one of the more complicated cases:
        - The ManifestDb includes restrictions on dets:band, so f090
          is to be loaded from one dataset and f150 is to be loaded
          from another.
        - The two datasets both provide values for f090 and f150, so
          the code has to know to ignore the ones that weren't asked
          for.
        """
        hdf_fn = os.path.join(self.tempdir.name, '_test_010_dbs.h5')
        mandb_fn = os.path.join(self.tempdir.name, '_test_010_dbs.sqlite')
        # Add two datasets to the HDF file. They are called
        # "timeconst_early" and "timeconst_late" but there is no
        # specific time range associated with each. Each dataset
        # contains a value for bands f090 and f150. The "early" set
        # has TBAD for f150 and the "late" set has TBAD for f090.
        T090, T150, TBAD = 90e-3, 150e-3, 1e0
        with h5py.File(hdf_fn, 'a') as fout:
            # First test.
            for label, tau1, tau2 in [('early', T090, TBAD),
                                      ('late', TBAD, T150)]:
                rs = metadata.ResultSet(keys=['dets:band', 'timeconst'])
                rs.append({'dets:band': 'f090', 'timeconst': tau1})
                rs.append({'dets:band': 'f150', 'timeconst': tau2})
                write_dataset(rs, fout, 'timeconst_%s' % label, overwrite=True)
        # To match the early/late example we need DetDb and ObsDb.
        detdb = metadata.DetDb()
        detdb.create_table('base', ["`band` str", "`polcode` str"])
        detdb.add_props('base', 'det1', band='f090', polcode='A')
        detdb.add_props('base', 'det2', band='f090', polcode='B')
        detdb.add_props('base', 'det3', band='f150', polcode='A')
        detdb.add_props('base', 'det4', band='f150', polcode='B')
        obsdb = metadata.ObsDb()
        t_pivot = 2000010000
        obsdb.add_obs_columns(['timestamp float'])
        obsdb.update_obs('obs_00', {'timestamp': t_pivot - 10000})
        obsdb.update_obs('obs_01', {'timestamp': t_pivot + 10000})
        # Test 1 -- ManifestDb and Stored datasets both have "band" rules.
        scheme = metadata.ManifestScheme() \
            .add_range_match('obs:timestamp') \
            .add_data_field('dets:band') \
            .add_data_field('dataset')
        mandb = metadata.ManifestDb(scheme=scheme)
        for band, this_pivot in [('f090', t_pivot + 1e6),
                                 ('f150', t_pivot - 1e6)]:
            mandb.add_entry({'dataset': 'timeconst_early',
                             'dets:band': band,
                             'obs:timestamp': (0, this_pivot)},
                            filename=hdf_fn)
            mandb.add_entry({'dataset': 'timeconst_late',
                             'dets:band': band,
                             'obs:timestamp': (this_pivot, 4e9)},
                            filename=hdf_fn)
        mandb.to_file(mandb_fn)
        # The SuperLoader is where the logic lives to combine multiple
        # results and pull out the right information in the right
        # order. It should leave us with no TBAD values.
        loader = metadata.SuperLoader(obsdb=obsdb, detdb=detdb)
        spec_list = [
            {'db': mandb_fn,
             'name': 'tau&timeconst'}
        ]
        mtod = loader.load(spec_list, {'obs:obs_id': 'obs_00'})
        self.assertCountEqual(mtod['tau'], [T090, T090, T150, T150])
        # Test 2: ManifestDb specifies polcode, which crosses with
        # dataset band.
        scheme = metadata.ManifestScheme() \
            .add_range_match('obs:timestamp') \
            .add_data_field('dets:polcode') \
            .add_data_field('dataset')
        mandb = metadata.ManifestDb(scheme=scheme)
        for polcode, this_pivot in [('A', t_pivot + 1e6),
                                    ('B', t_pivot - 1e6)]:
            mandb.add_entry({'dataset': 'timeconst_early',
                             'dets:polcode': polcode,
                             'obs:timestamp': (0, this_pivot)},
                            filename=hdf_fn)
            mandb.add_entry({'dataset': 'timeconst_late',
                             'dets:polcode': polcode,
                             'obs:timestamp': (this_pivot, 4e9)},
                            filename=hdf_fn)
        mandb.to_file(mandb_fn)
        # Now we expect only f090 A and f150 B to resolve to non-bad vals.
        # Make sure you reinit the loader, to avoid cached dbs.
        loader = metadata.SuperLoader(obsdb=obsdb, detdb=detdb)
        mtod = loader.load(spec_list, {'obs:obs_id': 'obs_00'})
        self.assertCountEqual(mtod['tau'], [T090, TBAD, TBAD, T150])
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| 41.764045 | 92 | 0.576944 |
import unittest
import tempfile
from sotodlib.core import metadata
from sotodlib.io.metadata import ResultSetHdfLoader, write_dataset, _decode_array
import os
import h5py
class MetadataTest(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.TemporaryDirectory()
def tearDown(self):
self.tempdir.cleanup()
def test_000_support(self):
rs = metadata.ResultSet(keys=['a_string', 'a_float', 'a_bad_string', 'a_bad_float'])
rs.rows.append(('hello', 1.2, 'yuck', 1.3))
aru = rs.asarray(hdf_compat=True)
self.assertTrue(aru.dtype['a_string'].char == 'S')
arx = _decode_array(aru, key_map={
'a_string': 'another_string',
'a_float': 'another_float',
'a_bad_string': None,
'a_bad_float': None,
})
self.assertCountEqual(arx.dtype.names, ['another_string', 'another_float'])
self.assertEqual(arx['another_string'].dtype.char, 'U')
def test_001_hdf(self):
hdf_fn = os.path.join(self.tempdir.name, '_test_000_hdf.h5')
# going to be removed.
loader = ResultSetHdfLoader()
test_obs_id = 'testobs_1234'
# Create an hdf5 dataset which is a structured array with only the
# 'timeconst' column, containing the single fixed value. Since there
# are no columns with names prefixed by 'dets:' or 'obs:', this value
# will be broadcast to all observations and detectors that access it.
TGOOD = 1e-3
rs = metadata.ResultSet(keys=['timeconst'])
rs.append({'timeconst': TGOOD})
with h5py.File(hdf_fn, 'a') as fout:
# Simple one...
write_dataset(rs, fout, 'timeconst_1ms', overwrite=True)
# Simple look-up:
req = {'filename': hdf_fn,
'obs:obs_id': test_obs_id,
'dataset': 'timeconst_1ms'}
data = loader.from_loadspec(req)
self.assertCountEqual(data['timeconst'], [TGOOD])
def test_010_dbs(self):
hdf_fn = os.path.join(self.tempdir.name, '_test_010_dbs.h5')
mandb_fn = os.path.join(self.tempdir.name, '_test_010_dbs.sqlite')
# Add two datasets to the HDF file. They are called
# "timeconst_early" and "timeconst_late" but there is no
# specific time range associated with each. Each dataset
# contains a value for bands f090 and f150. The "early" set
# has TBAD for f150 and the "late" set has TBAD for f090.
T090, T150, TBAD = 90e-3, 150e-3, 1e0
with h5py.File(hdf_fn, 'a') as fout:
# First test.
for label, tau1, tau2 in [('early', T090, TBAD),
('late', TBAD, T150)]:
rs = metadata.ResultSet(keys=['dets:band', 'timeconst'])
rs.append({'dets:band': 'f090', 'timeconst': tau1})
rs.append({'dets:band': 'f150', 'timeconst': tau2})
write_dataset(rs, fout, 'timeconst_%s' % label, overwrite=True)
# To match the early/late example we need DetDb and ObsDb.
detdb = metadata.DetDb()
detdb.create_table('base', ["`band` str", "`polcode` str"])
detdb.add_props('base', 'det1', band='f090', polcode='A')
detdb.add_props('base', 'det2', band='f090', polcode='B')
detdb.add_props('base', 'det3', band='f150', polcode='A')
detdb.add_props('base', 'det4', band='f150', polcode='B')
obsdb = metadata.ObsDb()
t_pivot = 2000010000
obsdb.add_obs_columns(['timestamp float'])
obsdb.update_obs('obs_00', {'timestamp': t_pivot - 10000})
obsdb.update_obs('obs_01', {'timestamp': t_pivot + 10000})
# Test 1 -- ManifestDb and Stored datasets both have "band" rules.
scheme = metadata.ManifestScheme() \
.add_range_match('obs:timestamp') \
.add_data_field('dets:band') \
.add_data_field('dataset')
mandb = metadata.ManifestDb(scheme=scheme)
for band, this_pivot in [('f090', t_pivot + 1e6),
('f150', t_pivot - 1e6)]:
mandb.add_entry({'dataset': 'timeconst_early',
'dets:band': band,
'obs:timestamp': (0, this_pivot)},
filename=hdf_fn)
mandb.add_entry({'dataset': 'timeconst_late',
'dets:band': band,
'obs:timestamp': (this_pivot, 4e9)},
filename=hdf_fn)
mandb.to_file(mandb_fn)
# The SuperLoader is where the logic lives to combine multiple
# results and pull out the right information in the right
# order. It should leave us with no TBAD values.
loader = metadata.SuperLoader(obsdb=obsdb, detdb=detdb)
spec_list = [
{'db': mandb_fn,
'name': 'tau&timeconst'}
]
mtod = loader.load(spec_list, {'obs:obs_id': 'obs_00'})
self.assertCountEqual(mtod['tau'], [T090, T090, T150, T150])
# Test 2: ManifestDb specifies polcode, which crosses with
# dataset band.
scheme = metadata.ManifestScheme() \
.add_range_match('obs:timestamp') \
.add_data_field('dets:polcode') \
.add_data_field('dataset')
mandb = metadata.ManifestDb(scheme=scheme)
for polcode, this_pivot in [('A', t_pivot + 1e6),
('B', t_pivot - 1e6)]:
mandb.add_entry({'dataset': 'timeconst_early',
'dets:polcode': polcode,
'obs:timestamp': (0, this_pivot)},
filename=hdf_fn)
mandb.add_entry({'dataset': 'timeconst_late',
'dets:polcode': polcode,
'obs:timestamp': (this_pivot, 4e9)},
filename=hdf_fn)
mandb.to_file(mandb_fn)
# Now we expect only f090 A and f150 B to resolve to non-bad vals.
# Make sure you reinit the loader, to avoid cached dbs.
loader = metadata.SuperLoader(obsdb=obsdb, detdb=detdb)
mtod = loader.load(spec_list, {'obs:obs_id': 'obs_00'})
self.assertCountEqual(mtod['tau'], [T090, TBAD, TBAD, T150])
if __name__ == '__main__':
unittest.main()
| true | true |
1c4949e8507d2d5b90702103641c5f8095dbb773 | 2,557 | py | Python | main.py | fsevenm/ulauncher-uuid | 2fbb70fd2af246277b2baff03465bc8bd971c85f | [
"MIT"
] | 1 | 2022-01-29T16:30:00.000Z | 2022-01-29T16:30:00.000Z | main.py | fsevenm/ulauncher-uuid | 2fbb70fd2af246277b2baff03465bc8bd971c85f | [
"MIT"
] | null | null | null | main.py | fsevenm/ulauncher-uuid | 2fbb70fd2af246277b2baff03465bc8bd971c85f | [
"MIT"
] | null | null | null | import logging
import uuid
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
logger = logging.getLogger(__name__)
class UuidExtension(Extension):
    """Ulauncher extension entry point that wires up the UUID keyword listener."""

    def __init__(self):
        # Log before initialization so startup problems are easier to trace.
        logger.info('init UUID extension')
        super().__init__()
        # Route keyword queries to our listener.
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
class KeywordQueryEventListener(EventListener):
    """Handles the extension's keyword query: ``<keyword> [version] [name]``.

    ``version`` is one of v1/v3/v4/v5 (default ``v4``); ``name`` is the value
    hashed by the name-based v3/v5 variants (default ``python.org``).
    """

    def on_event(self, event, extension):
        """Generate UUID(s) per the query arguments and build result items.

        :param event: KeywordQueryEvent carrying the user's argument string.
        :param extension: the owning UuidExtension (unused).
        :return: RenderResultListAction with one item per generated UUID;
            selecting an item copies that UUID to the clipboard.
        """
        accepted_versions = ["v1", "v3", "v4", "v5"]
        v = "v4"             # default UUID version
        name = "python.org"  # default name for the name-based variants

        args_string = event.get_argument()
        if args_string is not None:
            # Expected shape: "<version> <name>"; both parts optional.
            # str.split always yields at least one element, so no IndexError
            # handling is needed for args[0] (the original routed this
            # through try/except and an always-true `is not None` check).
            args = args_string.split(' ')
            if args[0] in accepted_versions:
                v = args[0]
            if len(args) > 1:
                name = args[1]

        generated_uuids = []
        if v == "v1":
            generated_uuids.append(["UUID v1", str(uuid.uuid1())])
        elif v == "v4":
            generated_uuids.append(["UUID v4", str(uuid.uuid4())])
        elif v == "v3":
            # Name-based (MD5) UUIDs for both the DNS and URL namespaces.
            generated_uuids.append(["UUID v3 DNS", str(uuid.uuid3(uuid.NAMESPACE_DNS, name))])
            generated_uuids.append(["UUID v3 URL", str(uuid.uuid3(uuid.NAMESPACE_URL, name))])
        elif v == "v5":
            # Name-based (SHA-1) UUIDs for both the DNS and URL namespaces.
            generated_uuids.append(["UUID v5 DNS", str(uuid.uuid5(uuid.NAMESPACE_DNS, name))])
            generated_uuids.append(["UUID v5 URL", str(uuid.uuid5(uuid.NAMESPACE_URL, name))])

        items = [
            ExtensionResultItem(icon='images/icon.png',
                                name=uuid_value,
                                description=desc,
                                highlightable=False,
                                on_enter=CopyToClipboardAction(uuid_value))
            for desc, uuid_value in generated_uuids
        ]
        return RenderResultListAction(items)
# Entry point when launched by Ulauncher as a standalone extension process.
if __name__ == '__main__':
    UuidExtension().run()
| 35.027397 | 94 | 0.596402 | import logging
import uuid
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
logger = logging.getLogger(__name__)
class UuidExtension(Extension):
def __init__(self):
logger.info('init UUID extension')
super(UuidExtension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
items = []
generated_uuids = []
accepted_versions = ["v1", "v3", "v4", "v5"]
args = None
v = "v4"
name = "python.org"
args_string = event.get_argument()
if args_string is not None:
args = args_string.split(' ')
try:
if args is not None and args[0] in accepted_versions:
v = args[0]
except IndexError:
pass
try:
if args is not None and args[1] is not None:
name = args[1]
except IndexError:
pass
if v == "v1":
generated_uuids.append(["UUID v1", str(uuid.uuid1())])
elif v == "v4":
generated_uuids.append(["UUID v4", str(uuid.uuid4())])
elif v == "v3":
generated_uuids.append(["UUID v3 DNS", str(uuid.uuid3(uuid.NAMESPACE_DNS, name))])
generated_uuids.append(["UUID v3 URL", str(uuid.uuid3(uuid.NAMESPACE_URL, name))])
elif v == "v5":
generated_uuids.append(["UUID v5 DNS", str(uuid.uuid5(uuid.NAMESPACE_DNS, name))])
generated_uuids.append(["UUID v5 URL", str(uuid.uuid5(uuid.NAMESPACE_URL, name))])
for desc, uuid_value in generated_uuids:
items.append(ExtensionResultItem(icon='images/icon.png',
name=uuid_value,
description=desc,
highlightable=False,
on_enter=CopyToClipboardAction(uuid_value)
))
return RenderResultListAction(items)
if __name__ == '__main__':
UuidExtension().run()
| true | true |
1c494a3c50140aabc3b3a441f1a35000c0f75722 | 189 | py | Python | pset_loops/loop_basics/p5.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 5 | 2019-04-08T20:05:37.000Z | 2019-12-04T20:48:45.000Z | pset_loops/loop_basics/p5.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 8 | 2019-04-15T15:16:05.000Z | 2022-02-12T10:33:32.000Z | pset_loops/loop_basics/p5.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 2 | 2019-04-10T00:14:42.000Z | 2020-02-26T20:35:21.000Z | """
Factors
"""
# Find all factors of a number that a user inputs and print out 'The factors of <the_user_input_number> are: '.
user_input = input('Enter a number to find its factors: ')
| 23.625 | 111 | 0.714286 |
user_input = input('Enter a number to find its factors: ')
| true | true |
1c494c0c55610349d045688af032b680b719446c | 2,457 | py | Python | pypeln/process/api/ordered.py | quarckster/pypeln | f4160d0f4d4718b67f79a0707d7261d249459a4b | [
"MIT"
] | 1,281 | 2018-09-20T05:35:27.000Z | 2022-03-30T01:29:48.000Z | pypeln/process/api/ordered.py | webclinic017/pypeln | 5231806f2cac9d2019dacbbcf913484fd268b8c1 | [
"MIT"
] | 78 | 2018-09-18T20:38:12.000Z | 2022-03-30T20:16:02.000Z | pypeln/process/api/ordered.py | webclinic017/pypeln | 5231806f2cac9d2019dacbbcf913484fd268b8c1 | [
"MIT"
] | 88 | 2018-09-24T10:46:14.000Z | 2022-03-28T09:34:50.000Z | import bisect
import typing as tp
from pypeln import utils as pypeln_utils
from pypeln.utils import A, B, T
from ..stage import Stage
from ..worker import ProcessFn, Worker
from .to_stage import to_stage
class Ordered(tp.NamedTuple):
    """Process function that buffers every incoming element and re-emits
    them sorted (elements compare by their creation order index)."""

    def __call__(self, worker: "Worker", **kwargs):
        """Drain the input queue into a sorted buffer, then forward in order.

        Nothing is emitted until the input queue is exhausted, so all
        elements must fit in memory.
        """
        elems = []

        # Keep the buffer sorted as elements arrive.
        for elem in worker.stage_params.input_queue:
            bisect.insort(elems, elem)

        # Forward in ascending order.  Iterating directly avoids the O(n^2)
        # cost of the original repeated list.pop(0) from the front.
        for elem in elems:
            worker.stage_params.output_queues.put(elem)
# Typing overloads: calling ordered(stage) returns a Stage; calling it with
# no stage returns a Partial for later application in a pipeline expression.
@tp.overload
def ordered(
    stage: Stage[A],
    maxsize: int = 0,
) -> Stage[A]:
    ...
@tp.overload
def ordered(maxsize: int = 0) -> pypeln_utils.Partial[Stage[A]]:
    ...
def ordered(
    stage: tp.Union[
        Stage[A], tp.Iterable[A], pypeln_utils.Undefined
    ] = pypeln_utils.UNDEFINED,
    maxsize: int = 0,
) -> tp.Union[Stage[A], pypeln_utils.Partial[Stage[A]]]:
    """
    Creates a stage that sorts its elements based on their order of creation on the source iterable(s) of the pipeline.

    ```python
    import pypeln as pl
    import random
    import time
    def slow_squared(x):
        time.sleep(random.random())
        return x ** 2
    stage = range(5)
    stage = pl.process.map(slow_squared, stage, workers = 2)
    stage = pl.process.ordered(stage)
    print(list(stage)) # [0, 1, 4, 9, 16]
    ```

    !!! note
        `ordered` will work even if the previous stages are from different `pypeln` modules, but it may not work if you introduce an intermediate external iterable stage.

    !!! warning
        This stage will not yield until it accumulates all of the elements from the previous stage, use this only if all elements fit in memory.

    Arguments:
        stage: A Stage or Iterable.
        maxsize: The maximum number of objects the stage can hold simultaneously, if set to `0` (default) then the stage can grow unbounded.

    Returns:
        If the `stage` parameter is given then this function returns an iterable, else it returns a `Partial`.
    """

    if isinstance(stage, pypeln_utils.Undefined):
        # Bug fix: the partial must carry `maxsize` through; the original
        # dropped it, silently resetting maxsize to 0 when `ordered` was
        # used in partial-application form.
        return pypeln_utils.Partial(lambda stage: ordered(stage, maxsize=maxsize))

    stage = to_stage(stage, maxsize=maxsize)

    # Single worker: ordering requires a single point of accumulation.
    return Stage(
        process_fn=Ordered(),
        workers=1,
        maxsize=maxsize,
        timeout=0,
        total_sources=stage.workers,
        dependencies=[stage],
        on_start=None,
        on_done=None,
        use_threads=False,
        f_args=[],
    )
| 26.138298 | 169 | 0.64998 | import bisect
import typing as tp
from pypeln import utils as pypeln_utils
from pypeln.utils import A, B, T
from ..stage import Stage
from ..worker import ProcessFn, Worker
from .to_stage import to_stage
class Ordered(tp.NamedTuple):
def __call__(self, worker: Worker, **kwargs):
elems = []
for elem in worker.stage_params.input_queue:
bisect.insort(elems, elem)
for _ in range(len(elems)):
worker.stage_params.output_queues.put(elems.pop(0))
@tp.overload
def ordered(
stage: Stage[A],
maxsize: int = 0,
) -> Stage[A]:
...
@tp.overload
def ordered(maxsize: int = 0) -> pypeln_utils.Partial[Stage[A]]:
...
def ordered(
stage: tp.Union[
Stage[A], tp.Iterable[A], pypeln_utils.Undefined
] = pypeln_utils.UNDEFINED,
maxsize: int = 0,
) -> tp.Union[Stage[A], pypeln_utils.Partial[Stage[A]]]:
if isinstance(stage, pypeln_utils.Undefined):
return pypeln_utils.Partial(lambda stage: ordered(stage))
stage = to_stage(stage, maxsize=maxsize)
return Stage(
process_fn=Ordered(),
workers=1,
maxsize=maxsize,
timeout=0,
total_sources=stage.workers,
dependencies=[stage],
on_start=None,
on_done=None,
use_threads=False,
f_args=[],
)
| true | true |
1c494d4b0502d2f40178993523ea1cca94619b40 | 55 | py | Python | lahman/__init__.py | PeterA182/liteSaber | 6560feb70fd23916c0188ba98a751f8fee99a18b | [
"MIT"
] | null | null | null | lahman/__init__.py | PeterA182/liteSaber | 6560feb70fd23916c0188ba98a751f8fee99a18b | [
"MIT"
] | null | null | null | lahman/__init__.py | PeterA182/liteSaber | 6560feb70fd23916c0188ba98a751f8fee99a18b | [
"MIT"
] | 1 | 2019-06-28T01:19:38.000Z | 2019-06-28T01:19:38.000Z | __author__ = 'Peter Altamura'
from lahman import Lahman | 27.5 | 29 | 0.818182 | __author__ = 'Peter Altamura'
from lahman import Lahman | true | true |
1c494dfc5b9895a242fb3bc427570d5fcb2fd608 | 12,414 | py | Python | lib/pavilion/expression_functions/base.py | ubccr/pavilion2 | 4c6d043b436761d9162d8824657f51cedc9907cc | [
"BSD-3-Clause"
] | null | null | null | lib/pavilion/expression_functions/base.py | ubccr/pavilion2 | 4c6d043b436761d9162d8824657f51cedc9907cc | [
"BSD-3-Clause"
] | null | null | null | lib/pavilion/expression_functions/base.py | ubccr/pavilion2 | 4c6d043b436761d9162d8824657f51cedc9907cc | [
"BSD-3-Clause"
] | null | null | null | """Contains the base Expression Function plugin class."""
import logging
import re
import inspect
from yapsy import IPlugin
LOGGER = logging.getLogger(__file__)
# The dictionary of available function plugins.
_FUNCTIONS = {} # type: {str,FunctionPlugin}
class FunctionPluginError(RuntimeError):
    """Error raised when there's a problem with a function plugin
    itself (bad name, mismatched argument specs, missing overrides)."""
class FunctionArgError(ValueError):
    """Error raised when a function plugin has a problem with the
    function arguments it was called with."""
def num(val):
    """Return ``val`` as an int, float, or bool, depending on what it most
    closely resembles.

    Numbers (and bools, which are ints) pass through unchanged; the strings
    ``'True'``/``'False'`` become bools; other strings are tried as int,
    then float.

    :raises ValueError: if ``val`` is a string that is neither numeric nor
        'True'/'False'.
    :raises RuntimeError: if ``val`` is not a number or string at all.
    """

    if isinstance(val, (float, int)):
        return val
    elif val in ('True', 'False'):
        return val == 'True'
    elif isinstance(val, str):
        try:
            return int(val)
        except ValueError:
            pass

        try:
            return float(val)
        except ValueError:
            # Bug fix: the original never called .format(val), so the
            # message was emitted with a literal '{}' placeholder.
            raise ValueError("Could not convert '{}' to either "
                             "int or float.".format(val))

    raise RuntimeError("Invalid value '{}' given to num.".format(val))
class FunctionPlugin(IPlugin.IPlugin):
    """Plugin base class for math functions.

    Child classes must override ``__init__`` (as is typical for Pavilion
    plugin), and must also provide a method to act as the function itself.
    This method must have the same name as the plugin (ie. The 'max' plugin
    must have a 'max' method), and take the arguments the function expects.
    """

    VALID_SPEC_TYPES = (
        int,
        float,
        str,
        bool,
        num,
    )

    NAME_RE = re.compile(r'[a-zA-Z][a-zA-Z0-9_]*$')

    PRIO_CORE = 0
    PRIO_COMMON = 10
    PRIO_USER = 20

    def __init__(self, name, description, arg_specs,
                 priority=PRIO_COMMON):
        """
        :param str name: The name of this function.
        :param str description: A short description of this function.
        :param int priority: The plugin priority.
        :param [type] arg_specs: A list of type specs for each function
            argument. The spec for each argument defines what structure
            and types the value will have, and the auto-conversions that
            will happen if possible. ``None`` denotes that arg_specs
            won't be used or validated, and requires that ``_validate_arg`` be
            overridden.
        """

        if not self.NAME_RE.match(name):
            raise FunctionPluginError(
                "Invalid function name: '{}'".format(name))

        self.name = name
        self.description = description
        self.priority = priority

        sig = inspect.signature(getattr(self, self.name))

        if arg_specs is None:
            # Without arg specs the plugin must supply both its own argument
            # validation and its own signature rendering.
            if self._validate_arg is FunctionPlugin._validate_arg:
                raise RuntimeError(
                    "Function plugin {} at {} was given an arg_spec of "
                    "'None', but did not override '_validate_arg'."
                    .format(self.name, self.path)
                )
            if self.__class__.signature is FunctionPlugin.signature:
                raise RuntimeError(
                    "Function plugin {} at {} was given an arg_spec of "
                    "'None', but did not override 'signature'."
                    .format(self.name, self.path)
                )
        else:
            if len(sig.parameters) != len(arg_specs):
                raise FunctionPluginError(
                    "Invalid arg specs. The function takes {} arguments, but"
                    "an arg_spec of length {} was provided."
                    .format(len(sig.parameters), len(arg_specs)))

            for arg_spec in arg_specs:
                self._validate_arg_spec(arg_spec)

        self.arg_specs = arg_specs

        super().__init__()

    def _validate_arg_spec(self, arg):
        """Recursively validate the argument spec, to make sure plugin
        creators are using this right.

        :param arg: A valid arg spec is a structure of lists and
            dicts, and types from self.VALID_SPEC_TYPES.

            - Lists should contain one representative containing type.
            - Dicts should have at least one key-value pair (with string keys).
            - Dict specs don't have to contain every key the dict might have,
              just those that will be used.
            - Specs may be any structure of these types, as long
              as they comply with the above rules.
            - The 'num' spec type will accept strings, floats, ints,
              or bool. ints and floats are left alone, bools become
              ints, and strings become an int or a float if they can.
        :raises FunctionPluginError: On a bad arg spec.
        """

        if isinstance(arg, list):
            if len(arg) != 1:
                raise FunctionPluginError(
                    "Invalid list spec argument. List arguments must contain "
                    "a single subtype. This had '{}'."
                    .format(arg)
                )
            self._validate_arg_spec(arg[0])

        elif isinstance(arg, dict):
            if len(arg) == 0:
                raise FunctionPluginError(
                    "Invalid dict spec argument. Dict arguments must contain "
                    "at least one key-value pair. This had '{}'."
                    .format(arg)
                )
            # Only the sub-specs need validating; the keys are taken as-is.
            for sub_arg in arg.values():
                self._validate_arg_spec(sub_arg)

        elif arg not in self.VALID_SPEC_TYPES:
            raise FunctionPluginError(
                "Invalid spec type '{}'. Must be one of '{}'"
                .format(arg, self.VALID_SPEC_TYPES)
            )

    @property
    def path(self):
        """The path to the file containing this result parser plugin."""

        return inspect.getfile(self.__class__)

    def __call__(self, *args):
        """Validate/convert the arguments and call the function.

        :raises FunctionPluginError: On an argument count mismatch, or any
            error raised while running the function (all errors are wrapped
            so callers only have to handle one exception type).
        """

        if self.arg_specs is not None:
            if len(args) != len(self.arg_specs):
                raise FunctionPluginError(
                    "Invalid number of arguments defined for function {}. Got "
                    "{}, but expected {}"
                    .format(self.name, len(args), len(self.arg_specs)))

            # Create the full list of validated arguments.
            val_args = []
            for arg, spec in zip(args, self.arg_specs):
                val_args.append(self._validate_arg(arg, spec))
        else:
            val_args = args

        try:
            func = getattr(self, self.name)
            return func(*val_args)
        except Exception as err:
            raise FunctionPluginError(
                "Error in function plugin {}: {}"
                .format(self.name, err)
            )

    @property
    def signature(self):
        """Generate a function signature string for this function, based
        on the function's argument names and their specs."""

        sig = inspect.signature(getattr(self, self.name))
        arg_names = list(sig.parameters.keys())
        parts = [self.name + '(']
        arg_parts = []
        for i in range(len(arg_names)):
            arg_name = arg_names[i]
            spec = self.arg_specs[i]

            arg_parts.append(
                '{}: {}'.format(arg_name, self._spec_to_desc(spec)))
        parts.append(', '.join(arg_parts))
        parts.append(')')

        return ''.join(parts)

    @property
    def long_description(self):
        """Return the docstring for the function, whitespace-normalized
        to a single line."""

        func = getattr(self, self.name)
        # Guard against functions with no docstring at all.
        desc = func.__doc__ or ''
        return ' '.join(desc.split())

    def _spec_to_desc(self, spec):
        """Convert an argument spec into a descriptive structure that
        can be reasonably printed."""

        if isinstance(spec, list):
            return [self._spec_to_desc(spec[0])]
        elif isinstance(spec, dict):
            return {k: self._spec_to_desc(v) for k, v in spec.items()}
        else:
            return spec.__name__

    def _validate_arg(self, arg, spec):
        """Ensure that the argument is of the structure specified by 'spec',
        and convert all contained values accordingly.

        :param arg: The argument to validate.
        :param Union[list,dict,int,bool,str,float] spec: The spec to apply to
            this argument.
        :return: The validated, auto-converted argument.
        """

        if isinstance(spec, list):
            if not isinstance(arg, list):
                raise FunctionPluginError(
                    "Invalid argument '{}'. Expected a list."
                    .format(arg)
                )

            val_args = []
            for arg_item in arg:
                try:
                    val_args.append(self._validate_arg(arg_item, spec[0]))
                except FunctionPluginError:
                    raise FunctionPluginError(
                        "Invalid list item argument '{}'. Expected a list of "
                        "'{}'."
                        .format(arg_item, spec[0]))
            return val_args

        if isinstance(spec, dict):
            if not isinstance(arg, dict):
                raise FunctionPluginError(
                    "Invalid argument '{}'. Expected a dict."
                    .format(arg))

            val_args = {}
            for key, sub_spec in spec.items():
                if key not in arg:
                    raise FunctionPluginError(
                        "Invalid dict argument '{}'. Missing key '{}'"
                        .format(arg, key))

                try:
                    val_args[key] = self._validate_arg(arg[key], sub_spec)
                except FunctionPluginError as err:
                    raise FunctionPluginError(
                        "Invalid dict argument '{}' for key '{}': {}"
                        .format(arg[key], key, err))

            return val_args

        try:
            # Boolean strings need a little conversion help when
            # converting to other types. The num type takes care of this
            # internally. Bug fix: bool('False') is True (any non-empty
            # string is truthy), so compare against 'True' explicitly
            # instead of calling bool() on the string.
            if spec in (int, float) and arg in ('True', 'False'):
                arg = (arg == 'True')
            return spec(arg)
        except ValueError:
            raise FunctionPluginError(
                "Invalid {} ({})"
                .format(spec.__name__, arg))

    def activate(self):
        """Yapsy runs this when adding the plugin. Add our plugin
        to the registry of function plugins."""

        if self.name in _FUNCTIONS:
            other = _FUNCTIONS[self.name]
            # Higher priority replaces; lower is ignored; a tie is a
            # configuration error and refuses to load.
            if self.priority > other.priority:
                LOGGER.info(
                    "Function plugin '%s' at %s is superceded by plugin at %s",
                    self.name, other.path, self.path)
                _FUNCTIONS[self.name] = self
            elif self.priority < other.priority:
                LOGGER.info(
                    "Function plugin '%s' at %s is ignored in lieu of "
                    "plugin at %s.",
                    self.name, self.path, other.path)
            else:
                raise RuntimeError(
                    "Function plugin conflict. Parser '{}' at '{}'"
                    "has the same priority as plugin at '{}'"
                    .format(self.name, self.path, other.path))
        else:
            _FUNCTIONS[self.name] = self

    def deactivate(self):
        """Yapsy runs this when removing the plugin. Plugins will
        only be removed by unit tests."""

        del _FUNCTIONS[self.name]
class CoreFunctionPlugin(FunctionPlugin):
    """A function plugin that sets defaults for core plugins. Use when adding
    additional function plugins to the core_functions module."""

    def __init__(self, name, description, arg_specs):
        """Initialize the plugin with the core priority pre-selected."""

        super().__init__(
            name=name,
            description=description,
            arg_specs=arg_specs,
            priority=self.PRIO_CORE,
        )
def register_core_plugins():
    """Find all the core function plugins and activate them."""

    # Importing the module is what defines the core plugin classes; the
    # module object itself is otherwise unused.
    from pavilion.expression_functions import core
    _ = core

    for plugin_cls in CoreFunctionPlugin.__subclasses__():
        plugin_cls().activate()
def __reset():
    """Reset all function plugins. For testing only."""

    # Copy the values first, since deactivate() mutates the registry.
    for func_plugin in list(_FUNCTIONS.values()):
        func_plugin.deactivate()
| 34.483333 | 79 | 0.556791 |
import logging
import re
import inspect
from yapsy import IPlugin
LOGGER = logging.getLogger(__file__)
_FUNCTIONS = {}
class FunctionPluginError(RuntimeError):
class FunctionArgError(ValueError):
def num(val):
if isinstance(val, (float, int)):
return val
elif val in ('True', 'False'):
return val == 'True'
elif isinstance(val, str):
try:
return int(val)
except ValueError:
pass
try:
return float(val)
except ValueError:
raise ValueError("Could not convert '{}' to either "
"int or float.")
raise RuntimeError("Invalid value '{}' given to num.".format(val))
class FunctionPlugin(IPlugin.IPlugin):
VALID_SPEC_TYPES = (
int,
float,
str,
bool,
num,
)
NAME_RE = re.compile(r'[a-zA-Z][a-zA-Z0-9_]*$')
PRIO_CORE = 0
PRIO_COMMON = 10
PRIO_USER = 20
def __init__(self, name, description, arg_specs,
priority=PRIO_COMMON):
if not self.NAME_RE.match(name):
raise FunctionPluginError(
"Invalid function name: '{}'".format(name))
self.name = name
self.description = description
self.priority = priority
sig = inspect.signature(getattr(self, self.name))
if arg_specs is None:
if self._validate_arg is FunctionPlugin._validate_arg:
raise RuntimeError(
"Function plugin {} at {} was given an arg_spec of "
"'None', but did not override '_validate_arg'."
.format(self.name, self.path)
)
if self.__class__.signature is FunctionPlugin.signature:
raise RuntimeError(
"Function plugin {} at {} was given an arg_spec of "
"'None', but did not override 'signature'."
.format(self.name, self.path)
)
else:
if len(sig.parameters) != len(arg_specs):
raise FunctionPluginError(
"Invalid arg specs. The function takes {} arguments, but"
"an arg_spec of length {} was provided."
.format(len(sig.parameters), len(arg_specs)))
for arg_spec in arg_specs:
self._validate_arg_spec(arg_spec)
self.arg_specs = arg_specs
super().__init__()
def _validate_arg_spec(self, arg):
if isinstance(arg, list):
if len(arg) != 1:
raise FunctionPluginError(
"Invalid list spec argument. List arguments must contain "
"a single subtype. This had '{}'."
.format(arg)
)
self._validate_arg_spec(arg[0])
elif isinstance(arg, dict):
if len(arg) == 0:
raise FunctionPluginError(
"Invalid dict spec argument. Dict arguments must contain "
"at least one key-value pair. This had '{}'."
.format(arg)
)
for key, sub_arg in arg.items():
self._validate_arg_spec(sub_arg)
elif arg not in self.VALID_SPEC_TYPES:
raise FunctionPluginError(
"Invalid spec type '{}'. Must be one of '{}'"
.format(arg, self.VALID_SPEC_TYPES)
)
@property
def path(self):
return inspect.getfile(self.__class__)
def __call__(self, *args):
if self.arg_specs is not None:
if len(args) != len(self.arg_specs):
raise FunctionPluginError(
"Invalid number of arguments defined for function {}. Got "
"{}, but expected {}"
.format(self.name, len(args), len(self.arg_specs)))
val_args = []
for arg, spec in zip(args, self.arg_specs):
val_args.append(self._validate_arg(arg, spec))
else:
val_args = args
try:
func = getattr(self, self.name)
return func(*val_args)
except Exception as err:
raise FunctionPluginError(
"Error in function plugin {}: {}"
.format(self.name, err)
)
@property
def signature(self):
sig = inspect.signature(getattr(self, self.name))
arg_names = list(sig.parameters.keys())
parts = [self.name + '(']
arg_parts = []
for i in range(len(arg_names)):
arg_name = arg_names[i]
spec = self.arg_specs[i]
arg_parts.append(
'{}: {}'.format(arg_name, self._spec_to_desc(spec)))
parts.append(', '.join(arg_parts))
parts.append(')')
return ''.join(parts)
@property
def long_description(self):
func = getattr(self, self.name)
desc = func.__doc__
return ' '.join(desc.split())
def _spec_to_desc(self, spec):
if isinstance(spec, list):
return [self._spec_to_desc(spec[0])]
elif isinstance(spec, dict):
return {k: self._spec_to_desc(v) for k, v in spec.items()}
else:
return spec.__name__
def _validate_arg(self, arg, spec):
if isinstance(spec, list):
if not isinstance(arg, list):
raise FunctionPluginError(
"Invalid argument '{}'. Expected a list."
.format(arg)
)
val_args = []
for arg_item in arg:
try:
val_args.append(self._validate_arg(arg_item, spec[0]))
except FunctionPluginError:
raise FunctionPluginError(
"Invalid list item argument '{}'. Expected a list of "
"'{}'."
.format(arg_item, spec[0]))
return val_args
if isinstance(spec, dict):
if not isinstance(arg, dict):
raise FunctionPluginError(
"Invalid argument '{}'. Expected a dict."
.format(arg))
val_args = {}
for key, sub_spec in spec.items():
if key not in arg:
raise FunctionPluginError(
"Invalid dict argument '{}'. Missing key '{}'"
.format(arg, key))
try:
val_args[key] = self._validate_arg(arg[key], sub_spec)
except FunctionPluginError as err:
raise FunctionPluginError(
"Invalid dict argument '{}' for key '{}': {}"
.format(arg[key], key, err))
return val_args
try:
if spec in (int, float) and arg in ('True', 'False'):
arg = bool(arg)
return spec(arg)
except ValueError:
raise FunctionPluginError(
"Invalid {} ({})"
.format(spec.__name__, arg))
def activate(self):
if self.name in _FUNCTIONS:
other = _FUNCTIONS[self.name]
if self.priority > other.priority:
LOGGER.info(
"Function plugin '%s' at %s is superceded by plugin at %s",
self.name, other.path, self.path)
_FUNCTIONS[self.name] = self
elif self.priority < other.priority:
LOGGER.info(
"Function plugin '%s' at %s is ignored in lieu of "
"plugin at %s.",
self.name, self.path, other.path)
else:
raise RuntimeError(
"Function plugin conflict. Parser '{}' at '{}'"
"has the same priority as plugin at '{}'"
.format(self.name, self.path, other.path))
else:
_FUNCTIONS[self.name] = self
def deactivate(self):
del _FUNCTIONS[self.name]
class CoreFunctionPlugin(FunctionPlugin):
def __init__(self, name, description, arg_specs):
super().__init__(name, description, arg_specs,
priority=self.PRIO_CORE)
def register_core_plugins():
from pavilion.expression_functions import core
_ = core
for cls in CoreFunctionPlugin.__subclasses__():
obj = cls()
obj.activate()
def __reset():
for plugin in list(_FUNCTIONS.values()):
plugin.deactivate()
| true | true |
1c494f5029b56fe8b217e4d957810d9edc58d324 | 15,887 | py | Python | model/model.py | eaidova/UNITER | 5b4c9faf8ed922176b20d89ac56a3e0b39374a22 | [
"MIT"
] | 612 | 2020-01-28T00:34:23.000Z | 2022-03-31T00:40:06.000Z | model/model.py | eaidova/UNITER | 5b4c9faf8ed922176b20d89ac56a3e0b39374a22 | [
"MIT"
] | 90 | 2020-02-18T10:54:40.000Z | 2022-03-17T07:36:35.000Z | model/model.py | eaidova/UNITER | 5b4c9faf8ed922176b20d89ac56a3e0b39374a22 | [
"MIT"
] | 114 | 2020-01-31T03:03:25.000Z | 2022-03-17T15:53:51.000Z | """
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Pytorch modules
some classes are modified from HuggingFace
(https://github.com/huggingface/transformers)
"""
import copy
import json
import logging
from io import open
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm
from .layer import BertLayer, BertPooler
logger = logging.getLogger(__name__)
class UniterConfig(object):
    """Configuration class to store the configuration of a `UniterModel`.
    """
    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs UniterConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `UniterModel`, or the path to a JSON config file to load
                everything from.
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer
                encoder.
            num_attention_heads: Number of attention heads for each attention
                layer in the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e.
                feed-forward) layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string)
                in the encoder and pooler. If string, "gelu", "relu" and
                "swish" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully
                connected layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this
                model might ever be used with. Typically set this to something
                large just in case (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed
                into `UniterModel`.
            initializer_range: The stddev of the truncated_normal_initializer
                for initializing all weight matrices.
        Raises:
            ValueError: If the first argument is neither an int nor a str.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Config-file mode: every key in the JSON becomes an attribute;
            # the remaining keyword arguments are ignored.
            with open(vocab_size_or_config_json_file,
                      "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size "
                             "(int) or the path to a pretrained model config "
                             "file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `UniterConfig` from a
        Python dictionary of parameters."""
        # Fix: build via `cls` (not a hardcoded UniterConfig) so subclasses
        # get instances of their own type.
        config = cls(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `UniterConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class UniterPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        """Store the config; subclasses build their modules on top.

        :param config: A UniterConfig instance (anything else raises).
        """
        super().__init__()
        if not isinstance(config, UniterConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of "
                "class `UniterConfig`. To create a model from a Google "
                "pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_weights(self, module):
        """ Initialize the weights.

        Meant to be passed to ``self.apply()``: Linear/Embedding weights get
        a normal init with the configured stddev, LayerNorm gets (1, 0),
        and Linear biases are zeroed.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses
            # truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0,
                                       std=self.config.initializer_range)
        elif isinstance(module, FusedLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    @classmethod
    def from_pretrained(cls, config_file, state_dict, *inputs, **kwargs):
        """
        Instantiate a UniterPreTrainedModel from a pre-trained model file or a
        pytorch state dict.
        Params:
            config_file: config json file
            state_dict: a state dictionary
            *inputs, **kwargs: additional input for the specific Uniter class
        """
        # Load config
        config = UniterConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        # Load from a PyTorch state_dict
        # Old TF-style checkpoints use 'gamma'/'beta' for LayerNorm params;
        # rename them to PyTorch's 'weight'/'bias' before loading.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata
        def load(module, prefix=''):
            # Recursively load each submodule's slice of the state dict,
            # accumulating missing/unexpected keys and error messages.
            local_metadata = ({} if metadata is None
                              else metadata.get(prefix[:-1], {}))
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys,
                unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        start_prefix = ''
        # If the checkpoint keys are 'bert.'-prefixed but the model has no
        # 'bert' attribute, strip the prefix by loading from under it.
        if not hasattr(model, 'bert') and any(s.startswith('bert.')
                                              for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from "
                        "pretrained model: {}".format(
                            model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in "
                        "{}: {}".format(
                            model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for '
                               '{}:\n\t{}'.format(
                                   model.__class__.__name__,
                                   "\n\t".join(error_msgs)))
        return model
class UniterTextEmbeddings(nn.Module):
    """Text-side embeddings: word + position + token-type lookups summed,
    then layer-normalized and passed through dropout."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(
            config.type_vocab_size, config.hidden_size)
        # 'LayerNorm' keeps the TensorFlow capitalization so TF checkpoint
        # variable names map onto this module without renaming.
        self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, position_ids, token_type_ids=None):
        # Missing segment ids default to segment 0 for every token.
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        summed = (self.word_embeddings(input_ids)
                  + self.position_embeddings(position_ids)
                  + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(summed))
class UniterImageEmbeddings(nn.Module):
    """Image-side embeddings: project region features and their 7-d
    position features into the hidden space, add the token-type embedding,
    then layer-normalize and apply dropout."""

    def __init__(self, config, img_dim):
        super().__init__()
        self.img_linear = nn.Linear(img_dim, config.hidden_size)
        self.img_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
        self.pos_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
        self.pos_linear = nn.Linear(7, config.hidden_size)
        self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
        # tf naming convention for layer norm
        self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, img_feat, img_pos_feat, type_embeddings, img_masks=None):
        if img_masks is not None:
            # Row 0 is the "not masked" embedding; pin it to zero so
            # unmasked regions are unchanged by the addition below.
            self.mask_embedding.weight.data[0, :].fill_(0)
            img_feat = img_feat + self.mask_embedding(img_masks.long())

        feat_part = self.img_layer_norm(self.img_linear(img_feat))
        pos_part = self.pos_layer_norm(self.pos_linear(img_pos_feat))
        return self.dropout(
            self.LayerNorm(feat_part + pos_part + type_embeddings))
class UniterEncoder(nn.Module):
    """A stack of identical BertLayer transformer blocks applied in
    sequence."""

    def __init__(self, config):
        super().__init__()
        template = BertLayer(config)
        self.layer = nn.ModuleList(
            [copy.deepcopy(template)
             for _ in range(config.num_hidden_layers)])

    def forward(self, input_, attention_mask,
                output_all_encoded_layers=True):
        """Run every layer over the input.

        Returns a list: every layer's output when
        ``output_all_encoded_layers`` is true, otherwise a single-element
        list holding just the final hidden states.
        """
        collected = []
        hidden = input_
        for block in self.layer:
            hidden = block(hidden, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden)
        if not output_all_encoded_layers:
            collected.append(hidden)
        return collected
class UniterModel(UniterPreTrainedModel):
    """ Modification for Joint Vision-Language Encoding

    Embeds the text and image streams separately, concatenates them, and
    runs the joint sequence through the transformer encoder.
    """
    def __init__(self, config, img_dim):
        super().__init__(config)
        self.embeddings = UniterTextEmbeddings(config)
        self.img_embeddings = UniterImageEmbeddings(config, img_dim)
        self.encoder = UniterEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_weights)
    def _compute_txt_embeddings(self, input_ids, position_ids,
                                txt_type_ids=None):
        """Embed the text tokens only."""
        output = self.embeddings(input_ids, position_ids, txt_type_ids)
        return output
    def _compute_img_embeddings(self, img_feat, img_pos_feat, img_masks=None,
                                img_type_ids=None):
        """Embed the image regions only.

        Image tokens default to token-type id 1 (text uses 0); the type
        embedding table is shared with the text embeddings.
        """
        if img_type_ids is None:
            img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
        img_type_embeddings = self.embeddings.token_type_embeddings(
            img_type_ids)
        output = self.img_embeddings(img_feat, img_pos_feat,
                                     img_type_embeddings, img_masks)
        return output
    def _compute_img_txt_embeddings(self, input_ids, position_ids,
                                    img_feat, img_pos_feat,
                                    gather_index, img_masks=None,
                                    txt_type_ids=None, img_type_ids=None):
        """Embed both streams and pack them into one padded sequence.

        gather_index selects, per batch row, which positions of the
        concatenated [text; image] sequence survive — presumably built by
        the data loader to drop inter-stream padding (TODO confirm against
        the collate code).
        """
        txt_emb = self._compute_txt_embeddings(
            input_ids, position_ids, txt_type_ids)
        img_emb = self._compute_img_embeddings(
            img_feat, img_pos_feat, img_masks, img_type_ids)
        # align back to most compact input
        gather_index = gather_index.unsqueeze(-1).expand(
            -1, -1, self.config.hidden_size)
        embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),
                                        dim=1, index=gather_index)
        return embedding_output
    def forward(self, input_ids, position_ids,
                img_feat, img_pos_feat,
                attention_mask, gather_index=None, img_masks=None,
                output_all_encoded_layers=True,
                txt_type_ids=None, img_type_ids=None):
        """Run the joint encoder.

        Pass input_ids=None for image-only input, img_feat=None for
        text-only; otherwise both streams are embedded and merged via
        gather_index. Returns the encoder layers (a list, or the last
        layer's tensor when output_all_encoded_layers is False).
        """
        # compute self-attention mask
        # 1 -> 0.0 (keep), 0 -> -10000.0 (mask out), broadcastable over
        # the attention-score tensor.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # embedding layer
        if input_ids is None:
            # image only
            embedding_output = self._compute_img_embeddings(
                img_feat, img_pos_feat, img_masks, img_type_ids)
        elif img_feat is None:
            # text only
            embedding_output = self._compute_txt_embeddings(
                input_ids, position_ids, txt_type_ids)
        else:
            embedding_output = self._compute_img_txt_embeddings(
                input_ids, position_ids,
                img_feat, img_pos_feat,
                gather_index, img_masks, txt_type_ids, img_type_ids)
        encoded_layers = self.encoder(
            embedding_output, extended_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers
| 43.171196 | 79 | 0.615031 | import copy
import json
import logging
from io import open
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm
from .layer import BertLayer, BertPooler
logger = logging.getLogger(__name__)
class UniterConfig(object):
def __init__(self,
vocab_size_or_config_json_file,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=2,
initializer_range=0.02):
if isinstance(vocab_size_or_config_json_file, str):
with open(vocab_size_or_config_json_file,
"r", encoding='utf-8') as reader:
json_config = json.loads(reader.read())
for key, value in json_config.items():
self.__dict__[key] = value
elif isinstance(vocab_size_or_config_json_file, int):
self.vocab_size = vocab_size_or_config_json_file
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
else:
raise ValueError("First argument must be either a vocabulary size "
"(int) or the path to a pretrained model config "
"file (str)")
@classmethod
def from_dict(cls, json_object):
config = UniterConfig(vocab_size_or_config_json_file=-1)
for key, value in json_object.items():
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
with open(json_file, "r", encoding='utf-8') as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class UniterPreTrainedModel(nn.Module):
def __init__(self, config, *inputs, **kwargs):
super().__init__()
if not isinstance(config, UniterConfig):
raise ValueError(
"Parameter config in `{}(config)` should be an instance of "
"class `UniterConfig`. To create a model from a Google "
"pretrained model use "
"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
self.__class__.__name__, self.__class__.__name__
))
self.config = config
def init_weights(self, module):
if isinstance(module, (nn.Linear, nn.Embedding)):
module.weight.data.normal_(mean=0.0,
std=self.config.initializer_range)
elif isinstance(module, FusedLayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
if isinstance(module, nn.Linear) and module.bias is not None:
module.bias.data.zero_()
@classmethod
def from_pretrained(cls, config_file, state_dict, *inputs, **kwargs):
config = UniterConfig.from_json_file(config_file)
logger.info("Model config {}".format(config))
model = cls(config, *inputs, **kwargs)
old_keys = []
new_keys = []
for key in state_dict.keys():
new_key = None
if 'gamma' in key:
new_key = key.replace('gamma', 'weight')
if 'beta' in key:
new_key = key.replace('beta', 'bias')
if new_key:
old_keys.append(key)
new_keys.append(new_key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
missing_keys = []
unexpected_keys = []
error_msgs = []
metadata = getattr(state_dict, '_metadata', None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
def load(module, prefix=''):
local_metadata = ({} if metadata is None
else metadata.get(prefix[:-1], {}))
module._load_from_state_dict(
state_dict, prefix, local_metadata, True, missing_keys,
unexpected_keys, error_msgs)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + '.')
start_prefix = ''
if not hasattr(model, 'bert') and any(s.startswith('bert.')
for s in state_dict.keys()):
start_prefix = 'bert.'
load(model, prefix=start_prefix)
if len(missing_keys) > 0:
logger.info("Weights of {} not initialized from "
"pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in "
"{}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError('Error(s) in loading state_dict for '
'{}:\n\t{}'.format(
model.__class__.__name__,
"\n\t".join(error_msgs)))
return model
class UniterTextEmbeddings(nn.Module):
    """Token + position + segment embeddings for the text stream.

    Mirrors BERT's embedding layer: three lookup tables summed,
    followed by LayerNorm and dropout.
    """

    def __init__(self, config):
        super().__init__()
        hidden = config.hidden_size
        self.word_embeddings = nn.Embedding(
            config.vocab_size, hidden, padding_idx=0)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, hidden)
        self.token_type_embeddings = nn.Embedding(
            config.type_vocab_size, hidden)
        # Attribute kept as `LayerNorm` (capitalized) so pretrained BERT
        # checkpoint weights map onto this module by name.
        self.LayerNorm = FusedLayerNorm(hidden, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, position_ids, token_type_ids=None):
        # Default every token to segment id 0 when no segment ids are given.
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        summed = (self.word_embeddings(input_ids)
                  + self.position_embeddings(position_ids)
                  + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(summed))
class UniterImageEmbeddings(nn.Module):
    """Region-feature + position embeddings for the image stream."""

    def __init__(self, config, img_dim):
        super().__init__()
        hidden = config.hidden_size
        self.img_linear = nn.Linear(img_dim, hidden)
        self.img_layer_norm = FusedLayerNorm(hidden, eps=1e-12)
        self.pos_layer_norm = FusedLayerNorm(hidden, eps=1e-12)
        # Position features are 7-dimensional (see nn.Linear(7, ...));
        # presumably normalized box geometry — TODO confirm with the caller.
        self.pos_linear = nn.Linear(7, hidden)
        # Row 1 holds the learned MASK vector; row 0 is pinned to zero
        # (padding_idx) so unmasked regions get a zero offset.
        self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
        self.LayerNorm = FusedLayerNorm(hidden, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, img_feat, img_pos_feat, type_embeddings, img_masks=None):
        if img_masks is not None:
            # Re-zero row 0 on every call in case training updated it.
            self.mask_embedding.weight.data[0, :].fill_(0)
            img_feat = img_feat + self.mask_embedding(img_masks.long())
        transformed_im = self.img_layer_norm(self.img_linear(img_feat))
        transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))
        combined = transformed_im + transformed_pos + type_embeddings
        return self.dropout(self.LayerNorm(combined))
class UniterEncoder(nn.Module):
    """Stack of identical transformer layers (deep copies of one BertLayer)."""

    def __init__(self, config):
        super().__init__()
        template = BertLayer(config)
        self.layer = nn.ModuleList(
            [copy.deepcopy(template)
             for _ in range(config.num_hidden_layers)])

    def forward(self, input_, attention_mask,
                output_all_encoded_layers=True):
        collected = []
        hidden = input_
        for block in self.layer:
            hidden = block(hidden, attention_mask)
            if output_all_encoded_layers:
                collected.append(hidden)
        # When per-layer outputs were not requested, return only the last one
        # (still wrapped in a list for a uniform return type).
        if not output_all_encoded_layers:
            collected.append(hidden)
        return collected
class UniterModel(UniterPreTrainedModel):
    """UNITER: joint text/image transformer encoder.

    Embeds text tokens and image regions separately, concatenates the two
    sequences (optionally re-packed via ``gather_index``), and runs the
    shared transformer encoder over the combined sequence. Either modality
    may be omitted by passing ``input_ids=None`` or ``img_feat=None``.
    """

    def __init__(self, config, img_dim):
        super().__init__(config)
        self.embeddings = UniterTextEmbeddings(config)
        self.img_embeddings = UniterImageEmbeddings(config, img_dim)
        self.encoder = UniterEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_weights)

    def _compute_txt_embeddings(self, input_ids, position_ids,
                                txt_type_ids=None):
        # Text-only embedding path.
        output = self.embeddings(input_ids, position_ids, txt_type_ids)
        return output

    def _compute_img_embeddings(self, img_feat, img_pos_feat, img_masks=None,
                                img_type_ids=None):
        # Image regions default to token-type id 1 (text defaults to 0).
        if img_type_ids is None:
            img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
        # Reuse the text token-type table so both modalities share
        # segment embeddings.
        img_type_embeddings = self.embeddings.token_type_embeddings(
            img_type_ids)
        output = self.img_embeddings(img_feat, img_pos_feat,
                                     img_type_embeddings, img_masks)
        return output

    def _compute_img_txt_embeddings(self, input_ids, position_ids,
                                    img_feat, img_pos_feat,
                                    gather_index, img_masks=None,
                                    txt_type_ids=None, img_type_ids=None):
        txt_emb = self._compute_txt_embeddings(
            input_ids, position_ids, txt_type_ids)
        img_emb = self._compute_img_embeddings(
            img_feat, img_pos_feat, img_masks, img_type_ids)
        # Expand gather_index over the hidden dim so torch.gather selects
        # whole rows of the concatenated [txt; img] sequence — presumably
        # re-packing to drop padding slots; confirm against the data loader.
        gather_index = gather_index.unsqueeze(-1).expand(
            -1, -1, self.config.hidden_size)
        embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),
                                        dim=1, index=gather_index)
        return embedding_output

    def forward(self, input_ids, position_ids,
                img_feat, img_pos_feat,
                attention_mask, gather_index=None, img_masks=None,
                output_all_encoded_layers=True,
                txt_type_ids=None, img_type_ids=None):
        # Broadcast the 2-D padding mask to [batch, 1, 1, seq] and convert
        # it to additive form: 0 where attended, -10000 where masked.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # Pick the embedding path: image-only, text-only, or joint.
        if input_ids is None:
            embedding_output = self._compute_img_embeddings(
                img_feat, img_pos_feat, img_masks, img_type_ids)
        elif img_feat is None:
            embedding_output = self._compute_txt_embeddings(
                input_ids, position_ids, txt_type_ids)
        else:
            embedding_output = self._compute_img_txt_embeddings(
                input_ids, position_ids,
                img_feat, img_pos_feat,
                gather_index, img_masks, txt_type_ids, img_type_ids)

        encoded_layers = self.encoder(
            embedding_output, extended_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers
| true | true |
1c494fcec0deb185879083532a650364ea510ab7 | 3,911 | py | Python | tools/verify_caffe_model.py | cybercore-co-ltd/Onnx2Caffe | aa4a90b7539e2b5ee0ad42f507021585da58be80 | [
"MIT"
] | 7 | 2020-07-24T22:51:14.000Z | 2021-06-28T05:12:52.000Z | tools/verify_caffe_model.py | cybercore-co-ltd/Onnx2Caffe | aa4a90b7539e2b5ee0ad42f507021585da58be80 | [
"MIT"
] | 6 | 2020-07-23T04:32:06.000Z | 2020-12-28T09:52:40.000Z | tools/verify_caffe_model.py | cybercore-co-ltd/Onnx2Caffe | aa4a90b7539e2b5ee0ad42f507021585da58be80 | [
"MIT"
] | 5 | 2020-07-30T08:19:16.000Z | 2021-06-28T05:12:53.000Z | import argparse
import numpy as np
import onnx
import onnxruntime as rt
import torch
import os
import mmcv
import caffe
from terminaltables import AsciiTable
def parse_args():
    """Command-line options for comparing an ONNX model against Caffe.

    Positional: onnx checkpoint path, caffe checkpoint path.
    Optional: --input_img (a real image), --shape (input H W).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('onnx_checkpoint', help='onnx checkpoint file')
    parser.add_argument('caffe_checkpoint', help='caffe checkpoint file')
    parser.add_argument('--input_img', type=str, help='Images for input')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[1280, 768],
        help='input image size')
    return parser.parse_args()
def imread_img(img_path):
    """Load an image, resize it to the module-level `input_shape`, and return
    a float tensor scaled to [0, 1] with a leading batch dimension.

    NOTE(review): relies on the global `input_shape` assigned in __main__.
    The transpose(2, 1, 0) produces C x W x H (it swaps height and width) —
    confirm this matches the layout the exported model expects.
    """
    # read image
    one_img = mmcv.imread(img_path, 'color')
    one_img = mmcv.imresize(one_img, input_shape[1:]).transpose(2, 1, 0)
    one_img = one_img/255
    one_img = torch.from_numpy(one_img).unsqueeze(0).float()
    return one_img
def get_onnx_pred(onnx_model_path, one_img):
    """Run the ONNX model on `one_img` with onnxruntime.

    Returns a dict mapping each graph output name to its ndarray result,
    plus the name of the (single) feedable graph input.
    """
    onnx_model = onnx.load(onnx_model_path)
    onnx.checker.check_model(onnx_model)
    # Feedable inputs = declared graph inputs minus initializers (weights).
    graph_inputs = {node.name for node in onnx_model.graph.input}
    initializers = {node.name for node in onnx_model.graph.initializer}
    net_feed_input = list(graph_inputs - initializers)
    assert (len(net_feed_input) == 1)
    input_name = net_feed_input[0]
    sess = rt.InferenceSession(onnx_model_path)
    raw_outputs = sess.run(None, {input_name: one_img.detach().numpy()})
    output_names = [node.name for node in onnx_model.graph.output]
    onnx_result_dict = dict(zip(output_names, raw_outputs))
    return onnx_result_dict, input_name
def get_caffe_pred(model, input_name, inputs):
    """Run a Caffe network on `inputs` fed into blob `input_name`.

    Args:
        model: a caffe.Net (anything exposing `.blobs[name].data` and
            `.forward()`).
        input_name: name of the input blob to fill.
        inputs: array-like data assigned into the input blob.

    Returns:
        The dict of output blobs produced by `model.forward()`.
    """
    # Bug fix: the original ignored the `model` parameter and operated on
    # the module-level `caffe_model` global; use the argument instead so
    # the function works on whichever net the caller passes.
    model.blobs[input_name].data[...] = inputs
    return model.forward()
def compute_relative_err_onnx2caffe(onnx_result, caffe_outs):
    """Print a per-output error table and return the summed relative error.

    For each ONNX output, computes the summed absolute difference against
    the matching Caffe blob ("MAE" column) and divides it by the norm of
    the ONNX tensor ("Relative_err" column).
    """
    rows = [
        ['Output', 'MAE', 'Relative_err']
    ]
    total_err = 0
    for name, onnx_val in onnx_result.items():
        # Summed absolute difference between the two backends' outputs.
        abs_err = np.abs(onnx_val - caffe_outs[name]).sum()
        # Normalize by the magnitude of the ONNX output.
        rel_err = abs_err / (np.linalg.norm(onnx_val)).sum()
        total_err += rel_err
        rows.append([name, abs_err, rel_err])
    print(AsciiTable(rows).table)
    return total_err
def get_onnx_outputname(onnx_model_path):
    """Return the list of output names declared in the ONNX graph."""
    graph = onnx.load(onnx_model_path).graph
    return [node.name for node in graph.output]
if __name__ == '__main__':
    args = parse_args()
    # Normalize --shape to a CHW tuple; the batch dimension is added below.
    # Bug fix: the single-value branch previously produced a 4-tuple
    # (1, 3, s, s), which broke imread_img's use of input_shape[1:] as the
    # (w, h) resize size and was inconsistent with the two-value branch.
    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (
            3,
        ) + tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    # generate the random image for testing
    if args.input_img is None:
        # Bug fix: `torch.randn(1, (*input_shape))` was a SyntaxError
        # (parenthesized starred expression); build the NCHW size tuple.
        input_data = torch.randn((1, *input_shape))
    else:
        input_data = imread_img(args.input_img)
    # get the name of output branch
    output_name = get_onnx_outputname(args.onnx_checkpoint)
    # get onnx results
    onnx_result, input_name = get_onnx_pred(args.onnx_checkpoint, input_data)
    # Create caffe model (the .prototxt is expected next to the .caffemodel)
    prototxt_path = args.caffe_checkpoint.replace('.caffemodel', '.prototxt')
    caffe_model = caffe.Net(prototxt_path, caffe.TEST)
    caffe_model.copy_from(args.caffe_checkpoint)
    # get caffe results
    caffe_result = get_caffe_pred(caffe_model, input_name, input_data)
    # compute the err between the caffe model and the converted onnx model
    total_err = compute_relative_err_onnx2caffe(onnx_result, caffe_result)
    print(f'TOTAL ERR BETWEEN CAFFE MODEL AND ONNX MODEL : TOTAL_ERR {total_err} ')
| 28.97037 | 83 | 0.682434 | import argparse
import numpy as np
import onnx
import onnxruntime as rt
import torch
import os
import mmcv
import caffe
from terminaltables import AsciiTable
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('onnx_checkpoint', help='onnx checkpoint file')
parser.add_argument('caffe_checkpoint', help='caffe checkpoint file')
parser.add_argument('--input_img', type=str, help='Images for input')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 768],
help='input image size')
args = parser.parse_args()
return args
def imread_img(img_path):
one_img = mmcv.imread(img_path, 'color')
one_img = mmcv.imresize(one_img, input_shape[1:]).transpose(2, 1, 0)
one_img = one_img/255
one_img = torch.from_numpy(one_img).unsqueeze(0).float()
return one_img
def get_onnx_pred(onnx_model_path, one_img):
onnx_model = onnx.load(onnx_model_path)
onnx.checker.check_model(onnx_model)
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [
node.name for node in onnx_model.graph.initializer
]
net_feed_input = list(set(input_all) - set(input_initializer))
assert (len(net_feed_input) == 1)
sess = rt.InferenceSession(onnx_model_path)
onnx_result = sess.run(
None, {net_feed_input[0]: one_img.detach().numpy()})
onnx_result_dict = dict()
output_name = [node.name for node in onnx_model.graph.output]
for i,name in enumerate(output_name):
onnx_result_dict[name]=onnx_result[i]
input_name = net_feed_input[0]
return onnx_result_dict, input_name
def get_caffe_pred(model, input_name, inputs):
caffe_model.blobs[input_name].data[...] = inputs
caffe_outs = caffe_model.forward()
return caffe_outs
def compute_relative_err_onnx2caffe(onnx_result, caffe_outs):
total_err = 0
table_data = [
['Output', 'MAE', 'Relative_err']
]
for k,v in onnx_result.items():
mae_err = (np.abs(v - caffe_outs[k])).sum()
norm_onnx = (np.linalg.norm(v)).sum()
rel_err = mae_err/norm_onnx
total_err = total_err + rel_err
table_data.append([k, mae_err, rel_err])
table = AsciiTable(table_data)
print(table.table)
return total_err
def get_onnx_outputname(onnx_model_path):
model = onnx.load(onnx_model_path)
output_name = [node.name for node in model.graph.output]
return output_name
if __name__ == '__main__':
args = parse_args()
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (
3,
) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
if args.input_img is None:
input_data = torch.randn(1, (*input_shape))
else:
input_data = imread_img(args.input_img)
output_name = get_onnx_outputname(args.onnx_checkpoint)
onnx_result, input_name = get_onnx_pred(args.onnx_checkpoint, input_data)
prototxt_path=args.caffe_checkpoint.replace('.caffemodel','.prototxt')
caffe_model = caffe.Net(prototxt_path, caffe.TEST)
caffe_model.copy_from(args.caffe_checkpoint)
caffe_result = get_caffe_pred(caffe_model, input_name, input_data)
total_err = compute_relative_err_onnx2caffe(onnx_result, caffe_result)
print(f'TOTAL ERR BETWEEN CAFFE MODEL AND ONNX MODEL : TOTAL_ERR {total_err} ')
| false | true |
1c495033486807a83992b5b2fcd5ec296570ade8 | 576 | py | Python | code/decision_tree/decision_tree_iris.py | CrazyXiao/machine-learning | 8e1e8cb9cf6f4e1c403873168f2bacbd84a106bd | [
"MIT"
] | 200 | 2019-04-23T01:13:31.000Z | 2021-08-01T07:56:46.000Z | code/decision_tree/decision_tree_iris.py | CrazyXiao/machine-learning | 8e1e8cb9cf6f4e1c403873168f2bacbd84a106bd | [
"MIT"
] | null | null | null | code/decision_tree/decision_tree_iris.py | CrazyXiao/machine-learning | 8e1e8cb9cf6f4e1c403873168f2bacbd84a106bd | [
"MIT"
] | 10 | 2019-04-24T10:18:59.000Z | 2021-04-19T12:58:59.000Z | #!/usr/bin/env python
"""
iris 决策树
"""
from sklearn.datasets import load_iris
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# 加载数据
iris = load_iris()
X = iris.data
y = iris.target
# 训练集和测试集
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print(X_train.shape)
# 构造分类器
classifier = tree.DecisionTreeClassifier()
classifier.fit(X_train, y_train)
# 测试集预测值
predictions = classifier.predict(X_test)
print(predictions)
# 准确率
print(accuracy_score(y_test, predictions))
| 16.457143 | 72 | 0.767361 |
from sklearn.datasets import load_iris
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
iris = load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print(X_train.shape)
classifier = tree.DecisionTreeClassifier()
classifier.fit(X_train, y_train)
predictions = classifier.predict(X_test)
print(predictions)
print(accuracy_score(y_test, predictions))
| true | true |
1c495055dbaff7ed5a9f296fd48afb316a4ab298 | 1,294 | py | Python | src/web/modules/ejudge/migrations/0004_auto_20160329_1924.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 5 | 2018-03-08T17:22:27.000Z | 2018-03-11T14:20:53.000Z | src/web/modules/ejudge/migrations/0004_auto_20160329_1924.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 263 | 2018-03-08T18:05:12.000Z | 2022-03-11T23:26:20.000Z | src/web/modules/ejudge/migrations/0004_auto_20160329_1924.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 6 | 2018-03-12T19:48:19.000Z | 2022-01-14T04:58:52.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import djchoices.choices
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `ejudge` app.

    Adds `QueueElement.wont_check_message`, introduces the "WONT CHECK"
    status choice, and makes the `QueueElement.submission` and
    `Submission.result` foreign keys nullable.
    """

    dependencies = [
        ('ejudge', '0003_auto_20160329_1823'),
    ]

    operations = [
        migrations.AddField(
            model_name='queueelement',
            name='wont_check_message',
            field=models.TextField(default='', blank=True),
        ),
        migrations.AlterField(
            model_name='queueelement',
            name='status',
            field=models.PositiveIntegerField(default=1, validators=[djchoices.choices.ChoicesValidator({1: 'NOT FETCHED', 2: 'SUBMITTED', 3: 'CHECKED', 4: 'WONT CHECK'})], choices=[(1, 'NOT FETCHED'), (2, 'SUBMITTED'), (3, 'CHECKED'), (4, 'WONT CHECK')]),
        ),
        migrations.AlterField(
            model_name='queueelement',
            name='submission',
            field=models.ForeignKey(default=None, on_delete=models.CASCADE, blank=True, to='ejudge.Submission', null=True),
        ),
        migrations.AlterField(
            model_name='submission',
            name='result',
            field=models.ForeignKey(default=None, on_delete=models.CASCADE, blank=True, to='ejudge.SolutionCheckingResult', null=True),
        ),
    ]
| 35.944444 | 256 | 0.616692 |
from __future__ import unicode_literals
from django.db import models, migrations
import djchoices.choices
class Migration(migrations.Migration):
dependencies = [
('ejudge', '0003_auto_20160329_1823'),
]
operations = [
migrations.AddField(
model_name='queueelement',
name='wont_check_message',
field=models.TextField(default='', blank=True),
),
migrations.AlterField(
model_name='queueelement',
name='status',
field=models.PositiveIntegerField(default=1, validators=[djchoices.choices.ChoicesValidator({1: 'NOT FETCHED', 2: 'SUBMITTED', 3: 'CHECKED', 4: 'WONT CHECK'})], choices=[(1, 'NOT FETCHED'), (2, 'SUBMITTED'), (3, 'CHECKED'), (4, 'WONT CHECK')]),
),
migrations.AlterField(
model_name='queueelement',
name='submission',
field=models.ForeignKey(default=None, on_delete=models.CASCADE, blank=True, to='ejudge.Submission', null=True),
),
migrations.AlterField(
model_name='submission',
name='result',
field=models.ForeignKey(default=None, on_delete=models.CASCADE, blank=True, to='ejudge.SolutionCheckingResult', null=True),
),
]
| true | true |
1c495283b4a9eb1215b4155542911802987dc8c2 | 151 | py | Python | reallySecureRandom.py | CabraKill/desafio_ford | 9d0f5c5f7396b4fb702df23e8871b9906867d583 | [
"MIT"
] | null | null | null | reallySecureRandom.py | CabraKill/desafio_ford | 9d0f5c5f7396b4fb702df23e8871b9906867d583 | [
"MIT"
] | null | null | null | reallySecureRandom.py | CabraKill/desafio_ford | 9d0f5c5f7396b4fb702df23e8871b9906867d583 | [
"MIT"
] | null | null | null | import random
# Default inclusive bounds for generated integers.
MIN_NUMBER = 0
MAX_NUMBER = 1000


def randomIntNumber(min: int = MIN_NUMBER, max: int = MAX_NUMBER):
    """Return a pseudo-random integer in the inclusive range [min, max].

    NOTE(review): despite the module's name, `random` is not
    cryptographically secure — use `secrets` for security-sensitive values.
    The parameters shadow the builtins `min`/`max`; renaming them would
    break keyword callers, so they are kept as-is.
    """
    low, high = min, max
    return random.randint(low, high)
MIN_NUMBER = 0
MAX_NUMBER = 1000
def randomIntNumber(min: int = MIN_NUMBER, max: int = MAX_NUMBER):
return random.randint(min, max) | true | true |
1c4952934ed638a6e4875e47806d396365cee9cf | 10,465 | py | Python | pymc3/parallel_sampling.py | acolombi/pymc3 | 3cb45700156b63e786eb70909d3e1d6e1f21703a | [
"Apache-2.0"
] | 1 | 2018-06-11T03:13:00.000Z | 2018-06-11T03:13:00.000Z | pymc3/parallel_sampling.py | acolombi/pymc3 | 3cb45700156b63e786eb70909d3e1d6e1f21703a | [
"Apache-2.0"
] | 2 | 2017-03-02T05:56:13.000Z | 2019-12-06T19:15:42.000Z | pymc3/parallel_sampling.py | acolombi/pymc3 | 3cb45700156b63e786eb70909d3e1d6e1f21703a | [
"Apache-2.0"
] | null | null | null | import multiprocessing
import multiprocessing.sharedctypes
import ctypes
import time
import logging
from collections import namedtuple
import traceback
import six
import numpy as np
from . import theanof
logger = logging.getLogger('pymc3')
# Taken from https://hg.python.org/cpython/rev/c4f92b597074
class RemoteTraceback(Exception):
    """Exception whose string form is a traceback captured in a worker.

    Attached as __cause__ when an error from a chain process is re-raised
    in the main process (see `rebuild_exc`).
    """

    def __init__(self, tb):
        self.tb = tb

    def __str__(self):
        return self.tb
class ExceptionWithTraceback:
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return rebuild_exc, (self.exc, self.tb)
def rebuild_exc(exc, tb):
    """Unpickling helper: return `exc` with the worker traceback text
    attached as its __cause__ (wrapped in a RemoteTraceback)."""
    exc.__cause__ = RemoteTraceback(tb)
    return exc
# Messages
# ('writing_done', is_last, sample_idx, tuning, stats)
# ('error', *exception_info)
# ('abort', reason)
# ('write_next',)
# ('start',)
class _Process(multiprocessing.Process):
    """Separate process for each chain.

    We communicate with the main process using a pipe,
    and send finished samples using shared memory.

    Pipe protocol:
      main -> worker: ('start',), ('write_next',), ('abort',)
      worker -> main: ('writing_done', is_last, draw, tuning, stats, warns)
                      or ('error', ExceptionWithTraceback)
    """
    def __init__(self, name, msg_pipe, step_method, shared_point,
                 draws, tune, seed):
        super(_Process, self).__init__(daemon=True, name=name)
        self._msg_pipe = msg_pipe
        self._step_method = step_method
        self._shared_point = shared_point
        self._seed = seed
        self._tt_seed = seed + 1
        self._draws = draws
        self._tune = tune

    def run(self):
        try:
            # We do not create this in __init__, as pickling this
            # would destroy the shared memory.
            self._point = self._make_numpy_refs()
            self._start_loop()
        except KeyboardInterrupt:
            # Raised locally on an 'abort' message; exit quietly.
            pass
        except BaseException as e:
            # The parent cannot see our traceback; ship it as text.
            e = ExceptionWithTraceback(e, e.__traceback__)
            self._msg_pipe.send(('error', e))
        finally:
            self._msg_pipe.close()

    def _make_numpy_refs(self):
        """Wrap each shared RawArray in a numpy view of its shape/dtype."""
        shape_dtypes = self._step_method.vars_shape_dtype
        point = {}
        for name, (shape, dtype) in shape_dtypes.items():
            array = self._shared_point[name]
            self._shared_point[name] = array
            point[name] = np.frombuffer(array, dtype).reshape(shape)
        return point

    def _write_point(self, point):
        # Copy sampled values into the shared-memory views the parent reads.
        for name, vals in point.items():
            self._point[name][...] = vals

    def _recv_msg(self):
        return self._msg_pipe.recv()

    def _start_loop(self):
        """Sampling loop: compute one draw, wait for 'write_next', publish."""
        np.random.seed(self._seed)
        theanof.set_tt_rng(self._tt_seed)

        draw = 0
        tuning = True

        msg = self._recv_msg()
        if msg[0] == 'abort':
            raise KeyboardInterrupt()
        if msg[0] != 'start':
            raise ValueError('Unexpected msg ' + msg[0])

        while True:
            if draw < self._draws + self._tune:
                point, stats = self._compute_point()
            else:
                return

            if draw == self._tune:
                # Tuning phase is over; freeze the step-method's adaptation.
                self._step_method.stop_tuning()
                tuning = False

            msg = self._recv_msg()
            if msg[0] == 'abort':
                raise KeyboardInterrupt()
            elif msg[0] == 'write_next':
                self._write_point(point)
                is_last = draw + 1 == self._draws + self._tune
                if is_last:
                    warns = self._collect_warnings()
                else:
                    warns = None
                self._msg_pipe.send(
                    ('writing_done', is_last, draw, tuning, stats, warns))
                draw += 1
            else:
                raise ValueError('Unknown message ' + msg[0])

    def _compute_point(self):
        # Step methods may or may not emit sampler statistics.
        if self._step_method.generates_stats:
            point, stats = self._step_method.step(self._point)
        else:
            point = self._step_method.step(self._point)
            stats = None
        return point, stats

    def _collect_warnings(self):
        # Not all step methods expose warnings (e.g. Metropolis may not).
        if hasattr(self._step_method, 'warnings'):
            return self._step_method.warnings()
        else:
            return []
class ProcessAdapter(object):
    """Control a Chain process from the main thread.

    Owns the pipe endpoint and the shared-memory arrays through which the
    worker publishes each draw. The worker is forked in __init__.
    """
    def __init__(self, draws, tune, step_method, chain, seed, start):
        self.chain = chain
        process_name = "worker_chain_%s" % chain
        self._msg_pipe, remote_conn = multiprocessing.Pipe()

        # One shared RawArray per model variable, plus a numpy view on each.
        self._shared_point = {}
        self._point = {}
        for name, (shape, dtype) in step_method.vars_shape_dtype.items():
            size = 1
            for dim in shape:
                size *= int(dim)
            size *= dtype.itemsize
            # Guard against overflow of the C size_t used by RawArray.
            if size != ctypes.c_size_t(size).value:
                raise ValueError('Variable %s is too large' % name)

            array = multiprocessing.sharedctypes.RawArray('c', size)
            self._shared_point[name] = array
            array_np = np.frombuffer(array, dtype).reshape(shape)
            array_np[...] = start[name]
            self._point[name] = array_np

        self._readable = True
        self._num_samples = 0

        self._process = _Process(
            process_name, remote_conn, step_method, self._shared_point,
            draws, tune, seed)
        # We fork right away, so that the main process can start tqdm threads
        self._process.start()

    @property
    def shared_point_view(self):
        """May only be written to or read between a `recv_draw`
        call from the process and a `write_next` or `abort` call.
        """
        if not self._readable:
            raise RuntimeError()
        return self._point

    def start(self):
        self._msg_pipe.send(('start',))

    def write_next(self):
        # The worker will now overwrite the shared arrays; reading them
        # before the next 'writing_done' would race.
        self._readable = False
        self._msg_pipe.send(('write_next',))

    def abort(self):
        self._msg_pipe.send(('abort',))

    def join(self, timeout=None):
        self._process.join(timeout)

    def terminate(self):
        self._process.terminate()

    @staticmethod
    def recv_draw(processes, timeout=3600):
        """Block until any worker reports, then return its status tuple.

        Returns (proc, is_last, draw, tuning, stats, warns); re-raises a
        worker failure as RuntimeError chained to the remote traceback.
        NOTE(review): `timeout` is accepted but not passed to `wait`.
        """
        if not processes:
            raise ValueError('No processes.')
        pipes = [proc._msg_pipe for proc in processes]
        ready = multiprocessing.connection.wait(pipes)
        if not ready:
            raise multiprocessing.TimeoutError('No message from samplers.')
        idxs = {id(proc._msg_pipe): proc for proc in processes}
        proc = idxs[id(ready[0])]
        msg = ready[0].recv()

        if msg[0] == 'error':
            old = msg[1]
            six.raise_from(RuntimeError('Chain %s failed.' % proc.chain), old)
        elif msg[0] == 'writing_done':
            proc._readable = True
            proc._num_samples += 1
            return (proc,) + msg[1:]
        else:
            raise ValueError('Sampler sent bad message.')

    @staticmethod
    def terminate_all(processes, patience=2):
        """Ask all workers to stop; force-terminate stragglers after
        `patience` seconds."""
        for process in processes:
            try:
                process.abort()
            except EOFError:
                pass

        start_time = time.time()
        try:
            for process in processes:
                timeout = time.time() + patience - start_time
                if timeout < 0:
                    raise multiprocessing.TimeoutError()
                process.join(timeout)
        except multiprocessing.TimeoutError:
            logger.warn('Chain processes did not terminate as expected. '
                        'Terminating forcefully...')
            for process in processes:
                process.terminate()
            for process in processes:
                process.join()
# One finished draw handed from a worker to the main process.
Draw = namedtuple(
    'Draw', 'chain is_last draw_idx tuning stats point warnings')
class ParallelSampler(object):
    """Drive several chain worker processes, at most `cores` at a time.

    Use as a context manager; iterating yields one `Draw` per sample,
    interleaved across the active chains as workers finish.
    """
    def __init__(self, draws, tune, chains, cores, seeds, start_points,
                 step_method, start_chain_num=0, progressbar=True):
        # Import lazily so tqdm is only required when a bar is shown.
        if progressbar:
            import tqdm
            tqdm_ = tqdm.tqdm

        if any(len(arg) != chains for arg in [seeds, start_points]):
            raise ValueError(
                'Number of seeds and start_points must be %s.' % chains)

        # One adapter (and forked worker) per chain.
        self._samplers = [
            ProcessAdapter(draws, tune, step_method,
                           chain + start_chain_num, seed, start)
            for chain, seed, start in zip(range(chains), seeds, start_points)
        ]

        self._inactive = self._samplers.copy()
        self._finished = []
        self._active = []
        self._max_active = cores

        self._in_context = False
        self._start_chain_num = start_chain_num

        self._progress = None
        if progressbar:
            self._progress = tqdm_(
                total=chains * (draws + tune), unit='draws',
                desc='Sampling %s chains' % chains)

    def _make_active(self):
        # Promote queued chains until `cores` workers are sampling.
        while self._inactive and len(self._active) < self._max_active:
            proc = self._inactive.pop(0)
            proc.start()
            proc.write_next()
            self._active.append(proc)

    def __iter__(self):
        if not self._in_context:
            raise ValueError('Use ParallelSampler as context manager.')
        self._make_active()

        while self._active:
            draw = ProcessAdapter.recv_draw(self._active)
            proc, is_last, draw, tuning, stats, warns = draw
            if self._progress is not None:
                self._progress.update()

            if is_last:
                proc.join()
                self._active.remove(proc)
                self._finished.append(proc)
                self._make_active()

            # We could also yield proc.shared_point_view directly,
            # and only call proc.write_next() after the yield returns.
            # This seems to be faster overally though, as the worker
            # loses less time waiting.
            point = {name: val.copy()
                     for name, val in proc.shared_point_view.items()}

            # Already called for new proc in _make_active
            if not is_last:
                proc.write_next()

            yield Draw(proc.chain, is_last, draw, tuning, stats, point, warns)

    def __enter__(self):
        self._in_context = True
        return self

    def __exit__(self, *args):
        # Stop any still-running workers (no-op for finished ones).
        ProcessAdapter.terminate_all(self._samplers)
        if self._progress is not None:
            self._progress.close()
| 31.053412 | 78 | 0.572384 | import multiprocessing
import multiprocessing.sharedctypes
import ctypes
import time
import logging
from collections import namedtuple
import traceback
import six
import numpy as np
from . import theanof
logger = logging.getLogger('pymc3')
class RemoteTraceback(Exception):
def __init__(self, tb):
self.tb = tb
def __str__(self):
return self.tb
class ExceptionWithTraceback:
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return rebuild_exc, (self.exc, self.tb)
def rebuild_exc(exc, tb):
exc.__cause__ = RemoteTraceback(tb)
return exc
class _Process(multiprocessing.Process):
def __init__(self, name, msg_pipe, step_method, shared_point,
draws, tune, seed):
super(_Process, self).__init__(daemon=True, name=name)
self._msg_pipe = msg_pipe
self._step_method = step_method
self._shared_point = shared_point
self._seed = seed
self._tt_seed = seed + 1
self._draws = draws
self._tune = tune
def run(self):
try:
self._point = self._make_numpy_refs()
self._start_loop()
except KeyboardInterrupt:
pass
except BaseException as e:
e = ExceptionWithTraceback(e, e.__traceback__)
self._msg_pipe.send(('error', e))
finally:
self._msg_pipe.close()
def _make_numpy_refs(self):
shape_dtypes = self._step_method.vars_shape_dtype
point = {}
for name, (shape, dtype) in shape_dtypes.items():
array = self._shared_point[name]
self._shared_point[name] = array
point[name] = np.frombuffer(array, dtype).reshape(shape)
return point
def _write_point(self, point):
for name, vals in point.items():
self._point[name][...] = vals
def _recv_msg(self):
return self._msg_pipe.recv()
def _start_loop(self):
np.random.seed(self._seed)
theanof.set_tt_rng(self._tt_seed)
draw = 0
tuning = True
msg = self._recv_msg()
if msg[0] == 'abort':
raise KeyboardInterrupt()
if msg[0] != 'start':
raise ValueError('Unexpected msg ' + msg[0])
while True:
if draw < self._draws + self._tune:
point, stats = self._compute_point()
else:
return
if draw == self._tune:
self._step_method.stop_tuning()
tuning = False
msg = self._recv_msg()
if msg[0] == 'abort':
raise KeyboardInterrupt()
elif msg[0] == 'write_next':
self._write_point(point)
is_last = draw + 1 == self._draws + self._tune
if is_last:
warns = self._collect_warnings()
else:
warns = None
self._msg_pipe.send(
('writing_done', is_last, draw, tuning, stats, warns))
draw += 1
else:
raise ValueError('Unknown message ' + msg[0])
def _compute_point(self):
if self._step_method.generates_stats:
point, stats = self._step_method.step(self._point)
else:
point = self._step_method.step(self._point)
stats = None
return point, stats
def _collect_warnings(self):
if hasattr(self._step_method, 'warnings'):
return self._step_method.warnings()
else:
return []
class ProcessAdapter(object):
def __init__(self, draws, tune, step_method, chain, seed, start):
self.chain = chain
process_name = "worker_chain_%s" % chain
self._msg_pipe, remote_conn = multiprocessing.Pipe()
self._shared_point = {}
self._point = {}
for name, (shape, dtype) in step_method.vars_shape_dtype.items():
size = 1
for dim in shape:
size *= int(dim)
size *= dtype.itemsize
if size != ctypes.c_size_t(size).value:
raise ValueError('Variable %s is too large' % name)
array = multiprocessing.sharedctypes.RawArray('c', size)
self._shared_point[name] = array
array_np = np.frombuffer(array, dtype).reshape(shape)
array_np[...] = start[name]
self._point[name] = array_np
self._readable = True
self._num_samples = 0
self._process = _Process(
process_name, remote_conn, step_method, self._shared_point,
draws, tune, seed)
self._process.start()
@property
def shared_point_view(self):
if not self._readable:
raise RuntimeError()
return self._point
def start(self):
self._msg_pipe.send(('start',))
def write_next(self):
self._readable = False
self._msg_pipe.send(('write_next',))
def abort(self):
self._msg_pipe.send(('abort',))
def join(self, timeout=None):
self._process.join(timeout)
def terminate(self):
self._process.terminate()
@staticmethod
def recv_draw(processes, timeout=3600):
if not processes:
raise ValueError('No processes.')
pipes = [proc._msg_pipe for proc in processes]
ready = multiprocessing.connection.wait(pipes)
if not ready:
raise multiprocessing.TimeoutError('No message from samplers.')
idxs = {id(proc._msg_pipe): proc for proc in processes}
proc = idxs[id(ready[0])]
msg = ready[0].recv()
if msg[0] == 'error':
old = msg[1]
six.raise_from(RuntimeError('Chain %s failed.' % proc.chain), old)
elif msg[0] == 'writing_done':
proc._readable = True
proc._num_samples += 1
return (proc,) + msg[1:]
else:
raise ValueError('Sampler sent bad message.')
@staticmethod
def terminate_all(processes, patience=2):
for process in processes:
try:
process.abort()
except EOFError:
pass
start_time = time.time()
try:
for process in processes:
timeout = time.time() + patience - start_time
if timeout < 0:
raise multiprocessing.TimeoutError()
process.join(timeout)
except multiprocessing.TimeoutError:
logger.warn('Chain processes did not terminate as expected. '
'Terminating forcefully...')
for process in processes:
process.terminate()
for process in processes:
process.join()
Draw = namedtuple(
'Draw',
['chain', 'is_last', 'draw_idx', 'tuning', 'stats', 'point', 'warnings']
)
class ParallelSampler(object):
    """Run up to ``cores`` worker processes to sample ``chains`` chains.

    Must be used as a context manager; iterating the instance yields one
    :class:`Draw` per received sample, across all chains.
    """
    def __init__(self, draws, tune, chains, cores, seeds, start_points,
                 step_method, start_chain_num=0, progressbar=True):
        if progressbar:
            import tqdm
            tqdm_ = tqdm.tqdm
        if any(len(arg) != chains for arg in [seeds, start_points]):
            raise ValueError(
                'Number of seeds and start_points must be %s.' % chains)
        # One worker adapter per chain; only ``cores`` of them run at once.
        self._samplers = [
            ProcessAdapter(draws, tune, step_method,
                           chain + start_chain_num, seed, start)
            for chain, seed, start in zip(range(chains), seeds, start_points)
        ]
        self._inactive = self._samplers.copy()
        self._finished = []
        self._active = []
        self._max_active = cores
        self._in_context = False
        self._start_chain_num = start_chain_num
        self._progress = None
        if progressbar:
            self._progress = tqdm_(
                total=chains * (draws + tune), unit='draws',
                desc='Sampling %s chains' % chains)
    def _make_active(self):
        # Start queued samplers until the concurrency limit is reached.
        while self._inactive and len(self._active) < self._max_active:
            proc = self._inactive.pop(0)
            proc.start()
            proc.write_next()
            self._active.append(proc)
    def __iter__(self):
        """Yield :class:`Draw` tuples until every chain has finished."""
        if not self._in_context:
            raise ValueError('Use ParallelSampler as context manager.')
        self._make_active()
        while self._active:
            draw = ProcessAdapter.recv_draw(self._active)
            proc, is_last, draw, tuning, stats, warns = draw
            if self._progress is not None:
                self._progress.update()
            if is_last:
                proc.join()
                self._active.remove(proc)
                self._finished.append(proc)
                # A slot freed up -- promote the next queued sampler.
                self._make_active()
            # Copy values out of the worker's point view before asking it
            # to produce the next draw (write_next overwrites the view).
            point = {name: val.copy()
                     for name, val in proc.shared_point_view.items()}
            if not is_last:
                proc.write_next()
            yield Draw(proc.chain, is_last, draw, tuning, stats, point, warns)
    def __enter__(self):
        self._in_context = True
        return self
    def __exit__(self, *args):
        # Always tear down the workers, even if sampling raised.
        ProcessAdapter.terminate_all(self._samplers)
        if self._progress is not None:
            self._progress.close()
| true | true |
1c4952dfb7b5146c980edaa2ae1af799017e768b | 167 | py | Python | sewer/config.py | prestix-studio/sewer | 67867f778eb92c9c14cd028116f5695b0223baa2 | [
"MIT"
] | 135 | 2017-12-31T22:01:33.000Z | 2022-01-20T18:18:11.000Z | sewer/config.py | prestix-studio/sewer | 67867f778eb92c9c14cd028116f5695b0223baa2 | [
"MIT"
] | 149 | 2018-01-10T10:36:18.000Z | 2021-07-01T16:22:47.000Z | sewer/config.py | prestix-studio/sewer | 67867f778eb92c9c14cd028116f5695b0223baa2 | [
"MIT"
] | 61 | 2018-03-05T16:58:55.000Z | 2021-05-21T01:30:07.000Z | ACME_DIRECTORY_URL_STAGING = "https://acme-staging-v02.api.letsencrypt.org/directory"
ACME_DIRECTORY_URL_PRODUCTION = "https://acme-v02.api.letsencrypt.org/directory"
| 55.666667 | 85 | 0.826347 | ACME_DIRECTORY_URL_STAGING = "https://acme-staging-v02.api.letsencrypt.org/directory"
ACME_DIRECTORY_URL_PRODUCTION = "https://acme-v02.api.letsencrypt.org/directory"
| true | true |
1c4952e55605b55e89e8c96cb5c304d56bad7210 | 2,668 | py | Python | src/send_status.py | Satish615/deep-learning-containers-1 | 76e750e828b6f583a6b7b1c291057059a14285b1 | [
"Apache-2.0"
] | 1 | 2021-12-17T15:50:48.000Z | 2021-12-17T15:50:48.000Z | src/send_status.py | Satish615/deep-learning-containers-1 | 76e750e828b6f583a6b7b1c291057059a14285b1 | [
"Apache-2.0"
] | null | null | null | src/send_status.py | Satish615/deep-learning-containers-1 | 76e750e828b6f583a6b7b1c291057059a14285b1 | [
"Apache-2.0"
] | null | null | null | import os
import argparse
import utils
from github import GitHubHandler
def get_args():
    """Parse command-line arguments for direct invocation.

    Returns:
        argparse.Namespace with a single ``status`` attribute
        ("0" = fail, "1" = success, "2" = pending).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--status",
        choices=["0", "1", "2"],
        help="Github status to set. 0 is fail, 1 is success, 2 is pending",
    )
    parsed = arg_parser.parse_args()
    return parsed
def get_target_url(project):
    """Build the "Details" link shown on PR status checks.

    :param project: CodeBuild project name associated with the running build
    :return: URL of the CodeBuild log page for the current build
    """
    aws_region = os.getenv("AWS_REGION")
    log_path = os.getenv("CODEBUILD_LOG_PATH")
    url = (
        f"https://{aws_region}.console.aws.amazon.com/codesuite/codebuild/projects/"
        f"{project}/build/{project}%3A{log_path}/log?region={aws_region}"
    )
    return url
def set_build_description(state, project, trigger_job):
    """
    Set the build description, based on the state, project name, and job that triggered the project.
    :param state: <str> choices are "success", "failure", "error" or "pending"
    :param project: Project name associated with the running CodeBuild job
    :param trigger_job: The name of the CodeBuild project that triggered this build
    :return: <str> Description to be posted to the PR build
    """
    if state == "success":
        return f"{project} succeeded for {trigger_job}."
    # "failure" and "error" are both terminal bad states and share a message.
    if state in ("failure", "error"):
        return f"{project} is in state {state.upper()} for {trigger_job}! Check details to debug."
    if state == "pending":
        return f"{project} is pending for {trigger_job}..."
    # Defensive fallback: surface unexpected states instead of raising.
    return f"Unknown state: {state}"
def post_status(state):
    """
    Post the status with a constructed context to the PR.
    :param state: <str> choices are "success", "failure", "error" or "pending"
    """
    project_name = utils.get_codebuild_project_name()
    # Fall back to a sentinel so a missing env var stays visible in the context.
    trigger_job = os.getenv("TEST_TRIGGER", "UNKNOWN-TEST-TRIGGER")
    target_url = get_target_url(project_name)
    # The context uniquely identifies this check on the PR: trigger + project.
    context = f"{trigger_job}_{project_name}"
    description = set_build_description(state, project_name, trigger_job)
    handler = GitHubHandler()
    handler.set_status(
        state=state,
        context=context,
        description=description,
        target_url=target_url
    )
def main():
    """Map the CLI status code to a GitHub state and post it (PR builds only)."""
    codebuild_statuses = {"0": "failure", "1": "success", "2": "pending"}
    args = get_args()
    state = codebuild_statuses[args.status]
    # Send status for given state; only PR builds write statuses to GitHub.
    if os.getenv("BUILD_CONTEXT") == "PR":
        post_status(state)
if __name__ == "__main__":
    main()
| 30.318182 | 122 | 0.664168 | import os
import argparse
import utils
from github import GitHubHandler
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--status",
choices=["0", "1", "2"],
help="Github status to set. 0 is fail, 1 is success, 2 is pending",
)
return parser.parse_args()
def get_target_url(project):
region = os.getenv("AWS_REGION")
logpath = os.getenv("CODEBUILD_LOG_PATH")
return f"https://{region}.console.aws.amazon.com/codesuite/codebuild/projects/{project}/build/{project}%3A{logpath}" \
f"/log?region={region}"
def set_build_description(state, project, trigger_job):
if state == "success":
return f"{project} succeeded for {trigger_job}."
elif state == "failure" or state == "error":
return f"{project} is in state {state.upper()} for {trigger_job}! Check details to debug."
elif state == "pending":
return f"{project} is pending for {trigger_job}..."
else:
return f"Unknown state: {state}"
def post_status(state):
project_name = utils.get_codebuild_project_name()
trigger_job = os.getenv("TEST_TRIGGER", "UNKNOWN-TEST-TRIGGER")
target_url = get_target_url(project_name)
context = f"{trigger_job}_{project_name}"
description = set_build_description(state, project_name, trigger_job)
handler = GitHubHandler()
handler.set_status(
state=state,
context=context,
description=description,
target_url=target_url
)
def main():
codebuild_statuses = {"0": "failure", "1": "success", "2": "pending"}
args = get_args()
state = codebuild_statuses[args.status]
if os.getenv("BUILD_CONTEXT") == "PR":
post_status(state)
if __name__ == "__main__":
main()
| true | true |
1c49536f4b591e818fca4649372187456dbc31aa | 493 | py | Python | parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Tipo.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 35 | 2020-12-07T03:11:43.000Z | 2021-04-15T17:38:16.000Z | parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Tipo.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 47 | 2020-12-09T01:29:09.000Z | 2021-01-13T05:37:50.000Z | parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Tipo.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | from Instrucciones.TablaSimbolos.Instruccion import Instruccion
class Tipo(Instruccion):
    """Instruction node that stores a literal value with its source position."""
    # NOTE(review): ``id``, ``owner`` and ``id2`` are accepted but never
    # stored -- presumably kept for signature parity with other Instruccion
    # subclasses; confirm before removing.
    def __init__(self, id, tipo, owner, id2, valor, linea, columna):
        Instruccion.__init__(self,tipo,linea,columna)
        self.valor = valor
    def ejecutar(self, tabla, arbol):
        # Run the base-class bookkeeping, then print the stored value with
        # its line/column (trace output; message text must stay in Spanish).
        super().ejecutar(tabla,arbol)
        print(self.valor + " linea: " + str(self.linea) + " columna: " + str(self.columna))
'''
instruccion = Tipo("hola mundo",None, 1,2)
instruccion.ejecutar(None,None)
''' | 30.8125 | 91 | 0.677485 | from Instrucciones.TablaSimbolos.Instruccion import Instruccion
class Tipo(Instruccion):
def __init__(self, id, tipo, owner, id2, valor, linea, columna):
Instruccion.__init__(self,tipo,linea,columna)
self.valor = valor
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
print(self.valor + " linea: " + str(self.linea) + " columna: " + str(self.columna))
| true | true |
1c49548ead69b53400104cf9dac0bc4e40c5a598 | 3,709 | py | Python | scripts/setup/generate_secrets.py | GauravVirmani/zulip | 5a204d7c84d60e193f1ea0900d42848c5276a095 | [
"Apache-2.0"
] | null | null | null | scripts/setup/generate_secrets.py | GauravVirmani/zulip | 5a204d7c84d60e193f1ea0900d42848c5276a095 | [
"Apache-2.0"
] | null | null | null | scripts/setup/generate_secrets.py | GauravVirmani/zulip | 5a204d7c84d60e193f1ea0900d42848c5276a095 | [
"Apache-2.0"
] | 1 | 2021-06-10T15:12:52.000Z | 2021-06-10T15:12:52.000Z | #!/usr/bin/env python
# This tools generates /etc/zulip/zulip-secrets.conf
from __future__ import print_function
import sys
import os
import os.path
from os.path import dirname, abspath
if False:
from typing import Dict, Optional, Text
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
from django.utils.crypto import get_random_string
import six
import argparse
from zerver.lib.str_utils import force_str
from zerver.lib.utils import generate_random_token
# Run from the repository root so relative output paths resolve correctly.
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
CAMO_CONFIG_FILENAME = '/etc/default/camo'
# Secrets that are always (re)generated as random tokens when missing.
AUTOGENERATED_SETTINGS = ['shared_secret', 'avatar_salt', 'rabbitmq_password', 'local_database_password',
                          'initial_password_salt']
# TODO: We can eliminate this function if we refactor the install
# script to run generate_secrets before zulip-puppet-apply.
def generate_camo_config_file(camo_key):
    # type: (Text) -> None
    """Write the Camo daemon config file with the given shared key."""
    # Keep the exact layout the Camo init script expects (trailing newline).
    config_lines = ["ENABLED=yes", "PORT=9292", "CAMO_KEY=%s" % (camo_key,), ""]
    with open(CAMO_CONFIG_FILENAME, 'w') as camo_file:
        camo_file.write("\n".join(config_lines))
    print("Generated Camo config file %s" % (CAMO_CONFIG_FILENAME,))
def generate_django_secretkey():
    # type: () -> Text
    """Secret key generation taken from Django's startproject.py"""
    # 50 chars from this alphabet matches Django's own SECRET_KEY strength.
    chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    return get_random_string(50, chars)
def get_old_conf(output_filename):
    # type: (Text) -> Dict[str, Text]
    """Read back existing secrets so re-running never rotates them.

    Returns an empty dict if the file does not exist yet; otherwise a dict
    mapping every known secret name to its value, with ``None`` for any
    secret not present in the file.
    """
    if not os.path.exists(output_filename):
        return {}
    secrets_file = six.moves.configparser.RawConfigParser()  # type: ignore # https://github.com/python/typeshed/issues/307
    secrets_file.read(output_filename)
    def get_secret(key):
        # type: (Text) -> Optional[Text]
        if secrets_file.has_option('secrets', key):
            return secrets_file.get('secrets', key)
        return None
    fields = AUTOGENERATED_SETTINGS + ['secret_key', 'camo_key']
    return {name: get_secret(name) for name in fields}
def generate_secrets(development=False):
    # type: (bool) -> None
    """Write the secrets config, preserving any existing secret values.

    Existing values are read via ``get_old_conf`` so a re-run never rotates
    secrets; only missing entries get freshly generated values.
    """
    if development:
        OUTPUT_SETTINGS_FILENAME = "zproject/dev-secrets.conf"
    else:
        OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"
    lines = [u'[secrets]\n']
    def config_line(var, value):
        # type: (Text, Text) -> Text
        return "%s = %s\n" % (var, value)
    old_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)
    for name in AUTOGENERATED_SETTINGS:
        # get_old_conf stores an explicit None (not a missing key) for
        # secrets absent from an existing file, so dict.get's default would
        # never fire and "name = None" would be written.  Check explicitly;
        # this also avoids generating a throwaway token when a value exists.
        value = old_conf.get(name)
        if value is None:
            value = generate_random_token(64)
        lines.append(config_line(name, value))
    secret_key = old_conf.get('secret_key')
    if secret_key is None:
        secret_key = generate_django_secretkey()
    lines.append(config_line('secret_key', secret_key))
    camo_key = old_conf.get('camo_key')
    if camo_key is None:
        camo_key = get_random_string(64)
    lines.append(config_line('camo_key', camo_key))
    if not development:
        # Write the Camo config file directly
        generate_camo_config_file(camo_key)
    # Context manager guarantees the handle is closed even if write fails.
    with open(OUTPUT_SETTINGS_FILENAME, 'w') as out:
        out.write(force_str("".join(lines)))
    print("Generated %s with auto-generated secrets!" % (OUTPUT_SETTINGS_FILENAME,))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Exactly one of --development/--production is required; both write the
    # same dest so generate_secrets() receives a single boolean flag.
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--development', action='store_true', dest='development', help='For setting up the developer env for zulip')
    group.add_argument('--production', action='store_false', dest='development', help='For setting up the production env for zulip')
    results = parser.parse_args()
    generate_secrets(results.development)
| 34.342593 | 132 | 0.7134 |
from __future__ import print_function
import sys
import os
import os.path
from os.path import dirname, abspath
if False:
from typing import Dict, Optional, Text
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
from django.utils.crypto import get_random_string
import six
import argparse
from zerver.lib.str_utils import force_str
from zerver.lib.utils import generate_random_token
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
CAMO_CONFIG_FILENAME = '/etc/default/camo'
AUTOGENERATED_SETTINGS = ['shared_secret', 'avatar_salt', 'rabbitmq_password', 'local_database_password',
'initial_password_salt']
def generate_camo_config_file(camo_key):
camo_config = """ENABLED=yes
PORT=9292
CAMO_KEY=%s
""" % (camo_key,)
with open(CAMO_CONFIG_FILENAME, 'w') as camo_file:
camo_file.write(camo_config)
print("Generated Camo config file %s" % (CAMO_CONFIG_FILENAME,))
def generate_django_secretkey():
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
return get_random_string(50, chars)
def get_old_conf(output_filename):
if not os.path.exists(output_filename):
return {}
secrets_file = six.moves.configparser.RawConfigParser() f get_secret(key):
if secrets_file.has_option('secrets', key):
return secrets_file.get('secrets', key)
return None
fields = AUTOGENERATED_SETTINGS + ['secret_key', 'camo_key']
return {name: get_secret(name) for name in fields}
def generate_secrets(development=False):
if development:
OUTPUT_SETTINGS_FILENAME = "zproject/dev-secrets.conf"
else:
OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"
lines = [u'[secrets]\n']
def config_line(var, value):
return "%s = %s\n" % (var, value)
old_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)
for name in AUTOGENERATED_SETTINGS:
lines.append(config_line(name, old_conf.get(name, generate_random_token(64))))
secret_key = old_conf.get('secret_key', generate_django_secretkey())
lines.append(config_line('secret_key', secret_key))
camo_key = old_conf.get('camo_key', get_random_string(64))
lines.append(config_line('camo_key', camo_key))
if not development:
generate_camo_config_file(camo_key)
out = open(OUTPUT_SETTINGS_FILENAME, 'w')
out.write(force_str("".join(lines)))
out.close()
print("Generated %s with auto-generated secrets!" % (OUTPUT_SETTINGS_FILENAME,))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--development', action='store_true', dest='development', help='For setting up the developer env for zulip')
group.add_argument('--production', action='store_false', dest='development', help='For setting up the production env for zulip')
results = parser.parse_args()
generate_secrets(results.development)
| true | true |
1c4954dc8435739b2d95abc3a8c025fdebc8c898 | 4,984 | py | Python | tia/tests/test_rlab_table.py | lsternlicht/tia | fe74d1876260a946e52bd733bc32da0698749f2c | [
"BSD-3-Clause"
] | 23 | 2017-11-13T01:05:49.000Z | 2022-03-30T01:38:00.000Z | tia/tests/test_rlab_table.py | lsternlicht/tia | fe74d1876260a946e52bd733bc32da0698749f2c | [
"BSD-3-Clause"
] | 1 | 2018-09-19T21:59:04.000Z | 2018-09-19T21:59:04.000Z | tia/tests/test_rlab_table.py | lsternlicht/tia | fe74d1876260a946e52bd733bc32da0698749f2c | [
"BSD-3-Clause"
] | 13 | 2018-11-26T21:53:36.000Z | 2022-01-09T00:10:27.000Z | import unittest
import pandas as pd
import pandas.util.testing as pdtest
import tia.rlab.table as tbl
class TestTable(unittest.TestCase):
    """Unit tests for tia.rlab.table span/format/region helpers."""
    def setUp(self):
        # Simple 2x2 frame with a percent-like and a large-number column.
        self.df1 = df1 = pd.DataFrame({'A': [.55, .65], 'B': [1234., -5678.]}, index=['I1', 'I2'])
        # Multi-index frame with multi-index
        cols = pd.MultiIndex.from_arrays([['LEFT', 'LEFT', 'RIGHT', 'RIGHT'], ['A', 'B', 'A', 'B']])
        idx = pd.MultiIndex.from_arrays([['TOP', 'BOTTOM'], ['I1', 'I2']])
        self.mdf1 = pd.DataFrame([[.55, 1234., .55, 1234.], [.65, -5678., .65, -5678.]], columns=cols, index=idx)
    def test_span_iter(self):
        """span_iter yields (start, end) ranges of equal consecutive values."""
        s = pd.Series([1, 1, 1, 3, 2, 2])
        items = list(tbl.span_iter(s))
        self.assertEqual(items, [(0, 2), (4, 5)])
        # reverse and ensure it does not break it
        s = s[::-1]
        items = list(tbl.span_iter(s))
        self.assertEqual(items, [(0, 2), (4, 5)])
    def test_level_iter(self):
        """level_iter yields (level, position, label) and honors level filters."""
        l1 = ['L_11', 'L_12']
        l2 = ['L_21', 'L_22']
        l3 = ['L_31', 'L_32']
        midx = pd.MultiIndex.from_arrays([l1, l2, l3], names=['1', '2', '3'])
        actual = list(tbl.level_iter(midx))
        expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (1, 0, 'L_21'), (1, 1, 'L_22'), (2, 0, 'L_31'), (2, 1, 'L_32')]
        self.assertEqual(actual, expected)
        actual = list(tbl.level_iter(midx, levels=[0, 2]))
        expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (2, 0, 'L_31'), (2, 1, 'L_32')]
        self.assertEqual(actual, expected)
        actual = list(tbl.level_iter(midx, levels=0))
        expected = [(0, 0, 'L_11'), (0, 1, 'L_12')]
        self.assertEqual(actual, expected)
    def test_region_formatter_iloc(self):
        """Region iloc narrowing applies formats only to the selected cells."""
        tf = tbl.TableFormatter(self.df1)
        region = tf.cells
        region.apply_format(lambda x: 'A')
        expected = pd.DataFrame([['A', 'A'], ['A', 'A']], index=[1, 2], columns=[1, 2])
        pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
        #
        # Use the location
        #
        region = region.iloc[:, 1]
        region.apply_format(lambda x: 'B')
        expected = pd.DataFrame([['A', 'B'], ['A', 'B']], index=[1, 2], columns=[1, 2])
        pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
        # Get single cell
        region = region.iloc[1]
        region.apply_format(lambda x: 'D')
        expected = pd.DataFrame([['A', 'B'], ['A', 'D']], index=[1, 2], columns=[1, 2])
        pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
        # Get single cell
        region = tf.cells.iloc[1, 0]
        region.apply_format(lambda x: 'C')
        expected = pd.DataFrame([['A', 'B'], ['C', 'D']], index=[1, 2], columns=[1, 2])
        pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
    def test_region_empty(self):
        """Formatting an empty region must be a harmless no-op."""
        tf = tbl.TableFormatter(self.df1)
        empty = tf['ALL'].empty_frame()
        empty.apply_format(lambda x: x)
    def test_detect_spans(self):
        """Repeated multi-index labels become reportlab SPAN style commands."""
        tf = tbl.TableFormatter(self.mdf1)
        tf.header.detect_colspans()
        self.assertEqual(['SPAN', (2, 0), (3, 0)], tf.style_cmds[0])
        self.assertEqual(['SPAN', (4, 0), (5, 0)], tf.style_cmds[1])
        tf = tbl.TableFormatter(self.mdf1.T)
        tf.index.detect_rowspans()
        self.assertEqual(['SPAN', (0, 2), (0, 3)], tf.style_cmds[0])
        self.assertEqual(['SPAN', (0, 4), (0, 5)], tf.style_cmds[1])
    def test_match(self):
        """match_column_labels formats only columns labeled 'A' (both sides)."""
        tf = tbl.TableFormatter(self.mdf1)
        vcopy = tf.formatted_values.copy()
        tf.cells.match_column_labels(['A']).percent_format(precision=1)
        vcopy.iloc[2, 4] = '55.0% '  # padded for neg
        vcopy.iloc[3, 4] = '65.0% '
        vcopy.iloc[2, 2] = '55.0% '
        vcopy.iloc[3, 2] = '65.0% '
        pdtest.assert_frame_equal(vcopy, tf.formatted_values)
    def test_period_index(self):
        """PeriodIndex values can be formatted via strftime on index or header."""
        df = pd.DataFrame({'x': [1., 2.], 'y': [3., 4.]}, index=pd.date_range('1/1/2015', freq='M', periods=2).to_period())
        tf = tbl.TableFormatter(df)
        # expected values
        vcopy = tf.formatted_values.copy()
        vcopy.iloc[1, 1] = '1 '
        vcopy.iloc[2, 1] = '2 '
        vcopy.iloc[1, 2] = '3 '
        vcopy.iloc[2, 2] = '4 '
        vcopy.iloc[1, 0] = '01/2015'
        vcopy.iloc[2, 0] = '02/2015'
        # build the format
        tf.cells.int_format()
        tf.index.apply_format(lambda x: x.strftime('%m/%Y'))
        pdtest.assert_frame_equal(vcopy, tf.formatted_values)
        # Test when it is the columns
        dfT = df.T
        tfT = tbl.TableFormatter(dfT)
        vcopy = tfT.formatted_values.copy()
        vcopy.iloc[1, 1] = '1 '
        vcopy.iloc[1, 2] = '2 '
        vcopy.iloc[2, 1] = '3 '
        vcopy.iloc[2, 2] = '4 '
        vcopy.iloc[0, 1] = '01/2015'
        vcopy.iloc[0, 2] = '02/2015'
        # build the format
        tfT.cells.int_format()
        tfT.header.apply_format(lambda x: x.strftime('%m/%Y'))
        pdtest.assert_frame_equal(vcopy, tfT.formatted_values)
| 40.520325 | 123 | 0.552769 | import unittest
import pandas as pd
import pandas.util.testing as pdtest
import tia.rlab.table as tbl
class TestTable(unittest.TestCase):
def setUp(self):
self.df1 = df1 = pd.DataFrame({'A': [.55, .65], 'B': [1234., -5678.]}, index=['I1', 'I2'])
cols = pd.MultiIndex.from_arrays([['LEFT', 'LEFT', 'RIGHT', 'RIGHT'], ['A', 'B', 'A', 'B']])
idx = pd.MultiIndex.from_arrays([['TOP', 'BOTTOM'], ['I1', 'I2']])
self.mdf1 = pd.DataFrame([[.55, 1234., .55, 1234.], [.65, -5678., .65, -5678.]], columns=cols, index=idx)
def test_span_iter(self):
s = pd.Series([1, 1, 1, 3, 2, 2])
items = list(tbl.span_iter(s))
self.assertEqual(items, [(0, 2), (4, 5)])
s = s[::-1]
items = list(tbl.span_iter(s))
self.assertEqual(items, [(0, 2), (4, 5)])
def test_level_iter(self):
l1 = ['L_11', 'L_12']
l2 = ['L_21', 'L_22']
l3 = ['L_31', 'L_32']
midx = pd.MultiIndex.from_arrays([l1, l2, l3], names=['1', '2', '3'])
actual = list(tbl.level_iter(midx))
expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (1, 0, 'L_21'), (1, 1, 'L_22'), (2, 0, 'L_31'), (2, 1, 'L_32')]
self.assertEqual(actual, expected)
actual = list(tbl.level_iter(midx, levels=[0, 2]))
expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (2, 0, 'L_31'), (2, 1, 'L_32')]
self.assertEqual(actual, expected)
actual = list(tbl.level_iter(midx, levels=0))
expected = [(0, 0, 'L_11'), (0, 1, 'L_12')]
self.assertEqual(actual, expected)
def test_region_formatter_iloc(self):
tf = tbl.TableFormatter(self.df1)
region = tf.cells
region.apply_format(lambda x: 'A')
expected = pd.DataFrame([['A', 'A'], ['A', 'A']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
region = region.iloc[:, 1]
region.apply_format(lambda x: 'B')
expected = pd.DataFrame([['A', 'B'], ['A', 'B']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
region = region.iloc[1]
region.apply_format(lambda x: 'D')
expected = pd.DataFrame([['A', 'B'], ['A', 'D']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
region = tf.cells.iloc[1, 0]
region.apply_format(lambda x: 'C')
expected = pd.DataFrame([['A', 'B'], ['C', 'D']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
def test_region_empty(self):
tf = tbl.TableFormatter(self.df1)
empty = tf['ALL'].empty_frame()
empty.apply_format(lambda x: x)
def test_detect_spans(self):
tf = tbl.TableFormatter(self.mdf1)
tf.header.detect_colspans()
self.assertEqual(['SPAN', (2, 0), (3, 0)], tf.style_cmds[0])
self.assertEqual(['SPAN', (4, 0), (5, 0)], tf.style_cmds[1])
tf = tbl.TableFormatter(self.mdf1.T)
tf.index.detect_rowspans()
self.assertEqual(['SPAN', (0, 2), (0, 3)], tf.style_cmds[0])
self.assertEqual(['SPAN', (0, 4), (0, 5)], tf.style_cmds[1])
def test_match(self):
tf = tbl.TableFormatter(self.mdf1)
vcopy = tf.formatted_values.copy()
tf.cells.match_column_labels(['A']).percent_format(precision=1)
vcopy.iloc[2, 4] = '55.0% '
vcopy.iloc[3, 4] = '65.0% '
vcopy.iloc[2, 2] = '55.0% '
vcopy.iloc[3, 2] = '65.0% '
pdtest.assert_frame_equal(vcopy, tf.formatted_values)
def test_period_index(self):
df = pd.DataFrame({'x': [1., 2.], 'y': [3., 4.]}, index=pd.date_range('1/1/2015', freq='M', periods=2).to_period())
tf = tbl.TableFormatter(df)
vcopy = tf.formatted_values.copy()
vcopy.iloc[1, 1] = '1 '
vcopy.iloc[2, 1] = '2 '
vcopy.iloc[1, 2] = '3 '
vcopy.iloc[2, 2] = '4 '
vcopy.iloc[1, 0] = '01/2015'
vcopy.iloc[2, 0] = '02/2015'
tf.cells.int_format()
tf.index.apply_format(lambda x: x.strftime('%m/%Y'))
pdtest.assert_frame_equal(vcopy, tf.formatted_values)
dfT = df.T
tfT = tbl.TableFormatter(dfT)
vcopy = tfT.formatted_values.copy()
vcopy.iloc[1, 1] = '1 '
vcopy.iloc[1, 2] = '2 '
vcopy.iloc[2, 1] = '3 '
vcopy.iloc[2, 2] = '4 '
vcopy.iloc[0, 1] = '01/2015'
vcopy.iloc[0, 2] = '02/2015'
tfT.cells.int_format()
tfT.header.apply_format(lambda x: x.strftime('%m/%Y'))
pdtest.assert_frame_equal(vcopy, tfT.formatted_values)
| true | true |
1c49550b54ddcb81eae7fc9c5f4d29cbe211b580 | 3,020 | py | Python | samples/tests/create_delete_entity_test.py | dxiao2003/dialogflow-python-client-v2 | 05a1d3f0682de2c7d8c0c4db3fa5fea8934dfe72 | [
"Apache-2.0"
] | 1 | 2019-03-31T23:25:46.000Z | 2019-03-31T23:25:46.000Z | samples/tests/create_delete_entity_test.py | dxiao2003/dialogflow-python-client-v2 | 05a1d3f0682de2c7d8c0c4db3fa5fea8934dfe72 | [
"Apache-2.0"
] | 15 | 2020-01-28T23:14:29.000Z | 2022-02-10T00:40:40.000Z | samples/tests/create_delete_entity_test.py | dxiao2003/dialogflow-python-client-v2 | 05a1d3f0682de2c7d8c0c4db3fa5fea8934dfe72 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
from .. import entity_type_management
from .. import entity_management
# GCP project under test, taken from the environment.
PROJECT_ID = os.getenv('GCLOUD_PROJECT')
# Throwaway fixtures created and deleted by the tests below.
ENTITY_TYPE_DISPLAY_NAME = 'fake_entity_type_for_testing'
ENTITY_VALUE_1 = 'fake_entity_for_testing_1'
ENTITY_VALUE_2 = 'fake_entity_for_testing_2'
SYNONYMS = ['fake_synonym_for_testing_1', 'fake_synonym_for_testing_2']
def test_create_entity_type(capsys):
    """Create the fake entity type and verify it now exists exactly once."""
    entity_type_ids = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
    assert len(entity_type_ids) == 0
    entity_type = entity_type_management.create_entity_type(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME, 'KIND_MAP')
    out, _ = capsys.readouterr()
    assert 'display_name: "{}"'.format(ENTITY_TYPE_DISPLAY_NAME) in out
    entity_type_ids = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
    assert len(entity_type_ids) == 1
def test_create_entity(capsys):
    """Add two entities (one with synonyms) and verify they are listed."""
    entity_type_id = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)[0]
    entity_management.create_entity(
        PROJECT_ID, entity_type_id, ENTITY_VALUE_1, [])
    entity_management.create_entity(
        PROJECT_ID, entity_type_id, ENTITY_VALUE_2, SYNONYMS)
    entity_management.list_entities(PROJECT_ID, entity_type_id)
    out, _ = capsys.readouterr()
    assert 'Entity value: {}'.format(ENTITY_VALUE_1) in out
    assert 'Entity value: {}'.format(ENTITY_VALUE_2) in out
    for synonym in SYNONYMS:
        assert synonym in out
def test_delete_entity(capsys):
    """Delete both entities and verify the listing is now empty."""
    entity_type_id = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)[0]
    entity_management.delete_entity(
        PROJECT_ID, entity_type_id, ENTITY_VALUE_1)
    entity_management.delete_entity(
        PROJECT_ID, entity_type_id, ENTITY_VALUE_2)
    entity_management.list_entities(PROJECT_ID, entity_type_id)
    out, _ = capsys.readouterr()
    assert out == ''
def test_delete_entity_type(capsys):
    """Remove every matching entity type and verify none remain."""
    entity_type_ids = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
    for entity_type_id in entity_type_ids:
        entity_type_management.delete_entity_type(PROJECT_ID, entity_type_id)
    entity_type_ids = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
    assert len(entity_type_ids) == 0
| 33.555556 | 77 | 0.769536 |
from __future__ import absolute_import
import os
from .. import entity_type_management
from .. import entity_management
PROJECT_ID = os.getenv('GCLOUD_PROJECT')
ENTITY_TYPE_DISPLAY_NAME = 'fake_entity_type_for_testing'
ENTITY_VALUE_1 = 'fake_entity_for_testing_1'
ENTITY_VALUE_2 = 'fake_entity_for_testing_2'
SYNONYMS = ['fake_synonym_for_testing_1', 'fake_synonym_for_testing_2']
def test_create_entity_type(capsys):
entity_type_ids = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
assert len(entity_type_ids) == 0
entity_type = entity_type_management.create_entity_type(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME, 'KIND_MAP')
out, _ = capsys.readouterr()
assert 'display_name: "{}"'.format(ENTITY_TYPE_DISPLAY_NAME) in out
entity_type_ids = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
assert len(entity_type_ids) == 1
def test_create_entity(capsys):
entity_type_id = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)[0]
entity_management.create_entity(
PROJECT_ID, entity_type_id, ENTITY_VALUE_1, [])
entity_management.create_entity(
PROJECT_ID, entity_type_id, ENTITY_VALUE_2, SYNONYMS)
entity_management.list_entities(PROJECT_ID, entity_type_id)
out, _ = capsys.readouterr()
assert 'Entity value: {}'.format(ENTITY_VALUE_1) in out
assert 'Entity value: {}'.format(ENTITY_VALUE_2) in out
for synonym in SYNONYMS:
assert synonym in out
def test_delete_entity(capsys):
entity_type_id = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)[0]
entity_management.delete_entity(
PROJECT_ID, entity_type_id, ENTITY_VALUE_1)
entity_management.delete_entity(
PROJECT_ID, entity_type_id, ENTITY_VALUE_2)
entity_management.list_entities(PROJECT_ID, entity_type_id)
out, _ = capsys.readouterr()
assert out == ''
def test_delete_entity_type(capsys):
entity_type_ids = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
for entity_type_id in entity_type_ids:
entity_type_management.delete_entity_type(PROJECT_ID, entity_type_id)
entity_type_ids = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
assert len(entity_type_ids) == 0
| true | true |
1c4956b1aa878124dc5b391c52167a788f947529 | 10,521 | py | Python | trac/loader.py | netjunki/trac | db1015b33aa440ffcfd91689b8dcbb43db8e2a28 | [
"BSD-3-Clause"
] | 1 | 2017-08-03T07:04:40.000Z | 2017-08-03T07:04:40.000Z | trac/loader.py | trac-ja/trac-ja | 8defc74c222e3dbe154dfb5eb34e8c1a1f663558 | [
"BSD-3-Clause"
] | null | null | null | trac/loader.py | trac-ja/trac-ja | 8defc74c222e3dbe154dfb5eb34e8c1a1f663558 | [
"BSD-3-Clause"
] | 1 | 2021-02-16T09:00:13.000Z | 2021-02-16T09:00:13.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2006 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from glob import glob
import imp
import os.path
import pkg_resources
from pkg_resources import working_set, DistributionNotFound, VersionConflict, \
UnknownExtra
import sys
from trac.util import get_doc, get_module_path, get_sources, get_pkginfo
from trac.util.text import exception_to_unicode, to_unicode
__all__ = ['load_components']
def _enable_plugin(env, module):
    """Enable the given plugin module if it wasn't disabled explicitly."""
    # is_component_enabled() returning None means no explicit rule matched,
    # i.e. the user neither enabled nor disabled this component.
    if env.is_component_enabled(module) is None:
        env.enable_component(module)
def load_eggs(entry_point_name):
    """Loader that loads any eggs on the search path and `sys.path`.

    Returns a callable ``(env, search_path, auto_enable=None)`` that adds
    discovered distributions to the working set and loads every entry point
    registered under ``entry_point_name``.
    """
    def _load_eggs(env, search_path, auto_enable=None):
        # Note that the following doesn't seem to support unicode search_path
        distributions, errors = working_set.find_plugins(
            pkg_resources.Environment(search_path)
        )
        for dist in distributions:
            if dist not in working_set:
                env.log.debug('Adding plugin %s from %s', dist, dist.location)
                working_set.add(dist)
        def _log_error(item, e):
            # Log at a severity matching the error: missing dependencies are
            # merely debug noise; conflicts and real failures are errors.
            ue = exception_to_unicode(e)
            if isinstance(e, DistributionNotFound):
                env.log.debug('Skipping "%s": ("%s" not found)', item, ue)
            elif isinstance(e, VersionConflict):
                env.log.error('Skipping "%s": (version conflict "%s")',
                              item, ue)
            elif isinstance(e, UnknownExtra):
                env.log.error('Skipping "%s": (unknown extra "%s")', item, ue)
            else:
                env.log.error('Skipping "%s": %s', item,
                              exception_to_unicode(e, traceback=True))
        for dist, e in errors.iteritems():
            _log_error(dist, e)
        for entry in sorted(working_set.iter_entry_points(entry_point_name),
                            key=lambda entry: entry.name):
            env.log.debug('Loading %s from %s', entry.name, entry.dist.location)
            try:
                entry.load(require=True)
            except Exception, e:
                _log_error(entry, e)
            else:
                # Plugins dropped into the environment's own plugins dir are
                # auto-enabled without an explicit config entry.
                if os.path.dirname(entry.dist.location) == auto_enable:
                    _enable_plugin(env, entry.module_name)
    return _load_eggs
def load_py_files():
    """Loader that look for Python source files in the plugins directories,
    which simply get imported, thereby registering them with the component
    manager if they define any components.
    """
    def _load_py_files(env, search_path, auto_enable=None):
        for path in search_path:
            plugin_files = glob(os.path.join(path, '*.py'))
            for plugin_file in plugin_files:
                try:
                    plugin_name = os.path.basename(plugin_file[:-3])
                    env.log.debug('Loading file plugin %s from %s' % \
                                  (plugin_name, plugin_file))
                    # Skip modules already imported (e.g. shared between
                    # environments in the same process).
                    if plugin_name not in sys.modules:
                        module = imp.load_source(plugin_name, plugin_file)
                    # Single-file plugins in the env's own plugins dir are
                    # auto-enabled without an explicit config entry.
                    if path == auto_enable:
                        _enable_plugin(env, plugin_name)
                except Exception, e:
                    env.log.error('Failed to load plugin from %s: %s',
                                  plugin_file,
                                  exception_to_unicode(e, traceback=True))
    return _load_py_files
def get_plugins_dir(env):
    """Return the normalised real path of the environment's `plugins` dir."""
    # realpath resolves symlinks; normcase makes the result comparable on
    # case-insensitive file systems.
    return os.path.normcase(
        os.path.realpath(os.path.join(env.path, 'plugins')))
def load_components(env, extra_path=None, loaders=(load_eggs('trac.plugins'),
                                                   load_py_files())):
    """Load all plugin components found on the given search path."""
    # NOTE: the default `loaders` tuple is built once at import time; the
    # loader factories return stateless closures, so sharing them is fine.
    plugins_dir = get_plugins_dir(env)
    search_path = [plugins_dir]
    if extra_path:
        search_path += list(extra_path)
    for loadfunc in loaders:
        # Plugins found directly in the environment's own plugins
        # directory get auto-enabled.
        loadfunc(env, search_path, auto_enable=plugins_dir)
def get_plugin_info(env, include_core=False):
    """Return package information about Trac core and installed plugins.

    The result is a list of dicts (name, version, path, metadata, modules
    and their components), sorted with 'trac' itself first and the rest
    alphabetically.  Core is dropped unless `include_core` is true.
    """
    path_sources = {}
    def find_distribution(module):
        # Resolve the distribution that contains `module`, caching the
        # per-path source listings in `path_sources`.
        name = module.__name__
        path = get_module_path(module)
        sources = path_sources.get(path)
        if sources is None:
            sources = path_sources[path] = get_sources(path)
        dist = sources.get(name.replace('.', '/') + '.py')
        if dist is None:
            dist = sources.get(name.replace('.', '/') + '/__init__.py')
        if dist is None:
            # This is a plain Python source file, not an egg
            dist = pkg_resources.Distribution(project_name=name,
                                              version='',
                                              location=module.__file__)
        return dist
    plugins_dir = get_plugins_dir(env)
    plugins = {}
    from trac.core import ComponentMeta
    for component in ComponentMeta._components:
        module = sys.modules[component.__module__]
        dist = find_distribution(module)
        plugin_filename = None
        if os.path.realpath(os.path.dirname(dist.location)) == plugins_dir:
            plugin_filename = os.path.basename(dist.location)
        if dist.project_name not in plugins:
            readonly = True
            if plugin_filename and os.access(dist.location,
                                             os.F_OK + os.W_OK):
                readonly = False
            # retrieve plugin metadata
            info = get_pkginfo(dist)
            if not info:
                info = {}
                for k in ('author', 'author_email', 'home_page', 'url',
                          'license', 'trac'):
                    v = getattr(module, k, '')
                    if v and isinstance(v, basestring):
                        if k == 'home_page' or k == 'url':
                            k = 'home_page'
                            v = v.replace('$', '').replace('URL: ', '')
                        else:
                            v = to_unicode(v)
                        info[k] = v
            else:
                # Info found; set all those fields to "None" that have the
                # value "UNKNOWN" as this is the value for fields that
                # aren't specified in "setup.py"
                for k in info:
                    if info[k] == 'UNKNOWN':
                        info[k] = ''
                    else:
                        # Must be encoded as unicode as otherwise Genshi
                        # may raise a "UnicodeDecodeError".
                        info[k] = to_unicode(info[k])
            # retrieve plugin version info
            version = dist.version
            if not version:
                version = (getattr(module, 'version', '') or
                           getattr(module, 'revision', ''))
                # special handling for "$Rev$" strings
                version = version.replace('$', '').replace('Rev: ', 'r')
            plugins[dist.project_name] = {
                'name': dist.project_name, 'version': version,
                'path': dist.location, 'plugin_filename': plugin_filename,
                'readonly': readonly, 'info': info, 'modules': {},
            }
        modules = plugins[dist.project_name]['modules']
        if module.__name__ not in modules:
            summary, description = get_doc(module)
            plugins[dist.project_name]['modules'][module.__name__] = {
                'summary': summary, 'description': description,
                'components': {},
            }
        full_name = module.__name__ + '.' + component.__name__
        summary, description = get_doc(component)
        c = component
        # Instantiate the component only if it is active in this
        # environment (and not the environment class itself).
        if c in env and not issubclass(c, env.__class__):
            c = component(env)
        modules[module.__name__]['components'][component.__name__] = {
            'full_name': full_name,
            'summary': summary, 'description': description,
            'enabled': env.is_component_enabled(component),
            'required': getattr(c, 'required', False),
        }
    if not include_core:
        for name in plugins.keys():
            if name.lower() == 'trac':
                plugins.pop(name)
    return sorted(plugins.itervalues(),
                  key=lambda p: (p['name'].lower() != 'trac',
                                 p['name'].lower()))
def match_plugins_to_frames(plugins, frames):
    """Add a `frame_idx` element to plugin information as returned by
    `get_plugin_info()`, containing the index of the highest frame in the
    list that was located in the plugin.
    """
    # Frames whose filename starts with 'build/' come from zipped eggs and
    # can only be matched through the egg's SOURCES.txt metadata.
    egg_frames = [(i, f) for i, f in enumerate(frames)
                  if f['filename'].startswith('build/')]
    def find_egg_frame_index(plugin):
        for dist in pkg_resources.find_distributions(plugin['path'],
                                                     only=True):
            try:
                sources = dist.get_metadata('SOURCES.txt')
                for src in sources.splitlines():
                    if src.endswith('.py'):
                        # SOURCES.txt may use Windows path separators.
                        nsrc = src.replace('\\', '/')
                        for i, f in egg_frames:
                            if f['filename'].endswith(nsrc):
                                plugin['frame_idx'] = i
                                return
            except KeyError:
                pass # Metadata not found
    for plugin in plugins:
        base, ext = os.path.splitext(plugin['path'])
        if ext == '.egg' and egg_frames:
            find_egg_frame_index(plugin)
        else:
            # Non-egg plugin: match on the source path prefix directly.
            for i, f in enumerate(frames):
                if f['filename'].startswith(base):
                    plugin['frame_idx'] = i
                    break
| 42.084 | 80 | 0.550708 |
from glob import glob
import imp
import os.path
import pkg_resources
from pkg_resources import working_set, DistributionNotFound, VersionConflict, \
UnknownExtra
import sys
from trac.util import get_doc, get_module_path, get_sources, get_pkginfo
from trac.util.text import exception_to_unicode, to_unicode
__all__ = ['load_components']
def _enable_plugin(env, module):
"""Enable the given plugin module if it wasn't disabled explicitly."""
if env.is_component_enabled(module) is None:
env.enable_component(module)
def load_eggs(entry_point_name):
    """Loader that loads any eggs on the search path and `sys.path`."""
    def _load_eggs(env, search_path, auto_enable=None):
        # Note that the following doesn't seem to support unicode search_path
        distributions, errors = working_set.find_plugins(
            pkg_resources.Environment(search_path)
        )
        for dist in distributions:
            if dist not in working_set:
                env.log.debug('Adding plugin %s from %s', dist, dist.location)
                working_set.add(dist)
        def _log_error(item, e):
            # Map each pkg_resources failure to a suitable log level: a
            # missing dependency is routine (debug), the rest are errors.
            ue = exception_to_unicode(e)
            if isinstance(e, DistributionNotFound):
                env.log.debug('Skipping "%s": ("%s" not found)', item, ue)
            elif isinstance(e, VersionConflict):
                env.log.error('Skipping "%s": (version conflict "%s")',
                              item, ue)
            elif isinstance(e, UnknownExtra):
                env.log.error('Skipping "%s": (unknown extra "%s")', item, ue)
            else:
                env.log.error('Skipping "%s": %s', item,
                              exception_to_unicode(e, traceback=True))
        for dist, e in errors.iteritems():
            _log_error(dist, e)
        # Deterministic (name-sorted) order makes plugin initialisation
        # reproducible across runs.
        for entry in sorted(working_set.iter_entry_points(entry_point_name),
                            key=lambda entry: entry.name):
            env.log.debug('Loading %s from %s', entry.name, entry.dist.location)
            try:
                entry.load(require=True)
            except Exception, e:
                _log_error(entry, e)
            else:
                # Auto-enable only plugins living directly in `auto_enable`.
                if os.path.dirname(entry.dist.location) == auto_enable:
                    _enable_plugin(env, entry.module_name)
    return _load_eggs
def load_py_files():
    """Loader that look for Python source files in the plugins directories,
    which simply get imported, thereby registering them with the component
    manager if they define any components.
    """
    def _load_py_files(env, search_path, auto_enable=None):
        for path in search_path:
            plugin_files = glob(os.path.join(path, '*.py'))
            for plugin_file in plugin_files:
                try:
                    # Module name == file name without the '.py' suffix.
                    plugin_name = os.path.basename(plugin_file[:-3])
                    env.log.debug('Loading file plugin %s from %s' % \
                                  (plugin_name, plugin_file))
                    if plugin_name not in sys.modules:
                        module = imp.load_source(plugin_name, plugin_file)
                    if path == auto_enable:
                        _enable_plugin(env, plugin_name)
                except Exception, e:
                    # A broken plugin must not prevent the others (or the
                    # application itself) from starting; log and continue.
                    env.log.error('Failed to load plugin from %s: %s',
                                  plugin_file,
                                  exception_to_unicode(e, traceback=True))
    return _load_py_files
def get_plugins_dir(env):
    """Return the path to the `plugins` directory of the environment."""
    plugins_dir = os.path.realpath(os.path.join(env.path, 'plugins'))
    # normcase makes later comparisons reliable on case-insensitive
    # file systems (e.g. Windows).
    return os.path.normcase(plugins_dir)
def load_components(env, extra_path=None, loaders=(load_eggs('trac.plugins'),
                                                   load_py_files())):
    """Load all plugin components found on the given search path."""
    # NOTE: the default `loaders` tuple is built once at import time; the
    # loader factories return stateless closures, so sharing them is fine.
    plugins_dir = get_plugins_dir(env)
    search_path = [plugins_dir]
    if extra_path:
        search_path += list(extra_path)
    for loadfunc in loaders:
        # Plugins found directly in the environment's own plugins
        # directory get auto-enabled.
        loadfunc(env, search_path, auto_enable=plugins_dir)
def get_plugin_info(env, include_core=False):
    """Return package information about Trac core and installed plugins.

    The result is a list of dicts (name, version, path, metadata, modules
    and their components), sorted with 'trac' itself first and the rest
    alphabetically.  Core is dropped unless `include_core` is true.
    """
    path_sources = {}
    def find_distribution(module):
        # Resolve the distribution that contains `module`, caching the
        # per-path source listings in `path_sources`.
        name = module.__name__
        path = get_module_path(module)
        sources = path_sources.get(path)
        if sources is None:
            sources = path_sources[path] = get_sources(path)
        dist = sources.get(name.replace('.', '/') + '.py')
        if dist is None:
            dist = sources.get(name.replace('.', '/') + '/__init__.py')
        if dist is None:
            # Plain Python source file, not an egg: synthesise a
            # distribution object for it.
            dist = pkg_resources.Distribution(project_name=name,
                                              version='',
                                              location=module.__file__)
        return dist
    plugins_dir = get_plugins_dir(env)
    plugins = {}
    from trac.core import ComponentMeta
    for component in ComponentMeta._components:
        module = sys.modules[component.__module__]
        dist = find_distribution(module)
        plugin_filename = None
        if os.path.realpath(os.path.dirname(dist.location)) == plugins_dir:
            plugin_filename = os.path.basename(dist.location)
        if dist.project_name not in plugins:
            readonly = True
            if plugin_filename and os.access(dist.location,
                                             os.F_OK + os.W_OK):
                readonly = False
            # retrieve plugin metadata
            info = get_pkginfo(dist)
            if not info:
                # No packaging metadata: fall back to module-level
                # attributes (author, license, ...).
                info = {}
                for k in ('author', 'author_email', 'home_page', 'url',
                          'license', 'trac'):
                    v = getattr(module, k, '')
                    if v and isinstance(v, basestring):
                        if k == 'home_page' or k == 'url':
                            k = 'home_page'
                            v = v.replace('$', '').replace('URL: ', '')
                        else:
                            v = to_unicode(v)
                        info[k] = v
            else:
                # Info found; blank out fields whose value is "UNKNOWN",
                # the placeholder for fields not specified in setup.py.
                for k in info:
                    if info[k] == 'UNKNOWN':
                        info[k] = ''
                    else:
                        # Must be encoded as unicode as otherwise Genshi
                        # may raise a "UnicodeDecodeError".
                        info[k] = to_unicode(info[k])
            # retrieve plugin version info
            version = dist.version
            if not version:
                version = (getattr(module, 'version', '') or
                           getattr(module, 'revision', ''))
                # special handling for "$Rev$" strings
                version = version.replace('$', '').replace('Rev: ', 'r')
            plugins[dist.project_name] = {
                'name': dist.project_name, 'version': version,
                'path': dist.location, 'plugin_filename': plugin_filename,
                'readonly': readonly, 'info': info, 'modules': {},
            }
        modules = plugins[dist.project_name]['modules']
        if module.__name__ not in modules:
            summary, description = get_doc(module)
            plugins[dist.project_name]['modules'][module.__name__] = {
                'summary': summary, 'description': description,
                'components': {},
            }
        full_name = module.__name__ + '.' + component.__name__
        summary, description = get_doc(component)
        c = component
        # Instantiate the component only if it is active in this
        # environment (and not the environment class itself).
        if c in env and not issubclass(c, env.__class__):
            c = component(env)
        modules[module.__name__]['components'][component.__name__] = {
            'full_name': full_name,
            'summary': summary, 'description': description,
            'enabled': env.is_component_enabled(component),
            'required': getattr(c, 'required', False),
        }
    if not include_core:
        for name in plugins.keys():
            if name.lower() == 'trac':
                plugins.pop(name)
    return sorted(plugins.itervalues(),
                  key=lambda p: (p['name'].lower() != 'trac',
                                 p['name'].lower()))
def match_plugins_to_frames(plugins, frames):
    """Add a `frame_idx` element to plugin information as returned by
    `get_plugin_info()`, containing the index of the highest frame in the
    list that was located in the plugin.
    """
    # Frames whose filename starts with 'build/' come from zipped eggs and
    # can only be matched through the egg's SOURCES.txt metadata.
    egg_frames = [(i, f) for i, f in enumerate(frames)
                  if f['filename'].startswith('build/')]
    def find_egg_frame_index(plugin):
        for dist in pkg_resources.find_distributions(plugin['path'],
                                                     only=True):
            try:
                sources = dist.get_metadata('SOURCES.txt')
                for src in sources.splitlines():
                    if src.endswith('.py'):
                        # SOURCES.txt may use Windows path separators.
                        nsrc = src.replace('\\', '/')
                        for i, f in egg_frames:
                            if f['filename'].endswith(nsrc):
                                plugin['frame_idx'] = i
                                return
            except KeyError:
                pass # Metadata not found
    for plugin in plugins:
        base, ext = os.path.splitext(plugin['path'])
        if ext == '.egg' and egg_frames:
            find_egg_frame_index(plugin)
        else:
            # Non-egg plugin: match on the source path prefix directly.
            for i, f in enumerate(frames):
                if f['filename'].startswith(base):
                    plugin['frame_idx'] = i
                    break
| false | true |
1c495713f2d7d3192ab5567d3b26f08e034a69eb | 11,617 | py | Python | pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py | onlyrico/PyABSA | d0905eb5253eaa564d2244cd777e3a734bca777a | [
"MIT"
] | null | null | null | pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py | onlyrico/PyABSA | d0905eb5253eaa564d2244cd777e3a734bca777a | [
"MIT"
] | null | null | null | pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py | onlyrico/PyABSA | d0905eb5253eaa564d2244cd777e3a734bca777a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# file: data_utils.py
# author: songyouwei <youwei0314@gmail.com>
# Copyright (C) 2018. All Rights Reserved.
import os
import pickle
import numpy as np
import tqdm
from findfile import find_file
from google_drive_downloader.google_drive_downloader import GoogleDriveDownloader as gdd
from torch.utils.data import Dataset
from transformers import AutoTokenizer
from pyabsa.core.apc.classic.__glove__.dataset_utils.dependency_graph import prepare_dependency_graph
from pyabsa.core.apc.dataset_utils.apc_utils import load_apc_datasets
from pyabsa.utils.pyabsa_utils import check_and_fix_labels
def prepare_glove840_embedding(glove_path):
    """Locate a GloVe embedding text file at/near `glove_path`, downloading
    glove.840B.300d from Google Drive when none is found locally.

    Returns the embedding file path (or the raw `find_file` result after a
    download -- see the review note below).
    """
    glove840_id = '1G-vd6W1oF9ByyJ-pzp9dcqKnr_plh4Em'  # Google Drive file id
    if not os.path.exists(glove_path):
        os.mkdir(glove_path)
    elif os.path.isfile(glove_path):
        # Caller already pointed us directly at an embedding file.
        return glove_path
    elif os.path.isdir(glove_path):
        embedding_file = None
        dir_path = os.path.dirname(glove_path)
        if find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip'):
            embedding_file = find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip')[0]
        elif find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip'):
            embedding_file = find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip')[0]
        elif find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip'):
            embedding_file = find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip')[0]
        if embedding_file:
            print('Find potential embedding files: {}'.format(embedding_file))
            return embedding_file
    # No local embedding found: download the zip and unzip it in place.
    zip_glove_path = os.path.join(glove_path, '__glove__.840B.300d.txt.zip')
    print('No GloVe embedding found at {},'
          ' downloading __glove__.840B.300d.txt (2GB transferred / 5.5GB unzipped)...'.format(glove_path))
    gdd.download_file_from_google_drive(file_id=glove840_id,
                                        dest_path=zip_glove_path,
                                        unzip=True
                                        )
    # NOTE(review): unlike the branches above, this assigns the find_file()
    # result without indexing [0] -- presumably a list; verify the caller.
    glove_path = find_file(glove_path, 'txt', exclude_key='.zip')
    return glove_path
def build_tokenizer(dataset_list, max_seq_len, dat_fname, opt):
    """Build (or load a cached) whitespace `Tokenizer` fitted on all texts.

    :param dataset_list: mapping of dataset type -> list of data files,
        where files are triplets of lines: context with "$T$" marking the
        aspect, the aspect term, and the polarity label.
    :param max_seq_len: fixed output length of the tokenizer.
    :param dat_fname: cache file name under ``opt.dataset_path``.
    :param opt: option namespace providing ``dataset_path``.
    :return: a fitted :class:`Tokenizer`.
    """
    dat_path = os.path.join(opt.dataset_path, dat_fname)
    if os.path.exists(dat_path):
        print('Loading tokenizer on {}'.format(dat_path))
        # FIX: the original passed open() results straight to pickle and
        # never closed them; `with` guarantees the handles are released.
        with open(dat_path, 'rb') as f:
            tokenizer = pickle.load(f)
    else:
        text = ''
        for dataset_type in dataset_list:
            for file in dataset_list[dataset_type]:
                with open(file, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
                    lines = fin.readlines()
                # Records are 3-line groups; line 0 is the context with the
                # "$T$" placeholder, line 1 the aspect term.
                for i in range(0, len(lines), 3):
                    text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
                    aspect = lines[i + 1].lower().strip()
                    text_raw = text_left + " " + aspect + " " + text_right
                    text += text_raw + " "
        tokenizer = Tokenizer(max_seq_len)
        tokenizer.fit_on_text(text)
        with open(dat_path, 'wb') as f:
            pickle.dump(tokenizer, f)
    return tokenizer
def _load_word_vec(path, word2idx=None, embed_dim=300):
    """Read a GloVe-style text file and return a ``{word: vector}`` dict.

    Only words present in ``word2idx`` are kept; if ``word2idx`` is None
    (the default), every word in the file is loaded.

    :param path: path to the whitespace-separated embedding file.
    :param embed_dim: number of trailing fields that form the vector.
    """
    word_vec = {}
    # FIX: close the file deterministically (original handle leaked).
    with open(path, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        for line in tqdm.tqdm(fin, postfix='Loading embedding file...'):
            tokens = line.rstrip().split()
            # GloVe "words" may contain spaces: everything except the last
            # `embed_dim` fields belongs to the word.
            word, vec = ' '.join(tokens[:-embed_dim]), tokens[-embed_dim:]
            # FIX: the original called word2idx.keys() unconditionally and
            # crashed with AttributeError when word2idx was None, despite
            # None being the declared default.
            if word2idx is None or word in word2idx:
                word_vec[word] = np.asarray(vec, dtype='float32')
    return word_vec
def build_embedding_matrix(word2idx, embed_dim, dat_fname, opt):
    """Build (or load a cached) embedding matrix aligned with `word2idx`.

    Row i holds the GloVe vector of the word with index i; rows for words
    missing from the embedding file stay all-zeros.
    """
    if os.path.exists(os.path.join(opt.dataset_path, dat_fname)):
        print('Loading cached embedding_matrix for {}'.format(os.path.join(opt.dataset_path, dat_fname)))
        # NOTE(review): the open() handles passed to pickle here and below
        # are never closed explicitly; consider `with open(...)`.
        embedding_matrix = pickle.load(open(os.path.join(opt.dataset_path, dat_fname), 'rb'))
    else:
        print('Extracting embedding_matrix for {}'.format(dat_fname))
        glove_path = prepare_glove840_embedding(opt.dataset_path)
        embedding_matrix = np.zeros((len(word2idx) + 2, embed_dim))  # idx 0 and len(word2idx)+1 are all-zeros
        word_vec = _load_word_vec(glove_path, word2idx=word2idx, embed_dim=embed_dim)
        for word, i in tqdm.tqdm(word2idx.items(), postfix='Building embedding_matrix {}'.format(dat_fname)):
            vec = word_vec.get(word)
            if vec is not None:
                # words not found in embedding index will be all-zeros.
                embedding_matrix[i] = vec
        pickle.dump(embedding_matrix, open(os.path.join(opt.dataset_path, dat_fname), 'wb'))
    return embedding_matrix
def pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):
    """Clip `sequence` to `maxlen` and pad the remainder with `value`.

    `truncating` selects which end is cut and `padding` which end is
    filled ('pre' or 'post').  Returns a numpy array of length `maxlen`.
    """
    padded = np.full(maxlen, value, dtype=dtype)
    clipped = np.asarray(
        sequence[-maxlen:] if truncating == 'pre' else sequence[:maxlen],
        dtype=dtype)
    if padding == 'post':
        padded[:len(clipped)] = clipped
    else:
        padded[-len(clipped):] = clipped
    return padded
class Tokenizer(object):
    """Whitespace tokenizer with a growing word -> index vocabulary.

    Index 0 is reserved for padding; out-of-vocabulary words map to
    ``len(word2idx) + 1`` at encoding time.
    """
    def __init__(self, max_seq_len, lower=True):
        self.lower = lower
        self.max_seq_len = max_seq_len
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 1  # 0 is the padding index
    def fit_on_text(self, text):
        """Extend the vocabulary with every new whitespace token in `text`."""
        if self.lower:
            text = text.lower()
        for token in text.split():
            if token in self.word2idx:
                continue
            self.word2idx[token] = self.idx
            self.idx2word[self.idx] = token
            self.idx += 1
    def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
        """Encode `text` as a fixed-length index array (OOV -> unknown id)."""
        if self.lower:
            text = text.lower()
        unknownidx = len(self.word2idx) + 1
        sequence = [self.word2idx.get(token, unknownidx) for token in text.split()]
        if not sequence:
            sequence = [0]
        if reverse:
            sequence = list(reversed(sequence))
        return pad_and_truncate(sequence, self.max_seq_len,
                                padding=padding, truncating=truncating)
class Tokenizer4Pretraining:
    """Wrapper around a HuggingFace tokenizer that produces fixed-length
    id sequences via `pad_and_truncate`."""
    def __init__(self, max_seq_len, pretrained_bert_name):
        # Loads (and possibly downloads) the pretrained vocabulary.
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_name)
        self.max_seq_len = max_seq_len
    def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
        """Tokenize `text` and return a padded/truncated id array."""
        sequence = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
        if len(sequence) == 0:
            sequence = [0]
        if reverse:
            sequence = sequence[::-1]
        return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)
class BERTBaselineABSADataset(Dataset):
    """Torch Dataset for aspect-based sentiment analysis baselines.

    Each sample is built from a 3-line record: the context with "$T$"
    marking the aspect position, the aspect term, and the polarity label.
    """
    # Input columns consumed by each supported baseline model.
    bert_baseline_input_colses = {
        'lstm_bert': ['text_indices'],
        'td_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices'],
        'tc_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices', 'aspect_indices'],
        'atae_lstm_bert': ['text_indices', 'aspect_indices'],
        'ian_bert': ['text_indices', 'aspect_indices'],
        'memnet_bert': ['context_indices', 'aspect_indices'],
        'ram_bert': ['text_indices', 'aspect_indices', 'left_indices'],
        'cabasc_bert': ['text_indices', 'aspect_indices', 'left_with_aspect_indices', 'right_with_aspect_indices'],
        'tnet_lf_bert': ['text_indices', 'aspect_indices', 'aspect_boundary'],
        'aoa_bert': ['text_indices', 'aspect_indices'],
        'mgan_bert': ['text_indices', 'aspect_indices', 'left_indices'],
        'asgcn_bert': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],
    }
    def __init__(self, dataset_list, tokenizer, opt):
        lines = load_apc_datasets(dataset_list)
        all_data = []
        label_set = set()
        if not os.path.exists(opt.dataset_path):
            os.mkdir(os.path.join(os.getcwd(), opt.dataset_path))
        opt.dataset_path = os.path.join(os.getcwd(), opt.dataset_path)
        graph_path = prepare_dependency_graph(dataset_list, opt.dataset_path, opt.max_seq_len)
        # NOTE(review): this handle is never closed in this method.
        fin = open(graph_path, 'rb')
        idx2graph = pickle.load(fin)
        # Records come in 3-line groups: context / aspect / polarity.
        for i in tqdm.tqdm(range(0, len(lines), 3), postfix='building word indices...'):
            text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
            aspect = lines[i + 1].lower().strip()
            polarity = lines[i + 2].strip()
            text_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + ' ' + aspect + ' ' + text_right + " [SEP]")
            context_indices = tokenizer.text_to_sequence(text_left + text_right)
            left_indices = tokenizer.text_to_sequence(text_left)
            left_with_aspect_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + " " + aspect + " [SEP]")
            right_indices = tokenizer.text_to_sequence(text_right, reverse=False)
            right_with_aspect_indices = tokenizer.text_to_sequence(aspect + " " + text_right, reverse=False)
            aspect_indices = tokenizer.text_to_sequence(aspect)
            aspect_len = np.sum(aspect_indices != 0)
            # Clip the left context so aspect + left fit into max_seq_len.
            left_len = min(opt.max_seq_len - aspect_len, np.sum(left_indices != 0))
            left_indices = np.concatenate((left_indices[:left_len], np.asarray([0] * (opt.max_seq_len - left_len))))
            # [start, end] token positions of the aspect (inclusive).
            aspect_boundary = np.asarray([left_len, left_len + aspect_len - 1], dtype=np.int64)
            polarity = int(polarity)
            # Pad the adjacency matrix to max_seq_len, then crop it square.
            dependency_graph = np.pad(idx2graph[i],
                                      ((0, max(0, opt.max_seq_len - idx2graph[i].shape[0])),
                                       (0, max(0, opt.max_seq_len - idx2graph[i].shape[0]))),
                                      'constant')
            dependency_graph = dependency_graph[:, range(0, opt.max_seq_len)]
            dependency_graph = dependency_graph[range(0, opt.max_seq_len), :]
            # Only materialise the columns the selected model consumes;
            # unused columns are stored as 0 placeholders.
            data = {
                'text_indices': text_indices
                if 'text_indices' in opt.model.inputs else 0,
                'context_indices': context_indices
                if 'context_indices' in opt.model.inputs else 0,
                'left_indices': left_indices
                if 'left_indices' in opt.model.inputs else 0,
                'left_with_aspect_indices': left_with_aspect_indices
                if 'left_with_aspect_indices' in opt.model.inputs else 0,
                'right_indices': right_indices
                if 'right_indices' in opt.model.inputs else 0,
                'right_with_aspect_indices': right_with_aspect_indices
                if 'right_with_aspect_indices' in opt.model.inputs else 0,
                'aspect_indices': aspect_indices
                if 'aspect_indices' in opt.model.inputs else 0,
                'aspect_boundary': aspect_boundary
                if 'aspect_boundary' in opt.model.inputs else 0,
                'dependency_graph': dependency_graph
                if 'dependency_graph' in opt.model.inputs else 0,
                'polarity': polarity,
            }
            label_set.add(polarity)
            all_data.append(data)
        check_and_fix_labels(label_set, 'polarity', all_data)
        opt.polarities_dim = len(label_set)
        self.data = all_data
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
| 44.003788 | 120 | 0.627615 |
import os
import pickle
import numpy as np
import tqdm
from findfile import find_file
from google_drive_downloader.google_drive_downloader import GoogleDriveDownloader as gdd
from torch.utils.data import Dataset
from transformers import AutoTokenizer
from pyabsa.core.apc.classic.__glove__.dataset_utils.dependency_graph import prepare_dependency_graph
from pyabsa.core.apc.dataset_utils.apc_utils import load_apc_datasets
from pyabsa.utils.pyabsa_utils import check_and_fix_labels
def prepare_glove840_embedding(glove_path):
    """Locate a GloVe embedding text file at/near `glove_path`, downloading
    glove.840B.300d from Google Drive when none is found locally.

    Returns the embedding file path (or the raw `find_file` result after a
    download -- see the review note below).
    """
    glove840_id = '1G-vd6W1oF9ByyJ-pzp9dcqKnr_plh4Em'  # Google Drive file id
    if not os.path.exists(glove_path):
        os.mkdir(glove_path)
    elif os.path.isfile(glove_path):
        # Caller already pointed us directly at an embedding file.
        return glove_path
    elif os.path.isdir(glove_path):
        embedding_file = None
        dir_path = os.path.dirname(glove_path)
        if find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip'):
            embedding_file = find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip')[0]
        elif find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip'):
            embedding_file = find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip')[0]
        elif find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip'):
            embedding_file = find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip')[0]
        if embedding_file:
            print('Find potential embedding files: {}'.format(embedding_file))
            return embedding_file
    # No local embedding found: download the zip and unzip it in place.
    zip_glove_path = os.path.join(glove_path, '__glove__.840B.300d.txt.zip')
    print('No GloVe embedding found at {},'
          ' downloading __glove__.840B.300d.txt (2GB transferred / 5.5GB unzipped)...'.format(glove_path))
    gdd.download_file_from_google_drive(file_id=glove840_id,
                                        dest_path=zip_glove_path,
                                        unzip=True
                                        )
    # NOTE(review): unlike the branches above, this assigns the find_file()
    # result without indexing [0] -- presumably a list; verify the caller.
    glove_path = find_file(glove_path, 'txt', exclude_key='.zip')
    return glove_path
def build_tokenizer(dataset_list, max_seq_len, dat_fname, opt):
    """Build (or load a cached) whitespace `Tokenizer` fitted on all texts.

    :param dataset_list: mapping of dataset type -> list of data files,
        where files are triplets of lines: context with "$T$" marking the
        aspect, the aspect term, and the polarity label.
    :param max_seq_len: fixed output length of the tokenizer.
    :param dat_fname: cache file name under ``opt.dataset_path``.
    :param opt: option namespace providing ``dataset_path``.
    :return: a fitted :class:`Tokenizer`.
    """
    dat_path = os.path.join(opt.dataset_path, dat_fname)
    if os.path.exists(dat_path):
        print('Loading tokenizer on {}'.format(dat_path))
        # FIX: the original passed open() results straight to pickle and
        # never closed them; `with` guarantees the handles are released.
        with open(dat_path, 'rb') as f:
            tokenizer = pickle.load(f)
    else:
        text = ''
        for dataset_type in dataset_list:
            for file in dataset_list[dataset_type]:
                with open(file, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
                    lines = fin.readlines()
                # Records are 3-line groups; line 0 is the context with the
                # "$T$" placeholder, line 1 the aspect term.
                for i in range(0, len(lines), 3):
                    text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
                    aspect = lines[i + 1].lower().strip()
                    text_raw = text_left + " " + aspect + " " + text_right
                    text += text_raw + " "
        tokenizer = Tokenizer(max_seq_len)
        tokenizer.fit_on_text(text)
        with open(dat_path, 'wb') as f:
            pickle.dump(tokenizer, f)
    return tokenizer
def _load_word_vec(path, word2idx=None, embed_dim=300):
    """Read a GloVe-style text file and return a ``{word: vector}`` dict.

    Only words present in ``word2idx`` are kept; if ``word2idx`` is None
    (the default), every word in the file is loaded.

    :param path: path to the whitespace-separated embedding file.
    :param embed_dim: number of trailing fields that form the vector.
    """
    word_vec = {}
    # FIX: close the file deterministically (original handle leaked).
    with open(path, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        for line in tqdm.tqdm(fin, postfix='Loading embedding file...'):
            tokens = line.rstrip().split()
            # GloVe "words" may contain spaces: everything except the last
            # `embed_dim` fields belongs to the word.
            word, vec = ' '.join(tokens[:-embed_dim]), tokens[-embed_dim:]
            # FIX: the original called word2idx.keys() unconditionally and
            # crashed with AttributeError when word2idx was None, despite
            # None being the declared default.
            if word2idx is None or word in word2idx:
                word_vec[word] = np.asarray(vec, dtype='float32')
    return word_vec
def build_embedding_matrix(word2idx, embed_dim, dat_fname, opt):
    """Build (or load a cached) embedding matrix aligned with `word2idx`.

    Row i holds the GloVe vector of the word with index i; rows for words
    missing from the embedding file stay all-zeros (index 0 and
    len(word2idx)+1 are reserved and stay zero as well).
    """
    if os.path.exists(os.path.join(opt.dataset_path, dat_fname)):
        print('Loading cached embedding_matrix for {}'.format(os.path.join(opt.dataset_path, dat_fname)))
        # NOTE(review): the open() handles passed to pickle here and below
        # are never closed explicitly; consider `with open(...)`.
        embedding_matrix = pickle.load(open(os.path.join(opt.dataset_path, dat_fname), 'rb'))
    else:
        print('Extracting embedding_matrix for {}'.format(dat_fname))
        glove_path = prepare_glove840_embedding(opt.dataset_path)
        embedding_matrix = np.zeros((len(word2idx) + 2, embed_dim))
        word_vec = _load_word_vec(glove_path, word2idx=word2idx, embed_dim=embed_dim)
        for word, i in tqdm.tqdm(word2idx.items(), postfix='Building embedding_matrix {}'.format(dat_fname)):
            vec = word_vec.get(word)
            if vec is not None:
                # Words not found in the embedding file keep all-zero rows.
                embedding_matrix[i] = vec
        pickle.dump(embedding_matrix, open(os.path.join(opt.dataset_path, dat_fname), 'wb'))
    return embedding_matrix
def pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):
    """Clip `sequence` to `maxlen` and pad the remainder with `value`.

    `truncating` selects which end is cut and `padding` which end is
    filled ('pre' or 'post').  Returns a numpy array of length `maxlen`.
    """
    padded = np.full(maxlen, value, dtype=dtype)
    clipped = np.asarray(
        sequence[-maxlen:] if truncating == 'pre' else sequence[:maxlen],
        dtype=dtype)
    if padding == 'post':
        padded[:len(clipped)] = clipped
    else:
        padded[-len(clipped):] = clipped
    return padded
class Tokenizer(object):
    """Whitespace tokenizer with a growing word -> index vocabulary.

    Index 0 is reserved for padding; out-of-vocabulary words map to
    ``len(word2idx) + 1`` at encoding time.
    """
    def __init__(self, max_seq_len, lower=True):
        self.lower = lower
        self.max_seq_len = max_seq_len
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 1  # 0 is the padding index
    def fit_on_text(self, text):
        """Extend the vocabulary with every new whitespace token in `text`."""
        if self.lower:
            text = text.lower()
        for token in text.split():
            if token in self.word2idx:
                continue
            self.word2idx[token] = self.idx
            self.idx2word[self.idx] = token
            self.idx += 1
    def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
        """Encode `text` as a fixed-length index array (OOV -> unknown id)."""
        if self.lower:
            text = text.lower()
        unknownidx = len(self.word2idx) + 1
        sequence = [self.word2idx.get(token, unknownidx) for token in text.split()]
        if not sequence:
            sequence = [0]
        if reverse:
            sequence = list(reversed(sequence))
        return pad_and_truncate(sequence, self.max_seq_len,
                                padding=padding, truncating=truncating)
class Tokenizer4Pretraining:
    """Wrapper around a HuggingFace tokenizer that produces fixed-length
    id sequences via `pad_and_truncate`."""
    def __init__(self, max_seq_len, pretrained_bert_name):
        # Loads (and possibly downloads) the pretrained vocabulary.
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_name)
        self.max_seq_len = max_seq_len
    def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
        """Tokenize `text` and return a padded/truncated id array."""
        sequence = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
        if len(sequence) == 0:
            sequence = [0]
        if reverse:
            sequence = sequence[::-1]
        return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)
class BERTBaselineABSADataset(Dataset):
    """Torch Dataset for aspect-based sentiment analysis baselines.

    Each sample is built from a 3-line record: the context with "$T$"
    marking the aspect position, the aspect term, and the polarity label.
    """
    # Input columns consumed by each supported baseline model.
    bert_baseline_input_colses = {
        'lstm_bert': ['text_indices'],
        'td_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices'],
        'tc_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices', 'aspect_indices'],
        'atae_lstm_bert': ['text_indices', 'aspect_indices'],
        'ian_bert': ['text_indices', 'aspect_indices'],
        'memnet_bert': ['context_indices', 'aspect_indices'],
        'ram_bert': ['text_indices', 'aspect_indices', 'left_indices'],
        'cabasc_bert': ['text_indices', 'aspect_indices', 'left_with_aspect_indices', 'right_with_aspect_indices'],
        'tnet_lf_bert': ['text_indices', 'aspect_indices', 'aspect_boundary'],
        'aoa_bert': ['text_indices', 'aspect_indices'],
        'mgan_bert': ['text_indices', 'aspect_indices', 'left_indices'],
        'asgcn_bert': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],
    }
    def __init__(self, dataset_list, tokenizer, opt):
        lines = load_apc_datasets(dataset_list)
        all_data = []
        label_set = set()
        if not os.path.exists(opt.dataset_path):
            os.mkdir(os.path.join(os.getcwd(), opt.dataset_path))
        opt.dataset_path = os.path.join(os.getcwd(), opt.dataset_path)
        graph_path = prepare_dependency_graph(dataset_list, opt.dataset_path, opt.max_seq_len)
        # NOTE(review): this handle is never closed in this method.
        fin = open(graph_path, 'rb')
        idx2graph = pickle.load(fin)
        # Records come in 3-line groups: context / aspect / polarity.
        for i in tqdm.tqdm(range(0, len(lines), 3), postfix='building word indices...'):
            text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
            aspect = lines[i + 1].lower().strip()
            polarity = lines[i + 2].strip()
            text_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + ' ' + aspect + ' ' + text_right + " [SEP]")
            context_indices = tokenizer.text_to_sequence(text_left + text_right)
            left_indices = tokenizer.text_to_sequence(text_left)
            left_with_aspect_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + " " + aspect + " [SEP]")
            right_indices = tokenizer.text_to_sequence(text_right, reverse=False)
            right_with_aspect_indices = tokenizer.text_to_sequence(aspect + " " + text_right, reverse=False)
            aspect_indices = tokenizer.text_to_sequence(aspect)
            aspect_len = np.sum(aspect_indices != 0)
            # Clip the left context so aspect + left fit into max_seq_len.
            left_len = min(opt.max_seq_len - aspect_len, np.sum(left_indices != 0))
            left_indices = np.concatenate((left_indices[:left_len], np.asarray([0] * (opt.max_seq_len - left_len))))
            # [start, end] token positions of the aspect (inclusive).
            aspect_boundary = np.asarray([left_len, left_len + aspect_len - 1], dtype=np.int64)
            polarity = int(polarity)
            # Pad the adjacency matrix to max_seq_len, then crop it square.
            dependency_graph = np.pad(idx2graph[i],
                                      ((0, max(0, opt.max_seq_len - idx2graph[i].shape[0])),
                                       (0, max(0, opt.max_seq_len - idx2graph[i].shape[0]))),
                                      'constant')
            dependency_graph = dependency_graph[:, range(0, opt.max_seq_len)]
            dependency_graph = dependency_graph[range(0, opt.max_seq_len), :]
            # Only materialise the columns the selected model consumes;
            # unused columns are stored as 0 placeholders.
            data = {
                'text_indices': text_indices
                if 'text_indices' in opt.model.inputs else 0,
                'context_indices': context_indices
                if 'context_indices' in opt.model.inputs else 0,
                'left_indices': left_indices
                if 'left_indices' in opt.model.inputs else 0,
                'left_with_aspect_indices': left_with_aspect_indices
                if 'left_with_aspect_indices' in opt.model.inputs else 0,
                'right_indices': right_indices
                if 'right_indices' in opt.model.inputs else 0,
                'right_with_aspect_indices': right_with_aspect_indices
                if 'right_with_aspect_indices' in opt.model.inputs else 0,
                'aspect_indices': aspect_indices
                if 'aspect_indices' in opt.model.inputs else 0,
                'aspect_boundary': aspect_boundary
                if 'aspect_boundary' in opt.model.inputs else 0,
                'dependency_graph': dependency_graph
                if 'dependency_graph' in opt.model.inputs else 0,
                'polarity': polarity,
            }
            label_set.add(polarity)
            all_data.append(data)
        check_and_fix_labels(label_set, 'polarity', all_data)
        opt.polarities_dim = len(label_set)
        self.data = all_data
    def __getitem__(self, index):
        return self.data[index]
    def __len__(self):
        return len(self.data)
| true | true |
1c49582dab9e0f3c90ba1de50ff9860965a98b5d | 421 | py | Python | labs/4.1/server.py | alexellis/docker-blinkt-workshop | ae2204bbc85658b111e864ae4b39b05583eb4ebf | [
"MIT"
] | 171 | 2017-04-10T19:09:36.000Z | 2022-03-04T16:06:30.000Z | labs/4.1/server.py | mcne65/docker-blinkt-workshop | ae2204bbc85658b111e864ae4b39b05583eb4ebf | [
"MIT"
] | 4 | 2017-04-17T19:33:46.000Z | 2017-08-02T17:46:18.000Z | labs/4.1/server.py | mcne65/docker-blinkt-workshop | ae2204bbc85658b111e864ae4b39b05583eb4ebf | [
"MIT"
] | 30 | 2017-04-17T19:03:54.000Z | 2022-03-04T16:06:31.000Z | from flask import Flask, request, render_template
import json
# Module-level WSGI application; the route below attaches to it.
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
    """Return the SoC temperature as a JSON payload.

    The value is the raw string from the kernel thermal zone (millidegrees
    Celsius on typical Linux systems such as the Raspberry Pi).
    """
    # FIX: `with` guarantees the sysfs handle is closed even when read()
    # raises; the original leaked it on error.
    with open("/sys/class/thermal/thermal_zone0/temp") as file:
        data = file.read().rstrip()  # remove trailing '\n' newline character.
    payload = json.dumps({ "temperature": data })
    return payload
if __name__ == '__main__':
    # Bind to all interfaces so the endpoint is reachable from outside
    # the host/container.
    app.run(debug=False, host='0.0.0.0')
| 26.3125 | 73 | 0.657957 | from flask import Flask, request, render_template
import json
# Module-level WSGI application; the route below attaches to it.
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
file = open("/sys/class/thermal/thermal_zone0/temp")
data = file.read().rstrip()
file.close()
payload = json.dumps({ "temperature": data })
return payload
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0')
| true | true |
1c4958fcf0f982431d31d142ff78a7ab416fc6e0 | 10,841 | py | Python | docs/source/conf.py | wlad111/pymc3 | 43432834be5bbca72caa32d40a848515eea554a8 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | wlad111/pymc3 | 43432834be5bbca72caa32d40a848515eea554a8 | [
"Apache-2.0"
] | null | null | null | docs/source/conf.py | wlad111/pymc3 | 43432834be5bbca72caa32d40a848515eea554a8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pymc3_ext documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 26 14:40:23 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pymc3_ext
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
sys.path.insert(0, os.path.abspath("sphinxext"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"matplotlib.sphinxext.plot_directive",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.mathjax",
"nbsphinx",
"numpydoc",
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"sphinx.ext.autosectionlabel",
"sphinx.ext.napoleon",
"gallery_generator",
"recommonmark",
]
# Don't auto-generate summary for class members.
numpydoc_show_class_members = False
# Show the documentation of __init__ and the class docstring
autoclass_content = "both"
# Do not show the return type as seperate section
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "PyMC3"
copyright = "2018, The PyMC Development Team"
author = "PyMC developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pymc3_ext.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
nbsphinx_execute = "never"
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "friendly"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = ["."]
html_theme = "semantic_sphinx"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"navbar_links": [
("Tutorials", "nb_tutorials/index"),
("Examples", "nb_examples/index"),
("Books + Videos", "learn"),
("API", "api"),
("Developer Guide", "developer_guide"),
("About PyMC3", "history")
],
# "fixed_sidebar": "false",
# "description": "Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with Theano"
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../pymc3_logo.jpg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "../logos/PyMC3.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static", "nb_tutorials/_images", "nb_examples/_images"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {"**": ["about.html", "navigation.html", "searchbox.html"]}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "pymc3doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "pymc3_ext.tex", "PyMC3 Documentation", "PyMC developers", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pymc3_ext", "pymc3_ext Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pymc3_ext",
"pymc3_ext Documentation",
author,
"pymc3_ext",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
def setup(app):
    """Sphinx extension entry point: attach extra stylesheets to every built page.

    NOTE(review): ``app.add_stylesheet`` is the pre-Sphinx-1.8 spelling of
    ``app.add_css_file`` — presumably this config targets an older Sphinx;
    confirm before upgrading the Sphinx pin.
    """
    app.add_stylesheet(
        "https://cdn.jsdelivr.net/npm/semantic-ui@2.4.2/dist/semantic.min.css"
    )
    app.add_stylesheet("default.css")
| 32.555556 | 128 | 0.705009 |
import sys
import os
import pymc3_ext
sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
sys.path.insert(0, os.path.abspath("sphinxext"))
extensions = [
"matplotlib.sphinxext.plot_directive",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.mathjax",
"nbsphinx",
"numpydoc",
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"sphinx.ext.autosectionlabel",
"sphinx.ext.napoleon",
"gallery_generator",
"recommonmark",
]
numpydoc_show_class_members = False
# Show the documentation of __init__ and the class docstring
autoclass_content = "both"
# Do not show the return type as seperate section
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "PyMC3"
copyright = "2018, The PyMC Development Team"
author = "PyMC developers"
# The version info for the project you're documenting, acts as replacement for
version = pymc3_ext.__version__
release = version
language = None
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
nbsphinx_execute = "never"
pygments_style = "friendly"
todo_include_todos = False
html_theme_path = ["."]
html_theme = "semantic_sphinx"
html_theme_options = {
"navbar_links": [
("Tutorials", "nb_tutorials/index"),
("Examples", "nb_examples/index"),
("Books + Videos", "learn"),
("API", "api"),
("Developer Guide", "developer_guide"),
("About PyMC3", "history")
],
}
html_logo = "../pymc3_logo.jpg"
html_favicon = "../logos/PyMC3.ico"
html_static_path = ["_static", "nb_tutorials/_images", "nb_examples/_images"]
html_sidebars = {"**": ["about.html", "navigation.html", "searchbox.html"]}
htmlhelp_basename = "pymc3doc"
latex_elements = {
}
latex_documents = [
(master_doc, "pymc3_ext.tex", "PyMC3 Documentation", "PyMC developers", "manual")
]
man_pages = [(master_doc, "pymc3_ext", "pymc3_ext Documentation", [author], 1)]
texinfo_documents = [
(
master_doc,
"pymc3_ext",
"pymc3_ext Documentation",
author,
"pymc3_ext",
"One line description of project.",
"Miscellaneous",
)
]
# texinfo_no_detailmenu = False
def setup(app):
app.add_stylesheet(
"https://cdn.jsdelivr.net/npm/semantic-ui@2.4.2/dist/semantic.min.css"
)
app.add_stylesheet("default.css")
| true | true |
1c495b77063d3e04fd1ff7e97fe4f6361eba5132 | 834 | py | Python | Chapter06/example4.py | jpgacrama/Mastering-Concurrency-in-Python | 3033840fe9b36320ba41a4f23a7d5284d0e47e7c | [
"MIT"
] | null | null | null | Chapter06/example4.py | jpgacrama/Mastering-Concurrency-in-Python | 3033840fe9b36320ba41a4f23a7d5284d0e47e7c | [
"MIT"
] | null | null | null | Chapter06/example4.py | jpgacrama/Mastering-Concurrency-in-Python | 3033840fe9b36320ba41a4f23a7d5284d0e47e7c | [
"MIT"
] | null | null | null | # ch6/example4.py
from multiprocessing import Process, current_process
import time
from os import system, name
def f1():
    """Worker that announces itself, sleeps four seconds, then announces exit."""
    proc = current_process()
    identity = 'process %s, ID %s...' % (proc.name, proc.pid)
    print('Starting ' + identity)
    time.sleep(4)
    print('Exiting ' + identity)
def f2():
    """Worker that announces itself, sleeps two seconds, then announces exit."""
    proc = current_process()
    identity = 'process %s, ID %s...' % (proc.name, proc.pid)
    print('Starting ' + identity)
    time.sleep(2)
    print('Exiting ' + identity)
def clear():
    """Clear the terminal, picking the right shell command for the host OS."""
    # os.name is 'nt' on Windows (-> cls); everything else gets POSIX `clear`.
    command = 'cls' if name == 'nt' else 'clear'
    _ = system(command)
if __name__ == '__main__':
    clear()
    # Worker 1 is daemonic: per multiprocessing semantics, daemon processes are
    # terminated when the main process exits, so its 4-second job is cut short
    # once the non-daemonic Worker 2 (2-second job) has finished.
    p1 = Process(name='Worker 1', target=f1)
    p1.daemon = True
    p2 = Process(name='Worker 2', target=f2)
    p1.start()
    time.sleep(1)  # stagger the second start by one second
    p2.start()
| 22.540541 | 60 | 0.577938 |
from multiprocessing import Process, current_process
import time
from os import system, name
def f1():
p = current_process()
print('Starting process %s, ID %s...' % (p.name, p.pid))
time.sleep(4)
print('Exiting process %s, ID %s...' % (p.name, p.pid))
def f2():
p = current_process()
print('Starting process %s, ID %s...' % (p.name, p.pid))
time.sleep(2)
print('Exiting process %s, ID %s...' % (p.name, p.pid))
def clear():
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
if __name__ == '__main__':
clear()
p1 = Process(name='Worker 1', target=f1)
p1.daemon = True
p2 = Process(name='Worker 2', target=f2)
p1.start()
time.sleep(1)
p2.start()
| true | true |
1c495d19aad684be7ab6647d0bbb9cc56a933309 | 218 | py | Python | src/packages/play_text.py | Tpool1/Asclepius | 760ab31a8933772faa76064a42b11ab6e12d6c9a | [
"MIT"
] | null | null | null | src/packages/play_text.py | Tpool1/Asclepius | 760ab31a8933772faa76064a42b11ab6e12d6c9a | [
"MIT"
] | null | null | null | src/packages/play_text.py | Tpool1/Asclepius | 760ab31a8933772faa76064a42b11ab6e12d6c9a | [
"MIT"
] | null | null | null | import pyttsx3
from packages.write_conversation_data import write_conversation_data
def play_text(text):
    """Speak `text` aloud via pyttsx3, then persist it to the conversation log.

    A fresh TTS engine is initialised on every call; runAndWait() blocks until
    playback has finished, so this function returns only after speaking.
    """
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()
    write_conversation_data(text)
| 21.8 | 68 | 0.761468 | import pyttsx3
from packages.write_conversation_data import write_conversation_data
def play_text(text):
engine = pyttsx3.init()
engine.say(text)
engine.runAndWait()
write_conversation_data(text)
| true | true |
1c495f1161e27118531d15882e1e5c93d9149524 | 3,826 | py | Python | Airplane/chap10/autopilot.py | eyler94/ee674AirplaneSim | 3ba2c6e685c2688a7f372475a7cd1f55f583d10e | [
"MIT"
] | 1 | 2020-06-07T00:14:42.000Z | 2020-06-07T00:14:42.000Z | Submarine/chap10/autopilot.py | eyler94/ee674AirplaneSim | 3ba2c6e685c2688a7f372475a7cd1f55f583d10e | [
"MIT"
] | null | null | null | Submarine/chap10/autopilot.py | eyler94/ee674AirplaneSim | 3ba2c6e685c2688a7f372475a7cd1f55f583d10e | [
"MIT"
] | 1 | 2019-06-24T22:10:48.000Z | 2019-06-24T22:10:48.000Z | """
autopilot block for mavsim_python
- Beard & McLain, PUP, 2012
- Last Update:
2/6/2019 - RWB
"""
import sys
import numpy as np
sys.path.append('..')
import parameters.control_parameters as AP
from chap6.pid_controlBrendon import pid_control#, pi_control, pd_control_with_rate
from message_types.msg_state import msg_state
from tools.tools import Euler2Quaternion, Quaternion2Euler
from control import matlab
class autopilot:
    """Cascaded-PID autopilot mapping commands + state to control deflections.

    Lateral channel: course error -> roll command -> aileron, plus
    sideslip -> rudder. Longitudinal channel: altitude -> pitch command ->
    elevator, plus airspeed -> throttle.
    """
    def __init__(self, ts_control):
        # --- lateral controllers ---
        # inner loop: roll angle -> aileron (PD, uses measured roll rate p)
        self.roll_from_aileron = pid_control(
            kp=AP.roll_kp,
            kd=AP.roll_kd,
            Ts=ts_control,
            limit=np.radians(45))
        # outer loop: course angle -> roll command (PI)
        self.course_from_roll = pid_control(
            kp=AP.course_kp,
            ki=AP.course_ki,
            Ts=ts_control,
            limit=np.radians(30))
        # sideslip regulation -> rudder (PI)
        self.sideslip_from_rudder = pid_control(
            kp=AP.sideslip_kp,
            ki=AP.sideslip_ki,
            Ts=ts_control,
            limit=np.radians(45))
        # Discrete transfer function for yaw damping. NOTE(review): not used by
        # update() below — the rudder is driven by sideslip_from_rudder instead.
        self.yaw_damper = matlab.tf([0.5, 0.], [1.0, ], ts_control)
        # --- longitudinal controllers ---
        # inner loop: pitch angle -> elevator (PD, uses measured pitch rate q)
        self.pitch_from_elevator = pid_control(
            kp=AP.pitch_kp,
            kd=AP.pitch_kd,
            limit=np.radians(45))
        # outer loop: altitude -> pitch command (PI)
        self.altitude_from_pitch = pid_control(
            kp=AP.altitude_kp,
            ki=AP.altitude_ki,
            Ts=ts_control,
            limit=np.radians(30))
        # airspeed -> throttle (PI); throttle_flag selects throttle-style saturation
        self.airspeed_from_throttle = pid_control(
            kp=AP.airspeed_throttle_kp,
            ki=AP.airspeed_throttle_ki,
            Ts=ts_control,
            limit=1.5,
            throttle_flag=True)
        self.commanded_state = msg_state()
    def update(self, cmd, state):
        """Run one control step.

        Parameters
        ----------
        cmd : autopilot command message (course, altitude, airspeed, phi feedforward)
        state : current estimated vehicle state

        Returns
        -------
        delta : np.ndarray of shape (4, 1) -- [elevator, throttle, aileron, rudder]
        commanded_state : msg_state echoing the commanded setpoints
        """
        # --- lateral autopilot ---
        phi_c = cmd.phi_feedforward + self.course_from_roll.update(cmd.course_command, state.chi, reset_flag=True)
        delta_a = self.roll_from_aileron.update_with_rate(phi_c, state.phi, state.p)
        delta_r = self.sideslip_from_rudder.update(0, state.beta)  # regulate sideslip to zero
        # --- longitudinal autopilot ---
        # (removed a dead `theta_c = np.pi/16` that was unconditionally overwritten)
        h_c = cmd.altitude_command
        theta_c = self.altitude_from_pitch.update(h_c, state.h)
        delta_e = self.pitch_from_elevator.update_with_rate(theta_c, state.theta, state.q)
        delta_t = self.airspeed_from_throttle.update(cmd.airspeed_command, state.Va)
        # construct output and commanded states
        delta = np.array([[delta_e], [delta_t], [delta_a], [delta_r]])
        self.commanded_state.h = cmd.altitude_command
        self.commanded_state.Va = cmd.airspeed_command
        self.commanded_state.phi = phi_c
        self.commanded_state.theta = theta_c
        self.commanded_state.chi = cmd.course_command
        return delta, self.commanded_state
    def saturate(self, input, low_limit, up_limit):
        """Clamp `input` into [low_limit, up_limit] (parameter names kept for callers)."""
        if input <= low_limit:
            output = low_limit
        elif input >= up_limit:
            output = up_limit
        else:
            output = input
        return output
| 40.273684 | 133 | 0.575536 | import sys
import numpy as np
sys.path.append('..')
import parameters.control_parameters as AP
from chap6.pid_controlBrendon import pid_control
from message_types.msg_state import msg_state
from tools.tools import Euler2Quaternion, Quaternion2Euler
from control import matlab
class autopilot:
def __init__(self, ts_control):
self.roll_from_aileron = pid_control(
kp=AP.roll_kp,
kd=AP.roll_kd,
Ts=ts_control,
limit=np.radians(45))
self.course_from_roll = pid_control(
kp=AP.course_kp,
ki=AP.course_ki,
Ts=ts_control,
limit=np.radians(30))
self.sideslip_from_rudder = pid_control(
kp=AP.sideslip_kp,
ki=AP.sideslip_ki,
Ts=ts_control,
limit=np.radians(45))
self.yaw_damper = matlab.tf([0.5, 0.],[1.0, ],ts_control)
self.pitch_from_elevator = pid_control(
kp=AP.pitch_kp,
kd=AP.pitch_kd,
limit=np.radians(45))
self.altitude_from_pitch = pid_control(
kp=AP.altitude_kp,
ki=AP.altitude_ki,
Ts=ts_control,
limit=np.radians(30))
self.airspeed_from_throttle = pid_control(
kp=AP.airspeed_throttle_kp,
ki=AP.airspeed_throttle_ki,
Ts=ts_control,
limit=1.5,
throttle_flag=True)
self.commanded_state = msg_state()
def update(self, cmd, state):
phi_c = cmd.phi_feedforward + self.course_from_roll.update(cmd.course_command,state.chi,reset_flag=True)
ta_a = self.roll_from_aileron.update_with_rate(phi_c, state.phi, state.p)
delta_r = self.sideslip_from_rudder.update(0,state.beta)
h_c = cmd.altitude_command
theta_c = np.pi/16
theta_c = self.altitude_from_pitch.update(h_c, state.h)
delta_e = self.pitch_from_elevator.update_with_rate(theta_c, state.theta, state.q)
ta_t = self.airspeed_from_throttle.update(cmd.airspeed_command, state.Va)
delta = np.array([[delta_e], [delta_t], [delta_a], [delta_r]])
self.commanded_state.h = cmd.altitude_command
self.commanded_state.Va = cmd.airspeed_command
self.commanded_state.phi = phi_c
self.commanded_state.theta = theta_c
self.commanded_state.chi = cmd.course_command
return delta, self.commanded_state
def saturate(self, input, low_limit, up_limit):
if input <= low_limit:
output = low_limit
elif input >= up_limit:
output = up_limit
else:
output = input
return output
| true | true |
1c4960629fe2ffb245ea6b44937e597dbeb76aeb | 178 | py | Python | rund.py | devvspaces/mailfinder | a4d50a0d3bf80741e33df69c74c94daffebc435b | [
"MIT"
] | null | null | null | rund.py | devvspaces/mailfinder | a4d50a0d3bf80741e33df69c74c94daffebc435b | [
"MIT"
] | null | null | null | rund.py | devvspaces/mailfinder | a4d50a0d3bf80741e33df69c74c94daffebc435b | [
"MIT"
] | null | null | null | import re
# Extract and print the set of distinct email addresses found in test.csv.
with open('test.csv','r') as f:
    response = f.read()
    # NOTE(review): the pattern's tail is a literal ".com", so addresses with any
    # other TLD are silently skipped; re.I makes the character classes case-insensitive.
    new_emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.com", response, re.I))
    print(new_emails)
with open('test.csv','r') as f:
response = f.read()
new_emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.com", response, re.I))
print(new_emails) | true | true |
1c496072aafd1dbd2e7caef2c92a7a1ad00fdb4b | 403 | py | Python | recipes/construct_webapp_class_manually.py | ammarsys/pyanywhere-wrapper | d8cde2d29900c25fc7ab3cd8103923f727b5dade | [
"MIT"
] | 5 | 2021-06-25T14:34:52.000Z | 2021-07-04T14:15:13.000Z | recipes/construct_webapp_class_manually.py | ammarsys/pyanywhere-wrapper | d8cde2d29900c25fc7ab3cd8103923f727b5dade | [
"MIT"
] | 1 | 2021-12-12T00:47:25.000Z | 2022-01-24T17:19:43.000Z | recipes/construct_webapp_class_manually.py | ammarsys/pyanywhere-wrapper | d8cde2d29900c25fc7ab3cd8103923f727b5dade | [
"MIT"
] | 1 | 2021-12-14T15:44:52.000Z | 2021-12-14T15:44:52.000Z | from pyaww.webapp import WebApp
# Construct a WebApp wrapper directly from a literal attribute dictionary
# (the keys mirror the fields the WebApp object exposes).
my_webapp = WebApp(
    {'id': 123,
     'user': 'sampleuser',
     'domain_name': 'something.com',
     'python_version': '3.8',
     'source_directory': '/home/something/',
     'working_directory': '/home/something/',
     'virtualenv_path': '/home/something/venv',
     'expiry': 'some date',
     'force_https': False
     }
)
# do stuff with the webapp object now
| 25.1875 | 47 | 0.620347 | from pyaww.webapp import WebApp
my_webapp = WebApp(
{'id': 123,
'user': 'sampleuser',
'domain_name': 'something.com',
'python_version': '3.8',
'source_directory': '/home/something/',
'working_directory': '/home/something/',
'virtualenv_path': '/home/something/venv',
'expiry': 'some date',
'force_https': False
}
)
| true | true |
1c4961378a4b0a0bdb51ede423c33dd48070c102 | 10,890 | py | Python | templates/games.py | tiendat101001/PythonProgrammingPuzzles | e4a6504bf783ad1ab93686cedd5d1818af92a5e4 | [
"MIT"
] | null | null | null | templates/games.py | tiendat101001/PythonProgrammingPuzzles | e4a6504bf783ad1ab93686cedd5d1818af92a5e4 | [
"MIT"
] | null | null | null | templates/games.py | tiendat101001/PythonProgrammingPuzzles | e4a6504bf783ad1ab93686cedd5d1818af92a5e4 | [
"MIT"
] | null | null | null | """
Solve some two-player games
"""
from problems import Problem
from typing import List
# Hint: subclass Problem.Debug for quick testing. Run make_dataset.py to make the dataset
# See https://github.com/microsoft/PythonProgrammingPuzzles/wiki/How-to-add-a-puzzle for more info
class Nim(Problem):
    """Compute optimal play for the classic two-player game [Nim](https://en.wikipedia.org/wiki/Nim)

    In the game of Nim, there are a number of heaps of objects. In each step, a player removes one or more
    objects from a non-empty heap. The player who takes the last object wins. Nim has an elegant theory
    for optimal play based on the xor of the bits.
    """
    timeout = 10 # harder than most problems, get extra time
    @staticmethod
    def sat(cert: List[List[int]], heaps=[5, 9]): # cert is a sufficient list of desirable states to leave for opponent
        # Certificate check: `cert` claims to be the set of "good leaves" (positions
        # you want to hand the opponent). The recursion verifies the claim is
        # self-consistent and agrees with the starting position.
        good_leaves = {tuple(h) for h in cert} # for efficiency, we keep track of h as a tuple of n non-negative ints
        cache = {}  # memo: state tuple -> bool, avoids exponential re-exploration
        def is_good_leave(h):
            if h in cache:
                return cache[h]
            # All positions reachable in one move: lower exactly one heap i to some k < h[i].
            next_states = [(*h[:i], k, *h[i + 1:]) for i in range(len(h)) for k in range(h[i])]
            conjecture = (h in good_leaves)
            if conjecture: # check that it is a good leave
                # A good leave must give the opponent no move into another good leave.
                assert not any(is_good_leave(s) for s in next_states)
            else: # check that it is a bad leave, only need to check one move
                # A bad leave must have at least one move into a good leave (next() raises if none).
                assert is_good_leave(next(s for s in next_states if s in good_leaves))
            cache[h] = conjecture
            return conjecture
        return is_good_leave(tuple(heaps)) == (tuple(heaps) in good_leaves)
    @staticmethod
    def sol(heaps):
        # Classic Nim theory (see class docstring): a position is a good leave
        # iff the xor of the heap sizes is zero.
        import itertools
        def val(h): # return True if h is a good state to leave things in
            xor = 0
            for i in h:
                xor ^= i
            return xor == 0
        # Enumerate every position dominated by `heaps` and keep the zero-xor ones.
        return [list(h) for h in itertools.product(*[range(i + 1) for i in heaps]) if val(h)]
    def gen_random(self):
        # Sample up to 10 heaps of size < 10, but only keep instances whose state
        # space (product of heap_i + 1) is small enough to verify quickly.
        num_heaps = self.random.randrange(10)
        heaps = [self.random.randrange(10) for _ in range(num_heaps)]
        prod = 1
        for i in heaps:
            prod *= i + 1
        if prod < 10 ** 6:
            self.add(dict(heaps=heaps))
class Mastermind(Problem):
    """Compute a strategy for winning in [mastermind](https://en.wikipedia.org/wiki/Mastermind_%28board_game%29)
    in a given number of guesses.

    Colors are represented by the letters A-F. The representation is as follows.
    A transcript is a string describing the game so far. It consists of rows separated by newlines.
    Each row has 4 letters A-F followed by a space and then two numbers indicating how many are exactly right
    and how many are right but in the wrong location. A sample transcript is as follows:
    ```
    AABB 11
    ABCD 21
    ABDC
    ```
    This is the transcript as the game is in progress. The complete transcript might be:
    ```
    AABB 11
    ABCD 21
    ABDC 30
    ABDE 40
    ```
    A winning strategy is described by a list of transcripts to visit. The next guess can be determined from
    those partial transcripts.
    """
    timeout = 10
    @staticmethod
    def sat(transcripts: List[str], max_moves=10):
        # Verifier: replay the strategy encoded by `transcripts` against every
        # possible 4-letter secret and require a win within max_moves guesses.
        COLORS = "ABCDEF"
        def helper(secret: str, transcript=""):
            if transcript.count("\n") == max_moves:
                return False
            # Next guess = last 4 chars of the shortest listed transcript extending
            # the game so far (min raises ValueError if the strategy has no entry).
            guess = min([t for t in transcripts if t.startswith(transcript)], key=len)[-4:]
            if guess == secret:
                return True
            assert all(g in COLORS for g in guess)
            # perfect = exact position+color matches; almost = right color, wrong spot.
            perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
            almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
            return helper(secret, transcript + f"{guess} {sum(perfect.values())}{almost}\n")
        return all(helper(r + s + t + u) for r in COLORS for s in COLORS for t in COLORS for u in COLORS)
    @staticmethod
    def sol(max_moves):
        COLORS = "ABCDEF"
        transcripts = []
        ALL = [r + s + t + u for r in COLORS for s in COLORS for t in COLORS for u in COLORS]
        def score(secret, guess):
            # Two-digit feedback string: "<exact matches><color-only matches>".
            perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
            almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
            return f"{sum(perfect.values())}{almost}"
        def mastermind(transcript="AABB", feasible=ALL): # mastermind moves
            # Commit to the guess ending `transcript`, then branch on every feedback
            # the codemaker could return for the still-feasible secrets.
            transcripts.append(transcript)
            assert transcript.count("\n") <= max_moves
            guess = transcript[-4:]
            feasibles = {}
            for secret in feasible:
                scr = score(secret, guess)
                if scr not in feasibles:
                    feasibles[scr] = []
                feasibles[scr].append(secret)
            for scr, secrets in feasibles.items():
                if scr != "40":
                    guesser(transcript + f" {scr}\n", secrets)
        def guesser(transcript, feasible): # guesser moves
            # Greedy minimax: choose the guess minimizing the largest surviving
            # class of secrets over all possible feedback strings.
            def max_ambiguity(guess):
                by_score = {}
                for secret2 in feasible:
                    scr = score(secret2, guess)
                    if scr not in by_score:
                        by_score[scr] = 0
                    by_score[scr] += 1
                # for OPTIMAL solution, use return max(by_score.values()) + 0.5 * (guess not in feasible) instead of:
                return max(by_score.values())
            # for optimal solution use guess = min(ALL, key=max_ambiguity) instead of:
            guess = min(feasible, key=max_ambiguity)
            mastermind(transcript + guess, feasible)
        mastermind()
        return transcripts
    def gen(self, target_num_instances):
        # Three instance difficulties; smaller move budgets are harder.
        for max_moves in [6, 8, 10]:
            self.add(dict(max_moves=max_moves))
class TicTacToeX(Problem):
    """Compute a strategy for X (first player) in tic-tac-toe that guarantees a tie.

    We are looking for a strategy for X that, no matter what the opponent does, X does not lose.

    A board is represented as a 9-char string like an X in the middle would be "....X...." and a
    move is an integer 0-8. The answer is a list of "good boards" that X aims for, so no matter what O does there
    is always good board that X can get to with a single move.
    """
    @staticmethod
    def sat(good_boards: List[str]):
        # Each board becomes a pair of 9-bit bitboards (one for X, one for O);
        # bit i corresponds to board character i.
        board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
        # win[i] is True iff bitboard i contains a completed line: rows 7/56/448,
        # columns 73/146/292, diagonals 273/84.
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
        def tie(x, o):  # returns True if X has a forced tie/win assuming it's X's turn to move.
            # X's move is read off the certificate: the unique square that lands in a good board.
            x |= 1 << next(i for i in range(9) if (x | (1 << i), o) in board_bit_reps)
            return not win[o] and (win[x] or all((x | o) & (1 << i) or tie(x, o | (1 << i)) for i in range(9)))
        return tie(0, 0)
    @staticmethod
    def sol():
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]  # 9-bit representation
        good_boards = []
        def x_move(x, o):  # returns True if x wins or ties, x's turn to move
            if win[o]:
                return False
            if x | o == 511:  # 511 = all nine squares filled
                return True
            for i in range(9):
                # Try placing X at i; record the resulting board if it secures a tie/win.
                if (x | o) & (1 << i) == 0 and o_move(x | (1 << i), o):
                    good_boards.append("".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + (i == j)] for j in range(9)))
                    return True
            return False  # O wins
        def o_move(x, o):  # returns True if x wins or ties, x's turn to move
            if win[x] or x | o == 511:
                return True
            for i in range(9):
                # X must survive every O reply; fail if any reply beats X.
                if (x | o) & (1 << i) == 0 and not x_move(x, o | (1 << i)):
                    return False
            return True  # O wins
        res = x_move(0, 0)
        assert res
        return good_boards
class TicTacToeO(Problem):
    """Compute a strategy for O (second player) in tic-tac-toe that guarantees a tie.

    We are looking for a strategy for O that, no matter what the opponent does, O does not lose.

    A board is represented as a 9-char string like an X in the middle would be "....X...." and a
    move is an integer 0-8. The answer is a list of "good boards" that O aims for, so no matter what X does there
    is always good board that O can get to with a single move.
    """
    @staticmethod
    def sat(good_boards: List[str]):
        # Each board becomes a pair of 9-bit bitboards (one for X, one for O);
        # bit i corresponds to board character i.
        board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
        # win[i] is True iff bitboard i contains a completed line: rows 7/56/448,
        # columns 73/146/292, diagonals 273/84.
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
        def tie(x, o):  # returns True if O has a forced tie/win. It's O's turn to move.
            if o | x != 511:  # board not yet full: read O's move off the certificate
                o |= 1 << next(i for i in range(9) if (x, o | (1 << i)) in board_bit_reps)
            return not win[x] and (win[o] or all((x | o) & (1 << i) or tie(x | (1 << i), o) for i in range(9)))
        # O must survive every possible opening move by X.
        return all(tie(1 << i, 0) for i in range(9))
    @staticmethod
    def sol():
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]  # 9-bit representation
        good_boards = []
        def x_move(x, o):  # returns True if o wins or ties, x's turn to move
            if win[o] or x | o == 511:  # 511 = all nine squares filled
                return True
            for i in range(9):
                # O must survive every X reply; fail if any reply beats O.
                if (x | o) & (1 << i) == 0 and not o_move(x | (1 << i), o):
                    return False
            return True  # O wins/ties
        def o_move(x, o):  # returns True if o wins or ties, o's turn to move
            if win[x]:
                return False
            if x | o == 511:
                return True
            for i in range(9):
                # Try placing O at i; record the resulting board if it secures a tie/win.
                if (x | o) & (1 << i) == 0 and x_move(x, o | (1 << i)):
                    good_boards.append(
                        "".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + 2 * (i == j)] for j in range(9)))
                    return True
            return False  # X wins
        res = x_move(0, 0)
        assert res
        return good_boards
class RockPaperScissors(Problem):
    """Find optimal strategy for Rock-Paper-Scissors zero-sum game
    Find the distribution that guarantees maximum expected value of 0
    """
    @staticmethod
    def sat(probs: List[float]):  # rock prob, paper prob, scissors prob
        # Valid distribution: three probabilities summing to 1 (within tolerance).
        assert len(probs) == 3 and abs(sum(probs) - 1) < 1e-6
        # Optimal iff no pure counter-move gains an edge: for each move i,
        # P(move it beats) - P(move that beats it) must stay below tolerance.
        advantages = [probs[(i + 2) % 3] - probs[(i + 1) % 3] for i in range(3)]
        return max(advantages) < 1e-6
    @staticmethod
    def sol():
        # The uniform mixed strategy is the equilibrium.
        return [1 / 3, 1 / 3, 1 / 3]
if __name__ == "__main__":
    # Script entry point: run the framework's debug routine over the
    # Problem subclasses defined in this module.
    Problem.debug_problems()
| 38.34507 | 120 | 0.559963 |
from problems import Problem
from typing import List
class Nim(Problem):
    """Certify the losing positions ("good leaves") of a game of Nim.

    `sat` checks that `cert` lists exactly the positions (reachable from
    `heaps`) that are losing for the player to move; `sol` produces them as
    the positions whose nim-sum (XOR of heap sizes) is zero.
    """
    timeout = 10
    @staticmethod
    def sat(cert: List[List[int]], heaps=[5, 9]):
        # NOTE(review): mutable default `heaps` is safe only because it is
        # never mutated here — confirm the framework never mutates it either.
        good_leaves = {tuple(h) for h in cert}
        cache = {}  # position tuple -> verified good-leave status (memoization)
        def is_good_leave(h):
            if h in cache:
                return cache[h]
            # Every position reachable by shrinking one heap h[i] to some k < h[i].
            next_states = [(*h[:i], k, *h[i + 1:]) for i in range(len(h)) for k in range(h[i])]
            conjecture = (h in good_leaves)
            if conjecture:
                # A good leave must have no move into another good leave.
                assert not any(is_good_leave(s) for s in next_states)
            else:
                # A bad leave must have at least one move into a good leave.
                assert is_good_leave(next(s for s in next_states if s in good_leaves))
            cache[h] = conjecture
            return conjecture
        return is_good_leave(tuple(heaps)) == (tuple(heaps) in good_leaves)
    @staticmethod
    def sol(heaps):
        """Return every position (componentwise <= heaps) whose nim-sum is 0."""
        import itertools
        def val(h):
            xor = 0
            for i in h:
                xor ^= i
            return xor == 0
        return [list(h) for h in itertools.product(*[range(i + 1) for i in heaps]) if val(h)]
    def gen_random(self):
        """Add a random instance, keeping the state space under 10**6 positions."""
        num_heaps = self.random.randrange(10)
        heaps = [self.random.randrange(10) for _ in range(num_heaps)]
        prod = 1
        for i in heaps:
            prod *= i + 1
        if prod < 10 ** 6:
            self.add(dict(heaps=heaps))
class Mastermind(Problem):
    """Find a guessing strategy for Mastermind (6 colors "A"-"F", 4 pegs)
    that identifies any secret code within `max_moves` guesses.

    The strategy is encoded as a list of transcripts; after any game prefix,
    the next guess is the last 4 characters of the shortest transcript
    extending that prefix.
    """
    timeout = 10
    @staticmethod
    def sat(transcripts: List[str], max_moves=10):
        COLORS = "ABCDEF"
        def helper(secret: str, transcript=""):
            if transcript.count("\n") == max_moves:
                return False  # ran out of guesses for this secret
            # Next guess: last 4 chars of the shortest transcript with this prefix.
            guess = min([t for t in transcripts if t.startswith(transcript)], key=len)[-4:]
            if guess == secret:
                return True
            assert all(g in COLORS for g in guess)
            # perfect: right color in the right position; almost: right color,
            # wrong position (counted per color to avoid double counting).
            perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
            almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
            return helper(secret, transcript + f"{guess} {sum(perfect.values())}{almost}\n")
        # The strategy must crack every one of the 6**4 possible secrets.
        return all(helper(r + s + t + u) for r in COLORS for s in COLORS for t in COLORS for u in COLORS)
    @staticmethod
    def sol(max_moves):
        """Build the transcripts with a worst-case-minimizing search from "AABB"."""
        COLORS = "ABCDEF"
        transcripts = []
        ALL = [r + s + t + u for r in COLORS for s in COLORS for t in COLORS for u in COLORS]
        def score(secret, guess):
            # "PA" string: P perfect matches followed by A almost matches.
            perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
            almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
            return f"{sum(perfect.values())}{almost}"
        def mastermind(transcript="AABB", feasible=ALL):
            # Record the transcript ending in a guess, then branch on every
            # possible score reply, shrinking the feasible secret set.
            transcripts.append(transcript)
            assert transcript.count("\n") <= max_moves
            guess = transcript[-4:]
            feasibles = {}
            for secret in feasible:
                scr = score(secret, guess)
                if scr not in feasibles:
                    feasibles[scr] = []
                feasibles[scr].append(secret)
            for scr, secrets in feasibles.items():
                if scr != "40":  # "40" = 4 perfect: solved, no further guessing
                    guesser(transcript + f" {scr}\n", secrets)
        def guesser(transcript, feasible):
            # Pick the guess minimizing the worst-case number of secrets that
            # remain indistinguishable (minimax on ambiguity).
            def max_ambiguity(guess):
                by_score = {}
                for secret2 in feasible:
                    scr = score(secret2, guess)
                    if scr not in by_score:
                        by_score[scr] = 0
                    by_score[scr] += 1
                return max(by_score.values())
            guess = min(feasible, key=max_ambiguity)
            mastermind(transcript + guess, feasible)
        mastermind()
        return transcripts
    def gen(self, target_num_instances):
        """Register instances with a few different move budgets."""
        for max_moves in [6, 8, 10]:
            self.add(dict(max_moves=max_moves))
class TicTacToeX(Problem):
    """Compute a strategy for X (first player) in tic-tac-toe that guarantees a tie.

    A board is a 9-char string ("." empty, "X", "O"); the answer is a list of
    "good boards" X aims for, so that no matter what O does there is always a
    good board X can reach with a single move.
    """
    @staticmethod
    def sat(good_boards: List[str]):
        # Encode each board string as a (x_bitmask, o_bitmask) pair over the 9 cells.
        board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
        # win[bits] is True iff `bits` covers one of the 8 three-in-a-row masks.
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
        def tie(x, o):
            # X plays the move landing on a good board, then every O reply
            # must again lead to a tie/win for X.
            x |= 1 << next(i for i in range(9) if (x | (1 << i), o) in board_bit_reps)
            return not win[o] and (win[x] or all((x | o) & (1 << i) or tie(x, o | (1 << i)) for i in range(9)))
        return tie(0, 0)
    @staticmethod
    def sol():
        """Enumerate the good boards for X by exhaustive game-tree search."""
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
        good_boards = []
        def x_move(x, o):
            # True if X can still win or tie; X's turn to move.
            if win[o]:
                return False
            if x | o == 511:
                return True
            for i in range(9):
                if (x | o) & (1 << i) == 0 and o_move(x | (1 << i), o):
                    # Record the board *after* X's tying/winning move i.
                    good_boards.append("".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + (i == j)] for j in range(9)))
                    return True
            return False  # O wins
        def o_move(x, o):  # returns True if x wins or ties; it is O's turn to move
            if win[x] or x | o == 511:
                return True
            for i in range(9):
                if (x | o) & (1 << i) == 0 and not x_move(x, o | (1 << i)):
                    return False
            return True  # X survives every O reply
        res = x_move(0, 0)
        assert res
        return good_boards
class TicTacToeO(Problem):
    """Compute a strategy for O (second player) in tic-tac-toe that guarantees a tie.

    A board is a 9-char string ("." empty, "X", "O"); the answer is a list of
    "good boards" O aims for, so that no matter what X does there is always a
    good board O can reach with a single move.
    """
    @staticmethod
    def sat(good_boards: List[str]):
        # Encode each board string as a (x_bitmask, o_bitmask) pair over the 9 cells.
        board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
        # win[bits] is True iff `bits` covers one of the 8 three-in-a-row masks.
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
        def tie(x, o):
            # True if O has a forced tie/win; O's turn to move.
            if o | x != 511:
                o |= 1 << next(i for i in range(9) if (x, o | (1 << i)) in board_bit_reps)
            return not win[x] and (win[o] or all((x | o) & (1 << i) or tie(x | (1 << i), o) for i in range(9)))
        # Must hold for every possible opening move by X.
        return all(tie(1 << i, 0) for i in range(9))
    @staticmethod
    def sol():
        """Enumerate the good boards for O by exhaustive game-tree search."""
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
        good_boards = []
        def x_move(x, o):
            # True if O can still win or tie; X's turn to move.
            if win[o] or x | o == 511:
                return True
            for i in range(9):
                if (x | o) & (1 << i) == 0 and not o_move(x | (1 << i), o):
                    return False
            return True  # O wins/ties
        def o_move(x, o):  # returns True if o wins or ties, o's turn to move
            if win[x]:
                return False
            if x | o == 511:
                return True
            for i in range(9):
                if (x | o) & (1 << i) == 0 and x_move(x, o | (1 << i)):
                    # Record the board *after* O's tying/winning move i.
                    good_boards.append(
                        "".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + 2 * (i == j)] for j in range(9)))
                    return True
            return False  # X wins
        res = x_move(0, 0)
        assert res
        return good_boards
class RockPaperScissors(Problem):
    """Find the Rock-Paper-Scissors mixed strategy guaranteeing expected value 0."""
    @staticmethod
    def sat(probs: List[float]):
        # probs = [rock, paper, scissors]; must be a distribution.
        assert len(probs) == 3 and abs(sum(probs) - 1) < 1e-6
        # No pure counter-move may gain a positive edge over the mix.
        edge = max(probs[(i + 2) % 3] - probs[(i + 1) % 3] for i in range(3))
        return edge < 1e-6
    @staticmethod
    def sol():
        third = 1 / 3
        return [third, third, third]
if __name__ == "__main__":
    # Script entry point: run the framework's debug routine over all problems.
    Problem.debug_problems()
| true | true |
1c4962bf3e816c1cf57f29913651635757597c12 | 6,853 | py | Python | cogs/tag.py | theoxan/BC_HelperBot | 0d34d6364588b5649ef4689727197e1cc8a63d36 | [
"Apache-2.0"
] | null | null | null | cogs/tag.py | theoxan/BC_HelperBot | 0d34d6364588b5649ef4689727197e1cc8a63d36 | [
"Apache-2.0"
] | null | null | null | cogs/tag.py | theoxan/BC_HelperBot | 0d34d6364588b5649ef4689727197e1cc8a63d36 | [
"Apache-2.0"
] | null | null | null | import os
from os import path
import json
from difflib import SequenceMatcher
import discord
from discord.ext import commands
from schema import SchemaError
from .utils.misc import tag_shema
from .utils import checkers, misc
class Tag(commands.Cog):
    # Cog serving pre-written help "tags" loaded from JSON files under
    # ressources/tags/<category>/<tag>.json.
    def __init__(self, bot):
        self.bot = bot
        # Map category -> {tag file basename -> tag file path}.
        # NOTE(review): the inner isdir() check tests the *category* directory,
        # not the tag entry itself — verify this filter is intended.
        tags_folder = {
            category: {
                path.splitext(tag_name)[0]: path.join(path.join('ressources/tags/', category), tag_name) for tag_name in os.listdir(path.join('ressources/tags', category)) if path.isdir(path.join('ressources/tags', category))
            } for category in os.listdir('ressources/tags/') if os.path.isdir(os.path.join('ressources/tags/', category))
        }
        def complete_values(obj, ref=None):
            """Recursively replace "*" placeholder values using `ref`; in a
            list of dicts, later items default to the first item as reference."""
            if isinstance(obj, dict):
                for key, value in obj.items():
                    if value == "*" and ref:
                        obj[key] = ref[key]
                    else:
                        obj[key] = complete_values(value, ref=ref[key] if ref else ref)
            elif isinstance(obj, list) and all(isinstance(sub_obj, dict) for sub_obj in obj):
                for i, sub_obj in enumerate(obj):
                    if i == 0 and not ref: continue
                    obj[i] = complete_values(obj[i], ref=ref[i] if ref else obj[0])
            return obj
        # Load and schema-validate every tag file; invalid files are logged
        # and skipped instead of aborting cog setup.
        self.tags = {}
        for category_name, tags_infos in tags_folder.items():
            self.tags[category_name] = {}
            for tag_name, tag_path in tags_infos.items():
                try:
                    with open(tag_path, "r", encoding='utf-8') as f:
                        loaded_tag = json.load(f)
                    try:
                        loaded_tag = tag_shema.validate(loaded_tag)
                    except SchemaError as e:
                        self.bot.logger.warning(f'The tag {tag_name} from category {category_name} is improper.\n{e}')
                        continue
                    self.tags[category_name][loaded_tag["name"]] = complete_values(loaded_tag)
                except Exception as e:
                    print(e)
                    self.bot.logger.warning(f"The tag {tag_path} cannot be loaded")
    # /tag <category> (<tag_name>|'list'): sends the help embed for a tag.
    # Falls back to fuzzy matching (SequenceMatcher, ratio > 0.8) for both the
    # category and the tag name, lists available tags on 'list', and lets the
    # user pick a sub-choice via reactions when the tag defines several targets.
    @commands.command(
        name="tag",
        usage="/tag <category> (<tag_name>|'list')",
        description="Obtenir de l'aide rapidement"
    )
    @checkers.authorized_channels()
    async def _tag(self, ctx, category=None, *, query=None):
        category_tags = self.tags.get(category)  # dict of this category's tags, or None
        if category_tags is None and category is not None:
            # Fuzzy-match the category name against known categories.
            similors = ((name, SequenceMatcher(None, name, category).ratio()) for name in self.tags.keys())
            similors = sorted(similors, key=lambda couple: couple[1], reverse=True)
            if similors[0][1] > 0.8:
                category = similors[0][0]  # category name
                category_tags = self.tags.get(category)
        if category_tags is None:
            # Unknown category: show the list of available categories.
            format_list = lambda keys: "\n".join([f"- `{key}`" for key in keys])
            embed = discord.Embed(
                title="Catégorie non trouvée. Essayez parmi :",
                description=format_list(self.tags.keys()),
                color=discord.Color.from_rgb(47, 49, 54)
            )
            embed.set_footer(text=ctx.command.usage)
            message = await ctx.send(embed=embed)
            return await misc.delete_with_emote(ctx, message)
        if query is None or query == "list":
            # List every tag of the category with its description.
            format_list = lambda tags_values: "\n".join([f"- `{tag.get('name')}` : {tag.get('description')}" for tag in tags_values])
            message = await ctx.channel.send(embed=discord.Embed(title=f"Voici les tags de la catégorie `{category}` :",
                                                                 description=format_list(category_tags.values()),
                                                                 color=discord.Color.from_rgb(47, 49, 54))
                                             )
            return await misc.delete_with_emote(ctx, message)
        # Exact name first, then aliases.
        tag = category_tags.get(query) or discord.utils.find(lambda tag_: tag_.get('aliases') and query in tag_['aliases'], category_tags.values())
        if tag is None:
            # Fuzzy-match the tag name within the category.
            similors = ((name, SequenceMatcher(None, name, query).ratio()) for name in category_tags.keys())
            similors = sorted(similors, key=lambda couple: couple[1], reverse=True)
            if similors[0][1] > 0.8:
                query = similors[0][0]  # tag name
                tag = category_tags.get(query)
            else:
                similar_text = f"voulez vous-vous dire `{similors[0][0]}` ? Sinon "
                return await ctx.send(f"Le tag n'a pas été trouvé, {similar_text if similors[0][1] > 0.5 else ''}regardez `/tag list`", delete_after=10)
        message = None
        response = tag.get('response')
        choices = response.get('choices')
        if choices:
            # Multi-target tag: let the author pick via numbered reactions.
            reactions = ['0️⃣', '1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣']
            message = await ctx.send("__Choisissez la cible :__\n"+'\n'.join([f"{reactions[i]} - `{choice['choice_name']}`" for i, choice in enumerate(choices)]))
            self.bot.loop.create_task(misc.add_reactions(message, reactions[:len(choices)]))
            try:
                reaction, _ = await self.bot.wait_for('reaction_add', timeout=120, check=lambda react, usr: str(react.emoji) in reactions[:len(choices)] and usr.id == ctx.author.id and react.message.id == message.id)
            except TimeoutError:
                # NOTE(review): discord.py raises asyncio.TimeoutError; on
                # Python < 3.11 the builtin TimeoutError is a different class —
                # verify this except clause actually catches the timeout.
                return await message.delete()
            try: await message.clear_reactions()
            except: pass
            response = choices[reactions.index(str(reaction.emoji))]
        embed = discord.Embed.from_dict(response.get("embed"))
        embed.color = discord.Color.from_rgb(47, 49, 54)
        embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
        text = f'/tag {category} {query}'
        url = discord.Embed.Empty
        # Credit the tag's creator in the footer when an author id is stored.
        creator = await self.bot.fetch_user(tag.get('author')) if tag.get('author') else None
        if creator:
            text += f' • par {creator.name}#{creator.discriminator}'
            url = creator.avatar_url
        embed.set_footer(
            text=text,
            icon_url=url
        )
        if message: await message.edit(embed=embed, content="")
        else: message = await ctx.channel.send(embed=embed)
        try: await ctx.message.delete()  # delete the invoking command message
        except: pass
        try: await misc.delete_with_emote(ctx, message)
        except: pass
def setup(bot):
    """discord.py extension entry point: register the Tag cog on the bot."""
    bot.add_cog(Tag(bot))
    bot.logger.info("Extension [tag] loaded successfully.")
| 44.79085 | 229 | 0.569678 | import os
from os import path
import json
from difflib import SequenceMatcher
import discord
from discord.ext import commands
from schema import SchemaError
from .utils.misc import tag_shema
from .utils import checkers, misc
class Tag(commands.Cog):
    # Serves canned help "tags" read from ressources/tags/<category>/<tag>.json.
    def __init__(self, bot):
        self.bot = bot
        # category -> {tag file basename -> tag file path}.
        # NOTE(review): the inner isdir() filter checks the category directory
        # rather than the individual tag entry — confirm intent.
        tags_folder = {
            category: {
                path.splitext(tag_name)[0]: path.join(path.join('ressources/tags/', category), tag_name) for tag_name in os.listdir(path.join('ressources/tags', category)) if path.isdir(path.join('ressources/tags', category))
            } for category in os.listdir('ressources/tags/') if os.path.isdir(os.path.join('ressources/tags/', category))
        }
        def complete_values(obj, ref=None):
            """Fill "*" placeholders from `ref`; within a list of dicts the
            first element is used as the reference for the others."""
            if isinstance(obj, dict):
                for key, value in obj.items():
                    if value == "*" and ref:
                        obj[key] = ref[key]
                    else:
                        obj[key] = complete_values(value, ref=ref[key] if ref else ref)
            elif isinstance(obj, list) and all(isinstance(sub_obj, dict) for sub_obj in obj):
                for i, sub_obj in enumerate(obj):
                    if i == 0 and not ref: continue
                    obj[i] = complete_values(obj[i], ref=ref[i] if ref else obj[0])
            return obj
        # Parse and schema-validate each tag file; bad files are logged, not fatal.
        self.tags = {}
        for category_name, tags_infos in tags_folder.items():
            self.tags[category_name] = {}
            for tag_name, tag_path in tags_infos.items():
                try:
                    with open(tag_path, "r", encoding='utf-8') as f:
                        loaded_tag = json.load(f)
                    try:
                        loaded_tag = tag_shema.validate(loaded_tag)
                    except SchemaError as e:
                        self.bot.logger.warning(f'The tag {tag_name} from category {category_name} is improper.\n{e}')
                        continue
                    self.tags[category_name][loaded_tag["name"]] = complete_values(loaded_tag)
                except Exception as e:
                    print(e)
                    self.bot.logger.warning(f"The tag {tag_path} cannot be loaded")
    # Command handler for /tag: resolves category and tag (with fuzzy matching),
    # lists tags on 'list', and supports reaction-driven sub-choices.
    @commands.command(
        name="tag",
        usage="/tag <category> (<tag_name>|'list')",
        description="Obtenir de l'aide rapidement"
    )
    @checkers.authorized_channels()
    async def _tag(self, ctx, category=None, *, query=None):
        category_tags = self.tags.get(category)  # this category's tag dict, or None
        if category_tags is None and category is not None:
            # Try fuzzy-matching the requested category name.
            similors = ((name, SequenceMatcher(None, name, category).ratio()) for name in self.tags.keys())
            similors = sorted(similors, key=lambda couple: couple[1], reverse=True)
            if similors[0][1] > 0.8:
                category = similors[0][0]  # best-matching category name
                category_tags = self.tags.get(category)
        if category_tags is None:
            # Still unknown: reply with the available categories.
            format_list = lambda keys: "\n".join([f"- `{key}`" for key in keys])
            embed = discord.Embed(
                title="Catégorie non trouvée. Essayez parmi :",
                description=format_list(self.tags.keys()),
                color=discord.Color.from_rgb(47, 49, 54)
            )
            embed.set_footer(text=ctx.command.usage)
            message = await ctx.send(embed=embed)
            return await misc.delete_with_emote(ctx, message)
        if query is None or query == "list":
            # Show every tag of the category with its description.
            format_list = lambda tags_values: "\n".join([f"- `{tag.get('name')}` : {tag.get('description')}" for tag in tags_values])
            message = await ctx.channel.send(embed=discord.Embed(title=f"Voici les tags de la catégorie `{category}` :",
                                                                 description=format_list(category_tags.values()),
                                                                 color=discord.Color.from_rgb(47, 49, 54))
                                             )
            return await misc.delete_with_emote(ctx, message)
        # Lookup by exact name, then by alias.
        tag = category_tags.get(query) or discord.utils.find(lambda tag_: tag_.get('aliases') and query in tag_['aliases'], category_tags.values())
        if tag is None:
            # Try fuzzy-matching the tag name inside the category.
            similors = ((name, SequenceMatcher(None, name, query).ratio()) for name in category_tags.keys())
            similors = sorted(similors, key=lambda couple: couple[1], reverse=True)
            if similors[0][1] > 0.8:
                query = similors[0][0]  # best-matching tag name
                tag = category_tags.get(query)
            else:
                similar_text = f"voulez vous-vous dire `{similors[0][0]}` ? Sinon "
                return await ctx.send(f"Le tag n'a pas été trouvé, {similar_text if similors[0][1] > 0.5 else ''}regardez `/tag list`", delete_after=10)
        message = None
        response = tag.get('response')
        choices = response.get('choices')
        if choices:
            # Several targets defined: ask the author to pick one by reaction.
            reactions = ['0️⃣', '1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣']
            message = await ctx.send("__Choisissez la cible :__\n"+'\n'.join([f"{reactions[i]} - `{choice['choice_name']}`" for i, choice in enumerate(choices)]))
            self.bot.loop.create_task(misc.add_reactions(message, reactions[:len(choices)]))
            try:
                reaction, _ = await self.bot.wait_for('reaction_add', timeout=120, check=lambda react, usr: str(react.emoji) in reactions[:len(choices)] and usr.id == ctx.author.id and react.message.id == message.id)
            except TimeoutError:
                # NOTE(review): wait_for raises asyncio.TimeoutError; before
                # Python 3.11 that is not the builtin TimeoutError — verify.
                return await message.delete()
            try: await message.clear_reactions()
            except: pass
            response = choices[reactions.index(str(reaction.emoji))]
        embed = discord.Embed.from_dict(response.get("embed"))
        embed.color = discord.Color.from_rgb(47, 49, 54)
        embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
        text = f'/tag {category} {query}'
        url = discord.Embed.Empty
        # Show the tag creator in the footer when an author id is recorded.
        creator = await self.bot.fetch_user(tag.get('author')) if tag.get('author') else None
        if creator:
            text += f' • par {creator.name}#{creator.discriminator}'
            url = creator.avatar_url
        embed.set_footer(
            text=text,
            icon_url=url
        )
        if message: await message.edit(embed=embed, content="")
        else: message = await ctx.channel.send(embed=embed)
        try: await ctx.message.delete()  # remove the invoking command message
        except: pass
        try: await misc.delete_with_emote(ctx, message)
        except: pass
def setup(bot):
    """Extension entry point used by discord.py to load this cog."""
    bot.add_cog(Tag(bot))
    bot.logger.info("Extension [tag] loaded successfully.")
| true | true |
1c4962dc7ba607d9e75b274ac8278eb1eb299cef | 1,718 | py | Python | Projects/Online Workouts/w3resource/Basic - Part-II/program-29.py | ivenpoker/Python-Projects | 2975e1bd687ec8dbcc7a4842c13466cb86292679 | [
"MIT"
] | 1 | 2019-09-23T15:51:45.000Z | 2019-09-23T15:51:45.000Z | Projects/Online Workouts/w3resource/Basic - Part-II/program-29.py | ivenpoker/Python-Projects | 2975e1bd687ec8dbcc7a4842c13466cb86292679 | [
"MIT"
] | 5 | 2021-02-08T20:47:19.000Z | 2022-03-12T00:35:44.000Z | Projects/Online Workouts/w3resource/Basic - Part-II/program-29.py | ivenpoker/Python-Projects | 2975e1bd687ec8dbcc7a4842c13466cb86292679 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#############################################################################
# #
# Program purpose: Find common divisor between two numbers in a given #
# pair. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : September 9, 2019 #
# #
#############################################################################
def find_divisor(num: int):
    """Return all positive divisors of `num` in increasing order.

    Bug fix: the original tested `num % x is 0`, which compares object
    identity and only works by accident for CPython's small-int cache;
    `==` is the correct and guaranteed equality test.
    """
    return [x for x in range(1, num + 1) if num % x == 0]
def find_intersections(list_a: list, list_b: list):
    """Return the elements of `list_a` that also appear in `list_b`.

    Order and duplicates follow `list_a`, matching the original behavior.
    Membership is tested against a set built once from `list_b`, replacing
    the original O(len_a * len_b) index loop with an O(len_a + len_b) scan
    (elements are hashable ints in this program).
    """
    lookup = set(list_b)
    return [item for item in list_a if item in lookup]
def _read_int(prompt: str) -> int:
    """Prompt repeatedly until the user enters a valid integer."""
    while True:
        try:
            return int(input(prompt))
        except ValueError as ve:
            print(f"{ve}")


if __name__ == "__main__":
    # Bug fix: the original set `cont = False` after reading the first number
    # and never reset it, so the second `while cont:` loop never executed and
    # the second number was always 0. Using one prompt helper removes the
    # shared flag entirely.
    int_a = _read_int("Enter first number: ")
    int_b = _read_int("Enter second number: ")
    div_a = find_divisor(int_a)
    div_b = find_divisor(int_b)
    print(f"Divisors of {int_a}: {div_a}")
    print(f"Divisors of {int_b}: {div_b}")
    print(f"Common divisors of {int_a} and {int_b}: "
          f"{find_intersections(list_a=div_a, list_b=div_b)}")
| 31.236364 | 77 | 0.438882 | true | true | |
1c49648eb9b542c70be44a372ce24b2211d6407b | 777 | py | Python | yoti_python_sdk/doc_scan/session/retrieve/document_id_photo_response.py | getyoti/python | 3df169145d5c818d0e79743768dde78e482eec9b | [
"MIT"
] | 9 | 2017-11-12T05:38:58.000Z | 2021-08-04T16:33:26.000Z | yoti_python_sdk/doc_scan/session/retrieve/document_id_photo_response.py | getyoti/python | 3df169145d5c818d0e79743768dde78e482eec9b | [
"MIT"
] | 237 | 2017-04-26T09:40:44.000Z | 2022-02-24T10:29:43.000Z | yoti_python_sdk/doc_scan/session/retrieve/document_id_photo_response.py | getyoti/python | 3df169145d5c818d0e79743768dde78e482eec9b | [
"MIT"
] | 9 | 2017-05-02T11:41:44.000Z | 2021-04-28T13:49:20.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from yoti_python_sdk.doc_scan.session.retrieve.media_response import MediaResponse
class DocumentIdPhotoResponse(object):
    """
    Represents the document ID photo response
    """

    def __init__(self, data=None):
        """
        :param data: the data to parse
        :type data: dict or None
        """
        payload = {} if data is None else data
        self.__media = MediaResponse(payload["media"]) if "media" in payload else None

    @property
    def media(self):
        """
        The media object for the document ID photo
        :return: the media
        :rtype: MediaResponse or None
        """
        return self.__media
| 22.852941 | 82 | 0.584299 |
from __future__ import unicode_literals
from yoti_python_sdk.doc_scan.session.retrieve.media_response import MediaResponse
class DocumentIdPhotoResponse(object):
    """Represents the document ID photo response."""
    def __init__(self, data=None):
        """
        :param data: the response payload to parse
        :type data: dict or None
        """
        if data is None:
            data = dict()
        if "media" in data.keys():
            self.__media = MediaResponse(data["media"])
        else:
            self.__media = None
    @property
    def media(self):
        """
        The media object for the document ID photo
        :return: the media
        :rtype: MediaResponse or None
        """
        return self.__media
| true | true |
1c49663cca5de7c6f1eee0f2b738acf05391f261 | 6,920 | py | Python | gcpdiag/queries/logs.py | taylorjstacey/gcpdiag | 84ba1725cd3ed326b8da3e64bdd6569ed7ef20a4 | [
"Apache-2.0"
] | null | null | null | gcpdiag/queries/logs.py | taylorjstacey/gcpdiag | 84ba1725cd3ed326b8da3e64bdd6569ed7ef20a4 | [
"Apache-2.0"
] | null | null | null | gcpdiag/queries/logs.py | taylorjstacey/gcpdiag | 84ba1725cd3ed326b8da3e64bdd6569ed7ef20a4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Queries related to Cloud Logging.
The main functionality is querying log entries, which is supposed to be used as
follows:
1. Call query() with the logs query parameters that you need. This
returns a LogsQuery object which can be used to retrieve the logs later.
2. Call execute_queries() to execute all log query jobs. Similar
queries will be grouped together to minimize the number of required API
calls.
Multiple queries will be done in parallel, while always respecting the
Cloud Logging limit of 60 queries per 60 seconds.
3. Use the entries property on the LogsQuery object to iterate over the fetched
logs. Note that the entries are not guaranteed to be filtered by what was
given in the "filter_str" argument to query(), you will need to filter out
the entries in code as well when iterating over the log entries.
Side note: this module is not called 'logging' to avoid using the same name as
the standard python library for logging.
"""
import concurrent.futures
import dataclasses
import datetime
import logging
from typing import Any, Dict, Mapping, Optional, Sequence, Set, Tuple
import dateutil.parser
import ratelimit
from gcpdiag import caching, config
from gcpdiag.queries import apis
@dataclasses.dataclass
class _LogsQueryJob:
  """A group of log queries that will be executed with a single API call."""
  project_id: str
  resource_type: str
  log_name: str
  # Individual filter expressions from query() calls; OR-combined at execution.
  filters: Set[str]
  # Set by execute_queries(); resolves to the deque of fetched log entries.
  future: Optional[concurrent.futures.Future] = None
class LogsQuery:
  """A log search job that was started with prefetch_logs()."""
  job: _LogsQueryJob

  def __init__(self, job):
    self.job = job

  @property
  def entries(self) -> Sequence:
    """Fetched log entries; blocks until the underlying query job finishes."""
    future = self.job.future
    if not future:
      raise RuntimeError(
          'log query wasn\'t executed. did you forget to call execute_queries()?'
      )
    if future.running():
      logging.info(
          'waiting for logs query results (project: %s, resource type: %s)',
          self.job.project_id, self.job.resource_type)
    return future.result()
# Pending log query jobs, keyed by (project_id, resource_type, log_name).
jobs_todo: Dict[Tuple[str, str, str], _LogsQueryJob] = {}
def query(project_id: str, resource_type: str, log_name: str,
          filter_str: str) -> LogsQuery:
  """Register a logs query for later execution.

  Queries are grouped by (project_id, resource_type, log_name) so that
  similar queries share one API call when execute_queries() runs.
  """
  key = (project_id, resource_type, log_name)
  if key not in jobs_todo:
    jobs_todo[key] = _LogsQueryJob(project_id=project_id,
                                   resource_type=resource_type,
                                   log_name=log_name,
                                   filters=set())
  job = jobs_todo[key]
  job.filters.add(filter_str)
  return LogsQuery(job=job)
@ratelimit.sleep_and_retry
@ratelimit.limits(calls=config.LOGGING_RATELIMIT_REQUESTS,
                  period=config.LOGGING_RATELIMIT_PERIOD_SECONDS)
def _ratelimited_execute(req):
  """Wrapper to req.execute() with rate limiting to avoid hitting quotas.

  sleep_and_retry makes the caller block until a rate-limit slot is free.
  """
  return req.execute(num_retries=config.API_RETRIES)
def _execute_query_job(job: _LogsQueryJob):
  """Run one aggregated logs query and return the entries in a temp deque.

  Builds a single Cloud Logging filter from the job's OR-combined filters,
  then pages through results while enforcing entry-count and runtime limits.
  """
  logging_api = apis.get_api('logging', 'v2', job.project_id)

  # Convert "within" relative time to an absolute timestamp.
  start_time = datetime.datetime.now(
      datetime.timezone.utc) - datetime.timedelta(days=config.WITHIN_DAYS)
  filter_lines = ['timestamp>"%s"' % start_time.isoformat(timespec='seconds')]
  filter_lines.append('resource.type="%s"' % job.resource_type)
  if job.log_name.startswith('log_id('):
    # Special case: log_id(logname)
    # https://cloud.google.com/logging/docs/view/logging-query-language#functions
    filter_lines.append(job.log_name)
  else:
    filter_lines.append('logName="%s"' % job.log_name)
  # OR-combine the individual filters collected by query().
  if len(job.filters) == 1:
    filter_lines.append('(' + next(iter(job.filters)) + ')')
  else:
    filter_lines.append(
        '(' + ' OR '.join(['(' + val + ')' for val in sorted(job.filters)]) +
        ')')
  filter_str = '\n'.join(filter_lines)
  logging.info('searching logs in project %s (resource type: %s)',
               job.project_id, job.resource_type)

  # Fetch all logs and put the results in temporary storage (diskcache.Deque)
  deque = caching.get_tmp_deque('tmp-logs-')
  req = logging_api.entries().list(
      body={
          'resourceNames': [f'projects/{job.project_id}'],
          'filter': filter_str,
          'orderBy': 'timestamp desc',
          'pageSize': config.LOGGING_PAGE_SIZE
      })
  fetched_entries_count = 0
  query_pages = 0
  query_start_time = datetime.datetime.now()
  while req is not None:
    query_pages += 1
    res = _ratelimited_execute(req)
    if 'entries' in res:
      for e in res['entries']:
        fetched_entries_count += 1
        # appendleft so the deque ends up in ascending timestamp order
        # (the API returns newest first).
        deque.appendleft(e)

    # Verify that we aren't above limits, exit otherwise.
    if fetched_entries_count > config.LOGGING_FETCH_MAX_ENTRIES:
      logging.warning(
          'maximum number of log entries (%d) reached (project: %s, query: %s).',
          config.LOGGING_FETCH_MAX_ENTRIES, job.project_id,
          filter_str.replace('\n', ' AND '))
      return deque
    run_time = (datetime.datetime.now() - query_start_time).total_seconds()
    if run_time >= config.LOGGING_FETCH_MAX_TIME_SECONDS:
      logging.warning(
          'maximum query runtime for log query reached (project: %s, query: %s).',
          job.project_id, filter_str.replace('\n', ' AND '))
      return deque
    req = logging_api.entries().list_next(req, res)
    if req is not None:
      logging.info(
          'still fetching logs (project: %s, resource type: %s, max wait: %ds)',
          job.project_id, job.resource_type,
          config.LOGGING_FETCH_MAX_TIME_SECONDS - run_time)

  query_end_time = datetime.datetime.now()
  logging.debug('logging query run time: %s, pages: %d, query: %s',
                query_end_time - query_start_time, query_pages,
                filter_str.replace('\n', ' AND '))
  return deque
def execute_queries(executor: concurrent.futures.Executor):
  """Submit all pending log query jobs to the executor and clear the queue."""
  global jobs_todo
  pending, jobs_todo = jobs_todo, {}
  for job in pending.values():
    job.future = executor.submit(_execute_query_job, job)
def log_entry_timestamp_str(log_entry: Mapping[str, Any]):
  """Return the entry's receive time as a local-time ISO string (second precision)."""
  # Use receiveTimestamp so that we don't have any time synchronization issues
  # (i.e. don't trust the timestamp field)
  t = dateutil.parser.parse(log_entry['receiveTimestamp'])
  return t.astimezone().isoformat(sep=' ', timespec='seconds')
| 35.854922 | 82 | 0.702601 |
import concurrent.futures
import dataclasses
import datetime
import logging
from typing import Any, Dict, Mapping, Optional, Sequence, Set, Tuple
import dateutil.parser
import ratelimit
from gcpdiag import caching, config
from gcpdiag.queries import apis
@dataclasses.dataclass
class _LogsQueryJob:
  """A batch of similar log queries executed together in one API call."""
  project_id: str
  resource_type: str
  log_name: str
  # Filter expressions accumulated from query() calls; OR-combined when run.
  filters: Set[str]
  # Populated by execute_queries(); yields the fetched entries.
  future: Optional[concurrent.futures.Future] = None
class LogsQuery:
  """Handle for a scheduled log search; results come from `entries`."""
  job: _LogsQueryJob

  def __init__(self, job):
    self.job = job

  @property
  def entries(self) -> Sequence:
    """Return the fetched entries, waiting for the job if still running."""
    if not self.job.future:
      raise RuntimeError(
          'log query wasn\'t executed. did you forget to call execute_queries()?'
      )
    if self.job.future.running():
      logging.info(
          'waiting for logs query results (project: %s, resource type: %s)',
          self.job.project_id, self.job.resource_type)
    return self.job.future.result()
# Queue of not-yet-executed jobs, keyed by (project_id, resource_type, log_name).
jobs_todo: Dict[Tuple[str, str, str], _LogsQueryJob] = {}
def query(project_id: str, resource_type: str, log_name: str,
          filter_str: str) -> LogsQuery:
  """Schedule a logs query, coalescing it with similar pending queries."""
  grouping = (project_id, resource_type, log_name)
  try:
    job = jobs_todo[grouping]
  except KeyError:
    job = jobs_todo[grouping] = _LogsQueryJob(project_id=project_id,
                                              resource_type=resource_type,
                                              log_name=log_name,
                                              filters=set())
  job.filters.add(filter_str)
  return LogsQuery(job=job)
@ratelimit.sleep_and_retry
@ratelimit.limits(calls=config.LOGGING_RATELIMIT_REQUESTS,
                  period=config.LOGGING_RATELIMIT_PERIOD_SECONDS)
def _ratelimited_execute(req):
  """Execute an API request under the Cloud Logging rate limit (blocks when over)."""
  return req.execute(num_retries=config.API_RETRIES)
def _execute_query_job(job: _LogsQueryJob):
  """Execute one aggregated logs query; return entries in a temporary deque.

  Limits enforced while paging: config.LOGGING_FETCH_MAX_ENTRIES and
  config.LOGGING_FETCH_MAX_TIME_SECONDS.
  """
  logging_api = apis.get_api('logging', 'v2', job.project_id)
  # Convert "within" relative time to an absolute timestamp.
  start_time = datetime.datetime.now(
      datetime.timezone.utc) - datetime.timedelta(days=config.WITHIN_DAYS)
  filter_lines = ['timestamp>"%s"' % start_time.isoformat(timespec='seconds')]
  filter_lines.append('resource.type="%s"' % job.resource_type)
  if job.log_name.startswith('log_id('):
    # Special case: log_id(logname)
    # https://cloud.google.com/logging/docs/view/logging-query-language#functions
    filter_lines.append(job.log_name)
  else:
    filter_lines.append('logName="%s"' % job.log_name)
  # OR-combine all filters collected for this job.
  if len(job.filters) == 1:
    filter_lines.append('(' + next(iter(job.filters)) + ')')
  else:
    filter_lines.append(
        '(' + ' OR '.join(['(' + val + ')' for val in sorted(job.filters)]) +
        ')')
  filter_str = '\n'.join(filter_lines)
  logging.info('searching logs in project %s (resource type: %s)',
               job.project_id, job.resource_type)
  # Fetch all logs and put the results in temporary storage (diskcache.Deque)
  deque = caching.get_tmp_deque('tmp-logs-')
  req = logging_api.entries().list(
      body={
          'resourceNames': [f'projects/{job.project_id}'],
          'filter': filter_str,
          'orderBy': 'timestamp desc',
          'pageSize': config.LOGGING_PAGE_SIZE
      })
  fetched_entries_count = 0
  query_pages = 0
  query_start_time = datetime.datetime.now()
  while req is not None:
    query_pages += 1
    res = _ratelimited_execute(req)
    if 'entries' in res:
      for e in res['entries']:
        fetched_entries_count += 1
        # appendleft: the API returns newest first, so the deque ends ascending.
        deque.appendleft(e)
    # Verify that we aren't above limits, exit otherwise.
    if fetched_entries_count > config.LOGGING_FETCH_MAX_ENTRIES:
      logging.warning(
          'maximum number of log entries (%d) reached (project: %s, query: %s).',
          config.LOGGING_FETCH_MAX_ENTRIES, job.project_id,
          filter_str.replace('\n', ' AND '))
      return deque
    run_time = (datetime.datetime.now() - query_start_time).total_seconds()
    if run_time >= config.LOGGING_FETCH_MAX_TIME_SECONDS:
      logging.warning(
          'maximum query runtime for log query reached (project: %s, query: %s).',
          job.project_id, filter_str.replace('\n', ' AND '))
      return deque
    req = logging_api.entries().list_next(req, res)
    if req is not None:
      logging.info(
          'still fetching logs (project: %s, resource type: %s, max wait: %ds)',
          job.project_id, job.resource_type,
          config.LOGGING_FETCH_MAX_TIME_SECONDS - run_time)
  query_end_time = datetime.datetime.now()
  logging.debug('logging query run time: %s, pages: %d, query: %s',
                query_end_time - query_start_time, query_pages,
                filter_str.replace('\n', ' AND '))
  return deque
def execute_queries(executor: concurrent.futures.Executor):
  """Submit all pending query jobs to `executor` and reset the to-do map.

  Every job currently queued in the module-level `jobs_todo` dict is handed
  to the executor; the resulting Future is stored on the job itself
  (`job.future`) so callers can collect results later. `jobs_todo` is
  replaced with a fresh dict so jobs added afterwards are not re-submitted.
  """
  global jobs_todo
  # Atomically take ownership of the queued jobs and start a fresh queue.
  pending, jobs_todo = jobs_todo, {}
  for queued_job in pending.values():
    queued_job.future = executor.submit(_execute_query_job, queued_job)
def log_entry_timestamp_str(log_entry: Mapping[str, Any]):
  """Return the entry's receive time as a local 'YYYY-MM-DD HH:MM:SS' string.

  Deliberately parses `receiveTimestamp` (when the logging backend received
  the entry) instead of the entry's own `timestamp` field.
  """
  received = dateutil.parser.parse(log_entry['receiveTimestamp'])
  return received.astimezone().isoformat(sep=' ', timespec='seconds')
| true | true |
1c49666b9c4d832f37834fa730f66dc1774b3e18 | 1,174 | py | Python | Adafruit_DHT/__init__.py | HydAu/Adafruit_Python_DHT | 9e8109bb4ab5ec9127e53e792c1f69eddfd2f687 | [
"MIT"
] | 1 | 2015-11-17T15:05:13.000Z | 2015-11-17T15:05:13.000Z | Adafruit_DHT/__init__.py | HydAu/Adafruit_Python_DHT | 9e8109bb4ab5ec9127e53e792c1f69eddfd2f687 | [
"MIT"
] | null | null | null | Adafruit_DHT/__init__.py | HydAu/Adafruit_Python_DHT | 9e8109bb4ab5ec9127e53e792c1f69eddfd2f687 | [
"MIT"
] | 1 | 2016-02-14T11:59:45.000Z | 2016-02-14T11:59:45.000Z | # Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from common import DHT11, DHT22, AM2302, read, read_retry | 55.904762 | 80 | 0.783646 |
from common import DHT11, DHT22, AM2302, read, read_retry | true | true |
1c4966ade42aaa97510f7628a11791f6090266df | 123 | py | Python | game/admin.py | 0xecho/2048-er | 732f9c250f8cb632068a93d4622d9f7d2f65a147 | [
"MIT"
] | 5 | 2021-10-04T15:38:58.000Z | 2021-12-30T07:43:30.000Z | game/admin.py | 0xecho/2048-er | 732f9c250f8cb632068a93d4622d9f7d2f65a147 | [
"MIT"
] | null | null | null | game/admin.py | 0xecho/2048-er | 732f9c250f8cb632068a93d4622d9f7d2f65a147 | [
"MIT"
] | null | null | null | from django.contrib import admin
from . import models
# Register your models here.
admin.site.register(models.Submission) | 20.5 | 38 | 0.804878 | from django.contrib import admin
from . import models
admin.site.register(models.Submission) | true | true |
1c496702676689a5a25c37ec1873b560deec1093 | 18,565 | py | Python | ucscsdk/mometa/license/LicenseDownloader.py | parag-may4/ucscsdk | 2ea762fa070330e3a4e2c21b46b157469555405b | [
"Apache-2.0"
] | 9 | 2016-12-22T08:39:25.000Z | 2019-09-10T15:36:19.000Z | ucscsdk/mometa/license/LicenseDownloader.py | parag-may4/ucscsdk | 2ea762fa070330e3a4e2c21b46b157469555405b | [
"Apache-2.0"
] | 10 | 2017-01-31T06:59:56.000Z | 2021-11-09T09:14:37.000Z | ucscsdk/mometa/license/LicenseDownloader.py | parag-may4/ucscsdk | 2ea762fa070330e3a4e2c21b46b157469555405b | [
"Apache-2.0"
] | 13 | 2016-11-14T07:42:58.000Z | 2022-02-10T17:32:05.000Z | """This module contains the general information for LicenseDownloader ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class LicenseDownloaderConsts():
    """Allowed string values for LicenseDownloader properties.

    Auto-generated constants; each group mirrors the value list declared in
    ``LicenseDownloader.prop_meta`` for the property of the same name
    (e.g. ``ADMIN_STATE_*`` for ``admin_state``, ``PROT_*`` for ``prot``).
    """
    # admin_state values
    ADMIN_STATE_IDLE = "idle"
    ADMIN_STATE_RESTART = "restart"
    # fsm_prev values (previous FSM stage)
    FSM_PREV_DOWNLOAD_BEGIN = "DownloadBegin"
    FSM_PREV_DOWNLOAD_DELETE_LOCAL = "DownloadDeleteLocal"
    FSM_PREV_DOWNLOAD_FAIL = "DownloadFail"
    FSM_PREV_DOWNLOAD_LOCAL = "DownloadLocal"
    FSM_PREV_DOWNLOAD_SUCCESS = "DownloadSuccess"
    FSM_PREV_DOWNLOAD_VALIDATE_LOCAL = "DownloadValidateLocal"
    FSM_PREV_NOP = "nop"
    # fsm_rmt_inv_err_code values (remote invocation error codes)
    FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
    FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
    FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_ERROR = "ERR-DNLD-error"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
    FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
    FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
    FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_DIGEST_VALIDATION_ERROR = "ERR-Digest-Validation-error"
    FSM_RMT_INV_ERR_CODE_ERR_EXEC_GEN_CERT_ERROR = "ERR-Exec-Gen-Cert-error"
    FSM_RMT_INV_ERR_CODE_ERR_EXEC_GET_CA_CERT_ERROR = "ERR-Exec-Get-CA-Cert-error"
    FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
    FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
    FSM_RMT_INV_ERR_CODE_ERR_GET_CA_CERT_ERROR = "ERR-Get-CA-Cert-error"
    FSM_RMT_INV_ERR_CODE_ERR_GET_CERT_ERROR = "ERR-Get-Cert-error"
    FSM_RMT_INV_ERR_CODE_ERR_GET_OUT_DIGET_MESSAGE_ERROR = "ERR-Get-Out-Diget-Message-error"
    FSM_RMT_INV_ERR_CODE_ERR_HTTP_REQUEST_ERROR = "ERR-HTTP-Request-error"
    FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_IPV6_ADDR_CONFIGURED = "ERR-Ipv6-addr-configured"
    FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
    FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
    FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
    FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
    FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
    FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
    FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
    FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
    FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
    FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
    FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
    FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_POLICY_RESOLUTION_IN_PROGRESS = "ERR-Policy-resolution-in-progress"
    FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
    FSM_RMT_INV_ERR_CODE_ERR_UPDATE_VM_IP_MASK_GATEWAY_ERROR = "ERR-Update-VM-IP-Mask-Gateway-error"
    FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
    FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
    FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_CHASSISPACK_UNDER_DG = "ERR-create-chassispack-under-dg"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_HFP_UNDER_DG = "ERR-create-hfp-under-dg"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
    FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
    FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
    FSM_RMT_INV_ERR_CODE_ERR_ESTIMATE_IMPACT_ON_RECONNECT = "ERR-estimate-impact-on-reconnect"
    FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
    FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
    FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
    FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
    FSM_RMT_INV_ERR_CODE_ERR_NFS_DOWN = "ERR-nfs-down"
    FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
    FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
    FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
    FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
    FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
    FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
    FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
    FSM_RMT_INV_ERR_CODE_NONE = "none"
    # fsm_stamp sentinel value
    FSM_STAMP_NEVER = "never"
    # fsm_status values (current FSM stage)
    FSM_STATUS_DOWNLOAD_BEGIN = "DownloadBegin"
    FSM_STATUS_DOWNLOAD_DELETE_LOCAL = "DownloadDeleteLocal"
    FSM_STATUS_DOWNLOAD_FAIL = "DownloadFail"
    FSM_STATUS_DOWNLOAD_LOCAL = "DownloadLocal"
    FSM_STATUS_DOWNLOAD_SUCCESS = "DownloadSuccess"
    FSM_STATUS_DOWNLOAD_VALIDATE_LOCAL = "DownloadValidateLocal"
    FSM_STATUS_NOP = "nop"
    # prot values (transfer protocol)
    PROT_FTP = "ftp"
    PROT_LOCAL = "local"
    PROT_SCP = "scp"
    PROT_SFTP = "sftp"
    PROT_TFTP = "tftp"
    # transfer_state values
    TRANSFER_STATE_DOWNLOADED = "downloaded"
    TRANSFER_STATE_DOWNLOADING = "downloading"
    TRANSFER_STATE_FAILED = "failed"
    TRANSFER_STATE_INIT = "init"
class LicenseDownloader(ManagedObject):
    """Managed object modeling a license-file download (licenseDownloader).

    Auto-generated model class. ``mo_meta`` carries the class-level metadata,
    ``prop_meta`` describes every property (XML attribute name, type, minimum
    version, access, constraints and allowed values — see
    LicenseDownloaderConsts), and ``prop_map`` maps XML attribute names back
    to the Python attribute names used on instances.
    """

    consts = LicenseDownloaderConsts()
    # Properties that form part of the object's relative name (rn).
    naming_props = set([u'fileName'])

    # Class-level metadata consumed by the SDK's MO machinery
    # (xml class id "licenseDownloader", rn pattern "dnld-[file_name]",
    # parent/child class lists and the supported verbs).
    mo_meta = MoMeta("LicenseDownloader", "licenseDownloader", "dnld-[file_name]", VersionMeta.Version111a, "InputOutput", 0x7ff, [], ["admin"], [u'licenseEp'], [u'eventInst', u'faultInst', u'licenseDownloaderFsm', u'licenseDownloaderFsmTask', u'licenseProp'], ["Add", "Get", "Remove", "Set"])

    # Per-property metadata keyed by Python attribute name. Generated —
    # do not edit by hand; value lists mirror LicenseDownloaderConsts.
    prop_meta = {
        "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["idle", "restart"], []), 
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []), 
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []), 
        "file_name": MoPropertyMeta("file_name", "fileName", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x8, 1, 64, None, [], []), 
        "fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), 
        "fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["DownloadBegin", "DownloadDeleteLocal", "DownloadFail", "DownloadLocal", "DownloadSuccess", "DownloadValidateLocal", "nop"], []), 
        "fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]), 
        "fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-error", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Digest-Validation-error", "ERR-Exec-Gen-Cert-error", "ERR-Exec-Get-CA-Cert-error", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-Get-CA-Cert-error", "ERR-Get-Cert-error", "ERR-Get-Out-Diget-Message-error", "ERR-HTTP-Request-error", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-Ipv6-addr-configured", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-Policy-resolution-in-progress", "ERR-TOKEN-request-denied", "ERR-Update-VM-IP-Mask-Gateway-error", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-create-chassispack-under-dg", "ERR-create-hfp-under-dg", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-estimate-impact-on-reconnect", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", 
        "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-nfs-down", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-role-set-error", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-password-strength-check", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-set-error", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "none"], ["0-4294967295"]), 
        "fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []), 
        "fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []), 
        "fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), 
        "fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []), 
        "fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["DownloadBegin", "DownloadDeleteLocal", "DownloadFail", "DownloadLocal", "DownloadSuccess", "DownloadValidateLocal", "nop"], []), 
        "fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []), 
        "prot": MoPropertyMeta("prot", "prot", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["ftp", "local", "scp", "sftp", "tftp"], []), 
        "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], []), 
        "remote_path": MoPropertyMeta("remote_path", "remotePath", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], []), 
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []), 
        "server": MoPropertyMeta("server", "server", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x100, 1, 64, None, [], []), 
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []), 
        "transfer_state": MoPropertyMeta("transfer_state", "transferState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["downloaded", "downloading", "failed", "init"], []), 
        "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x400, 0, 510, None, [], []), 
    }

    # XML attribute name -> Python attribute name.
    prop_map = {
        "adminState": "admin_state", 
        "childAction": "child_action", 
        "dn": "dn", 
        "fileName": "file_name", 
        "fsmDescr": "fsm_descr", 
        "fsmPrev": "fsm_prev", 
        "fsmProgr": "fsm_progr", 
        "fsmRmtInvErrCode": "fsm_rmt_inv_err_code", 
        "fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr", 
        "fsmRmtInvRslt": "fsm_rmt_inv_rslt", 
        "fsmStageDescr": "fsm_stage_descr", 
        "fsmStamp": "fsm_stamp", 
        "fsmStatus": "fsm_status", 
        "fsmTry": "fsm_try", 
        "prot": "prot", 
        "pwd": "pwd", 
        "remotePath": "remote_path", 
        "rn": "rn", 
        "server": "server", 
        "status": "status", 
        "transferState": "transfer_state", 
        "user": "user", 
    }

    def __init__(self, parent_mo_or_dn, file_name, **kwargs):
        """Create a LicenseDownloader under the given parent MO or dn.

        `file_name` is the naming property (appears in the rn as
        "dnld-[file_name]"). All other properties default to None and may be
        supplied via **kwargs; ManagedObject.__init__ applies them.
        """
        self._dirty_mask = 0
        self.file_name = file_name
        self.admin_state = None
        self.child_action = None
        self.fsm_descr = None
        self.fsm_prev = None
        self.fsm_progr = None
        self.fsm_rmt_inv_err_code = None
        self.fsm_rmt_inv_err_descr = None
        self.fsm_rmt_inv_rslt = None
        self.fsm_stage_descr = None
        self.fsm_stamp = None
        self.fsm_status = None
        self.fsm_try = None
        self.prot = None
        self.pwd = None
        self.remote_path = None
        self.server = None
        self.status = None
        self.transfer_state = None
        self.user = None

        ManagedObject.__init__(self, "LicenseDownloader", parent_mo_or_dn, **kwargs)
| 86.348837 | 2,742 | 0.752707 |
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class LicenseDownloaderConsts():
ADMIN_STATE_IDLE = "idle"
ADMIN_STATE_RESTART = "restart"
FSM_PREV_DOWNLOAD_BEGIN = "DownloadBegin"
FSM_PREV_DOWNLOAD_DELETE_LOCAL = "DownloadDeleteLocal"
FSM_PREV_DOWNLOAD_FAIL = "DownloadFail"
FSM_PREV_DOWNLOAD_LOCAL = "DownloadLocal"
FSM_PREV_DOWNLOAD_SUCCESS = "DownloadSuccess"
FSM_PREV_DOWNLOAD_VALIDATE_LOCAL = "DownloadValidateLocal"
FSM_PREV_NOP = "nop"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_ERROR = "ERR-DNLD-error"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_DIGEST_VALIDATION_ERROR = "ERR-Digest-Validation-error"
FSM_RMT_INV_ERR_CODE_ERR_EXEC_GEN_CERT_ERROR = "ERR-Exec-Gen-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_EXEC_GET_CA_CERT_ERROR = "ERR-Exec-Get-CA-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
FSM_RMT_INV_ERR_CODE_ERR_GET_CA_CERT_ERROR = "ERR-Get-CA-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_CERT_ERROR = "ERR-Get-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_OUT_DIGET_MESSAGE_ERROR = "ERR-Get-Out-Diget-Message-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_REQUEST_ERROR = "ERR-HTTP-Request-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_IPV6_ADDR_CONFIGURED = "ERR-Ipv6-addr-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_POLICY_RESOLUTION_IN_PROGRESS = "ERR-Policy-resolution-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_VM_IP_MASK_GATEWAY_ERROR = "ERR-Update-VM-IP-Mask-Gateway-error"
FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_CHASSISPACK_UNDER_DG = "ERR-create-chassispack-under-dg"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_HFP_UNDER_DG = "ERR-create-hfp-under-dg"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
FSM_RMT_INV_ERR_CODE_ERR_ESTIMATE_IMPACT_ON_RECONNECT = "ERR-estimate-impact-on-reconnect"
FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
FSM_RMT_INV_ERR_CODE_ERR_NFS_DOWN = "ERR-nfs-down"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
FSM_RMT_INV_ERR_CODE_NONE = "none"
FSM_STAMP_NEVER = "never"
FSM_STATUS_DOWNLOAD_BEGIN = "DownloadBegin"
FSM_STATUS_DOWNLOAD_DELETE_LOCAL = "DownloadDeleteLocal"
FSM_STATUS_DOWNLOAD_FAIL = "DownloadFail"
FSM_STATUS_DOWNLOAD_LOCAL = "DownloadLocal"
FSM_STATUS_DOWNLOAD_SUCCESS = "DownloadSuccess"
FSM_STATUS_DOWNLOAD_VALIDATE_LOCAL = "DownloadValidateLocal"
FSM_STATUS_NOP = "nop"
PROT_FTP = "ftp"
PROT_LOCAL = "local"
PROT_SCP = "scp"
PROT_SFTP = "sftp"
PROT_TFTP = "tftp"
TRANSFER_STATE_DOWNLOADED = "downloaded"
TRANSFER_STATE_DOWNLOADING = "downloading"
TRANSFER_STATE_FAILED = "failed"
TRANSFER_STATE_INIT = "init"
class LicenseDownloader(ManagedObject):
    """Auto-generated managed object describing one license-file download job.

    An instance models a single license transfer (protocol, server,
    credentials and FSM progress) with relative name "dnld-[file_name]"
    under the ``licenseEp`` endpoint.
    """

    # Companion constants class (the FSM_*, PROT_* and TRANSFER_STATE_*
    # values defined above this class in the generated module).
    consts = LicenseDownloaderConsts()
    # Properties that participate in building the relative name (RN).
    naming_props = set([u'fileName'])

    # Class-level metadata: XML class ids, RN format, minimum version,
    # category, access mask, required privileges, parent classes,
    # child classes, and supported verbs.
    mo_meta = MoMeta("LicenseDownloader", "licenseDownloader", "dnld-[file_name]", VersionMeta.Version111a, "InputOutput", 0x7ff, [], ["admin"], [u'licenseEp'], [u'eventInst', u'faultInst', u'licenseDownloaderFsm', u'licenseDownloaderFsmTask', u'licenseProp'], ["Add", "Get", "Remove", "Set"])

    # Per-property metadata: (python name, XML name, type, version introduced,
    # access level, mask bit, min/max length, validation regex,
    # allowed values, numeric ranges).
    prop_meta = {
        "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["idle", "restart"], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "file_name": MoPropertyMeta("file_name", "fileName", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x8, 1, 64, None, [], []),
        "fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["DownloadBegin", "DownloadDeleteLocal", "DownloadFail", "DownloadLocal", "DownloadSuccess", "DownloadValidateLocal", "nop"], []),
        "fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
        "fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-error", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Digest-Validation-error", "ERR-Exec-Gen-Cert-error", "ERR-Exec-Get-CA-Cert-error", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-Get-CA-Cert-error", "ERR-Get-Cert-error", "ERR-Get-Out-Diget-Message-error", "ERR-HTTP-Request-error", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-Ipv6-addr-configured", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-Policy-resolution-in-progress", "ERR-TOKEN-request-denied", "ERR-Update-VM-IP-Mask-Gateway-error", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-create-chassispack-under-dg", "ERR-create-hfp-under-dg", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-estimate-impact-on-reconnect", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-nfs-down", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-role-set-error", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-password-strength-check", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-set-error", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "none"], ["0-4294967295"]),
        "fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
        "fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
        "fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
        "fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["DownloadBegin", "DownloadDeleteLocal", "DownloadFail", "DownloadLocal", "DownloadSuccess", "DownloadValidateLocal", "nop"], []),
        "fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "prot": MoPropertyMeta("prot", "prot", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["ftp", "local", "scp", "sftp", "tftp"], []),
        "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], []),
        "remote_path": MoPropertyMeta("remote_path", "remotePath", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
        "server": MoPropertyMeta("server", "server", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x100, 1, 64, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "transfer_state": MoPropertyMeta("transfer_state", "transferState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["downloaded", "downloading", "failed", "init"], []),
        "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x400, 0, 510, None, [], []),
    }

    # XML attribute name -> Python property name.
    prop_map = {
        "adminState": "admin_state",
        "childAction": "child_action",
        "dn": "dn",
        "fileName": "file_name",
        "fsmDescr": "fsm_descr",
        "fsmPrev": "fsm_prev",
        "fsmProgr": "fsm_progr",
        "fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
        "fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
        "fsmRmtInvRslt": "fsm_rmt_inv_rslt",
        "fsmStageDescr": "fsm_stage_descr",
        "fsmStamp": "fsm_stamp",
        "fsmStatus": "fsm_status",
        "fsmTry": "fsm_try",
        "prot": "prot",
        "pwd": "pwd",
        "remotePath": "remote_path",
        "rn": "rn",
        "server": "server",
        "status": "status",
        "transferState": "transfer_state",
        "user": "user",
    }

    def __init__(self, parent_mo_or_dn, file_name, **kwargs):
        """Create a LicenseDownloader under *parent_mo_or_dn* named by *file_name*."""
        # Bitmask of locally modified properties; maintained by ManagedObject.
        self._dirty_mask = 0
        self.file_name = file_name
        # All remaining properties start unset; they are populated from the
        # server response or via **kwargs handled by ManagedObject.__init__.
        self.admin_state = None
        self.child_action = None
        self.fsm_descr = None
        self.fsm_prev = None
        self.fsm_progr = None
        self.fsm_rmt_inv_err_code = None
        self.fsm_rmt_inv_err_descr = None
        self.fsm_rmt_inv_rslt = None
        self.fsm_stage_descr = None
        self.fsm_stamp = None
        self.fsm_status = None
        self.fsm_try = None
        self.prot = None
        self.pwd = None
        self.remote_path = None
        self.server = None
        self.status = None
        self.transfer_state = None
        self.user = None

        ManagedObject.__init__(self, "LicenseDownloader", parent_mo_or_dn, **kwargs)
| true | true |
1c496867149d9c74d5f66efd40cf073fe0da023f | 22,149 | py | Python | critiquebrainz/frontend/views/review.py | akshaaatt/critiquebrainz | 39184152af5f23adaa991c4b43ecbbb6f086f809 | [
"Apache-2.0"
] | 70 | 2015-03-10T00:08:21.000Z | 2022-02-20T05:36:53.000Z | critiquebrainz/frontend/views/review.py | akshaaatt/critiquebrainz | 39184152af5f23adaa991c4b43ecbbb6f086f809 | [
"Apache-2.0"
] | 279 | 2015-12-08T14:10:45.000Z | 2022-03-29T13:54:23.000Z | critiquebrainz/frontend/views/review.py | akshaaatt/critiquebrainz | 39184152af5f23adaa991c4b43ecbbb6f086f809 | [
"Apache-2.0"
] | 95 | 2015-03-12T21:39:42.000Z | 2022-03-10T00:51:04.000Z | from math import ceil
from brainzutils.musicbrainz_db.exceptions import NoDataFoundException
from flask import Blueprint, render_template, request, redirect, url_for, jsonify
from flask_babel import gettext, get_locale, lazy_gettext
from flask_login import login_required, current_user
from langdetect import detect
from markdown import markdown
from werkzeug.exceptions import Unauthorized, NotFound, Forbidden, BadRequest
import critiquebrainz.db.comment as db_comment
import critiquebrainz.db.moderation_log as db_moderation_log
import critiquebrainz.db.review as db_review
import critiquebrainz.db.spam_report as db_spam_report
import critiquebrainz.db.users as db_users
from critiquebrainz.db import vote as db_vote, exceptions as db_exceptions, revision as db_revision
from critiquebrainz.db.moderation_log import AdminActions
from critiquebrainz.db.review import ENTITY_TYPES
from critiquebrainz.frontend import flash
from critiquebrainz.frontend.external import mbspotify, soundcloud
from critiquebrainz.frontend.external.musicbrainz_db.entities import get_multiple_entities, get_entity_by_id
from critiquebrainz.frontend.forms.comment import CommentEditForm
from critiquebrainz.frontend.forms.log import AdminActionForm
from critiquebrainz.frontend.forms.review import ReviewCreateForm, ReviewEditForm, ReviewReportForm
from critiquebrainz.frontend.login import admin_view
from critiquebrainz.frontend.views import get_avg_rating
from critiquebrainz.utils import side_by_side_diff
# Blueprint for all review-related views; registered by the application factory.
review_bp = Blueprint('review', __name__)

# Page size used when paginating revision listings.
RESULTS_LIMIT = 10
def get_review_or_404(review_id):
    """Fetch a review by its ID, raising HTTP 404 when it does not exist."""
    try:
        return db_review.get_by_id(review_id)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("Can't find a review with ID: %(review_id)s!", review_id=review_id))
@review_bp.route('/')
def browse():
    """Show a paginated grid of published reviews, optionally filtered by entity type."""
    entity_type = request.args.get('entity_type', default=None)
    if entity_type == 'all':
        entity_type = None

    page = int(request.args.get('page', default=1))
    if page < 1:
        return redirect(url_for('.browse'))

    limit = 3 * 9  # 9 rows of 3 cards each
    offset = limit * (page - 1)
    reviews, count = db_review.list_reviews(sort='published_on', limit=limit,
                                            offset=offset, entity_type=entity_type)

    if not reviews:
        # Page is beyond the last one: send the user to the final page.
        if page - 1 > count / limit:
            return redirect(url_for('review.browse', page=int(ceil(count / limit))))
        if not entity_type:
            raise NotFound(gettext("No reviews to display."))

    # Fetch MusicBrainz data for all reviewed entities in a single batch.
    entity_refs = [(str(r["entity_id"]), r["entity_type"]) for r in reviews]
    entities_info = get_multiple_entities(entity_refs)

    return render_template('review/browse.html', reviews=reviews, entities=entities_info,
                           page=page, limit=limit, count=count, entity_type=entity_type)
# TODO(psolanki): Refactor this function to remove PyLint warning.
# pylint: disable=too-many-branches
@review_bp.route('/<uuid:id>/revisions/<int:rev>')
@review_bp.route('/<uuid:id>')
def entity(id, rev=None):
    """Display a single review, optionally at a specific revision.

    Args:
        id: UUID of the review.
        rev: 1-based revision number to display; defaults to the latest.

    Raises:
        NotFound: review/revision missing, or draft viewed by a non-author.
        Forbidden: review is hidden and the viewer is not an admin.
    """
    review = get_review_or_404(id)
    # Not showing review if it isn't published yet and not viewed by author.
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"]:
        # Admins may still view hidden reviews, with a warning banner.
        if not current_user.is_admin():
            raise Forbidden(gettext("Review has been hidden. "
                                    "You need to be an administrator to view it."))
        flash.warn(gettext("Review has been hidden."))

    # Third-party streaming links only exist for release groups.
    spotify_mappings = None
    soundcloud_url = None
    if review["entity_type"] == 'release_group':
        spotify_mappings = mbspotify.mappings(str(review["entity_id"]))
        soundcloud_url = soundcloud.get_url(str(review["entity_id"]))

    count = db_revision.get_count(id)
    if not rev:
        rev = count
    if rev < count:
        flash.info(gettext('You are viewing an old revision, the review has been updated since then.'))
    elif rev > count:
        raise NotFound(gettext("The revision you are looking for does not exist."))

    # Revisions come back newest-first, so offset count - rev selects revision `rev`.
    revision = db_revision.get(id, offset=count - rev)[0]

    if not review["is_draft"] and current_user.is_authenticated:
        # if user is logged in, get their vote for this review
        try:
            vote = db_vote.get(user_id=current_user.id, revision_id=revision['id'])
        except db_exceptions.NoDataFoundException:
            vote = None
    else:  # otherwise set vote to None, its value will not be used
        vote = None

    # Render the revision's Markdown text to HTML (None means rating-only review).
    if revision["text"] is None:
        review["text_html"] = None
    else:
        review["text_html"] = markdown(revision['text'], safe_mode="escape")
    review["rating"] = revision["rating"]

    # Up to three other random reviews by the same author for the sidebar.
    user_all_reviews, _ = db_review.list_reviews(
        user_id=review["user_id"],
        sort="random",
        exclude=[review["id"]],
    )
    other_reviews = user_all_reviews[:3]
    avg_rating = get_avg_rating(review["entity_id"], review["entity_type"])

    # NOTE: `count` is reused here for the comment count.
    comments, count = db_comment.list_comments(review_id=id)
    for comment in comments:
        comment["text_html"] = markdown(comment["last_revision"]["text"], safe_mode="escape")
    comment_form = CommentEditForm(review_id=id)

    return render_template('review/entity/%s.html' % review["entity_type"], review=review,
                           spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url,
                           vote=vote, other_reviews=other_reviews, avg_rating=avg_rating,
                           comment_count=count, comments=comments, comment_form=comment_form)
@review_bp.route('/<uuid:review_id>/revision/<int:revision_id>')
def redirect_to_entity(review_id, revision_id):
    """Resolve a revision ID to its revision number and redirect to the review page."""
    try:
        number = db_revision.get_revision_number(review_id, revision_id)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision you are looking for does not exist."))
    return redirect(url_for('.entity', id=review_id, rev=number))
@review_bp.route('/<uuid:id>/revisions/compare')
def compare(id):
    """Show a side-by-side diff between two revisions of a review."""
    review = get_review_or_404(id)
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    count = db_revision.get_count(id)
    # Default to comparing the latest revision against the previous one.
    old = int(request.args.get('old') or count - 1)
    new = int(request.args.get('new') or count)
    if old > count or new > count:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))
    if old > new:
        # Normalise argument order so "old" is always the earlier revision.
        return redirect(url_for('.compare', id=id, old=new, new=old))

    left = db_revision.get(id, offset=count - old)[0]
    right = db_revision.get(id, offset=count - new)[0]
    left['number'], right['number'] = old, new
    left['text'], right['text'] = side_by_side_diff(left['text'], right['text'])

    return render_template('review/compare.html', review=review, left=left, right=right)
@review_bp.route('/<uuid:id>/revisions')
def revisions(id):
    """List the most recent revisions of a review."""
    review = get_review_or_404(id)
    # Drafts are visible to their author only.
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound("Can't find a review with the specified ID.")
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    try:
        count = db_revision.get_count(id)
        revision_list = db_revision.get(id, limit=RESULTS_LIMIT)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))

    votes = db_revision.get_all_votes(id)
    # Pair each revision with its 1-based revision number, newest first.
    numbered = list(zip(reversed(range(count - RESULTS_LIMIT, count)), revision_list))
    return render_template('review/revisions.html', review=review, results=numbered,
                           count=count, limit=RESULTS_LIMIT, votes=votes)
@review_bp.route('/<uuid:id>/revisions/more')
def revisions_more(id):
    """AJAX endpoint returning the next page of a review's revision list as JSON."""
    review = get_review_or_404(id)
    # Drafts are visible to their author only.
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound("Can't find a review with the specified ID.")
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    page = int(request.args.get('page', default=0))
    offset = page * RESULTS_LIMIT
    try:
        count = db_revision.get_count(id)
        revision_page = db_revision.get(id, limit=RESULTS_LIMIT, offset=offset)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))

    votes = db_revision.get_all_votes(id)
    # Pair each revision with its 1-based revision number, newest first.
    numbers = reversed(range(count - offset - RESULTS_LIMIT, count - offset))
    results = list(zip(numbers, revision_page))

    template = render_template('review/revision_results.html', review=review,
                               results=results, votes=votes, count=count)
    # `more` tells the client whether another page of revisions remains.
    return jsonify(results=template, more=(count - offset - RESULTS_LIMIT) > 0)
@review_bp.route('/write/<entity_type>/<entity_id>/', methods=('GET', 'POST'))
@review_bp.route('/write/')
@login_required
def create(entity_type=None, entity_id=None):
    """Create a new review for a MusicBrainz entity.

    GET renders the review form; POST validates and saves the review
    (draft or published). Without an entity in the URL, tries to pick one
    from the query string, otherwise sends the user to the entity search.
    """
    if not (entity_id or entity_type):
        # Legacy-style URLs pass the MBID as a query parameter named after
        # the entity type (e.g. ?release_group=<mbid>).
        for allowed_type in ENTITY_TYPES:
            if mbid := request.args.get(allowed_type):
                entity_type = allowed_type
                entity_id = mbid
                break

        if entity_type:
            return redirect(url_for('.create', entity_type=entity_type, entity_id=entity_id))

        flash.info(gettext("Please choose an entity to review."))
        return redirect(url_for('search.selector', next=url_for('.create')))

    if entity_type not in ENTITY_TYPES:
        raise BadRequest("You can't write reviews about this type of entity.")

    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to write new reviews because your "
                            "account has been blocked by a moderator."))
        return redirect(url_for('user.reviews', user_id=current_user.id))

    # Checking if the user already wrote a review for this entity
    reviews, count = db_review.list_reviews(user_id=current_user.id, entity_id=entity_id, inc_drafts=True, inc_hidden=True)
    review = reviews[0] if count != 0 else None

    if review:
        # Drafts go back to the editor; anything else goes to the existing review.
        if review['is_draft']:
            return redirect(url_for('review.edit', id=review['id']))
        elif review['is_hidden']:
            return redirect(url_for('review.entity', id=review['id']))
        else:
            flash.error(gettext("You have already published a review for this entity"))
            return redirect(url_for('review.entity', id=review["id"]))

    if current_user.is_review_limit_exceeded:
        flash.error(gettext("You have exceeded your limit of reviews per day."))
        return redirect(url_for('user.reviews', user_id=current_user.id))

    form = ReviewCreateForm(default_license_id=current_user.license_choice, default_language=get_locale())

    if form.validate_on_submit():
        is_draft = form.state.data == 'draft'
        # An empty text box means a rating-only review.
        if form.text.data == '':
            form.text.data = None
        review = db_review.create(user_id=current_user.id, entity_id=entity_id, entity_type=entity_type,
                                  text=form.text.data, rating=form.rating.data, license_id=form.license_choice.data,
                                  language=form.language.data, is_draft=is_draft)
        if form.remember_license.data:
            # Persist the chosen license as the user's default for next time.
            db_users.update(current_user.id, user_new_info={
                "license_choice": form.license_choice.data,
            })
        if is_draft:
            flash.success(gettext("Review has been saved!"))
        else:
            flash.success(gettext("Review has been published!"))
        return redirect(url_for('.entity', id=review['id']))

    try:
        entity = get_entity_by_id(entity_id, entity_type)
    except NoDataFoundException:
        raise NotFound(gettext("Sorry, we couldn't find a %s with that MusicBrainz ID." % entity_type))

    if not entity:
        flash.error(gettext("You can only write a review for an entity that exists on MusicBrainz!"))
        return redirect(url_for('search.selector', next=url_for('.create')))

    if entity_type == 'release_group':
        # Release groups get extra streaming links in the form page.
        spotify_mappings = mbspotify.mappings(entity_id)
        soundcloud_url = soundcloud.get_url(entity_id)
        if not form.errors:
            flash.info(gettext("Please provide some text or a rating for this review."))
        # NOTE(review): this early return duplicates the flash/render below
        # for the release-group case — candidates for consolidation.
        return render_template('review/modify/write.html', form=form, entity_type=entity_type, entity=entity,
                               spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url)

    # Non-release-group entities carry either a 'title' or a 'name'.
    entity_title = None
    if 'title' in entity:
        entity_title = entity['title']
    elif 'name' in entity:
        entity_title = entity['name']

    if not form.errors:
        flash.info(gettext("Please provide some text or a rating for this review."))
    return render_template('review/modify/write.html', form=form, entity_type=entity_type,
                           entity_title=entity_title, entity=entity)
@review_bp.route('/<uuid:id>/edit', methods=('GET', 'POST'))
@login_required
def edit(id):
    """Edit an existing review; only the author may do so.

    Raises:
        NotFound: review missing, or a draft/hidden review viewed improperly.
        Unauthorized: the current user is not the author.
        BadRequest: the update violates license/draft transition rules.
    """
    review = get_review_or_404(id)
    if review["is_draft"] and current_user != review["user"]:
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["user"] != current_user:
        raise Unauthorized(gettext("Only author can edit this review."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    form = ReviewEditForm(default_license_id=review["license_id"], default_language=review["language"])
    if not review["is_draft"]:
        # Can't change license if review is published.
        del form.license_choice

    # Check if contents of the review are updated
    if form.text.data == review['text'] and form.rating.data == review['rating']:
        form.errors['edit'] = ["You must edit either text or rating to update the review."]
    elif form.validate_on_submit():
        if review["is_draft"]:
            license_choice = form.license_choice.data
        else:
            # License is frozen once the review is published.
            license_choice = None
        if form.text.data == '':
            form.text.data = None
        try:
            db_review.update(
                review_id=review["id"],
                drafted=review["is_draft"],
                text=form.text.data,
                rating=form.rating.data,
                is_draft=(form.state.data == 'draft'),
                license_id=license_choice,
                language=form.language.data,
            )
        except db_exceptions.BadDataException:
            raise BadRequest(lazy_gettext("Changing license of a published review\
                or converting a published review back to drafts is not allowed."))
        flash.success(gettext("Review has been updated."))
        return redirect(url_for('.entity', id=review["id"]))
    else:
        # GET request (or failed validation): pre-fill the form
        # with the review's current contents.
        form.text.data = review["text"]
        form.rating.data = review["rating"]

    if review["entity_type"] == 'release_group':
        spotify_mappings = mbspotify.mappings(str(review["entity_id"]))
        soundcloud_url = soundcloud.get_url(str(review["entity_id"]))
        # NOTE(review): `entity` here resolves to the module-level view
        # function of that name, not entity data — confirm whether the
        # template actually uses this value.
        return render_template('review/modify/edit.html', form=form, review=review, entity_type=review["entity_type"],
                               entity=entity, spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url)
    return render_template('review/modify/edit.html', form=form, review=review, entity_type=review["entity_type"])
@review_bp.route('/write/get_language', methods=['POST'])
@login_required
def get_language():
    """Guess and return the language code of the posted review text."""
    text = request.form['text']
    return detect(text)
@review_bp.route('/<uuid:id>/delete', methods=['GET', 'POST'])
@login_required
def delete(id):
    """Ask for confirmation (GET) and delete a review (POST)."""
    review = get_review_or_404(id)
    # Only the author or an administrator may delete.
    if review["user"] != current_user and not current_user.is_admin():
        raise Unauthorized(gettext("Only the author or an admin can delete this review."))

    if request.method == 'POST':
        db_review.delete(review["id"])
        flash.success(gettext("Review has been deleted."))
        return redirect(url_for('user.reviews', user_id=current_user.id))

    # GET: show the confirmation page.
    return render_template('review/delete.html', review=review)
@review_bp.route('/<uuid:review_id>/vote', methods=['POST'])
@login_required
def vote_submit(review_id):
    """Record (or overwrite) the current user's vote on a review."""
    review_id = str(review_id)

    # The pressed button determines the vote value: yes -> True, no -> False.
    ballot = True if 'yes' in request.form else (False if 'no' in request.form else None)

    review = get_review_or_404(review_id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    # Reject self-votes, rate-limited users, and blocked users.
    if review["user"] == current_user:
        flash.error(gettext("You cannot rate your own review."))
        return redirect(url_for('.entity', id=review_id))
    if current_user.is_vote_limit_exceeded and not db_users.has_voted(current_user.id, review_id):
        flash.error(gettext("You have exceeded your limit of votes per day."))
        return redirect(url_for('.entity', id=review_id))
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to rate this review because "
                            "your account has been blocked by a moderator."))
        return redirect(url_for('.entity', id=review_id))

    # Overwrites an existing vote on the latest revision, if needed.
    db_vote.submit(
        user_id=current_user.id,
        revision_id=review["last_revision"]["id"],
        vote=ballot,
    )
    flash.success(gettext("You have rated this review!"))
    return redirect(url_for('.entity', id=review_id))
@review_bp.route('/<uuid:id>/vote/delete', methods=['GET'])
@login_required
def vote_delete(id):
    """Remove the current user's vote from a review.

    Raises:
        NotFound: the review does not exist, or is hidden and the viewer
            is not an admin.
    """
    review = get_review_or_404(id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    try:
        vote = db_vote.get(user_id=current_user.id, revision_id=review["last_revision"]["id"])
        db_vote.delete(user_id=vote["user_id"], revision_id=vote["revision_id"])
        # Fix: report success only *after* the deletion actually happened;
        # previously the message was flashed before db_vote.delete() ran,
        # so a failing delete still told the user the vote was removed.
        flash.success(gettext("You have deleted your vote for this review!"))
    except db_exceptions.NoDataFoundException:
        flash.error(gettext("This review is not rated yet."))
    return redirect(url_for('.entity', id=id))
@review_bp.route('/<uuid:id>/report', methods=['GET', 'POST'])
@login_required
def report(id):
    """Let a user report a review as spam; one report per user per revision."""
    review = get_review_or_404(id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    # Authors cannot report their own reviews; blocked users cannot report.
    if review["user"] == current_user:
        flash.error(gettext("You cannot report your own review."))
        return redirect(url_for('.entity', id=id))
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to report this review because "
                            "your account has been blocked by a moderator."))
        return redirect(url_for('.entity', id=id))

    last_revision_id = review["last_revision"]["id"]
    existing_report = db_spam_report.get(current_user.id, last_revision_id)
    if existing_report:
        flash.error(gettext("You have already reported this review."))
        return redirect(url_for('.entity', id=id))

    form = ReviewReportForm()
    if form.validate_on_submit():
        db_spam_report.create(last_revision_id, current_user.id, form.reason.data)
        flash.success(gettext("Review has been reported."))
        return redirect(url_for('.entity', id=id))

    return render_template('review/report.html', review=review, form=form)
@review_bp.route('/<uuid:id>/hide', methods=['GET', 'POST'])
@login_required
@admin_view
def hide(id):
    """Admin action: hide a review and archive its open spam reports."""
    review = get_review_or_404(id)
    if review["is_hidden"]:
        flash.info(gettext("Review is already hidden."))
        return redirect(url_for('.entity', id=review["id"]))

    form = AdminActionForm()
    if form.validate_on_submit():
        db_review.set_hidden_state(review["id"], is_hidden=True)
        db_moderation_log.create(admin_id=current_user.id, action=AdminActions.ACTION_HIDE_REVIEW,
                                 reason=form.reason.data, review_id=review["id"])
        # Archive every spam report now that the review has been dealt with.
        open_reports, _ = db_spam_report.list_reports(review_id=review["id"])
        for spam_report in open_reports:
            db_spam_report.archive(spam_report["user_id"], spam_report["revision_id"])
        flash.success(gettext("Review has been hidden."))
        return redirect(url_for('.entity', id=review["id"]))

    return render_template('log/action.html', review=review, form=form, action=AdminActions.ACTION_HIDE_REVIEW.value)
@review_bp.route('/<uuid:id>/unhide', methods=['GET', 'POST'])
@login_required
@admin_view
def unhide(id):
    """Admin action: make a hidden review visible again."""
    review = get_review_or_404(id)
    if not review["is_hidden"]:
        flash.info(gettext("Review is not hidden."))
        return redirect(url_for('.entity', id=review["id"]))

    form = AdminActionForm()
    if form.validate_on_submit():
        db_review.set_hidden_state(review["id"], is_hidden=False)
        # Log the moderation action with the admin's stated reason.
        db_moderation_log.create(admin_id=current_user.id, action=AdminActions.ACTION_UNHIDE_REVIEW,
                                 reason=form.reason.data, review_id=review["id"])
        flash.success(gettext("Review is not hidden anymore."))
        return redirect(url_for('.entity', id=review["id"]))

    return render_template('log/action.html', review=review, form=form, action=AdminActions.ACTION_UNHIDE_REVIEW.value)
| 45.202041 | 123 | 0.678676 | from math import ceil
from brainzutils.musicbrainz_db.exceptions import NoDataFoundException
from flask import Blueprint, render_template, request, redirect, url_for, jsonify
from flask_babel import gettext, get_locale, lazy_gettext
from flask_login import login_required, current_user
from langdetect import detect
from markdown import markdown
from werkzeug.exceptions import Unauthorized, NotFound, Forbidden, BadRequest
import critiquebrainz.db.comment as db_comment
import critiquebrainz.db.moderation_log as db_moderation_log
import critiquebrainz.db.review as db_review
import critiquebrainz.db.spam_report as db_spam_report
import critiquebrainz.db.users as db_users
from critiquebrainz.db import vote as db_vote, exceptions as db_exceptions, revision as db_revision
from critiquebrainz.db.moderation_log import AdminActions
from critiquebrainz.db.review import ENTITY_TYPES
from critiquebrainz.frontend import flash
from critiquebrainz.frontend.external import mbspotify, soundcloud
from critiquebrainz.frontend.external.musicbrainz_db.entities import get_multiple_entities, get_entity_by_id
from critiquebrainz.frontend.forms.comment import CommentEditForm
from critiquebrainz.frontend.forms.log import AdminActionForm
from critiquebrainz.frontend.forms.review import ReviewCreateForm, ReviewEditForm, ReviewReportForm
from critiquebrainz.frontend.login import admin_view
from critiquebrainz.frontend.views import get_avg_rating
from critiquebrainz.utils import side_by_side_diff
review_bp = Blueprint('review', __name__)
RESULTS_LIMIT = 10
def get_review_or_404(review_id):
try:
review = db_review.get_by_id(review_id)
except db_exceptions.NoDataFoundException:
raise NotFound(gettext("Can't find a review with ID: %(review_id)s!", review_id=review_id))
return review
@review_bp.route('/')
def browse():
entity_type = request.args.get('entity_type', default=None)
if entity_type == 'all':
entity_type = None
page = int(request.args.get('page', default=1))
if page < 1:
return redirect(url_for('.browse'))
limit = 3 * 9 # 9 rows
offset = (page - 1) * limit
reviews, count = db_review.list_reviews(sort='published_on', limit=limit, offset=offset, entity_type=entity_type)
if not reviews:
if page - 1 > count / limit:
return redirect(url_for('review.browse', page=int(ceil(count / limit))))
if not entity_type:
raise NotFound(gettext("No reviews to display."))
# Loading info about entities for reviews
entities = [(str(review["entity_id"]), review["entity_type"]) for review in reviews]
entities_info = get_multiple_entities(entities)
return render_template('review/browse.html', reviews=reviews, entities=entities_info,
page=page, limit=limit, count=count, entity_type=entity_type)
# TODO(psolanki): Refactor this function to remove PyLint warning.
# pylint: disable=too-many-branches
@review_bp.route('/<uuid:id>/revisions/<int:rev>')
@review_bp.route('/<uuid:id>')
def entity(id, rev=None):
    """Display a single review, optionally at a specific revision number.

    Drafts are only visible to their author; hidden reviews only to admins.
    """
    review = get_review_or_404(id)
    # Not showing review if it isn't published yet and not viewed by author.
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"]:
        if not current_user.is_admin():
            raise Forbidden(gettext("Review has been hidden. "
                                    "You need to be an administrator to view it."))
        flash.warn(gettext("Review has been hidden."))
    # Streaming links are only looked up for release groups.
    spotify_mappings = None
    soundcloud_url = None
    if review["entity_type"] == 'release_group':
        spotify_mappings = mbspotify.mappings(str(review["entity_id"]))
        soundcloud_url = soundcloud.get_url(str(review["entity_id"]))
    count = db_revision.get_count(id)
    # Default to the newest revision; viewing an older one shows a notice.
    if not rev:
        rev = count
    if rev < count:
        flash.info(gettext('You are viewing an old revision, the review has been updated since then.'))
    elif rev > count:
        raise NotFound(gettext("The revision you are looking for does not exist."))
    # Revisions come back newest-first, so offset = count - revision number.
    revision = db_revision.get(id, offset=count - rev)[0]
    if not review["is_draft"] and current_user.is_authenticated:
        # if user is logged in, get their vote for this review
        try:
            vote = db_vote.get(user_id=current_user.id, revision_id=revision['id'])
        except db_exceptions.NoDataFoundException:
            vote = None
    else:  # otherwise set vote to None, its value will not be used
        vote = None
    if revision["text"] is None:
        review["text_html"] = None
    else:
        review["text_html"] = markdown(revision['text'], safe_mode="escape")
    review["rating"] = revision["rating"]
    # Up to three random other reviews by the same author, for the sidebar.
    user_all_reviews, _ = db_review.list_reviews(
        user_id=review["user_id"],
        sort="random",
        exclude=[review["id"]],
    )
    other_reviews = user_all_reviews[:3]
    avg_rating = get_avg_rating(review["entity_id"], review["entity_type"])
    comments, count = db_comment.list_comments(review_id=id)
    for comment in comments:
        comment["text_html"] = markdown(comment["last_revision"]["text"], safe_mode="escape")
    comment_form = CommentEditForm(review_id=id)
    return render_template('review/entity/%s.html' % review["entity_type"], review=review,
                           spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url,
                           vote=vote, other_reviews=other_reviews, avg_rating=avg_rating,
                           comment_count=count, comments=comments, comment_form=comment_form)
@review_bp.route('/<uuid:review_id>/revision/<int:revision_id>')
def redirect_to_entity(review_id, revision_id):
    """Resolve a revision ID to its revision number and redirect to the review page."""
    try:
        rev_num = db_revision.get_revision_number(review_id, revision_id)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision you are looking for does not exist."))
    return redirect(url_for('.entity', id=review_id, rev=rev_num))
@review_bp.route('/<uuid:id>/revisions/compare')
def compare(id):
    """Show a side-by-side diff between two revisions of a review.

    Revision numbers come from the ``old``/``new`` query parameters and
    default to the last two revisions.
    """
    review = get_review_or_404(id)
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    count = db_revision.get_count(id)
    old, new = int(request.args.get('old') or count - 1), int(request.args.get('new') or count)
    if old > count or new > count:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))
    if old > new:
        # Normalize argument order so "old" is always the earlier revision.
        return redirect(url_for('.compare', id=id, old=new, new=old))
    # Revisions are fetched newest-first, so offset = count - revision number.
    left = db_revision.get(id, offset=count - old)[0]
    right = db_revision.get(id, offset=count - new)[0]
    left['number'], right['number'] = old, new
    left['text'], right['text'] = side_by_side_diff(left['text'], right['text'])
    return render_template('review/compare.html', review=review, left=left, right=right)
@review_bp.route('/<uuid:id>/revisions')
def revisions(id):
    """Show the first page of a review's revision history.

    Drafts are only visible to their author; hidden reviews only to admins.
    """
    review = get_review_or_404(id)
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        # gettext added for consistency with the other views in this module,
        # which all mark this message for translation.
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    try:
        count = db_revision.get_count(id)
        revisions = db_revision.get(id, limit=RESULTS_LIMIT)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))
    votes = db_revision.get_all_votes(id)
    # Pair each fetched revision with its (descending) revision number.
    results = list(zip(reversed(range(count - RESULTS_LIMIT, count)), revisions))
    return render_template('review/revisions.html', review=review, results=results,
                           count=count, limit=RESULTS_LIMIT, votes=votes)
@review_bp.route('/<uuid:id>/revisions/more')
def revisions_more(id):
    """Return one further page of revision history as JSON (AJAX endpoint).

    The ``page`` query parameter is 0-based; each page holds RESULTS_LIMIT
    revisions.
    """
    review = get_review_or_404(id)
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        # gettext added for consistency with the other views in this module,
        # which all mark this message for translation.
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    page = int(request.args.get('page', default=0))
    offset = page * RESULTS_LIMIT
    try:
        count = db_revision.get_count(id)
        revisions = db_revision.get(id, limit=RESULTS_LIMIT, offset=offset)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))
    votes = db_revision.get_all_votes(id)
    # Pair each fetched revision with its (descending) revision number.
    results = list(zip(reversed(range(count - offset - RESULTS_LIMIT, count - offset)), revisions))
    template = render_template('review/revision_results.html', review=review, results=results, votes=votes, count=count)
    # "more" tells the client whether another page exists after this one.
    return jsonify(results=template, more=(count - offset - RESULTS_LIMIT) > 0)
@review_bp.route('/write/<entity_type>/<entity_id>/', methods=('GET', 'POST'))
@review_bp.route('/write/')
@login_required
def create(entity_type=None, entity_id=None):
    """Write a new review for a MusicBrainz entity.

    Without an entity in the URL, tries to pick one up from the query string
    (``?release_group=<mbid>`` etc.), otherwise sends the user to the entity
    search page. Enforces one review per user per entity, the daily review
    limit, and the blocked-user restriction.
    """
    if not (entity_id or entity_type):
        # No entity in the URL: look for one as a query parameter keyed by
        # its type (requires Python 3.8+ for the walrus operator).
        for allowed_type in ENTITY_TYPES:
            if mbid := request.args.get(allowed_type):
                entity_type = allowed_type
                entity_id = mbid
                break
        if entity_type:
            return redirect(url_for('.create', entity_type=entity_type, entity_id=entity_id))
        flash.info(gettext("Please choose an entity to review."))
        return redirect(url_for('search.selector', next=url_for('.create')))
    if entity_type not in ENTITY_TYPES:
        raise BadRequest("You can't write reviews about this type of entity.")
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to write new reviews because your "
                            "account has been blocked by a moderator."))
        return redirect(url_for('user.reviews', user_id=current_user.id))
    # Checking if the user already wrote a review for this entity
    reviews, count = db_review.list_reviews(user_id=current_user.id, entity_id=entity_id, inc_drafts=True, inc_hidden=True)
    review = reviews[0] if count != 0 else None
    if review:
        # Drafts resume editing; hidden/published reviews redirect to display.
        if review['is_draft']:
            return redirect(url_for('review.edit', id=review['id']))
        elif review['is_hidden']:
            return redirect(url_for('review.entity', id=review['id']))
        else:
            flash.error(gettext("You have already published a review for this entity"))
            return redirect(url_for('review.entity', id=review["id"]))
    if current_user.is_review_limit_exceeded:
        flash.error(gettext("You have exceeded your limit of reviews per day."))
        return redirect(url_for('user.reviews', user_id=current_user.id))
    form = ReviewCreateForm(default_license_id=current_user.license_choice, default_language=get_locale())
    if form.validate_on_submit():
        is_draft = form.state.data == 'draft'
        # Store empty text as NULL rather than an empty string.
        if form.text.data == '':
            form.text.data = None
        review = db_review.create(user_id=current_user.id, entity_id=entity_id, entity_type=entity_type,
                                  text=form.text.data, rating=form.rating.data, license_id=form.license_choice.data,
                                  language=form.language.data, is_draft=is_draft)
        if form.remember_license.data:
            # Persist the chosen license as the user's default for next time.
            db_users.update(current_user.id, user_new_info={
                "license_choice": form.license_choice.data,
            })
        if is_draft:
            flash.success(gettext("Review has been saved!"))
        else:
            flash.success(gettext("Review has been published!"))
        return redirect(url_for('.entity', id=review['id']))
    # GET request (or failed validation): render the writing form.
    try:
        entity = get_entity_by_id(entity_id, entity_type)
    except NoDataFoundException:
        raise NotFound(gettext("Sorry, we couldn't find a %s with that MusicBrainz ID." % entity_type))
    if not entity:
        flash.error(gettext("You can only write a review for an entity that exists on MusicBrainz!"))
        return redirect(url_for('search.selector', next=url_for('.create')))
    if entity_type == 'release_group':
        spotify_mappings = mbspotify.mappings(entity_id)
        soundcloud_url = soundcloud.get_url(entity_id)
        # NOTE(review): this fires on plain GET requests too (no errors yet) —
        # presumably intended only after a failed submit; confirm.
        if not form.errors:
            flash.info(gettext("Please provide some text or a rating for this review."))
        return render_template('review/modify/write.html', form=form, entity_type=entity_type, entity=entity,
                               spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url)
    # Non-release-group entities: derive a display title from the entity dict.
    entity_title = None
    if 'title' in entity:
        entity_title = entity['title']
    elif 'name' in entity:
        entity_title = entity['name']
    if not form.errors:
        flash.info(gettext("Please provide some text or a rating for this review."))
    return render_template('review/modify/write.html', form=form, entity_type=entity_type,
                           entity_title=entity_title, entity=entity)
@review_bp.route('/<uuid:id>/edit', methods=('GET', 'POST'))
@login_required
def edit(id):
    """Edit an existing review. Only the author may edit; license can only
    be changed while the review is still a draft."""
    review = get_review_or_404(id)
    if review["is_draft"] and current_user != review["user"]:
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["user"] != current_user:
        raise Unauthorized(gettext("Only author can edit this review."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    form = ReviewEditForm(default_license_id=review["license_id"], default_language=review["language"])
    if not review["is_draft"]:
        # Can't change license if review is published.
        del form.license_choice
    # NOTE(review): this comparison also runs on GET, where the form fields
    # are unbound — presumably relied upon to fall through to the `else`
    # branch below; confirm.
    if form.text.data == review['text'] and form.rating.data == review['rating']:
        form.errors['edit'] = ["You must edit either text or rating to update the review."]
    elif form.validate_on_submit():
        if review["is_draft"]:
            license_choice = form.license_choice.data
        else:
            license_choice = None
        # Store empty text as NULL rather than an empty string.
        if form.text.data == '':
            form.text.data = None
        try:
            db_review.update(
                review_id=review["id"],
                drafted=review["is_draft"],
                text=form.text.data,
                rating=form.rating.data,
                is_draft=(form.state.data == 'draft'),
                license_id=license_choice,
                language=form.language.data,
            )
        except db_exceptions.BadDataException:
            raise BadRequest(lazy_gettext("Changing license of a published review\
                or converting a published review back to drafts is not allowed."))
        flash.success(gettext("Review has been updated."))
        return redirect(url_for('.entity', id=review["id"]))
    else:
        # GET request: pre-fill the form with the current revision's content.
        form.text.data = review["text"]
        form.rating.data = review["rating"]
    if review["entity_type"] == 'release_group':
        spotify_mappings = mbspotify.mappings(str(review["entity_id"]))
        soundcloud_url = soundcloud.get_url(str(review["entity_id"]))
        # NOTE(review): `entity` here resolves to the module-level `entity`
        # view function, not entity data — looks like a bug; confirm what the
        # template expects.
        return render_template('review/modify/edit.html', form=form, review=review, entity_type=review["entity_type"],
                               entity=entity, spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url)
    return render_template('review/modify/edit.html', form=form, review=review, entity_type=review["entity_type"])
@review_bp.route('/write/get_language', methods=['POST'])
@login_required
def get_language():
    """Detect the language of the submitted review text (used by the writing form)."""
    text = request.form['text']
    return detect(text)
@review_bp.route('/<uuid:id>/delete', methods=['GET', 'POST'])
@login_required
def delete(id):
    """Ask for confirmation (GET) and delete a review (POST)."""
    review = get_review_or_404(id)
    is_author = review["user"] == current_user
    if not is_author and not current_user.is_admin():
        raise Unauthorized(gettext("Only the author or an admin can delete this review."))
    if request.method == 'POST':
        db_review.delete(review["id"])
        flash.success(gettext("Review has been deleted."))
        return redirect(url_for('user.reviews', user_id=current_user.id))
    return render_template('review/delete.html', review=review)
@review_bp.route('/<uuid:review_id>/vote', methods=['POST'])
@login_required
def vote_submit(review_id):
    """Record the current user's vote on a review's latest revision."""
    review_id = str(review_id)

    # Map the submitted button to a boolean vote; anything else clears it.
    vote = None
    if 'yes' in request.form:
        vote = True
    elif 'no' in request.form:
        vote = False

    review = get_review_or_404(review_id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    if review["user"] == current_user:
        flash.error(gettext("You cannot rate your own review."))
        return redirect(url_for('.entity', id=review_id))
    # The daily vote limit only applies to new votes, not changed ones.
    if current_user.is_vote_limit_exceeded and not db_users.has_voted(current_user.id, review_id):
        flash.error(gettext("You have exceeded your limit of votes per day."))
        return redirect(url_for('.entity', id=review_id))
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to rate this review because "
                            "your account has been blocked by a moderator."))
        return redirect(url_for('.entity', id=review_id))

    db_vote.submit(
        user_id=current_user.id,
        revision_id=review["last_revision"]["id"],
        vote=vote,
    )
    flash.success(gettext("You have rated this review!"))
    return redirect(url_for('.entity', id=review_id))
@review_bp.route('/<uuid:id>/vote/delete', methods=['GET'])
@login_required
def vote_delete(id):
    """Remove the current user's vote from a review's latest revision."""
    review = get_review_or_404(id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    try:
        vote = db_vote.get(user_id=current_user.id, revision_id=review["last_revision"]["id"])
        # NOTE(review): success is flashed before the delete executes; if
        # db_vote.delete() raised, the user would still see a success message.
        flash.success(gettext("You have deleted your vote for this review!"))
        db_vote.delete(user_id=vote["user_id"], revision_id=vote["revision_id"])
    except db_exceptions.NoDataFoundException:
        flash.error(gettext("This review is not rated yet."))
    return redirect(url_for('.entity', id=id))
@review_bp.route('/<uuid:id>/report', methods=['GET', 'POST'])
@login_required
def report(id):
    """Let a user report a review (at most one report per revision per user)."""
    review = get_review_or_404(id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    if review["user"] == current_user:
        flash.error(gettext("You cannot report your own review."))
        return redirect(url_for('.entity', id=id))
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to report this review because "
                            "your account has been blocked by a moderator."))
        return redirect(url_for('.entity', id=id))

    last_revision_id = review["last_revision"]["id"]
    # Reject duplicate reports for the same revision by the same user.
    existing_report = db_spam_report.get(current_user.id, last_revision_id)
    if existing_report:
        flash.error(gettext("You have already reported this review."))
        return redirect(url_for('.entity', id=id))

    form = ReviewReportForm()
    if form.validate_on_submit():
        db_spam_report.create(last_revision_id, current_user.id, form.reason.data)
        flash.success(gettext("Review has been reported."))
        return redirect(url_for('.entity', id=id))
    return render_template('review/report.html', review=review, form=form)
@review_bp.route('/<uuid:id>/hide', methods=['GET', 'POST'])
@login_required
@admin_view
def hide(id):
    """Admin action: hide a review and archive any open spam reports on it."""
    review = get_review_or_404(id)
    if review["is_hidden"]:
        flash.info(gettext("Review is already hidden."))
        return redirect(url_for('.entity', id=review["id"]))
    form = AdminActionForm()
    if form.validate_on_submit():
        db_review.set_hidden_state(review["id"], is_hidden=True)
        # Record the moderation action with the admin's stated reason.
        db_moderation_log.create(admin_id=current_user.id, action=AdminActions.ACTION_HIDE_REVIEW,
                                 reason=form.reason.data, review_id=review["id"])
        # Archive outstanding reports so they leave the moderation queue.
        review_reports, count = db_spam_report.list_reports(review_id=review["id"])  # count unused
        for report in review_reports:
            db_spam_report.archive(report["user_id"], report["revision_id"])
        flash.success(gettext("Review has been hidden."))
        return redirect(url_for('.entity', id=review["id"]))
    # GET request or invalid form: show the moderation-action page.
    return render_template('log/action.html', review=review, form=form, action=AdminActions.ACTION_HIDE_REVIEW.value)
@review_bp.route('/<uuid:id>/unhide', methods=['GET', 'POST'])
@login_required
@admin_view
def unhide(id):
    """Admin action: make a previously hidden review visible again."""
    review = get_review_or_404(id)
    if not review["is_hidden"]:
        flash.info(gettext("Review is not hidden."))
        return redirect(url_for('.entity', id=review["id"]))

    form = AdminActionForm()
    if not form.validate_on_submit():
        # GET request or invalid form: show the moderation-action page.
        return render_template('log/action.html', review=review, form=form,
                               action=AdminActions.ACTION_UNHIDE_REVIEW.value)

    db_review.set_hidden_state(review["id"], is_hidden=False)
    db_moderation_log.create(admin_id=current_user.id, action=AdminActions.ACTION_UNHIDE_REVIEW,
                             reason=form.reason.data, review_id=review["id"])
    flash.success(gettext("Review is not hidden anymore."))
    return redirect(url_for('.entity', id=review["id"]))
| true | true |
1c496954c9ff5125c6093492798868e790e4c9d0 | 1,004 | py | Python | framework/modelhublib/imageconverters/sitkToNumpyConverter.py | modelhub-ai/modelhub-engine | 81e893fb7669ee9912178346efbf828dd8c0410b | [
"MIT"
] | 6 | 2018-10-13T10:11:51.000Z | 2022-02-21T08:28:10.000Z | framework/modelhublib/imageconverters/sitkToNumpyConverter.py | modelhub-ai/modelhub-docker | 81e893fb7669ee9912178346efbf828dd8c0410b | [
"MIT"
] | 34 | 2018-03-06T16:25:10.000Z | 2018-06-26T21:55:13.000Z | framework/modelhublib/imageconverters/sitkToNumpyConverter.py | modelhub-ai/modelhub-engine | 81e893fb7669ee9912178346efbf828dd8c0410b | [
"MIT"
] | 3 | 2019-08-15T18:09:32.000Z | 2022-02-16T07:55:27.000Z | import SimpleITK
import numpy as np
from .imageConverter import ImageConverter
class SitkToNumpyConverter(ImageConverter):
    """
    Converts SimpleITK.Image objects to Numpy arrays.
    """
    def _convert(self, image):
        """
        Args:
            image (SimpleITK.Image): Image object to convert.
        Returns:
            Input image object converted to numpy array with 4 dimensions [batchsize, z/color, height, width]
        Raises:
            IOError if input is not of type SimpleITK.Image or cannot be converted for other reasons.
        """
        if isinstance(image, SimpleITK.Image):
            return self.__convertToNumpy(image)
        else:
            raise IOError("Image is not of type \"SimpleITK.Image\".")
    def __convertToNumpy(self, image):
        """Convert the image's pixel data to a float32 array of rank 4."""
        npArr = SimpleITK.GetArrayFromImage(image)
        # A 2D (single-slice) image gets a singleton z/color axis first.
        if npArr.ndim == 2:
            npArr = npArr[np.newaxis,:]
        # Prepend the batch axis to reach [batch, z/color, height, width].
        npArr = npArr[np.newaxis,:].astype(np.float32)
| 27.888889 | 109 | 0.615538 | import SimpleITK
import numpy as np
from .imageConverter import ImageConverter
class SitkToNumpyConverter(ImageConverter):
    """Converts SimpleITK.Image objects to Numpy arrays."""
    def _convert(self, image):
        """Return *image* as a float32 numpy array; raise IOError for other input types."""
        if isinstance(image, SimpleITK.Image):
            return self.__convertToNumpy(image)
        else:
            raise IOError("Image is not of type \"SimpleITK.Image\".")
    def __convertToNumpy(self, image):
        """Extract pixel data and reshape to [batch, z/color, height, width]."""
        npArr = SimpleITK.GetArrayFromImage(image)
        if npArr.ndim == 2:
            npArr = npArr[np.newaxis,:]
        npArr = npArr[np.newaxis,:].astype(np.float32)
| true | true |
1c49697d08ff8fc6969f3ffb49a5cca3fa09e575 | 6,405 | py | Python | code/05-soz_subgraph.py | akashpattnaik/pre-ictal-similarity | 85f963aa0c6d2d0a6e971ffa005c400e136a0a76 | [
"MIT"
] | null | null | null | code/05-soz_subgraph.py | akashpattnaik/pre-ictal-similarity | 85f963aa0c6d2d0a6e971ffa005c400e136a0a76 | [
"MIT"
] | null | null | null | code/05-soz_subgraph.py | akashpattnaik/pre-ictal-similarity | 85f963aa0c6d2d0a6e971ffa005c400e136a0a76 | [
"MIT"
] | null | null | null | # %%
# %load_ext autoreload
# %autoreload 2
# Imports and environment setup
import numpy as np
import sys
import os
from numpy.core.fromnumeric import sort
import pandas as pd
import json
from scipy.io import loadmat
import matplotlib.pyplot as plt
from tqdm import tqdm
from os.path import join as ospj
from scipy.stats import zscore
import time
from kneed import KneeLocator
from scipy.stats import mannwhitneyu
code_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(ospj(code_path, 'tools'))
from plot_spectrogram import plot_spectrogram
from movmean import movmean
from pull_sz_starts import pull_sz_starts
from pull_patient_localization import pull_patient_localization
from mpl_toolkits.axes_grid1 import make_axes_locatable
from time2ind import time2ind
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from sklearn.decomposition import NMF
from sklearn.metrics.cluster import adjusted_rand_score
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
# Get paths from config file and metadata
with open(ospj(code_path, "config.json")) as f:
    config = json.load(f)
repo_path = config['repositoryPath']
metadata_path = config['metadataPath']
palette = config['lightColors']
DTW_FLAG = config['flags']["DTW_FLAG"]
electrodes = config['electrodes']
bands = config['bands']
data_path = ospj(repo_path, 'data')
figure_path = ospj(repo_path, 'figures')
# Patient/seizure metadata shared across the analysis scripts.
metadata_fname = ospj(metadata_path, "DATA_MASTER.json")
with open(metadata_fname) as f:
    metadata = json.load(f)['PATIENTS']
seizure_metadata = pd.read_excel(ospj(data_path, "seizure_metadata.xlsx"))
# flags
SAVE_PLOT = True
NMF_FLAG = True
FIXED_PREICTAL_SEC = 60 * 30
LEAD_SZ_WINDOW_SEC = (FIXED_PREICTAL_SEC + 60 * 15) # 15 min buffer
def soz_state(H, soz_electrodes, metric="max_all", is_zscore=False):
    '''
    Identify the NMF component ("state") whose spectral power is most
    concentrated in seizure-onset-zone (SOZ) electrodes.

    Parameters
    ----------
    H : np.ndarray, shape (n_components, n_bands * n_electrodes)
        NMF component matrix; each row is flattened (band, electrode) power.
    soz_electrodes : np.ndarray of bool, shape (n_electrodes,)
        True for SOZ electrodes, False for non-SOZ electrodes.
    metric : str
        Currently unused; kept for interface compatibility.
    is_zscore : bool
        If True, z-score each component across electrodes before testing.

    Returns
    -------
    pt_soz_state_resamp : int
        State chosen by the resampling null distribution (stochastic; depends
        on the global NumPy RNG state).
    pt_soz_state_u : int
        State chosen by the maximum Mann-Whitney U statistic (deterministic).
    pct_non_zero : float
        Fraction of zero entries in the Mann-Whitney-chosen component.
        NOTE: despite the name, this is the ratio of *zero* entries.
    var : float
        Maximum across-electrode variance (over bands) of that component.
    '''
    n_components = H.shape[0]
    n_electrodes = soz_electrodes.shape[0]

    # reshape to (component, frequency band, electrode)
    component_arr = np.reshape(H, (n_components, -1, n_electrodes))

    if is_zscore:
        component_z = np.zeros(component_arr.shape)
        for i_comp in range(n_components):
            component_z[i_comp, :, :] = zscore(component_arr[i_comp, :, :], axis=1)
        component_arr = component_z

    n_soz = np.sum(soz_electrodes)

    n_iter = 10000
    u_stats = np.zeros(n_components)
    null_z = np.zeros(n_components)
    for i_comp in range(n_components):
        # Null distribution: mean bandpower of random electrode subsets of
        # size n_soz (sampled with replacement, as in the original version).
        means = np.zeros(n_iter)
        for i_samp in range(n_iter):  # renamed from `iter`, which shadowed the builtin
            means[i_samp] = np.mean(component_arr[i_comp, :, np.random.choice(n_electrodes, n_soz)])
        # append true soz
        means = np.append(means, np.mean(component_arr[i_comp, :, soz_electrodes]))
        # calculate z_score of true soz and save
        null_z[i_comp] = zscore(means)[-1]

        # Mann-Whitney U of SOZ vs non-SOZ power, per frequency band; keep the max.
        sz_u_stats = np.zeros(component_arr.shape[1])
        for i_band in range(component_arr.shape[1]):
            stat, _ = mannwhitneyu(component_arr[i_comp][i_band, soz_electrodes],
                                   component_arr[i_comp][i_band, ~soz_electrodes])
            sz_u_stats[i_band] = stat
        u_stats[i_comp] = np.max(sz_u_stats)

    pt_soz_state_resamp = np.argmax(np.abs(null_z))
    pt_soz_state_u = np.argmax(u_stats)

    pct_non_zero = np.sum(component_arr[pt_soz_state_u, :, :] == 0) / np.size(component_arr[pt_soz_state_u, :, :])
    var = np.max(np.var(component_arr[pt_soz_state_u, :, :], axis=1))
    return pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var
# Electrode localization data (labels, SOZ flags, coordinates, ...) per patient.
patient_localization_mat = loadmat(ospj(metadata_path, 'patient_localization_final.mat'))['patient_localization']
patients, labels, ignore, resect, gm_wm, coords, region, soz = pull_patient_localization(ospj(metadata_path, 'patient_localization_final.mat'))
# %%
# Plot the NMF subgraphs and expression
for index, row in seizure_metadata.iterrows():
# for index, row in patient_cohort.iterrows():
#     if row['Ignore']:
#         continue
    pt = row["Patient"]
    pt_data_path = ospj(data_path, pt)
    sz_num = row["Seizure number"]
    # Skip seizures excluded by earlier pipeline stages.
    remaining_sz_ids = np.load(ospj(pt_data_path, "remaining_sz_ids.npy"))
    if sz_num not in remaining_sz_ids:
        continue
    if row["Seizure category"] == "Other":
        continue
    print("Calculating dissimilarity for seizure {}, {}".format(sz_num, pt))
    # Load pre-computed NMF inputs/outputs for this seizure.
    t_sec = np.load(ospj(pt_data_path, "lead_sz_t_sec_band-{}_elec-{}.npy".format(bands, electrodes)))
    sz_id = np.load(ospj(pt_data_path, "lead_sz_sz_id_band-{}_elec-{}.npy".format(bands, electrodes)))
    W = np.load(ospj(pt_data_path, "nmf_expression_band-{}_elec-{}_sz-{}.npy".format(bands, electrodes, sz_num)))
    H = np.load(ospj(pt_data_path, "nmf_components_band-{}_elec-{}_sz_{}.npy".format(bands, electrodes, sz_num)))
    n_components = H.shape[0]
    # pull and format electrode metadata
    electrodes_mat = loadmat(ospj(pt_data_path, "selected_electrodes_elec-{}.mat".format(electrodes)))
    target_electrode_region_inds = electrodes_mat['targetElectrodesRegionInds'][0]
    pt_index = patients.index(pt)
    sz_starts = pull_sz_starts(pt, metadata)
    # find seizure onset zone and state with most seizure onset zone
    soz_electrodes = np.array(np.squeeze(soz[pt_index][target_electrode_region_inds, :]), dtype=bool)
    pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var = soz_state(H, soz_electrodes)
    seizure_metadata.at[index, 'SOZ Sensitive State (resampling)'] = pt_soz_state_resamp
    seizure_metadata.at[index, 'SOZ Sensitive State (mann-whitney)'] = pt_soz_state_u
    # NOTE(review): the line below duplicates the assignment above — looks
    # like a copy-paste leftover; harmless but worth cleaning up.
    seizure_metadata.at[index, 'SOZ Sensitive State (mann-whitney)'] = pt_soz_state_u
    seizure_metadata.at[index, 'Ratio of non-zero component entries'] = pct_non_zero
    seizure_metadata.at[index, 'Maximum variance across bands'] = var
    np.save(ospj(pt_data_path, "soz_electrodes_band-{}_elec-{}.npy".format(bands, electrodes)), soz_electrodes)
seizure_metadata.to_excel(ospj(data_path, "seizure_metadata_with_soz_subgraph.xlsx"))
| 38.584337 | 143 | 0.735207 |
import numpy as np
import sys
import os
from numpy.core.fromnumeric import sort
import pandas as pd
import json
from scipy.io import loadmat
import matplotlib.pyplot as plt
from tqdm import tqdm
from os.path import join as ospj
from scipy.stats import zscore
import time
from kneed import KneeLocator
from scipy.stats import mannwhitneyu
code_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(ospj(code_path, 'tools'))
from plot_spectrogram import plot_spectrogram
from movmean import movmean
from pull_sz_starts import pull_sz_starts
from pull_patient_localization import pull_patient_localization
from mpl_toolkits.axes_grid1 import make_axes_locatable
from time2ind import time2ind
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from sklearn.decomposition import NMF
from sklearn.metrics.cluster import adjusted_rand_score
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
# Load analysis paths, color palette, and feature-extraction settings.
with open(ospj(code_path, "config.json")) as f:
    config = json.load(f)
repo_path = config['repositoryPath']
metadata_path = config['metadataPath']
palette = config['lightColors']
DTW_FLAG = config['flags']["DTW_FLAG"]
electrodes = config['electrodes']
bands = config['bands']
data_path = ospj(repo_path, 'data')
figure_path = ospj(repo_path, 'figures')
# Patient/seizure metadata shared across the analysis scripts.
metadata_fname = ospj(metadata_path, "DATA_MASTER.json")
with open(metadata_fname) as f:
    metadata = json.load(f)['PATIENTS']
seizure_metadata = pd.read_excel(ospj(data_path, "seizure_metadata.xlsx"))
# Script flags and window sizes (seconds).
SAVE_PLOT = True
NMF_FLAG = True
FIXED_PREICTAL_SEC = 60 * 30
LEAD_SZ_WINDOW_SEC = (FIXED_PREICTAL_SEC + 60 * 15)
def soz_state(H, soz_electrodes, metric="max_all", is_zscore=False):
    """Pick the NMF component whose power is most concentrated in SOZ electrodes.

    Returns (resampling-based state index, Mann-Whitney-based state index,
    fraction of zero entries in the chosen component, max across-electrode
    variance of the chosen component). The first return value is stochastic
    (uses the global NumPy RNG).
    """
    n_components = H.shape[0]
    n_electrodes = soz_electrodes.shape[0]
    # Reshape to (component, frequency band, electrode).
    component_arr = np.reshape(H, (n_components, -1, n_electrodes))
    if is_zscore:
        component_z = np.zeros(component_arr.shape)
        for i_comp in range(n_components):
            component_z[i_comp, :, :] = zscore(component_arr[i_comp, :, :], axis=1)
        component_arr = component_z
    sort_soz_inds = np.argsort(soz_electrodes)  # unused
    n_soz = np.sum(soz_electrodes)
    n_non_soz = n_electrodes - n_soz  # unused
    n_iter = 10000
    u_stats = np.zeros(n_components)
    null_z = np.zeros(n_components)
    for i_comp in range(n_components):
        # Null distribution: mean power of random electrode subsets of size n_soz.
        means = np.zeros(n_iter)
        for iter in range(n_iter):  # NOTE: shadows the builtin `iter`
            means[iter] = np.mean(component_arr[i_comp, :, np.random.choice(n_electrodes, n_soz)])
        means = np.append(means, np.mean(component_arr[i_comp, :, soz_electrodes]))
        null_z[i_comp] = zscore(means)[-1]
        # Mann-Whitney U of SOZ vs non-SOZ power per band; keep the max.
        sz_u_stats = np.zeros(component_arr.shape[1])
        for i in range(component_arr.shape[1]):
            stat, p = mannwhitneyu(component_arr[i_comp][i, soz_electrodes], component_arr[i_comp][i, ~soz_electrodes])
            sz_u_stats[i] = stat
        u_stats[i_comp] = np.max(sz_u_stats)
    pt_soz_state_resamp = np.argmax(np.abs(null_z))
    pt_soz_state_u = np.argmax(u_stats)
    pct_non_zero = np.sum(component_arr[pt_soz_state_u,:,:] == 0) / np.size(component_arr[pt_soz_state_u,:,:])
    var = np.max(np.var(component_arr[pt_soz_state_u,:,:], axis=1))
    return pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var
# Electrode localization data (labels, SOZ flags, coordinates, ...) per patient.
patient_localization_mat = loadmat(ospj(metadata_path, 'patient_localization_final.mat'))['patient_localization']
patients, labels, ignore, resect, gm_wm, coords, region, soz = pull_patient_localization(ospj(metadata_path, 'patient_localization_final.mat'))
# For every usable seizure, find the SOZ-sensitive NMF state and record it.
for index, row in seizure_metadata.iterrows():
    pt = row["Patient"]
    pt_data_path = ospj(data_path, pt)
    sz_num = row["Seizure number"]
    # Skip seizures excluded by earlier pipeline stages.
    remaining_sz_ids = np.load(ospj(pt_data_path, "remaining_sz_ids.npy"))
    if sz_num not in remaining_sz_ids:
        continue
    if row["Seizure category"] == "Other":
        continue
    print("Calculating dissimilarity for seizure {}, {}".format(sz_num, pt))
    # Load pre-computed NMF inputs/outputs for this seizure.
    t_sec = np.load(ospj(pt_data_path, "lead_sz_t_sec_band-{}_elec-{}.npy".format(bands, electrodes)))
    sz_id = np.load(ospj(pt_data_path, "lead_sz_sz_id_band-{}_elec-{}.npy".format(bands, electrodes)))
    W = np.load(ospj(pt_data_path, "nmf_expression_band-{}_elec-{}_sz-{}.npy".format(bands, electrodes, sz_num)))
    H = np.load(ospj(pt_data_path, "nmf_components_band-{}_elec-{}_sz_{}.npy".format(bands, electrodes, sz_num)))
    n_components = H.shape[0]
    electrodes_mat = loadmat(ospj(pt_data_path, "selected_electrodes_elec-{}.mat".format(electrodes)))
    target_electrode_region_inds = electrodes_mat['targetElectrodesRegionInds'][0]
    pt_index = patients.index(pt)
    sz_starts = pull_sz_starts(pt, metadata)
    # SOZ mask for the selected electrodes, then the SOZ-sensitive state.
    soz_electrodes = np.array(np.squeeze(soz[pt_index][target_electrode_region_inds, :]), dtype=bool)
    pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var = soz_state(H, soz_electrodes)
    seizure_metadata.at[index, 'SOZ Sensitive State (resampling)'] = pt_soz_state_resamp
    seizure_metadata.at[index, 'SOZ Sensitive State (mann-whitney)'] = pt_soz_state_u
    # NOTE(review): duplicated assignment (copy-paste leftover); harmless.
    seizure_metadata.at[index, 'SOZ Sensitive State (mann-whitney)'] = pt_soz_state_u
    seizure_metadata.at[index, 'Ratio of non-zero component entries'] = pct_non_zero
    seizure_metadata.at[index, 'Maximum variance across bands'] = var
    np.save(ospj(pt_data_path, "soz_electrodes_band-{}_elec-{}.npy".format(bands, electrodes)), soz_electrodes)
seizure_metadata.to_excel(ospj(data_path, "seizure_metadata_with_soz_subgraph.xlsx"))
| true | true |
1c4969f2e22ab20faedf093583573663bfaa39a7 | 2,013 | py | Python | services/backend/thiamsu/forms.py | LKKTGB/thiamsu | f08d453c6b35c801c57f2501e42565da56900814 | [
"MIT"
] | 10 | 2020-08-25T08:57:36.000Z | 2021-12-31T01:04:18.000Z | services/backend/thiamsu/forms.py | LKKTGB/thiamsu | f08d453c6b35c801c57f2501e42565da56900814 | [
"MIT"
] | 13 | 2020-04-26T08:41:30.000Z | 2021-06-10T17:34:25.000Z | services/backend/thiamsu/forms.py | LKKTGB/thiamsu | f08d453c6b35c801c57f2501e42565da56900814 | [
"MIT"
] | 1 | 2020-09-06T17:54:13.000Z | 2020-09-06T17:54:13.000Z | from django import forms
from django.forms import formset_factory
from django.forms.formsets import BaseFormSet
from django.forms.widgets import HiddenInput
from thiamsu.utils import get_youtube_id_from_url
class SongAdminForm(forms.ModelForm):
    """Admin form for Song objects; validates that the YouTube URL is well-formed."""
    def clean_youtube_url(self):
        """Reject URLs from which no YouTube video ID can be extracted."""
        youtube_id = get_youtube_id_from_url(self.cleaned_data["youtube_url"])
        if not youtube_id:
            raise forms.ValidationError(
                "Invalid URL: %(url)s",
                code="invalid youtube url",
                params={"url": self.cleaned_data["youtube_url"]},
            )
        return self.cleaned_data["youtube_url"]
class TranslationForm(forms.Form):
    """One line of a lyrics translation; line number and language are fixed."""
    line_no = forms.IntegerField(widget=forms.HiddenInput)
    lang = forms.CharField(max_length=5, widget=forms.HiddenInput)
    content = forms.CharField(max_length=1000, required=False)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The hidden identifying fields must not be editable by the client.
        self.fields["line_no"].widget.attrs["readonly"] = True
        self.fields["lang"].widget.attrs["readonly"] = True
class BaseTranslationFormSet(BaseFormSet):
    """Formset of TranslationForms labeled with the original lyric lines."""
    def __init__(self, original_lyrics=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # set original lyric as label of each line
        if not original_lyrics or len(original_lyrics) != len(self.forms):
            return
        for i, form in enumerate(self.forms):
            form.fields["content"].label = original_lyrics[i]
# Formset class used by the translation views; no extra blank forms.
TranslationFormSet = formset_factory(
    TranslationForm, formset=BaseTranslationFormSet, extra=0
)
class SongReadonlyForm(forms.Form):
    """Single checkbox toggling a song's read-only state."""
    readonly = forms.BooleanField(required=False)
class UserFavoriteSongForm(forms.Form):
    """Hidden form for adding (POST) or removing (DELETE) a favorite song."""
    method = forms.ChoiceField(choices=[(m, m) for m in ("POST", "DELETE")])
    song_id = forms.IntegerField()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Both fields are set by the template, not typed by the user.
        self.fields["method"].widget = HiddenInput()
        self.fields["song_id"].widget = HiddenInput()
| 33.55 | 78 | 0.682067 | from django import forms
from django.forms import formset_factory
from django.forms.formsets import BaseFormSet
from django.forms.widgets import HiddenInput
from thiamsu.utils import get_youtube_id_from_url
class SongAdminForm(forms.ModelForm):
    """Admin form for songs; rejects YouTube URLs that carry no video id."""
    def clean_youtube_url(self):
        # Valid only when a video id can be extracted from the URL.
        youtube_id = get_youtube_id_from_url(self.cleaned_data["youtube_url"])
        if not youtube_id:
            raise forms.ValidationError(
                "Invalid URL: %(url)s",
                code="invalid youtube url",
                params={"url": self.cleaned_data["youtube_url"]},
            )
        return self.cleaned_data["youtube_url"]
class TranslationForm(forms.Form):
    """One translated lyric line; line_no and lang are hidden bookkeeping fields."""
    line_no = forms.IntegerField(widget=forms.HiddenInput)
    lang = forms.CharField(max_length=5, widget=forms.HiddenInput)
    content = forms.CharField(max_length=1000, required=False)
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Keep the hidden fields read-only so clients cannot tamper with them.
        self.fields["line_no"].widget.attrs["readonly"] = True
        self.fields["lang"].widget.attrs["readonly"] = True
class BaseTranslationFormSet(BaseFormSet):
    """Formset that labels each translation field with the matching original lyric line."""
    def __init__(self, original_lyrics=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Skip labelling unless a lyric exists for every form.
        if not original_lyrics or len(original_lyrics) != len(self.forms):
            return
        for i, form in enumerate(self.forms):
            form.fields["content"].label = original_lyrics[i]
# One TranslationForm per lyric line; extra=0 means no blank trailing forms.
TranslationFormSet = formset_factory(
    TranslationForm, formset=BaseTranslationFormSet, extra=0
)
class SongReadonlyForm(forms.Form):
    """Single boolean flag toggling whether a song is read-only."""
    readonly = forms.BooleanField(required=False)
class UserFavoriteSongForm(forms.Form):
    """Add (POST) or remove (DELETE) a song from the user's favorites."""
    method = forms.ChoiceField(choices=[(m, m) for m in ("POST", "DELETE")])
    song_id = forms.IntegerField()
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Both fields are hidden; the template supplies their values.
        self.fields["method"].widget = HiddenInput()
        self.fields["song_id"].widget = HiddenInput()
| true | true |
1c496b2598cdfd5fc69e5d28a1e867bb4e332220 | 2,682 | py | Python | tests/test_16_cc_oauth2_service.py | peppelinux/JWTConnect-Python-OidcService | af979f45666bc47b62c69ddcbb199a15c7b96597 | [
"Apache-2.0"
] | 1 | 2020-09-30T13:07:46.000Z | 2020-09-30T13:07:46.000Z | tests/test_16_cc_oauth2_service.py | peppelinux/JWTConnect-Python-OidcService | af979f45666bc47b62c69ddcbb199a15c7b96597 | [
"Apache-2.0"
] | null | null | null | tests/test_16_cc_oauth2_service.py | peppelinux/JWTConnect-Python-OidcService | af979f45666bc47b62c69ddcbb199a15c7b96597 | [
"Apache-2.0"
] | null | null | null | import pytest
from oidcservice.service_factory import service_factory
from oidcservice.service_context import ServiceContext
from oidcservice.state_interface import InMemoryStateDataBase
KEYDEF = [{"type": "EC", "crv": "P-256", "use": ["sig"]}]
class TestRP():
    """Tests for the OAuth2 client-credentials token and refresh-token services."""
    @pytest.fixture(autouse=True)
    def create_service(self):
        """Build fresh token/refresh_token services sharing one state DB and context."""
        client_config = {
            'client_id': 'client_id',
            'client_secret': 'another password'
        }
        service_context = ServiceContext(config=client_config)
        db = InMemoryStateDataBase()
        self.service = {
            'token': service_factory("CCAccessToken",
                                     ['oauth2/client_credentials', 'oauth2'],
                                     state_db=db,
                                     service_context=service_context),
            'refresh_token': service_factory("CCRefreshAccessToken",
                                             ['oauth2/client_credentials',
                                              'oauth2'],
                                             state_db=db,
                                             service_context=service_context)
        }
        self.service['token'].endpoint = 'https://example.com/token'
        self.service['refresh_token'].endpoint = 'https://example.com/token'
    def test_token_get_request(self):
        """Token request must POST the grant with HTTP Basic client authentication."""
        request_args = {'grant_type': 'client_credentials'}
        _srv = self.service['token']
        _info = _srv.get_request_parameters(request_args=request_args)
        assert _info['method'] == 'POST'
        assert _info['url'] == 'https://example.com/token'
        assert _info['body'] == 'grant_type=client_credentials'
        # Basic credentials are base64("client_id:another+password") -- the
        # secret is form-encoded (space -> '+') before encoding.
        assert _info['headers'] == {
            'Authorization': 'Basic Y2xpZW50X2lkOmFub3RoZXIrcGFzc3dvcmQ=',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
    def test_refresh_token_get_request(self):
        """Refresh request must reuse the stored refresh token as a Bearer credential."""
        _srv = self.service['token']
        # Seed the shared state with a previously issued token response.
        _srv.update_service_context({
            "access_token": "2YotnFZFEjr1zCsicMWpAA",
            "token_type": "example",
            "expires_in": 3600,
            "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
            "example_parameter": "example_value"
        })
        _srv = self.service['refresh_token']
        _info = _srv.get_request_parameters()
        assert _info['method'] == 'POST'
        assert _info['url'] == 'https://example.com/token'
        assert _info[
                   'body'] == 'grant_type=refresh_token'
        assert _info['headers'] == {
            'Authorization': 'Bearer tGzv3JOkF0XG5Qx2TlKWIA',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
| 41.90625 | 77 | 0.568978 | import pytest
from oidcservice.service_factory import service_factory
from oidcservice.service_context import ServiceContext
from oidcservice.state_interface import InMemoryStateDataBase
KEYDEF = [{"type": "EC", "crv": "P-256", "use": ["sig"]}]
class TestRP():
    """Tests for the OAuth2 client-credentials token and refresh-token services."""
    @pytest.fixture(autouse=True)
    def create_service(self):
        """Build fresh token/refresh_token services sharing one state DB and context."""
        client_config = {
            'client_id': 'client_id',
            'client_secret': 'another password'
        }
        service_context = ServiceContext(config=client_config)
        db = InMemoryStateDataBase()
        self.service = {
            'token': service_factory("CCAccessToken",
                                     ['oauth2/client_credentials', 'oauth2'],
                                     state_db=db,
                                     service_context=service_context),
            'refresh_token': service_factory("CCRefreshAccessToken",
                                             ['oauth2/client_credentials',
                                              'oauth2'],
                                             state_db=db,
                                             service_context=service_context)
        }
        self.service['token'].endpoint = 'https://example.com/token'
        self.service['refresh_token'].endpoint = 'https://example.com/token'
    def test_token_get_request(self):
        """Token request must POST the grant with HTTP Basic client authentication."""
        request_args = {'grant_type': 'client_credentials'}
        _srv = self.service['token']
        _info = _srv.get_request_parameters(request_args=request_args)
        assert _info['method'] == 'POST'
        assert _info['url'] == 'https://example.com/token'
        assert _info['body'] == 'grant_type=client_credentials'
        # Basic credentials are base64("client_id:another+password").
        assert _info['headers'] == {
            'Authorization': 'Basic Y2xpZW50X2lkOmFub3RoZXIrcGFzc3dvcmQ=',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
    def test_refresh_token_get_request(self):
        """Refresh request must reuse the stored refresh token as a Bearer credential."""
        _srv = self.service['token']
        # Seed the shared state with a previously issued token response.
        _srv.update_service_context({
            "access_token": "2YotnFZFEjr1zCsicMWpAA",
            "token_type": "example",
            "expires_in": 3600,
            "refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
            "example_parameter": "example_value"
        })
        _srv = self.service['refresh_token']
        _info = _srv.get_request_parameters()
        assert _info['method'] == 'POST'
        assert _info['url'] == 'https://example.com/token'
        assert _info[
                   'body'] == 'grant_type=refresh_token'
        assert _info['headers'] == {
            'Authorization': 'Bearer tGzv3JOkF0XG5Qx2TlKWIA',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
| true | true |
1c496c2139c67302856260e7708094386979d059 | 1,100 | py | Python | src/txamqp/test/test_heartbeat.py | sbraz/txamqp | 10caf998dd8c05a7321cd10c24a83832bf58bd0c | [
"Apache-2.0"
] | 17 | 2016-12-20T13:21:18.000Z | 2021-09-22T07:44:15.000Z | src/txamqp/test/test_heartbeat.py | sbraz/txamqp | 10caf998dd8c05a7321cd10c24a83832bf58bd0c | [
"Apache-2.0"
] | 13 | 2017-07-05T07:52:33.000Z | 2022-03-25T10:14:15.000Z | src/txamqp/test/test_heartbeat.py | sbraz/txamqp | 10caf998dd8c05a7321cd10c24a83832bf58bd0c | [
"Apache-2.0"
] | 12 | 2017-06-27T18:48:20.000Z | 2021-02-15T12:22:11.000Z | from twisted.internet import reactor
from twisted.internet.defer import Deferred
from txamqp.testlib import TestBase
from txamqp.protocol import AMQClient
class SpyAMQClient(AMQClient):
    """AMQClient that counts heartbeat activity so tests can assert on it."""

    # Incremented every time the corresponding heartbeat hook fires.
    called_reschedule_check = 0
    called_send_hb = 0

    def reschedule_check_heartbeat(self, dummy=None):
        """Run the real reschedule logic, then record that a heartbeat was received."""
        super().reschedule_check_heartbeat()
        self.called_reschedule_check += 1

    def send_heartbeat(self):
        """Run the real send logic, then record that a heartbeat was sent."""
        super().send_heartbeat()
        self.called_send_hb += 1
class HeartbeatTests(TestBase):
    """
    Tests handling of heartbeat frames
    """
    # Negotiate a 1-second heartbeat interval; clientClass swaps in the
    # counting spy client defined above.
    heartbeat = 1
    clientClass = SpyAMQClient
    def test_heartbeat(self):
        """
        Test that heartbeat frames are sent and received
        """
        d = Deferred()
        def check_pulse(_):
            # The spy counters must both be non-zero after a few heartbeat periods.
            self.assertTrue(self.client.called_send_hb, "A heartbeat frame was recently sent")
            self.assertTrue(self.client.called_reschedule_check, "A heartbeat frame was recently received")
        d.addCallback(check_pulse)
        # 3 s > 2 heartbeat intervals, so both directions have fired by then.
        reactor.callLater(3, d.callback, None)
        return d
| 27.5 | 107 | 0.690909 | from twisted.internet import reactor
from twisted.internet.defer import Deferred
from txamqp.testlib import TestBase
from txamqp.protocol import AMQClient
class SpyAMQClient(AMQClient):
    """AMQClient that counts heartbeat activity so tests can assert on it."""
    # Incremented every time the corresponding heartbeat hook fires.
    called_reschedule_check = 0
    called_send_hb = 0
    def reschedule_check_heartbeat(self, dummy=None):
        # Run the real reschedule logic, then record the received heartbeat.
        AMQClient.reschedule_check_heartbeat(self)
        self.called_reschedule_check += 1
    def send_heartbeat(self):
        # Run the real send logic, then record the sent heartbeat.
        AMQClient.send_heartbeat(self)
        self.called_send_hb += 1
class HeartbeatTests(TestBase):
    """Tests handling of heartbeat frames."""
    # Negotiate a 1-second heartbeat interval using the counting spy client.
    heartbeat = 1
    clientClass = SpyAMQClient
    def test_heartbeat(self):
        """Assert that heartbeat frames were both sent and received."""
        d = Deferred()
        def check_pulse(_):
            self.assertTrue(self.client.called_send_hb, "A heartbeat frame was recently sent")
            self.assertTrue(self.client.called_reschedule_check, "A heartbeat frame was recently received")
        d.addCallback(check_pulse)
        # 3 s > 2 heartbeat intervals, so both directions have fired by then.
        reactor.callLater(3, d.callback, None)
        return d
| true | true |
1c496c2c582376bc0e7ee6a044286bdeda0d3676 | 25,695 | py | Python | tools/management/commands/upload_excel_bias.py | protwis/protwis | da9a455499343ab4e12902b99dcc259cda4a8d38 | [
"Apache-2.0"
] | 21 | 2016-01-20T09:33:14.000Z | 2021-12-20T19:19:45.000Z | tools/management/commands/upload_excel_bias.py | protwis/protwis | da9a455499343ab4e12902b99dcc259cda4a8d38 | [
"Apache-2.0"
] | 75 | 2016-02-26T16:29:58.000Z | 2022-03-21T12:35:13.000Z | tools/management/commands/upload_excel_bias.py | protwis/protwis | da9a455499343ab4e12902b99dcc259cda4a8d38 | [
"Apache-2.0"
] | 77 | 2016-01-22T08:44:26.000Z | 2022-02-01T15:54:56.000Z | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connection
from django.db import IntegrityError
from django.utils.text import slugify
from django.http import HttpResponse, JsonResponse
from decimal import Decimal
from build.management.commands.base_build import Command as BaseBuild
from common.tools import fetch_from_cache, save_to_cache, fetch_from_web_api
from residue.models import Residue
from protein.models import Protein, ProteinCouplings
from ligand.models import BiasedExperiment, ExperimentAssay, BiasedExperimentVendors, AnalyzedExperiment, ExperimentAssayAuthors, Ligand, LigandProperities, LigandType, LigandVendorLink
from mutation.models import Mutation
from ligand.functions import get_or_make_ligand
from common.models import WebLink, WebResource, Publication
from django.db import connection
import queue
import logging
import os
from datetime import datetime
import xlrd
import operator
import traceback
import time
import math
import pytz
import re
MISSING_PROTEINS = {}
SKIPPED = 0
class Command(BaseBuild):
    """Management command that reads bias experiment data from Excel files
    and imports it as BiasedExperiment/ExperimentAssay records."""
    # Dedicated logger writing ERROR-and-above entries to biasDataTest.log.
    mylog = logging.getLogger(__name__)
    mylog.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
    file_handler = logging.FileHandler('biasDataTest.log')
    file_handler.setLevel(logging.ERROR)
    file_handler.setFormatter(formatter)
    mylog.addHandler(file_handler)
    help = 'Reads bias data and imports it'
    # Directory scanned for Excel files when no -f/--filename is given.
    structure_data_dir = os.sep.join([settings.DATA_DIR, 'ligand_data', 'bias_data'])
    # Class-level caches shared across rows: publication id -> Publication,
    # ligand id -> Ligand, plus the accumulated parsed rows.
    publication_cache = {}
    ligand_cache = {}
    data_all = []
def add_arguments(self, parser):
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
parser.add_argument('-f', '--filename',
action='append',
dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('-u', '--purge',
action='store_true',
dest='purge',
default=False,
help='Purge existing bias records')
parser.add_argument('--test_run', action='store_true', help='Skip this during a test run',
default=False)
    def handle(self, *args, **options):
        """Command entry point: optionally purge old data, then import all files."""
        if options['test_run']:
            print('Skipping in test run')
            return
        # delete any existing structure data
        if options['purge']:
            try:
                print('Started purging bias data')
                self.purge_bias_data()
                print('Ended purging bias data')
            except Exception as msg:
                print(msg)
                self.logger.error(msg)
        # import the structure data
        self.prepare_all_data(options['filename'])
        # NOTE(review): the real work happens in prepare_all_data above; this
        # try block only prints/logs, so the except can no longer fire for
        # import errors -- looks like leftover from the commented-out call.
        try:
            print('CREATING BIAS DATA')
            print(options['filename'])
            # self.prepare_all_data(options['filename'])
            self.logger.info('COMPLETED CREATING BIAS')
        except Exception as msg:
            print('--error--', msg, '\n')
            self.logger.info("The error appeared in def handle")
def purge_bias_data(self):
delete_bias_excel = BiasedExperiment.objects.all()
delete_bias_excel.delete()
delete_bias_experiment = AnalyzedExperiment.objects.all()
delete_bias_experiment.delete()
self.logger.info("Bias data purgedAk47aspirine1Ak47aspirine1Ak47aspirine1Ak47aspirine1")
def loaddatafromexcel(self, excelpath):
'''
Reads excel file (require specific excel sheet)
'''
num_rows = 0
try:
workbook = xlrd.open_workbook(excelpath)
worksheets = workbook.sheet_names()
temp = []
for worksheet_name in worksheets:
worksheet = workbook.sheet_by_name(worksheet_name)
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0 # skip first, otherwise -1
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
curr_cell = -1
temprow = []
while curr_cell < num_cells:
curr_cell += 1
cell_value = worksheet.cell_value(curr_row, curr_cell)
cell_type = worksheet.cell_type(curr_row, curr_cell)
# fix wrong spaced cells
if cell_value == " ":
cell_value = ""
temprow.append(cell_value)
temp.append(temprow)
# if curr_row>10: break
return temp
except:
self.logger.info(
"The error appeared during reading the excel", num_rows)
def initialize_return_row(self,excel_row):
d = dict()
d['submitting_group'] = None
d['reference'] = None
d['ligand_name'] = None
d['ligand_type'] = None
d['ligand_id'] = None
d['ligand_reference'] = None
d['emax_ligand_name'] = None
d['emax_ligand_type'] = None
d['emax_ligand_id'] = None
d['receptor'] = None
d['receptor_uniprot_id'] = None
d['cell_line'] = None
d['protein'] = None
d['protein_assay'] = None
d['protein_assay_method'] = None
d['protein_time_resolved'] = None
d['protein_ligand_function'] = None
d['protein_mtype'] = None
d['protein_relation'] = None
d['protein_activity_quantity'] = None
d['protein_activity_quantity_unit'] = None
d['protein_activity_quality'] = None
d['protein_efficacy_measure'] = None
d['protein_efficacy_relation'] = None
d['protein_efficacy_quantity'] = 0.0
d['protein_efficacy_quantity_unit'] = None
d['pathway_bias_initial'] = None
d['pathway_bias'] = None
d['protein_activity_equation'] = None
d['protein_efficacy_equation'] = None
d['auxiliary_protein'] = None
d['source_file'] = excel_row
self.logger.info("empty dict created error")
return d
    def return_row(self, r,excel_row):
        """Map one raw excel row *r* (list of cell values) onto the named
        result dict; *excel_row* is the 1-based row index kept for tracing.

        The try/except blocks are deliberate best-effort conversions: when a
        cell cannot be coerced, the raw value is kept as-is.
        """
        d = self.initialize_return_row(excel_row)
        d['submitting_group'] = r[0]
        d['reference'] = r[1]
        # Excel may deliver numeric ids as floats; normalise e.g. 123.0 -> '123'.
        try:
            d['ligand_name'] = str(int(r[4]))
        except:
            d['ligand_name'] = r[4]
        d['ligand_type'] = r[5]
        try:
            d['ligand_id'] = int(r[6])
        except:
            d['ligand_id'] = r[6]
        d['ligand_reference'] = r[7]
        d['emax_ligand_name'] = r[8]
        d['emax_ligand_type'] = r[9]
        try:
            d['emax_ligand_id'] = int(r[10])
        except:
            d['emax_ligand_id'] = r[10]
        d['receptor'] = r[11].lower().strip()
        d['receptor_uniprot_id'] = r[12]
        d['cell_line'] = r[13]
        # Normalise Greek letters / case of the signalling-protein name.
        d['protein'] = r[14].strip().replace('α','a').replace('β','B').replace('g','G').lower()
        d['protein_assay'] = r[15].strip()
        d['protein_assay_method'] = r[16]
        d['protein_time_resolved'] = r[17]
        d['protein_ligand_function'] = r[18]
        d['protein_mtype'] = r[19]
        d['protein_relation'] = r[20]
        d['protein_activity_quantity_unit'] = r[22]
        d['protein_activity_quality'] = r[23]
        d['protein_efficacy_measure'] = r[24]
        d['protein_efficacy_relation'] = r[25]
        d['protein_efficacy_quantity_unit'] = r[27]
        if r[21] is not None and r[21] != '':
            d['protein_activity_quantity'] = r[21]
        if r[26] is not None and r[26] != '':
            d['protein_efficacy_quantity'] = r[26]
        # Bias values may contain an en dash (U+2013) instead of a minus sign.
        if r[28] is not None and r[28] != '':
            try:
                d['pathway_bias_initial'] = float(r[28])
            except:
                try:
                    d['pathway_bias_initial'] = float(r[28].replace('\U00002013', '-'))
                except:
                    d['pathway_bias_initial'] = r[28]
                    self.logger.info("pathway_bias_initial error")
        if r[29] is not None and r[29] != '':
            try:
                d['pathway_bias'] = float(r[29])
            except:
                try:
                    d['pathway_bias'] = float(r[29].replace('\U00002013', '-'))
                except:
                    d['pathway_bias'] = None
        d['auxiliary_protein'] = r[30]
        d['source_file'] = excel_row
        return d
    def analyse_rows(self, rows, source_file):
        """
        Reads excel rows one by one

        For each row: parse the columns, normalise the potency value, resolve
        the ligands/receptor, then persist one BiasedExperiment plus its
        ExperimentAssay and author rows. Rows whose receptor cannot be
        resolved are collected in `skipped` (currently not returned).
        Returns the list of parsed row dicts that were saved.
        """
        skipped = list()
        # Analyse the rows from excel and assign the right headers
        temp = []
        for i, r in enumerate(rows, 1):
            d = dict()
            # code to skip rows in excel for faster testing
            # if i < 7609:
            #     continue
            # if i > 838:
            #     break
            if i % 100 == 0:
                print(i)
            d = self.return_row(r=r,excel_row=i)
            # Best effort: strip non-numeric characters and round; keep the
            # raw value when conversion fails.
            try:
                d['protein_activity_quantity'] = re.sub('[^\d\.,]', '', d['protein_activity_quantity'])
                d['protein_activity_quantity'] = round(float(d['protein_activity_quantity']),2)
            except:
                d['protein_activity_quantity'] = d['protein_activity_quantity']
            try:
                d['protein_efficacy_quantity'] = round(d['protein_efficacy_quantity'],0)
            except:
                d['protein_efficacy_quantity'] = d['protein_efficacy_quantity']
            # Convert p/log measure types and rescale the potency to molar.
            d['protein_activity_quantity'], d['protein_mtype'] = self.fetch_measurements(d['protein_activity_quantity'],
                                                                                         d['protein_mtype'],
                                                                                         d['protein_activity_quantity_unit'])
            # Fall back to the assay type when the protein column is empty.
            if (d['protein'] == '' or
                d['protein'] == None):
                if d['protein_assay'] == 'pERK1/2 activation' or d['protein_assay'] =="pERK1-2":
                    d['protein'] = 'pERK1-2'
            family = self.define_g_family(d['protein'].lower(), d['protein_assay'])
            pub = self.fetch_publication(d['reference'])
            l = self.fetch_ligand(
                d['ligand_id'], d['ligand_type'], d['ligand_name'], d['source_file'])
            #fetch endogenous ligand
            protein = self.fetch_protein(d['receptor'], d['source_file'])
            # fetch reference_ligand
            reference_ligand = self.fetch_ligand(
                d['emax_ligand_id'], d['emax_ligand_type'], d['emax_ligand_name'], d['source_file'])
            # fetch protein
            protein = self.fetch_protein(d['receptor'], d['source_file'])
            if protein == None:
                skipped.append(d)
                continue
            end_ligand = self.fetch_endogenous(protein)
            auxiliary_protein = self.fetch_protein(d['auxiliary_protein'], d['source_file'])
            if l == None:
                print('*************error row',d,l)
            ## TODO: check if it was already uploaded
            experiment_entry = BiasedExperiment(submission_author=d['submitting_group'],
                                                publication=pub,
                                                ligand=l,
                                                receptor=protein,
                                                auxiliary_protein = auxiliary_protein,
                                                endogenous_ligand = end_ligand,
                                                ligand_source_id = d['ligand_id'],
                                                ligand_source_type = d['ligand_type'],
                                                )
            # try:
            experiment_entry.save()
            self.fetch_vendor(l,experiment_entry)
            # except:
            #     print('skipping line', d)
            #     continue
            experiment_assay = ExperimentAssay(biased_experiment=experiment_entry,
                                               signalling_protein=d['protein'],
                                               family = family,
                                               cell_line=d['cell_line'],
                                               assay_type=d['protein_assay'],
                                               assay_measure=d['protein_assay_method'],
                                               assay_time_resolved=d['protein_time_resolved'],
                                               ligand_function=d['protein_ligand_function'],
                                               quantitive_measure_type=d['protein_mtype'],
                                               quantitive_activity=d['protein_activity_quantity'],
                                               quantitive_activity_sign=d['protein_activity_equation'],
                                               quantitive_unit=d['protein_activity_quantity_unit'],
                                               qualitative_activity=d['protein_activity_quality'],
                                               quantitive_efficacy=d['protein_efficacy_quantity'],
                                               efficacy_measure_type=d['protein_efficacy_measure'],
                                               efficacy_sign=d['protein_efficacy_equation'],
                                               efficacy_unit=d['protein_efficacy_quantity_unit'],
                                               bias_reference=d['ligand_reference'],
                                               bias_value=d['pathway_bias'],
                                               bias_value_initial=d['pathway_bias_initial'],
                                               emax_ligand_reference=reference_ligand
                                               )
            experiment_assay.save()
            #fetch authors
            self.fetch_publication_authors(pub,experiment_assay)
            temp.append(d)
        return temp
def fetch_publication_authors(self,publication, experiment_assay):
counter = 0
author_list = list()
if publication.authors != None:
for authors in publication.authors.split(','):
author_list.append(authors.strip())
author_list.reverse()
for i in author_list:
if counter < 3:
assay_author = ExperimentAssayAuthors(experiment = experiment_assay,
author=i)
assay_author.save()
counter=counter+1
# assay_author = ExperimentAssayAuthors(experiment = experiment_assay,
def fetch_measurements(self, potency, p_type, unit):
if potency is not None:
if p_type.lower() == 'pec50':
potency = 10**(potency*(-1))
p_type = 'EC50'
elif p_type.lower() == 'logec50':
potency = 10**(potency)
p_type = 'EC50'
elif p_type.lower() == 'pic50':
potency = 10**(potency*(-1))
p_type = 'IC50'
elif p_type.lower() == 'logic50':
potency = 10**(potency)
p_type = 'IC50'
if potency is not None:
if p_type.lower() == 'ec50':
if unit.lower() == 'nm':
potency = potency* 10**(-9)
elif unit.lower() == 'µm':
potency = potency* 10**(-6)
elif unit.lower() == 'pm':
potency = potency* 10**(-12)
elif unit.lower() == 'mm':
potency = potency* 10**(-3)
if p_type.lower() == 'ic50':
if unit.lower() == 'nm':
potency = potency* 10**(-9)
elif unit.lower() == 'µm':
potency = potency* 10**(-6)
elif unit.lower() == 'pm':
potency = potency* 10**(-12)
elif unit.lower() == 'mm':
potency = potency* 10**(-3)
return potency,p_type
else:
self.logger.info("potency convertion error")
return None, None
def define_g_family(self, protein, assay_type):
family = None
if (protein == 'b-arrestin' or
protein == 'b-arrestin-1 (non-visual arrestin-2)' or
protein == 'b-arrestin-2 (non-visual arrestin-3)'):
family = 'B-arr'
elif (protein == 'gi/o-family' or
protein == 'gai1' or
protein == 'gai2' or
protein == 'gai3' or
protein == 'gao' or
protein == 'gaoA' or
protein == 'gai' or
protein == 'gai1' or
protein == 'gai2' or
protein == 'gai3' or
protein == 'gai1/2' or
protein == 'gao' or
protein == 'gaoA' or
protein == 'gaoB' or
protein == 'gao1' or
protein == 'gat1' or
protein == 'gat2' or
protein == 'gat3' or
protein == 'gaz' or
protein == 'gaob'):
family = 'Gi/o'
elif (protein == 'gq-family' or
protein == 'ga12' or
protein==' gaq' or
protein=='gaq/11' or
protein=='gaq/14' or
protein=='gaq/15' or
protein=='gaq/16'):
family = 'Gq/11'
elif (protein == 'g12/13-family' or
protein == 'ga12' or
protein == 'ga13'):
family = 'G12/13'
elif (protein == 'gs-family' or
protein == 'gas' or
protein == 'gaolf'):
family = 'Gs'
elif (protein == 'pERK1/2 activation' or
protein =="perk1-2"):
family = 'pERK1-2'
elif (protein == '' or protein is None):
if assay_type == 'Ca2+ accumulation':
family = 'CA2'
def fetch_receptor_trunsducers(self, receptor):
primary = set()
temp = list()
try:
gprotein = ProteinCouplings.objects.filter(protein=receptor)
for x in gprotein:
if x.transduction and x.transduction == 'primary':
primary.add(x.g_protein.name)
for i in primary:
temp.append(str(i))
return temp
except:
self.logger.info('receptor not found error')
return None
def fetch_endogenous(self, protein):
try:
with connection.cursor() as cursor:
cursor.execute("SELECT * FROM protein_endogenous_ligands WHERE protein_id =%s", [protein.pk])
row = cursor.fetchone()
end_ligand = Ligand.objects.filter(id=row[2])
test = end_ligand.get()
return test
except:
self.logger.info("The error appeared in def fetch_endogenous")
return None
def fetch_vendor(self, ligand,experiment_entry):
temp = ligand
links = LigandVendorLink.objects.filter(lp=ligand.properities.id)
# vendor_count = 0
for x in links:
if x.vendor.name not in ['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem']:
ligand_vendor = BiasedExperimentVendors(experiment=experiment_entry,
vendor=x)
ligand_vendor.save()
self.logger.info("ligand_vendor saved")
def fetch_protein(self,protein_from_excel, source):
"""
fetch receptor with Protein model
requires: protein id, source
"""
test = None
if Protein.objects.filter(entry_name=protein_from_excel):
protein = Protein.objects.filter(entry_name=protein_from_excel)
test = protein.get()
elif Protein.objects.filter(web_links__index=protein_from_excel, web_links__web_resource__slug='uniprot'):
protein1 = Protein.objects.filter(
web_links__index=protein_from_excel, web_links__web_resource__slug='uniprot')
test = protein1[0]
if test == None:
self.logger.info("fetch_protein error")
return test
def fetch_ligand(self, ligand_id, ligand_type, ligand_name, source_file):
"""
fetch ligands with Ligand model
requires: ligand id, ligand id type, ligand name
requires: source_file name
"""
l = None
try:
if ligand_id in self.ligand_cache:
l = self.ligand_cache[ligand_id]
else:
l = get_or_make_ligand(ligand_id, ligand_type, ligand_name)
self.ligand_cache[ligand_id] = l
if l == None:
l = self.create_empty_ligand(ligand_name)
except:
web_resource = WebResource.objects.get(slug='pubchem')
try:
l = Ligand.objects.get(properities__web_links__web_resource=web_resource,
properities__web_links__index=ligand_id)
except:
l = self.create_empty_ligand(ligand_name)
# print('null ligand', l)
return l
def fetch_publication(self, publication_doi):
"""
fetch publication with Publication model
requires: publication doi or pmid
"""
try:
float(publication_doi)
publication_doi = str(int(publication_doi))
except ValueError:
pass
if publication_doi.isdigit(): # assume pubmed
pub_type = 'pubmed'
else: # assume doi
pub_type = 'doi'
if publication_doi not in self.publication_cache:
pub = False
if pub_type == 'doi':
pub = Publication.get_or_create_from_doi(publication_doi)
elif pub_type == 'pubmed':
pub = Publication.get_or_create_from_pubmed(publication_doi)
if not pub:
self.mylog.debug(
"publication fetching error | module: fetch_publication. Row # is : " + str(publication_doi) + ' ' + pub_type)
self.publication_cache[publication_doi] = pub
else:
pub = self.publication_cache[publication_doi]
return pub
def fetch_experiment(self, publication, ligand, receptor, source):
"""
fetch receptor with Protein model
requires: protein id, source
"""
try:
experiment = AnalyzedExperiment.objects.filter(
publication=publication, ligand=ligand, receptor=receptor, source=source)
experiment = experiment.get()
return True
except Exception as msg:
experiment = None
self.mylog.exception(
"Experiment AnalyzedExperiment error | module: AnalyzedExperiment.")
return False
def prepare_all_data(self, filenames):
if not filenames:
filenames = os.listdir(self.structure_data_dir)
for source_file in filenames:
source_file_path = os.sep.join(
[self.structure_data_dir, source_file]).replace('//', '/')
if os.path.isfile(source_file_path) and source_file[0] != '.':
self.logger.info('Reading file {}'.format(source_file_path))
print('Reading file {}'.format(source_file_path))
# read the yaml file
rows = []
if source_file[-4:] == 'xlsx' or source_file[-3:] == 'xls':
if "~$" in source_file:
# ignore open excel files
continue
rows = self.loaddatafromexcel(source_file_path)
rows = self.analyse_rows(rows, source_file)
else:
self.mylog.debug('unknown format'.source_file)
continue
self.data_all += rows
print(len(self.data_all), " total data points")
print("Finished")
def create_empty_ligand(self, ligand_name):
# gtoplig webresource
lp = self.build_ligand_properties()
ligand = Ligand()
ligand.properities = lp
ligand.name = ligand_name
ligand.canonical = True
ligand.ambigious_alias = False
ligand.pdbe = None
try:
ligand.save()
except IntegrityError:
self.logger.info("empty ligand found")
return Ligand.objects.get(name=ligand_name, canonical=True)
return ligand
    def build_ligand_properties(self):
        """Create and save a LigandProperities row typed 'small molecule' with
        every chemical descriptor unset; used for placeholder ligands."""
        lp = LigandProperities()
        lt = LigandType.objects.get(name = 'small molecule')
        lp.ligand_type = lt
        lp.smiles = None
        lp.inchikey = None
        lp.sequence= None
        lp.mw = None
        lp.rotatable_bonds = None
        lp.hacc = None
        lp.hdon = None
        lp.logp = None
        lp.save()
        # NOTE(review): this logs on the SUCCESS path as well; the wording
        # ("Could not create ligand") looks misleading.
        self.logger.info("Could not create ligand, empty is returned")
        return lp
| 39.96112 | 185 | 0.521502 | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connection
from django.db import IntegrityError
from django.utils.text import slugify
from django.http import HttpResponse, JsonResponse
from decimal import Decimal
from build.management.commands.base_build import Command as BaseBuild
from common.tools import fetch_from_cache, save_to_cache, fetch_from_web_api
from residue.models import Residue
from protein.models import Protein, ProteinCouplings
from ligand.models import BiasedExperiment, ExperimentAssay, BiasedExperimentVendors, AnalyzedExperiment, ExperimentAssayAuthors, Ligand, LigandProperities, LigandType, LigandVendorLink
from mutation.models import Mutation
from ligand.functions import get_or_make_ligand
from common.models import WebLink, WebResource, Publication
from django.db import connection
import queue
import logging
import os
from datetime import datetime
import xlrd
import operator
import traceback
import time
import math
import pytz
import re
MISSING_PROTEINS = {}
SKIPPED = 0
class Command(BaseBuild):
mylog = logging.getLogger(__name__)
mylog.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('biasDataTest.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
mylog.addHandler(file_handler)
help = 'Reads bias data and imports it'
structure_data_dir = os.sep.join([settings.DATA_DIR, 'ligand_data', 'bias_data'])
publication_cache = {}
ligand_cache = {}
data_all = []
def add_arguments(self, parser):
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
parser.add_argument('-f', '--filename',
action='append',
dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('-u', '--purge',
action='store_true',
dest='purge',
default=False,
help='Purge existing bias records')
parser.add_argument('--test_run', action='store_true', help='Skip this during a test run',
default=False)
def handle(self, *args, **options):
if options['test_run']:
print('Skipping in test run')
return
if options['purge']:
try:
print('Started purging bias data')
self.purge_bias_data()
print('Ended purging bias data')
except Exception as msg:
print(msg)
self.logger.error(msg)
self.prepare_all_data(options['filename'])
try:
print('CREATING BIAS DATA')
print(options['filename'])
self.logger.info('COMPLETED CREATING BIAS')
except Exception as msg:
print('--error--', msg, '\n')
self.logger.info("The error appeared in def handle")
def purge_bias_data(self):
delete_bias_excel = BiasedExperiment.objects.all()
delete_bias_excel.delete()
delete_bias_experiment = AnalyzedExperiment.objects.all()
delete_bias_experiment.delete()
self.logger.info("Bias data purgedAk47aspirine1Ak47aspirine1Ak47aspirine1Ak47aspirine1")
def loaddatafromexcel(self, excelpath):
num_rows = 0
try:
workbook = xlrd.open_workbook(excelpath)
worksheets = workbook.sheet_names()
temp = []
for worksheet_name in worksheets:
worksheet = workbook.sheet_by_name(worksheet_name)
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
curr_cell = -1
temprow = []
while curr_cell < num_cells:
curr_cell += 1
cell_value = worksheet.cell_value(curr_row, curr_cell)
cell_type = worksheet.cell_type(curr_row, curr_cell)
if cell_value == " ":
cell_value = ""
temprow.append(cell_value)
temp.append(temprow)
return temp
except:
self.logger.info(
"The error appeared during reading the excel", num_rows)
def initialize_return_row(self,excel_row):
d = dict()
d['submitting_group'] = None
d['reference'] = None
d['ligand_name'] = None
d['ligand_type'] = None
d['ligand_id'] = None
d['ligand_reference'] = None
d['emax_ligand_name'] = None
d['emax_ligand_type'] = None
d['emax_ligand_id'] = None
d['receptor'] = None
d['receptor_uniprot_id'] = None
d['cell_line'] = None
d['protein'] = None
d['protein_assay'] = None
d['protein_assay_method'] = None
d['protein_time_resolved'] = None
d['protein_ligand_function'] = None
d['protein_mtype'] = None
d['protein_relation'] = None
d['protein_activity_quantity'] = None
d['protein_activity_quantity_unit'] = None
d['protein_activity_quality'] = None
d['protein_efficacy_measure'] = None
d['protein_efficacy_relation'] = None
d['protein_efficacy_quantity'] = 0.0
d['protein_efficacy_quantity_unit'] = None
d['pathway_bias_initial'] = None
d['pathway_bias'] = None
d['protein_activity_equation'] = None
d['protein_efficacy_equation'] = None
d['auxiliary_protein'] = None
d['source_file'] = excel_row
self.logger.info("empty dict created error")
return d
def return_row(self, r, excel_row):
    """Translate one raw excel worksheet row *r* into the canonical dict."""
    def int_or_raw(value):
        # Several id columns arrive as floats like 123.0; normalise to int,
        # otherwise keep the original cell value untouched.
        try:
            return int(value)
        except Exception:
            return value

    def to_float(value):
        # Excel sometimes stores the minus sign as an en dash (U+2013).
        try:
            return float(value)
        except Exception:
            return float(value.replace('\U00002013', '-'))

    d = self.initialize_return_row(excel_row)
    d['submitting_group'] = r[0]
    d['reference'] = r[1]
    try:
        d['ligand_name'] = str(int(r[4]))
    except Exception:
        d['ligand_name'] = r[4]
    d['ligand_id'] = int_or_raw(r[6])
    d['emax_ligand_id'] = int_or_raw(r[10])
    # Columns that are copied through verbatim.
    for key, col in (
            ('ligand_type', 5), ('ligand_reference', 7),
            ('emax_ligand_name', 8), ('emax_ligand_type', 9),
            ('receptor_uniprot_id', 12), ('cell_line', 13),
            ('protein_assay_method', 16), ('protein_time_resolved', 17),
            ('protein_ligand_function', 18), ('protein_mtype', 19),
            ('protein_relation', 20), ('protein_activity_quantity_unit', 22),
            ('protein_activity_quality', 23), ('protein_efficacy_measure', 24),
            ('protein_efficacy_relation', 25),
            ('protein_efficacy_quantity_unit', 27),
            ('auxiliary_protein', 30)):
        d[key] = r[col]
    d['receptor'] = r[11].lower().strip()
    # Normalise greek letters / case in the signalling-protein name.
    d['protein'] = (r[14].strip()
                    .replace('α', 'a').replace('β', 'B').replace('g', 'G')
                    .lower())
    d['protein_assay'] = r[15].strip()
    if r[21] is not None and r[21] != '':
        d['protein_activity_quantity'] = r[21]
    if r[26] is not None and r[26] != '':
        d['protein_efficacy_quantity'] = r[26]
    if r[28] is not None and r[28] != '':
        try:
            d['pathway_bias_initial'] = to_float(r[28])
        except Exception:
            d['pathway_bias_initial'] = r[28]
            self.logger.info("pathway_bias_initial error")
    if r[29] is not None and r[29] != '':
        try:
            d['pathway_bias'] = to_float(r[29])
        except Exception:
            d['pathway_bias'] = None
    d['source_file'] = excel_row
    return d
def analyse_rows(self, rows, source_file):
    """Persist every parsed excel row as a BiasedExperiment plus its assay.

    Rows whose receptor cannot be resolved are collected in *skipped* and
    ignored.  Returns the list of processed row dicts.
    """
    skipped = list()
    temp = []
    for i, r in enumerate(rows, 1):
        if i % 100 == 0:
            print(i)
        d = self.return_row(r=r, excel_row=i)
        # Normalise the activity quantity to a rounded float when possible;
        # non-numeric cells keep their raw value.
        try:
            d['protein_activity_quantity'] = re.sub(
                r'[^\d\.,]', '', d['protein_activity_quantity'])
            d['protein_activity_quantity'] = round(
                float(d['protein_activity_quantity']), 2)
        except Exception:
            pass
        try:
            d['protein_efficacy_quantity'] = round(
                d['protein_efficacy_quantity'], 0)
        except Exception:
            pass
        d['protein_activity_quantity'], d['protein_mtype'] = \
            self.fetch_measurements(d['protein_activity_quantity'],
                                    d['protein_mtype'],
                                    d['protein_activity_quantity_unit'])
        if not d['protein']:
            # pERK assays do not name a signalling protein; fill it in.
            if d['protein_assay'] in ('pERK1/2 activation', 'pERK1-2'):
                d['protein'] = 'pERK1-2'
        family = self.define_g_family(d['protein'].lower(), d['protein_assay'])
        pub = self.fetch_publication(d['reference'])
        l = self.fetch_ligand(
            d['ligand_id'], d['ligand_type'], d['ligand_name'], d['source_file'])
        protein = self.fetch_protein(d['receptor'], d['source_file'])
        if protein is None:
            skipped.append(d)
            continue
        reference_ligand = self.fetch_ligand(
            d['emax_ligand_id'], d['emax_ligand_type'], d['emax_ligand_name'],
            d['source_file'])
        end_ligand = self.fetch_endogenous(protein)
        auxiliary_protein = self.fetch_protein(d['auxiliary_protein'],
                                               d['source_file'])
        if l is None:
            print('*************error row', d, l)
        # NOTE(review): this statement was truncated in the original source
        # ("eriment(..."); reconstructed as BiasedExperiment based on the
        # surrounding usage (experiment_entry / BiasedExperimentVendors) --
        # confirm against the models module.
        experiment_entry = BiasedExperiment(
            submission_author=d['submitting_group'],
            publication=pub,
            ligand=l,
            receptor=protein,
            auxiliary_protein=auxiliary_protein,
            endogenous_ligand=end_ligand,
            ligand_source_id=d['ligand_id'],
            ligand_source_type=d['ligand_type'],
        )
        experiment_entry.save()
        self.fetch_vendor(l, experiment_entry)
        experiment_assay = ExperimentAssay(
            biased_experiment=experiment_entry,
            signalling_protein=d['protein'],
            family=family,
            cell_line=d['cell_line'],
            assay_type=d['protein_assay'],
            assay_measure=d['protein_assay_method'],
            assay_time_resolved=d['protein_time_resolved'],
            ligand_function=d['protein_ligand_function'],
            quantitive_measure_type=d['protein_mtype'],
            quantitive_activity=d['protein_activity_quantity'],
            quantitive_activity_sign=d['protein_activity_equation'],
            quantitive_unit=d['protein_activity_quantity_unit'],
            qualitative_activity=d['protein_activity_quality'],
            quantitive_efficacy=d['protein_efficacy_quantity'],
            efficacy_measure_type=d['protein_efficacy_measure'],
            efficacy_sign=d['protein_efficacy_equation'],
            efficacy_unit=d['protein_efficacy_quantity_unit'],
            bias_reference=d['ligand_reference'],
            bias_value=d['pathway_bias'],
            bias_value_initial=d['pathway_bias_initial'],
            emax_ligand_reference=reference_ligand,
        )
        experiment_assay.save()
        self.fetch_publication_authors(pub, experiment_assay)
        temp.append(d)
    return temp
def fetch_publication_authors(self, publication, experiment_assay):
    """Persist up to three authors of *publication* for an assay.

    The comma-separated author string is reversed first, so the senior
    (last listed) author is stored first.
    """
    if publication.authors is None:
        return
    names = [part.strip() for part in publication.authors.split(',')]
    for author in names[::-1][:3]:
        ExperimentAssayAuthors(experiment=experiment_assay,
                               author=author).save()
def fetch_measurements(self, potency, p_type, unit):
    """Normalise a potency value toward molar EC50/IC50.

    p-scaled (pEC50/pIC50) and log-scaled (logEC50/logIC50) measure types
    are first converted to linear EC50/IC50, then the value is scaled by
    the supplied *unit*.  Returns (value, measure_type); (None, None) when
    no potency was supplied (logged).

    The duplicated EC50/IC50 unit-conversion blocks of the original are
    consolidated into one table; behaviour is unchanged.
    """
    if potency is None:
        self.logger.info("potency convertion error")
        return None, None
    kind = p_type.lower()
    if kind == 'pec50':
        potency = 10 ** (potency * (-1))
        p_type = 'EC50'
    elif kind == 'logec50':
        potency = 10 ** (potency)
        p_type = 'EC50'
    elif kind == 'pic50':
        potency = 10 ** (potency * (-1))
        p_type = 'IC50'
    elif kind == 'logic50':
        potency = 10 ** (potency)
        p_type = 'IC50'
    if p_type.lower() in ('ec50', 'ic50'):
        # Unit prefixes -> molar scale factor; unknown units pass through.
        scale = {'nm': 10 ** (-9), 'µm': 10 ** (-6),
                 'pm': 10 ** (-12), 'mm': 10 ** (-3)}.get(unit.lower())
        if scale is not None:
            potency = potency * scale
    return potency, p_type
def define_g_family(self, protein, assay_type):
    """Map a signalling-protein name to its transducer family label.

    Fix: the original computed *family* but never returned it, so every
    caller received None; the value is now returned.  Membership lists
    (including duplicates collapsed into sets and the original quirky
    entries such as ' gaq' with a leading space) are preserved verbatim.
    """
    barr = {'b-arrestin',
            'b-arrestin-1 (non-visual arrestin-2)',
            'b-arrestin-2 (non-visual arrestin-3)'}
    gio = {'gi/o-family', 'gai1', 'gai2', 'gai3', 'gao', 'gaoA', 'gai',
           'gai1/2', 'gaoB', 'gao1', 'gat1', 'gat2', 'gat3', 'gaz', 'gaob'}
    gq = {'gq-family', 'ga12', ' gaq', 'gaq/11', 'gaq/14', 'gaq/15', 'gaq/16'}
    g1213 = {'g12/13-family', 'ga12', 'ga13'}
    gs = {'gs-family', 'gas', 'gaolf'}
    perk = {'pERK1/2 activation', 'perk1-2'}

    family = None
    if protein in barr:
        family = 'B-arr'
    elif protein in gio:
        family = 'Gi/o'
    elif protein in gq:
        # Matches first, so 'ga12' resolves to Gq/11 as in the original.
        family = 'Gq/11'
    elif protein in g1213:
        family = 'G12/13'
    elif protein in gs:
        family = 'Gs'
    elif protein in perk:
        family = 'pERK1-2'
    elif not protein:
        if assay_type == 'Ca2+ accumulation':
            family = 'CA2'
    return family
def fetch_receptor_trunsducers(self, receptor):
    """Return the primary G-protein names coupled to *receptor*, or None."""
    try:
        couplings = ProteinCouplings.objects.filter(protein=receptor)
        primary = {c.g_protein.name for c in couplings
                   if c.transduction and c.transduction == 'primary'}
        return [str(name) for name in primary]
    except Exception:
        self.logger.info('receptor not found error')
        return None
def fetch_endogenous(self, protein):
    """Look up the endogenous ligand linked to *protein*; None on failure."""
    try:
        with connection.cursor() as cursor:
            cursor.execute(
                "SELECT * FROM protein_endogenous_ligands WHERE protein_id =%s",
                [protein.pk])
            row = cursor.fetchone()
        # Column 2 of the link table holds the ligand id.
        return Ligand.objects.filter(id=row[2]).get()
    except Exception:
        self.logger.info("The error appeared in def fetch_endogenous")
        return None
def fetch_vendor(self, ligand, experiment_entry):
    """Attach the commercial vendor links of *ligand* to the experiment.

    Database aggregators (ZINC, ChEMBL, ...) are skipped; only real
    vendors are stored.
    """
    excluded = ('ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules',
                'MolPort', 'PubChem')
    for link in LigandVendorLink.objects.filter(lp=ligand.properities.id):
        if link.vendor.name in excluded:
            continue
        BiasedExperimentVendors(experiment=experiment_entry,
                                vendor=link).save()
        self.logger.info("ligand_vendor saved")
def fetch_protein(self, protein_from_excel, source):
    """Resolve a protein by entry name, falling back to its UniProt index.

    Returns None (and logs) when neither lookup matches.
    """
    match = None
    by_name = Protein.objects.filter(entry_name=protein_from_excel)
    if by_name:
        match = by_name.get()
    else:
        by_uniprot = Protein.objects.filter(
            web_links__index=protein_from_excel,
            web_links__web_resource__slug='uniprot')
        if by_uniprot:
            match = by_uniprot[0]
    if match is None:
        self.logger.info("fetch_protein error")
    return match
def fetch_ligand(self, ligand_id, ligand_type, ligand_name, source_file):
    """Return a cached or newly resolved ligand.

    Falls back to a direct PubChem web-link lookup and, as a last resort,
    to an empty placeholder ligand.
    """
    result = None
    try:
        if ligand_id in self.ligand_cache:
            result = self.ligand_cache[ligand_id]
        else:
            result = get_or_make_ligand(ligand_id, ligand_type, ligand_name)
            self.ligand_cache[ligand_id] = result
        if result is None:
            result = self.create_empty_ligand(ligand_name)
    except Exception:
        pubchem = WebResource.objects.get(slug='pubchem')
        try:
            result = Ligand.objects.get(
                properities__web_links__web_resource=pubchem,
                properities__web_links__index=ligand_id)
        except Exception:
            result = self.create_empty_ligand(ligand_name)
    return result
def fetch_publication(self, publication_doi):
    """Fetch (and cache) a Publication by PubMed id or DOI."""
    # Excel may deliver numeric ids as floats ("123.0"); normalise them to
    # the plain digit string, leaving anything non-numeric untouched.
    try:
        float(publication_doi)
        publication_doi = str(int(publication_doi))
    except ValueError:
        pass
    pub_type = 'pubmed' if publication_doi.isdigit() else 'doi'
    if publication_doi in self.publication_cache:
        return self.publication_cache[publication_doi]
    pub = False
    if pub_type == 'doi':
        pub = Publication.get_or_create_from_doi(publication_doi)
    elif pub_type == 'pubmed':
        pub = Publication.get_or_create_from_pubmed(publication_doi)
    if not pub:
        self.mylog.debug(
            "publication fetching error | module: fetch_publication. Row # is : " + str(publication_doi) + ' ' + pub_type)
    self.publication_cache[publication_doi] = pub
    return pub
def fetch_experiment(self, publication, ligand, receptor, source):
    """Return True when a matching AnalyzedExperiment already exists."""
    try:
        AnalyzedExperiment.objects.filter(
            publication=publication, ligand=ligand,
            receptor=receptor, source=source).get()
        return True
    except Exception:
        self.mylog.exception(
            "Experiment AnalyzedExperiment error | module: AnalyzedExperiment.")
        return False
def prepare_all_data(self, filenames):
    """Load every excel file from the structure data dir (or *filenames*)."""
    if not filenames:
        filenames = os.listdir(self.structure_data_dir)
    for source_file in filenames:
        source_file_path = os.sep.join(
            [self.structure_data_dir, source_file]).replace('//', '/')
        # Skip directories and hidden files.
        if not (os.path.isfile(source_file_path) and source_file[0] != '.'):
            continue
        self.logger.info('Reading file {}'.format(source_file_path))
        print('Reading file {}'.format(source_file_path))
        if source_file.endswith(('xlsx', 'xls')):
            if "~$" in source_file:
                continue  # excel lock/temp file
            rows = self.loaddatafromexcel(source_file_path)
            rows = self.analyse_rows(rows, source_file)
        else:
            # Fix: the original called 'unknown format'.source_file, which
            # raises AttributeError on str; log a formatted message instead.
            self.mylog.debug('unknown format: {}'.format(source_file))
            continue
        self.data_all += rows
        print(len(self.data_all), " total data points")
    print("Finished")
def create_empty_ligand(self, ligand_name):
    """Create a placeholder Ligand, reusing an existing one on conflict."""
    ligand = Ligand()
    ligand.properities = self.build_ligand_properties()
    ligand.name = ligand_name
    ligand.canonical = True
    ligand.ambigious_alias = False
    ligand.pdbe = None
    try:
        ligand.save()
    except IntegrityError:
        # A canonical ligand with this name already exists; return it.
        self.logger.info("empty ligand found")
        return Ligand.objects.get(name=ligand_name, canonical=True)
    return ligand
def build_ligand_properties(self):
    """Create and save an empty LigandProperities record."""
    props = LigandProperities()
    props.ligand_type = LigandType.objects.get(name='small molecule')
    # No chemistry is known for a placeholder; blank every descriptor.
    for attr in ('smiles', 'inchikey', 'sequence', 'mw',
                 'rotatable_bonds', 'hacc', 'hdon', 'logp'):
        setattr(props, attr, None)
    props.save()
    self.logger.info("Could not create ligand, empty is returned")
    return props
| true | true |
1c496c984a5305a874109d556f037a1da44afd9d | 363 | py | Python | Day01-15/code/Day15/pdf2.py | EngrSaad2/Python-100-Days | ab0b26714b1df50d02a1433dc82f2a3fb025be5c | [
"Apache-2.0"
] | 6 | 2020-04-22T14:07:51.000Z | 2021-09-07T12:55:23.000Z | Day01-15/code/Day15/pdf2.py | 2462612540/Python-Language | a676d1274a04ff03f1aea0de9c58019d6ef8f5fe | [
"Apache-2.0"
] | null | null | null | Day01-15/code/Day15/pdf2.py | 2462612540/Python-Language | a676d1274a04ff03f1aea0de9c58019d6ef8f5fe | [
"Apache-2.0"
] | 4 | 2019-08-25T05:51:00.000Z | 2021-04-16T08:14:16.000Z | """
读取PDF文件
Version: 0.1
Author: 骆昊
Date: 2018-03-26
"""
from PyPDF2 import PdfFileReader

# Read a PDF and dump the text of page 5; strict=False tolerates minor
# format irregularities in the file.
with open('./res/Python课程大纲.pdf', 'rb') as f:
    reader = PdfFileReader(f, strict=False)
    print(reader.numPages)
    if reader.isEncrypted:
        # An empty password handles PDFs encrypted without a user password.
        reader.decrypt('')
    current_page = reader.getPage(5)
    print(current_page)
    print(current_page.extractText())
| 19.105263 | 45 | 0.680441 |
from PyPDF2 import PdfFileReader
with open('./res/Python课程大纲.pdf', 'rb') as f:
reader = PdfFileReader(f, strict=False)
print(reader.numPages)
if reader.isEncrypted:
reader.decrypt('')
current_page = reader.getPage(5)
print(current_page)
print(current_page.extractText())
| true | true |
1c496ca75c47d276175856efd760bf5ff55c3465 | 547 | py | Python | augment/aug_insert_junk_chars.py | biubiubiiu/SpamClassification | c7159c77baf5f1ba09ce1af9fc0f7e0c10332864 | [
"Apache-2.0"
] | null | null | null | augment/aug_insert_junk_chars.py | biubiubiiu/SpamClassification | c7159c77baf5f1ba09ce1af9fc0f7e0c10332864 | [
"Apache-2.0"
] | null | null | null | augment/aug_insert_junk_chars.py | biubiubiiu/SpamClassification | c7159c77baf5f1ba09ce1af9fc0f7e0c10332864 | [
"Apache-2.0"
] | 1 | 2022-03-01T13:10:46.000Z | 2022-03-01T13:10:46.000Z | import random
from resources import list_junk_charaters
from .base_operation import BaseOperation
class InsertJunkCharacters(BaseOperation):
    """Augmentation that inserts one meaningless character into the text."""

    def __init__(self):
        super(InsertJunkCharacters, self).__init__()
        self.junk_chars = list_junk_charaters()

    def can_replace(self, s):
        # Insertion is applicable to any input text.
        return True

    def transform(self, s):
        """Return *s* with one random junk character at a random position."""
        position = random.randint(0, len(s))
        junk = random.choice(self.junk_chars)
        return s[:position] + junk + s[position:]
| 26.047619 | 55 | 0.694698 | import random
from resources import list_junk_charaters
from .base_operation import BaseOperation
class InsertJunkCharacters(BaseOperation):
def __init__(self):
super(InsertJunkCharacters, self).__init__()
self.junk_chars = list_junk_charaters()
def can_replace(self, s):
return True
def transform(self, s):
idx = random.randint(0, len(s))
char_to_insert = random.choice(self.junk_chars)
return s[:idx] + char_to_insert + s[idx:]
| true | true |
1c496dfc9ef80a210ba798d35c3fe379edc60e8a | 5,072 | py | Python | server/server.py | TwistedSim/CoupIO | f517fb52b0b1050066d60fd0b389238e247cc90f | [
"MIT"
] | 3 | 2020-12-07T00:03:26.000Z | 2020-12-07T01:51:27.000Z | server/server.py | TwistedSim/CoupIO | f517fb52b0b1050066d60fd0b389238e247cc90f | [
"MIT"
] | null | null | null | server/server.py | TwistedSim/CoupIO | f517fb52b0b1050066d60fd0b389238e247cc90f | [
"MIT"
] | 1 | 2020-12-05T17:35:16.000Z | 2020-12-05T17:35:16.000Z | import asyncio
import inspect
import socketio
import random
from typing import Type
from games.game_interface import GameInterface, Game
class Server(socketio.AsyncNamespace):
    """Socket.IO namespace managing game creation, joining and lifecycle."""

    current_games = {}           # game uuid -> game instance
    game_class = None            # concrete GameInterface subclass (configure())
    sio = None                   # shared socketio server (configure())
    start_lock = asyncio.Lock()  # serialises concurrent start_game requests

    @classmethod
    def configure(cls, sio: socketio.Server, game: Type[GameInterface]):
        """Bind *game*'s on_* methods as socket.io event handlers.

        Raises NameError when the game defines a handler name that the
        server interface already owns.
        """
        cls.game_class = game
        cls.sio = sio
        server_methods = [
            name for name, _ in inspect.getmembers(cls, predicate=inspect.isfunction)
            if name.startswith('on_')
        ]
        for name, member in inspect.getmembers(cls.game_class, predicate=inspect.ismethod):
            if name in server_methods:
                raise NameError(f'A event handler for {name} already exists in the server interface.')
            if name.startswith('on_'):
                cls.sio.on(name[3:], handler=member)

    async def on_connect(self, sid, environ):
        print(f'Client {sid} connected')
        await self.sio.send(f'Connected to {Server.game_class.__name__} server', room=sid)

    async def on_create_game(self, sid):
        """Create a new game owned by *sid* and return its uuid."""
        new_game = self.game_class(self.sio, sid)
        self.current_games[new_game.uuid] = new_game
        await self.sio.send(f'New game created', room=sid)
        print(f'Client {sid} create a new game {new_game.uuid}')
        return new_game.uuid

    async def on_find_random_game(self, sid):
        """Return the uuid of a random joinable game, if any."""
        available_games = [
            game for game in self.current_games.values() if game.is_valid]
        if available_games:
            return random.choice(available_games).uuid
        await self.sio.send(f'No game available')

    async def on_join_game(self, sid, game_uuid):
        # Fix: the original indexed current_games[game_uuid] before the
        # existence check, so an unknown uuid raised KeyError instead of
        # sending the 'does not exists' message.
        if len(self.sio.rooms(sid)) > 1:
            await self.sio.send(f'You already are in game {self.sio.rooms(sid)[1]}', room=sid)
        elif game_uuid not in self.current_games:
            await self.sio.send(f'Game {game_uuid} does not exists', room=sid)
        else:
            game = self.current_games[game_uuid]
            if not game.is_valid:
                await self.sio.send(f'Game {game_uuid} is not available', room=sid)
            elif game.is_full:
                await self.sio.send(f'Game {game_uuid} is full', room=sid)
            else:
                await game.add_player(sid)
                self.sio.enter_room(sid, game_uuid)
                await self.sio.send(f'Game {game_uuid} joined', room=sid)
                await self.sio.send(f'A new player joined the game', room=game_uuid, skip_sid=sid)
                await self.sio.emit('player_joined_game', (game_uuid, game.nb_player, False), room=game_uuid, skip_sid=game.owner)
                await self.sio.emit('player_joined_game', (game_uuid, game.nb_player, True), room=game.owner)
                print(f'Client {sid} join the game {game_uuid}')

    async def leave(self, sid, game_uuid):
        """Remove *sid* from the game, aborting the game when required."""
        self.sio.leave_room(sid, game_uuid)
        game = self.current_games[game_uuid]
        await game.remove_player(sid)
        print(f'Client {sid} left game {game_uuid}')
        await self.sio.send(f'Left room {game_uuid}', room=sid)
        await self.sio.send('A player left the game', room=game_uuid)
        # A running game, an owner departure, or an empty game all abort.
        if game.status == Game.Status.Running:
            game.status = Game.Status.Aborted
        elif sid == game.owner:
            game.status = Game.Status.Aborted
            print(f'Game {game_uuid} was closed by the owner')
            await self.sio.send(f'Game {game_uuid} was close by owner', room=game_uuid)
        elif game.nb_player == 0:
            game.status = Game.Status.Aborted
            print(f'Game {game_uuid} was removed since there is no player left')
        if game.status == Game.Status.Aborted:
            await self.sio.send(f'Game was aborted', room=game_uuid)
            await self.sio.emit('game_aborted', game_uuid, room=game_uuid)
            await self.sio.close_room(game_uuid)

    async def on_disconnect(self, sid):
        # The first room is always the client's private room (== sid).
        for room in self.sio.rooms(sid):
            if room != sid:
                await self.leave(sid, room)
        print(f'Client {sid} disconnected')

    async def on_start_game(self, sid, game_uuid):
        async with self.start_lock:
            game = self.current_games[game_uuid]
            if game.owner != sid:
                await self.sio.send(f'Only the owner of the game can start the game', room=sid)
            elif not game.is_ready:
                await self.sio.send(f'The game cannot start until it is ready', room=sid)
            else:
                await self.sio.send(f'Game {game.uuid} started', room=game_uuid)
                print(f'Client {sid} started the game {game.uuid}')
                # TODO start the game in another process
                # TODO use different socket.io namespace according to the game
                await game.start()
                print(f'Game {game.uuid} is completed.')
                await self.sio.close_room(game.uuid)
| 44.104348 | 126 | 0.638604 | import asyncio
import inspect
import socketio
import random
from typing import Type
from games.game_interface import GameInterface, Game
class Server(socketio.AsyncNamespace):
current_games = {}
game_class = None
sio = None
start_lock = asyncio.Lock()
@classmethod
def configure(cls, sio: socketio.Server, game: Type[GameInterface]):
cls.game_class = game
cls.sio = sio
server_methods = [m[0] for m in inspect.getmembers(cls, predicate=inspect.isfunction) if m[0].startswith('on_')]
for method in inspect.getmembers(cls.game_class, predicate=inspect.ismethod):
if method[0] in server_methods:
raise NameError(f'A event handler for {method[0]} already exists in the server interface.')
if method[0].startswith('on_'):
cls.sio.on(method[0][3:], handler=method[1])
async def on_connect(self, sid, environ):
print(f'Client {sid} connected')
await self.sio.send(f'Connected to {Server.game_class.__name__} server', room=sid)
async def on_create_game(self, sid):
new_game = self.game_class(self.sio, sid)
self.current_games[new_game.uuid] = new_game
await self.sio.send(f'New game created', room=sid)
print(f'Client {sid} create a new game {new_game.uuid}')
return new_game.uuid
async def on_find_random_game(self, sid):
available_games = [
game for game in self.current_games.values() if game.is_valid]
if available_games:
return random.choice(available_games).uuid
else:
await self.sio.send(f'No game available')
async def on_join_game(self, sid, game_uuid):
game = self.current_games[game_uuid]
if len(self.sio.rooms(sid)) > 1:
await self.sio.send(f'You already are in game {self.sio.rooms(sid)[1]}', room=sid)
elif game_uuid not in self.current_games:
await self.sio.send(f'Game {game_uuid} does not exists', room=sid)
elif not game.is_valid:
await self.sio.send(f'Game {game_uuid} is not available', room=sid)
elif game.is_full:
await self.sio.send(f'Game {game_uuid} is full', room=sid)
else:
await game.add_player(sid)
self.sio.enter_room(sid, game_uuid)
await self.sio.send(f'Game {game_uuid} joined', room=sid)
await self.sio.send(f'A new player joined the game', room=game_uuid, skip_sid=sid)
await self.sio.emit('player_joined_game', (game_uuid, game.nb_player, False), room=game_uuid, skip_sid=game.owner)
await self.sio.emit('player_joined_game', (game_uuid, game.nb_player, True), room=game.owner)
print(f'Client {sid} join the game {game_uuid}')
async def leave(self, sid, game_uuid):
self.sio.leave_room(sid, game_uuid)
await self.current_games[game_uuid].remove_player(sid)
print(f'Client {sid} left game {game_uuid}')
await self.sio.send(f'Left room {game_uuid}', room=sid)
await self.sio.send('A player left the game', room=game_uuid)
if self.current_games[game_uuid].status == Game.Status.Running:
self.current_games[game_uuid].status = Game.Status.Aborted
elif sid == self.current_games[game_uuid].owner:
self.current_games[game_uuid].status = Game.Status.Aborted
print(f'Game {game_uuid} was closed by the owner')
await self.sio.send(f'Game {game_uuid} was close by owner', room=game_uuid)
elif self.current_games[game_uuid].nb_player == 0:
self.current_games[game_uuid].status = Game.Status.Aborted
print(f'Game {game_uuid} was removed since there is no player left')
if self.current_games[game_uuid].status == Game.Status.Aborted:
await self.sio.send(f'Game was aborted', room=game_uuid)
await self.sio.emit('game_aborted', game_uuid, room=game_uuid)
await self.sio.close_room(game_uuid)
async def on_disconnect(self, sid):
for game in self.sio.rooms(sid):
if game != sid:
await self.leave(sid, game)
print(f'Client {sid} disconnected')
async def on_start_game(self, sid, game_uuid):
async with self.start_lock:
game = self.current_games[game_uuid]
if game.owner != sid:
await self.sio.send(f'Only the owner of the game can start the game', room=sid)
elif not game.is_ready:
await self.sio.send(f'The game cannot start until it is ready', room=sid)
else:
await self.sio.send(f'Game {game.uuid} started', room=game_uuid)
print(f'Client {sid} started the game {game.uuid}')
await game.start()
print(f'Game {game.uuid} is completed.')
await self.sio.close_room(game.uuid)
| true | true |
1c496fe67e0d21c3e64a5837cd6d0721b4b6ee09 | 1,189 | py | Python | tests/gogs_tools_tests/test_gogs_utils.py | mondele/tx-manager | ddbbeeae5990a327ffc14b42c478d3ea435c0533 | [
"MIT"
] | 3 | 2017-03-17T02:25:21.000Z | 2017-05-18T22:18:20.000Z | tests/gogs_tools_tests/test_gogs_utils.py | mondele/tx-manager | ddbbeeae5990a327ffc14b42c478d3ea435c0533 | [
"MIT"
] | 184 | 2016-10-13T02:56:16.000Z | 2021-03-25T21:27:20.000Z | tests/gogs_tools_tests/test_gogs_utils.py | mondele/tx-manager | ddbbeeae5990a327ffc14b42c478d3ea435c0533 | [
"MIT"
] | 16 | 2016-09-15T23:34:19.000Z | 2019-07-25T07:06:32.000Z | from __future__ import absolute_import, unicode_literals, print_function
import mock
import unittest
from libraries.gogs_tools.gogs_handler import GogsHandler
class GogsHandlerTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.handler = GogsHandler("https://www.example.com/")
cls.handler.gogs_api = mock.MagicMock()
def setUp(self):
"""Runs before each test."""
self.handler.gogs_api.reset_mock()
def test_authenticate_user_token(self):
def valid_auth(token):
return token.token == "valid"
self.handler.gogs_api.valid_authentication = valid_auth
self.assertTrue(self.handler.authenticate_user_token("valid"))
self.assertFalse(self.handler.authenticate_user_token("invalid"))
def test_get_user(self):
def valid_auth(token):
return token.token == "valid"
self.handler.gogs_api.valid_authentication = valid_auth
mock_user = mock.MagicMock()
self.handler.gogs_api.authenticated_user.return_value = mock_user
self.assertIs(self.handler.get_user("valid"), mock_user)
self.assertIsNone(self.handler.get_user("invalid"))
| 36.030303 | 73 | 0.707317 | from __future__ import absolute_import, unicode_literals, print_function
import mock
import unittest
from libraries.gogs_tools.gogs_handler import GogsHandler
class GogsHandlerTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.handler = GogsHandler("https://www.example.com/")
cls.handler.gogs_api = mock.MagicMock()
def setUp(self):
self.handler.gogs_api.reset_mock()
def test_authenticate_user_token(self):
def valid_auth(token):
return token.token == "valid"
self.handler.gogs_api.valid_authentication = valid_auth
self.assertTrue(self.handler.authenticate_user_token("valid"))
self.assertFalse(self.handler.authenticate_user_token("invalid"))
def test_get_user(self):
def valid_auth(token):
return token.token == "valid"
self.handler.gogs_api.valid_authentication = valid_auth
mock_user = mock.MagicMock()
self.handler.gogs_api.authenticated_user.return_value = mock_user
self.assertIs(self.handler.get_user("valid"), mock_user)
self.assertIsNone(self.handler.get_user("invalid"))
| true | true |
1c49703f28f036f4d4ac9547a92dd0ad4100c1c4 | 1,229 | py | Python | lib/lockfile.py | kaolin/rigor | c3489bf36088282368daee8fd71e9a64344145de | [
"BSD-2-Clause"
] | 5 | 2018-03-28T08:43:08.000Z | 2021-10-30T15:47:07.000Z | lib/lockfile.py | blindsightcorp/rigor | d4176afed5b82cef3daf778ed00fe9be66d231fb | [
"BSD-2-Clause"
] | 2 | 2016-10-10T19:10:26.000Z | 2017-05-03T23:01:37.000Z | lib/lockfile.py | kaolin/rigor | c3489bf36088282368daee8fd71e9a64344145de | [
"BSD-2-Clause"
] | 7 | 2016-05-25T00:15:43.000Z | 2017-06-26T17:32:45.000Z | """ File used to synchronize operations between processes """
import os
class LockFile(object):
    """
    Use this to lock operations that need to occur only once, even if several
    processes try to run the operation. It works by getting an exclusive lock
    on the listed file (os.O_CREAT | os.O_EXCL fails when the file already
    exists). It will fail with an exception if the lock is already held by
    some other process. The lock is reentrant for code sharing the same
    instance of this class.

    Usage:

    >>> with LockFile('/tmp/rigor-foo.lock') as lock:
    ...   # do critical stuff...
    ...   pass
    """

    def __init__(self, path):
        self._path = path   # lock file location on disk
        self._lock = None   # os-level file descriptor while the lock is held

    def acquire(self):
        """
        Acquires a reentrant lock. If the lock is already held by this
        instance, simply return; otherwise acquire it. Raises FileExistsError
        when another process holds the lock file.
        """
        if self._lock is None:
            self._lock = os.open(self._path, os.O_CREAT | os.O_EXCL | os.O_RDWR)

    def release(self):
        """
        Releases the lock and removes the file from disk.
        """
        if self._lock is not None:
            os.close(self._lock)
            os.unlink(self._path)
            # Fix: the original never cleared the descriptor, so a second
            # acquire() on the same instance silently no-oped and a second
            # release() closed a stale fd / unlinked a missing file.
            self._lock = None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, _exc_type, _exc_value, _exc_traceback):
        self.release()
| 26.717391 | 78 | 0.703824 |
import os
class LockFile(object):
def __init__(self, path):
self._path = path
self._lock = None
def acquire(self):
if not self._lock:
self._lock = os.open(self._path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
def release(self):
if self._lock:
os.close(self._lock)
os.unlink(self._path)
def __enter__(self):
self.acquire()
return self
def __exit__(self, _exc_type, _exc_value, _exc_traceback):
self.release()
| true | true |
1c4970531f6fba9cef04cbc507d9376efaba246c | 416 | py | Python | products/migrations/0004_auto_20180914_2257.py | bubaic/e-shop | d0156d02d6e74e35d115f8742b55809466126513 | [
"MIT"
] | 1 | 2022-02-21T18:00:48.000Z | 2022-02-21T18:00:48.000Z | products/migrations/0004_auto_20180914_2257.py | bubaic/e-shop | d0156d02d6e74e35d115f8742b55809466126513 | [
"MIT"
] | null | null | null | products/migrations/0004_auto_20180914_2257.py | bubaic/e-shop | d0156d02d6e74e35d115f8742b55809466126513 | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2018-09-14 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: makes the product image field optional.

    dependencies = [
        ('products', '0003_product_image'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='image',
            # NOTE(review): upload_to is the *string* 'get_image_path', not a
            # callable -- uploads land in a literal folder of that name;
            # confirm this matches the model definition.
            field=models.FileField(blank=True, null=True, upload_to='get_image_path'),
        ),
    ]
| 21.894737 | 86 | 0.610577 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_product_image'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=models.FileField(blank=True, null=True, upload_to='get_image_path'),
),
]
| true | true |
1c49712da3c586e84e204bc68db748b83fe51cbd | 164 | py | Python | 01_Day_Introduction/euclidian_distance.py | fernandovicentinpavanello/30-days-of-Python | 3e04ef64a0997bb71eeac57911e47f2f6414ae75 | [
"MIT"
] | 1 | 2022-03-08T07:08:39.000Z | 2022-03-08T07:08:39.000Z | 01_Day_Introduction/euclidian_distance.py | luizpavanello/30-days-of-Python | 3c727a76b6185a5ba684c393c5cdfc759c3c4b01 | [
"MIT"
] | null | null | null | 01_Day_Introduction/euclidian_distance.py | luizpavanello/30-days-of-Python | 3c727a76b6185a5ba684c393c5cdfc759c3c4b01 | [
"MIT"
] | null | null | null | # Python Euclidian Distance using math.dist
from math import dist

# Two points in the plane; math.dist computes their Euclidean separation.
point_1 = (2, 3)
point_2 = (10, 8)
print(dist(point_1, point_2))  # 9.433981132056603
| 14.909091 | 43 | 0.719512 |
from math import dist
point_1 = (2, 3)
point_2 = (10, 8)
print(dist(point_1, point_2))
| true | true |
1c4972132ffd5f30ca850ea943fd539eece66d4f | 4,004 | py | Python | wheat/wallet/util/trade_utils.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | 15 | 2021-07-12T14:27:42.000Z | 2022-02-09T04:32:44.000Z | wheat/wallet/util/trade_utils.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | 21 | 2021-07-12T23:25:36.000Z | 2021-10-29T23:19:55.000Z | wheat/wallet/util/trade_utils.py | grayfallstown/wheat-blockchain | f391cdd30a0cbcdb2adf4439a25581fd28b42c1f | [
"Apache-2.0"
] | 8 | 2021-07-12T13:15:19.000Z | 2022-03-15T08:41:18.000Z | from typing import Dict, Optional, Tuple
from wheat.types.blockchain_format.program import Program, INFINITE_COST
from wheat.types.condition_opcodes import ConditionOpcode
from wheat.types.spend_bundle import SpendBundle
from wheat.util.condition_tools import conditions_dict_for_solution
from wheat.wallet.cc_wallet import cc_utils
from wheat.wallet.trade_record import TradeRecord
from wheat.wallet.trading.trade_status import TradeStatus
def trade_status_ui_string(status: TradeStatus):
    """Translate a TradeStatus enum member into its UI display string."""
    labels = {
        TradeStatus.PENDING_CONFIRM: "Pending Confirmation",
        TradeStatus.CANCELED: "Canceled",
        TradeStatus.CONFIRMED: "Confirmed",
        TradeStatus.PENDING_CANCEL: "Pending Cancellation",
        TradeStatus.FAILED: "Failed",
        TradeStatus.PENDING_ACCEPT: "Pending",
    }
    # Unknown statuses yield None, matching the original elif chain.
    return labels.get(status)
def trade_record_to_dict(record: TradeRecord) -> Dict:
    """Convenience function to return only part of trade record we care about and show correct status to the ui.

    Raises ValueError (wrapping the underlying error) when the spend bundle's
    discrepancies cannot be computed.
    """
    # Plain record fields first, same evaluation order as before.
    result = {
        "trade_id": record.trade_id.hex(),
        "sent": record.sent,
        "my_offer": record.my_offer,
        "created_at_time": record.created_at_time,
        "accepted_at_time": record.accepted_at_time,
        "confirmed_at_index": record.confirmed_at_index,
        "status": trade_status_ui_string(TradeStatus(record.status)),
    }
    success, offer_dict, error = get_discrepancies_for_spend_bundle(record.spend_bundle)
    if success is False or offer_dict is None:
        raise ValueError(error)
    result["offer_dict"] = offer_dict
    return result
# Returns the relative difference in value between the amount outputted by a puzzle and solution and a coin's amount
def get_output_discrepancy_for_puzzle_and_solution(coin, puzzle, solution):
    """Return coin.amount minus the total value the (puzzle, solution) pair outputs."""
    return coin.amount - get_output_amount_for_puzzle_and_solution(puzzle, solution)
# Returns the amount of value outputted by a puzzle and solution
def get_output_amount_for_puzzle_and_solution(puzzle: Program, solution: Program) -> int:
    """Sum the amounts of all CREATE_COIN conditions produced by running the puzzle."""
    error, conditions, cost = conditions_dict_for_solution(puzzle, solution, INFINITE_COST)
    if not conditions:
        return 0
    # Each CREATE_COIN condition's second var is the created coin's amount.
    return sum(
        Program.to(cond.vars[1]).as_int()
        for cond in conditions.get(ConditionOpcode.CREATE_COIN, [])
    )
def get_discrepancies_for_spend_bundle(
    trade_offer: SpendBundle,
) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
    """Compute per-colour value discrepancies for a trade offer.

    For every coin spend, the difference between the coin's amount and the
    value its puzzle outputs is accumulated per colour (coloured coins keyed
    by their genesis-checker hex, plain coins under the "wheat" key).

    Returns (True, discrepancies, None) on success, or
    (False, None, exception) if anything raised.
    """
    try:
        cc_discrepancies: Dict[str, int] = dict()
        for coinsol in trade_offer.coin_spends:
            puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
            solution: Program = Program.from_bytes(bytes(coinsol.solution))

            # work out the deficits between coin amount and expected output for each
            r = cc_utils.uncurry_cc(puzzle)
            if r:
                # Coloured coin: output amount comes from the inner puzzle,
                # keyed by the colour (genesis checker hex).
                mod_hash, genesis_checker, inner_puzzle = r
                innersol = solution.first()
                total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
                key = bytes(genesis_checker).hex()
            else:
                # Plain coin: account under the "wheat" key.
                total = get_output_amount_for_puzzle_and_solution(puzzle, solution)
                key = "wheat"
            # Idiomatic accumulation; replaces the duplicated if/else updates.
            cc_discrepancies[key] = cc_discrepancies.get(key, 0) + (coinsol.coin.amount - total)

        return True, cc_discrepancies, None
    except Exception as e:
        return False, None, e
| 42.595745 | 116 | 0.699051 | from typing import Dict, Optional, Tuple
from wheat.types.blockchain_format.program import Program, INFINITE_COST
from wheat.types.condition_opcodes import ConditionOpcode
from wheat.types.spend_bundle import SpendBundle
from wheat.util.condition_tools import conditions_dict_for_solution
from wheat.wallet.cc_wallet import cc_utils
from wheat.wallet.trade_record import TradeRecord
from wheat.wallet.trading.trade_status import TradeStatus
def trade_status_ui_string(status: TradeStatus):
if status is TradeStatus.PENDING_CONFIRM:
return "Pending Confirmation"
elif status is TradeStatus.CANCELED:
return "Canceled"
elif status is TradeStatus.CONFIRMED:
return "Confirmed"
elif status is TradeStatus.PENDING_CANCEL:
return "Pending Cancellation"
elif status is TradeStatus.FAILED:
return "Failed"
elif status is TradeStatus.PENDING_ACCEPT:
return "Pending"
def trade_record_to_dict(record: TradeRecord) -> Dict:
result = {}
result["trade_id"] = record.trade_id.hex()
result["sent"] = record.sent
result["my_offer"] = record.my_offer
result["created_at_time"] = record.created_at_time
result["accepted_at_time"] = record.accepted_at_time
result["confirmed_at_index"] = record.confirmed_at_index
result["status"] = trade_status_ui_string(TradeStatus(record.status))
success, offer_dict, error = get_discrepancies_for_spend_bundle(record.spend_bundle)
if success is False or offer_dict is None:
raise ValueError(error)
result["offer_dict"] = offer_dict
return result
def get_output_discrepancy_for_puzzle_and_solution(coin, puzzle, solution):
discrepancy = coin.amount - get_output_amount_for_puzzle_and_solution(puzzle, solution)
return discrepancy
# Returns the amount of value outputted by a puzzle and solution
def get_output_amount_for_puzzle_and_solution(puzzle: Program, solution: Program) -> int:
error, conditions, cost = conditions_dict_for_solution(puzzle, solution, INFINITE_COST)
total = 0
if conditions:
for _ in conditions.get(ConditionOpcode.CREATE_COIN, []):
total += Program.to(_.vars[1]).as_int()
return total
def get_discrepancies_for_spend_bundle(
trade_offer: SpendBundle,
) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
try:
cc_discrepancies: Dict[str, int] = dict()
for coinsol in trade_offer.coin_spends:
puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
solution: Program = Program.from_bytes(bytes(coinsol.solution))
# work out the deficits between coin amount and expected output for each
r = cc_utils.uncurry_cc(puzzle)
if r:
# Calculate output amounts
mod_hash, genesis_checker, inner_puzzle = r
innersol = solution.first()
total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
colour = bytes(genesis_checker).hex()
if colour in cc_discrepancies:
cc_discrepancies[colour] += coinsol.coin.amount - total
else:
cc_discrepancies[colour] = coinsol.coin.amount - total
else:
coin_amount = coinsol.coin.amount
out_amount = get_output_amount_for_puzzle_and_solution(puzzle, solution)
diff = coin_amount - out_amount
if "wheat" in cc_discrepancies:
cc_discrepancies["wheat"] = cc_discrepancies["wheat"] + diff
else:
cc_discrepancies["wheat"] = diff
return True, cc_discrepancies, None
except Exception as e:
return False, None, e
| true | true |
1c4973a004a9278329b4a2ea713e7f3e1c39f8cc | 10,727 | py | Python | venv/Lib/site-packages/selenium/webdriver/firefox/webdriver.py | dasxran/seleniumMachineLearning | 3098f836913a89847cb9e308189383a4ea981139 | [
"MIT"
] | 64 | 2020-07-22T06:24:18.000Z | 2022-03-27T10:48:15.000Z | venv/Lib/site-packages/selenium/webdriver/firefox/webdriver.py | dasxran/seleniumMachineLearning | 3098f836913a89847cb9e308189383a4ea981139 | [
"MIT"
] | 51 | 2021-04-08T11:39:59.000Z | 2021-05-07T12:01:27.000Z | venv/Lib/site-packages/selenium/webdriver/firefox/webdriver.py | dasxran/seleniumMachineLearning | 3098f836913a89847cb9e308189383a4ea981139 | [
"MIT"
] | 21 | 2019-03-11T04:25:23.000Z | 2022-02-03T08:54:33.000Z | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
try:
basestring
except NameError: # Python 3.x
basestring = str
import shutil
import sys
from contextlib import contextmanager
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .extension_connection import ExtensionConnection
from .firefox_binary import FirefoxBinary
from .firefox_profile import FirefoxProfile
from .options import Options
from .remote_connection import FirefoxRemoteConnection
from .service import Service
from .webelement import FirefoxWebElement
class WebDriver(RemoteWebDriver):
    """Driver for a local Firefox browser.

    Depending on the "marionette" capability it either talks to geckodriver
    over the W3C protocol (Firefox 47.0.1+) or falls back to the legacy
    extension connection.
    """

    # There is no native event support on Mac
    NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"

    # Valid arguments for set_context()/context().
    CONTEXT_CHROME = "chrome"
    CONTEXT_CONTENT = "content"

    _web_element_cls = FirefoxWebElement

    def __init__(self, firefox_profile=None, firefox_binary=None,
                 timeout=30, capabilities=None, proxy=None,
                 executable_path="geckodriver", options=None,
                 service_log_path="geckodriver.log", firefox_options=None,
                 service_args=None, desired_capabilities=None, log_path=None,
                 keep_alive=True):
        """Starts a new local session of Firefox.

        Based on the combination and specificity of the various keyword
        arguments, a capabilities dictionary will be constructed that
        is passed to the remote end.

        The keyword arguments given to this constructor are helpers to
        more easily allow Firefox WebDriver sessions to be customised
        with different options.  They are mapped on to a capabilities
        dictionary that is passed on to the remote end.

        As some of the options, such as `firefox_profile` and
        `options.profile` are mutually exclusive, precedence is
        given from how specific the setting is.  `capabilities` is the
        least specific keyword argument, followed by `options`,
        followed by `firefox_binary` and `firefox_profile`.

        In practice this means that if `firefox_profile` and
        `options.profile` are both set, the selected profile
        instance will always come from the most specific variable.
        In this case that would be `firefox_profile`.  This will result in
        `options.profile` to be ignored because it is considered
        a less specific setting than the top-level `firefox_profile`
        keyword argument.  Similarily, if you had specified a
        `capabilities["moz:firefoxOptions"]["profile"]` Base64 string,
        this would rank below `options.profile`.

        :param firefox_profile: Instance of ``FirefoxProfile`` object
            or a string.  If undefined, a fresh profile will be created
            in a temporary location on the system.
        :param firefox_binary: Instance of ``FirefoxBinary`` or full
            path to the Firefox binary.  If undefined, the system default
            Firefox installation will be used.
        :param timeout: Time to wait for Firefox to launch when using
            the extension connection.
        :param capabilities: Dictionary of desired capabilities.
        :param proxy: The proxy settings to us when communicating with
            Firefox via the extension connection.
        :param executable_path: Full path to override which geckodriver
            binary to use for Firefox 47.0.1 and greater, which
            defaults to picking up the binary from the system path.
        :param options: Instance of ``options.Options``.
        :param service_log_path: Where to log information from the driver.
        :param firefox_options: Deprecated argument for options
        :param service_args: List of args to pass to the driver service
        :param desired_capabilities: alias of capabilities. In future
            versions of this library, this will replace 'capabilities'.
            This will make the signature consistent with RemoteWebDriver.
        :param log_path: Deprecated argument for service_log_path
        :param keep_alive: Whether to configure remote_connection.RemoteConnection to use
            HTTP keep-alive.
        """
        if log_path:
            warnings.warn('use service_log_path instead of log_path',
                          DeprecationWarning, stacklevel=2)
            service_log_path = log_path
        if firefox_options:
            warnings.warn('use options instead of firefox_options',
                          DeprecationWarning, stacklevel=2)
            options = firefox_options
        self.binary = None
        self.profile = None
        self.service = None

        # If desired capabilities is set, alias it to capabilities.
        # If both are set ignore desired capabilities.
        if capabilities is None and desired_capabilities:
            capabilities = desired_capabilities
        if capabilities is None:
            capabilities = DesiredCapabilities.FIREFOX.copy()
        if options is None:
            options = Options()

        capabilities = dict(capabilities)

        if capabilities.get("binary"):
            self.binary = capabilities["binary"]

        # options overrides capabilities
        if options is not None:
            if options.binary is not None:
                self.binary = options.binary
            if options.profile is not None:
                self.profile = options.profile

        # firefox_binary and firefox_profile
        # override options
        if firefox_binary is not None:
            if isinstance(firefox_binary, basestring):
                firefox_binary = FirefoxBinary(firefox_binary)
            self.binary = firefox_binary
            options.binary = firefox_binary
        if firefox_profile is not None:
            if isinstance(firefox_profile, basestring):
                firefox_profile = FirefoxProfile(firefox_profile)
            self.profile = firefox_profile
            options.profile = firefox_profile

        # W3C remote
        # TODO(ato): Perform conformance negotiation
        if capabilities.get("marionette"):
            capabilities.pop("marionette")
            self.service = Service(
                executable_path,
                service_args=service_args,
                log_path=service_log_path)
            self.service.start()

            capabilities.update(options.to_capabilities())

            executor = FirefoxRemoteConnection(
                remote_server_addr=self.service.service_url)
            RemoteWebDriver.__init__(
                self,
                command_executor=executor,
                desired_capabilities=capabilities,
                # Fix: honour the caller's keep_alive flag (previously
                # hard-coded to True here, while the docstring and the
                # legacy branch below both respect the parameter).
                keep_alive=keep_alive)

        # Selenium remote
        else:
            if self.binary is None:
                self.binary = FirefoxBinary()
            if self.profile is None:
                self.profile = FirefoxProfile()

            # disable native events if globally disabled
            self.profile.native_events_enabled = (
                self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)

            if proxy is not None:
                proxy.add_to_capabilities(capabilities)

            executor = ExtensionConnection("127.0.0.1", self.profile,
                                           self.binary, timeout)
            RemoteWebDriver.__init__(
                self,
                command_executor=executor,
                desired_capabilities=capabilities,
                keep_alive=keep_alive)

        self._is_remote = False

    def quit(self):
        """Quits the driver and close every associated window."""
        try:
            RemoteWebDriver.quit(self)
        except Exception:
            # We don't care about the message because something probably has gone wrong
            pass

        if self.w3c:
            self.service.stop()
        else:
            self.binary.kill()

        if self.profile is not None:
            try:
                # Best-effort cleanup of the temporary profile directories.
                shutil.rmtree(self.profile.path)
                if self.profile.tempfolder is not None:
                    shutil.rmtree(self.profile.tempfolder)
            except Exception as e:
                print(str(e))

    @property
    def firefox_profile(self):
        """The FirefoxProfile in use, or None on the W3C path."""
        return self.profile

    # Extension commands:
    def set_context(self, context):
        self.execute("SET_CONTEXT", {"context": context})

    @contextmanager
    def context(self, context):
        """Sets the context that Selenium commands are running in using
        a `with` statement. The state of the context on the server is
        saved before entering the block, and restored upon exiting it.

        :param context: Context, may be one of the class properties
            `CONTEXT_CHROME` or `CONTEXT_CONTENT`.

        Usage example::

            with selenium.context(selenium.CONTEXT_CHROME):
                # chrome scope
                ... do stuff ...
        """
        initial_context = self.execute('GET_CONTEXT').pop('value')
        self.set_context(context)
        try:
            yield
        finally:
            # Restore the previous context even if the body raised.
            self.set_context(initial_context)

    def install_addon(self, path, temporary=None):
        """
        Installs Firefox addon.

        Returns identifier of installed addon. This identifier can later
        be used to uninstall addon.

        :param path: Absolute path to the addon that will be installed.

        :Usage:
            driver.install_addon('/path/to/firebug.xpi')
        """
        payload = {"path": path}
        if temporary is not None:
            payload["temporary"] = temporary
        return self.execute("INSTALL_ADDON", payload)["value"]

    def uninstall_addon(self, identifier):
        """
        Uninstalls Firefox addon using its identifier.

        :Usage:
            driver.uninstall_addon('addon@foo.com')
        """
        self.execute("UNINSTALL_ADDON", {"id": identifier})
| 38.725632 | 89 | 0.648271 |
import warnings
try:
basestring
except NameError:
basestring = str
import shutil
import sys
from contextlib import contextmanager
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .extension_connection import ExtensionConnection
from .firefox_binary import FirefoxBinary
from .firefox_profile import FirefoxProfile
from .options import Options
from .remote_connection import FirefoxRemoteConnection
from .service import Service
from .webelement import FirefoxWebElement
class WebDriver(RemoteWebDriver):
NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"
CONTEXT_CHROME = "chrome"
CONTEXT_CONTENT = "content"
_web_element_cls = FirefoxWebElement
def __init__(self, firefox_profile=None, firefox_binary=None,
timeout=30, capabilities=None, proxy=None,
executable_path="geckodriver", options=None,
service_log_path="geckodriver.log", firefox_options=None,
service_args=None, desired_capabilities=None, log_path=None,
keep_alive=True):
if log_path:
warnings.warn('use service_log_path instead of log_path',
DeprecationWarning, stacklevel=2)
service_log_path = log_path
if firefox_options:
warnings.warn('use options instead of firefox_options',
DeprecationWarning, stacklevel=2)
options = firefox_options
self.binary = None
self.profile = None
self.service = None
if capabilities is None and desired_capabilities:
capabilities = desired_capabilities
if capabilities is None:
capabilities = DesiredCapabilities.FIREFOX.copy()
if options is None:
options = Options()
capabilities = dict(capabilities)
if capabilities.get("binary"):
self.binary = capabilities["binary"]
if options is not None:
if options.binary is not None:
self.binary = options.binary
if options.profile is not None:
self.profile = options.profile
if firefox_binary is not None:
if isinstance(firefox_binary, basestring):
firefox_binary = FirefoxBinary(firefox_binary)
self.binary = firefox_binary
options.binary = firefox_binary
if firefox_profile is not None:
if isinstance(firefox_profile, basestring):
firefox_profile = FirefoxProfile(firefox_profile)
self.profile = firefox_profile
options.profile = firefox_profile
if capabilities.get("marionette"):
capabilities.pop("marionette")
self.service = Service(
executable_path,
service_args=service_args,
log_path=service_log_path)
self.service.start()
capabilities.update(options.to_capabilities())
executor = FirefoxRemoteConnection(
remote_server_addr=self.service.service_url)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=True)
else:
if self.binary is None:
self.binary = FirefoxBinary()
if self.profile is None:
self.profile = FirefoxProfile()
self.profile.native_events_enabled = (
self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)
if proxy is not None:
proxy.add_to_capabilities(capabilities)
executor = ExtensionConnection("127.0.0.1", self.profile,
self.binary, timeout)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=keep_alive)
self._is_remote = False
def quit(self):
try:
RemoteWebDriver.quit(self)
except Exception:
pass
if self.w3c:
self.service.stop()
else:
self.binary.kill()
if self.profile is not None:
try:
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
shutil.rmtree(self.profile.tempfolder)
except Exception as e:
print(str(e))
@property
def firefox_profile(self):
return self.profile
# Extension commands:
def set_context(self, context):
self.execute("SET_CONTEXT", {"context": context})
@contextmanager
def context(self, context):
initial_context = self.execute('GET_CONTEXT').pop('value')
self.set_context(context)
try:
yield
finally:
self.set_context(initial_context)
def install_addon(self, path, temporary=None):
payload = {"path": path}
if temporary is not None:
payload["temporary"] = temporary
return self.execute("INSTALL_ADDON", payload)["value"]
def uninstall_addon(self, identifier):
self.execute("UNINSTALL_ADDON", {"id": identifier})
| true | true |
1c497475a53a4c0124cb3f312edcf589a9dd4c1d | 14,550 | py | Python | models/variation/pix2pix_tm2_mc_full_in2_model.py | tkuri/irradiance_estimation | 3f7e0e8d4772222faad7257a70a8dec0198e4810 | [
"BSD-3-Clause"
] | 1 | 2020-07-22T18:06:40.000Z | 2020-07-22T18:06:40.000Z | models/variation/pix2pix_tm2_mc_full_in2_model.py | tkuri/irradiance_estimation | 3f7e0e8d4772222faad7257a70a8dec0198e4810 | [
"BSD-3-Clause"
] | null | null | null | models/variation/pix2pix_tm2_mc_full_in2_model.py | tkuri/irradiance_estimation | 3f7e0e8d4772222faad7257a70a8dec0198e4810 | [
"BSD-3-Clause"
] | null | null | null | import torch
from .base_model import BaseModel
from . import networks
from torch.nn import functional as F
class Pix2PixTm2McFullIn2Model(BaseModel):
""" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
The model training requires '--dataset_mode aligned' dataset.
By default, it uses a '--netG unet256' U-Net generator,
a '--netD basic' discriminator (PatchGAN),
and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned3')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B', 'real_C', 'real_C_itp', 'ltm_slice00', 'ltm_slice12', 'ltm_slice24', 'matrix_1_0', 'matrix_1_1', 'matrix_1_2', 'matrix_1_3', 'matrix_2_0', 'matrix_2_1', 'matrix_2_2', 'matrix_2_3']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
if self.isTrain:
# self.model_names = ['G', 'D']
self.model_names = ['G', 'G2', 'D']
else: # during test time, only load G
self.model_names = ['G', 'G2']
# define networks (both generator and discriminator)
self.output_nc = opt.output_nc
self.light_res = opt.light_res
self.intermediate_nc = opt.intermediate_nc
print('opt.output_nc', opt.output_nc)
print('light_res', self.light_res)
print('intermediate_nc', self.intermediate_nc)
self.netG = networks.define_G(opt.input_nc + opt.input2_nc, opt.output_nc*self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG2 = networks.define_G(opt.input_nc + opt.input2_nc, self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
self.netD = networks.define_D(opt.input_nc + opt.input2_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_G2)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.real_C = input['C'].to(self.device)
# self.real_C_itp = F.interpolate(self.real_C, (self.light_res, self.light_res), mode='bicubic', align_corners=False)
self.real_C_itp = F.interpolate(self.real_C, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
self.real_C_itp_flat = self.real_C_itp.view(-1, self.light_res**2, 1) # [1, lsxls, 1]
self.real_C_itp = torch.clamp((F.interpolate(self.real_C_itp, (self.real_C.size(-2), self.real_C.size(-1)), mode='nearest')-0.5)/0.5, min=-1.0, max=1.0)
self.real_AC = torch.cat([self.real_A, self.real_C], dim=1)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
# print("test")
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
sub_matrix1 = self.netG(self.real_AC) # [1, 3xmc, 256, 256]
sub_matrix2 = self.netG2(self.real_AC) # [1, mc, 256, 256]
sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)# [1, mc, ls, ls]
self.sub_matrix_1 = sub_matrix1.clone()
self.sub_matrix_2 = sub_matrix2.clone()
self.matrix_1 = torch.clamp((sub_matrix1*self.matrix_1_gain-0.5)/0.5, min=-1.0, max=1.0)
self.matrix_1_0 = self.matrix_1[:, [0, self.intermediate_nc, self.intermediate_nc*2], :, :]
self.matrix_1_1 = self.matrix_1[:, [1, 1 + self.intermediate_nc, 1 + self.intermediate_nc*2], :, :]
self.matrix_1_2 = self.matrix_1[:, [2, 2 + self.intermediate_nc, 3 + self.intermediate_nc*2], :, :]
self.matrix_1_3 = self.matrix_1[:, [3, 3 + self.intermediate_nc, 3 + self.intermediate_nc*2], :, :]
self.matrix_2 = torch.clamp((F.interpolate(sub_matrix2, (self.real_B.size(-2), self.real_B.size(-1)), mode='nearest')*self.matrix_2_gain-0.5)/0.5, min=-1.0, max=1.0)
self.matrix_2_0 = torch.unsqueeze(self.matrix_2[:, 0, :, :], 1)
self.matrix_2_1 = torch.unsqueeze(self.matrix_2[:, 1, :, :], 1)
self.matrix_2_2 = torch.unsqueeze(self.matrix_2[:, 2, :, :], 1)
self.matrix_2_3 = torch.unsqueeze(self.matrix_2[:, 3, :, :], 1)
sub_matrix1 = sub_matrix1.view(-1, sub_matrix1.size(1), sub_matrix1.size(2)*sub_matrix1.size(3)) # [1, 3xmc, 256x256]
sub_matrix2 = sub_matrix2.view(-1, sub_matrix2.size(1), sub_matrix2.size(2)*sub_matrix2.size(3)) # [1, mc, lsxls]
sub_matrix1 = torch.transpose(sub_matrix1, 1, 2) # [1, 256x256, 3xmc]
sm1R = sub_matrix1[:, :, 0:self.intermediate_nc] # [1, 256x256, mc]
sm1G = sub_matrix1[:, :, self.intermediate_nc:self.intermediate_nc*2]
sm1B = sub_matrix1[:, :, self.intermediate_nc*2:self.intermediate_nc*3]
bufR = torch.matmul(sm1R, sub_matrix2) # [1, 256x256, lsxls]
bufG = torch.matmul(sm1G, sub_matrix2)
bufB = torch.matmul(sm1B, sub_matrix2)
trans_matrix = torch.cat([bufR, bufG, bufB], dim=1) # [1, 3x256x256, lsxls]
ltm = torch.transpose(trans_matrix, 1, 2) #[25, 25, 3x256x256]
ltm = ltm.reshape(ltm.size(0), ltm.size(1)*self.real_B.size(1), self.real_B.size(2)*self.real_B.size(3)) #[25, 25x3, 256x256]
ltm = ltm.reshape(ltm.size(0), ltm.size(1), self.real_B.size(2), self.real_B.size(3)) #[25, 25x3, 256, 256]
self.ltm_slice00 = torch.clamp((ltm[:, 0:3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0) # [25, 3, 256, 256]
self.ltm_slice12 = torch.clamp((ltm[:, 3*12:3*12+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0) # [25, 3, 256, 256]
self.ltm_slice24 = torch.clamp((ltm[:, 3*24:3*24+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0) # [25, 3, 256, 256]
# trans_matrix = torch.matmul(sub_matrix1, sub_matrix2) #[1, 3x256x256, lsxls]
# print('trans_matrix:', trans_matrix.size())
tmR = trans_matrix[:, 0:256**2, :] # [1, 256x256, lsxls]
tmG = trans_matrix[:, 256**2:(256**2)*2, :]
tmB = trans_matrix[:, (256**2)*2:(256**2)*3, :]
# print('tmR:', tmR.size())
bufR = torch.matmul(tmR, self.real_C_itp_flat) # [1, 256x256, 1]
bufG = torch.matmul(tmG, self.real_C_itp_flat)
bufB = torch.matmul(tmB, self.real_C_itp_flat)
# print('bufR:', bufR.size())
buf = torch.cat([bufR, bufG, bufB], dim=2) # [1, 256x256, 3]
buf = torch.transpose(buf, 1, 2) # [1, 3, 256x256]
buf = (buf - 0.5) / 0.5
buf = torch.clamp(buf, min=-1.0, max=1.0)
# print('buf:', buf.size())
self.fake_B = buf.view(self.real_B.size()) # [1, 3, 256, 256]
def forward_linebuf(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
sub_matrix1 = self.netG(self.real_AC) # [1, 3, 256, 256]
sub_matrix2 = self.netG2(self.real_AC) # [1, 1, 256, 256]
sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
self.fake_B = torch.zeros_like(self.real_B)
sub_matrix2 = sub_matrix2.view(-1, 1, sub_matrix2.size(-2)*sub_matrix2.size(-1)) * 0.5 + 0.5 # [1, 1, 256x256]
for l in range(sub_matrix1.size(2)):
sub_matrix1_buf = sub_matrix1[:, :, l, :].reshape(-1, sub_matrix1.size(1)*sub_matrix1.size(3), 1) * 0.5 + 0.5 # [1, 3x256, 1]
trans_matrix = torch.matmul(sub_matrix1_buf, sub_matrix2) #[1, 3x256, 256x256]
# print('trans_matrix:', trans_matrix.size())
tmR = trans_matrix[:, 0:256, :] # [1, 256, 256x256]
tmG = trans_matrix[:, 256:256*2, :]
tmB = trans_matrix[:, 256*2:256*3, :]
# print('self.real_C_itp_flat:', self.real_C_itp_flat.size())
# print('tmR:', tmR.size())
bufR = torch.matmul(tmR, self.real_C_itp_flat * 10.0) # [1, 256, 1]
bufG = torch.matmul(tmG, self.real_C_itp_flat * 10.0)
bufB = torch.matmul(tmB, self.real_C_itp_flat * 10.0)
# print('bufR:', bufR.size())
buf = torch.cat([bufR, bufG, bufB], dim=2) # [1, 256, 3]
buf = torch.transpose(buf, 1, 2) # [1, 3, 256]
buf = (buf - 0.5) / 0.5
buf = buf.reshape(self.fake_B.size(0), self.fake_B.size(1), self.fake_B.size(3))
# print('buf:', buf.size())
# print('fake_B:', self.fake_B.size())
self.fake_B[:, :, l, :] = buf # [1, 3, 1, 256] <- [1,3,256]
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
# Fake; stop backprop to the generator by detaching fake_B
# fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
# pred_fake = self.netD(fake_AB.detach())
fake_ACB = torch.cat((self.real_AC, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
pred_fake = self.netD(fake_ACB.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False)
# Real
# real_AB = torch.cat((self.real_A, self.real_B), 1)
# pred_real = self.netD(real_AB)
real_ACB = torch.cat((self.real_AC, self.real_B), 1)
pred_real = self.netD(real_ACB)
self.loss_D_real = self.criterionGAN(pred_real, True)
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
# First, G(A) should fake the discriminator
# fake_AB = torch.cat((self.real_A, self.fake_B), 1)
# pred_fake = self.netD(fake_AB)
fake_ACB = torch.cat((self.real_AC, self.fake_B), 1)
pred_fake = self.netD(fake_ACB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# Second, G(A) = B
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
# combine loss and calculate gradients
self.loss_G = self.loss_G_GAN + self.loss_G_L1
self.loss_G.backward()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
# self.optimizer_G.zero_grad() # set G's gradients to zero
# self.backward_G() # calculate graidents for G
# self.optimizer_G.step() # udpate G's weights
self.optimizer_G.zero_grad() # set G's gradients to zero
self.optimizer_G2.zero_grad() # set G's gradients to zero
self.backward_G() # calculate graidents for G
self.optimizer_G.step() # udpate G's weights
self.optimizer_G2.step() # udpate G's weights
| 58.669355 | 239 | 0.620619 | import torch
from .base_model import BaseModel
from . import networks
from torch.nn import functional as F
class Pix2PixTm2McFullIn2Model(BaseModel):
@staticmethod
def modify_commandline_options(parser, is_train=True):
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned3')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def __init__(self, opt):
BaseModel.__init__(self, opt)
self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
self.visual_names = ['real_A', 'fake_B', 'real_B', 'real_C', 'real_C_itp', 'ltm_slice00', 'ltm_slice12', 'ltm_slice24', 'matrix_1_0', 'matrix_1_1', 'matrix_1_2', 'matrix_1_3', 'matrix_2_0', 'matrix_2_1', 'matrix_2_2', 'matrix_2_3']
if self.isTrain:
self.model_names = ['G', 'G2', 'D']
else:
self.model_names = ['G', 'G2']
self.output_nc = opt.output_nc
self.light_res = opt.light_res
self.intermediate_nc = opt.intermediate_nc
print('opt.output_nc', opt.output_nc)
print('light_res', self.light_res)
print('intermediate_nc', self.intermediate_nc)
self.netG = networks.define_G(opt.input_nc + opt.input2_nc, opt.output_nc*self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG2 = networks.define_G(opt.input_nc + opt.input2_nc, self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: e_D(opt.input_nc + opt.input2_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_G2)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.real_C = input['C'].to(self.device)
self.real_C_itp = F.interpolate(self.real_C, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
self.real_C_itp_flat = self.real_C_itp.view(-1, self.light_res**2, 1)
self.real_C_itp = torch.clamp((F.interpolate(self.real_C_itp, (self.real_C.size(-2), self.real_C.size(-1)), mode='nearest')-0.5)/0.5, min=-1.0, max=1.0)
self.real_AC = torch.cat([self.real_A, self.real_C], dim=1)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
sub_matrix1 = self.netG(self.real_AC)
sub_matrix2 = self.netG2(self.real_AC)
sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
self.sub_matrix_1 = sub_matrix1.clone()
self.sub_matrix_2 = sub_matrix2.clone()
self.matrix_1 = torch.clamp((sub_matrix1*self.matrix_1_gain-0.5)/0.5, min=-1.0, max=1.0)
self.matrix_1_0 = self.matrix_1[:, [0, self.intermediate_nc, self.intermediate_nc*2], :, :]
self.matrix_1_1 = self.matrix_1[:, [1, 1 + self.intermediate_nc, 1 + self.intermediate_nc*2], :, :]
self.matrix_1_2 = self.matrix_1[:, [2, 2 + self.intermediate_nc, 3 + self.intermediate_nc*2], :, :]
self.matrix_1_3 = self.matrix_1[:, [3, 3 + self.intermediate_nc, 3 + self.intermediate_nc*2], :, :]
self.matrix_2 = torch.clamp((F.interpolate(sub_matrix2, (self.real_B.size(-2), self.real_B.size(-1)), mode='nearest')*self.matrix_2_gain-0.5)/0.5, min=-1.0, max=1.0)
self.matrix_2_0 = torch.unsqueeze(self.matrix_2[:, 0, :, :], 1)
self.matrix_2_1 = torch.unsqueeze(self.matrix_2[:, 1, :, :], 1)
self.matrix_2_2 = torch.unsqueeze(self.matrix_2[:, 2, :, :], 1)
self.matrix_2_3 = torch.unsqueeze(self.matrix_2[:, 3, :, :], 1)
sub_matrix1 = sub_matrix1.view(-1, sub_matrix1.size(1), sub_matrix1.size(2)*sub_matrix1.size(3))
sub_matrix2 = sub_matrix2.view(-1, sub_matrix2.size(1), sub_matrix2.size(2)*sub_matrix2.size(3))
sub_matrix1 = torch.transpose(sub_matrix1, 1, 2)
sm1R = sub_matrix1[:, :, 0:self.intermediate_nc]
sm1G = sub_matrix1[:, :, self.intermediate_nc:self.intermediate_nc*2]
sm1B = sub_matrix1[:, :, self.intermediate_nc*2:self.intermediate_nc*3]
bufR = torch.matmul(sm1R, sub_matrix2)
bufG = torch.matmul(sm1G, sub_matrix2)
bufB = torch.matmul(sm1B, sub_matrix2)
trans_matrix = torch.cat([bufR, bufG, bufB], dim=1)
ltm = torch.transpose(trans_matrix, 1, 2)
ltm = ltm.reshape(ltm.size(0), ltm.size(1)*self.real_B.size(1), self.real_B.size(2)*self.real_B.size(3))
ltm = ltm.reshape(ltm.size(0), ltm.size(1), self.real_B.size(2), self.real_B.size(3))
self.ltm_slice00 = torch.clamp((ltm[:, 0:3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0)
self.ltm_slice12 = torch.clamp((ltm[:, 3*12:3*12+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0)
self.ltm_slice24 = torch.clamp((ltm[:, 3*24:3*24+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0)
= trans_matrix[:, 0:256**2, :]
tmG = trans_matrix[:, 256**2:(256**2)*2, :]
tmB = trans_matrix[:, (256**2)*2:(256**2)*3, :]
bufR = torch.matmul(tmR, self.real_C_itp_flat)
bufG = torch.matmul(tmG, self.real_C_itp_flat)
bufB = torch.matmul(tmB, self.real_C_itp_flat)
buf = torch.cat([bufR, bufG, bufB], dim=2)
buf = torch.transpose(buf, 1, 2)
buf = (buf - 0.5) / 0.5
buf = torch.clamp(buf, min=-1.0, max=1.0)
self.fake_B = buf.view(self.real_B.size())
def forward_linebuf(self):
sub_matrix1 = self.netG(self.real_AC)
sub_matrix2 = self.netG2(self.real_AC)
sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
self.fake_B = torch.zeros_like(self.real_B)
sub_matrix2 = sub_matrix2.view(-1, 1, sub_matrix2.size(-2)*sub_matrix2.size(-1)) * 0.5 + 0.5
for l in range(sub_matrix1.size(2)):
sub_matrix1_buf = sub_matrix1[:, :, l, :].reshape(-1, sub_matrix1.size(1)*sub_matrix1.size(3), 1) * 0.5 + 0.5
trans_matrix = torch.matmul(sub_matrix1_buf, sub_matrix2)
tmR = trans_matrix[:, 0:256, :]
tmG = trans_matrix[:, 256:256*2, :]
tmB = trans_matrix[:, 256*2:256*3, :]
bufR = torch.matmul(tmR, self.real_C_itp_flat * 10.0)
bufG = torch.matmul(tmG, self.real_C_itp_flat * 10.0)
bufB = torch.matmul(tmB, self.real_C_itp_flat * 10.0)
buf = torch.cat([bufR, bufG, bufB], dim=2)
buf = torch.transpose(buf, 1, 2)
buf = (buf - 0.5) / 0.5
buf = buf.reshape(self.fake_B.size(0), self.fake_B.size(1), self.fake_B.size(3))
self.fake_B[:, :, l, :] = buf
def backward_D(self):
_fake = self.netD(fake_ACB.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False)
real_ACB = torch.cat((self.real_AC, self.real_B), 1)
pred_real = self.netD(real_ACB)
self.loss_D_real = self.criterionGAN(pred_real, True)
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
def backward_G(self):
fake_ACB = torch.cat((self.real_AC, self.fake_B), 1)
pred_fake = self.netD(fake_ACB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
self.loss_G = self.loss_G_GAN + self.loss_G_L1
self.loss_G.backward()
def optimize_parameters(self):
self.forward()
self.set_requires_grad(self.netD, True)
self.optimizer_D.zero_grad()
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
self.set_requires_grad(self.netD, False)
) # calculate graidents for G
# self.optimizer_G.step() # udpate G's weights
self.optimizer_G.zero_grad()
self.optimizer_G2.zero_grad() # set G's gradients to zero
self.backward_G()
self.optimizer_G.step()
self.optimizer_G2.step() # udpate G's weights
| true | true |
1c497491d95957aa66acaa71fdefe22a342c41c1 | 19,473 | py | Python | mudata/_core/io.py | scverse/mudata | fbfc634e8f17bd70ed67bb8a37951564f16b61e6 | [
"BSD-3-Clause"
] | 12 | 2022-01-10T14:11:23.000Z | 2022-03-17T13:03:45.000Z | mudata/_core/io.py | scverse/mudata | fbfc634e8f17bd70ed67bb8a37951564f16b61e6 | [
"BSD-3-Clause"
] | 10 | 2022-01-24T15:09:03.000Z | 2022-03-29T03:47:28.000Z | mudata/_core/io.py | scverse/mudata | fbfc634e8f17bd70ed67bb8a37951564f16b61e6 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import zarr
from typing import Union
from os import PathLike
import os
from warnings import warn
from collections.abc import MutableMapping
import numpy as np
import h5py
import anndata as ad
from anndata import AnnData
# from anndata.compat import _read_hdf5_attribute # 0.8
from pathlib import Path
from scipy import sparse
from mudata import MuData
from .file_backing import MuDataFileManager, AnnDataFileManager
#
# Saving multimodal data objects
#
def _write_h5mu(file: h5py.File, mdata: MuData, write_data=True, **kwargs):
    """Serialize a MuData object into an open HDF5 file.

    Writes global annotations (obs/var, *m/*p mappings, uns, obsmap/varmap),
    then each modality as an AnnData-encoded subgroup under ``mod/``.
    If ``write_data`` is False, the per-modality ``X`` matrices are skipped
    (used when a modality's data is backed elsewhere).
    ``**kwargs`` are forwarded to every ``write_elem`` call as dataset kwargs
    (e.g. compression settings).
    """
    from anndata._io.specs.registry import write_elem
    from .. import __version__, __mudataversion__, __anndataversion__
    # Global obs/var: string columns are converted to categoricals, and
    # per-modality columns are dropped via _shrink_attr before writing.
    write_elem(
        file,
        "obs",
        mdata.strings_to_categoricals(mdata._shrink_attr("obs", inplace=False)),
        dataset_kwargs=kwargs,
    )
    write_elem(
        file,
        "var",
        mdata.strings_to_categoricals(mdata._shrink_attr("var", inplace=False)),
        dataset_kwargs=kwargs,
    )
    write_elem(file, "obsm", dict(mdata.obsm), dataset_kwargs=kwargs)
    write_elem(file, "varm", dict(mdata.varm), dataset_kwargs=kwargs)
    write_elem(file, "obsp", dict(mdata.obsp), dataset_kwargs=kwargs)
    write_elem(file, "varp", dict(mdata.varp), dataset_kwargs=kwargs)
    write_elem(file, "uns", dict(mdata.uns), dataset_kwargs=kwargs)
    write_elem(file, "obsmap", dict(mdata.obsmap), dataset_kwargs=kwargs)
    write_elem(file, "varmap", dict(mdata.varmap), dataset_kwargs=kwargs)
    attrs = file.attrs
    attrs["axis"] = mdata.axis
    mod = file.require_group("mod")
    # One AnnData-encoded subgroup per modality.
    for k, v in mdata.mod.items():
        group = mod.require_group(k)
        adata = mdata.mod[k]
        adata.strings_to_categoricals()
        if adata.raw is not None:
            adata.strings_to_categoricals(adata.raw.var)
        if write_data:
            write_elem(group, "X", adata.X, dataset_kwargs=kwargs)
        if adata.raw is not None:
            write_elem(group, "raw", adata.raw)
        write_elem(group, "obs", adata.obs, dataset_kwargs=kwargs)
        write_elem(group, "var", adata.var, dataset_kwargs=kwargs)
        write_elem(group, "obsm", dict(adata.obsm), dataset_kwargs=kwargs)
        write_elem(group, "varm", dict(adata.varm), dataset_kwargs=kwargs)
        write_elem(group, "obsp", dict(adata.obsp), dataset_kwargs=kwargs)
        write_elem(group, "varp", dict(adata.varp), dataset_kwargs=kwargs)
        write_elem(group, "layers", dict(adata.layers), dataset_kwargs=kwargs)
        write_elem(group, "uns", dict(adata.uns), dataset_kwargs=kwargs)
        # Mark each modality group as AnnData-encoded.
        attrs = group.attrs
        attrs["encoding-type"] = "anndata"
        attrs["encoding-version"] = __anndataversion__
        attrs["encoder"] = "mudata"
        attrs["encoder-version"] = __version__
    # Preserve modality insertion order so readers can restore it.
    mod_attrs = mod.attrs
    mod_attrs["mod-order"] = list(mdata.mod.keys())
    attrs = file.attrs
    attrs["encoding-type"] = "MuData"
    attrs["encoding-version"] = __mudataversion__
    attrs["encoder"] = "mudata"
    attrs["encoder-version"] = __version__
    # Restore top-level annotation
    if not mdata.is_view or not mdata.isbacked:
        mdata.update()
def write_zarr(
    store: Union[MutableMapping, str, Path],
    data: Union[MuData, AnnData],
    chunks=None,
    write_data=True,
    **kwargs,
):
    """
    Write MuData or AnnData object to the Zarr store

    Matrices - sparse or dense - are currently stored as they are.

    AnnData input is delegated to anndata's own ``write_zarr``; MuData input
    mirrors the HDF5 layout of ``_write_h5mu`` (global slots plus one
    AnnData-encoded subgroup per modality under ``mod/``).
    ``chunks`` is applied only to dense per-modality ``X`` matrices;
    ``write_data=False`` skips writing ``X`` entirely.
    """
    import zarr
    from anndata._io.specs.registry import write_elem
    from anndata._io.zarr import write_zarr as anndata_write_zarr
    from .. import __version__, __mudataversion__, __anndataversion__
    if isinstance(data, AnnData):
        adata = data
        anndata_write_zarr(store, adata, chunks=chunks, **kwargs)
    elif isinstance(data, MuData):
        if isinstance(store, Path):
            store = str(store)
        file = zarr.open(store, mode="w")
        mdata = data
        # Global obs/var: strings -> categoricals, per-modality columns dropped.
        write_elem(
            file,
            "obs",
            mdata.strings_to_categoricals(mdata._shrink_attr("obs", inplace=False)),
            dataset_kwargs=kwargs,
        )
        write_elem(
            file,
            "var",
            mdata.strings_to_categoricals(mdata._shrink_attr("var", inplace=False)),
            dataset_kwargs=kwargs,
        )
        write_elem(file, "obsm", dict(mdata.obsm), dataset_kwargs=kwargs)
        write_elem(file, "varm", dict(mdata.varm), dataset_kwargs=kwargs)
        write_elem(file, "obsp", dict(mdata.obsp), dataset_kwargs=kwargs)
        write_elem(file, "varp", dict(mdata.varp), dataset_kwargs=kwargs)
        write_elem(file, "uns", dict(mdata.uns), dataset_kwargs=kwargs)
        write_elem(file, "obsmap", dict(mdata.obsmap), dataset_kwargs=kwargs)
        write_elem(file, "varmap", dict(mdata.varmap), dataset_kwargs=kwargs)
        attrs = file.attrs
        attrs["axis"] = mdata.axis
        mod = file.require_group("mod")
        for k, v in mdata.mod.items():
            group = mod.require_group(k)
            adata = mdata.mod[k]
            adata.strings_to_categoricals()
            if adata.raw is not None:
                adata.strings_to_categoricals(adata.raw.var)
            if write_data:
                # Chunking only applies to dense matrices; sparse X is
                # written with the default dataset kwargs.
                if chunks is not None and not isinstance(adata.X, sparse.spmatrix):
                    write_elem(group, "X", adata.X, dataset_kwargs=dict(chunks=chunks, **kwargs))
                else:
                    write_elem(group, "X", adata.X, dataset_kwargs=kwargs)
            if adata.raw is not None:
                write_elem(group, "raw", adata.raw)
            write_elem(group, "obs", adata.obs, dataset_kwargs=kwargs)
            write_elem(group, "var", adata.var, dataset_kwargs=kwargs)
            write_elem(group, "obsm", dict(adata.obsm), dataset_kwargs=kwargs)
            write_elem(group, "varm", dict(adata.varm), dataset_kwargs=kwargs)
            write_elem(group, "obsp", dict(adata.obsp), dataset_kwargs=kwargs)
            write_elem(group, "varp", dict(adata.varp), dataset_kwargs=kwargs)
            write_elem(group, "layers", dict(adata.layers), dataset_kwargs=kwargs)
            write_elem(group, "uns", dict(adata.uns), dataset_kwargs=kwargs)
            attrs = group.attrs
            attrs["encoding-type"] = "anndata"
            attrs["encoding-version"] = __anndataversion__
            attrs["encoder"] = "mudata"
            attrs["encoder-version"] = __version__
        # Preserve modality insertion order for readers.
        mod_attrs = mod.attrs
        mod_attrs["mod-order"] = list(mdata.mod.keys())
        attrs = file.attrs
        attrs["encoding-type"] = "MuData"
        attrs["encoding-version"] = __mudataversion__
        attrs["encoder"] = "mudata"
        attrs["encoder-version"] = __version__
        # Restore top-level annotation
        if not mdata.is_view or not mdata.isbacked:
            mdata.update()
def write_h5mu(filename: PathLike, mdata: MuData, **kwargs):
    """
    Write MuData object to the HDF5 file

    Matrices - sparse or dense - are currently stored as they are.

    The file is created with a 512-byte HDF5 user block, which is then filled
    with a plain-text "MuData (format-version=...)" signature so readers can
    identify mudata-written files before opening them as HDF5.
    """
    from .. import __version__, __mudataversion__, __anndataversion__
    with h5py.File(filename, "w", userblock_size=512) as f:
        _write_h5mu(f, mdata, **kwargs)
    # Reopen as a raw binary file to write the signature into the user block;
    # the HDF5 data itself starts at byte 512.
    with open(filename, "br+") as f:
        nbytes = f.write(
            f"MuData (format-version={__mudataversion__};creator=muon;creator-version={__version__})".encode(
                "utf-8"
            )
        )
        # Zero-pad the rest of the user block.
        f.write(
            b"\0" * (512 - nbytes)
        )  # this is only needed because the H5file was written in append mode
def write_h5ad(filename: PathLike, mod: str, data: Union[MuData, AnnData]):
    """
    Write AnnData object to the HDF5 file with a MuData container

    Replaces the modality ``mod`` inside an *existing* .h5mu file
    (the file must already contain a ``mod`` group).

    Currently is based on anndata._io.h5ad.write_h5ad internally.
    Matrices - sparse or dense - are currently stored as they are.

    Ideally this is merged later to anndata._io.h5ad.write_h5ad.

    Raises
    ------
    TypeError
        If ``data`` is neither AnnData nor MuData.
    ValueError
        If the target file has no ``mod`` slot.
    """
    from anndata._io.specs.registry import write_elem
    from anndata._io.h5ad import write_h5ad
    from .. import __version__, __anndataversion__
    if isinstance(data, AnnData):
        adata = data
    elif isinstance(data, MuData):
        adata = data.mod[mod]
    else:
        raise TypeError(f"Expected AnnData or MuData object with {mod} modality")
    with h5py.File(filename, "r+") as f:
        # Check that 'mod' is present
        if not "mod" in f:
            raise ValueError("The .h5mu object has to contain .mod slot")
        fm = f["mod"]
        # Remove the modality if it exists
        if mod in fm:
            del fm[mod]
        fmd = fm.create_group(mod)
        adata.strings_to_categoricals()
        if adata.raw is not None:
            adata.strings_to_categoricals(adata.raw.var)
        filepath = Path(filename)
        # Skip rewriting X when the AnnData is backed by this very file.
        if not (adata.isbacked and Path(adata.filename) == Path(filepath)):
            write_elem(fmd, f"X", adata.X)
        # NOTE: Calling write_elem() does not allow writing .raw into .h5mu modalities
        if adata.raw is not None:
            write_elem(f, f"mod/{mod}/raw", adata.raw)
        write_elem(fmd, "obs", adata.obs)
        write_elem(fmd, "var", adata.var)
        write_elem(fmd, "obsm", dict(adata.obsm))
        write_elem(fmd, "varm", dict(adata.varm))
        write_elem(fmd, "obsp", dict(adata.obsp))
        write_elem(fmd, "varp", dict(adata.varp))
        write_elem(fmd, "layers", dict(adata.layers))
        write_elem(fmd, "uns", dict(adata.uns))
        attrs = fmd.attrs
        attrs["encoding-type"] = "anndata"
        attrs["encoding-version"] = __anndataversion__
        attrs["encoder"] = "muon"
        attrs["encoder-version"] = __version__
# Backwards-compatible alias.
write_anndata = write_h5ad
def write(filename: PathLike, data: Union[MuData, AnnData]):
    """
    Write MuData or AnnData to an HDF5 file

    This function is designed to enhance I/O ease of use.
    It recognises the following formats of filename:
      - for MuData
            - FILE.h5mu
      - for AnnData
            - FILE.h5mu/MODALITY
            - FILE.h5mu/mod/MODALITY
            - FILE.h5ad

    Raises
    ------
    ValueError
        If the filename does not match any of the recognised formats.
    """
    import re

    if filename.endswith(".h5mu") or isinstance(data, MuData):
        assert filename.endswith(".h5mu") and isinstance(
            data, MuData
        ), "Can only save MuData object to .h5mu file"
        write_h5mu(filename, data)
    else:
        assert isinstance(data, AnnData), "Only MuData and AnnData objects are accepted"
        # Raw string: "\." in a non-raw literal is an invalid escape sequence
        # (SyntaxWarning on Python >= 3.12).
        m = re.search(r"^(.+)\.(h5mu)[/]?([A-Za-z]*)[/]?([/A-Za-z]*)$", filename)
        if m is not None:
            m = m.groups()
        else:
            # The regex above only matches .h5mu paths; handle plain .h5ad
            # names explicitly (mirrors the fallback in `read`). Previously
            # a valid .h5ad filename erroneously raised here.
            if filename.endswith(".h5ad"):
                m = [filename[:-5], "h5ad", "", ""]
            else:
                raise ValueError("Expected non-empty .h5ad or .h5mu file name")
        filepath = ".".join([m[0], m[1]])
        if m[1] == "h5mu":
            if m[3] == "":
                # .h5mu/<modality>
                return write_h5ad(filepath, m[2], data)
            elif m[2] == "mod":
                # .h5mu/mod/<modality>
                return write_h5ad(filepath, m[3], data)
            else:
                raise ValueError(
                    "If a single modality to be written from a .h5mu file, \
                    provide it after the filename separated by slash symbol:\
                    .h5mu/rna or .h5mu/mod/rna"
                )
        elif m[1] == "h5ad":
            return data.write(filepath)
        else:
            raise ValueError(
                "The file format is not recognised, expected to be an .h5mu or .h5ad file"
            )
#
# Reading from multimodal data objects
#
def read_h5mu(filename: PathLike, backed: Union[str, bool, None] = None):
    """
    Read MuData object from HDF5 file

    Parameters
    ----------
    filename
        Path to the .h5mu file.
    backed
        None/False for in-memory reading; True or "r" for read-only backed
        mode; "r+" for writable backed mode.
    """
    assert backed in [
        None,
        True,
        False,
        "r",
        "r+",
    ], "Argument `backed` should be boolean, or r/r+, or None"
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe
    # backed=True maps to read-only mode; only "r+" opens writable.
    if backed is True or not backed:
        mode = "r"
    else:
        mode = backed
    # A file manager keeps the HDF5 handle open for backed objects.
    manager = MuDataFileManager(filename, mode) if backed else MuDataFileManager()
    # Peek at the 512-byte user block for the "MuData" signature written by
    # write_h5mu; non-mudata HDF5 files still load, with a warning.
    with open(filename, "rb") as f:
        ish5mu = f.read(6) == b"MuData"
    if not ish5mu:
        if h5py.is_hdf5(filename):
            warn(
                "The HDF5 file was not created by muon, we can't guarantee that everything will work correctly"
            )
        else:
            raise ValueError("The file is not an HDF5 file")
    with h5py.File(filename, mode) as f:
        d = {}
        for k in f.keys():
            if k in ["obs", "var"]:
                d[k] = read_dataframe(f[k])
            if k == "mod":
                mods = {}
                gmods = f[k]
                for m in gmods.keys():
                    ad = _read_h5mu_mod(gmods[m], manager, backed not in (None, False))
                    mods[m] = ad
                mod_order = None
                if "mod-order" in gmods.attrs:
                    mod_order = gmods.attrs["mod-order"]
                # TODO: use in v0.8
                # mod_order = _read_hdf5_attribute(k, "mod-order")
                # Restore the original modality order if the attribute is
                # present and consistent with the stored groups.
                if mod_order is not None and all([m in gmods for m in mod_order]):
                    mods = {k: mods[k] for k in mod_order}
                d[k] = mods
            else:
                d[k] = read_elem(f[k])
        if "axis" in f.attrs:
            d["axis"] = f.attrs["axis"]
    mu = MuData._init_from_dict_(**d)
    mu.file = manager
    return mu
def read_zarr(store: Union[str, Path, MutableMapping, zarr.Group]):
    """\
    Read from a hierarchical Zarr array store.

    If the store has no ``mod`` group it is treated as a plain AnnData store
    and delegated to anndata's ``read_zarr``; otherwise a MuData object is
    assembled from the global slots and per-modality subgroups.

    Parameters
    ----------
    store
        The filename, a :class:`~typing.MutableMapping`, or a Zarr storage class.
    """
    import zarr
    from anndata._io.specs.registry import read_elem
    from anndata._io.zarr import (
        read_zarr as anndata_read_zarr,
        read_dataframe,
        _read_legacy_raw,
        _clean_uns,
    )
    if isinstance(store, Path):
        store = str(store)
    f = zarr.open(store, mode="r")
    d = {}
    # No 'mod' group means this is a plain AnnData zarr store.
    if "mod" not in f.keys():
        return anndata_read_zarr(store)
    manager = MuDataFileManager()
    for k in f.keys():
        if k in {"obs", "var"}:
            d[k] = read_dataframe(f[k])
        if k == "mod":
            mods = {}
            gmods = f[k]
            for m in gmods.keys():
                ad = _read_zarr_mod(gmods[m], manager)
                mods[m] = ad
            d[k] = mods
        else:  # Base case
            d[k] = read_elem(f[k])
    mu = MuData._init_from_dict_(**d)
    mu.file = manager
    return mu
def _read_zarr_mod(g: zarr.Group, manager: MuDataFileManager = None, backed: bool = False) -> AnnData:
    """Read a single modality subgroup from a Zarr store as an AnnData object.

    (Return annotation corrected: this returns AnnData, not dict.)
    In backed mode only X's dtype is recorded and the matrix itself is not
    loaded; a file manager, if given, is attached to the resulting object.
    """
    import zarr
    from anndata._io.specs.registry import read_elem
    from anndata._io.zarr import read_dataframe, _read_legacy_raw
    from anndata import Raw
    d = {}
    for k in g.keys():
        if k in ("obs", "var"):
            d[k] = read_dataframe(g[k])
        elif k == "X":
            X = g["X"]
            # Sparse matrices are stored as a group with a 'data' array;
            # dense ones carry a dtype directly.
            if isinstance(X, zarr.Group):
                dtype = X["data"].dtype
            elif hasattr(X, "dtype"):
                dtype = X.dtype
            else:
                raise ValueError()
            d["dtype"] = dtype
            if not backed:
                d["X"] = read_elem(X)
        elif k != "raw":  # raw is handled separately below
            d[k] = read_elem(g[k])
    ad = AnnData(**d)  # NOTE: local `ad` shadows the module alias `anndata as ad`
    if manager is not None:
        ad.file = AnnDataFileManager(ad, os.path.basename(g.name), manager)
    # In backed mode raw.X stays on disk; only var/varm are loaded eagerly.
    raw = _read_legacy_raw(
        g,
        d.get("raw"),
        read_dataframe,
        read_elem,
        attrs=("var", "varm") if backed else ("var", "varm", "X"),
    )
    if raw:
        ad._raw = Raw(ad, **raw)
    return ad
def _read_h5mu_mod(
    g: "h5py.Group", manager: MuDataFileManager = None, backed: bool = False
) -> AnnData:
    """Read a single modality subgroup from an .h5mu file as an AnnData object.

    (Return annotation corrected: this returns AnnData, not dict.)
    In backed mode only X's dtype is recorded and the matrix itself is not
    loaded; a file manager, if given, is attached to the resulting object.
    """
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe, _read_raw
    from anndata import Raw
    d = {}
    for k in g.keys():
        if k in ("obs", "var"):
            d[k] = read_dataframe(g[k])
        elif k == "X":
            X = g["X"]
            # Sparse matrices are stored as a group with a 'data' dataset;
            # dense ones carry a dtype directly.
            if isinstance(X, h5py.Group):
                dtype = X["data"].dtype
            elif hasattr(X, "dtype"):
                dtype = X.dtype
            else:
                raise ValueError()
            d["dtype"] = dtype
            if not backed:
                d["X"] = read_elem(X)
        elif k != "raw":  # raw is handled separately below
            d[k] = read_elem(g[k])
    ad = AnnData(**d)  # NOTE: local `ad` shadows the module alias `anndata as ad`
    if manager is not None:
        ad.file = AnnDataFileManager(ad, os.path.basename(g.name), manager)
    # In backed mode raw.X stays on disk; only var/varm are loaded eagerly.
    raw = _read_raw(g, attrs=("var", "varm") if backed else ("var", "varm", "X"))
    if raw:
        ad._raw = Raw(ad, **raw)
    return ad
def read_h5ad(
    filename: PathLike,
    mod: str,
    backed: Union[str, bool, None] = None,
) -> AnnData:
    """
    Read AnnData object from inside a .h5mu file
    or from a standalone .h5ad file

    Currently replicates and modifies anndata._io.h5ad.read_h5ad.
    Matrices are loaded as they are in the file (sparse or dense).

    Ideally this is merged later to anndata._io.h5ad.read_h5ad.

    Parameters
    ----------
    filename
        Path to the .h5mu file.
    mod
        Name of the modality to read from the file's ``mod`` group.
    backed
        None/False for in-memory; True or "r+" for writable backed mode,
        "r" for read-only backed mode.
    """
    assert backed in [
        None,
        True,
        False,
        "r",
        "r+",
    ], "Argument `backed` should be boolean, or r/r+, or None"
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe, _read_raw
    d = {}
    hdf5_mode = "r"
    if backed not in {None, False}:
        hdf5_mode = backed
        # backed=True defaults to writable mode here (unlike read_h5mu).
        if hdf5_mode is True:
            hdf5_mode = "r+"
        assert hdf5_mode in {"r", "r+"}
        backed = True
        manager = MuDataFileManager(filename, hdf5_mode)
    else:
        backed = False
        manager = None
    with h5py.File(filename, hdf5_mode) as f_root:
        f = f_root["mod"][mod]
        return _read_h5mu_mod(f, manager, backed)
# Backwards-compatible alias.
read_anndata = read_h5ad
def read(filename: PathLike, **kwargs) -> Union[MuData, AnnData]:
    """
    Read MuData object from HDF5 file
    or AnnData object (a single modality) inside it

    This function is designed to enhance I/O ease of use.
    It recognises the following formats:
      - FILE.h5mu
      - FILE.h5mu/MODALITY
      - FILE.h5mu/mod/MODALITY
      - FILE.h5ad

    ``**kwargs`` are forwarded to the underlying reader
    (``read_h5mu``, ``read_h5ad`` or ``anndata.read_h5ad``).

    Raises
    ------
    ValueError
        If the filename does not match any of the recognised formats.
    """
    import re

    # Raw string: "\." in a non-raw literal is an invalid escape sequence
    # (SyntaxWarning on Python >= 3.12).
    m = re.search(r"^(.+)\.(h5mu)[/]?([A-Za-z]*)[/]?([/A-Za-z]*)$", filename)
    if m is not None:
        m = m.groups()
    else:
        # The regex only matches .h5mu paths; plain .h5ad names are handled
        # explicitly.
        if filename.endswith(".h5ad"):
            m = [filename[:-5], "h5ad", "", ""]
        else:
            raise ValueError("Expected non-empty .h5ad or .h5mu file name")
    filepath = ".".join([m[0], m[1]])
    if m[1] == "h5mu":
        if all(i == 0 for i in map(len, m[2:])):
            # Ends with .h5mu
            return read_h5mu(filepath, **kwargs)
        elif m[3] == "":
            # .h5mu/<modality>
            return read_h5ad(filepath, m[2], **kwargs)
        elif m[2] == "mod":
            # .h5mu/mod/<modality>
            return read_h5ad(filepath, m[3], **kwargs)
        else:
            raise ValueError(
                "If a single modality to be read from a .h5mu file, \
                provide it after the filename separated by slash symbol:\
                .h5mu/rna or .h5mu/mod/rna"
            )
    elif m[1] == "h5ad":
        return ad.read_h5ad(filepath, **kwargs)
    else:
        raise ValueError("The file format is not recognised, expected to be an .h5mu or .h5ad file")
| 31.612013 | 111 | 0.583629 | from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import zarr
from typing import Union
from os import PathLike
import os
from warnings import warn
from collections.abc import MutableMapping
import numpy as np
import h5py
import anndata as ad
from anndata import AnnData
pathlib import Path
from scipy import sparse
from mudata import MuData
from .file_backing import MuDataFileManager, AnnDataFileManager
def _write_h5mu(file: h5py.File, mdata: MuData, write_data=True, **kwargs):
from anndata._io.specs.registry import write_elem
from .. import __version__, __mudataversion__, __anndataversion__
write_elem(
file,
"obs",
mdata.strings_to_categoricals(mdata._shrink_attr("obs", inplace=False)),
dataset_kwargs=kwargs,
)
write_elem(
file,
"var",
mdata.strings_to_categoricals(mdata._shrink_attr("var", inplace=False)),
dataset_kwargs=kwargs,
)
write_elem(file, "obsm", dict(mdata.obsm), dataset_kwargs=kwargs)
write_elem(file, "varm", dict(mdata.varm), dataset_kwargs=kwargs)
write_elem(file, "obsp", dict(mdata.obsp), dataset_kwargs=kwargs)
write_elem(file, "varp", dict(mdata.varp), dataset_kwargs=kwargs)
write_elem(file, "uns", dict(mdata.uns), dataset_kwargs=kwargs)
write_elem(file, "obsmap", dict(mdata.obsmap), dataset_kwargs=kwargs)
write_elem(file, "varmap", dict(mdata.varmap), dataset_kwargs=kwargs)
attrs = file.attrs
attrs["axis"] = mdata.axis
mod = file.require_group("mod")
for k, v in mdata.mod.items():
group = mod.require_group(k)
adata = mdata.mod[k]
adata.strings_to_categoricals()
if adata.raw is not None:
adata.strings_to_categoricals(adata.raw.var)
if write_data:
write_elem(group, "X", adata.X, dataset_kwargs=kwargs)
if adata.raw is not None:
write_elem(group, "raw", adata.raw)
write_elem(group, "obs", adata.obs, dataset_kwargs=kwargs)
write_elem(group, "var", adata.var, dataset_kwargs=kwargs)
write_elem(group, "obsm", dict(adata.obsm), dataset_kwargs=kwargs)
write_elem(group, "varm", dict(adata.varm), dataset_kwargs=kwargs)
write_elem(group, "obsp", dict(adata.obsp), dataset_kwargs=kwargs)
write_elem(group, "varp", dict(adata.varp), dataset_kwargs=kwargs)
write_elem(group, "layers", dict(adata.layers), dataset_kwargs=kwargs)
write_elem(group, "uns", dict(adata.uns), dataset_kwargs=kwargs)
attrs = group.attrs
attrs["encoding-type"] = "anndata"
attrs["encoding-version"] = __anndataversion__
attrs["encoder"] = "mudata"
attrs["encoder-version"] = __version__
mod_attrs = mod.attrs
mod_attrs["mod-order"] = list(mdata.mod.keys())
attrs = file.attrs
attrs["encoding-type"] = "MuData"
attrs["encoding-version"] = __mudataversion__
attrs["encoder"] = "mudata"
attrs["encoder-version"] = __version__
if not mdata.is_view or not mdata.isbacked:
mdata.update()
def write_zarr(
store: Union[MutableMapping, str, Path],
data: Union[MuData, AnnData],
chunks=None,
write_data=True,
**kwargs,
):
import zarr
from anndata._io.specs.registry import write_elem
from anndata._io.zarr import write_zarr as anndata_write_zarr
from .. import __version__, __mudataversion__, __anndataversion__
if isinstance(data, AnnData):
adata = data
anndata_write_zarr(store, adata, chunks=chunks, **kwargs)
elif isinstance(data, MuData):
if isinstance(store, Path):
store = str(store)
file = zarr.open(store, mode="w")
mdata = data
write_elem(
file,
"obs",
mdata.strings_to_categoricals(mdata._shrink_attr("obs", inplace=False)),
dataset_kwargs=kwargs,
)
write_elem(
file,
"var",
mdata.strings_to_categoricals(mdata._shrink_attr("var", inplace=False)),
dataset_kwargs=kwargs,
)
write_elem(file, "obsm", dict(mdata.obsm), dataset_kwargs=kwargs)
write_elem(file, "varm", dict(mdata.varm), dataset_kwargs=kwargs)
write_elem(file, "obsp", dict(mdata.obsp), dataset_kwargs=kwargs)
write_elem(file, "varp", dict(mdata.varp), dataset_kwargs=kwargs)
write_elem(file, "uns", dict(mdata.uns), dataset_kwargs=kwargs)
write_elem(file, "obsmap", dict(mdata.obsmap), dataset_kwargs=kwargs)
write_elem(file, "varmap", dict(mdata.varmap), dataset_kwargs=kwargs)
attrs = file.attrs
attrs["axis"] = mdata.axis
mod = file.require_group("mod")
for k, v in mdata.mod.items():
group = mod.require_group(k)
adata = mdata.mod[k]
adata.strings_to_categoricals()
if adata.raw is not None:
adata.strings_to_categoricals(adata.raw.var)
if write_data:
if chunks is not None and not isinstance(adata.X, sparse.spmatrix):
write_elem(group, "X", adata.X, dataset_kwargs=dict(chunks=chunks, **kwargs))
else:
write_elem(group, "X", adata.X, dataset_kwargs=kwargs)
if adata.raw is not None:
write_elem(group, "raw", adata.raw)
write_elem(group, "obs", adata.obs, dataset_kwargs=kwargs)
write_elem(group, "var", adata.var, dataset_kwargs=kwargs)
write_elem(group, "obsm", dict(adata.obsm), dataset_kwargs=kwargs)
write_elem(group, "varm", dict(adata.varm), dataset_kwargs=kwargs)
write_elem(group, "obsp", dict(adata.obsp), dataset_kwargs=kwargs)
write_elem(group, "varp", dict(adata.varp), dataset_kwargs=kwargs)
write_elem(group, "layers", dict(adata.layers), dataset_kwargs=kwargs)
write_elem(group, "uns", dict(adata.uns), dataset_kwargs=kwargs)
attrs = group.attrs
attrs["encoding-type"] = "anndata"
attrs["encoding-version"] = __anndataversion__
attrs["encoder"] = "mudata"
attrs["encoder-version"] = __version__
mod_attrs = mod.attrs
mod_attrs["mod-order"] = list(mdata.mod.keys())
attrs = file.attrs
attrs["encoding-type"] = "MuData"
attrs["encoding-version"] = __mudataversion__
attrs["encoder"] = "mudata"
attrs["encoder-version"] = __version__
if not mdata.is_view or not mdata.isbacked:
mdata.update()
def write_h5mu(filename: PathLike, mdata: MuData, **kwargs):
from .. import __version__, __mudataversion__, __anndataversion__
with h5py.File(filename, "w", userblock_size=512) as f:
_write_h5mu(f, mdata, **kwargs)
with open(filename, "br+") as f:
nbytes = f.write(
f"MuData (format-version={__mudataversion__};creator=muon;creator-version={__version__})".encode(
"utf-8"
)
)
f.write(
b"\0" * (512 - nbytes)
)
def write_h5ad(filename: PathLike, mod: str, data: Union[MuData, AnnData]):
from anndata._io.specs.registry import write_elem
from anndata._io.h5ad import write_h5ad
from .. import __version__, __anndataversion__
if isinstance(data, AnnData):
adata = data
elif isinstance(data, MuData):
adata = data.mod[mod]
else:
raise TypeError(f"Expected AnnData or MuData object with {mod} modality")
with h5py.File(filename, "r+") as f:
if not "mod" in f:
raise ValueError("The .h5mu object has to contain .mod slot")
fm = f["mod"]
if mod in fm:
del fm[mod]
fmd = fm.create_group(mod)
adata.strings_to_categoricals()
if adata.raw is not None:
adata.strings_to_categoricals(adata.raw.var)
filepath = Path(filename)
if not (adata.isbacked and Path(adata.filename) == Path(filepath)):
write_elem(fmd, f"X", adata.X)
if adata.raw is not None:
write_elem(f, f"mod/{mod}/raw", adata.raw)
write_elem(fmd, "obs", adata.obs)
write_elem(fmd, "var", adata.var)
write_elem(fmd, "obsm", dict(adata.obsm))
write_elem(fmd, "varm", dict(adata.varm))
write_elem(fmd, "obsp", dict(adata.obsp))
write_elem(fmd, "varp", dict(adata.varp))
write_elem(fmd, "layers", dict(adata.layers))
write_elem(fmd, "uns", dict(adata.uns))
attrs = fmd.attrs
attrs["encoding-type"] = "anndata"
attrs["encoding-version"] = __anndataversion__
attrs["encoder"] = "muon"
attrs["encoder-version"] = __version__
write_anndata = write_h5ad
def write(filename: PathLike, data: Union[MuData, AnnData]):
import re
if filename.endswith(".h5mu") or isinstance(data, MuData):
assert filename.endswith(".h5mu") and isinstance(
data, MuData
), "Can only save MuData object to .h5mu file"
write_h5mu(filename, data)
else:
assert isinstance(data, AnnData), "Only MuData and AnnData objects are accepted"
m = re.search("^(.+)\.(h5mu)[/]?([A-Za-z]*)[/]?([/A-Za-z]*)$", filename)
if m is not None:
m = m.groups()
else:
raise ValueError("Expected non-empty .h5ad or .h5mu file name")
filepath = ".".join([m[0], m[1]])
if m[1] == "h5mu":
if m[3] == "":
return write_h5ad(filepath, m[2], data)
elif m[2] == "mod":
return write_h5ad(filepath, m[3], data)
else:
raise ValueError(
"If a single modality to be written from a .h5mu file, \
provide it after the filename separated by slash symbol:\
.h5mu/rna or .h5mu/mod/rna"
)
elif m[1] == "h5ad":
return data.write(filepath)
else:
raise ValueError()
def read_h5mu(filename: PathLike, backed: Union[str, bool, None] = None):
    """Read a MuData object from an .h5mu file.

    :param filename: Path to the .h5mu file.
    :param backed: None/False loads everything into memory; True or "r"/"r+"
        keeps the X matrices on disk via a MuDataFileManager.
    """
    assert backed in [
        None,
        True,
        False,
        "r",
        "r+",
    ], "Argument `backed` should be boolean, or r/r+, or None"
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe
    if backed is True or not backed:
        mode = "r"
    else:
        mode = backed
    manager = MuDataFileManager(filename, mode) if backed else MuDataFileManager()
    # muon writes a b"MuData" magic prefix; warn when it is absent but the
    # file is still valid HDF5 (e.g. written by another tool).
    with open(filename, "rb") as f:
        ish5mu = f.read(6) == b"MuData"
    if not ish5mu:
        if h5py.is_hdf5(filename):
            warn(
                "The HDF5 file was not created by muon, we can't guarantee that everything will work correctly"
            )
        else:
            raise ValueError("The file is not an HDF5 file")
    with h5py.File(filename, mode) as f:
        d = {}
        for k in f.keys():
            if k in ["obs", "var"]:
                # Dedicated dataframe reader (also understands legacy
                # encodings). Previously this result was overwritten by the
                # generic `read_elem` below because the next branch was a
                # separate `if` instead of `elif`.
                d[k] = read_dataframe(f[k])
            elif k == "mod":
                mods = {}
                gmods = f[k]
                for m in gmods.keys():
                    ad = _read_h5mu_mod(gmods[m], manager, backed not in (None, False))
                    mods[m] = ad
                mod_order = None
                if "mod-order" in gmods.attrs:
                    mod_order = gmods.attrs["mod-order"]
                # TODO: use in v0.8
                # mod_order = _read_hdf5_attribute(k, "mod-order")
                if mod_order is not None and all([m in gmods for m in mod_order]):
                    mods = {k: mods[k] for k in mod_order}
                d[k] = mods
            else:
                d[k] = read_elem(f[k])
        if "axis" in f.attrs:
            d["axis"] = f.attrs["axis"]
    mu = MuData._init_from_dict_(**d)
    mu.file = manager
    return mu
def read_zarr(store: Union[str, Path, MutableMapping, zarr.Group]):
    """Read a MuData object from a zarr store.

    Falls back to anndata's ``read_zarr`` when the store has no ``mod`` group
    (i.e. it holds a plain AnnData object).
    """
    import zarr
    from anndata._io.specs.registry import read_elem
    from anndata._io.zarr import (
        read_zarr as anndata_read_zarr,
        read_dataframe,
        _read_legacy_raw,
        _clean_uns,
    )
    if isinstance(store, Path):
        store = str(store)
    f = zarr.open(store, mode="r")
    d = {}
    if "mod" not in f.keys():
        # No modalities: this is a plain AnnData store.
        return anndata_read_zarr(store)
    manager = MuDataFileManager()
    for k in f.keys():
        if k in {"obs", "var"}:
            # Previously this read was overwritten by the generic `read_elem`
            # below because the next branch was a separate `if`, not `elif`.
            d[k] = read_dataframe(f[k])
        elif k == "mod":
            mods = {}
            gmods = f[k]
            for m in gmods.keys():
                ad = _read_zarr_mod(gmods[m], manager)
                mods[m] = ad
            d[k] = mods
        else:  # Base case
            d[k] = read_elem(f[k])
    mu = MuData._init_from_dict_(**d)
    mu.file = manager
    return mu
def _read_zarr_mod(g: zarr.Group, manager: MuDataFileManager = None, backed: bool = False) -> AnnData:
    """Read a single modality stored under zarr group ``g`` as AnnData.

    :param g: zarr group holding one modality (``mod/<name>``).
    :param manager: Parent file manager; when given, the returned AnnData is
        attached to it through an AnnDataFileManager.
    :param backed: When True, ``X`` (and raw X) are not loaded into memory.
    """
    import zarr
    from anndata._io.specs.registry import read_elem
    from anndata._io.zarr import read_dataframe, _read_legacy_raw
    from anndata import Raw
    d = {}
    for k in g.keys():
        if k in ("obs", "var"):
            d[k] = read_dataframe(g[k])
        elif k == "X":
            X = g["X"]
            # Sparse X is stored as a group with a "data" array; dense X is
            # an array exposing .dtype directly.
            if isinstance(X, zarr.Group):
                dtype = X["data"].dtype
            elif hasattr(X, "dtype"):
                dtype = X.dtype
            else:
                raise ValueError()
            d["dtype"] = dtype
            # In backed mode X stays on disk; only its dtype is recorded.
            if not backed:
                d["X"] = read_elem(X)
        elif k != "raw":  # "raw" is reconstructed separately below
            d[k] = read_elem(g[k])
    ad = AnnData(**d)
    if manager is not None:
        ad.file = AnnDataFileManager(ad, os.path.basename(g.name), manager)
    raw = _read_legacy_raw(
        g,
        d.get("raw"),
        read_dataframe,
        read_elem,
        # Skip raw X in backed mode, mirroring the treatment of X above.
        attrs=("var", "varm") if backed else ("var", "varm", "X"),
    )
    if raw:
        ad._raw = Raw(ad, **raw)
    return ad
def _read_h5mu_mod(
    g: "h5py.Group", manager: MuDataFileManager = None, backed: bool = False
) -> AnnData:
    """Read a single modality stored under HDF5 group ``g`` as AnnData.

    :param g: HDF5 group holding one modality (``mod/<name>``).
    :param manager: Parent file manager; when given, the returned AnnData is
        attached to it through an AnnDataFileManager.
    :param backed: When True, ``X`` (and raw X) are not loaded into memory.
    """
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe, _read_raw
    from anndata import Raw
    d = {}
    for k in g.keys():
        if k in ("obs", "var"):
            d[k] = read_dataframe(g[k])
        elif k == "X":
            X = g["X"]
            # Sparse X is stored as a group with a "data" dataset; dense X is
            # a dataset exposing .dtype directly.
            if isinstance(X, h5py.Group):
                dtype = X["data"].dtype
            elif hasattr(X, "dtype"):
                dtype = X.dtype
            else:
                raise ValueError()
            d["dtype"] = dtype
            # In backed mode X stays on disk; only its dtype is recorded.
            if not backed:
                d["X"] = read_elem(X)
        elif k != "raw":  # "raw" is reconstructed separately below
            d[k] = read_elem(g[k])
    ad = AnnData(**d)
    if manager is not None:
        ad.file = AnnDataFileManager(ad, os.path.basename(g.name), manager)
    # Skip raw X in backed mode, mirroring the treatment of X above.
    raw = _read_raw(g, attrs=("var", "varm") if backed else ("var", "varm", "X"))
    if raw:
        ad._raw = Raw(ad, **raw)
    return ad
def read_h5ad(
    filename: PathLike,
    mod: str,
    backed: Union[str, bool, None] = None,
) -> AnnData:
    """Read a single modality ``mod`` from an .h5mu file as AnnData.

    :param filename: Path to the .h5mu file.
    :param mod: Name of the modality under the file's ``mod`` group.
    :param backed: None/False loads into memory; True (opened as "r+"),
        "r" or "r+" keep X on disk via a MuDataFileManager.
    """
    assert backed in [
        None,
        True,
        False,
        "r",
        "r+",
    ], "Argument `backed` should be boolean, or r/r+, or None"
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe, _read_raw
    # NOTE(review): `d` appears to be unused in this function.
    d = {}
    hdf5_mode = "r"
    if backed not in {None, False}:
        hdf5_mode = backed
        if hdf5_mode is True:
            hdf5_mode = "r+"
        assert hdf5_mode in {"r", "r+"}
        backed = True
        # The manager keeps its own handle open for backed access, outliving
        # the `with` block below.
        manager = MuDataFileManager(filename, hdf5_mode)
    else:
        backed = False
        manager = None
    with h5py.File(filename, hdf5_mode) as f_root:
        f = f_root["mod"][mod]
        return _read_h5mu_mod(f, manager, backed)
# Convenience alias for read_h5ad.
read_anndata = read_h5ad
def read(filename: PathLike, **kwargs) -> Union[MuData, AnnData]:
    """Read MuData from .h5mu or AnnData from .h5ad.

    A single modality can be requested by appending it to an .h5mu file name,
    e.g. ``pbmc.h5mu/rna`` or ``pbmc.h5mu/mod/rna``.

    :param filename: Path ending in .h5mu (optionally with a modality suffix)
        or .h5ad.
    :param kwargs: Passed through to ``read_h5mu`` / ``read_h5ad``.
    :raises ValueError: If the file name is not recognised or the modality
        suffix is malformed.
    """
    import re

    # Raw string: "\." in a plain literal is an invalid escape sequence.
    m = re.search(r"^(.+)\.(h5mu)[/]?([A-Za-z]*)[/]?([/A-Za-z]*)$", filename)
    if m is not None:
        m = m.groups()
    else:
        if filename.endswith(".h5ad"):
            m = [filename[:-5], "h5ad", "", ""]
        else:
            raise ValueError("Expected non-empty .h5ad or .h5mu file name")
    filepath = ".".join([m[0], m[1]])
    if m[1] == "h5mu":
        if all(i == 0 for i in map(len, m[2:])):
            # Ends with .h5mu
            return read_h5mu(filepath, **kwargs)
        elif m[3] == "":
            # .h5mu/<modality>
            return read_h5ad(filepath, m[2], **kwargs)
        elif m[2] == "mod":
            # .h5mu/mod/<modality>
            return read_h5ad(filepath, m[3], **kwargs)
        else:
            raise ValueError(
                "If a single modality to be read from a .h5mu file, \
                provide it after the filename separated by slash symbol:\
                .h5mu/rna or .h5mu/mod/rna"
            )
    elif m[1] == "h5ad":
        return ad.read_h5ad(filepath, **kwargs)
    else:
        raise ValueError("The file format is not recognised, expected to be an .h5mu or .h5ad file")
| true | true |
1c4974b9fca1aa6488c9bc567b5f3b3cb8f9a5fd | 3,464 | py | Python | salt/modules/sysmod.py | ageron/salt | 72a0a89011e55ce7c875e948b5f0e97e70328153 | [
"Apache-2.0"
] | 2 | 2019-03-30T02:12:56.000Z | 2021-03-08T18:59:46.000Z | salt/modules/sysmod.py | ageron/salt | 72a0a89011e55ce7c875e948b5f0e97e70328153 | [
"Apache-2.0"
] | null | null | null | salt/modules/sysmod.py | ageron/salt | 72a0a89011e55ce7c875e948b5f0e97e70328153 | [
"Apache-2.0"
] | null | null | null | '''
The sys module provides information about the available functions on the
minion.
'''
# Import python libs
import logging
# Import salt libs
# TODO: should probably use _getargs() from salt.utils?
from salt.state import _getargs
log = logging.getLogger(__name__)
def __virtual__():
'''
Return as sys
'''
return 'sys'
def doc(module=''):
    '''
    Return the docstrings for all modules. Optionally, specify a module or a
    function to narrow the selection.

    The strings are aggregated into a single document on the master for easy
    reading.

    CLI Example::

        salt '*' sys.doc
        salt '*' sys.doc sys
        salt '*' sys.doc sys.doc
    '''
    # A trailing dot lets "sys" match "sys.*" without also matching
    # "sysctl.*"; an exact function name (e.g. "sys.doc") is matched
    # separately in the condition below.
    prefix = ''
    if module:
        prefix = module if module.endswith('.') else module + '.'
    return dict(
        (fun, __salt__[fun].__doc__)
        for fun in __salt__
        if fun == module or fun.startswith(prefix)
    )
def list_functions(module=''):
    '''
    List the functions for all modules. Optionally, specify a module to list
    from.

    CLI Example::

        salt '*' sys.list_functions
        salt '*' sys.list_functions sys
    '''
    # A trailing dot makes "sys" match "sys.*" but not "sysctl.*".
    prefix = module
    if prefix and not prefix.endswith('.'):
        prefix += '.'
    return sorted(fn for fn in __salt__ if fn.startswith(prefix))
def list_modules():
    '''
    List the modules loaded on the minion

    CLI Example::

        salt '*' sys.list_modules
    '''
    # Keep the "<module>" part of each "<module>.<function>" key; entries
    # without a dot are not module functions and are skipped.
    return sorted({fn.split('.', 1)[0] for fn in __salt__ if '.' in fn})
def reload_modules():
    '''
    Tell the minion to reload the execution modules

    CLI Example::

        salt '*' sys.reload_modules
    '''
    # The minion intercepts this call before it reaches the module, so the
    # body only needs to acknowledge success.
    return True
def argspec(module=''):
    '''
    Return the argument specification of functions in Salt execution
    modules.

    CLI Example::

        salt '*' sys.argspec pkg.install
        salt '*' sys.argspec sys
        salt '*' sys.argspec
    '''
    ret = {}
    # TODO: cp.get_file will also match cp.get_file_str. this is the
    # same logic as sys.doc, and it is not working as expected, see
    # issue #3614
    if module:
        # allow both "sys" and "sys." to match sys, without also matching
        # sysctl
        # Materialize as a list: on Python 3, filter() returns an iterator
        # and len() on it raises TypeError.
        comps = [comp for comp in module.split('.') if comp]
        if len(comps) < 2:
            module = module + '.' if not module.endswith('.') else module
    for fun in __salt__:
        if fun.startswith(module):
            try:
                aspec = _getargs(__salt__[fun])
            except TypeError:
                # this happens if not callable
                continue
            args, varargs, kwargs, defaults = aspec
            ret[fun] = {}
            ret[fun]['args'] = args if args else None
            ret[fun]['defaults'] = defaults if defaults else None
            ret[fun]['varargs'] = True if varargs else None
            ret[fun]['kwargs'] = True if kwargs else None
    return ret
| 24.920863 | 78 | 0.582852 |
import logging
from salt.state import _getargs
log = logging.getLogger(__name__)
def __virtual__():
    """Load this execution module under the virtual name ``sys``."""
    return 'sys'
def doc(module=''):
    """Return docstrings of loaded functions, optionally filtered to one
    module or one function.
    """
    docs = {}
    if module:
        # Trailing dot so "sys" matches "sys.*" without matching "sysctl.*".
        target_mod = module + '.' if not module.endswith('.') else module
    else:
        target_mod = ''
    for fun in __salt__:
        # Exact function-name match, or prefix match on the module.
        if fun == module or fun.startswith(target_mod):
            docs[fun] = __salt__[fun].__doc__
    return docs
def list_functions(module=''):
    """Return a sorted list of loaded function names, optionally limited to
    one module.
    """
    names = set()
    if module:
        # Trailing dot so "sys" matches "sys.*" without matching "sysctl.*".
        module = module + '.' if not module.endswith('.') else module
    for func in __salt__:
        if func.startswith(module):
            names.add(func)
    return sorted(names)
def list_modules():
    """Return a sorted list of module names derived from the
    "<module>.<function>" keys in ``__salt__``.
    """
    modules = set()
    for func in __salt__:
        comps = func.split('.')
        # Keys without a dot are not module functions; skip them.
        if len(comps) < 2:
            continue
        modules.add(comps[0])
    return sorted(modules)
def reload_modules():
    """Acknowledge a module-reload request (handled by the minion before
    this function is reached).
    """
    return True
def argspec(module=''):
    """Return the argument specification of functions whose names start with
    ``module``.
    """
    ret = {}
    if module:
        comps = module.split('.')
        # NOTE(review): on Python 3, filter() returns an iterator and the
        # len() call below raises TypeError; wrap in list() to fix.
        comps = filter(None, comps)
        if len(comps) < 2:
            # Trailing dot so "sys" matches "sys.*" without "sysctl.*".
            module = module + '.' if not module.endswith('.') else module
    for fun in __salt__:
        if fun.startswith(module):
            try:
                aspec = _getargs(__salt__[fun])
            except TypeError:
                # Raised when the entry is not callable.
                continue
            args, varargs, kwargs, defaults = aspec
            ret[fun] = {}
            ret[fun]['args'] = args if args else None
            ret[fun]['defaults'] = defaults if defaults else None
            ret[fun]['varargs'] = True if varargs else None
            ret[fun]['kwargs'] = True if kwargs else None
    return ret
| true | true |
1c49773883879141ec47340b240f609fe8894f09 | 518 | py | Python | tests/test_functions.py | brisvag/mdocfile | abab15dac94460de7c62d339d7a2d497bbb722fd | [
"BSD-3-Clause"
] | 1 | 2022-02-23T02:42:35.000Z | 2022-02-23T02:42:35.000Z | tests/test_functions.py | brisvag/mdocfile | abab15dac94460de7c62d339d7a2d497bbb722fd | [
"BSD-3-Clause"
] | 1 | 2022-03-28T13:11:37.000Z | 2022-03-30T14:19:31.000Z | tests/test_functions.py | brisvag/mdocfile | abab15dac94460de7c62d339d7a2d497bbb722fd | [
"BSD-3-Clause"
] | 1 | 2022-03-18T13:23:08.000Z | 2022-03-18T13:23:08.000Z | import pandas as pd
import pytest
from mdocfile.functions import read
@pytest.mark.parametrize(
    'camel_to_snake', [True, False]
)
def test_read(tilt_series_mdoc_file, camel_to_snake: bool):
    """read() should return a 41x26 DataFrame whose column-name style follows
    the ``camel_to_snake`` flag.
    """
    df = read(tilt_series_mdoc_file, camel_to_snake=camel_to_snake)
    assert isinstance(df, pd.DataFrame)
    assert df.shape == (41, 26)
    # Column names must follow the requested naming convention.
    # (Leftover debug print() calls removed.)
    if camel_to_snake:
        assert 'tilt_angle' in df.columns
    else:
        assert 'TiltAngle' in df.columns
| 25.9 | 67 | 0.720077 | import pandas as pd
import pytest
from mdocfile.functions import read
@pytest.mark.parametrize(
    'camel_to_snake', [True, False]
)
def test_read(tilt_series_mdoc_file, camel_to_snake: bool):
    """read() should return a 41x26 DataFrame whose column-name style follows
    the ``camel_to_snake`` flag.
    """
    df = read(tilt_series_mdoc_file, camel_to_snake=camel_to_snake)
    # NOTE(review): leftover debug prints; consider removing.
    print(camel_to_snake, len(df.columns))
    print(df.columns)
    assert isinstance(df, pd.DataFrame)
    assert df.shape == (41, 26)
    if camel_to_snake:
        assert 'tilt_angle' in df.columns
    else:
        assert 'TiltAngle' in df.columns
| true | true |
1c49774d2ad0cf760e33d25aee3e251a29965c7f | 32,566 | py | Python | pyhap/camera.py | sander-vd/HAP-python | 991761ceadfd7796d454d61c87be7f5d4b75d432 | [
"Apache-2.0"
] | 3 | 2019-12-07T22:42:38.000Z | 2022-01-20T08:44:46.000Z | pyhap/camera.py | sander-vd/HAP-python | 991761ceadfd7796d454d61c87be7f5d4b75d432 | [
"Apache-2.0"
] | null | null | null | pyhap/camera.py | sander-vd/HAP-python | 991761ceadfd7796d454d61c87be7f5d4b75d432 | [
"Apache-2.0"
] | 1 | 2021-05-15T22:34:52.000Z | 2021-05-15T22:34:52.000Z | """Contains the Camera accessory and related.
When a HAP client (e.g. iOS) wants to start a video stream it does the following:
[0. Read supported RTP configuration]
[0. Read supported video configuration]
[0. Read supported audio configuration]
[0. Read the current streaming status]
1. Sets the SetupEndpoints characteristic to notify the camera about its IP address,
selected security parameters, etc.
2. The camera responds to the above by setting the SetupEndpoints with its IP address,
etc.
3. The client sets the SelectedRTPStreamConfiguration characteristic to notify the
camera of its prefered audio and video configuration and to initiate the start of the
streaming.
4. The camera starts the streaming with the above configuration.
[5. At some point the client can reconfigure or stop the stream similarly to step 3.]
"""
import asyncio
import functools
import os
import ipaddress
import logging
import struct
from uuid import UUID
from pyhap import RESOURCE_DIR
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_CAMERA
from pyhap.util import to_base64_str, byte_bool
from pyhap import tlv
# --- HAP RTP streaming protocol constants -----------------------------------
# Each mapping associates a readable name with the one-byte TLV tag or enum
# value used when encoding/decoding HAP camera characteristics.
# TLV tags of the SetupEndpoints request/response.
SETUP_TYPES = {
    'SESSION_ID': b'\x01',
    'STATUS': b'\x02',
    'ADDRESS': b'\x03',
    'VIDEO_SRTP_PARAM': b'\x04',
    'AUDIO_SRTP_PARAM': b'\x05',
    'VIDEO_SSRC': b'\x06',
    'AUDIO_SSRC': b'\x07'
}
# Status codes returned in a SetupEndpoints response.
SETUP_STATUS = {
    'SUCCESS': b'\x00',
    'BUSY': b'\x01',
    'ERROR': b'\x02'
}
# IP version flag used in the endpoint address block.
SETUP_IPV = {
    'IPV4': b'\x00',
    'IPV6': b'\x01'
}
# TLV tags of the address block inside SetupEndpoints.
SETUP_ADDR_INFO = {
    'ADDRESS_VER': b'\x01',
    'ADDRESS': b'\x02',
    'VIDEO_RTP_PORT': b'\x03',
    'AUDIO_RTP_PORT': b'\x04'
}
# TLV tags of an SRTP parameter block (crypto suite, key, salt).
SETUP_SRTP_PARAM = {
    'CRYPTO': b'\x01',
    'MASTER_KEY': b'\x02',
    'MASTER_SALT': b'\x03'
}
# Values for the StreamingStatus characteristic.
STREAMING_STATUS = {
    'AVAILABLE': b'\x00',
    'STREAMING': b'\x01',
    'BUSY': b'\x02'
}
RTP_CONFIG_TYPES = {
    'CRYPTO': b'\x02'
}
SRTP_CRYPTO_SUITES = {
    'AES_CM_128_HMAC_SHA1_80': b'\x00',
    'AES_CM_256_HMAC_SHA1_80': b'\x01',
    'NONE': b'\x02'
}
# TLV tags describing a video stream configuration.
VIDEO_TYPES = {
    'CODEC': b'\x01',
    'CODEC_PARAM': b'\x02',
    'ATTRIBUTES': b'\x03',
    'RTP_PARAM': b'\x04'
}
VIDEO_CODEC_TYPES = {
    'H264': b'\x00'
}
VIDEO_CODEC_PARAM_TYPES = {
    'PROFILE_ID': b'\x01',
    'LEVEL': b'\x02',
    'PACKETIZATION_MODE': b'\x03',
    'CVO_ENABLED': b'\x04',
    'CVO_ID': b'\x05'
}
VIDEO_CODEC_PARAM_CVO_TYPES = {
    'UNSUPPORTED': b'\x01',
    'SUPPORTED': b'\x02'
}
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES = {
    'BASELINE': b'\x00',
    'MAIN': b'\x01',
    'HIGH': b'\x02'
}
VIDEO_CODEC_PARAM_LEVEL_TYPES = {
    'TYPE3_1': b'\x00',
    'TYPE3_2': b'\x01',
    'TYPE4_0': b'\x02'
}
VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES = {
    'NON_INTERLEAVED': b'\x00'
}
# Width/height/fps attributes of a video resolution entry.
VIDEO_ATTRIBUTES_TYPES = {
    'IMAGE_WIDTH': b'\x01',
    'IMAGE_HEIGHT': b'\x02',
    'FRAME_RATE': b'\x03'
}
SUPPORTED_VIDEO_CONFIG_TAG = b'\x01'
# TLV tags of the SelectedRTPStreamConfiguration value.
SELECTED_STREAM_CONFIGURATION_TYPES = {
    'SESSION': b'\x01',
    'VIDEO': b'\x02',
    'AUDIO': b'\x03'
}
# RTP parameters shared by the audio and video configurations.
RTP_PARAM_TYPES = {
    'PAYLOAD_TYPE': b'\x01',
    'SYNCHRONIZATION_SOURCE': b'\x02',
    'MAX_BIT_RATE': b'\x03',
    'RTCP_SEND_INTERVAL': b'\x04',
    'MAX_MTU': b'\x05',
    'COMFORT_NOISE_PAYLOAD_TYPE': b'\x06'
}
# TLV tags describing an audio stream configuration.
AUDIO_TYPES = {
    'CODEC': b'\x01',
    'CODEC_PARAM': b'\x02',
    'RTP_PARAM': b'\x03',
    'COMFORT_NOISE': b'\x04'
}
AUDIO_CODEC_TYPES = {
    'PCMU': b'\x00',
    'PCMA': b'\x01',
    'AACELD': b'\x02',
    'OPUS': b'\x03'
}
AUDIO_CODEC_PARAM_TYPES = {
    'CHANNEL': b'\x01',
    'BIT_RATE': b'\x02',
    'SAMPLE_RATE': b'\x03',
    'PACKET_TIME': b'\x04'
}
AUDIO_CODEC_PARAM_BIT_RATE_TYPES = {
    'VARIABLE': b'\x00',
    'CONSTANT': b'\x01'
}
AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES = {
    'KHZ_8': b'\x00',
    'KHZ_16': b'\x01',
    'KHZ_24': b'\x02'
}
SUPPORTED_AUDIO_CODECS_TAG = b'\x01'
SUPPORTED_COMFORT_NOISE_TAG = b'\x02'
SUPPORTED_AUDIO_CONFIG_TAG = b'\x02'
SET_CONFIG_REQUEST_TAG = b'\x02'
SESSION_ID = b'\x01'
NO_SRTP = b'\x01\x01\x02\x02\x00\x03\x00'
'''Configuration value for no SRTP.'''
FFMPEG_CMD = (
    # pylint: disable=bad-continuation
    'ffmpeg -re -f avfoundation -i 0:0 -threads 0 '
    '-vcodec libx264 -an -pix_fmt yuv420p -r {fps} -f rawvideo -tune zerolatency '
    '-vf scale={width}:{height} -b:v {v_max_bitrate}k -bufsize {v_max_bitrate}k '
    '-payload_type 99 -ssrc {v_ssrc} -f rtp '
    '-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {v_srtp_key} '
    'srtp://{address}:{v_port}?rtcpport={v_port}&'
    'localrtcpport={v_port}&pkt_size=1378'
)
'''Template for the ffmpeg command.'''
class Camera(Accessory):
    """An Accessory that can negotiate camera stream settings with iOS and
    start a stream.
    """

    category = CATEGORY_CAMERA
@staticmethod
def get_supported_rtp_config(support_srtp):
"""Return a tlv representation of the RTP configuration we support.
SRTP support allows only the AES_CM_128_HMAC_SHA1_80 cipher for now.
:param support_srtp: True if SRTP is supported, False otherwise.
:type support_srtp: bool
"""
if support_srtp:
crypto = SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80']
else:
crypto = SRTP_CRYPTO_SUITES['NONE']
return tlv.encode(RTP_CONFIG_TYPES['CRYPTO'], crypto, to_base64=True)
    @staticmethod
    def get_supported_video_stream_config(video_params):
        """Return a tlv representation of the supported video stream configuration.

        Expected video parameters:
            - codec (with "profiles" and "levels")
            - resolutions (list of [width, height, fps])

        :param video_params: Supported video configurations
        :type video_params: dict
        """
        codec_params_tlv = tlv.encode(
            VIDEO_CODEC_PARAM_TYPES['PACKETIZATION_MODE'],
            VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES['NON_INTERLEAVED'])
        codec_params = video_params['codec']
        # Advertise every supported H.264 profile and level.
        for profile in codec_params['profiles']:
            codec_params_tlv += \
                tlv.encode(VIDEO_CODEC_PARAM_TYPES['PROFILE_ID'], profile)
        for level in codec_params['levels']:
            codec_params_tlv += \
                tlv.encode(VIDEO_CODEC_PARAM_TYPES['LEVEL'], level)
        attr_tlv = b''
        # Each resolution is (width, height, fps), packed as little-endian
        # unsigned 16-bit values.
        for resolution in video_params['resolutions']:
            res_tlv = tlv.encode(
                VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH'], struct.pack('<H', resolution[0]),
                VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT'], struct.pack('<H', resolution[1]),
                VIDEO_ATTRIBUTES_TYPES['FRAME_RATE'], struct.pack('<H', resolution[2]))
            attr_tlv += tlv.encode(VIDEO_TYPES['ATTRIBUTES'], res_tlv)
        # Only H.264 is advertised as the codec.
        config_tlv = tlv.encode(VIDEO_TYPES['CODEC'], VIDEO_CODEC_TYPES['H264'],
                                VIDEO_TYPES['CODEC_PARAM'], codec_params_tlv)
        return tlv.encode(SUPPORTED_VIDEO_CONFIG_TAG, config_tlv + attr_tlv,
                          to_base64=True)
    @staticmethod
    def get_supported_audio_stream_config(audio_params):
        """Return a tlv representation of the supported audio stream configuration.

        iOS supports only AACELD and OPUS

        Expected audio parameters:
            - codecs (list of dicts with "type" and "samplerate")
            - comfort_noise (optional bool)

        :param audio_params: Supported audio configurations
        :type audio_params: dict
        """
        has_supported_codec = False
        configs = b''
        for codec_param in audio_params['codecs']:
            param_type = codec_param['type']
            if param_type == 'OPUS':
                has_supported_codec = True
                codec = AUDIO_CODEC_TYPES['OPUS']
                bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            elif param_type == 'AAC-eld':
                has_supported_codec = True
                codec = AUDIO_CODEC_TYPES['AACELD']
                bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            else:
                # Codec not supported by iOS; skip it.
                logging.warning('Unsupported codec %s', param_type)
                continue
            param_samplerate = codec_param['samplerate']
            if param_samplerate == 8:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_8']
            elif param_samplerate == 16:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_16']
            elif param_samplerate == 24:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
            else:
                logging.warning('Unsupported sample rate %s', param_samplerate)
                continue
            param_tlv = tlv.encode(AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
                                   AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
                                   AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
            config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
                                    AUDIO_TYPES['CODEC_PARAM'], param_tlv)
            configs += tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
        if not has_supported_codec:
            # Fall back to a default OPUS/24kHz configuration so iOS still
            # gets a valid (if inaccurate) configuration to work with.
            logging.warning('Client does not support any audio codec that iOS supports.')
            codec = AUDIO_CODEC_TYPES['OPUS']
            bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
            param_tlv = tlv.encode(
                AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
                AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
                AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
            config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
                                    AUDIO_TYPES['CODEC_PARAM'], param_tlv)
            configs = tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
        comfort_noise = byte_bool(
                            audio_params.get('comfort_noise', False))
        audio_config = to_base64_str(
                            configs + tlv.encode(SUPPORTED_COMFORT_NOISE_TAG, comfort_noise))
        return audio_config
    def __init__(self, options, *args, **kwargs):
        """Initialize a camera accessory with the given options.

        :param options: Describes the supported video and audio configuration
            of this camera. Expected values are video, audio, srtp and address.
            Example configuration:

            .. code-block:: python

            {
                "video": {
                    "codec": {
                        "profiles": [
                            camera.VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["BASELINE"],
                        ],
                        "levels": [
                            camera.VIDEO_CODEC_PARAM_LEVEL_TYPES['TYPE3_1'],
                        ],
                    },
                    "resolutions": [
                        [320, 240, 15],  # Width, Height, framerate
                        [1024, 768, 30],
                        [640, 480, 30],
                        [640, 360, 30],
                        [480, 360, 30],
                        [480, 270, 30],
                        [320, 240, 30],
                        [320, 180, 30],
                    ],
                },
                "audio": {
                    "codecs": [
                        {
                            'type': 'OPUS',
                            'samplerate': 24,
                        },
                        {
                            'type': 'AAC-eld',
                            'samplerate': 16
                        }
                    ],
                },
                "address": "192.168.1.226",  # Address from which the camera will stream
            }

        Additional optional values are:
        - srtp - boolean, defaults to False. Whether the camera supports SRTP.
        - start_stream_cmd - string specifying the command to be executed to start
            the stream. The string can contain the keywords, corresponding to the
            video and audio configuration that was negotiated between the camera
            and the client. See the ``start`` method for a full list of parameters.

        :type options: ``dict``
        """
        self.streaming_status = STREAMING_STATUS['AVAILABLE']
        self.has_srtp = options.get('srtp', False)
        self.start_stream_cmd = options.get('start_stream_cmd', FFMPEG_CMD)
        # Record whether the streaming address is IPv4 or IPv6 as the byte
        # flag expected by the SetupEndpoints response.
        self.stream_address = options['address']
        try:
            ipaddress.IPv4Address(self.stream_address)
            self.stream_address_isv6 = b'\x00'
        except ValueError:
            self.stream_address_isv6 = b'\x01'
        # Maps session UUID -> negotiated session info (see set_endpoints).
        self.sessions = {}
        super().__init__(*args, **kwargs)
        self.add_preload_service('Microphone')
        management = self.add_preload_service('CameraRTPStreamManagement')
        management.configure_char('StreamingStatus',
                                  getter_callback=self._get_streaimg_status)
        management.configure_char('SupportedRTPConfiguration',
                                  value=self.get_supported_rtp_config(
                                            options.get('srtp', False)))
        management.configure_char('SupportedVideoStreamConfiguration',
                                  value=self.get_supported_video_stream_config(
                                            options['video']))
        management.configure_char('SupportedAudioStreamConfiguration',
                                  value=self.get_supported_audio_stream_config(
                                            options['audio']))
        management.configure_char('SelectedRTPStreamConfiguration',
                                  setter_callback=self.set_selected_stream_configuration)
        management.configure_char('SetupEndpoints',
                                  setter_callback=self.set_endpoints)
    async def _start_stream(self, objs, reconfigure):  # pylint: disable=unused-argument
        """Start or reconfigure video streaming for the given session.

        Schedules ``self.start_stream`` or ``self.reconfigure``.

        No support for reconfigure currently.

        :param objs: TLV-decoded SelectedRTPStreamConfiguration
        :type objs: ``dict``

        :param reconfigure: Whether the stream should be reconfigured instead of
            started.
        :type reconfigure: bool
        """
        video_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['VIDEO'])
        audio_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['AUDIO'])
        # Collect the negotiated stream options; keys are documented in
        # ``start_stream``.
        opts = {}
        if video_tlv:
            video_objs = tlv.decode(video_tlv)
            video_codec_params = video_objs.get(VIDEO_TYPES['CODEC_PARAM'])
            if video_codec_params:
                video_codec_param_objs = tlv.decode(video_codec_params)
                opts['v_profile_id'] = \
                    video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['PROFILE_ID']]
                opts['v_level'] = \
                    video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['LEVEL']]
            video_attrs = video_objs.get(VIDEO_TYPES['ATTRIBUTES'])
            if video_attrs:
                video_attr_objs = tlv.decode(video_attrs)
                opts['width'] = struct.unpack('<H',
                    video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH']])[0]
                opts['height'] = struct.unpack('<H',
                    video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT']])[0]
                opts['fps'] = struct.unpack('<B',
                    video_attr_objs[VIDEO_ATTRIBUTES_TYPES['FRAME_RATE']])[0]
            video_rtp_param = video_objs.get(VIDEO_TYPES['RTP_PARAM'])
            if video_rtp_param:
                video_rtp_param_objs = tlv.decode(video_rtp_param)
                # TODO: Optionals, handle the case where they are missing
                # NOTE(review): "1 or ..." short-circuits, so v_ssrc is
                # always 1 and the unpack is never evaluated; this matches
                # the hard-coded video_ssrc in set_endpoints.
                opts['v_ssrc'] = 1 or struct.unpack('<I',
                    video_rtp_param_objs.get(
                        RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']))[0]
                opts['v_payload_type'] = \
                    video_rtp_param_objs.get(RTP_PARAM_TYPES['PAYLOAD_TYPE'])
                opts['v_max_bitrate'] = struct.unpack('<H',
                    video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_BIT_RATE']))[0]
                opts['v_rtcp_interval'] = struct.unpack('<f',
                    video_rtp_param_objs.get(RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']))[0]
                opts['v_max_mtu'] = video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_MTU'])
        if audio_tlv:
            audio_objs = tlv.decode(audio_tlv)
            opts['a_codec'] = audio_objs[AUDIO_TYPES['CODEC']]
            audio_codec_param_objs = tlv.decode(
                                        audio_objs[AUDIO_TYPES['CODEC_PARAM']])
            audio_rtp_param_objs = tlv.decode(
                                        audio_objs[AUDIO_TYPES['RTP_PARAM']])
            opts['a_comfort_noise'] = audio_objs[AUDIO_TYPES['COMFORT_NOISE']]
            opts['a_channel'] = \
                audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['CHANNEL']][0]
            opts['a_bitrate'] = struct.unpack('?',
                audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['BIT_RATE']])[0]
            # Sample rate byte encodes 8/16/24 kHz as 0/1/2.
            opts['a_sample_rate'] = 8 * (
                1 + audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE']][0])
            opts['a_packet_time'] = struct.unpack('<B',
                audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['PACKET_TIME']])[0]
            opts['a_ssrc'] = struct.unpack('<I',
                audio_rtp_param_objs[RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']])[0]
            opts['a_payload_type'] = audio_rtp_param_objs[RTP_PARAM_TYPES['PAYLOAD_TYPE']]
            opts['a_max_bitrate'] = struct.unpack('<H',
                audio_rtp_param_objs[RTP_PARAM_TYPES['MAX_BIT_RATE']])[0]
            opts['a_rtcp_interval'] = struct.unpack('<f',
                audio_rtp_param_objs[RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']])[0]
            opts['a_comfort_payload_type'] = \
                audio_rtp_param_objs[RTP_PARAM_TYPES['COMFORT_NOISE_PAYLOAD_TYPE']]
        # Merge with the endpoint info stored by set_endpoints for this session.
        session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
        session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
        session_info = self.sessions[session_id]
        opts.update(session_info)
        success = await self.reconfigure_stream(session_info, opts) if reconfigure \
            else await self.start_stream(session_info, opts)
        if success:
            self.streaming_status = STREAMING_STATUS['STREAMING']
        else:
            logging.error('[%s] Faled to start/reconfigure stream, deleting session.',
                          session_id)
            del self.sessions[session_id]
            self.streaming_status = STREAMING_STATUS['AVAILABLE']
    def _get_streaimg_status(self):
        """Get the streaming status in TLV format.

        Called when iOS reads the StreaminStatus ``Characteristic``.

        NOTE(review): the method name is misspelled ("streaimg"), but it is
        wired up by name in ``__init__``, so renaming must be coordinated.
        """
        return tlv.encode(b'\x01', self.streaming_status, to_base64=True)
async def _stop_stream(self, objs):
"""Stop the stream for the specified session.
Schedules ``self.stop_stream``.
:param objs: TLV-decoded SelectedRTPStreamConfiguration value.
:param objs: ``dict``
"""
session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
session_info = self.sessions.get(session_id)
if not session_info:
logging.error('Requested to stop stream for session %s, but no '
'such session was found', session_id)
return
await self.stop_stream(session_info)
del self.sessions[session_id]
self.streaming_status = STREAMING_STATUS['AVAILABLE']
def set_selected_stream_configuration(self, value):
"""Set the selected stream configuration.
Called from iOS to set the SelectedRTPStreamConfiguration ``Characteristic``.
This method schedules a stream for the session in ``value`` to be start, stopped
or reconfigured, depending on the request.
:param value: base64-encoded selected configuration in TLV format
:type value: ``str``
"""
logging.debug('set_selected_stream_config - value - %s', value)
objs = tlv.decode(value, from_base64=True)
if SELECTED_STREAM_CONFIGURATION_TYPES['SESSION'] not in objs:
logging.error('Bad request to set selected stream configuration.')
return
session = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
request_type = session[b'\x02'][0]
logging.debug('Set stream config request: %d', request_type)
if request_type == 1:
job = functools.partial(self._start_stream, reconfigure=False)
elif request_type == 0:
job = self._stop_stream
elif request_type == 4:
job = functools.partial(self._start_stream, reconfigure=True)
else:
logging.error('Unknown request type %d', request_type)
return
self.driver.add_job(job, objs)
    def set_endpoints(self, value):
        """Configure streaming endpoints.

        Called when iOS sets the SetupEndpoints ``Characteristic``. The endpoint
        information for the camera should be set as the current value of SetupEndpoints.

        :param value: The base64-encoded stream session details in TLV format.
        :param value: ``str``
        """
        objs = tlv.decode(value, from_base64=True)
        session_id = UUID(bytes=objs[SETUP_TYPES['SESSION_ID']])
        # Extract address info
        address_tlv = objs[SETUP_TYPES['ADDRESS']]
        address_info_objs = tlv.decode(address_tlv)
        is_ipv6 = struct.unpack('?',
            address_info_objs[SETUP_ADDR_INFO['ADDRESS_VER']])[0]
        address = address_info_objs[SETUP_ADDR_INFO['ADDRESS']].decode('utf8')
        target_video_port = struct.unpack(
            '<H', address_info_objs[SETUP_ADDR_INFO['VIDEO_RTP_PORT']])[0]
        target_audio_port = struct.unpack(
            '<H', address_info_objs[SETUP_ADDR_INFO['AUDIO_RTP_PORT']])[0]
        # Video SRTP Params
        video_srtp_tlv = objs[SETUP_TYPES['VIDEO_SRTP_PARAM']]
        video_info_objs = tlv.decode(video_srtp_tlv)
        video_crypto_suite = video_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
        video_master_key = video_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
        video_master_salt = video_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]
        # Audio SRTP Params
        audio_srtp_tlv = objs[SETUP_TYPES['AUDIO_SRTP_PARAM']]
        audio_info_objs = tlv.decode(audio_srtp_tlv)
        audio_crypto_suite = audio_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
        audio_master_key = audio_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
        audio_master_salt = audio_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]
        logging.debug('Received endpoint configuration:'
                      '\nsession_id: %s\naddress: %s\nis_ipv6: %s'
                      '\ntarget_video_port: %s\ntarget_audio_port: %s'
                      '\nvideo_crypto_suite: %s\nvideo_srtp: %s'
                      '\naudio_crypto_suite: %s\naudio_srtp: %s',
                      session_id, address, is_ipv6, target_video_port, target_audio_port,
                      video_crypto_suite,
                      to_base64_str(video_master_key + video_master_salt),
                      audio_crypto_suite,
                      to_base64_str(audio_master_key + audio_master_salt))
        # Configure the SetupEndpoints response
        # Echo the client's keys back when SRTP is on; otherwise advertise
        # the "no SRTP" configuration.
        if self.has_srtp:
            video_srtp_tlv = tlv.encode(
                SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
                SETUP_SRTP_PARAM['MASTER_KEY'], video_master_key,
                SETUP_SRTP_PARAM['MASTER_SALT'], video_master_salt)
            audio_srtp_tlv = tlv.encode(
                SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
                SETUP_SRTP_PARAM['MASTER_KEY'], audio_master_key,
                SETUP_SRTP_PARAM['MASTER_SALT'], audio_master_salt)
        else:
            video_srtp_tlv = NO_SRTP
            audio_srtp_tlv = NO_SRTP
        # TODO: Use os.urandom(4) but within the allowed value bounds
        video_ssrc = b'\x01'
        audio_ssrc = b'\x01'
        res_address_tlv = tlv.encode(
            SETUP_ADDR_INFO['ADDRESS_VER'], self.stream_address_isv6,
            SETUP_ADDR_INFO['ADDRESS'], self.stream_address.encode('utf-8'),
            SETUP_ADDR_INFO['VIDEO_RTP_PORT'], struct.pack('<H', target_video_port),
            SETUP_ADDR_INFO['AUDIO_RTP_PORT'], struct.pack('<H', target_audio_port))
        response_tlv = tlv.encode(
            SETUP_TYPES['SESSION_ID'], session_id.bytes,
            SETUP_TYPES['STATUS'], SETUP_STATUS['SUCCESS'],
            SETUP_TYPES['ADDRESS'], res_address_tlv,
            SETUP_TYPES['VIDEO_SRTP_PARAM'], video_srtp_tlv,
            SETUP_TYPES['AUDIO_SRTP_PARAM'], audio_srtp_tlv,
            SETUP_TYPES['VIDEO_SSRC'], video_ssrc,
            SETUP_TYPES['AUDIO_SSRC'], audio_ssrc,
            to_base64=True)
        # Remember the negotiated endpoint so _start_stream/_stop_stream can
        # look it up by session UUID later.
        self.sessions[session_id] = {
            'id': session_id,
            'address': address,
            'v_port': target_video_port,
            'v_srtp_key': to_base64_str(video_master_key + video_master_salt),
            # TODO: 'v_ssrc': video_ssrc,
            'a_port': target_audio_port,
            'audio_srtp_key': to_base64_str(audio_master_key + audio_master_salt),
            'a_ssrc': audio_ssrc
        }
        self.get_service('CameraRTPStreamManagement')\
            .get_characteristic('SetupEndpoints')\
            .set_value(response_tlv)
async def stop(self):
"""Stop all streaming sessions."""
await asyncio.gather(*(
self.stop_stream(session_info) for session_info in self.sessions.values()))
# ### For client extensions ###
async def start_stream(self, session_info, stream_config):
"""Start a new stream with the given configuration.
This method can be implemented to start a new stream. Any specific information
about the started stream can be persisted in the ``session_info`` argument.
The same will be passed to ``stop_stream`` when the stream for this session
needs to be stopped.
The default implementation starts a new process with the command in
``self.start_stream_cmd``, formatted with the ``stream_config``.
:param session_info: Contains information about the current session. Can be used
for session storage. Available keys:
- id - The session ID.
:type session_info: ``dict``
:param stream_config: Stream configuration, as negotiated with the HAP client.
Implementations can only use part of these. Available keys:
General configuration:
- address - The IP address from which the camera will stream
- v_port - Remote port to which to stream video
- v_srtp_key - Base64-encoded key and salt value for the
AES_CM_128_HMAC_SHA1_80 cipher to use when streaming video.
The key and the salt are concatenated before encoding
- a_port - Remote audio port to which to stream audio
- a_srtp_key - As v_srtp_params, but for the audio stream.
Video configuration:
- v_profile_id - The profile ID for the H.264 codec, e.g. baseline.
Refer to ``VIDEO_CODEC_PARAM_PROFILE_ID_TYPES``.
- v_level - The level in the profile ID, e.g. 3:1.
Refer to ``VIDEO_CODEC_PARAM_LEVEL_TYPES``.
- width - Video width
- height - Video height
- fps - Video frame rate
- v_ssrc - Video synchronisation source
- v_payload_type - Type of the video codec
- v_max_bitrate - Maximum bit rate generated by the codec in kbps
and averaged over 1 second
- v_rtcp_interval - Minimum RTCP interval in seconds
- v_max_mtu - MTU that the IP camera must use to transmit
Video RTP packets.
Audio configuration:
- a_bitrate - Whether the bitrate is variable or constant
- a_codec - Audio codec
- a_comfort_noise - Wheter to use a comfort noise codec
- a_channel - Number of audio channels
- a_sample_rate - Audio sample rate in KHz
- a_packet_time - Length of time represented by the media in a packet
- a_ssrc - Audio synchronisation source
- a_payload_type - Type of the audio codec
- a_max_bitrate - Maximum bit rate generated by the codec in kbps
and averaged over 1 second
- a_rtcp_interval - Minimum RTCP interval in seconds
- a_comfort_payload_type - The type of codec for comfort noise
:return: True if and only if starting the stream command was successful.
:rtype: ``bool``
"""
logging.debug('[%s] Starting stream with the following parameters: %s',
session_info['id'], stream_config)
cmd = self.start_stream_cmd.format(**stream_config).split()
logging.debug('Executing start stream command: "%s"', ' '.join(cmd))
try:
process = await asyncio.create_subprocess_exec(*cmd,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.PIPE,
limit=1024)
except Exception as e: # pylint: disable=broad-except
logging.error('Failed to start streaming process because of error: %s', e)
return False
session_info['process'] = process
logging.info('[%s] Started stream process - PID %d',
session_info['id'], process.pid)
return True
async def stop_stream(self, session_info): # pylint: disable=no-self-use
"""Stop the stream for the given ``session_id``.
This method can be implemented if custom stop stream commands are needed. The
default implementation gets the ``process`` value from the ``session_info``
object and terminates it (assumes it is a ``subprocess.Popen`` object).
:param session_info: The session info object. Available keys:
- id - The session ID.
:type session_info: ``dict``
"""
session_id = session_info['id']
ffmpeg_process = session_info.get('process')
if ffmpeg_process:
logging.info('[%s] Stopping stream.', session_id)
try:
ffmpeg_process.terminate()
_, stderr = await asyncio.wait_for(
ffmpeg_process.communicate(), timeout=2.0)
logging.debug('Stream command stderr: %s', stderr)
except asyncio.TimeoutError:
logging.error('Timeout while waiting for the stream process '
'to terminate. Trying with kill.')
ffmpeg_process.kill()
await ffmpeg_process.wait()
logging.debug('Stream process stopped.')
else:
logging.warning('No process for session ID %s', session_id)
async def reconfigure_stream(self, session_info, stream_config):
"""Reconfigure the stream so that it uses the given ``stream_config``.
:param session_info: The session object for the session that needs to
be reconfigured. Available keys:
- id - The session id.
:type session_id: ``dict``
:return: True if and only if the reconfiguration is successful.
:rtype: ``bool``
"""
await self.start_stream(session_info, stream_config)
def get_snapshot(self, image_size): # pylint: disable=unused-argument, no-self-use
"""Return a jpeg of a snapshot from the camera.
Overwrite to implement getting snapshots from your camera.
:param image_size: ``dict`` describing the requested image size. Contains the
keys "image-width" and "image-height"
"""
with open(os.path.join(RESOURCE_DIR, 'snapshot.jpg'), 'rb') as fp:
return fp.read()
| 38.67696 | 90 | 0.605171 |
import asyncio
import functools
import os
import ipaddress
import logging
import struct
from uuid import UUID
from pyhap import RESOURCE_DIR
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_CAMERA
from pyhap.util import to_base64_str, byte_bool
from pyhap import tlv
# --- TLV tag tables for the HomeKit camera RTP stream management service. ---
# Values are single-byte TLV type tags (or enum values) used when encoding and
# decoding the CameraRTPStreamManagement characteristics below.

# Tags of the SetupEndpoints request/response TLV.
SETUP_TYPES = {
    'SESSION_ID': b'\x01',
    'STATUS': b'\x02',
    'ADDRESS': b'\x03',
    'VIDEO_SRTP_PARAM': b'\x04',
    'AUDIO_SRTP_PARAM': b'\x05',
    'VIDEO_SSRC': b'\x06',
    'AUDIO_SSRC': b'\x07'
}
# Status codes reported in a SetupEndpoints response.
SETUP_STATUS = {
    'SUCCESS': b'\x00',
    'BUSY': b'\x01',
    'ERROR': b'\x02'
}
# IP version flag used in the address TLV.
SETUP_IPV = {
    'IPV4': b'\x00',
    'IPV6': b'\x01'
}
# Tags inside the controller/accessory address TLV.
SETUP_ADDR_INFO = {
    'ADDRESS_VER': b'\x01',
    'ADDRESS': b'\x02',
    'VIDEO_RTP_PORT': b'\x03',
    'AUDIO_RTP_PORT': b'\x04'
}
# Tags inside an SRTP parameters TLV.
SETUP_SRTP_PARAM = {
    'CRYPTO': b'\x01',
    'MASTER_KEY': b'\x02',
    'MASTER_SALT': b'\x03'
}
# Values of the StreamingStatus characteristic.
STREAMING_STATUS = {
    'AVAILABLE': b'\x00',
    'STREAMING': b'\x01',
    'BUSY': b'\x02'
}
# Tags of the SupportedRTPConfiguration TLV.
RTP_CONFIG_TYPES = {
    'CRYPTO': b'\x02'
}
# Advertisable SRTP crypto suites.
SRTP_CRYPTO_SUITES = {
    'AES_CM_128_HMAC_SHA1_80': b'\x00',
    'AES_CM_256_HMAC_SHA1_80': b'\x01',
    'NONE': b'\x02'
}
# Tags of a video stream configuration TLV.
VIDEO_TYPES = {
    'CODEC': b'\x01',
    'CODEC_PARAM': b'\x02',
    'ATTRIBUTES': b'\x03',
    'RTP_PARAM': b'\x04'
}
# Supported video codecs.
VIDEO_CODEC_TYPES = {
    'H264': b'\x00'
}
# Tags of the H.264 codec parameters TLV.
VIDEO_CODEC_PARAM_TYPES = {
    'PROFILE_ID': b'\x01',
    'LEVEL': b'\x02',
    'PACKETIZATION_MODE': b'\x03',
    'CVO_ENABLED': b'\x04',
    'CVO_ID': b'\x05'
}
# CVO (coordination of video orientation) support flags.
VIDEO_CODEC_PARAM_CVO_TYPES = {
    'UNSUPPORTED': b'\x01',
    'SUPPORTED': b'\x02'
}
# H.264 profiles.
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES = {
    'BASELINE': b'\x00',
    'MAIN': b'\x01',
    'HIGH': b'\x02'
}
# H.264 levels.
VIDEO_CODEC_PARAM_LEVEL_TYPES = {
    'TYPE3_1': b'\x00',
    'TYPE3_2': b'\x01',
    'TYPE4_0': b'\x02'
}
# H.264 packetization modes.
VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES = {
    'NON_INTERLEAVED': b'\x00'
}
# Tags of a video attributes (resolution) TLV.
VIDEO_ATTRIBUTES_TYPES = {
    'IMAGE_WIDTH': b'\x01',
    'IMAGE_HEIGHT': b'\x02',
    'FRAME_RATE': b'\x03'
}
# Wrapper tag for each entry of SupportedVideoStreamConfiguration.
SUPPORTED_VIDEO_CONFIG_TAG = b'\x01'
# Top-level tags of a SelectedRTPStreamConfiguration write.
SELECTED_STREAM_CONFIGURATION_TYPES = {
    'SESSION': b'\x01',
    'VIDEO': b'\x02',
    'AUDIO': b'\x03'
}
# Tags of an RTP parameters TLV (shared by audio and video).
RTP_PARAM_TYPES = {
    'PAYLOAD_TYPE': b'\x01',
    'SYNCHRONIZATION_SOURCE': b'\x02',
    'MAX_BIT_RATE': b'\x03',
    'RTCP_SEND_INTERVAL': b'\x04',
    'MAX_MTU': b'\x05',
    'COMFORT_NOISE_PAYLOAD_TYPE': b'\x06'
}
# Tags of an audio stream configuration TLV.
AUDIO_TYPES = {
    'CODEC': b'\x01',
    'CODEC_PARAM': b'\x02',
    'RTP_PARAM': b'\x03',
    'COMFORT_NOISE': b'\x04'
}
# Supported audio codecs.
AUDIO_CODEC_TYPES = {
    'PCMU': b'\x00',
    'PCMA': b'\x01',
    'AACELD': b'\x02',
    'OPUS': b'\x03'
}
# Tags of the audio codec parameters TLV.
AUDIO_CODEC_PARAM_TYPES = {
    'CHANNEL': b'\x01',
    'BIT_RATE': b'\x02',
    'SAMPLE_RATE': b'\x03',
    'PACKET_TIME': b'\x04'
}
# Audio bit-rate modes.
AUDIO_CODEC_PARAM_BIT_RATE_TYPES = {
    'VARIABLE': b'\x00',
    'CONSTANT': b'\x01'
}
# Audio sample rates.
AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES = {
    'KHZ_8': b'\x00',
    'KHZ_16': b'\x01',
    'KHZ_24': b'\x02'
}
# Wrapper tags used when building the supported-audio configuration.
SUPPORTED_AUDIO_CODECS_TAG = b'\x01'
SUPPORTED_COMFORT_NOISE_TAG = b'\x02'
SUPPORTED_AUDIO_CONFIG_TAG = b'\x02'
SET_CONFIG_REQUEST_TAG = b'\x02'
SESSION_ID = b'\x01'
# Pre-encoded SRTP parameters meaning "no SRTP" (crypto suite NONE with empty
# master key and salt).
NO_SRTP = b'\x01\x01\x02\x02\x00\x03\x00'
# Default stream command; the placeholders are filled from the negotiated
# stream configuration in ``Camera.start_stream``. Only the video stream is
# produced by this default command.
FFMPEG_CMD = (
    'ffmpeg -re -f avfoundation -i 0:0 -threads 0 '
    '-vcodec libx264 -an -pix_fmt yuv420p -r {fps} -f rawvideo -tune zerolatency '
    '-vf scale={width}:{height} -b:v {v_max_bitrate}k -bufsize {v_max_bitrate}k '
    '-payload_type 99 -ssrc {v_ssrc} -f rtp '
    '-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {v_srtp_key} '
    'srtp://{address}:{v_port}?rtcpport={v_port}&'
    'localrtcpport={v_port}&pkt_size=1378'
)
class Camera(Accessory):
category = CATEGORY_CAMERA
@staticmethod
def get_supported_rtp_config(support_srtp):
if support_srtp:
crypto = SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80']
else:
crypto = SRTP_CRYPTO_SUITES['NONE']
return tlv.encode(RTP_CONFIG_TYPES['CRYPTO'], crypto, to_base64=True)
@staticmethod
def get_supported_video_stream_config(video_params):
codec_params_tlv = tlv.encode(
VIDEO_CODEC_PARAM_TYPES['PACKETIZATION_MODE'],
VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES['NON_INTERLEAVED'])
codec_params = video_params['codec']
for profile in codec_params['profiles']:
codec_params_tlv += \
tlv.encode(VIDEO_CODEC_PARAM_TYPES['PROFILE_ID'], profile)
for level in codec_params['levels']:
codec_params_tlv += \
tlv.encode(VIDEO_CODEC_PARAM_TYPES['LEVEL'], level)
attr_tlv = b''
for resolution in video_params['resolutions']:
res_tlv = tlv.encode(
VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH'], struct.pack('<H', resolution[0]),
VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT'], struct.pack('<H', resolution[1]),
VIDEO_ATTRIBUTES_TYPES['FRAME_RATE'], struct.pack('<H', resolution[2]))
attr_tlv += tlv.encode(VIDEO_TYPES['ATTRIBUTES'], res_tlv)
config_tlv = tlv.encode(VIDEO_TYPES['CODEC'], VIDEO_CODEC_TYPES['H264'],
VIDEO_TYPES['CODEC_PARAM'], codec_params_tlv)
return tlv.encode(SUPPORTED_VIDEO_CONFIG_TAG, config_tlv + attr_tlv,
to_base64=True)
    @staticmethod
    def get_supported_audio_stream_config(audio_params):
        """Build the SupportedAudioStreamConfiguration value (base64 string).

        Encodes one configuration entry per codec in ``audio_params['codecs']``
        that the accessory can serve; if none of the requested codecs is
        supported, a single default Opus/24kHz configuration is advertised
        instead.
        """
        has_supported_codec = False
        configs = b''
        for codec_param in audio_params['codecs']:
            param_type = codec_param['type']
            # Only Opus and AAC-eld are accepted; everything else is skipped.
            if param_type == 'OPUS':
                has_supported_codec = True
                codec = AUDIO_CODEC_TYPES['OPUS']
                bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            elif param_type == 'AAC-eld':
                has_supported_codec = True
                codec = AUDIO_CODEC_TYPES['AACELD']
                bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            else:
                logging.warning('Unsupported codec %s', param_type)
                continue
            # Map the numeric sample rate to its TLV enum value.
            param_samplerate = codec_param['samplerate']
            if param_samplerate == 8:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_8']
            elif param_samplerate == 16:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_16']
            elif param_samplerate == 24:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
            else:
                logging.warning('Unsupported sample rate %s', param_samplerate)
                continue
            param_tlv = tlv.encode(AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
                                   AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
                                   AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
            config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
                                    AUDIO_TYPES['CODEC_PARAM'], param_tlv)
            configs += tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
        if not has_supported_codec:
            # Fallback: advertise a single Opus/variable-bitrate/24kHz entry,
            # replacing (not appending to) whatever was accumulated above.
            logging.warning('Client does not support any audio codec that iOS supports.')
            codec = AUDIO_CODEC_TYPES['OPUS']
            bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
            param_tlv = tlv.encode(
                AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
                AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
                AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
            config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
                                    AUDIO_TYPES['CODEC_PARAM'], param_tlv)
            configs = tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
        # Append the comfort-noise support flag and base64-encode the result.
        comfort_noise = byte_bool(
            audio_params.get('comfort_noise', False))
        audio_config = to_base64_str(
            configs + tlv.encode(SUPPORTED_COMFORT_NOISE_TAG, comfort_noise))
        return audio_config
    def __init__(self, options, *args, **kwargs):
        """Initialize the camera accessory and its RTP stream management service.

        :param options: Camera configuration. Recognized keys: ``address``
            (required, the IP the camera streams from), ``srtp`` (bool),
            ``start_stream_cmd`` (command template, defaults to FFMPEG_CMD),
            ``video`` and ``audio`` (supported stream configurations).
        :type options: ``dict``
        """
        self.streaming_status = STREAMING_STATUS['AVAILABLE']
        self.has_srtp = options.get('srtp', False)
        self.start_stream_cmd = options.get('start_stream_cmd', FFMPEG_CMD)
        self.stream_address = options['address']
        # Detect the IP version of the configured stream address; anything
        # that is not a valid IPv4 address is treated as IPv6.
        try:
            ipaddress.IPv4Address(self.stream_address)
            self.stream_address_isv6 = b'\x00'
        except ValueError:
            self.stream_address_isv6 = b'\x01'
        # Maps session UUID -> session info dict (populated by set_endpoints).
        self.sessions = {}
        super().__init__(*args, **kwargs)
        self.add_preload_service('Microphone')
        management = self.add_preload_service('CameraRTPStreamManagement')
        # NOTE(review): '_get_streaimg_status' is a typo for
        # '_get_streaming_status' kept for compatibility with the method name.
        management.configure_char('StreamingStatus',
                                  getter_callback=self._get_streaimg_status)
        management.configure_char('SupportedRTPConfiguration',
                                  value=self.get_supported_rtp_config(
                                      options.get('srtp', False)))
        management.configure_char('SupportedVideoStreamConfiguration',
                                  value=self.get_supported_video_stream_config(
                                      options['video']))
        management.configure_char('SupportedAudioStreamConfiguration',
                                  value=self.get_supported_audio_stream_config(
                                      options['audio']))
        management.configure_char('SelectedRTPStreamConfiguration',
                                  setter_callback=self.set_selected_stream_configuration)
        management.configure_char('SetupEndpoints',
                                  setter_callback=self.set_endpoints)
async def _start_stream(self, objs, reconfigure):
video_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['VIDEO'])
audio_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['AUDIO'])
opts = {}
if video_tlv:
video_objs = tlv.decode(video_tlv)
video_codec_params = video_objs.get(VIDEO_TYPES['CODEC_PARAM'])
if video_codec_params:
video_codec_param_objs = tlv.decode(video_codec_params)
opts['v_profile_id'] = \
video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['PROFILE_ID']]
opts['v_level'] = \
video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['LEVEL']]
video_attrs = video_objs.get(VIDEO_TYPES['ATTRIBUTES'])
if video_attrs:
video_attr_objs = tlv.decode(video_attrs)
opts['width'] = struct.unpack('<H',
video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH']])[0]
opts['height'] = struct.unpack('<H',
video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT']])[0]
opts['fps'] = struct.unpack('<B',
video_attr_objs[VIDEO_ATTRIBUTES_TYPES['FRAME_RATE']])[0]
video_rtp_param = video_objs.get(VIDEO_TYPES['RTP_PARAM'])
if video_rtp_param:
video_rtp_param_objs = tlv.decode(video_rtp_param)
opts['v_ssrc'] = 1 or struct.unpack('<I',
video_rtp_param_objs.get(
RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']))[0]
opts['v_payload_type'] = \
video_rtp_param_objs.get(RTP_PARAM_TYPES['PAYLOAD_TYPE'])
opts['v_max_bitrate'] = struct.unpack('<H',
video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_BIT_RATE']))[0]
opts['v_rtcp_interval'] = struct.unpack('<f',
video_rtp_param_objs.get(RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']))[0]
opts['v_max_mtu'] = video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_MTU'])
if audio_tlv:
audio_objs = tlv.decode(audio_tlv)
opts['a_codec'] = audio_objs[AUDIO_TYPES['CODEC']]
audio_codec_param_objs = tlv.decode(
audio_objs[AUDIO_TYPES['CODEC_PARAM']])
audio_rtp_param_objs = tlv.decode(
audio_objs[AUDIO_TYPES['RTP_PARAM']])
opts['a_comfort_noise'] = audio_objs[AUDIO_TYPES['COMFORT_NOISE']]
opts['a_channel'] = \
audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['CHANNEL']][0]
opts['a_bitrate'] = struct.unpack('?',
audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['BIT_RATE']])[0]
opts['a_sample_rate'] = 8 * (
1 + audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE']][0])
opts['a_packet_time'] = struct.unpack('<B',
audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['PACKET_TIME']])[0]
opts['a_ssrc'] = struct.unpack('<I',
audio_rtp_param_objs[RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']])[0]
opts['a_payload_type'] = audio_rtp_param_objs[RTP_PARAM_TYPES['PAYLOAD_TYPE']]
opts['a_max_bitrate'] = struct.unpack('<H',
audio_rtp_param_objs[RTP_PARAM_TYPES['MAX_BIT_RATE']])[0]
opts['a_rtcp_interval'] = struct.unpack('<f',
audio_rtp_param_objs[RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']])[0]
opts['a_comfort_payload_type'] = \
audio_rtp_param_objs[RTP_PARAM_TYPES['COMFORT_NOISE_PAYLOAD_TYPE']]
session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
session_info = self.sessions[session_id]
opts.update(session_info)
success = await self.reconfigure_stream(session_info, opts) if reconfigure \
else await self.start_stream(session_info, opts)
if success:
self.streaming_status = STREAMING_STATUS['STREAMING']
else:
logging.error('[%s] Faled to start/reconfigure stream, deleting session.',
session_id)
del self.sessions[session_id]
self.streaming_status = STREAMING_STATUS['AVAILABLE']
def _get_streaimg_status(self):
return tlv.encode(b'\x01', self.streaming_status, to_base64=True)
async def _stop_stream(self, objs):
session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
session_info = self.sessions.get(session_id)
if not session_info:
logging.error('Requested to stop stream for session %s, but no '
'such session was found', session_id)
return
await self.stop_stream(session_info)
del self.sessions[session_id]
self.streaming_status = STREAMING_STATUS['AVAILABLE']
def set_selected_stream_configuration(self, value):
logging.debug('set_selected_stream_config - value - %s', value)
objs = tlv.decode(value, from_base64=True)
if SELECTED_STREAM_CONFIGURATION_TYPES['SESSION'] not in objs:
logging.error('Bad request to set selected stream configuration.')
return
session = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
request_type = session[b'\x02'][0]
logging.debug('Set stream config request: %d', request_type)
if request_type == 1:
job = functools.partial(self._start_stream, reconfigure=False)
elif request_type == 0:
job = self._stop_stream
elif request_type == 4:
job = functools.partial(self._start_stream, reconfigure=True)
else:
logging.error('Unknown request type %d', request_type)
return
self.driver.add_job(job, objs)
    def set_endpoints(self, value):
        """Setter for the SetupEndpoints characteristic.

        Decodes the controller's endpoint proposal (address, ports, SRTP
        parameters), stores a session entry keyed by the session UUID and
        writes back the accessory's endpoint response on the same
        characteristic.
        """
        objs = tlv.decode(value, from_base64=True)
        session_id = UUID(bytes=objs[SETUP_TYPES['SESSION_ID']])
        # Controller address and target RTP ports.
        address_tlv = objs[SETUP_TYPES['ADDRESS']]
        address_info_objs = tlv.decode(address_tlv)
        is_ipv6 = struct.unpack('?',
            address_info_objs[SETUP_ADDR_INFO['ADDRESS_VER']])[0]
        address = address_info_objs[SETUP_ADDR_INFO['ADDRESS']].decode('utf8')
        target_video_port = struct.unpack(
            '<H', address_info_objs[SETUP_ADDR_INFO['VIDEO_RTP_PORT']])[0]
        target_audio_port = struct.unpack(
            '<H', address_info_objs[SETUP_ADDR_INFO['AUDIO_RTP_PORT']])[0]
        # SRTP crypto suite, master key and salt proposed for each stream.
        video_srtp_tlv = objs[SETUP_TYPES['VIDEO_SRTP_PARAM']]
        video_info_objs = tlv.decode(video_srtp_tlv)
        video_crypto_suite = video_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
        video_master_key = video_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
        video_master_salt = video_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]
        audio_srtp_tlv = objs[SETUP_TYPES['AUDIO_SRTP_PARAM']]
        audio_info_objs = tlv.decode(audio_srtp_tlv)
        audio_crypto_suite = audio_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
        audio_master_key = audio_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
        audio_master_salt = audio_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]
        logging.debug('Received endpoint configuration:'
                      '\nsession_id: %s\naddress: %s\nis_ipv6: %s'
                      '\ntarget_video_port: %s\ntarget_audio_port: %s'
                      '\nvideo_crypto_suite: %s\nvideo_srtp: %s'
                      '\naudio_crypto_suite: %s\naudio_srtp: %s',
                      session_id, address, is_ipv6, target_video_port, target_audio_port,
                      video_crypto_suite,
                      to_base64_str(video_master_key + video_master_salt),
                      audio_crypto_suite,
                      to_base64_str(audio_master_key + audio_master_salt))
        # Echo the controller's SRTP parameters when SRTP is enabled; answer
        # with the pre-encoded "no SRTP" TLV otherwise.
        if self.has_srtp:
            video_srtp_tlv = tlv.encode(
                SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
                SETUP_SRTP_PARAM['MASTER_KEY'], video_master_key,
                SETUP_SRTP_PARAM['MASTER_SALT'], video_master_salt)
            audio_srtp_tlv = tlv.encode(
                SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
                SETUP_SRTP_PARAM['MASTER_KEY'], audio_master_key,
                SETUP_SRTP_PARAM['MASTER_SALT'], audio_master_salt)
        else:
            video_srtp_tlv = NO_SRTP
            audio_srtp_tlv = NO_SRTP
        # Fixed SSRC values for the response.
        video_ssrc = b'\x01'
        audio_ssrc = b'\x01'
        res_address_tlv = tlv.encode(
            SETUP_ADDR_INFO['ADDRESS_VER'], self.stream_address_isv6,
            SETUP_ADDR_INFO['ADDRESS'], self.stream_address.encode('utf-8'),
            SETUP_ADDR_INFO['VIDEO_RTP_PORT'], struct.pack('<H', target_video_port),
            SETUP_ADDR_INFO['AUDIO_RTP_PORT'], struct.pack('<H', target_audio_port))
        response_tlv = tlv.encode(
            SETUP_TYPES['SESSION_ID'], session_id.bytes,
            SETUP_TYPES['STATUS'], SETUP_STATUS['SUCCESS'],
            SETUP_TYPES['ADDRESS'], res_address_tlv,
            SETUP_TYPES['VIDEO_SRTP_PARAM'], video_srtp_tlv,
            SETUP_TYPES['AUDIO_SRTP_PARAM'], audio_srtp_tlv,
            SETUP_TYPES['VIDEO_SSRC'], video_ssrc,
            SETUP_TYPES['AUDIO_SSRC'], audio_ssrc,
            to_base64=True)
        # Session info later merged into the stream options by _start_stream.
        # NOTE(review): the audio key is stored as 'audio_srtp_key', while the
        # start_stream docs refer to 'a_srtp_key' - confirm which name custom
        # start_stream implementations expect.
        self.sessions[session_id] = {
            'id': session_id,
            'address': address,
            'v_port': target_video_port,
            'v_srtp_key': to_base64_str(video_master_key + video_master_salt),
            'a_port': target_audio_port,
            'audio_srtp_key': to_base64_str(audio_master_key + audio_master_salt),
            'a_ssrc': audio_ssrc
        }
        self.get_service('CameraRTPStreamManagement')\
            .get_characteristic('SetupEndpoints')\
            .set_value(response_tlv)
async def stop(self):
await asyncio.gather(*(
self.stop_stream(session_info) for session_info in self.sessions.values()))
s] Starting stream with the following parameters: %s',
session_info['id'], stream_config)
cmd = self.start_stream_cmd.format(**stream_config).split()
logging.debug('Executing start stream command: "%s"', ' '.join(cmd))
try:
process = await asyncio.create_subprocess_exec(*cmd,
stdout=asyncio.subprocess.DEVNULL,
stderr=asyncio.subprocess.PIPE,
limit=1024)
except Exception as e:
logging.error('Failed to start streaming process because of error: %s', e)
return False
session_info['process'] = process
logging.info('[%s] Started stream process - PID %d',
session_info['id'], process.pid)
return True
async def stop_stream(self, session_info):
session_id = session_info['id']
ffmpeg_process = session_info.get('process')
if ffmpeg_process:
logging.info('[%s] Stopping stream.', session_id)
try:
ffmpeg_process.terminate()
_, stderr = await asyncio.wait_for(
ffmpeg_process.communicate(), timeout=2.0)
logging.debug('Stream command stderr: %s', stderr)
except asyncio.TimeoutError:
logging.error('Timeout while waiting for the stream process '
'to terminate. Trying with kill.')
ffmpeg_process.kill()
await ffmpeg_process.wait()
logging.debug('Stream process stopped.')
else:
logging.warning('No process for session ID %s', session_id)
async def reconfigure_stream(self, session_info, stream_config):
await self.start_stream(session_info, stream_config)
def get_snapshot(self, image_size):
with open(os.path.join(RESOURCE_DIR, 'snapshot.jpg'), 'rb') as fp:
return fp.read()
| true | true |
1c49776c2f73f90a5fcf5d29799236503717cedd | 5,508 | py | Python | python/tests/unittest/test_context.py | LI-Mingyu/GraphScope-MY | 942060983d3f7f8d3a3377467386e27aba285b33 | [
"Apache-2.0"
] | 1 | 2021-12-17T03:58:08.000Z | 2021-12-17T03:58:08.000Z | python/tests/unittest/test_context.py | LI-Mingyu/GraphScope-MY | 942060983d3f7f8d3a3377467386e27aba285b33 | [
"Apache-2.0"
] | null | null | null | python/tests/unittest/test_context.py | LI-Mingyu/GraphScope-MY | 942060983d3f7f8d3a3377467386e27aba285b33 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pandas as pd
import pytest
import vineyard.io
from graphscope import lpa
from graphscope import sssp
from graphscope.framework.app import AppAssets
from graphscope.framework.errors import InvalidArgumentError
def test_simple_context_to_numpy(simple_context):
    """Each vertex-level selector yields one entry per vertex (40521 total)."""
    # Selectors on 'e' (e.src / e.dst / e.data) are not supported yet.
    for selector in ("v.id", "v.data", "r"):
        arr = simple_context.to_numpy(selector)
        assert arr.shape == (40521,)
def test_simple_context_to_dataframe(simple_context):
    """Selecting id/data/result columns yields a (num_vertices, 3) frame."""
    frame = simple_context.to_dataframe(
        {"id": "v.id", "data": "v.data", "result": "r"})
    assert frame.shape == (40521, 3)
def test_simple_context_to_vineyard_tensor(simple_context, p2p_project_directed_graph):
    """Vertex ids and results can be exported as vineyard tensors."""
    id_tensor = simple_context.to_vineyard_tensor("v.id")
    assert id_tensor is not None
    result_tensor = simple_context.to_vineyard_tensor("r")
    assert result_tensor is not None
    # Tensor-context apps can also be exported along a given axis.
    has_path = AppAssets(algo="sssp_has_path", context="tensor")
    ctx = has_path(
        p2p_project_directed_graph._project_to_simple(), source=6, target=3728
    )
    assert ctx.to_vineyard_tensor(axis=0) is not None
def test_simple_context_to_vineyard_dataframe(
    simple_context, p2p_project_directed_graph
):
    """The whole selection can be exported as a vineyard dataframe."""
    frame = simple_context.to_vineyard_dataframe(
        {"id": "v.id", "data": "v.data", "result": "r"}
    )
    assert frame is not None
def test_property_context_to_numpy(property_context):
    """Label-qualified selectors yield one entry per vertex of that label."""
    v0_dist = property_context.to_numpy("v:v0.dist")
    assert v0_dist.shape == (40521,)
    v1_result = property_context.to_numpy("r:v1.dist_1")
    assert v1_result.shape == (40786,)
def test_property_context_to_dataframe(property_context):
    """Per-label dataframes have one row per vertex of the chosen label."""
    v0_frame = property_context.to_dataframe(
        {"id": "v:v0.id", "result": "r:v0.dist_0"})
    assert v0_frame.shape == (40521, 2)
    v1_frame = property_context.to_dataframe(
        {"id": "v:v1.id", "result": "r:v1.dist_1"})
    assert v1_frame.shape == (40786, 2)
def test_property_context_output(property_context):
    """output_to_client materializes the selection as a readable local CSV."""
    property_context.output_to_client(
        fd="/tmp/r0", selector={"id": "v:v0.id", "result": "r:v0.dist_0"}
    )
    frame = pd.read_csv("/tmp/r0")
    assert frame.shape == (40521, 2)
def test_property_context_to_vineyard_tensor(property_context):
    """A labeled vertex selector can be exported as a vineyard tensor."""
    tensor = property_context.to_vineyard_tensor("v:v0.id")
    assert tensor is not None
def test_property_context_to_vineyard_dataframe(graphscope_session, property_context):
    """A labeled selection can be exported as a vineyard dataframe."""
    frame = property_context.to_vineyard_dataframe(
        {"id": "v:v0.id", "data": "v:v0.dist", "result": "r:v0.dist_0"}
    )
    assert frame is not None
def test_add_column(arrow_property_graph, property_context):
    """Adding result columns registers new properties on both vertex labels."""
    enriched = arrow_property_graph.add_column(
        property_context, {"result_0": "r:v0.dist_0", "result_1": "r:v1.dist_1"}
    )
    v0_props = [p.name for p in enriched.schema.get_vertex_properties("v0")]
    v1_props = [p.name for p in enriched.schema.get_vertex_properties("v1")]
    assert "result_0" in v0_props
    assert "result_1" in v1_props
def test_context_output(simple_context):
    """Smoke test: writing the selection to a file URI must not raise."""
    selection = {"id": "v.id", "data": "v.data", "result": "r"}
    simple_context.output(fd="file:///tmp/rlt.csv", selector=selection)
def test_add_column_after_computation(arrow_property_graph):
    """Columns from a fresh sssp context can be appended to the source graph."""
    projected = arrow_property_graph.project(
        vertices={"v0": ["id"]}, edges={"e0": ["weight"]})
    ctx = sssp(projected, 20)
    enriched = arrow_property_graph.add_column(
        ctx, {"id_col": "v.id", "data_col": "v.data", "result_col": "r"}
    )
    v0_props = [p.name for p in enriched.schema.get_vertex_properties("v0")]
    for expected in ("id_col", "data_col", "result_col"):
        assert expected in v0_props
def test_lpa(arrow_property_graph_lpa):
    """Smoke test: lpa runs and its context exports to a sortable dataframe."""
    frame = (
        lpa(arrow_property_graph_lpa, max_round=20)
        .to_dataframe(
            {"node": "v:v0.id", "label0": "r:v0.label_0", "label1": "r:v0.label_1"}
        )
        .sort_values(by=["node"])
    )
@pytest.mark.skipif("NIGHTLY" not in os.environ, reason="Run in nightly CI")
def test_error_on_selector(property_context):
    """Invalid selectors must raise precise, well-typed errors."""
    # Fix: dropped the unused ``out =`` assignments - the calls are expected
    # to raise, so the bindings were dead code.
    # Unknown label.
    with pytest.raises(KeyError, match="non_exist_label"):
        property_context.to_numpy("v:non_exist_label.id")
    # Known label, unknown property.
    with pytest.raises(KeyError, match="non_exist_prop"):
        property_context.to_numpy("v:v0.non_exist_prop")
    # Missing selector.
    with pytest.raises(
        InvalidArgumentError,
        match="Selector in labeled vertex data context cannot be None",
    ):
        property_context.to_numpy(selector=None)
    # Malformed selector strings.
    with pytest.raises(ValueError, match="not enough values to unpack"):
        property_context.to_numpy("xxx")
    with pytest.raises(SyntaxError, match="Invalid selector"):
        property_context.to_numpy("xxx:a.b")
import os
import pandas as pd
import pytest
import vineyard.io
from graphscope import lpa
from graphscope import sssp
from graphscope.framework.app import AppAssets
from graphscope.framework.errors import InvalidArgumentError
def test_simple_context_to_numpy(simple_context):
    """Each vertex-level selector yields one entry per vertex (40521 total)."""
    for selector in ("v.id", "v.data", "r"):
        arr = simple_context.to_numpy(selector)
        assert arr.shape == (40521,)
def test_simple_context_to_dataframe(simple_context):
    """Selecting id/data/result columns yields a (num_vertices, 3) frame."""
    frame = simple_context.to_dataframe(
        {"id": "v.id", "data": "v.data", "result": "r"})
    assert frame.shape == (40521, 3)
def test_simple_context_to_vineyard_tensor(simple_context, p2p_project_directed_graph):
    """Vertex ids and results can be exported as vineyard tensors."""
    id_tensor = simple_context.to_vineyard_tensor("v.id")
    assert id_tensor is not None
    result_tensor = simple_context.to_vineyard_tensor("r")
    assert result_tensor is not None
    has_path = AppAssets(algo="sssp_has_path", context="tensor")
    ctx = has_path(
        p2p_project_directed_graph._project_to_simple(), source=6, target=3728
    )
    assert ctx.to_vineyard_tensor(axis=0) is not None
def test_simple_context_to_vineyard_dataframe(
    simple_context, p2p_project_directed_graph
):
    """The whole selection can be exported as a vineyard dataframe."""
    frame = simple_context.to_vineyard_dataframe(
        {"id": "v.id", "data": "v.data", "result": "r"}
    )
    assert frame is not None
def test_property_context_to_numpy(property_context):
    """Label-qualified selectors yield one entry per vertex of that label."""
    v0_dist = property_context.to_numpy("v:v0.dist")
    assert v0_dist.shape == (40521,)
    v1_result = property_context.to_numpy("r:v1.dist_1")
    assert v1_result.shape == (40786,)
def test_property_context_to_dataframe(property_context):
    """Per-label dataframes have one row per vertex of the chosen label."""
    v0_frame = property_context.to_dataframe(
        {"id": "v:v0.id", "result": "r:v0.dist_0"})
    assert v0_frame.shape == (40521, 2)
    v1_frame = property_context.to_dataframe(
        {"id": "v:v1.id", "result": "r:v1.dist_1"})
    assert v1_frame.shape == (40786, 2)
def test_property_context_output(property_context):
    """output_to_client materializes the selection as a readable local CSV."""
    property_context.output_to_client(
        fd="/tmp/r0", selector={"id": "v:v0.id", "result": "r:v0.dist_0"}
    )
    frame = pd.read_csv("/tmp/r0")
    assert frame.shape == (40521, 2)
def test_property_context_to_vineyard_tensor(property_context):
    """A labeled vertex selector can be exported as a vineyard tensor."""
    tensor = property_context.to_vineyard_tensor("v:v0.id")
    assert tensor is not None
def test_property_context_to_vineyard_dataframe(graphscope_session, property_context):
    """A labeled selection can be exported as a vineyard dataframe."""
    frame = property_context.to_vineyard_dataframe(
        {"id": "v:v0.id", "data": "v:v0.dist", "result": "r:v0.dist_0"}
    )
    assert frame is not None
def test_add_column(arrow_property_graph, property_context):
    """Adding result columns registers new properties on both vertex labels."""
    enriched = arrow_property_graph.add_column(
        property_context, {"result_0": "r:v0.dist_0", "result_1": "r:v1.dist_1"}
    )
    v0_props = [p.name for p in enriched.schema.get_vertex_properties("v0")]
    v1_props = [p.name for p in enriched.schema.get_vertex_properties("v1")]
    assert "result_0" in v0_props
    assert "result_1" in v1_props
def test_context_output(simple_context):
    """Smoke test: writing the selection to a file URI must not raise."""
    selection = {"id": "v.id", "data": "v.data", "result": "r"}
    simple_context.output(fd="file:///tmp/rlt.csv", selector=selection)
def test_add_column_after_computation(arrow_property_graph):
    """Columns from a fresh sssp context can be appended to the source graph."""
    projected = arrow_property_graph.project(
        vertices={"v0": ["id"]}, edges={"e0": ["weight"]})
    ctx = sssp(projected, 20)
    enriched = arrow_property_graph.add_column(
        ctx, {"id_col": "v.id", "data_col": "v.data", "result_col": "r"}
    )
    v0_props = [p.name for p in enriched.schema.get_vertex_properties("v0")]
    for expected in ("id_col", "data_col", "result_col"):
        assert expected in v0_props
def test_lpa(arrow_property_graph_lpa):
    """Smoke test: lpa runs and its context exports to a sortable dataframe."""
    frame = (
        lpa(arrow_property_graph_lpa, max_round=20)
        .to_dataframe(
            {"node": "v:v0.id", "label0": "r:v0.label_0", "label1": "r:v0.label_1"}
        )
        .sort_values(by=["node"])
    )
@pytest.mark.skipif("NIGHTLY" not in os.environ, reason="Run in nightly CI")
def test_error_on_selector(property_context):
    """Invalid selectors must raise precise, well-typed errors."""
    # Fix: dropped the unused ``out =`` assignments - the calls are expected
    # to raise, so the bindings were dead code.
    with pytest.raises(KeyError, match="non_exist_label"):
        property_context.to_numpy("v:non_exist_label.id")
    with pytest.raises(KeyError, match="non_exist_prop"):
        property_context.to_numpy("v:v0.non_exist_prop")
    with pytest.raises(
        InvalidArgumentError,
        match="Selector in labeled vertex data context cannot be None",
    ):
        property_context.to_numpy(selector=None)
    with pytest.raises(ValueError, match="not enough values to unpack"):
        property_context.to_numpy("xxx")
    with pytest.raises(SyntaxError, match="Invalid selector"):
        property_context.to_numpy("xxx:a.b")
| true | true |
1c49784aa8306157fe272237ed9d63b7286a170d | 6,799 | py | Python | torchgan/metrics/proximal_duality_gap.py | proximal-dg/proximal_dg | 000e925c7daab099b2c3735f99e65e6b2a00a799 | [
"MIT"
] | 13 | 2021-05-12T05:37:20.000Z | 2022-03-30T17:05:47.000Z | torchgan/metrics/proximal_duality_gap.py | proximal-dg/proximal_dg | 000e925c7daab099b2c3735f99e65e6b2a00a799 | [
"MIT"
] | 3 | 2021-10-20T04:51:36.000Z | 2022-02-25T13:37:32.000Z | torchgan/metrics/proximal_duality_gap.py | proximal-dg/proximal_dg | 000e925c7daab099b2c3735f99e65e6b2a00a799 | [
"MIT"
] | 1 | 2021-12-28T17:03:08.000Z | 2021-12-28T17:03:08.000Z | import torch
import torch.nn.functional as F
import torchvision
import copy
import time
import os
from ..utils import reduce
from .metric import EvaluationMetric
from torchgan.trainer import *
import torch.multiprocessing as mp
import numpy as np
from ray import tune
from torch.optim import Adam
__all__ = ["ProximalDualityGap"]
class ProximalDualityGap(EvaluationMetric):
    r"""
    Estimates the Proximal Duality Gap of a GAN checkpoint.

    The score is ``|M1 - M2|``, where M1 is the mean evaluation value reached
    after letting only the discriminator deviate (train further) from the
    checkpoint, and M2 the analogous value for the generator.  Each deviation
    starts from a slightly weight-perturbed copy of the saved model.

    Args:
        perturbation : Use the perturbed variant of the duality gap (Boolean).
            NOTE(review): stored but never read here; ``perturb_std`` in
            :meth:`calculate_score` is what actually controls perturbation.
        network_params : Network configuration forwarded to ``Trainer``.
        generator_loss : Loss used while only the generator deviates (M2).
        discriminator_loss : Loss used while only the discriminator deviates (M1).
        evaluation_loss : Loss used to score a deviated model (M1, and M2 by default).
        proximal_evaluation_loss : Optional separate scoring loss for M2;
            falls back to ``evaluation_loss`` when ``None``.
        train_dataloader : Dataloader used for the deviation training epochs.
        eval_dataloader : Dataloader used for scoring; defaults to ``train_dataloader``.
        n_iter : Number of epochs in each of the M1 and M2 estimations (int).
        log_dir : Root directory for the side trainers' logs/images/checkpoints.
        sample_size : Number of images sampled by the side trainers.
        n_row : Number of images per row in the sampled image grids.
        verbose : Print progress information (Boolean).
    """
    def __init__(self,perturbation=False,network_params=None,generator_loss=None,discriminator_loss=None,evaluation_loss=None,proximal_evaluation_loss=None,train_dataloader=None,eval_dataloader=None,n_iter=10,log_dir="./",sample_size=28,n_row=7,verbose=False):
        super(ProximalDualityGap, self).__init__()
        self.perturbation = perturbation
        self.n_iter = n_iter
        self.network_params = network_params
        self.generator_loss = generator_loss
        self.discriminator_loss = discriminator_loss
        self.evaluation_loss = evaluation_loss
        # Fall back to the plain evaluation loss when no proximal variant is given.
        self.proximal_evaluation_loss = proximal_evaluation_loss if proximal_evaluation_loss is not None else evaluation_loss
        self.train_dataloader = train_dataloader
        self.eval_dataloader = eval_dataloader if eval_dataloader is not None else train_dataloader
        self.log_dir = log_dir
        self.sample_size = sample_size
        self.n_row = n_row
        # metric_ops() will be handed the main trainer's checkpoint dir/number.
        self.set_arg_map({"ckpt_dir":"checkpoints" , "ckpt_no":"last_retained_checkpoint"})
        self.verbose = verbose
        # Presumably makes the loss score-only (no optimizer step) -- confirm in the loss class.
        self.evaluation_loss.eval_only = True
        self.history = []  # running list of |DG| values across metric_ops() calls
    def preprocess(self, x):
        r"""
        Preprocessor for the trainer object (identity; no preprocessing needed).

        Args:
            x (torch.Tensor) : Instance of class BaseTrainer

        Returns:
            The input, unchanged.
        """
        return x
    def attempt_deviation(self,trainer):
        r"""
        Train ``trainer`` (one of the two one-sided deviation trainers), then
        score the deviated model with ``self.evaluate`` over the eval dataloader.

        Returns:
            Mean of the negated evaluation loss across batches.
        """
        trainer(self.train_dataloader)
        # Register the evaluation loss on the trainer so its argument map exists.
        trainer.losses[type(self.evaluation_loss).__name__] = self.evaluate
        trainer._store_loss_maps()
        batch_score = []
        for data in self.eval_dataloader:
            # Mirror the Trainer's input handling: (inputs, labels) pairs,
            # bare tensors, or anything else passed through as-is.
            if type(data) is tuple or type(data) is list:
                trainer.real_inputs = data[0].to(trainer.device)
                trainer.labels = data[1].to(trainer.device)
            elif type(data) is torch.Tensor:
                trainer.real_inputs = data.to(trainer.device)
            else:
                trainer.real_inputs = data
            batch_score.append(-1*self.evaluate.train_ops(**trainer._get_arguments(trainer.loss_arg_maps[type(self.evaluation_loss).__name__])) )
        return np.mean(batch_score)
    def calculate_score(self,load_path=None,m1_dir=None,m2_dir=None,perturb_std=1e-3):
        r"""
        Computes the duality gap for a saved trainer checkpoint.

        Args:
            load_path (str) : Path to load the Instance of class BaseTrainer
            m1_dir (str) : Path to save the logs for estimating M1
            m2_dir (str) : Path to save the logs for estimating M2
            perturb_std (float) : Std of the Gaussian noise added to the model
                weights before each deviation; 0 disables the perturbation.

        Returns:
            The Duality Gap ``|M1 - M2|``.
        """
        # M1: let only the discriminator deviate from the checkpoint.
        disc_trainer = Trainer(self.network_params,[self.discriminator_loss],log_dir=os.path.join(m1_dir,"logs"),recon=os.path.join(m1_dir,"images"),checkpoints=os.path.join(m1_dir,"ckpts","model_"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose)
        disc_trainer.load_model(load_path,model_only=True)
        disc_trainer.epochs = self.n_iter
        disc_trainer.loss_information["generator_iters"] = 1
        disc_trainer.tune_report = "DG"
        if(perturb_std>0):
            with torch.no_grad():
                for x in disc_trainer.discriminator.parameters():
                    x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=disc_trainer.device))
        # M2: let only the generator deviate from the checkpoint.
        gen_trainer = Trainer(self.network_params,[self.generator_loss],log_dir=os.path.join(m2_dir,"logs"),recon=os.path.join(m2_dir,"images"),checkpoints=os.path.join(m2_dir,"ckpts","model_"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose)
        gen_trainer.load_model(load_path,model_only=True)
        gen_trainer.epochs = self.n_iter
        gen_trainer.loss_information["discriminator_iters"] = 1
        gen_trainer.tune_report = "DG"
        if(perturb_std>0):
            with torch.no_grad():
                for x in gen_trainer.generator.parameters():
                    x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=gen_trainer.device))
        if(self.verbose):
            print("__"*10,"\n{:30s}\n".format("Estimating M1"),"__"*10)
        self.evaluate = self.evaluation_loss
        M1 = self.attempt_deviation(disc_trainer)
        if(self.verbose):
            print("M1 : ",M1)
            print("__"*10,"\n{:30s}\n".format("Estimating M2"),"__"*10)
        # M2 is scored with the (possibly distinct) proximal evaluation loss.
        self.evaluate = self.proximal_evaluation_loss
        M2 = self.attempt_deviation(gen_trainer)
        if(self.verbose):
            print("M2 : ",M2)
        disc_trainer.complete()
        gen_trainer.complete()
        return abs(M1 - M2)
    def metric_ops(self,ckpt_dir=None,ckpt_no=None):
        r"""Defines the set of operations necessary to compute the Proximal Duality Gap.

        Args:
            ckpt_dir (str): Checkpoint path prefix of the main trainer.
            ckpt_no (int): Index of the last retained checkpoint; the metric
                loads checkpoint ``ckpt_no - 1``.

        Returns:
            The Proximal Duality Gap (scalar quantity).
        """
        if(self.verbose):
            print("=="*60,"\n{:^120s}\n".format("Estimating Proximal Duality Gap"),"=="*60)
        # ckpt_dir already ends with the file-name prefix, so plain string
        # concatenation (not os.path.join) is intended here.
        load_path = ckpt_dir + str(ckpt_no-1)+ ".model"
        m1_dir = os.path.join(self.log_dir,"proximal_duality_gap","M1","iter_{}".format(ckpt_no))
        m2_dir = os.path.join(self.log_dir,"proximal_duality_gap","M2","iter_{}".format(ckpt_no))
        start_time = time.time()
        score = self.calculate_score(load_path=load_path,m1_dir=m1_dir,m2_dir=m2_dir)
        time_taken = time.time()-start_time  # NOTE(review): measured but never reported
        if(self.verbose):
            print("__"*60,"\n{:^50s} : {}\n".format("Proximal Duality Gap",score),"__"*60)
        self.history.append(abs(score))
        # Report the running mean of |DG| to Ray Tune.
        tune.report(score=np.mean(self.history))
        return score
| 42.761006 | 292 | 0.626121 | import torch
import torch.nn.functional as F
import torchvision
import copy
import time
import os
from ..utils import reduce
from .metric import EvaluationMetric
from torchgan.trainer import *
import torch.multiprocessing as mp
import numpy as np
from ray import tune
from torch.optim import Adam
__all__ = ["ProximalDualityGap"]
class ProximalDualityGap(EvaluationMetric):
    r"""
    Proximal Duality Gap metric for GANs: ``|M1 - M2|``, where M1/M2 are the
    mean evaluation scores reached after letting only the discriminator /
    only the generator train on from a (perturbed) saved checkpoint.
    ``proximal_evaluation_loss`` and ``eval_dataloader`` default to
    ``evaluation_loss`` and ``train_dataloader`` respectively.
    """
    def __init__(self,perturbation=False,network_params=None,generator_loss=None,discriminator_loss=None,evaluation_loss=None,proximal_evaluation_loss=None,train_dataloader=None,eval_dataloader=None,n_iter=10,log_dir="./",sample_size=28,n_row=7,verbose=False):
        super(ProximalDualityGap, self).__init__()
        self.perturbation = perturbation
        self.n_iter = n_iter
        self.network_params = network_params
        self.generator_loss = generator_loss
        self.discriminator_loss = discriminator_loss
        self.evaluation_loss = evaluation_loss
        self.proximal_evaluation_loss = proximal_evaluation_loss if proximal_evaluation_loss is not None else evaluation_loss
        self.train_dataloader = train_dataloader
        self.eval_dataloader = eval_dataloader if eval_dataloader is not None else train_dataloader
        self.log_dir = log_dir
        self.sample_size = sample_size
        self.n_row = n_row
        # metric_ops() receives the main trainer's checkpoint dir and number.
        self.set_arg_map({"ckpt_dir":"checkpoints" , "ckpt_no":"last_retained_checkpoint"})
        self.verbose = verbose
        self.evaluation_loss.eval_only = True
        self.history = []  # |DG| values accumulated across metric_ops() calls
    def preprocess(self, x):
        r"""Identity preprocessor for the trainer object."""
        return x
    def attempt_deviation(self,trainer):
        r"""Train ``trainer``, then return the mean negated evaluation loss
        of the deviated model over the eval dataloader."""
        trainer(self.train_dataloader)
        trainer.losses[type(self.evaluation_loss).__name__] = self.evaluate
        trainer._store_loss_maps()
        batch_score = []
        for data in self.eval_dataloader:
            # Accept (inputs, labels) pairs, bare tensors, or pass through.
            if type(data) is tuple or type(data) is list:
                trainer.real_inputs = data[0].to(trainer.device)
                trainer.labels = data[1].to(trainer.device)
            elif type(data) is torch.Tensor:
                trainer.real_inputs = data.to(trainer.device)
            else:
                trainer.real_inputs = data
            batch_score.append(-1*self.evaluate.train_ops(**trainer._get_arguments(trainer.loss_arg_maps[type(self.evaluation_loss).__name__])) )
        return np.mean(batch_score)
    def calculate_score(self,load_path=None,m1_dir=None,m2_dir=None,perturb_std=1e-3):
        r"""Load the checkpoint at ``load_path`` twice, run the M1
        (discriminator-only) and M2 (generator-only) deviations, and return
        ``|M1 - M2|``.  ``perturb_std`` is the std of the Gaussian weight
        perturbation; 0 disables it."""
        disc_trainer = Trainer(self.network_params,[self.discriminator_loss],log_dir=os.path.join(m1_dir,"logs"),recon=os.path.join(m1_dir,"images"),checkpoints=os.path.join(m1_dir,"ckpts","model_"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose)
        disc_trainer.load_model(load_path,model_only=True)
        disc_trainer.epochs = self.n_iter
        disc_trainer.loss_information["generator_iters"] = 1
        disc_trainer.tune_report = "DG"
        if(perturb_std>0):
            with torch.no_grad():
                for x in disc_trainer.discriminator.parameters():
                    x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=disc_trainer.device))
        gen_trainer = Trainer(self.network_params,[self.generator_loss],log_dir=os.path.join(m2_dir,"logs"),recon=os.path.join(m2_dir,"images"),checkpoints=os.path.join(m2_dir,"ckpts","model_"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose)
        gen_trainer.load_model(load_path,model_only=True)
        gen_trainer.epochs = self.n_iter
        gen_trainer.loss_information["discriminator_iters"] = 1
        gen_trainer.tune_report = "DG"
        if(perturb_std>0):
            with torch.no_grad():
                for x in gen_trainer.generator.parameters():
                    x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=gen_trainer.device))
        if(self.verbose):
            print("__"*10,"\n{:30s}\n".format("Estimating M1"),"__"*10)
        self.evaluate = self.evaluation_loss
        M1 = self.attempt_deviation(disc_trainer)
        if(self.verbose):
            print("M1 : ",M1)
            print("__"*10,"\n{:30s}\n".format("Estimating M2"),"__"*10)
        # M2 is scored with the (possibly distinct) proximal evaluation loss.
        self.evaluate = self.proximal_evaluation_loss
        M2 = self.attempt_deviation(gen_trainer)
        if(self.verbose):
            print("M2 : ",M2)
        disc_trainer.complete()
        gen_trainer.complete()
        return abs(M1 - M2)
    def metric_ops(self,ckpt_dir=None,ckpt_no=None):
        r"""Compute the Proximal Duality Gap for checkpoint ``ckpt_no - 1``
        under prefix ``ckpt_dir`` and report the running mean to Ray Tune."""
        if(self.verbose):
            print("=="*60,"\n{:^120s}\n".format("Estimating Proximal Duality Gap"),"=="*60)
        # ckpt_dir ends with the file-name prefix; concatenation is intended.
        load_path = ckpt_dir + str(ckpt_no-1)+ ".model"
        m1_dir = os.path.join(self.log_dir,"proximal_duality_gap","M1","iter_{}".format(ckpt_no))
        m2_dir = os.path.join(self.log_dir,"proximal_duality_gap","M2","iter_{}".format(ckpt_no))
        start_time = time.time()
        score = self.calculate_score(load_path=load_path,m1_dir=m1_dir,m2_dir=m2_dir)
        time_taken = time.time()-start_time  # NOTE(review): measured but never reported
        if(self.verbose):
            print("__"*60,"\n{:^50s} : {}\n".format("Proximal Duality Gap",score),"__"*60)
        self.history.append(abs(score))
        tune.report(score=np.mean(self.history))
        return score
| true | true |
1c49787b94ab42aa228264ebb5813f6406a67b28 | 203 | py | Python | Old/src/com/basic/call_func.py | exchris/Pythonlearn | 174f38a86cf1c85d6fc099005aab3568e7549cd0 | [
"MIT"
] | null | null | null | Old/src/com/basic/call_func.py | exchris/Pythonlearn | 174f38a86cf1c85d6fc099005aab3568e7549cd0 | [
"MIT"
] | 1 | 2018-11-27T09:58:54.000Z | 2018-11-27T09:58:54.000Z | Old/src/com/basic/call_func.py | exchris/pythonlearn | 174f38a86cf1c85d6fc099005aab3568e7549cd0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
x = abs(100)
y = abs(-20)
print(x, y)
print('max(1, 2, 3) =', max(1, 2, 3))
print('min(1, 2, 3) =', min(1, 2, 3))
print('sum([1, 2, 3]) =', sum([1, 2, 3])) | 22.555556 | 41 | 0.477833 |
# Echo the results of a few numeric built-ins (abs, max, min, sum).
x = abs(100)  # magnitude of a positive number is itself
y = abs(-20)  # magnitude of a negative number drops the sign
print(x, y)
print('max(1, 2, 3) =', max(1, 2, 3))
print('min(1, 2, 3) =', min(1, 2, 3))
print('sum([1, 2, 3]) =', sum([1, 2, 3]))
1c4979a46c5b421ace7a15f391b274820af9e4a1 | 25,942 | py | Python | stable_baselines3/sac/policies.py | danielhettegger-rl/stable-baselines3 | 23de12e95d96b7bb6136c6a338e407ae7db7c545 | [
"MIT"
] | null | null | null | stable_baselines3/sac/policies.py | danielhettegger-rl/stable-baselines3 | 23de12e95d96b7bb6136c6a338e407ae7db7c545 | [
"MIT"
] | null | null | null | stable_baselines3/sac/policies.py | danielhettegger-rl/stable-baselines3 | 23de12e95d96b7bb6136c6a338e407ae7db7c545 | [
"MIT"
] | null | null | null | import warnings
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import torch as th
from torch import nn
from stable_baselines3.common.distributions import SquashedDiagGaussianDistribution, StateDependentNoiseDistribution
from stable_baselines3.common.policies import BaseModel, BasePolicy, ContinuousCritic, register_policy
from stable_baselines3.common.preprocessing import get_action_dim
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
NatureCNN,
create_mlp,
get_actor_critic_arch,
)
from stable_baselines3.common.type_aliases import Schedule
# CAP the standard deviation of the actor
LOG_STD_MAX = 2
LOG_STD_MIN = -20
class Actor(BasePolicy):
    """
    Actor network (policy) for SAC.

    :param observation_space: Observation space
    :param action_space: Action space
    :param net_arch: Network architecture
    :param features_extractor: Network to extract features
        (a CNN when using images, a nn.Flatten() layer otherwise)
    :param features_dim: Number of features
    :param activation_fn: Activation function
    :param use_sde: Whether to use State Dependent Exploration or not
    :param log_std_init: Initial value for the log standard deviation
    :param full_std: Whether to use (n_features x n_actions) parameters
        for the std instead of only (n_features,) when using gSDE.
    :param sde_net_arch: Deprecated; kept only for backward compatibility.
    :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure
        a positive standard deviation (cf paper). It allows to keep variance
        above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
    :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        net_arch: List[int],
        features_extractor: nn.Module,
        features_dim: int,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        full_std: bool = True,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        normalize_images: bool = True,
    ):
        super(Actor, self).__init__(
            observation_space,
            action_space,
            features_extractor=features_extractor,
            normalize_images=normalize_images,
            squash_output=True,
        )
        # Save arguments to re-create object at loading
        self.use_sde = use_sde
        self.sde_features_extractor = None
        self.net_arch = net_arch
        self.features_dim = features_dim
        self.activation_fn = activation_fn
        self.log_std_init = log_std_init
        self.sde_net_arch = sde_net_arch
        self.use_expln = use_expln
        self.full_std = full_std
        self.clip_mean = clip_mean
        if sde_net_arch is not None:
            warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
        action_dim = get_action_dim(self.action_space)
        latent_pi_net = create_mlp(features_dim, -1, net_arch, activation_fn)
        self.latent_pi = nn.Sequential(*latent_pi_net)
        last_layer_dim = net_arch[-1] if len(net_arch) > 0 else features_dim
        if self.use_sde:
            self.action_dist = StateDependentNoiseDistribution(
                action_dim, full_std=full_std, use_expln=use_expln, learn_features=True, squash_output=True
            )
            self.mu, self.log_std = self.action_dist.proba_distribution_net(
                latent_dim=last_layer_dim, latent_sde_dim=last_layer_dim, log_std_init=log_std_init
            )
            # Avoid numerical issues by limiting the mean of the Gaussian
            # to be in [-clip_mean, clip_mean]
            if clip_mean > 0.0:
                self.mu = nn.Sequential(self.mu, nn.Hardtanh(min_val=-clip_mean, max_val=clip_mean))
        else:
            self.action_dist = SquashedDiagGaussianDistribution(action_dim)
            self.mu = nn.Linear(last_layer_dim, action_dim)
            self.log_std = nn.Linear(last_layer_dim, action_dim)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        """Collect the constructor arguments so the actor can be re-created on load."""
        data = super()._get_constructor_parameters()
        data.update(
            dict(
                net_arch=self.net_arch,
                features_dim=self.features_dim,
                activation_fn=self.activation_fn,
                use_sde=self.use_sde,
                log_std_init=self.log_std_init,
                full_std=self.full_std,
                use_expln=self.use_expln,
                features_extractor=self.features_extractor,
                clip_mean=self.clip_mean,
            )
        )
        return data
    def get_std(self) -> th.Tensor:
        """
        Retrieve the standard deviation of the action distribution.
        Only useful when using gSDE.
        It corresponds to ``th.exp(log_std)`` in the normal case,
        but is slightly different when using ``expln`` function
        (cf StateDependentNoiseDistribution doc).

        :return: the standard deviation tensor
        """
        msg = "get_std() is only available when using gSDE"
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        return self.action_dist.get_std(self.log_std)
    def reset_noise(self, batch_size: int = 1) -> None:
        """
        Sample new weights for the exploration matrix, when using gSDE.

        :param batch_size: number of noise samples to draw
        """
        msg = "reset_noise() is only available when using gSDE"
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        self.action_dist.sample_weights(self.log_std, batch_size=batch_size)
    def get_action_dist_params(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Dict[str, th.Tensor]]:
        """
        Get the parameters for the action distribution.

        :param obs: observation batch
        :return:
            Mean, (state of the) log standard deviation and optional keyword
            arguments for the distribution (``latent_sde`` when using gSDE).
        """
        features = self.extract_features(obs)
        latent_pi = self.latent_pi(features)
        mean_actions = self.mu(latent_pi)
        if self.use_sde:
            return mean_actions, self.log_std, dict(latent_sde=latent_pi)
        # Unstructured exploration (Original implementation)
        log_std = self.log_std(latent_pi)
        # Original Implementation to cap the standard deviation
        log_std = th.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)
        return mean_actions, log_std, {}
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """Sample an action for ``obs`` (the distribution mode if ``deterministic``)."""
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        # Note: the action is squashed
        return self.action_dist.actions_from_params(mean_actions, log_std, deterministic=deterministic, **kwargs)
    def action_log_prob(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        """Sample an action and return it together with its log probability."""
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        # return action and associated log prob
        return self.action_dist.log_prob_from_params(mean_actions, log_std, **kwargs)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """Alias of :meth:`forward`; entry point used by ``BasePolicy.predict``."""
        return self.forward(observation, deterministic)
class SACPolicy(BasePolicy):
    """
    Policy class (with both actor and critic) for SAC.

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param use_sde: Whether to use State Dependent Exploration or not
    :param log_std_init: Initial value for the log standard deviation
    :param sde_net_arch: Deprecated; kept only for backward compatibility.
    :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure
        a positive standard deviation (cf paper). It allows to keep variance
        above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
    :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.
    :param features_extractor_class: Features extractor to use.
    :param features_extractor_kwargs: Keyword arguments
        to pass to the features extractor.
    :param normalize_images: Whether to normalize images or not,
        dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    :param n_critics: Number of critic networks to create.
    :param share_features_extractor: Whether to share or not the features extractor
        between the actor and the critic (this saves computation time)
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        super(SACPolicy, self).__init__(
            observation_space,
            action_space,
            features_extractor_class,
            features_extractor_kwargs,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            squash_output=True,
        )
        # Default architecture: none for CNN extractors, two 256-unit layers otherwise.
        if net_arch is None:
            if features_extractor_class == NatureCNN:
                net_arch = []
            else:
                net_arch = [256, 256]
        actor_arch, critic_arch = get_actor_critic_arch(net_arch)
        self.net_arch = net_arch
        self.activation_fn = activation_fn
        # Arguments shared by the actor and critic constructors.
        self.net_args = {
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "net_arch": actor_arch,
            "activation_fn": self.activation_fn,
            "normalize_images": normalize_images,
        }
        self.actor_kwargs = self.net_args.copy()
        if sde_net_arch is not None:
            warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
        sde_kwargs = {
            "use_sde": use_sde,
            "log_std_init": log_std_init,
            "use_expln": use_expln,
            "clip_mean": clip_mean,
        }
        self.actor_kwargs.update(sde_kwargs)
        self.critic_kwargs = self.net_args.copy()
        self.critic_kwargs.update(
            {
                "n_critics": n_critics,
                "net_arch": critic_arch,
                "share_features_extractor": share_features_extractor,
            }
        )
        self.actor, self.actor_target = None, None
        self.critic, self.critic_target = None, None
        self.share_features_extractor = share_features_extractor
        self._build(lr_schedule)
    def _build(self, lr_schedule: Schedule) -> None:
        """Create the actor, the critic(s) and their optimizers, plus the critic target."""
        self.actor = self.make_actor()
        self.actor.optimizer = self.optimizer_class(self.actor.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
        if self.share_features_extractor:
            self.critic = self.make_critic(features_extractor=self.actor.features_extractor)
            # Do not optimize the shared features extractor with the critic loss
            # otherwise, there are gradient computation issues
            critic_parameters = [param for name, param in self.critic.named_parameters() if "features_extractor" not in name]
        else:
            # Create a separate features extractor for the critic
            # this requires more memory and computation
            self.critic = self.make_critic(features_extractor=None)
            critic_parameters = self.critic.parameters()
        # Critic target should not share the features extractor with critic
        self.critic_target = self.make_critic(features_extractor=None)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic.optimizer = self.optimizer_class(critic_parameters, lr=lr_schedule(1), **self.optimizer_kwargs)
        # Target networks should always be in eval mode
        self.critic_target.set_training_mode(False)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        """Collect the constructor arguments so the policy can be re-created on load."""
        data = super()._get_constructor_parameters()
        data.update(
            dict(
                net_arch=self.net_arch,
                activation_fn=self.net_args["activation_fn"],
                use_sde=self.actor_kwargs["use_sde"],
                log_std_init=self.actor_kwargs["log_std_init"],
                use_expln=self.actor_kwargs["use_expln"],
                clip_mean=self.actor_kwargs["clip_mean"],
                n_critics=self.critic_kwargs["n_critics"],
                lr_schedule=self._dummy_schedule,  # dummy lr schedule, not needed for loading policy alone
                optimizer_class=self.optimizer_class,
                optimizer_kwargs=self.optimizer_kwargs,
                features_extractor_class=self.features_extractor_class,
                features_extractor_kwargs=self.features_extractor_kwargs,
            )
        )
        return data
    def reset_noise(self, batch_size: int = 1) -> None:
        """
        Sample new weights for the exploration matrix, when using gSDE.

        :param batch_size: number of noise samples to draw
        """
        self.actor.reset_noise(batch_size=batch_size)
    def make_actor(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> Actor:
        """Build an :class:`Actor`, optionally reusing ``features_extractor``."""
        actor_kwargs = self._update_features_extractor(self.actor_kwargs, features_extractor)
        return Actor(**actor_kwargs).to(self.device)
    def make_critic(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> ContinuousCritic:
        """Build a :class:`ContinuousCritic`, optionally reusing ``features_extractor``."""
        critic_kwargs = self._update_features_extractor(self.critic_kwargs, features_extractor)
        return ContinuousCritic(**critic_kwargs).to(self.device)
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """Sample an action for ``obs`` (delegates to :meth:`_predict`)."""
        return self._predict(obs, deterministic=deterministic)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """Forward the observation to the actor to obtain an action."""
        return self.actor(observation, deterministic)
    def set_training_mode(self, mode: bool) -> None:
        """
        Put the policy in either training or evaluation mode.
        This affects certain modules, such as batch normalisation and dropout.

        :param mode: if true, set to training mode, else set to evaluation mode
        """
        self.actor.set_training_mode(mode)
        self.critic.set_training_mode(mode)
        self.training = mode
MlpPolicy = SACPolicy
class IPTSACPolicy(SACPolicy):
    """
    Policy Class for Interactive Policy Transfer (IPT) version of SAC.
    Most parameters are passed through to the SAC policy class.

    During non-deterministic action selection the policy blends the teacher's
    action with the actor's mean action (weighted by ``ipt_weight``) and adds
    the actor's exploration noise on top.

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param teacher_policy: The policy, which is used to interactively guide the training process.
    :param ipt_weight_schedule: The schedule for the weight of the teacher policy;
        when ``None`` the teacher is never used (weight 0).
    :param kwargs: Remaining :class:`SACPolicy` keyword arguments.
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        teacher_policy: BaseModel = None,
        ipt_weight_schedule: Schedule = None,
        **kwargs
    ):
        super().__init__(observation_space, action_space, lr_schedule, **kwargs)
        self.teacher_policy = teacher_policy
        self.ipt_weight_schedule = ipt_weight_schedule
        # Initial teacher weight: schedule evaluated at progress 1 (training start),
        # or 0 (teacher disabled) when no schedule is given.
        if ipt_weight_schedule is not None:
            self.ipt_weight = ipt_weight_schedule(1)
        else:
            self.ipt_weight = 0.0
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """Sample an action for ``obs`` (delegates to :meth:`_predict`)."""
        return self._predict(obs, deterministic=deterministic)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """
        Select an action; blend in the teacher's action unless the teacher
        weight is 0 or a deterministic action was requested.
        """
        if self.ipt_weight == 0 or deterministic:
            return self.actor(observation, deterministic)
        mean_actions, log_std, kwargs = self.actor.get_action_dist_params(observation)
        # Note: the action is squashed
        # Exploration noise: sample the actor's distribution around a zero mean.
        actor_noise = self.actor.action_dist.actions_from_params(
            th.zeros_like(mean_actions),
            log_std,
            deterministic=deterministic,
            **kwargs
        )
        teacher_action = self.teacher_policy.forward(observation)
        # NOTE(review): the blended action is not re-squashed, so it can leave
        # the [-1, 1] range the squashed actor normally guarantees -- confirm intended.
        action = (self.ipt_weight * teacher_action + (1.0 - self.ipt_weight) * mean_actions) + actor_noise
        return action
    def update_schedules(self, current_progress_remaining):
        """Refresh ``ipt_weight`` from the schedule given the remaining training progress."""
        if self.ipt_weight_schedule is not None:
            self.ipt_weight = self.ipt_weight_schedule(current_progress_remaining)
class CnnPolicy(SACPolicy):
    """
    SAC policy (actor + critic) for image observations.

    Identical to :class:`SACPolicy` except that the features extractor
    defaults to :class:`NatureCNN`; every argument is forwarded unchanged.

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param use_sde: Whether to use State Dependent Exploration or not
    :param log_std_init: Initial value for the log standard deviation
    :param sde_net_arch: Deprecated; kept only for backward compatibility.
    :param use_expln: Use ``expln()`` instead of ``exp()`` with gSDE to keep
        the standard deviation positive without letting it grow too fast.
    :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.
    :param features_extractor_class: Features extractor to use.
    :param features_extractor_kwargs: Keyword arguments to pass to the features extractor.
    :param normalize_images: Whether to normalize images (divide by 255.0).
    :param optimizer_class: The optimizer to use, ``th.optim.Adam`` by default.
    :param optimizer_kwargs: Extra optimizer keyword arguments (excluding the learning rate).
    :param n_critics: Number of critic networks to create.
    :param share_features_extractor: Whether the actor and critic share the
        features extractor (saves computation time).
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        # Pure pass-through: only the features extractor default differs.
        super(CnnPolicy, self).__init__(
            observation_space=observation_space,
            action_space=action_space,
            lr_schedule=lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            use_sde=use_sde,
            log_std_init=log_std_init,
            sde_net_arch=sde_net_arch,
            use_expln=use_expln,
            clip_mean=clip_mean,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            n_critics=n_critics,
            share_features_extractor=share_features_extractor,
        )
class MultiInputPolicy(SACPolicy):
    """
    SAC actor-critic policy for dict observation spaces.

    Identical to :class:`SACPolicy` except that the default features
    extractor is ``CombinedExtractor``, which handles ``gym.spaces.Dict``
    observations. All constructor arguments (network architecture,
    activation, gSDE options, optimizer settings, number of critics,
    feature-extractor sharing) are forwarded unchanged to the base class.
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        # Forward everything by keyword; behavior is defined by SACPolicy.
        super().__init__(
            observation_space=observation_space,
            action_space=action_space,
            lr_schedule=lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            use_sde=use_sde,
            log_std_init=log_std_init,
            sde_net_arch=sde_net_arch,
            use_expln=use_expln,
            clip_mean=clip_mean,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            n_critics=n_critics,
            share_features_extractor=share_features_extractor,
        )
# Expose the policy classes under the string names used by the SB3 policy
# registry (so e.g. SAC("MlpPolicy", env) resolves to SACPolicy).
register_policy("MlpPolicy", MlpPolicy)
register_policy("IptMlpPolicy", IPTSACPolicy)
register_policy("CnnPolicy", CnnPolicy)
register_policy("MultiInputPolicy", MultiInputPolicy)
| 43.021559 | 125 | 0.67304 | import warnings
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import torch as th
from torch import nn
from stable_baselines3.common.distributions import SquashedDiagGaussianDistribution, StateDependentNoiseDistribution
from stable_baselines3.common.policies import BaseModel, BasePolicy, ContinuousCritic, register_policy
from stable_baselines3.common.preprocessing import get_action_dim
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
NatureCNN,
create_mlp,
get_actor_critic_arch,
)
from stable_baselines3.common.type_aliases import Schedule
LOG_STD_MAX = 2
LOG_STD_MIN = -20
class Actor(BasePolicy):
    """
    Actor (policy) network for SAC.

    Maps an observation to the parameters of a squashed action distribution:
    either a diagonal Gaussian passed through ``tanh`` (``squash_output=True``)
    or, when ``use_sde`` is True, a generalized State-Dependent Exploration
    (gSDE) distribution.

    :param observation_space: Observation space.
    :param action_space: Continuous action space.
    :param net_arch: Hidden-layer sizes of the latent MLP.
    :param features_extractor: Module turning observations into features.
    :param features_dim: Dimension of the extracted features.
    :param activation_fn: Activation used in the latent MLP.
    :param use_sde: Use gSDE instead of a plain squashed Gaussian.
    :param log_std_init: Initial value for the log standard deviation.
    :param full_std: gSDE only; full std parametrization vs. one per feature.
    :param sde_net_arch: Deprecated; only triggers a warning.
    :param use_expln: gSDE only; use ``expln`` instead of ``exp`` for the std.
    :param clip_mean: gSDE only; clamp the mean to [-clip_mean, clip_mean].
    :param normalize_images: Divide image observations by 255.
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        net_arch: List[int],
        features_extractor: nn.Module,
        features_dim: int,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        full_std: bool = True,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        normalize_images: bool = True,
    ):
        super(Actor, self).__init__(
            observation_space,
            action_space,
            features_extractor=features_extractor,
            normalize_images=normalize_images,
            squash_output=True,
        )
        self.use_sde = use_sde
        self.sde_features_extractor = None
        self.net_arch = net_arch
        self.features_dim = features_dim
        self.activation_fn = activation_fn
        self.log_std_init = log_std_init
        self.sde_net_arch = sde_net_arch
        self.use_expln = use_expln
        self.full_std = full_std
        self.clip_mean = clip_mean
        # sde_net_arch is kept for backward compatibility only.
        if sde_net_arch is not None:
            warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
        action_dim = get_action_dim(self.action_space)
        # Output dim -1: build only the hidden trunk here; the mu/log_std
        # heads are created separately below from ``last_layer_dim``.
        latent_pi_net = create_mlp(features_dim, -1, net_arch, activation_fn)
        self.latent_pi = nn.Sequential(*latent_pi_net)
        last_layer_dim = net_arch[-1] if len(net_arch) > 0 else features_dim
        if self.use_sde:
            self.action_dist = StateDependentNoiseDistribution(
                action_dim, full_std=full_std, use_expln=use_expln, learn_features=True, squash_output=True
            )
            # For gSDE, log_std is a free parameter tensor, not a network head.
            self.mu, self.log_std = self.action_dist.proba_distribution_net(
                latent_dim=last_layer_dim, latent_sde_dim=last_layer_dim, log_std_init=log_std_init
            )
            # Clamp the mean output for numerical stability (cf. gSDE paper).
            if clip_mean > 0.0:
                self.mu = nn.Sequential(self.mu, nn.Hardtanh(min_val=-clip_mean, max_val=clip_mean))
        else:
            self.action_dist = SquashedDiagGaussianDistribution(action_dim)
            self.mu = nn.Linear(last_layer_dim, action_dim)
            self.log_std = nn.Linear(last_layer_dim, action_dim)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        """Return the kwargs needed to re-create this actor (for saving)."""
        data = super()._get_constructor_parameters()
        data.update(
            dict(
                net_arch=self.net_arch,
                features_dim=self.features_dim,
                activation_fn=self.activation_fn,
                use_sde=self.use_sde,
                log_std_init=self.log_std_init,
                full_std=self.full_std,
                use_expln=self.use_expln,
                features_extractor=self.features_extractor,
                clip_mean=self.clip_mean,
            )
        )
        return data
    def get_std(self) -> th.Tensor:
        """Return the current gSDE standard deviation (gSDE mode only)."""
        msg = "get_std() is only available when using gSDE"
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        return self.action_dist.get_std(self.log_std)
    def reset_noise(self, batch_size: int = 1) -> None:
        """Resample the gSDE exploration noise matrix (gSDE mode only)."""
        msg = "reset_noise() is only available when using gSDE"
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        self.action_dist.sample_weights(self.log_std, batch_size=batch_size)
    def get_action_dist_params(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Dict[str, th.Tensor]]:
        """Compute (mean_actions, log_std, extra kwargs) for the distribution."""
        features = self.extract_features(obs)
        latent_pi = self.latent_pi(features)
        mean_actions = self.mu(latent_pi)
        if self.use_sde:
            # gSDE: log_std is a parameter; the latent features condition noise.
            return mean_actions, self.log_std, dict(latent_sde=latent_pi)
        log_std = self.log_std(latent_pi)
        # Clamp for numerical stability (original SAC implementation trick).
        log_std = th.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)
        return mean_actions, log_std, {}
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """Sample (or take the mode of) an action for the given observation."""
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        return self.action_dist.actions_from_params(mean_actions, log_std, deterministic=deterministic, **kwargs)
    def action_log_prob(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        """Sample an action and return it together with its log-probability."""
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        return self.action_dist.log_prob_from_params(mean_actions, log_std, **kwargs)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        return self.forward(observation, deterministic)
class SACPolicy(BasePolicy):
    """
    Policy class (actor + critics) for SAC.

    Builds one :class:`Actor` and ``n_critics`` Q-networks (plus a frozen
    target copy of the critics). When ``share_features_extractor`` is True the
    critics reuse the actor's features extractor, and the shared extractor's
    parameters are only updated through the actor's optimizer.
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        super(SACPolicy, self).__init__(
            observation_space,
            action_space,
            features_extractor_class,
            features_extractor_kwargs,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            squash_output=True,
        )
        # Default architecture: no extra MLP after a CNN extractor,
        # two 256-unit layers otherwise.
        if net_arch is None:
            if features_extractor_class == NatureCNN:
                net_arch = []
            else:
                net_arch = [256, 256]
        actor_arch, critic_arch = get_actor_critic_arch(net_arch)
        self.net_arch = net_arch
        self.activation_fn = activation_fn
        # Shared constructor kwargs for both actor and critics.
        self.net_args = {
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "net_arch": actor_arch,
            "activation_fn": self.activation_fn,
            "normalize_images": normalize_images,
        }
        self.actor_kwargs = self.net_args.copy()
        if sde_net_arch is not None:
            warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
        # gSDE-related options only apply to the actor.
        sde_kwargs = {
            "use_sde": use_sde,
            "log_std_init": log_std_init,
            "use_expln": use_expln,
            "clip_mean": clip_mean,
        }
        self.actor_kwargs.update(sde_kwargs)
        self.critic_kwargs = self.net_args.copy()
        self.critic_kwargs.update(
            {
                "n_critics": n_critics,
                "net_arch": critic_arch,
                "share_features_extractor": share_features_extractor,
            }
        )
        self.actor, self.actor_target = None, None
        self.critic, self.critic_target = None, None
        self.share_features_extractor = share_features_extractor
        self._build(lr_schedule)
    def _build(self, lr_schedule: Schedule) -> None:
        """Create actor, critics and their optimizers.

        ``lr_schedule(1)`` is the initial learning rate (schedule evaluated
        at progress_remaining = 1).
        """
        self.actor = self.make_actor()
        self.actor.optimizer = self.optimizer_class(self.actor.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
        if self.share_features_extractor:
            self.critic = self.make_critic(features_extractor=self.actor.features_extractor)
            # Exclude the shared extractor's parameters from the critic
            # optimizer: they are already optimized via the actor loss.
            critic_parameters = [param for name, param in self.critic.named_parameters() if "features_extractor" not in name]
        else:
            self.critic = self.make_critic(features_extractor=None)
            critic_parameters = self.critic.parameters()
        # Target critics start as an exact copy and are never trained directly.
        self.critic_target = self.make_critic(features_extractor=None)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic.optimizer = self.optimizer_class(critic_parameters, lr=lr_schedule(1), **self.optimizer_kwargs)
        self.critic_target.set_training_mode(False)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        """Return the kwargs needed to re-create this policy (for saving)."""
        data = super()._get_constructor_parameters()
        data.update(
            dict(
                net_arch=self.net_arch,
                activation_fn=self.net_args["activation_fn"],
                use_sde=self.actor_kwargs["use_sde"],
                log_std_init=self.actor_kwargs["log_std_init"],
                use_expln=self.actor_kwargs["use_expln"],
                clip_mean=self.actor_kwargs["clip_mean"],
                n_critics=self.critic_kwargs["n_critics"],
                lr_schedule=self._dummy_schedule,
                optimizer_class=self.optimizer_class,
                optimizer_kwargs=self.optimizer_kwargs,
                features_extractor_class=self.features_extractor_class,
                features_extractor_kwargs=self.features_extractor_kwargs,
            )
        )
        return data
    def reset_noise(self, batch_size: int = 1) -> None:
        """Resample the actor's gSDE exploration noise matrix."""
        self.actor.reset_noise(batch_size=batch_size)
    def make_actor(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> Actor:
        """Build an Actor, optionally reusing an existing features extractor."""
        actor_kwargs = self._update_features_extractor(self.actor_kwargs, features_extractor)
        return Actor(**actor_kwargs).to(self.device)
    def make_critic(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> ContinuousCritic:
        """Build the critic ensemble, optionally reusing a features extractor."""
        critic_kwargs = self._update_features_extractor(self.critic_kwargs, features_extractor)
        return ContinuousCritic(**critic_kwargs).to(self.device)
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        return self._predict(obs, deterministic=deterministic)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        return self.actor(observation, deterministic)
    def set_training_mode(self, mode: bool) -> None:
        """Switch actor and critics between train and eval mode
        (affects batch norm / dropout); the target critics stay frozen."""
        self.actor.set_training_mode(mode)
        self.critic.set_training_mode(mode)
        self.training = mode
# "MlpPolicy" is simply SACPolicy with its default FlattenExtractor.
MlpPolicy = SACPolicy
class IPTSACPolicy(SACPolicy):
    """
    SACPolicy variant that blends the actor's mean action with the action of a
    fixed ``teacher_policy`` during exploration.

    ``ipt_weight`` interpolates between teacher (1.0) and actor (0.0) and can
    be annealed over training via ``ipt_weight_schedule``; with no schedule the
    weight is 0 and the policy behaves exactly like SACPolicy.
    (NOTE(review): the meaning of the "IPT" acronym is not defined in this
    file -- presumably "imitation of pre-trained teacher"; confirm.)
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        teacher_policy: BaseModel = None,
        ipt_weight_schedule: Schedule = None,
        **kwargs
    ):
        super().__init__(observation_space, action_space, lr_schedule, **kwargs)
        self.teacher_policy = teacher_policy
        self.ipt_weight_schedule = ipt_weight_schedule
        # Schedule evaluated at progress_remaining = 1 gives the initial weight.
        if ipt_weight_schedule is not None:
            self.ipt_weight = ipt_weight_schedule(1)
        else:
            self.ipt_weight = 0.0
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        return self._predict(obs, deterministic=deterministic)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        # Plain actor action when blending is off or a deterministic
        # (evaluation) action is requested.
        if self.ipt_weight == 0 or deterministic:
            return self.actor(observation, deterministic)
        mean_actions, log_std, kwargs = self.actor.get_action_dist_params(observation)
        # "Noise" = a sample from the actor's distribution centred at zero.
        # NOTE(review): actions_from_params applies the distribution's tanh
        # squashing, so this is a squashed sample rather than raw additive
        # Gaussian noise -- confirm this is the intended blending scheme.
        actor_noise = self.actor.action_dist.actions_from_params(
            th.zeros_like(mean_actions),
            log_std,
            deterministic=deterministic,
            **kwargs
        )
        teacher_action = self.teacher_policy.forward(observation)
        # Convex blend of teacher and actor means, plus exploration noise.
        action = (self.ipt_weight * teacher_action + (1.0 - self.ipt_weight) * mean_actions) + actor_noise
        return action
    def update_schedules(self, current_progress_remaining):
        """Advance ``ipt_weight`` along its schedule (call once per rollout)."""
        if self.ipt_weight_schedule is not None:
            self.ipt_weight = self.ipt_weight_schedule(current_progress_remaining)
class CnnPolicy(SACPolicy):
    """
    SAC actor-critic policy for image observations.

    Identical to :class:`SACPolicy` except that the default features
    extractor is ``NatureCNN``; every argument is forwarded unchanged.
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        super().__init__(
            observation_space=observation_space,
            action_space=action_space,
            lr_schedule=lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            use_sde=use_sde,
            log_std_init=log_std_init,
            sde_net_arch=sde_net_arch,
            use_expln=use_expln,
            clip_mean=clip_mean,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            n_critics=n_critics,
            share_features_extractor=share_features_extractor,
        )
class MultiInputPolicy(SACPolicy):
    """
    SAC actor-critic policy for dict observation spaces.

    Identical to :class:`SACPolicy` except that the default features
    extractor is ``CombinedExtractor``; every argument is forwarded unchanged.
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        super().__init__(
            observation_space=observation_space,
            action_space=action_space,
            lr_schedule=lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            use_sde=use_sde,
            log_std_init=log_std_init,
            sde_net_arch=sde_net_arch,
            use_expln=use_expln,
            clip_mean=clip_mean,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            n_critics=n_critics,
            share_features_extractor=share_features_extractor,
        )
# Expose the policy classes under the string names used by the SB3 policy
# registry (so e.g. SAC("MlpPolicy", env) resolves to SACPolicy).
register_policy("MlpPolicy", MlpPolicy)
register_policy("IptMlpPolicy", IPTSACPolicy)
register_policy("CnnPolicy", CnnPolicy)
register_policy("MultiInputPolicy", MultiInputPolicy)
| true | true |
1c497aa96e625e83e18815ae709066fd24247385 | 6,399 | py | Python | controller/gui.py | HighwayFlocking/HighwayFlocking | e870579d11574f5789162481219e771610f8b721 | [
"ECL-2.0",
"Apache-2.0"
] | 44 | 2015-06-11T14:39:26.000Z | 2021-05-21T11:06:47.000Z | controller/gui.py | HighwayFlocking/HighwayFlocking | e870579d11574f5789162481219e771610f8b721 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2015-06-12T07:32:58.000Z | 2018-05-27T07:04:52.000Z | controller/gui.py | HighwayFlocking/HighwayFlocking | e870579d11574f5789162481219e771610f8b721 | [
"ECL-2.0",
"Apache-2.0"
] | 8 | 2015-06-11T15:19:08.000Z | 2019-10-08T13:18:52.000Z | #coding: utf-8
# Copyright 2015 Sindre Ilebekk Johansen and Andreas Sløgedal Løvland
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import os
import threading
from datetime import time
import subprocess
from PySide import QtCore, QtGui
from PySide.QtCore import QTimer
from lib.gui.controller_ui import Ui_MainWindow
import configs
import config as cfg
from lib.simulation import Simulator, SimulatorIsClosedException
logger = logging.getLogger(__name__)
# (label shown in the GUI drop-down, spawner configuration dict) pairs.
# Index order must match the order items are inserted in
# ControllerMainWindow.setup_configurations().
configurations = (
    ('just_cars', configs.JUST_CARS),
    ('oncoming', configs.ONCOMING),
    ('oncoming + merging', configs.ONCOMING_ONRAMP),
    ('oncoming + merging + buses', configs.ONCOMING_ONRAMP_BUS),
    ('oncoming + merging + emergency vehicles', configs.ONCOMING_ONRAMP_EMERGENCY),
    ('oncoming + merging + buses + emergency vehicles', configs.ONCOMING_ONRAMP_BUS_EMERGENCY),
    ('symetric', configs.SYMETRIC)
)
class ControllerMainWindow(QtGui.QMainWindow):
    """Main GUI window that starts, reconfigures and stops the simulator.

    A 1-second QTimer polls the simulator for fresh statistics and keeps the
    status label and counters up to date.
    """
    def __init__(self, parent=None):
        """Build the UI, wire the widgets, start the poll timer and place the
        window to the left of where the (1280 px wide) simulator will appear."""
        super(ControllerMainWindow, self).__init__(parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setup_configurations()
        self.assign_widgets()
        self.simulator = None
        self.ui.stop.setEnabled(False)
        # Poll the simulator once per second; see update().
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(1000)
        self.show()
        desktop = QtGui.QDesktopWidget().availableGeometry()
        y = self.y()
        # NOTE(review): under Python 3 this division yields a float, which
        # QWidget.move() may reject -- this code looks Python 2 era; confirm.
        x = (desktop.width() - 1280) / 2 - self.width() - 50
        self.move(x, y)
    def setup_configurations(self):
        """Fill the configuration drop-down from the module-level tuple."""
        for i, config in enumerate(configurations):
            self.ui.configuration.insertItem(i, config[0])
    def assign_widgets(self):
        """Connect the Start/Stop buttons to their handlers."""
        self.ui.start.clicked.connect(self.start_clicked)
        self.ui.stop.clicked.connect(self.stop_clicked)
    def on_about_to_quit(self):
        """Qt aboutToQuit hook: make sure the simulator is shut down."""
        if self.simulator:
            self.simulator.close()
    def stop_clicked(self):
        """Stop the simulator and reset the buttons to their idle state."""
        self.ui.stop.setEnabled(False)
        if self.simulator:
            self.simulator.close()
        self.ui.fixed.setEnabled(True)
        self.simulator = None
        self.ui.start.setText("Start Simulation")
    def start_clicked(self):
        """Start a new simulator, or reconfigure the running one."""
        self.ui.start.setEnabled(False)
        self.ui.fixed.setEnabled(False)
        if self.simulator:
            self.update_simulator()
        else:
            self.simulator = Simulator(fixed_time_step=self.ui.fixed.isChecked())
            # Launching the simulator can take a while; do it off the GUI thread.
            threading.Thread(target=self.start_simulator, name="Start simulator Thread").start()
            self.ui.status.setText(
                "<html><head/><body><p><span style=\"color:#222;\">Simulator is Starting</span></p></body></html>")
        self.ui.start.setText("Restart Simulation")
    def start_simulator(self):
        """Worker-thread entry: boot the simulator, then push the config."""
        self.simulator.start_and_connect()
        self.update_simulator()
    def update_simulator(self):
        """Push the currently selected configuration to the simulator."""
        base_config = configurations[self.ui.configuration.currentIndex()][1]
        throughput = self.ui.throughput.value()
        config = configs.througput(base_config, throughput=throughput)
        logger.info("Max Waits: %s", [sp['max_wait'] for sp in config['spawners']])
        logger.info("Min Waits: %s", [sp['min_wait'] for sp in config['spawners']])
        # Pause while reconfiguring so vehicles are not spawned mid-change.
        logger.info('Pausing the simulation')
        self.simulator.set_paused(True)
        logger.info('Removing all vehicles')
        self.simulator.remove_all_vehicles()
        logger.info('Resetting the spawners')
        self.simulator.reset_all_spawners()
        logger.info('Configuring the spawners')
        for spawner_conf in config['spawners']:
            self.simulator.configure_spawner(spawner_conf)
        logger.info('Starting the simulation')
        self.simulator.set_paused(False)
        logger.info('Resetting the stats')
        self.simulator.reset_stats()
        self.simulator.clear_queue()
        self.ui.start.setEnabled(True)
        self.ui.stop.setEnabled(True)
    def update(self):
        """Timer tick: refresh the stats display; detect a closed simulator.

        NOTE(review): this shadows QWidget.update(); Qt-internal repaint
        requests will also land here -- confirm that is intended.
        """
        if self.simulator:
            try:
                stats = self.simulator.get_newest_stats()
                if stats:
                    # stats['time'] arrives as seconds; show it as MM:SS.
                    minutes, seconds = divmod(int(stats['time']), 60)
                    stats['time'] = time(minute=minutes, second=seconds)
                    self.ui.current_values.setText(
                        """Time: {time:%M:%S}
Current Throughput: From City: {throughputs[0]}, To City: {throughputs[1]}
Incidents: {incidents}
Vehicles Spawned: {spawned}
Vehicles on Road: {onroad}""".format(**stats))
                self.ui.status.setText(
                    "<html><head/><body><p><span style=\"color:#00b548;\">Simulator is Running</span></p></body></html>")
            except SimulatorIsClosedException:
                # Simulator went away (crashed or was closed externally).
                self.ui.status.setText(
                    "<html><head/><body><p><span style=\"color:#b50003;\">Simulator is not Running</span></p></body></html>")
                self.simulator.close()
                self.simulator = None
                self.ui.start.setText("Start Simulation")
                self.ui.stop.setEnabled(False)
                self.ui.fixed.setEnabled(True)
                return
def main():
    """Configure logging, verify the simulator exists, and run the Qt app."""
    fh = logging.FileHandler('gui.log')
    fh.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s')
    fh.setFormatter(formatter)
    # Attach the file handler to the root logger so every module logs to
    # gui.log. (Renamed from `logger` to avoid shadowing the module-level
    # `logger` used elsewhere in this file.)
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(fh)
    app = QtGui.QApplication(sys.argv)
    if not cfg.SIMULATOR_LOCATION:
        # Return value of critical() is not needed; just show the dialog.
        QtGui.QMessageBox.critical(None, 'No Simulator!', "Could not find the simulator!", QtGui.QMessageBox.Abort)
        return
    controllerWindow = ControllerMainWindow()
    app.aboutToQuit.connect(controllerWindow.on_about_to_quit)
    sys.exit(app.exec_())
# Script entry point.
if __name__ == '__main__':
    main()
| 34.967213 | 125 | 0.64932 |
import sys
import logging
import os
import threading
from datetime import time
import subprocess
from PySide import QtCore, QtGui
from PySide.QtCore import QTimer
from lib.gui.controller_ui import Ui_MainWindow
import configs
import config as cfg
from lib.simulation import Simulator, SimulatorIsClosedException
logger = logging.getLogger(__name__)
# (label shown in the GUI drop-down, spawner configuration dict) pairs.
configurations = (
    ('just_cars', configs.JUST_CARS),
    ('oncoming', configs.ONCOMING),
    ('oncoming + merging', configs.ONCOMING_ONRAMP),
    ('oncoming + merging + buses', configs.ONCOMING_ONRAMP_BUS),
    ('oncoming + merging + emergency vehicles', configs.ONCOMING_ONRAMP_EMERGENCY),
    ('oncoming + merging + buses + emergency vehicles', configs.ONCOMING_ONRAMP_BUS_EMERGENCY),
    ('symetric', configs.SYMETRIC)
)
class ControllerMainWindow(QtGui.QMainWindow):
    """Main GUI window that starts, reconfigures and stops the simulator.

    A 1-second QTimer polls the simulator for fresh statistics and keeps the
    status label up to date.

    Fix: the three status-HTML string literals were truncated at the '#' of
    their CSS colour codes (leaving unterminated strings / syntax errors);
    they are restored here to their original full values.
    """
    def __init__(self, parent=None):
        """Build the UI, wire the widgets, start the poll timer and place the
        window to the left of where the (1280 px wide) simulator will appear."""
        super(ControllerMainWindow, self).__init__(parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setup_configurations()
        self.assign_widgets()
        self.simulator = None
        self.ui.stop.setEnabled(False)
        # Poll the simulator once per second; see update().
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(1000)
        self.show()
        desktop = QtGui.QDesktopWidget().availableGeometry()
        y = self.y()
        x = (desktop.width() - 1280) / 2 - self.width() - 50
        self.move(x, y)
    def setup_configurations(self):
        """Fill the configuration drop-down from the module-level tuple."""
        for i, config in enumerate(configurations):
            self.ui.configuration.insertItem(i, config[0])
    def assign_widgets(self):
        """Connect the Start/Stop buttons to their handlers."""
        self.ui.start.clicked.connect(self.start_clicked)
        self.ui.stop.clicked.connect(self.stop_clicked)
    def on_about_to_quit(self):
        """Qt aboutToQuit hook: make sure the simulator is shut down."""
        if self.simulator:
            self.simulator.close()
    def stop_clicked(self):
        """Stop the simulator and reset the buttons to their idle state."""
        self.ui.stop.setEnabled(False)
        if self.simulator:
            self.simulator.close()
        self.ui.fixed.setEnabled(True)
        self.simulator = None
        self.ui.start.setText("Start Simulation")
    def start_clicked(self):
        """Start a new simulator, or reconfigure the running one."""
        self.ui.start.setEnabled(False)
        self.ui.fixed.setEnabled(False)
        if self.simulator:
            self.update_simulator()
        else:
            self.simulator = Simulator(fixed_time_step=self.ui.fixed.isChecked())
            # Launching the simulator can take a while; do it off the GUI thread.
            threading.Thread(target=self.start_simulator, name="Start simulator Thread").start()
            self.ui.status.setText(
                "<html><head/><body><p><span style=\"color:#222;\">Simulator is Starting</span></p></body></html>")
        self.ui.start.setText("Restart Simulation")
    def start_simulator(self):
        """Worker-thread entry: boot the simulator, then push the config."""
        self.simulator.start_and_connect()
        self.update_simulator()
    def update_simulator(self):
        """Push the currently selected configuration to the simulator."""
        base_config = configurations[self.ui.configuration.currentIndex()][1]
        throughput = self.ui.throughput.value()
        config = configs.througput(base_config, throughput=throughput)
        logger.info("Max Waits: %s", [sp['max_wait'] for sp in config['spawners']])
        logger.info("Min Waits: %s", [sp['min_wait'] for sp in config['spawners']])
        # Pause while reconfiguring so vehicles are not spawned mid-change.
        logger.info('Pausing the simulation')
        self.simulator.set_paused(True)
        logger.info('Removing all vehicles')
        self.simulator.remove_all_vehicles()
        logger.info('Resetting the spawners')
        self.simulator.reset_all_spawners()
        logger.info('Configuring the spawners')
        for spawner_conf in config['spawners']:
            self.simulator.configure_spawner(spawner_conf)
        logger.info('Starting the simulation')
        self.simulator.set_paused(False)
        logger.info('Resetting the stats')
        self.simulator.reset_stats()
        self.simulator.clear_queue()
        self.ui.start.setEnabled(True)
        self.ui.stop.setEnabled(True)
    def update(self):
        """Timer tick: refresh the stats display; detect a closed simulator."""
        if self.simulator:
            try:
                stats = self.simulator.get_newest_stats()
                if stats:
                    # stats['time'] arrives as seconds; show it as MM:SS.
                    minutes, seconds = divmod(int(stats['time']), 60)
                    stats['time'] = time(minute=minutes, second=seconds)
                    self.ui.current_values.setText(
                        """Time: {time:%M:%S}
Current Throughput: From City: {throughputs[0]}, To City: {throughputs[1]}
Incidents: {incidents}
Vehicles Spawned: {spawned}
Vehicles on Road: {onroad}""".format(**stats))
                self.ui.status.setText(
                    "<html><head/><body><p><span style=\"color:#00b548;\">Simulator is Running</span></p></body></html>")
            except SimulatorIsClosedException:
                # Simulator went away (crashed or was closed externally).
                self.ui.status.setText(
                    "<html><head/><body><p><span style=\"color:#b50003;\">Simulator is not Running</span></p></body></html>")
                self.simulator.close()
                self.simulator = None
                self.ui.start.setText("Start Simulation")
                self.ui.stop.setEnabled(False)
                self.ui.fixed.setEnabled(True)
                return
def main():
    """Configure logging, verify the simulator exists, and run the Qt app."""
    fh = logging.FileHandler('gui.log')
    fh.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s')
    fh.setFormatter(formatter)
    # Attach the file handler to the root logger so every module logs to
    # gui.log. (Renamed from `logger` to avoid shadowing the module-level
    # `logger` used elsewhere in this file.)
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(fh)
    app = QtGui.QApplication(sys.argv)
    if not cfg.SIMULATOR_LOCATION:
        # Return value of critical() is not needed; just show the dialog.
        QtGui.QMessageBox.critical(None, 'No Simulator!', "Could not find the simulator!", QtGui.QMessageBox.Abort)
        return
    controllerWindow = ControllerMainWindow()
    app.aboutToQuit.connect(controllerWindow.on_about_to_quit)
    sys.exit(app.exec_())
# Script entry point.
if __name__ == '__main__':
    main()
| true | true |
1c497acc563c98424984a3eed65eb8d2e59387b3 | 563 | py | Python | pyqt/pyqt5-master/src/windows/Background2.py | Ding-zhenke/Dcount-s-notebook | 16c29ac7d076c466e053f1b8db4a7f4e43f67a24 | [
"MIT"
] | null | null | null | pyqt/pyqt5-master/src/windows/Background2.py | Ding-zhenke/Dcount-s-notebook | 16c29ac7d076c466e053f1b8db4a7f4e43f67a24 | [
"MIT"
] | null | null | null | pyqt/pyqt5-master/src/windows/Background2.py | Ding-zhenke/Dcount-s-notebook | 16c29ac7d076c466e053f1b8db4a7f4e43f67a24 | [
"MIT"
] | 2 | 2019-06-18T05:53:26.000Z | 2019-06-19T03:26:02.000Z | '''
使用多种方式设置窗口背景色和背景图片
1. QSS
2. QPalette
3. 直接绘制
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Background2(QWidget):
    """Window that paints an image as its background inside ``paintEvent``."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("绘制背景图片")
    def paintEvent(self, event):
        # Draw the background picture scaled to the current window rectangle.
        qp = QPainter(self)
        qp.drawPixmap(self.rect(), QPixmap('./images/screen1.jpg'))
# Standalone demo entry point.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    form = Background2()
    form.show()
    sys.exit(app.exec_())
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Background2(QWidget):
    """Window whose ``paintEvent`` draws an image across the full client rect."""
    def __init__(self):
        super().__init__()
        self.setWindowTitle("绘制背景图片")
    def paintEvent(self, event):
        background = QPixmap('./images/screen1.jpg')
        canvas = QPainter(self)
        canvas.drawPixmap(self.rect(), background)
# Standalone demo entry point.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    form = Background2()
    form.show()
    sys.exit(app.exec_())
1c497bb22e51089dbe0ccd8f10dc86c537c0c7d6 | 9,748 | py | Python | train.py | ssahn3087/pedestrian_detection | d9a6cb9d10246941cff8575c803ab60b3a9d7d04 | [
"MIT"
] | 1 | 2019-10-25T12:31:38.000Z | 2019-10-25T12:31:38.000Z | train.py | ssahn3087/pedestrian_detection | d9a6cb9d10246941cff8575c803ab60b3a9d7d04 | [
"MIT"
] | null | null | null | train.py | ssahn3087/pedestrian_detection | d9a6cb9d10246941cff8575c803ab60b3a9d7d04 | [
"MIT"
] | null | null | null | import os
import torch
import numpy as np
import math
from torch.autograd import Variable
from datetime import datetime
from faster_rcnn import network
from faster_rcnn.network import init_data, data_to_variable
from faster_rcnn.network import train_net_params, print_weight_grad
from faster_rcnn.faster_rcnn_vgg import FasterRCNN as FasterRCNN_VGG
from faster_rcnn.faster_rcnn_res import FasterRCNN as FasterRCNN_RES
from faster_rcnn.utils.timer import Timer
from val import test, id_match_test
from faster_rcnn.roi_data_layer.sampler import sampler
from faster_rcnn.roi_data_layer.roidb import extract_roidb
from faster_rcnn.roi_data_layer.roibatchLoader import roibatchLoader
from faster_rcnn.fast_rcnn.config import cfg, cfg_from_file
try:
from termcolor import cprint
except ImportError:
cprint = None
try:
from pycrayon import CrayonClient
except ImportError:
CrayonClient = None
def log_print(text, color='blue', on_color=None, attrs=None):
    """Print *text* in colour via termcolor's cprint when it is available,
    otherwise fall back to a plain print (``cprint`` is None in that case)."""
    if cprint is None:
        print(text)
    else:
        cprint(text, color=color, on_color=on_color, attrs=attrs)
# hyper-parameters
# ------------
# Dataset choice: uncomment exactly one (train, test) pair below.
imdb_name = 'voc_2007_trainval'
test_name = 'voc_2007_test'
# imdb_name = 'coco_2017_train'
# test_name = 'coco_2017_val'
# imdb_name = 'CaltechPedestrians_train'
# test_name = 'CaltechPedestrians_test'
cfg_file = 'experiments/cfgs/faster_rcnn_end2end.yml'
model_dir = 'data/pretrained_model/'
output_dir = 'models/saved_model3'
pre_model_name = 'voc_2007_trainval_14_vgg16_0.7_b1.h5'
pretrained_model = model_dir + pre_model_name
start_epoch = 1
end_epoch = 10
# Learning rate is multiplied by lr_decay every lr_decay_step epochs.
lr_decay_step = 5
lr_decay = 0.1
rand_seed = 1024
_DEBUG = True
use_tensorboard = True
remove_all_log = True # remove all historical experiments in TensorBoard
exp_name = None # the previous experiment name in TensorBoard
# ------------
if rand_seed is not None:
    np.random.seed(rand_seed)
# load config (merges the YAML file into the global cfg)
cfg_from_file(cfg_file)
fg_thresh = cfg.TRAIN.RPN_POSITIVE_OVERLAP
is_resnet = cfg.RESNET.IS_TRUE
batch_size = cfg.TRAIN.IMS_PER_BATCH
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
disp_interval = cfg.TRAIN.DISPLAY
log_interval = cfg.TRAIN.LOG_IMAGE_ITERS
save_interval = cfg.TRAIN.SNAPSHOT_ITERS
# load data: build roidbs for the training and test image databases.
imdb, roidb, ratio_list, ratio_index = extract_roidb(imdb_name)
test_imdb, test_roidb, _, _ = extract_roidb(test_name)
train_size = len(roidb)
sampler_batch = sampler(train_size, batch_size, cfg.TRIPLET.IS_TRUE)
dataset = roibatchLoader(imdb, roidb, ratio_list, ratio_index, batch_size,
                         imdb.num_classes, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                         sampler=sampler_batch, num_workers=0)
# load net: pick the ResNet or VGG16 backbone depending on the config.
if is_resnet:
    model_name = cfg.RESNET.MODEL
    cfg.TRAIN.DOUBLE_BIAS = False
    cfg.TRAIN.WEIGHT_DECAY = 0.0001
    net = FasterRCNN_RES(classes=imdb.classes, debug=_DEBUG)
    net.init_module()
else:
    model_name = 'vgg16'
    net = FasterRCNN_VGG(classes=imdb.classes, debug=_DEBUG)
    net.init_module()
# Tag the model name with the triplet loss variant when enabled.
if cfg.TRIPLET.IS_TRUE:
    model_name += '_' + cfg.TRIPLET.LOSS
# network.load_net(pretrained_model, net)
# person_key = 15 (pascal_voc) user_defined_coco_set = 1
#network.load_net_pedestrians(pretrained_model, net, person_key=15)
blob = init_data(is_cuda=True)
# set net to be prepared to train
net.cuda()
params = train_net_params(net, cfg, lr)
optimizer = torch.optim.SGD(params, momentum=momentum)
def make_dir(output_dir):
    """Create *output_dir* (including missing parents) if it does not exist.

    Uses ``exist_ok=True`` so repeated or concurrent calls cannot race
    between an existence check and the creation (the original
    ``exists()``-then-``makedirs()`` pattern could raise FileExistsError).
    """
    os.makedirs(output_dir, exist_ok=True)
make_dir(output_dir)
# tensorboard (via the pycrayon client, if it could be imported)
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
    print('TENSORBOARD IS ON')
    cc = CrayonClient(hostname='127.0.0.1')
    if remove_all_log:
        cc.remove_all_experiments()
    if exp_name is None:
        # New experiment named after dataset + model + timestamp.
        name = '{}_{}'.format(imdb_name, model_name)
        exp_name = datetime.now().strftime(name+'_%m-%d_%H-%M')
        exp = cc.create_experiment(exp_name)
    else:
        # Resume logging into a previously created experiment.
        exp = cc.open_experiment(exp_name)
iters_per_epoch = int(train_size / batch_size)
# training counters and timers
train_loss = 0
previous_precision = 0.
descend = 0
step_cnt = 0
cnt = 0
re_cnt = False
t = Timer()
t.tic()
# Main training loop: one snapshot per epoch, periodic console/tensorboard
# logging, and an optional precision check on the test set.
for epoch in range(start_epoch, end_epoch+1):
    # pf/tot: running true-positive percentage used for the test-phase gate
    pf, tot = 0., 0
    # per-display-window detection counters (reset after every report)
    tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0., 0., 0., 0.
    rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.
    net.train()
    # step-decay: rebuild the optimizer with the scaled learning rate
    if epoch > 1 and (epoch-1) % lr_decay_step == 0:
        lr *= lr_decay
        params = train_net_params(net, cfg, lr)
        optimizer = torch.optim.SGD(params, momentum=momentum)
    data_iter = iter(dataloader)
    for step in range(iters_per_epoch):
        # get one batch
        data = next(data_iter)
        (im_data, im_info, gt_boxes, num_boxes) = data_to_variable(blob, data)
        # forward
        net.zero_grad()
        net(im_data, im_info, gt_boxes, num_boxes)
        if _DEBUG:
            tp += float(net.tp)
            tn += float(net.tn)
            fp += float(net.fp)
            fg += net.fg_cnt
            bg += net.bg_cnt
            tp_box += float(net.rpn.tp)
            fg_box += net.rpn.fg_box
            # BUG FIX: the RPN accumulators were swapped -- cross_entropy is
            # the RPN classification loss and loss_box the bbox-regression
            # loss (mirrors the rcnn_* lines below), so the printed/logged
            # 'rpn_cls'/'rpn_box' values were exchanged.
            rpn_cls += net.rpn.cross_entropy.data.cpu().numpy()[0]
            rpn_box += net.rpn.loss_box.data.cpu().numpy()[0]
            rcnn_box += net.loss_box.data.cpu().numpy()[0]
            rcnn_cls += net.cross_entropy.data.cpu().numpy()[0]
            sim_loss += net.triplet_loss.data.cpu().numpy()[0] if cfg.TRIPLET.IS_TRUE else 0.
        loss = net.rpn.loss + net.loss
        if isnan(loss):
            # dump the offending batch so NaNs can be diagnosed
            print(gt_boxes)
            print(net.rpn.loss, net.loss)
        train_loss += loss.data[0]
        step_cnt += 1
        cnt += 1
        # backward
        optimizer.zero_grad() # clear grad
        loss.backward()
        network.clip_gradient(net, 10.)
        optimizer.step()
        if step % disp_interval == 0 and step > 0:
            duration = t.toc(average=False)
            fps = step_cnt / duration
            log_text = 'step %d, loss: %.4f, fps: %.2f (%.2fs per batch) --[epoch %2d] --[iter %4d/%4d]' % (
                step, train_loss / step_cnt, fps, 1./fps, epoch, step, iters_per_epoch)
            log_print(log_text, color='green', attrs=['bold'])
            if _DEBUG:
                if fg == 0 or bg == 0:
                    # no foreground/background samples yet; ratios undefined
                    pass
                else:
                    tot += 1
                    pf += tp/fg*100
                    match_rate = net.match/net.set * 100. if cfg.TRIPLET.IS_TRUE else 0.
                    log_print('\tEP: %.2f%% PR: %.2f%% TP: %.2f%%, TF: %.2f%%, fg/bg=(%d/%d), TD: %.2f%%' %
                              (tp_box/fg_box*100, tp/(tp+fp)*100, tp/fg*100., tn/bg*100., fg/step_cnt, bg/step_cnt, match_rate))
                    log_print('\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box: %.4f, sim_loss: %.4f' % (
                        rpn_cls/step_cnt, rpn_box/step_cnt, rcnn_cls/step_cnt, rcnn_box/step_cnt, sim_loss/step_cnt )
                    )
            re_cnt = True
        if use_tensorboard and cnt % log_interval == 0 and cnt > 0:
            exp.add_scalar_value('train_loss', train_loss / step_cnt, step=cnt)
            exp.add_scalar_value('learning_rate', lr, step=cnt)
            if _DEBUG:
                match_rate = net.match / net.set * 100. if cfg.TRIPLET.IS_TRUE else 0.
                triplet_loss = net.triplet_loss.data.cpu().numpy() if cfg.TRIPLET.IS_TRUE else 0.
                exp.add_scalar_value('true_positive', tp/fg*100., step=cnt)
                exp.add_scalar_value('true_negative', tn/bg*100., step=cnt)
                exp.add_scalar_value('precision', tp / (tp+fp) * 100., step=cnt)
                exp.add_scalar_value('true_distance', match_rate, step=cnt)
                losses = {'rpn_cls': float(rpn_cls/step_cnt),
                          'rpn_box': float(rpn_box/step_cnt),
                          'rcnn_cls': float(rcnn_cls/step_cnt),
                          'rcnn_box': float(rcnn_box/step_cnt),
                          'sim_loss': float(sim_loss/step_cnt)}
                exp.add_scalar_dict(losses, step=cnt)
        if re_cnt:
            # a report was emitted: reset the per-window statistics
            train_loss = 0
            tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0, 0, 0., 0
            rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.
            net.reset_match_count()
            step_cnt = 0
            t.tic()
            re_cnt = False
    # snapshot the model once per epoch
    save_dir = os.path.join(output_dir, model_name)
    make_dir(save_dir)
    save_name = os.path.join(save_dir, '{}_{}_{}_{}_b{}.h5'
                             .format(imdb_name, epoch, model_name, fg_thresh, batch_size))
    network.save_net(save_name, net)
    print('save model: {}'.format(save_name))
    # BUG FIX: guard tot == 0 (no display window completed this epoch),
    # which previously raised ZeroDivisionError.
    if tot > 0 and pf/tot > 80:
        print('Entering Test Phase ...')
        prec, rec = test(save_name, net, test_imdb, test_roidb)
        match = id_match_test(save_name, net, test_imdb, test_roidb, cfg.TRIPLET.LOSS) if cfg.TRIPLET.IS_TRUE else 0.
        # BUG FIX (resource safety): context manager guarantees the results
        # file is closed even if formatting/writing raises.
        with open('PrecisionAndRecall.txt', 'a') as f:
            f.write(save_name + ' ----[prec: {:.2f}%, rec: {:.2f}%] / {:.2f}%\n'.format(prec, rec, match))
        if previous_precision == 0.:
            previous_precision = prec
        else:
            if previous_precision > prec:
                print('Precision decreased {:.2f}% -> {:.2f}% ...' \
                      .format(previous_precision, prec))
                import warnings
                warnings.warn('test set Precision decreased. Keep Watching')
            else:
                previous_precision = prec
| 36.237918 | 122 | 0.627924 | import os
import torch
import numpy as np
import math
from torch.autograd import Variable
from datetime import datetime
from faster_rcnn import network
from faster_rcnn.network import init_data, data_to_variable
from faster_rcnn.network import train_net_params, print_weight_grad
from faster_rcnn.faster_rcnn_vgg import FasterRCNN as FasterRCNN_VGG
from faster_rcnn.faster_rcnn_res import FasterRCNN as FasterRCNN_RES
from faster_rcnn.utils.timer import Timer
from val import test, id_match_test
from faster_rcnn.roi_data_layer.sampler import sampler
from faster_rcnn.roi_data_layer.roidb import extract_roidb
from faster_rcnn.roi_data_layer.roibatchLoader import roibatchLoader
from faster_rcnn.fast_rcnn.config import cfg, cfg_from_file
try:
from termcolor import cprint
except ImportError:
cprint = None
try:
from pycrayon import CrayonClient
except ImportError:
CrayonClient = None
def log_print(text, color='blue', on_color=None, attrs=None):
    """Print *text*; use termcolor's cprint for styling when it was importable."""
    if cprint is None:
        print(text)
    else:
        cprint(text, color=color, on_color=on_color, attrs=attrs)
# Dataset / snapshot locations and run configuration.
imdb_name = 'voc_2007_trainval'
test_name = 'voc_2007_test'
cfg_file = 'experiments/cfgs/faster_rcnn_end2end.yml'
model_dir = 'data/pretrained_model/'
output_dir = 'models/saved_model3'
pre_model_name = 'voc_2007_trainval_14_vgg16_0.7_b1.h5'
pretrained_model = model_dir + pre_model_name
start_epoch = 1
end_epoch = 10
lr_decay_step = 5        # decay the lr every this many epochs
lr_decay = 0.1
rand_seed = 1024
_DEBUG = True            # enables detection-statistics accumulation/logging
use_tensorboard = True
remove_all_log = True
exp_name = None
if rand_seed is not None:
    # NOTE: only numpy's RNG is seeded here; torch is left unseeded
    np.random.seed(rand_seed)
cfg_from_file(cfg_file)
fg_thresh = cfg.TRAIN.RPN_POSITIVE_OVERLAP
is_resnet = cfg.RESNET.IS_TRUE
batch_size = cfg.TRAIN.IMS_PER_BATCH
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
disp_interval = cfg.TRAIN.DISPLAY
log_interval = cfg.TRAIN.LOG_IMAGE_ITERS
save_interval = cfg.TRAIN.SNAPSHOT_ITERS
# Load train/test roidbs plus the aspect-ratio ordering used by the sampler.
imdb, roidb, ratio_list, ratio_index = extract_roidb(imdb_name)
test_imdb, test_roidb, _, _ = extract_roidb(test_name)
train_size = len(roidb)
sampler_batch = sampler(train_size, batch_size, cfg.TRIPLET.IS_TRUE)
dataset = roibatchLoader(imdb, roidb, ratio_list, ratio_index, batch_size,
                            imdb.num_classes, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                            sampler=sampler_batch, num_workers=0)
# Build the backbone selected by the config (ResNet variant or VGG16).
if is_resnet:
    model_name = cfg.RESNET.MODEL
    cfg.TRAIN.DOUBLE_BIAS = False
    cfg.TRAIN.WEIGHT_DECAY = 0.0001
    net = FasterRCNN_RES(classes=imdb.classes, debug=_DEBUG)
    net.init_module()
else:
    model_name = 'vgg16'
    net = FasterRCNN_VGG(classes=imdb.classes, debug=_DEBUG)
    net.init_module()
# Tag snapshots with the triplet-loss variant when enabled.
if cfg.TRIPLET.IS_TRUE:
    model_name += '_' + cfg.TRIPLET.LOSS
blob = init_data(is_cuda=True)
net.cuda()
params = train_net_params(net, cfg, lr)
optimizer = torch.optim.SGD(params, momentum=momentum)
def make_dir(output_dir):
    """Ensure that *output_dir* exists on disk, creating parents as needed."""
    already_there = os.path.exists(output_dir)
    if not already_there:
        os.makedirs(output_dir)
make_dir(output_dir)
# Tensorboard logging via pycrayon; only active when the client imported.
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
    print('TENSORBOARD IS ON')
    # assumes a crayon server is listening locally -- TODO confirm
    cc = CrayonClient(hostname='127.0.0.1')
    if remove_all_log:
        cc.remove_all_experiments()
    if exp_name is None:
        # timestamped experiment name derived from dataset + model
        name = '{}_{}'.format(imdb_name, model_name)
        exp_name = datetime.now().strftime(name+'_%m-%d_%H-%M')
        exp = cc.create_experiment(exp_name)
    else:
        exp = cc.open_experiment(exp_name)
iters_per_epoch = int(train_size / batch_size)
# Running accumulators shared by the epoch loop below.
train_loss = 0
previous_precision = 0.
descend = 0
step_cnt = 0
cnt = 0
re_cnt = False
t = Timer()
t.tic()
from math import isnan
# Training loop: per-epoch snapshotting, periodic logging, optional test phase.
for epoch in range(start_epoch, end_epoch+1):
    # pf/tot drive the test-phase gate; the rest are per-window counters
    pf, tot = 0., 0
    tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0., 0., 0., 0.
    rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.
    net.train()
    # learning-rate step decay: rebuild optimizer with scaled lr
    if epoch > 1 and (epoch-1) % lr_decay_step == 0:
        lr *= lr_decay
        params = train_net_params(net, cfg, lr)
        optimizer = torch.optim.SGD(params, momentum=momentum)
    data_iter = iter(dataloader)
    for step in range(iters_per_epoch):
        data = next(data_iter)
        (im_data, im_info, gt_boxes, num_boxes) = data_to_variable(blob, data)
        net.zero_grad()
        net(im_data, im_info, gt_boxes, num_boxes)
        if _DEBUG:
            tp += float(net.tp)
            tn += float(net.tn)
            fp += float(net.fp)
            fg += net.fg_cnt
            bg += net.bg_cnt
            tp_box += float(net.rpn.tp)
            fg_box += net.rpn.fg_box
            # BUG FIX: cls/box accumulators for the RPN were swapped --
            # cross_entropy is the classification loss, loss_box the
            # regression loss (as in the rcnn_* lines below).
            rpn_cls += net.rpn.cross_entropy.data.cpu().numpy()[0]
            rpn_box += net.rpn.loss_box.data.cpu().numpy()[0]
            rcnn_box += net.loss_box.data.cpu().numpy()[0]
            rcnn_cls += net.cross_entropy.data.cpu().numpy()[0]
            sim_loss += net.triplet_loss.data.cpu().numpy()[0] if cfg.TRIPLET.IS_TRUE else 0.
        loss = net.rpn.loss + net.loss
        if isnan(loss):
            # dump the batch for NaN diagnosis
            print(gt_boxes)
            print(net.rpn.loss, net.loss)
        train_loss += loss.data[0]
        step_cnt += 1
        cnt += 1
        optimizer.zero_grad()
        loss.backward()
        network.clip_gradient(net, 10.)
        optimizer.step()
        if step % disp_interval == 0 and step > 0:
            duration = t.toc(average=False)
            fps = step_cnt / duration
            log_text = 'step %d, loss: %.4f, fps: %.2f (%.2fs per batch) --[epoch %2d] --[iter %4d/%4d]' % (
                step, train_loss / step_cnt, fps, 1./fps, epoch, step, iters_per_epoch)
            log_print(log_text, color='green', attrs=['bold'])
            if _DEBUG:
                if fg == 0 or bg == 0:
                    # ratios undefined without fg/bg samples
                    pass
                else:
                    tot += 1
                    pf += tp/fg*100
                    match_rate = net.match/net.set * 100. if cfg.TRIPLET.IS_TRUE else 0.
                    log_print('\tEP: %.2f%% PR: %.2f%% TP: %.2f%%, TF: %.2f%%, fg/bg=(%d/%d), TD: %.2f%%' %
                              (tp_box/fg_box*100, tp/(tp+fp)*100, tp/fg*100., tn/bg*100., fg/step_cnt, bg/step_cnt, match_rate))
                    log_print('\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box: %.4f, sim_loss: %.4f' % (
                        rpn_cls/step_cnt, rpn_box/step_cnt, rcnn_cls/step_cnt, rcnn_box/step_cnt, sim_loss/step_cnt )
                    )
            re_cnt = True
        if use_tensorboard and cnt % log_interval == 0 and cnt > 0:
            exp.add_scalar_value('train_loss', train_loss / step_cnt, step=cnt)
            exp.add_scalar_value('learning_rate', lr, step=cnt)
            if _DEBUG:
                match_rate = net.match / net.set * 100. if cfg.TRIPLET.IS_TRUE else 0.
                triplet_loss = net.triplet_loss.data.cpu().numpy() if cfg.TRIPLET.IS_TRUE else 0.
                exp.add_scalar_value('true_positive', tp/fg*100., step=cnt)
                exp.add_scalar_value('true_negative', tn/bg*100., step=cnt)
                exp.add_scalar_value('precision', tp / (tp+fp) * 100., step=cnt)
                exp.add_scalar_value('true_distance', match_rate, step=cnt)
                losses = {'rpn_cls': float(rpn_cls/step_cnt),
                          'rpn_box': float(rpn_box/step_cnt),
                          'rcnn_cls': float(rcnn_cls/step_cnt),
                          'rcnn_box': float(rcnn_box/step_cnt),
                          'sim_loss': float(sim_loss/step_cnt)}
                exp.add_scalar_dict(losses, step=cnt)
        if re_cnt:
            # statistics were reported: start a fresh window
            train_loss = 0
            tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0, 0, 0., 0
            rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.
            net.reset_match_count()
            step_cnt = 0
            t.tic()
            re_cnt = False
    # snapshot the model once per epoch
    save_dir = os.path.join(output_dir, model_name)
    make_dir(save_dir)
    save_name = os.path.join(save_dir, '{}_{}_{}_{}_b{}.h5'
                             .format(imdb_name, epoch, model_name, fg_thresh, batch_size))
    network.save_net(save_name, net)
    print('save model: {}'.format(save_name))
    # BUG FIX: tot may be 0 when no display window completed; guard the division.
    if tot > 0 and pf/tot > 80:
        print('Entering Test Phase ...')
        prec, rec = test(save_name, net, test_imdb, test_roidb)
        match = id_match_test(save_name, net, test_imdb, test_roidb, cfg.TRIPLET.LOSS) if cfg.TRIPLET.IS_TRUE else 0.
        # BUG FIX (resource safety): 'with' closes the file on any exception.
        with open('PrecisionAndRecall.txt', 'a') as f:
            f.write(save_name + ' ----[prec: {:.2f}%, rec: {:.2f}%] / {:.2f}%\n'.format(prec, rec, match))
        if previous_precision == 0.:
            previous_precision = prec
        else:
            if previous_precision > prec:
                print('Precision decreased {:.2f}% -> {:.2f}% ...' \
                      .format(previous_precision, prec))
                import warnings
                warnings.warn('test set Precision decreased. Keep Watching')
            else:
                previous_precision = prec
| true | true |
1c497cc803b8be1b63fb9e21a689f8082660622d | 600 | py | Python | tester.py | sjsafranek/asset_server | e036b87f629dade7f52a8a3e2b63ace52b32a88f | [
"MIT"
] | null | null | null | tester.py | sjsafranek/asset_server | e036b87f629dade7f52a8a3e2b63ace52b32a88f | [
"MIT"
] | null | null | null | tester.py | sjsafranek/asset_server | e036b87f629dade7f52a8a3e2b63ace52b32a88f | [
"MIT"
] | null | null | null | import requests
BASE_URL = "http://localhost:1111/api/v1/asset"

def _check(r):
    """Print the response body and abort the script on any non-200 status."""
    print(r.text)
    if 200 != r.status_code:
        exit()

# Upload a test image.  BUG FIX: the file handle was previously opened
# inline and never closed; the context manager releases it deterministically.
with open('test.jpg', 'rb') as image:
    r = requests.post(BASE_URL, files={'uploadfile': image})
_check(r)
asset_id = r.json()['data']['asset_id']

# Fetch the newly created asset.
r = requests.get("{0}/{1}".format(BASE_URL, asset_id))
_check(r)

# Delete it.
r = requests.delete("{0}/{1}".format(BASE_URL, asset_id))
_check(r)

# Fetch again after deletion; a non-200 status here ends the script as before.
r = requests.get("{0}/{1}".format(BASE_URL, asset_id))
_check(r)
| 22.222222 | 78 | 0.66 | import requests
def _expect_ok(resp):
    """Echo the response body and bail out on any non-200 status code."""
    print(resp.text)
    if 200 != resp.status_code:
        exit()

url = "http://localhost:1111/api/v1/asset"
# BUG FIX: close the uploaded file deterministically (it was leaked before).
with open('test.jpg', 'rb') as image:
    r = requests.post(url, files={'uploadfile': image})
_expect_ok(r)
asset_id = r.json()['data']['asset_id']
asset_url = "{0}/{1}".format(url, asset_id)

# Read back, delete, then read again (the last GET is expected to fail
# with a non-200 status after deletion, ending the script as before).
r = requests.get(asset_url)
_expect_ok(r)
r = requests.delete(asset_url)
_expect_ok(r)
r = requests.get(asset_url)
_expect_ok(r)
1c497e6e46579745df1fb77e24b216d3ac5774a7 | 21,159 | py | Python | ktrain/vision/models.py | husmen/ktrain | 4147b0bd146deb513c6f94505908294a5163efac | [
"Apache-2.0"
] | null | null | null | ktrain/vision/models.py | husmen/ktrain | 4147b0bd146deb513c6f94505908294a5163efac | [
"Apache-2.0"
] | null | null | null | ktrain/vision/models.py | husmen/ktrain | 4147b0bd146deb513c6f94505908294a5163efac | [
"Apache-2.0"
] | null | null | null | from ..imports import *
from .. import utils as U
from .wrn import create_wide_residual_network
PRETRAINED_RESNET50 = 'pretrained_resnet50'
PRETRAINED_MOBILENET = 'pretrained_mobilenet'
PRETRAINED_MOBILENETV3 = 'pretrained_mobilenetv3'
PRETRAINED_INCEPTION = 'pretrained_inception'
PRETRAINED_EFFICIENTNETB1 = 'pretrained_efficientnetb1'
PRETRAINED_EFFICIENTNETB7 = 'pretrained_efficientnetb7'
RESNET50 = 'resnet50'
MOBILENET = 'mobilenet'
MOBILENETV3 = 'mobilenetv3'
INCEPTION = 'inception'
EFFICIENTNETB1 = 'efficientnetb1'
EFFICIENTNETB7 = 'efficientnetb7'
CNN = 'default_cnn'
WRN22 = 'wrn22'
PRETRAINED_MODELS = [
PRETRAINED_RESNET50, PRETRAINED_MOBILENET, PRETRAINED_MOBILENETV3,
PRETRAINED_INCEPTION, PRETRAINED_EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB7
]
PREDEFINED_MODELS = PRETRAINED_MODELS + [
RESNET50, MOBILENET, MOBILENETV3, INCEPTION, EFFICIENTNETB1, EFFICIENTNETB7
]
IMAGE_CLASSIFIERS = {
PRETRAINED_RESNET50: '50-layer Residual Network (pretrained on ImageNet)',
RESNET50: '50-layer Resididual Network (randomly initialized) [https://arxiv.org/abs/1512.03385]',
PRETRAINED_MOBILENET: 'MobileNet Neural Network (pretrained on ImageNet)',
MOBILENET: 'MobileNet Neural Network (randomly initialized) [https://arxiv.org/abs/1704.04861]',
PRETRAINED_MOBILENETV3: 'MobileNetV3-Small Neural Network (pretrained on ImageNet)',
MOBILENETV3: 'MobileNetV3-Small Neural Network (randomly initialized) [https://arxiv.org/abs/1905.02244]',
PRETRAINED_INCEPTION: 'Inception Version 3 (pretrained on ImageNet)',
INCEPTION: 'Inception Version 3 (randomly initialized) [http://arxiv.org/abs/1512.00567]',
PRETRAINED_EFFICIENTNETB1: 'EfficientNet-B1 Neural Network (pretrained on ImageNet)',
EFFICIENTNETB1: 'EfficientNet-B1 Neural Network (pretrained on ImageNet) [https://arxiv.org/abs/1905.11946]',
PRETRAINED_EFFICIENTNETB7: 'EfficientNet-B7 Neural Network (pretrained on ImageNet)',
EFFICIENTNETB7: 'EfficientNet-B7 Neural Network (pretrained on ImageNet) [https://arxiv.org/abs/1905.11946]',
WRN22: '22-layer Wide Residual Network (randomly initialized)',
CNN : 'a default LeNet-like Convolutional Neural Network'}
def print_image_classifiers():
    """Print every supported image-classification model name with its description."""
    for model_name, description in IMAGE_CLASSIFIERS.items():
        print("{}: {}".format(model_name, description))
def print_image_regression_models():
    """Print every supported image-regression model name with its description."""
    for key in IMAGE_CLASSIFIERS:
        print("%s: %s" % (key, IMAGE_CLASSIFIERS[key]))
def pretrained_datagen(data, name):
    """
    ```
    Reconfigure the ImageDataGenerator behind *data* for the ImageNet
    preprocessing expected by the pretrained model *name*.

    Mutates data.image_data_generator in place.  No-op when *data* is
    falsy/not an iterator or *name* is not a pretrained model.
    ```
    """
    if not data or not U.is_iter(data): return
    idg = data.image_data_generator

    def _apply(preproc_fn, tag):
        # Single place that installs the backbone-specific preprocessing and
        # disables every generator-level normalization option, which would
        # otherwise conflict with the ImageNet preprocessing.  (Previously
        # these eight assignments were duplicated in all six branches.)
        idg.preprocessing_function = preproc_fn
        idg.ktrain_preproc = tag
        idg.rescale=None
        idg.featurewise_center=False
        idg.samplewise_center=False
        idg.featurewise_std_normalization=False
        idg.samplewise_std_normalization=False
        idg.zca_whitening = False

    # Branches (not an eager dict) keep the lazy lookup of the pre_* helpers,
    # some of which may be unavailable depending on the TensorFlow version.
    if name == PRETRAINED_RESNET50:
        _apply(pre_resnet50, 'resnet50')
    elif name == PRETRAINED_MOBILENET:
        _apply(pre_mobilenet, 'mobilenet')
    elif name == PRETRAINED_MOBILENETV3:
        _apply(pre_mobilenetv3small, 'mobilenetv3')
    elif name == PRETRAINED_INCEPTION:
        _apply(pre_inception, 'inception')
    elif name in (PRETRAINED_EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB7):
        _apply(pre_efficientnet, 'efficientnet')
    return
def image_classifier(name,
                     train_data,
                     val_data=None,
                     freeze_layers=None,
                     metrics=['accuracy'],
                     optimizer_name = U.DEFAULT_OPT,
                     multilabel=None,
                     pt_fc = [],
                     pt_ps = [],
                     verbose=1):
    """
    ```
    Build and compile a model for image classification, ready to be
    trained or fine-tuned.  All layers are trainable/unfrozen by default.

    Args:
        name (string): one of the models shown by ktrain.vision.print_image_classifiers
        train_data (image.Iterator): train data. Note: Will be manipulated here!
        val_data (image.Iterator): validation data. Note: Will be manipulated here!
        freeze_layers (int): number of beginning layers to make untrainable.
                            If None, all layers except newly added Dense layers
                            are frozen/untrainable.
        metrics (list): metrics to use
        optimizer_name(str): name of Keras optimizer (e.g., 'adam', 'sgd')
        multilabel(bool): True builds a multilabel model (labels not mutually
                          exclusive); False a binary/multiclass model; None
                          infers the multilabel status from the data.
        pt_fc (list of ints): sizes of extra Dense layers added before the
                              final Dense layer (pretrained models only).
        pt_ps (list of floats): dropout probabilities applied before each
                                extra Dense layer (pretrained models only).
        verbose (int): verbosity
    Return:
        model(Model): the compiled model ready to be fine-tuned/trained
    ```
    """
    # Thin wrapper: classification is the default behavior of image_model.
    shared = dict(val_data=val_data,
                  freeze_layers=freeze_layers,
                  metrics=metrics,
                  optimizer_name=optimizer_name,
                  multilabel=multilabel,
                  pt_fc=pt_fc,
                  pt_ps=pt_ps,
                  verbose=verbose)
    return image_model(name, train_data, **shared)
def image_regression_model(name,
                           train_data,
                           val_data=None,
                           freeze_layers=None,
                           metrics=['mae'],
                           optimizer_name = U.DEFAULT_OPT,
                           pt_fc = [],
                           pt_ps = [],
                           verbose=1):
    """
    ```
    Build and compile a model for image regression, ready to be trained
    or fine-tuned.  All layers are trainable/unfrozen by default.

    Args:
        name (string): one of the models shown by ktrain.vision.print_image_regression_models
        train_data (image.Iterator): train data. Note: Will be manipulated here!
        val_data (image.Iterator): validation data. Note: Will be manipulated here!
        freeze_layers (int): number of beginning layers to make untrainable.
                            If None, all layers except newly added Dense layers
                            are frozen/untrainable.
        metrics (list): metrics to use
        optimizer_name(str): name of Keras optimizer (e.g., 'adam', 'sgd')
        pt_fc (list of ints): sizes of extra Dense layers added before the
                              final Dense layer (pretrained models only).
        pt_ps (list of floats): dropout probabilities applied before each
                                extra Dense layer (pretrained models only).
        verbose (int): verbosity
    Return:
        model(Model): the compiled model ready to be fine-tuned/trained
    ```
    """
    # Regression targets are never multilabel, so that flag is pinned False.
    return image_model(name,
                       train_data,
                       val_data=val_data,
                       freeze_layers=freeze_layers,
                       metrics=metrics,
                       optimizer_name=optimizer_name,
                       multilabel=False,
                       pt_fc=pt_fc,
                       pt_ps=pt_ps,
                       verbose=verbose)
def image_model( name,
                 train_data,
                 val_data=None,
                 freeze_layers=None,
                 metrics=['accuracy'],
                 optimizer_name = U.DEFAULT_OPT,
                 multilabel=None,
                 pt_fc = [],
                 pt_ps = [],
                 verbose=1):
    """
    ```
    Returns a pre-defined/pre-trained model ready to be trained/fine-tuned
    for multi-class classification or regression. By default, all layers are
    trainable/unfrozen.
    Args:
        name (string): one of model shown on ktrain.vision.print_image_classifiers
        train_data (image.Iterator): train data. Note: Will be manipulated here!
        val_data (image.Iterator): validation data.  Note: Will be manipulated here!
        freeze_layers (int): number of beginning layers to make untrainable
                            If None, then all layers except new Dense layers
                            will be frozen/untrainable.
        metrics (list): metrics to use
        optimizer_name(str): name of Keras optimizer (e.g., 'adam', 'sgd')
        multilabel(bool):  If True, model will be build to support
                           multilabel classificaiton (labels are not mutually exclusive).
                           If False, binary/multiclassification model will be returned.
                           If None, multilabel status will be inferred from data.
        pt_fc (list of ints): number of hidden units in extra Dense layers
                                before final Dense layer of pretrained model.
                                Only takes effect if name in PRETRAINED_MODELS
        pt_ps (list of floats): dropout probabilities to use before
                                each extra Dense layer in pretrained model.
                                Only takes effect if name in PRETRAINED_MODELS
        verbose (int): verbosity
    Return:
        model(Model): the compiled model ready to be fine-tuned/trained
    ```
    """
    # arg check
    U.data_arg_check(train_data=train_data, train_required=True)
    if name not in list(IMAGE_CLASSIFIERS.keys()):
        raise ValueError('Unknown or unsupported model: %s' % (name))
    if not U.is_iter(train_data):
        raise ValueError('train_data must be an Keras iterator ' +\
                         '(e.g., DirectoryIterator, DataframIterator, '+ \
                         'NumpyArrayIterator) - please use the ktrain.data.images_from* ' +\
                         'functions')
    # check for MobileNetV3: only available in sufficiently new TF versions
    if name in [PRETRAINED_MOBILENETV3, MOBILENETV3] and not HAS_MOBILENETV3:
        raise ValueError(f'You chose {name}, but it does not appear to be available in your version of TensorFlow.')
    # set pretrained flag
    pretrained = True if name in PRETRAINED_MODELS else False
    # adjust freeze_layers with warning: only pretrained models support freezing
    if not pretrained and freeze_layers is not None and freeze_layers > 0:
        warnings.warn('Only pretrained models (e.g., pretrained_resnet50) support freeze_layers. ' +\
                     'Setting freeze_layers to 0. Use one of the following models if' +\
                     'desiring a model pretrained on ImageNet: %s' % (PRETRAINED_MODELS))
        freeze_layers = 0
    if pretrained and val_data is None:
        raise ValueError('val_data is required if selecting a pretrained model, '+\
                         'as normalization scheme will be altered.')
    # adjust the data augmentation based on model selected: pretrained weights
    # require their own ImageNet preprocessing, so both generators are mutated
    if pretrained:
        pretrained_datagen(train_data, name)
        pretrained_datagen(val_data, name)
        U.vprint('The normalization scheme has been changed for use with a %s' % (name) +\
                 ' model. If you decide to use a different model, please reload your' +\
                 ' dataset with a ktrain.vision.data.images_from* function.\n', verbose=verbose)
    # determine if multilabel
    if multilabel is None:
        multilabel = U.is_multilabel(train_data)
    is_regression=False
    # a 1-D label batch (no one-hot axis) is treated as regression targets
    if not multilabel and len(train_data[0][-1].shape) == 1: is_regression=True
    # set loss and activations for the detected task type
    loss_func = 'categorical_crossentropy'
    activation = 'softmax'
    if multilabel:
        loss_func = 'binary_crossentropy'
        activation = 'sigmoid'
    elif is_regression:
        loss_func = 'mse'
        activation = None
        # swap the default classification metric for a regression one
        if metrics == ['accuracy']: metrics = ['mae']
    U.vprint("Is Multi-Label? %s" % (multilabel), verbose=verbose)
    U.vprint("Is Regression? %s" % (is_regression), verbose=verbose)
    # determine number of classes and shape (regression has a single output)
    num_classes = 1 if is_regression else U.nclasses_from_data(train_data)
    input_shape = U.shape_from_data(train_data)
    #------------
    # build model
    #------------
    model = build_visionmodel(name,
                              num_classes,
                              input_shape=input_shape,
                              freeze_layers=freeze_layers,
                              activation=activation,
                              pt_fc = pt_fc,
                              pt_ps = pt_ps)
    model.compile(optimizer=optimizer_name, loss=loss_func, metrics=metrics)
    return model
def build_visionmodel(name,
                      num_classes,
                      input_shape=(224,224,3),
                      freeze_layers=2,
                      activation='softmax',
                      pt_fc=[],
                      pt_ps = []):
    """Construct (but do not compile) the architecture selected by *name*.

    Dispatches to the locally-defined CNN, the wide residual network, or one
    of the keras.applications-based models; raises ValueError otherwise.
    """
    if name == CNN:
        model = build_cnn(num_classes,
                          input_shape=input_shape,
                          activation=activation)
    elif name == WRN22:
        model = create_wide_residual_network(input_shape, nb_classes=num_classes,
                                             N=3, k=6, dropout=0.00,
                                             activation=activation, verbose=0)
    elif name in PREDEFINED_MODELS:
        model = build_predefined(name, num_classes,
                                 input_shape=input_shape,
                                 freeze_layers=freeze_layers,
                                 activation=activation,
                                 pt_fc=pt_fc,
                                 pt_ps=pt_ps)
    else:
        raise ValueError('Unknown model: %s' % (name))
    U.vprint('%s model created.' % (name))
    return model
def build_cnn(num_classes,
              input_shape=(28,28,1),
              activation='softmax'):
    """LeNet-like CNN: three conv stages with pooling/dropout, then a dense head."""
    stack = [
        Conv2D(32, kernel_size=(3, 3), activation='relu',
               kernel_initializer='he_normal', input_shape=input_shape),
        Conv2D(32, kernel_size=(3, 3), activation='relu',
               kernel_initializer='he_normal'),
        MaxPooling2D((2, 2)),
        Dropout(0.20),
        Conv2D(64, (3, 3), activation='relu', padding='same',
               kernel_initializer='he_normal'),
        Conv2D(64, (3, 3), activation='relu', padding='same',
               kernel_initializer='he_normal'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(128, (3, 3), activation='relu', padding='same',
               kernel_initializer='he_normal'),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        BatchNormalization(),
        Dropout(0.25),
        Dense(num_classes, activation=activation),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    return model
def build_predefined(
                    name,
                    num_classes,
                    input_shape=(224,224,3),
                    freeze_layers=None,
                    activation='softmax',
                    pt_fc=[],
                    pt_ps=[]):
    """
    ```
    Builds a pre-defined architecture supported in Keras.
    Args:
        name (str): one of ktrain.vision.model.PREDEFINED_MODELS
        num_classes (int): # of classes
        input_shape (tuple): the input shape including channels
        freeze_layers (int): number of early layers to freeze.
                             Only takes effect if name in PRETRAINED_MODELS.
                             If None and name in PRETRAINED_MODELS,
                             all layers except the "custom head"
                             fully-connected (Dense) layers are frozen.
        activation (str): name of the Keras activation to use in final layer
        pt_fc (list of ints): number of hidden units in extra Dense layers
                                before final Dense layer of pretrained model
        pt_ps (list of floats): dropout probabilities to use before
                                each extra Dense layer in pretrained model
    ```
    """
    # default parameters: the base network is always built without its
    # original ImageNet classification head
    include_top = False
    input_tensor = None
    dropout = 0.5 # final dropout

    # setup pretrained: load ImageNet weights only for pretrained_* names
    weights = 'imagenet' if name in PRETRAINED_MODELS else None

    # setup the pretrained network (backbone selected by name)
    if name in [RESNET50, PRETRAINED_RESNET50]:
        # ResNet50 construction can emit deprecation warnings; suppress them
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            net = ResNet50(include_top=include_top,
                           weights=weights,
                           input_tensor=input_tensor,
                           input_shape = input_shape)
    elif name in [MOBILENET, PRETRAINED_MOBILENET]:
        net = MobileNet(include_top=include_top,
                        weights=weights,
                        input_tensor=input_tensor,
                        input_shape = input_shape)
    elif name in [MOBILENETV3, PRETRAINED_MOBILENETV3]:
        net = MobileNetV3Small(include_top=include_top,
                        weights=weights,
                        input_tensor=input_tensor,
                        input_shape = input_shape)
    elif name in [INCEPTION, PRETRAINED_INCEPTION]:
        net = InceptionV3(include_top=include_top,
                          weights=weights,
                          input_tensor=input_tensor,
                          input_shape = input_shape)
    elif name in [EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB1]:
        net = EfficientNetB1(include_top=include_top,
                          weights=weights,
                          input_tensor=input_tensor,
                          input_shape = input_shape)
    elif name in [EFFICIENTNETB7, PRETRAINED_EFFICIENTNETB7]:
        net = EfficientNetB7(include_top=include_top,
                          weights=weights,
                          input_tensor=input_tensor,
                          input_shape = input_shape)
    else:
        raise ValueError('Unsupported model: %s' % (name))

    # freeze the whole backbone by default; only the new head will train
    # (overridden below when an explicit freeze_layers count is given)
    if freeze_layers is None:
        for layer in net.layers:
            layer.trainable = False

    x = net.output
    x = Flatten()(x)
    # xtra FCs in pretrained model: optional Dropout+Dense pairs before the head
    if name in PRETRAINED_MODELS:
        if len(pt_fc) != len(pt_ps):
            raise ValueError('size off xtra_fc must match size of fc_dropouts')
        for i, fc in enumerate(pt_fc):
            p = pt_ps[i]
            fc_name = "fc%s" % (i)
            if p is not None:
                x = Dropout(p)(x)
            x = Dense(fc, activation='relu',
                      kernel_initializer='he_normal', name=fc_name)(x)
    # final FC
    x = Dropout(dropout)(x)
    output_layer = Dense(num_classes, activation=activation, name=activation)(x)
    model = Model(inputs=net.input, outputs=output_layer)

    if freeze_layers is not None:
        # set certain earlier layers as non-trainable
        for layer in model.layers[:freeze_layers]:
            layer.trainable = False
        for layer in model.layers[freeze_layers:]:
            layer.trainable = True

    # return the assembled (uncompiled) model; compilation happens in image_model
    return model
| 42.745455 | 130 | 0.598658 | from ..imports import *
from .. import utils as U
from .wrn import create_wide_residual_network
PRETRAINED_RESNET50 = 'pretrained_resnet50'
PRETRAINED_MOBILENET = 'pretrained_mobilenet'
PRETRAINED_MOBILENETV3 = 'pretrained_mobilenetv3'
PRETRAINED_INCEPTION = 'pretrained_inception'
PRETRAINED_EFFICIENTNETB1 = 'pretrained_efficientnetb1'
PRETRAINED_EFFICIENTNETB7 = 'pretrained_efficientnetb7'
RESNET50 = 'resnet50'
MOBILENET = 'mobilenet'
MOBILENETV3 = 'mobilenetv3'
INCEPTION = 'inception'
EFFICIENTNETB1 = 'efficientnetb1'
EFFICIENTNETB7 = 'efficientnetb7'
CNN = 'default_cnn'
WRN22 = 'wrn22'
PRETRAINED_MODELS = [
PRETRAINED_RESNET50, PRETRAINED_MOBILENET, PRETRAINED_MOBILENETV3,
PRETRAINED_INCEPTION, PRETRAINED_EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB7
]
PREDEFINED_MODELS = PRETRAINED_MODELS + [
RESNET50, MOBILENET, MOBILENETV3, INCEPTION, EFFICIENTNETB1, EFFICIENTNETB7
]
IMAGE_CLASSIFIERS = {
PRETRAINED_RESNET50: '50-layer Residual Network (pretrained on ImageNet)',
RESNET50: '50-layer Resididual Network (randomly initialized) [https://arxiv.org/abs/1512.03385]',
PRETRAINED_MOBILENET: 'MobileNet Neural Network (pretrained on ImageNet)',
MOBILENET: 'MobileNet Neural Network (randomly initialized) [https://arxiv.org/abs/1704.04861]',
PRETRAINED_MOBILENETV3: 'MobileNetV3-Small Neural Network (pretrained on ImageNet)',
MOBILENETV3: 'MobileNetV3-Small Neural Network (randomly initialized) [https://arxiv.org/abs/1905.02244]',
PRETRAINED_INCEPTION: 'Inception Version 3 (pretrained on ImageNet)',
INCEPTION: 'Inception Version 3 (randomly initialized) [http://arxiv.org/abs/1512.00567]',
PRETRAINED_EFFICIENTNETB1: 'EfficientNet-B1 Neural Network (pretrained on ImageNet)',
EFFICIENTNETB1: 'EfficientNet-B1 Neural Network (pretrained on ImageNet) [https://arxiv.org/abs/1905.11946]',
PRETRAINED_EFFICIENTNETB7: 'EfficientNet-B7 Neural Network (pretrained on ImageNet)',
EFFICIENTNETB7: 'EfficientNet-B7 Neural Network (pretrained on ImageNet) [https://arxiv.org/abs/1905.11946]',
WRN22: '22-layer Wide Residual Network (randomly initialized)',
CNN : 'a default LeNet-like Convolutional Neural Network'}
def print_image_classifiers():
    """Print each available image-classification model name and description."""
    for entry in IMAGE_CLASSIFIERS.items():
        print("%s: %s" % entry)
def print_image_regression_models():
    """Print the models available for image regression (same architectures
    as the classifiers; only the output head differs)."""
    for model_name, description in IMAGE_CLASSIFIERS.items():
        print("%s: %s" % (model_name, description))
def pretrained_datagen(data, name):
    """Reconfigure the ImageDataGenerator attached to ``data`` for a
    pretrained model.

    Installs the architecture-specific preprocessing function, tags the
    generator with ``ktrain_preproc``, and disables every statistics-based
    normalization option (the pretrained preprocessing function takes over).

    Args:
        data: a Keras image iterator (must expose ``image_data_generator``);
              no-op if falsy or not an iterator.
        name (str): one of the PRETRAINED_* model-name constants; no-op for
              any other name.
    """
    if not data or not U.is_iter(data): return
    idg = data.image_data_generator
    # Resolve the preprocessing function lazily (branch-by-branch) so that
    # optional backends (e.g., MobileNetV3 when HAS_MOBILENETV3 is False)
    # are only referenced when actually selected.
    if name == PRETRAINED_RESNET50:
        func, tag = pre_resnet50, 'resnet50'
    elif name == PRETRAINED_MOBILENET:
        func, tag = pre_mobilenet, 'mobilenet'
    elif name == PRETRAINED_MOBILENETV3:
        func, tag = pre_mobilenetv3small, 'mobilenetv3'
    elif name == PRETRAINED_INCEPTION:
        func, tag = pre_inception, 'inception'
    elif name in (PRETRAINED_EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB7):
        func, tag = pre_efficientnet, 'efficientnet'
    else:
        return
    idg.preprocessing_function = func
    idg.ktrain_preproc = tag
    # disable all other normalization: the preprocessing function above
    # performs the model-specific scaling/centering itself
    idg.rescale = None
    idg.featurewise_center = False
    idg.samplewise_center = False
    idg.featurewise_std_normalization = False
    idg.samplewise_std_normalization = False
    idg.zca_whitening = False
    return
def image_classifier(name,
                     train_data,
                     val_data=None,
                     freeze_layers=None, 
                     metrics=['accuracy'],
                     optimizer_name = U.DEFAULT_OPT,
                     multilabel=None,
                     pt_fc = [],
                     pt_ps = [],
                     verbose=1):
    """Build a classification model: thin convenience wrapper that forwards
    all arguments to image_model unchanged."""
    kwargs = dict(val_data=val_data,
                  freeze_layers=freeze_layers,
                  metrics=metrics,
                  optimizer_name=optimizer_name,
                  multilabel=multilabel,
                  pt_fc=pt_fc,
                  pt_ps=pt_ps,
                  verbose=verbose)
    return image_model(name, train_data, **kwargs)
def image_regression_model(name,
                           train_data,
                           val_data=None,
                           freeze_layers=None, 
                           metrics=['mae'],
                           optimizer_name = U.DEFAULT_OPT,
                           pt_fc = [],
                           pt_ps = [],
                           verbose=1):
    """Build a regression model: forwards to image_model with
    multilabel pinned to False (regression is never multilabel)."""
    kwargs = dict(val_data=val_data,
                  freeze_layers=freeze_layers,
                  metrics=metrics,
                  optimizer_name=optimizer_name,
                  multilabel=False,
                  pt_fc=pt_fc,
                  pt_ps=pt_ps,
                  verbose=verbose)
    return image_model(name, train_data, **kwargs)
def image_model( name,
                 train_data,
                 val_data=None,
                 freeze_layers=None, 
                 metrics=['accuracy'],
                 optimizer_name = U.DEFAULT_OPT,
                 multilabel=None,
                 pt_fc = [],
                 pt_ps = [],
                 verbose=1):
    """Build and compile a vision model by name.

    Validates the inputs, re-normalizes the data generators when a
    pretrained model is selected, infers the task type (multilabel /
    regression / single-label classification), then builds and compiles
    the model.

    Args:
        name (str): a key of IMAGE_CLASSIFIERS.
        train_data: Keras image iterator (required).
        val_data: validation iterator; required for pretrained models
                  because their normalization scheme is altered here.
        freeze_layers (int): number of initial layers to freeze
                  (pretrained models only).
        metrics (list): metrics for compile; switched to ['mae'] for
                  regression when left at the default.
        optimizer_name: optimizer passed to compile.
        multilabel (bool): force multilabel; inferred from data if None.
        pt_fc / pt_ps (list): sizes / dropout rates of extra FC layers
                  for pretrained models.
        verbose (int): verbosity for informational messages.

    Returns:
        a compiled Keras model.

    Raises:
        ValueError: unknown model name, non-iterator train_data,
                    unavailable MobileNetV3, or missing val_data for a
                    pretrained model.
    """
    # ---- input validation ----
    U.data_arg_check(train_data=train_data, train_required=True)
    if name not in list(IMAGE_CLASSIFIERS.keys()):
        raise ValueError('Unknown or unsupported model: %s' % (name))
    if not U.is_iter(train_data):
        raise ValueError('train_data must be an Keras iterator ' +\
                         '(e.g., DirectoryIterator, DataframIterator, '+ \
                         'NumpyArrayIterator) - please use the ktrain.data.images_from* ' +\
                         'functions')
    if name in [PRETRAINED_MOBILENETV3, MOBILENETV3] and not HAS_MOBILENETV3:
        raise ValueError(f'You chose {name}, but it does not appear to be available in your version of TensorFlow.')
    # freeze_layers only makes sense for pretrained models; warn and reset
    pretrained = True if name in PRETRAINED_MODELS else False
    if not pretrained and freeze_layers is not None and freeze_layers > 0:
        warnings.warn('Only pretrained models (e.g., pretrained_resnet50) support freeze_layers. ' +\
                      'Setting freeze_layers to 0. Use one of the following models if' +\
                      'desiring a model pretrained on ImageNet: %s' % (PRETRAINED_MODELS))
        freeze_layers = 0
    if pretrained and val_data is None:
        raise ValueError('val_data is required if selecting a pretrained model, '+\
                         'as normalization scheme will be altered.')
    # ---- switch generators to the pretrained model's preprocessing ----
    if pretrained:
        pretrained_datagen(train_data, name)
        pretrained_datagen(val_data, name)
        U.vprint('The normalization scheme has been changed for use with a %s' % (name) +\
                 ' model. If you decide to use a different model, please reload your' +\
                 ' dataset with a ktrain.vision.data.images_from* function.\n', verbose=verbose)
    # ---- infer task type: multilabel / regression / classification ----
    if multilabel is None:
        multilabel = U.is_multilabel(train_data)
    # a 1-D target (no one-hot class axis) is treated as regression
    is_regression=False
    if not multilabel and len(train_data[0][-1].shape) == 1: is_regression=True
    # choose loss/activation (and default metric) for the task
    loss_func = 'categorical_crossentropy'
    activation = 'softmax'
    if multilabel:
        loss_func = 'binary_crossentropy'
        activation = 'sigmoid'
    elif is_regression:
        loss_func = 'mse'
        activation = None
        if metrics == ['accuracy']: metrics = ['mae']
    U.vprint("Is Multi-Label? %s" % (multilabel), verbose=verbose)
    U.vprint("Is Regression? %s" % (is_regression), verbose=verbose)
    # ---- build and compile ----
    num_classes = 1 if is_regression else U.nclasses_from_data(train_data)
    input_shape = U.shape_from_data(train_data)
    model = build_visionmodel(name,
                              num_classes,
                              input_shape=input_shape,
                              freeze_layers=freeze_layers,
                              activation=activation,
                              pt_fc = pt_fc,
                              pt_ps = pt_ps)
    model.compile(optimizer=optimizer_name, loss=loss_func, metrics=metrics)
    return model
def build_visionmodel(name,
                      num_classes,
                      input_shape=(224,224,3),
                      freeze_layers=2,
                      activation='softmax',
                      pt_fc=[],
                      pt_ps = []):
    """Construct a vision model by name: the default small CNN, a 22-layer
    wide residual network, or one of the predefined (optionally pretrained)
    architectures.  Raises ValueError for unknown names."""
    if name == CNN:
        model = build_cnn(num_classes,
                          input_shape=input_shape,
                          activation=activation)
    elif name == WRN22:
        model = create_wide_residual_network(input_shape, nb_classes=num_classes,
                                             N=3, k=6, dropout=0.00,
                                             activation=activation, verbose=0)
    elif name in PREDEFINED_MODELS:
        model = build_predefined(name, num_classes,
                                 input_shape=input_shape,
                                 freeze_layers=freeze_layers,
                                 activation=activation,
                                 pt_fc=pt_fc,
                                 pt_ps=pt_ps)
    else:
        raise ValueError('Unknown model: %s' % (name))
    U.vprint('%s model created.' % (name))
    return model
def build_cnn(num_classes,
              input_shape=(28,28,1),
              activation='softmax'):
    """Build the default LeNet-like CNN: three conv blocks followed by a
    dense head ending in `num_classes` units with `activation`."""
    layers = [
        # block 1: two 3x3 convs, pool, light dropout
        Conv2D(32, kernel_size=(3, 3),activation='relu',
               kernel_initializer='he_normal',input_shape=input_shape),
        Conv2D(32, kernel_size=(3, 3),activation='relu',
               kernel_initializer='he_normal'),
        MaxPooling2D((2, 2)),
        Dropout(0.20),
        # block 2
        Conv2D(64, (3, 3), activation='relu',padding='same',
               kernel_initializer='he_normal'),
        Conv2D(64, (3, 3), activation='relu',padding='same',
               kernel_initializer='he_normal'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        # block 3
        Conv2D(128, (3, 3), activation='relu',padding='same',
               kernel_initializer='he_normal'),
        Dropout(0.25),
        # dense head
        Flatten(),
        Dense(128, activation='relu'),
        BatchNormalization(),
        Dropout(0.25),
        Dense(num_classes, activation=activation),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model
def build_predefined(
    name,
    num_classes,
    input_shape=(224,224,3),
    freeze_layers=None,
    activation='softmax',
    pt_fc=[],
    pt_ps=[]):
    """Build one of the predefined architectures topped with a new output head.

    Args:
        name (str): a member of PREDEFINED_MODELS.
        num_classes (int): units of the final Dense layer.
        input_shape (tuple): input image shape.
        freeze_layers (int): if None, freeze the whole convolutional base;
              otherwise freeze the first `freeze_layers` layers of the
              assembled model and train the rest.
        activation (str): activation of the output layer (also its name).
        pt_fc (list): sizes of extra FC layers inserted before the output
              (pretrained models only).
        pt_ps (list): dropout rates paired with pt_fc (None = no dropout).

    Returns:
        an uncompiled Keras Model.

    Raises:
        ValueError: unsupported name, or len(pt_fc) != len(pt_ps).
    """
    include_top = False
    input_tensor = None
    dropout = 0.5   # dropout applied just before the output layer
    # pretrained variants load ImageNet weights; plain variants start random
    weights = 'imagenet' if name in PRETRAINED_MODELS else None
    # shared constructor kwargs (previously copy-pasted into every branch)
    base_kwargs = dict(include_top=include_top,
                       weights=weights,
                       input_tensor=input_tensor,
                       input_shape=input_shape)
    if name in [RESNET50, PRETRAINED_RESNET50]:
        # suppress deprecation chatter emitted during ResNet50 construction
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            net = ResNet50(**base_kwargs)
    elif name in [MOBILENET, PRETRAINED_MOBILENET]:
        net = MobileNet(**base_kwargs)
    elif name in [MOBILENETV3, PRETRAINED_MOBILENETV3]:
        net = MobileNetV3Small(**base_kwargs)
    elif name in [INCEPTION, PRETRAINED_INCEPTION]:
        net = InceptionV3(**base_kwargs)
    elif name in [EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB1]:
        net = EfficientNetB1(**base_kwargs)
    elif name in [EFFICIENTNETB7, PRETRAINED_EFFICIENTNETB7]:
        net = EfficientNetB7(**base_kwargs)
    else:
        raise ValueError('Unsupported model: %s' % (name))

    if freeze_layers is None:
        # default: freeze the entire convolutional base
        for layer in net.layers:
            layer.trainable = False

    # new head: flatten, optional extra FC layers, dropout, output
    x = Flatten()(net.output)
    if name in PRETRAINED_MODELS:
        # fixed error message: it previously referred to the nonexistent
        # parameters 'xtra_fc' and 'fc_dropouts'
        if len(pt_fc) != len(pt_ps):
            raise ValueError('size of pt_fc must match size of pt_ps')
        for i, fc in enumerate(pt_fc):
            p = pt_ps[i]
            fc_name = "fc%s" % (i)
            if p is not None:
                x = Dropout(p)(x)
            x = Dense(fc, activation='relu',
                      kernel_initializer='he_normal', name=fc_name)(x)
    x = Dropout(dropout)(x)
    output_layer = Dense(num_classes, activation=activation, name=activation)(x)
    model = Model(inputs=net.input, outputs=output_layer)

    if freeze_layers is not None:
        # explicit split: freeze the first freeze_layers layers, train the rest
        for layer in model.layers[:freeze_layers]:
            layer.trainable = False
        for layer in model.layers[freeze_layers:]:
            layer.trainable = True
    return model
| true | true |
1c497ec5d0c301db41eee7e775d56ae2985ce8dc | 10,074 | py | Python | octopus_deploy_swagger_client/models/root_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | octopus_deploy_swagger_client/models/root_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | octopus_deploy_swagger_client/models/root_resource.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RootResource(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # NOTE(review): generated model for the Octopus Deploy API root document;
    # changes should normally be made in the generator template, not here.
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'id': 'str',
        'application': 'str',
        'version': 'str',
        'api_version': 'str',
        'installation_id': 'str',
        'is_early_access_program': 'bool',
        'has_long_term_support': 'bool',
        'last_modified_on': 'datetime',
        'last_modified_by': 'str',
        'links': 'dict(str, str)'
    }

    # maps python attribute names to the JSON keys used on the wire
    attribute_map = {
        'id': 'Id',
        'application': 'Application',
        'version': 'Version',
        'api_version': 'ApiVersion',
        'installation_id': 'InstallationId',
        'is_early_access_program': 'IsEarlyAccessProgram',
        'has_long_term_support': 'HasLongTermSupport',
        'last_modified_on': 'LastModifiedOn',
        'last_modified_by': 'LastModifiedBy',
        'links': 'Links'
    }

    def __init__(self, id=None, application=None, version=None, api_version=None, installation_id=None, is_early_access_program=False, has_long_term_support=False, last_modified_on=None, last_modified_by=None, links=None):  # noqa: E501
        """RootResource - a model defined in Swagger"""  # noqa: E501

        self._id = None
        self._application = None
        self._version = None
        self._api_version = None
        self._installation_id = None
        self._is_early_access_program = None
        self._has_long_term_support = None
        self._last_modified_on = None
        self._last_modified_by = None
        self._links = None
        self.discriminator = None
        # assign only the provided values; unset attributes stay None
        if id is not None:
            self.id = id
        if application is not None:
            self.application = application
        if version is not None:
            self.version = version
        if api_version is not None:
            self.api_version = api_version
        if installation_id is not None:
            self.installation_id = installation_id
        if is_early_access_program is not None:
            self.is_early_access_program = is_early_access_program
        if has_long_term_support is not None:
            self.has_long_term_support = has_long_term_support
        if last_modified_on is not None:
            self.last_modified_on = last_modified_on
        if last_modified_by is not None:
            self.last_modified_by = last_modified_by
        if links is not None:
            self.links = links

    @property
    def id(self):
        """Gets the id of this RootResource.  # noqa: E501


        :return: The id of this RootResource.  # noqa: E501
        :rtype: str
        """
        return self._id

    @id.setter
    def id(self, id):
        """Sets the id of this RootResource.


        :param id: The id of this RootResource.  # noqa: E501
        :type: str
        """

        self._id = id

    @property
    def application(self):
        """Gets the application of this RootResource.  # noqa: E501


        :return: The application of this RootResource.  # noqa: E501
        :rtype: str
        """
        return self._application

    @application.setter
    def application(self, application):
        """Sets the application of this RootResource.


        :param application: The application of this RootResource.  # noqa: E501
        :type: str
        """

        self._application = application

    @property
    def version(self):
        """Gets the version of this RootResource.  # noqa: E501


        :return: The version of this RootResource.  # noqa: E501
        :rtype: str
        """
        return self._version

    @version.setter
    def version(self, version):
        """Sets the version of this RootResource.


        :param version: The version of this RootResource.  # noqa: E501
        :type: str
        """

        self._version = version

    @property
    def api_version(self):
        """Gets the api_version of this RootResource.  # noqa: E501


        :return: The api_version of this RootResource.  # noqa: E501
        :rtype: str
        """
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        """Sets the api_version of this RootResource.


        :param api_version: The api_version of this RootResource.  # noqa: E501
        :type: str
        """

        self._api_version = api_version

    @property
    def installation_id(self):
        """Gets the installation_id of this RootResource.  # noqa: E501


        :return: The installation_id of this RootResource.  # noqa: E501
        :rtype: str
        """
        return self._installation_id

    @installation_id.setter
    def installation_id(self, installation_id):
        """Sets the installation_id of this RootResource.


        :param installation_id: The installation_id of this RootResource.  # noqa: E501
        :type: str
        """

        self._installation_id = installation_id

    @property
    def is_early_access_program(self):
        """Gets the is_early_access_program of this RootResource.  # noqa: E501


        :return: The is_early_access_program of this RootResource.  # noqa: E501
        :rtype: bool
        """
        return self._is_early_access_program

    @is_early_access_program.setter
    def is_early_access_program(self, is_early_access_program):
        """Sets the is_early_access_program of this RootResource.


        :param is_early_access_program: The is_early_access_program of this RootResource.  # noqa: E501
        :type: bool
        """

        self._is_early_access_program = is_early_access_program

    @property
    def has_long_term_support(self):
        """Gets the has_long_term_support of this RootResource.  # noqa: E501


        :return: The has_long_term_support of this RootResource.  # noqa: E501
        :rtype: bool
        """
        return self._has_long_term_support

    @has_long_term_support.setter
    def has_long_term_support(self, has_long_term_support):
        """Sets the has_long_term_support of this RootResource.


        :param has_long_term_support: The has_long_term_support of this RootResource.  # noqa: E501
        :type: bool
        """

        self._has_long_term_support = has_long_term_support

    @property
    def last_modified_on(self):
        """Gets the last_modified_on of this RootResource.  # noqa: E501


        :return: The last_modified_on of this RootResource.  # noqa: E501
        :rtype: datetime
        """
        return self._last_modified_on

    @last_modified_on.setter
    def last_modified_on(self, last_modified_on):
        """Sets the last_modified_on of this RootResource.


        :param last_modified_on: The last_modified_on of this RootResource.  # noqa: E501
        :type: datetime
        """

        self._last_modified_on = last_modified_on

    @property
    def last_modified_by(self):
        """Gets the last_modified_by of this RootResource.  # noqa: E501


        :return: The last_modified_by of this RootResource.  # noqa: E501
        :rtype: str
        """
        return self._last_modified_by

    @last_modified_by.setter
    def last_modified_by(self, last_modified_by):
        """Sets the last_modified_by of this RootResource.


        :param last_modified_by: The last_modified_by of this RootResource.  # noqa: E501
        :type: str
        """

        self._last_modified_by = last_modified_by

    @property
    def links(self):
        """Gets the links of this RootResource.  # noqa: E501


        :return: The links of this RootResource.  # noqa: E501
        :rtype: dict(str, str)
        """
        return self._links

    @links.setter
    def links(self, links):
        """Sets the links of this RootResource.


        :param links: The links of this RootResource.  # noqa: E501
        :type: dict(str, str)
        """

        self._links = links

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            # recursively serialize nested models, lists and dicts
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(RootResource, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, RootResource):
            return False

        # attribute-wise comparison via the instance dicts
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 28.782857 | 236 | 0.611376 |
import pprint
import re
import six
class RootResource(object):
swagger_types = {
'id': 'str',
'application': 'str',
'version': 'str',
'api_version': 'str',
'installation_id': 'str',
'is_early_access_program': 'bool',
'has_long_term_support': 'bool',
'last_modified_on': 'datetime',
'last_modified_by': 'str',
'links': 'dict(str, str)'
}
attribute_map = {
'id': 'Id',
'application': 'Application',
'version': 'Version',
'api_version': 'ApiVersion',
'installation_id': 'InstallationId',
'is_early_access_program': 'IsEarlyAccessProgram',
'has_long_term_support': 'HasLongTermSupport',
'last_modified_on': 'LastModifiedOn',
'last_modified_by': 'LastModifiedBy',
'links': 'Links'
}
def __init__(self, id=None, application=None, version=None, api_version=None, installation_id=None, is_early_access_program=False, has_long_term_support=False, last_modified_on=None, last_modified_by=None, links=None):
self._id = None
self._application = None
self._version = None
self._api_version = None
self._installation_id = None
self._is_early_access_program = None
self._has_long_term_support = None
self._last_modified_on = None
self._last_modified_by = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if application is not None:
self.application = application
if version is not None:
self.version = version
if api_version is not None:
self.api_version = api_version
if installation_id is not None:
self.installation_id = installation_id
if is_early_access_program is not None:
self.is_early_access_program = is_early_access_program
if has_long_term_support is not None:
self.has_long_term_support = has_long_term_support
if last_modified_on is not None:
self.last_modified_on = last_modified_on
if last_modified_by is not None:
self.last_modified_by = last_modified_by
if links is not None:
self.links = links
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def application(self):
return self._application
@application.setter
def application(self, application):
self._application = application
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, api_version):
self._api_version = api_version
@property
def installation_id(self):
return self._installation_id
@installation_id.setter
def installation_id(self, installation_id):
self._installation_id = installation_id
@property
def is_early_access_program(self):
return self._is_early_access_program
@is_early_access_program.setter
def is_early_access_program(self, is_early_access_program):
self._is_early_access_program = is_early_access_program
@property
def has_long_term_support(self):
return self._has_long_term_support
@has_long_term_support.setter
def has_long_term_support(self, has_long_term_support):
self._has_long_term_support = has_long_term_support
@property
def last_modified_on(self):
return self._last_modified_on
@last_modified_on.setter
def last_modified_on(self, last_modified_on):
self._last_modified_on = last_modified_on
@property
def last_modified_by(self):
return self._last_modified_by
@last_modified_by.setter
def last_modified_by(self, last_modified_by):
self._last_modified_by = last_modified_by
@property
def links(self):
return self._links
@links.setter
def links(self, links):
self._links = links
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RootResource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, RootResource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
1c497f273191aaa9f08c21c995e05301e9578810 | 546 | py | Python | python/collatz_conjecture.py | lsantosdemoura/clojure-algorithms | 56696b7b6544f37d736135cac6b03342fdeb4825 | [
"MIT"
] | null | null | null | python/collatz_conjecture.py | lsantosdemoura/clojure-algorithms | 56696b7b6544f37d736135cac6b03342fdeb4825 | [
"MIT"
] | null | null | null | python/collatz_conjecture.py | lsantosdemoura/clojure-algorithms | 56696b7b6544f37d736135cac6b03342fdeb4825 | [
"MIT"
] | null | null | null | def calculate_sieve(number):
if number <= 0:
print(f'{number} is less than or equal to 0, enter another number please:')
ask_number()
else:
count = 1
while number != 1:
if number % 2 == 0:
number = number // 2
else:
number = (number * 3) + 1
count += 1
print(count)
def ask_number():
    """Prompt for an integer and run the Collatz computation on it.

    Re-prompts (instead of crashing with ValueError, as before) when the
    input is not a valid integer.
    """
    try:
        entered_number = int(input("Enter a number bigger than 0: "))
    except ValueError:
        print('That is not a valid integer, enter another number please:')
        ask_number()
    else:
        calculate_sieve(entered_number)
if __name__ == '__main__':
ask_number()
| 23.73913 | 83 | 0.53663 | def calculate_sieve(number):
if number <= 0:
print(f'{number} is less than or equal to 0, enter another number please:')
ask_number()
else:
count = 1
while number != 1:
if number % 2 == 0:
number = number // 2
else:
number = (number * 3) + 1
count += 1
print(count)
def ask_number():
entered_number = int(input("Enter a number bigger than 0: "))
calculate_sieve(entered_number)
if __name__ == '__main__':
ask_number()
| true | true |
1c498059b0ab55361020f761725d41830c547370 | 2,423 | py | Python | bayesvp/tests/test_likelihood.py | cameronliang/BayesVP | 3a38e6fc8b85f96f402289fde74f996971edec93 | [
"MIT"
] | 5 | 2017-10-10T20:24:05.000Z | 2017-11-02T20:20:34.000Z | bayesvp/tests/test_likelihood.py | cameronliang/BayesVP | 3a38e6fc8b85f96f402289fde74f996971edec93 | [
"MIT"
] | 1 | 2019-11-15T18:17:19.000Z | 2019-11-15T18:36:01.000Z | bayesvp/tests/test_likelihood.py | cameronliang/BayesVP | 3a38e6fc8b85f96f402289fde74f996971edec93 | [
"MIT"
] | 4 | 2018-05-22T14:30:23.000Z | 2021-09-23T09:23:46.000Z | import unittest
import os
import sys
import numpy as np
from bayesvp.config import DefineParams
from bayesvp.likelihood import Posterior
from bayesvp.utilities import get_bayesvp_Dir
###############################################################################
# TEST CASE 1: OVI line with stock config file and spectrum
###############################################################################
class TCPosterior(unittest.TestCase):
    """Sanity tests of the bayesvp likelihood/prior/posterior for an OVI
    line, using the stock example config file and spectrum shipped with
    the package."""

    def setUp(self):
        # read example config file shipped with the package and build the
        # Posterior object under test
        code_path = get_bayesvp_Dir()
        self.config_ex = code_path + '/data/example/config_OVI.dat'

        self.config_params = DefineParams(self.config_ex)
        self.posterior = Posterior(self.config_params)

    def tearDown(self):
        # remove output written during the test; print (don't fail) if the
        # directory could not be removed
        try:
            import shutil
            shutil.rmtree(self.config_params.output_path)
        except OSError as oserr:
            print(oserr)

    ###########################################################################
    # Basic Tests for likelihood, prior and posterior
    ###########################################################################

    def test_default_no_continuum(self):
        # continuum normalization should be off by default
        self.assertFalse(self.config_params.cont_normalize)

    def test_lnlike(self):
        vp_params = np.array([15,20,0]) # logN, b, z
        # reference log-likelihood for the stock example spectrum
        correct = -344.55470583729573
        self.assertEqual(self.posterior.lnlike(vp_params),correct)

    def test_prior(self):
        # inside the prior volume: log-prior is 0
        vp_params = np.array([15,20,0])
        correct = 0
        self.assertEqual(self.posterior.lnprior(vp_params),correct)

        # Outside of prior (logN)
        vp_params = np.array([19,20,0])
        correct = -np.inf
        self.assertEqual(self.posterior.lnprior(vp_params),correct)

        # Outside of prior (b)
        vp_params = np.array([15,-10,0])
        correct = -np.inf
        self.assertEqual(self.posterior.lnprior(vp_params),correct)

        # Outside of prior (z)
        vp_params = np.array([10,20,-1])
        correct = -np.inf
        self.assertEqual(self.posterior.lnprior(vp_params),correct)

    def test_call(self):
        # inside the prior, __call__ matches lnlike for these params
        vp_params = np.array([15,20,0])
        correct = -344.55470583729573
        self.assertEqual(self.posterior.__call__(vp_params),correct)

        # outside the prior, __call__ returns -inf
        vp_params = np.array([10,20,-1])
        correct = -np.inf
        self.assertEqual(self.posterior.__call__(vp_params),correct)
if __name__ == '__main__':
unittest.main() | 31.881579 | 79 | 0.570367 | import unittest
import os
import sys
import numpy as np
from bayesvp.config import DefineParams
from bayesvp.likelihood import Posterior
from bayesvp.utilities import get_bayesvp_Dir
| true | true |
1c498119f6fa59f0759598353b4eb9eb224fdda7 | 1,104 | py | Python | h2o-py/tests/testdir_misc/pyunit_download_all_logs.py | ChristosChristofidis/h2o-3 | 2a926c0950a98eff5a4c06aeaf0373e17176ecd8 | [
"Apache-2.0"
] | null | null | null | h2o-py/tests/testdir_misc/pyunit_download_all_logs.py | ChristosChristofidis/h2o-3 | 2a926c0950a98eff5a4c06aeaf0373e17176ecd8 | [
"Apache-2.0"
] | null | null | null | h2o-py/tests/testdir_misc/pyunit_download_all_logs.py | ChristosChristofidis/h2o-3 | 2a926c0950a98eff5a4c06aeaf0373e17176ecd8 | [
"Apache-2.0"
] | 1 | 2020-12-18T19:20:02.000Z | 2020-12-18T19:20:02.000Z | import sys, os
sys.path.insert(1, "../../")
import h2o
import random
def download_all_logs(ip,port):
    """Pyunit: exercise every argument combination of h2o.download_all_logs
    and verify a log archive is written each time.

    Args:
        ip, port: coordinates of the running H2O cluster.
    """
    # Connect to h2o
    h2o.init(ip,port)

    # argument combinations to test: default, dirname+filename, dirname
    # only, filename only (previously four copy-pasted blocks)
    cases = [
        {},
        {"dirname": ".", "filename": "h2o_logs.txt"},
        {"dirname": "."},
        {"filename": "h2o_logs.txt"},
    ]
    for kwargs in cases:
        log_location = h2o.download_all_logs(**kwargs)
        assert os.path.exists(log_location), \
            "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
        os.remove(log_location)
if __name__ == "__main__":
h2o.run_test(sys.argv, download_all_logs) | 35.612903 | 118 | 0.712862 | import sys, os
sys.path.insert(1, "../../")
import h2o
import random
def download_all_logs(ip,port):
h2o.init(ip,port)
log_location = h2o.download_all_logs()
assert os.path.exists(log_location), "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
os.remove(log_location)
# dirname and filename
log_location = h2o.download_all_logs(".","h2o_logs.txt")
assert os.path.exists(log_location), "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
os.remove(log_location)
log_location = h2o.download_all_logs(dirname=".")
assert os.path.exists(log_location), "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
os.remove(log_location)
# filename
log_location = h2o.download_all_logs(filename="h2o_logs.txt")
assert os.path.exists(log_location), "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
os.remove(log_location)
if __name__ == "__main__":
h2o.run_test(sys.argv, download_all_logs) | true | true |
1c4981738c26de6226fe40bbd740e912076a2276 | 13,275 | py | Python | vnpy/api/geya/geyaApi.py | cmbclh/vnpy1.7 | 25a95ba63c7797e92ba45450d79ee1326135fb47 | [
"MIT"
] | null | null | null | vnpy/api/geya/geyaApi.py | cmbclh/vnpy1.7 | 25a95ba63c7797e92ba45450d79ee1326135fb47 | [
"MIT"
] | null | null | null | vnpy/api/geya/geyaApi.py | cmbclh/vnpy1.7 | 25a95ba63c7797e92ba45450d79ee1326135fb47 | [
"MIT"
] | null | null | null | # encoding: utf-8
import urllib
import hashlib
import os
import jpype
from jpype import *
import math
import requests
from Queue import Queue, Empty
from threading import Thread
from time import sleep
# Base URL of the LHANG REST API (appears unused by the queue-based RMI path
# below -- TODO confirm before removing).
LHANG_API_ROOT = "https://api.lhang.com/v1/"

# Routing constants: processRequest dispatches queued requests on these names.
FUNCTION_TICKER = 'FUNCTION_TICKER'
FUNCTION_ALL_TICKER = 'FUNCTION_ALL_TICKER'
FUNCTION_CREATEORDER = 'FUNCTION_CREATEORDER'
########################################################################
class GeyaBase(object):
""""""
DEBUG = True
# ----------------------------------------------------------------------
    def __init__(self):
        """Constructor: set up credentials, state flags, the request queue
        and the worker thread (not started until init())."""
        self.apiKey = ''
        self.secretKey = ''
        
        self.interval = 1           # wait between consecutive requests (seconds)
        self.active = False         # API working state
        self.reqID = 0              # monotonically increasing request id
        self.reqQueue = Queue()     # pending-request queue
        self.reqThread = Thread(target=self.processQueue)    # request-processing thread
# ----------------------------------------------------------------------
    def init(self):
        """Initialize: mark the API active and start the worker thread."""
        self.active = True
        self.reqThread.start()
# ----------------------------------------------------------------------
    def exit(self):
        """Shut down: clear the active flag and join the worker thread."""
        self.active = False
        
        if self.reqThread.isAlive():
            self.reqThread.join()
# ----------------------------------------------------------------------
    # NOTE(original author): this dispatcher needs refactoring into a
    # higher-level abstraction.
    def processRequest(self, req):
        """Dispatch one queued request to the matching RMI call and normalize
        the reply into a plain dict; returns None on failure."""
        # unpack routing method and parameters
        method = req['function']
        params = req['params']
        # url = LHANG_API_ROOT + api
        if method == FUNCTION_TICKER: # query best cover-trade quota
            r = self.queryTradePrice(params)
        elif method == FUNCTION_ALL_TICKER: # query all quotas; returns directly since this reply carries no error code
            r = self.queryAllTradePrice(params)
            data = {'code': '00000', 'resList': r}
            return data
        elif method == FUNCTION_CREATEORDER: # execute cover trade
            r = self.coverTrade(params)
        # NOTE(review): queryTradePrice/coverTrade are presumably supplied by
        # a subclass or RMI proxy -- they are not defined in this class.
        # An unknown method would leave `r` unbound here (NameError).
        if r.code == '00000':
            if method == FUNCTION_TICKER: # best-quota reply
                data = {'code': r.code, 'message': r.message, 'exnm': r.exnm, 'tradeSide': r.tradeSide, 'status': r.status,
                        'tradeLimitAmount': r.tradeLimitAmount, 'price': r.price}
            elif method == FUNCTION_CREATEORDER: # trade reply; may carry multiple fills in `details`
                data = {'code': r.code, 'message': r.message, 'trsn': r.trsn, 'exnm': r.exnm, 'prcd': r.prcd,
                        'direction': params['tradeSide'], 'details': r.details}
        else:
            data = None
        return data
# ----------------------------------------------------------------------
    def processQueue(self):
        """Worker loop: pull requests off the queue, ensure the JVM is up,
        execute them, and route results/errors to the registered callbacks."""
        while self.active:
            try:
                req = self.reqQueue.get(block=True, timeout=1)  # block for at most one second
                #req = self.reqQueue.get(block=False)  # 获取请求的阻塞为一秒
                if req is None:
                    continue
                
                callback = req['callback']
                reqID = req['reqID']

                # start the Java VM if it is not already running (jpype bridge
                # to RmiInterface.jar in the current working directory)
                if jpype.isJVMStarted() == False:
                    jarpath = os.path.join(os.path.abspath('.'), 'RmiInterface.jar')
                    print jarpath
                    print jpype.getDefaultJVMPath()
                    #jpype.startJVM(jpype.getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % jarpath)
                    jpype.startJVM(jpype.getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % jarpath)

                data = self.processRequest(req)
                
                # request failed (no reply)
                if data is None:
                    error = u'请求失败'
                    self.onError(error, req, reqID)
                # request succeeded
                elif data['code'] == '00000':
                    if self.DEBUG:
                        print callback.__name__
                    callback(data, req, reqID)
                # request returned an error code
                else:
                    error = u'请求出错,错误代码:%s' % data['code']
                    self.onError(error, req, reqID)
                #finally:
                #    jpype.shutdownJVM()

                # flow control between requests (currently disabled)
                #sleep(self.interval)
            except Empty:
                pass
            except jpype.JavaException, ex:
                print ex.javaClass(),ex.message()
                print ex.stacktrace()
# ----------------------------------------------------------------------
def sendRequest(self, function, params, callback):
"""发送请求"""
# 请求编号加1
self.reqID += 1
# 生成请求字典并放入队列中
req = {}
req['function'] = function
req['params'] = params
req['callback'] = callback
req['reqID'] = self.reqID
self.reqQueue.put(req)
# 返回请求编号
return self.reqID
# ----------------------------------------------------------------------
    def onError(self, error, req, reqID):
        """Error sink for failed requests; placeholder that only logs."""
        print error, req, reqID
###############################################
# 行情接口
###############################################
# ----------------------------------------------------------------------
def getTicker(self, symbol, direction, rmiIp, rmiPort):
"""#查询平盘最优额度"""
function = FUNCTION_TICKER
params = {'symbol': symbol,
'direction': direction,
'rmiIp': rmiIp,
'rmiPort': rmiPort}
callback = self.onGetTicker
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getDepth(self, symbol, rmiIp, rmiPort):
"""查询深度"""
function = FUNCTION_ALL_TICKER
params = {
'symbol': symbol,
'rmiIp': rmiIp,
'rmiPort': rmiPort}
callback = self.onGetDepth
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
    def onGetTicker(self, data, req, reqID):
        """Default callback for best-quota queries; placeholder that only logs."""
        print data, reqID
# ----------------------------------------------------------------------
    def onGetDepth(self, data, req, reqID):
        """Default callback for full-quota (depth) queries; placeholder that only logs."""
        print data, reqID
# ----------------------------------------------------------------------
    def onGetTrades(self, data, req, reqID):
        """Default callback for historical-trade queries; placeholder that only logs."""
        print data, reqID
# ----------------------------------------------------------------------
    def onGetKline(self, data, req, reqID):
        """Default callback for K-line queries; placeholder that only logs."""
        print data, reqID
###############################################
# 交易接口
###############################################
# ----------------------------------------------------------------------
def createOrder(self, serial, prcd, exnm, reqDate, reqTime, volume, ppds, tradeSide, akpc, rrdc):
"""发送委托"""
function = FUNCTION_CREATEORDER
params = {
'serial': serial,
'prcd': prcd,
'exnm': exnm,
'reqDate': reqDate,
'reqTime': reqTime,
'volume': volume,
'ppds': ppds,
'tradeSide': tradeSide,
'akpc': akpc,
'rrdc': rrdc
}
callback = self.onCreateTrade
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def cancelOrder(self, symbol, orderId):
"""撤单"""
function = FUNCTION_CANCELORDER
params = {
'symbol': symbol,
'order_id': orderId
}
callback = self.onCancelOrder
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getOrdersInfo(self, symbol, orderId):
"""查询委托"""
function = FUNCTION_ORDERSINFO
params = {
'symbol': symbol,
'order_id': orderId
}
callback = self.onGetOrdersInfo
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
def getOrdersInfoHistory(self, symbol, status, currentPage, pageLength):
"""撤单"""
function = FUNCTION_ORDERSINFOHISTORY
params = {
'symbol': symbol,
'status': status,
'current_page': currentPage,
'page_length': pageLength
}
callback = self.onGetOrdersInfoHistory
return self.sendRequest(function, params, callback)
# ----------------------------------------------------------------------
    def onGetUserInfo(self, data, req, reqID):
        """Default callback for account-info queries; placeholder that only logs."""
        print data, reqID
# ----------------------------------------------------------------------
    def onCreateOrder(self, data, req, reqID):
        """Default callback for order placement; placeholder that only logs."""
        print data, reqID
# ----------------------------------------------------------------------
    def onCancelOrder(self, data, req, reqID):
        """Default callback for cancel requests; placeholder that only logs."""
        print data, reqID
# ----------------------------------------------------------------------
    def onGetOrdersInfo(self, data, req, reqID):
        """Default callback for order-status queries; placeholder that only logs."""
        print data, reqID
# ----------------------------------------------------------------------
    def onGetOrdersInfoHistory(self, data, req, reqID):
        """Default callback for historical-order queries; placeholder that only
        logs.  (The original docstring said "cancel", which was misleading.)
        """
        print data, reqID
# 调用自动平盘平台的查询平盘最优额度接口
# def queryTradePrice(self, exnm, tradeSide, rmiIp, rmiPort):
    def queryTradePrice(self, params):
        """Query the RMI cover-trade service for the best quota/price.

        params must carry 'symbol', 'direction' ("BUY"/"SELL"), 'rmiIp' and
        'rmiPort'.  Returns the raw Java CheckHydraTradeInfoResponse object.
        """
        Context = jpype.javax.naming.Context
        InitialContext = jpype.javax.naming.InitialContext
        namingContext = InitialContext()
        # Load the Java-side service/bean classes through JPype.
        IHydraTradeService = jpype.JClass('com.cmbc.hydra.rmi.service.IHydraTradeService')
        CheckHydraTradeInfoRequest = jpype.JClass('com.cmbc.hydra.rmi.bean.CheckHydraTradeInfoRequest')
        # CheckHydraTradeInfoResponse = jpype.JClass('com.cmbc.hydra.rmi.bean.CheckHydraTradeInfoResponse')
        TradeSide = jpype.JClass('com.cmbc.hydra.rmi.bean.TradeSide')
        # Look up the remote service over RMI (Python -> Java call).
        remoteObj = namingContext.lookup(
            "rmi://" + params['rmiIp'] + ":" + params['rmiPort'] + "/HydraTradeService")
        request = CheckHydraTradeInfoRequest()
        request.setExnm(params['symbol'])
        if params['direction'] == "BUY":
            request.setTradeSide(TradeSide.BUY)
        elif params['direction'] == "SELL":
            request.setTradeSide(TradeSide.SELL)
        resp = remoteObj.sendCheckForDeal(request)
        return resp
# 调用自动平盘平台的查询全部平盘额度
    def queryAllTradePrice(self, params):
        """Query the RMI cover-trade service for the full quota list.

        params must carry 'symbol', 'rmiIp' and 'rmiPort'.  Returns the raw
        Java response object (no error-code field -- see processRequest).
        """
        Context = jpype.javax.naming.Context
        InitialContext = jpype.javax.naming.InitialContext
        namingContext = InitialContext()
        # Load the Java-side service/bean classes through JPype.
        IHydraTradeService = jpype.JClass('com.cmbc.hydra.rmi.service.IHydraTradeService')
        CheckHydraTradeAllInfoRequest = jpype.JClass('com.cmbc.hydra.rmi.bean.CheckHydraTradeAllInfoRequest')
        # CheckHydraTradeInfoResponse = jpype.JClass('com.cmbc.hydra.rmi.bean.CheckHydraTradeInfoResponse')
        # TradeSide = jpype.JClass('com.cmbc.hydra.rmi.bean.TradeSide')
        # Look up the remote service over RMI (Python -> Java call).
        remoteObj = namingContext.lookup(
            "rmi://" + params['rmiIp'] + ":" + params['rmiPort'] + "/HydraTradeService")
        request = CheckHydraTradeAllInfoRequest()
        request.setExnm(params['symbol'])
        resp = remoteObj.sendCheckForDealAll(request)
        return resp
# 调用自动平盘平台的平盘交易接口
    def coverTrade(self, params):
        """Submit a cover trade to the RMI service.

        params carries the order fields produced by createOrder; returns the
        raw Java CallHydraTradeResponse object.
        """
        # jarpath = os.path.join(os.path.abspath('.'), 'RmiInterface.jar')
        # startJVM(getDefaultJVMPath(), "-ea", "-Djava.class.path=%s" % jarpath)
        Context = jpype.javax.naming.Context
        InitialContext = jpype.javax.naming.InitialContext
        namingContext = InitialContext()
        # Load the Java-side service/bean classes through JPype.
        IHydraTradeService = jpype.JClass('com.cmbc.hydra.rmi.service.IHydraTradeService')
        CallHydraTradeRequest = jpype.JClass('com.cmbc.hydra.rmi.bean.CallHydraTradeRequest')
        CallHydraTradeResponse = jpype.JClass('com.cmbc.hydra.rmi.bean.CallHydraTradeResponse')
        TradeSide = jpype.JClass('com.cmbc.hydra.rmi.bean.TradeSide')
        # Look up the remote service over RMI; unlike the query methods,
        # host/port come from the owning gateway object here.
        remoteObj = namingContext.lookup(
            "rmi://" + self.gateway.rmiIp + ":" + self.gateway.rmiPort + "/HydraTradeService")
        rmiRequst = CallHydraTradeRequest()
        rmiRequst.setTrsn(params['serial'])
        rmiRequst.setPrcd(params['prcd'])
        rmiRequst.setRqdt(params['reqDate'])
        rmiRequst.setRqtm(params['reqTime'])
        rmiRequst.setPpds(params['ppds'])
        rmiRequst.setExnm(params['exnm'])  # NOTE(review): original comment says a symbol-name conversion may be needed here
        if params['tradeSide'] == "BUY":
            rmiRequst.setTradeSide(TradeSide.BUY)
        elif params['tradeSide'] == "SELL":
            rmiRequst.setTradeSide(TradeSide.SELL)
        # Numeric fields cross the bridge as java.math.BigDecimal.
        BigDecimal = jpype.java.math.BigDecimal
        rmiRequst.setAmut(BigDecimal(params['volume']))
        rmiRequst.setAkpc(BigDecimal(params['akpc']))
        rmiRequst.setRrdc(BigDecimal(params['rrdc']))
        # resp = CheckHydraTradeInfoResponse()
        resp = remoteObj.callHydraTrade(rmiRequst)
        return resp
import urllib
import hashlib
import os
import jpype
from jpype import *
import math
import requests
from Queue import Queue, Empty
from threading import Thread
from time import sleep
LHANG_API_ROOT = "https://api.lhang.com/v1/"
FUNCTION_TICKER = 'FUNCTION_TICKER'
FUNCTION_ALL_TICKER = 'FUNCTION_ALL_TICKER'
FUNCTION_CREATEORDER = 'FUNCTION_CREATEORDER'
elif data['code'] == '00000':
if self.DEBUG:
print callback.__name__
callback(data, req, reqID)
else:
error = u'请求出错,错误代码:%s' % data['code']
self.onError(error, req, reqID)
except Empty:
pass
except jpype.JavaException, ex:
print ex.javaClass(),ex.message()
print ex.stacktrace()
def sendRequest(self, function, params, callback):
"""发送请求"""
self.reqID += 1
req = {}
req['function'] = function
req['params'] = params
req['callback'] = callback
req['reqID'] = self.reqID
self.reqQueue.put(req)
return self.reqID
def onError(self, error, req, reqID):
"""错误推送"""
print error, req, reqID
ingContext = InitialContext()
IHydraTradeService = jpype.JClass('com.cmbc.hydra.rmi.service.IHydraTradeService')
CheckHydraTradeAllInfoRequest = jpype.JClass('com.cmbc.hydra.rmi.bean.CheckHydraTradeAllInfoRequest')
remoteObj = namingContext.lookup(
"rmi://" + params['rmiIp'] + ":" + params['rmiPort'] + "/HydraTradeService")
request = CheckHydraTradeAllInfoRequest()
request.setExnm(params['symbol'])
resp = remoteObj.sendCheckForDealAll(request)
return resp
def coverTrade(self, params):
Context = jpype.javax.naming.Context
InitialContext = jpype.javax.naming.InitialContext
namingContext = InitialContext()
IHydraTradeService = jpype.JClass('com.cmbc.hydra.rmi.service.IHydraTradeService')
CallHydraTradeRequest = jpype.JClass('com.cmbc.hydra.rmi.bean.CallHydraTradeRequest')
CallHydraTradeResponse = jpype.JClass('com.cmbc.hydra.rmi.bean.CallHydraTradeResponse')
TradeSide = jpype.JClass('com.cmbc.hydra.rmi.bean.TradeSide')
remoteObj = namingContext.lookup(
"rmi://" + self.gateway.rmiIp + ":" + self.gateway.rmiPort + "/HydraTradeService")
rmiRequst = CallHydraTradeRequest()
rmiRequst.setTrsn(params['serial'])
rmiRequst.setPrcd(params['prcd'])
rmiRequst.setRqdt(params['reqDate'])
rmiRequst.setRqtm(params['reqTime'])
rmiRequst.setPpds(params['ppds'])
rmiRequst.setExnm(params['exnm'])
if params['tradeSide'] == "BUY":
rmiRequst.setTradeSide(TradeSide.BUY)
elif params['tradeSide'] == "SELL":
rmiRequst.setTradeSide(TradeSide.SELL)
BigDecimal = jpype.java.math.BigDecimal
rmiRequst.setAmut(BigDecimal(params['volume']))
rmiRequst.setAkpc(BigDecimal(params['akpc']))
rmiRequst.setRrdc(BigDecimal(params['rrdc']))
resp = remoteObj.callHydraTrade(rmiRequst)
return resp | false | true |
1c4981ea161448965260abc067ee7218670be9b4 | 218 | py | Python | text/_cascade/text/spacing/word.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | text/_cascade/text/spacing/word.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | text/_cascade/text/spacing/word.py | jedhsu/text | 8525b602d304ac571a629104c48703443244545c | [
"Apache-2.0"
] | null | null | null | """
Word Spacing
"""
__all__ = ["WordSpacing"]
class WordSpacingKeyword:
    # CSS keyword value accepted by the `word-spacing` property.
    Normal = "normal"
class WordSpacing(
    WordSpacingKeyword,
    Length,
):
    """
    Spacing between each word.

    Models the CSS ``word-spacing`` property: a keyword (``normal``, via
    WordSpacingKeyword) or a length value.
    NOTE(review): ``Length`` is not defined or imported in this file as
    shown here -- confirm it is provided by the surrounding package.
    """
    pass
| 9.083333 | 30 | 0.59633 |
__all__ = ["WordSpacing"]
class WordSpacingKeyword:
Normal = "normal"
class WordSpacing(
WordSpacingKeyword,
Length,
):
pass
| true | true |
1c4982c7f40c95390cf2ad55bc3592134703da57 | 5,605 | py | Python | fa_en_keyboard_exchange.py | arian42/wrong-keyboard | c0c0842ae8181ff52b33675aa7171de43bb56513 | [
"MIT"
] | null | null | null | fa_en_keyboard_exchange.py | arian42/wrong-keyboard | c0c0842ae8181ff52b33675aa7171de43bb56513 | [
"MIT"
] | null | null | null | fa_en_keyboard_exchange.py | arian42/wrong-keyboard | c0c0842ae8181ff52b33675aa7171de43bb56513 | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------------------------------------------------
# this is a alpha version. need more work
# written by Arian Heydari
#
# things that i should add and fix:
# words list are bad (need better words file)
# add auto learn function for new words
# -------------------------------------------------------------------------------------------------------------------
def binary_search(alist, item):
    """Return True if `item` is present in the sorted list `alist`.

    The original hand-rolled loop carried a dead `pos` variable and only
    ever returned the boolean; delegate to the C-implemented stdlib
    bisection instead -- behaviour is identical for sorted input.
    """
    import bisect
    index = bisect.bisect_left(alist, item)
    return index < len(alist) and alist[index] == item
def lang_exchange(string):
dict = {
u'a': u'ش',
u'b': u'ذ',
u'c': u'ز',
u'd': u'ی',
u'e': u'ث',
u'f': u'ب',
u'g': u'ل',
u'h': u'ا',
u'i': u'ه',
u'j': u'ت',
u'k': u'ن',
u'l': u'م',
u'm': u'ئ',
u'n': u'د',
u'o': u'خ',
u'p': u'ح',
u'q': u'ض',
u'r': u'ق',
u's': u'س',
u't': u'ف',
u'u': u'ع',
u'v': u'ر',
u'w': u'ص',
u'x': u'ط',
u'y': u'غ',
u'z': u'ظ',
u'A': u'َ',
u'B': u'إ',
u'C': u'ژ',
u'D': u'ِ',
u'E': u'ٍ',
u'F': u'ّ',
u'G': u'ۀ',
u'H': u'آ',
u'I': u']',
u'J': u'ـ',
u'K': u'«',
u'L': u'»',
u'M': u'ء',
u'N': u'أ',
u'O': u'[',
u'P': u'\\',
u'Q': u'ً',
u'R': u'ريال',
u'S': u'ُ',
u'T': u'،',
u'U': u',',
u'V': u'ؤ',
u'W': u'ٌ',
u'X': u'ي',
u'Y': u'؛',
u'Z': u'ة',
u';': u'ک',
u'\'': u'گ',
u',': u'و',
u'.': u'.',
u'/': u'/',
u'[': u'ج',
u']': u'چ',
u'\\': u'پ',
u':': u':',
u'"': u'"',
u'<': u'<',
u'>': u'>',
u'?': u'؟',
u'{': u'}',
u'}': u'{',
u'|': u'|',
u'`': u'÷',
u'1': u'1',
u'2': u'2',
u'3': u'3',
u'4': u'4',
u'5': u'5',
u'6': u'6',
u'7': u'7',
u'8': u'8',
u'9': u'9',
u'0': u'0',
u'-': u'-',
u'=': u'=',
u'~': u'×',
u'!': u'!',
u'@': u'@',
u'#': u'#',
u'$': u'$',
u'%': u'%',
u'^': u'^',
u'&': u'&',
u'*': u'*',
u'(': u')',
u')': u'(',
u'_': u'_',
u'+': u'+',
u' ': u' ',
}
rdict = {v: k for k, v in dict.items()}
newString = ''
for i in range(len(string)):
if string[i] in dict:
newString += dict[string[i]]
elif string[i] in rdict:
newString += rdict[string[i]]
else:
newString += string[i]
return newString
print("Whait a bit please. loading data...")
# LOAD DATA -------------
enChars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', ]
englishWordsFile = open("en.words.txt", 'r')
englishWordsList = englishWordsFile.read().split(',')
englishWordsFile.close()
faChars = [u'ظ', u'ط', u'ز', u'ر', u'ذ', u'د', u'ئ', u'و', u'ش', u'س', u'ی', u'ب', u'ل', u'ا', u'ت', u'ن', u'م', u'ک',
u'گ', u'ض', u'ص',
u'ث', u'ق', u'ف', u'غ', u'ع', u'ه', u'خ', u'ح', u'ج', u'چ', u'پ', u'ة', u'ي', u'ژ', u'ؤ', u'إ', u'أ', u'ء',
u'َ', u'ُ', u'ِ', u'ّ',
u'ۀ', u'آ', u'ـ', u'«', u'»', u'ً', u'ٌ', u'ٍ', u'ريال', u'،', u'؛', u',', u']', u'[', u'×', ]
farsiWordsFile = open("fa.words.txt", "r", encoding="utf-8")
farsiWordsList = farsiWordsFile.read().split(u',')
farsiWordsFile.close()
# INPUT ---------------
def translate(input_data):
    """Detect text typed with the wrong keyboard layout and fix it.

    If `input_data` looks like Farsi typed on an English layout (or vice
    versa), return the layout-swapped text; otherwise return it unchanged.
    Relies on the module-level character sets and sorted word lists.
    """
    rowInput = input_data
    splitInput = rowInput.split()
    enWordsNumbers = 0
    faWordsNumbers = 0
    otherWordsNumbers = 0
    allWords = 0
    allChar = 0
    englishChar = 0
    farsiChar = 0
    # Tally per-character script membership and per-word dictionary hits.
    for x in splitInput:
        allWords += 1
        for i in x:
            allChar += 1
            if i in enChars:
                englishChar += 1
            if i in faChars:
                farsiChar += 1
        if binary_search(farsiWordsList, x):
            faWordsNumbers += 1
        elif binary_search(englishWordsList, x):
            enWordsNumbers += 1
        else:
            otherWordsNumbers += 1
    # Mostly Farsi/English letters?  English characters are weighted double
    # here -- presumably to offset the smaller English char set; TODO confirm.
    if farsiChar + englishChar * 2 >= allChar:
        if faWordsNumbers * 20 >= allWords or enWordsNumbers * 20 >= allWords:
            # >= 5% of words are real dictionary words: keep text as-is.
            return rowInput
        else:
            # Try swapping layouts; accept only if more than 10% of the
            # swapped words appear in either dictionary.
            translate_words = lang_exchange(rowInput)
            new_words = 0
            for words in translate_words.split():
                if binary_search(englishWordsList, words) or binary_search(farsiWordsList, words):
                    new_words += 1
            if new_words * 10 > len(translate_words.split()):
                return translate_words
            return rowInput
    else:
        # Text is mostly neither script (numbers, other languages): leave it.
        return rowInput
print("Done. ready to use ,just type:")
while True:
print(translate(input()))
| 27.747525 | 120 | 0.363426 |
def binary_search(alist, item):
first = 0
last = len(alist) - 1
found = False
while first <= last and not found:
pos = 0
midpoint = (first + last) // 2
if alist[midpoint] == item:
pos = midpoint
found = True
else:
if item < alist[midpoint]:
last = midpoint - 1
else:
first = midpoint + 1
return found
def lang_exchange(string):
dict = {
u'a': u'ش',
u'b': u'ذ',
u'c': u'ز',
u'd': u'ی',
u'e': u'ث',
u'f': u'ب',
u'g': u'ل',
u'h': u'ا',
u'i': u'ه',
u'j': u'ت',
u'k': u'ن',
u'l': u'م',
u'm': u'ئ',
u'n': u'د',
u'o': u'خ',
u'p': u'ح',
u'q': u'ض',
u'r': u'ق',
u's': u'س',
u't': u'ف',
u'u': u'ع',
u'v': u'ر',
u'w': u'ص',
u'x': u'ط',
u'y': u'غ',
u'z': u'ظ',
u'A': u'َ',
u'B': u'إ',
u'C': u'ژ',
u'D': u'ِ',
u'E': u'ٍ',
u'F': u'ّ',
u'G': u'ۀ',
u'H': u'آ',
u'I': u']',
u'J': u'ـ',
u'K': u'«',
u'L': u'»',
u'M': u'ء',
u'N': u'أ',
u'O': u'[',
u'P': u'\\',
u'Q': u'ً',
u'R': u'ريال',
u'S': u'ُ',
u'T': u'،',
u'U': u',',
u'V': u'ؤ',
u'W': u'ٌ',
u'X': u'ي',
u'Y': u'؛',
u'Z': u'ة',
u';': u'ک',
u'\'': u'گ',
u',': u'و',
u'.': u'.',
u'/': u'/',
u'[': u'ج',
u']': u'چ',
u'\\': u'پ',
u':': u':',
u'"': u'"',
u'<': u'<',
u'>': u'>',
u'?': u'؟',
u'{': u'}',
u'}': u'{',
u'|': u'|',
u'`': u'÷',
u'1': u'1',
u'2': u'2',
u'3': u'3',
u'4': u'4',
u'5': u'5',
u'6': u'6',
u'7': u'7',
u'8': u'8',
u'9': u'9',
u'0': u'0',
u'-': u'-',
u'=': u'=',
u'~': u'×',
u'!': u'!',
u'@': u'@',
u' u'$': u'$',
u'%': u'%',
u'^': u'^',
u'&': u'&',
u'*': u'*',
u'(': u')',
u')': u'(',
u'_': u'_',
u'+': u'+',
u' ': u' ',
}
rdict = {v: k for k, v in dict.items()}
newString = ''
for i in range(len(string)):
if string[i] in dict:
newString += dict[string[i]]
elif string[i] in rdict:
newString += rdict[string[i]]
else:
newString += string[i]
return newString
print("Whait a bit please. loading data...")
# LOAD DATA -------------
enChars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', ]
englishWordsFile = open("en.words.txt", 'r')
englishWordsList = englishWordsFile.read().split(',')
englishWordsFile.close()
faChars = [u'ظ', u'ط', u'ز', u'ر', u'ذ', u'د', u'ئ', u'و', u'ش', u'س', u'ی', u'ب', u'ل', u'ا', u'ت', u'ن', u'م', u'ک',
u'گ', u'ض', u'ص',
u'ث', u'ق', u'ف', u'غ', u'ع', u'ه', u'خ', u'ح', u'ج', u'چ', u'پ', u'ة', u'ي', u'ژ', u'ؤ', u'إ', u'أ', u'ء',
u'َ', u'ُ', u'ِ', u'ّ',
u'ۀ', u'آ', u'ـ', u'«', u'»', u'ً', u'ٌ', u'ٍ', u'ريال', u'،', u'؛', u',', u']', u'[', u'×', ]
farsiWordsFile = open("fa.words.txt", "r", encoding="utf-8")
farsiWordsList = farsiWordsFile.read().split(u',')
farsiWordsFile.close()
# INPUT ---------------
def translate(input_data):
rowInput = input_data
splitInput = rowInput.split()
enWordsNumbers = 0
faWordsNumbers = 0
otherWordsNumbers = 0
allWords = 0
allChar = 0
englishChar = 0
farsiChar = 0
for x in splitInput:
allWords += 1
for i in x:
allChar += 1
if i in enChars:
englishChar += 1
if i in faChars:
farsiChar += 1
if binary_search(farsiWordsList, x):
faWordsNumbers += 1
elif binary_search(englishWordsList, x):
enWordsNumbers += 1
else:
otherWordsNumbers += 1
if farsiChar + englishChar * 2 >= allChar:
if faWordsNumbers * 20 >= allWords or enWordsNumbers * 20 >= allWords:
# it is farsi or english
return rowInput
else:
translate_words = lang_exchange(rowInput)
new_words = 0
for words in translate_words.split():
if binary_search(englishWordsList, words) or binary_search(farsiWordsList, words):
new_words += 1
if new_words * 10 > len(translate_words.split()):
return translate_words
return rowInput
else:
# it is other language
return rowInput
print("Done. ready to use ,just type:")
while True:
print(translate(input()))
| true | true |
1c4982dab6584e4c750c0a7551513aed7ec8c4b7 | 221 | py | Python | abc/abc190/abc190d-3.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | 1 | 2019-08-21T00:49:34.000Z | 2019-08-21T00:49:34.000Z | abc/abc190/abc190d-3.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | abc/abc190/abc190d-3.py | c-yan/atcoder | 940e49d576e6a2d734288fadaf368e486480a948 | [
"MIT"
] | null | null | null | N = int(input())
a = N
while a % 2 == 0:
a //= 2
result = 0
for i in range(1, int(a ** 0.5) + 1):
if a % i != 0:
continue
result += 1
if i * i != a:
result += 1
result *= 2
print(result)
| 13.8125 | 37 | 0.438914 | N = int(input())
a = N
while a % 2 == 0:
a //= 2
result = 0
for i in range(1, int(a ** 0.5) + 1):
if a % i != 0:
continue
result += 1
if i * i != a:
result += 1
result *= 2
print(result)
| true | true |
1c498316fdd0c125a26460ff88c3dfe714b68c44 | 9,921 | py | Python | train_sppe/src/utils/img.py | mdraw/AlphaPose | bed8e0798f6deed4789b9ae2646f72b9fd138c5b | [
"Apache-2.0"
] | null | null | null | train_sppe/src/utils/img.py | mdraw/AlphaPose | bed8e0798f6deed4789b9ae2646f72b9fd138c5b | [
"Apache-2.0"
] | null | null | null | train_sppe/src/utils/img.py | mdraw/AlphaPose | bed8e0798f6deed4789b9ae2646f72b9fd138c5b | [
"Apache-2.0"
] | null | null | null | # -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import numpy as np
import torch
import scipy.misc
import torch.nn.functional as F
import cv2
from opt import opt
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
def im_to_torch(img):
img = np.transpose(img, (2, 0, 1)) # C*H*W
img = to_torch(img).float()
if img.max() > 1:
img /= 255
return img
def torch_to_im(img):
img = to_numpy(img)
img = np.transpose(img, (1, 2, 0)) # C*H*W
return img
def load_image(img_path):
# H x W x C => C x H x W
return im_to_torch(scipy.misc.imread(img_path, mode='RGB'))
def to_numpy(tensor):
    """Convert a torch tensor to a numpy array; numpy input passes through.

    Raises ValueError for anything that is neither a tensor nor ndarray.
    """
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ != 'numpy':
        raise ValueError("Cannot convert {} to numpy array"
                         .format(type(tensor)))
    return tensor
def to_torch(ndarray):
    """Convert a numpy array to a torch tensor; tensor input passes through.

    Raises ValueError for anything that is neither an ndarray nor a tensor.
    """
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    if not torch.is_tensor(ndarray):
        raise ValueError("Cannot convert {} to torch tensor"
                         .format(type(ndarray)))
    return ndarray
def drawGaussian(img, pt, sigma):
img = to_numpy(img)
tmpSize = 3 * sigma
# Check that any part of the gaussian is in-bounds
ul = [int(pt[0] - tmpSize), int(pt[1] - tmpSize)]
br = [int(pt[0] + tmpSize + 1), int(pt[1] + tmpSize + 1)]
if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
br[0] < 0 or br[1] < 0):
# If not, just return the image as is
return to_torch(img)
# Generate gaussian
size = 2 * tmpSize + 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
sigma = size / 4.0
# The gaussian is not normalized, we want the center value to equal 1
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
# Usable gaussian range
g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
# Image range
img_x = max(0, ul[0]), min(br[0], img.shape[1])
img_y = max(0, ul[1]), min(br[1], img.shape[0])
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return to_torch(img)
def transformBox(pt, ul, br, inpH, inpW, resH, resW):
center = torch.zeros(2)
center[0] = (br[0] - 1 - ul[0]) / 2
center[1] = (br[1] - 1 - ul[1]) / 2
lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
lenW = lenH * inpW / inpH
_pt = torch.zeros(2)
_pt[0] = pt[0] - ul[0]
_pt[1] = pt[1] - ul[1]
# Move to center
_pt[0] = _pt[0] + max(0, (lenW - 1) / 2 - center[0])
_pt[1] = _pt[1] + max(0, (lenH - 1) / 2 - center[1])
pt = (_pt * resH) / lenH
pt[0] = round(float(pt[0]))
pt[1] = round(float(pt[1]))
return pt.int()
def transformBoxInvert(pt, ul, br, inpH, inpW, resH, resW):
center = torch.zeros(2)
center[0] = (br[0] - 1 - ul[0]) / 2
center[1] = (br[1] - 1 - ul[1]) / 2
lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
lenW = lenH * inpW / inpH
_pt = (pt * lenH) / resH
_pt[0] = _pt[0] - max(0, (lenW - 1) / 2 - center[0])
_pt[1] = _pt[1] - max(0, (lenH - 1) / 2 - center[1])
new_point = torch.zeros(2)
new_point[0] = _pt[0] + ul[0]
new_point[1] = _pt[1] + ul[1]
return new_point
def cropBox(img, ul, br, resH, resW):
ul = ul.int()
br = (br - 1).int()
# br = br.int()
lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
lenW = lenH * resW / resH
if img.dim() == 2:
img = img[np.newaxis, :]
box_shape = [br[1] - ul[1], br[0] - ul[0]]
pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
# Padding Zeros
img[:, :ul[1], :], img[:, :, :ul[0]] = 0, 0
img[:, br[1] + 1:, :], img[:, :, br[0] + 1:] = 0, 0
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)
src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)
dst[0, :] = 0
dst[1, :] = np.array([resW - 1, resH - 1], np.float32)
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
dst_img = cv2.warpAffine(torch_to_im(img), trans,
(resW, resH), flags=cv2.INTER_LINEAR)
return im_to_torch(torch.Tensor(dst_img))
def cv_rotate(img, rot, resW, resH):
center = np.array((resW - 1, resH - 1)) / 2
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, (resH - 1) * -0.5], rot_rad)
dst_dir = np.array([0, (resH - 1) * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center
src[1, :] = center + src_dir
dst[0, :] = [(resW - 1) * 0.5, (resH - 1) * 0.5]
dst[1, :] = np.array([(resW - 1) * 0.5, (resH - 1) * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
dst_img = cv2.warpAffine(torch_to_im(img), trans,
(resW, resH), flags=cv2.INTER_LINEAR)
return im_to_torch(torch.Tensor(dst_img))
def flip_v(x, cuda=False):
x = flip(x.cpu().data)
if cuda:
x = x
x = torch.autograd.Variable(x)
return x
def flip(x):
assert (x.dim() == 3 or x.dim() == 4)
# dim = x.dim() - 1
x = x.numpy().copy()
if x.ndim == 3:
x = np.transpose(np.fliplr(np.transpose(x, (0, 2, 1))), (0, 2, 1))
elif x.ndim == 4:
for i in range(x.shape[0]):
x[i] = np.transpose(
np.fliplr(np.transpose(x[i], (0, 2, 1))), (0, 2, 1))
# x = x.swapaxes(dim, 0)
# x = x[::-1, ...]
# x = x.swapaxes(0, dim)
return torch.from_numpy(x.copy())
def shuffleLR(x, dataset):
flipRef = dataset.flipRef
assert (x.dim() == 3 or x.dim() == 4)
for pair in flipRef:
dim0, dim1 = pair
dim0 -= 1
dim1 -= 1
if x.dim() == 4:
tmp = x[:, dim1].clone()
x[:, dim1] = x[:, dim0].clone()
x[:, dim0] = tmp.clone()
#x[:, dim0], x[:, dim1] = deepcopy((x[:, dim1], x[:, dim0]))
else:
tmp = x[dim1].clone()
x[dim1] = x[dim0].clone()
x[dim0] = tmp.clone()
#x[dim0], x[dim1] = deepcopy((x[dim1], x[dim0]))
return x
def shuffleLR_v(x, dataset, cuda=False):
x = shuffleLR(x.cpu().data, dataset)
if cuda:
x = x
x = torch.autograd.Variable(x)
return x
def vis_frame(frame, im_res, format='coco'):
'''
frame: frame image
im_res: im_res of predictions
format: coco or mpii
return rendered image
'''
if format == 'coco':
l_pair = [
(0, 1), (0, 2), (1, 3), (2, 4), # Head
(5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
(5, 11), (6, 12), # Body
(11, 13), (12, 14), (13, 15), (14, 16)
]
p_color = [RED, RED, RED, RED, RED, YELLOW, YELLOW, YELLOW,
YELLOW, YELLOW, YELLOW, GREEN, GREEN, GREEN, GREEN, GREEN, GREEN]
line_color = [YELLOW, YELLOW, YELLOW, YELLOW, BLUE, BLUE,
BLUE, BLUE, BLUE, PURPLE, PURPLE, RED, RED, RED, RED]
elif format == 'mpii':
l_pair = [
(8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
(13, 14), (14, 15), (3, 4), (4, 5),
(8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
]
p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED,
RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE,
RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
else:
raise NotImplementedError
im_name = im_res['imgname'].split('/')[-1]
img = frame.copy()
for human in im_res['result']:
part_line = {}
kp_preds = human['keypoints']
kp_scores = human['kp_score']
# Draw keypoints
for n in range(kp_scores.shape[0]):
if kp_scores[n] <= 0.15:
continue
cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
part_line[n] = (cor_x, cor_y)
cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)
# Now create a mask of logo and create its inverse mask also
#transparency = max(0, min(1, kp_scores[n]))
#img = cv2.addWeighted(bg, transparency, img, 1, 0)
# Draw limbs
for i, (start_p, end_p) in enumerate(l_pair):
if start_p in part_line and end_p in part_line:
start_xy = part_line[start_p]
end_xy = part_line[end_p]
cv2.line(img, start_xy, end_xy,
line_color[i], (0.5 * (kp_scores[start_p] + kp_scores[end_p])) + 1)
#transparency = max(
# 0, min(1, (kp_scores[start_p] + kp_scores[end_p])))
#img = cv2.addWeighted(bg, transparency, img, 1, 0)
return img
def get_3rd_point(a, b):
    """Return the third point forming a right angle at *b* with segment a-b.

    The (a - b) offset is rotated 90 degrees and re-anchored at b.
    """
    dx, dy = a[0] - b[0], a[1] - b[1]
    return b + np.array([-dy, dx], dtype=np.float32)
def get_dir(src_point, rot_rad):
    """Rotate the 2-D point `src_point` by `rot_rad` radians about the origin."""
    sin_r, cos_r = np.sin(rot_rad), np.cos(rot_rad)
    # Standard 2-D rotation matrix applied component-wise.
    return [src_point[0] * cos_r - src_point[1] * sin_r,
            src_point[0] * sin_r + src_point[1] * cos_r]
| 31.100313 | 92 | 0.513154 |
import numpy as np
import torch
import scipy.misc
import torch.nn.functional as F
import cv2
from opt import opt
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
def im_to_torch(img):
img = np.transpose(img, (2, 0, 1))
img = to_torch(img).float()
if img.max() > 1:
img /= 255
return img
def torch_to_im(img):
img = to_numpy(img)
img = np.transpose(img, (1, 2, 0))
return img
def load_image(img_path):
return im_to_torch(scipy.misc.imread(img_path, mode='RGB'))
def to_numpy(tensor):
if torch.is_tensor(tensor):
return tensor.cpu().numpy()
elif type(tensor).__module__ != 'numpy':
raise ValueError("Cannot convert {} to numpy array"
.format(type(tensor)))
return tensor
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
def drawGaussian(img, pt, sigma):
img = to_numpy(img)
tmpSize = 3 * sigma
ul = [int(pt[0] - tmpSize), int(pt[1] - tmpSize)]
br = [int(pt[0] + tmpSize + 1), int(pt[1] + tmpSize + 1)]
if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
br[0] < 0 or br[1] < 0):
return to_torch(img)
size = 2 * tmpSize + 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
sigma = size / 4.0
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
img_x = max(0, ul[0]), min(br[0], img.shape[1])
img_y = max(0, ul[1]), min(br[1], img.shape[0])
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return to_torch(img)
def transformBox(pt, ul, br, inpH, inpW, resH, resW):
center = torch.zeros(2)
center[0] = (br[0] - 1 - ul[0]) / 2
center[1] = (br[1] - 1 - ul[1]) / 2
lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
lenW = lenH * inpW / inpH
_pt = torch.zeros(2)
_pt[0] = pt[0] - ul[0]
_pt[1] = pt[1] - ul[1]
_pt[0] = _pt[0] + max(0, (lenW - 1) / 2 - center[0])
_pt[1] = _pt[1] + max(0, (lenH - 1) / 2 - center[1])
pt = (_pt * resH) / lenH
pt[0] = round(float(pt[0]))
pt[1] = round(float(pt[1]))
return pt.int()
def transformBoxInvert(pt, ul, br, inpH, inpW, resH, resW):
center = torch.zeros(2)
center[0] = (br[0] - 1 - ul[0]) / 2
center[1] = (br[1] - 1 - ul[1]) / 2
lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
lenW = lenH * inpW / inpH
_pt = (pt * lenH) / resH
_pt[0] = _pt[0] - max(0, (lenW - 1) / 2 - center[0])
_pt[1] = _pt[1] - max(0, (lenH - 1) / 2 - center[1])
new_point = torch.zeros(2)
new_point[0] = _pt[0] + ul[0]
new_point[1] = _pt[1] + ul[1]
return new_point
def cropBox(img, ul, br, resH, resW):
    """Crop box `ul`..`br` out of `img`, letterbox-pad it to the resW x resH
    aspect ratio, and warp it to a resW x resH patch.

    WARNING: zeroes out every pixel outside the box on `img` IN PLACE before
    warping.

    Args:
        img: torch tensor, (C, H, W) or (H, W); modified in place.
        ul: (x, y) upper-left corner tensor (truncated with .int()).
        br: (x, y) bottom-right corner tensor (exclusive; -1 applied below).
        resH, resW: output patch height and width.

    Returns:
        torch tensor of the cropped, padded, resized patch (via im_to_torch).
    """
    ul = ul.int()
    br = (br - 1).int()
    # Letterbox: grow the shorter side so the crop matches the resH:resW
    # aspect ratio.
    lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
    lenW = lenH * resW / resH
    if img.dim() == 2:
        img = img[np.newaxis, :]
    box_shape = [br[1] - ul[1], br[0] - ul[0]]
    pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
    # Black out everything outside the box (in place) so the affine warp
    # below cannot pull in surrounding context.
    img[:, :ul[1], :], img[:, :, :ul[0]] = 0, 0
    img[:, br[1] + 1:, :], img[:, :, br[0] + 1:] = 0, 0
    # Affine transform from three point pairs: padded box corners map to the
    # output patch corners; get_3rd_point supplies the third, non-collinear
    # point each side needs.
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)
    src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)
    dst[0, :] = 0
    dst[1, :] = np.array([resW - 1, resH - 1], np.float32)
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    dst_img = cv2.warpAffine(torch_to_im(img), trans,
                             (resW, resH), flags=cv2.INTER_LINEAR)
    return im_to_torch(torch.Tensor(dst_img))
def cv_rotate(img, rot, resW, resH):
    """Rotate `img` by `rot` degrees about its center with an affine warp,
    keeping the resW x resH output size. Returns a torch tensor."""
    center = np.array((resW - 1, resH - 1)) / 2
    theta = np.pi * rot / 180
    # Three point pairs fully determine the affine transform: the center,
    # the rotated "up" half-vector, and a derived perpendicular point.
    up = np.array([0, (resH - 1) * -0.5], np.float32)
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center
    src[1, :] = center + get_dir([0, (resH - 1) * -0.5], theta)
    dst[0, :] = [(resW - 1) * 0.5, (resH - 1) * 0.5]
    dst[1, :] = np.array([(resW - 1) * 0.5, (resH - 1) * 0.5]) + up
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    warped = cv2.warpAffine(torch_to_im(img), trans,
                            (resW, resH), flags=cv2.INTER_LINEAR)
    return im_to_torch(torch.Tensor(warped))
def flip_v(x, cuda=False):
    """Horizontally flip a Variable's underlying data and re-wrap it.

    Args:
        x: torch Variable/tensor with .cpu() and .data.
        cuda: kept for interface compatibility. NOTE(review): this flag was a
            no-op in the original code (`if cuda: x = x`); presumably it once
            did `x = x.cuda()` — confirm before re-enabling GPU transfer.

    Returns:
        A new torch.autograd.Variable holding the flipped CPU data.
    """
    # The dead `if cuda: x = x` branch was removed; behavior is unchanged.
    return torch.autograd.Variable(flip(x.cpu().data))
def flip(x):
    """Mirror a CHW or NCHW tensor along its last (width) axis and return a
    new tensor; the input is not modified."""
    assert (x.dim() == 3 or x.dim() == 4)
    arr = x.numpy()
    # Reversing the last axis is equivalent to the transpose/fliplr/transpose
    # dance per sample; .copy() gives torch a contiguous, owned buffer.
    mirrored = arr[..., ::-1].copy()
    return torch.from_numpy(mirrored)
def shuffleLR(x, dataset):
    """Swap left/right joint channels of `x` according to dataset.flipRef.

    `dataset.flipRef` holds 1-indexed (left, right) joint pairs; the joint
    axis is dim 1 for 4D input and dim 0 for 3D input. `x` is modified in
    place and also returned.
    """
    assert (x.dim() == 3 or x.dim() == 4)
    for left, right in dataset.flipRef:
        i, j = left - 1, right - 1
        if x.dim() == 4:
            tmp = x[:, j].clone()
            x[:, j] = x[:, i].clone()
            x[:, i] = tmp.clone()
        else:
            tmp = x[j].clone()
            x[j] = x[i].clone()
            x[i] = tmp.clone()
    return x
def shuffleLR_v(x, dataset, cuda=False):
    """Apply shuffleLR to a Variable's underlying data and re-wrap it.

    Args:
        x: torch Variable/tensor with .cpu() and .data.
        dataset: object exposing `flipRef` (1-indexed left/right joint pairs).
        cuda: kept for interface compatibility. NOTE(review): this flag was a
            no-op in the original code (`if cuda: x = x`); presumably it once
            did `x = x.cuda()` — confirm before re-enabling GPU transfer.

    Returns:
        A new torch.autograd.Variable holding the shuffled CPU data.
    """
    # The dead `if cuda: x = x` branch was removed; behavior is unchanged.
    return torch.autograd.Variable(shuffleLR(x.cpu().data, dataset))
def vis_frame(frame, im_res, format='coco'):
    """Draw pose-estimation results onto a copy of `frame`.

    Args:
        frame: BGR image (numpy array); left unmodified.
        im_res: dict with 'imgname' and 'result'; each entry of 'result'
            holds 'keypoints' (K x 2 coordinates) and 'kp_score' (K scores).
        format: keypoint layout, 'coco' (17 joints) or 'mpii' (16 joints).

    Returns:
        A copy of `frame` with joint circles and limb lines drawn.

    Raises:
        NotImplementedError: for an unknown `format`.
    """
    if format == 'coco':
        # (joint, joint) index pairs forming the limbs to draw.
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (5, 11), (6, 12),
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [RED, RED, RED, RED, RED, YELLOW, YELLOW, YELLOW,
                   YELLOW, YELLOW, YELLOW, GREEN, GREEN, GREEN, GREEN, GREEN, GREEN]
        line_color = [YELLOW, YELLOW, YELLOW, YELLOW, BLUE, BLUE,
                      BLUE, BLUE, BLUE, PURPLE, PURPLE, RED, RED, RED, RED]
    elif format == 'mpii':
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED,
                   RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE,
                      RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        raise NotImplementedError
    # (The unused `im_name` local from the original was removed.)
    img = frame.copy()
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        for n in range(kp_scores.shape[0]):
            # Skip low-confidence joints entirely; they also suppress any
            # limb that would attach to them.
            if kp_scores[n] <= 0.15:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (cor_x, cor_y)
            cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)
        # Connect joints where both endpoints passed the confidence test.
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                # Fix: cv2.line requires an int thickness; the original passed
                # a float ((0.5 * score_sum) + 1), which raises a type error
                # on modern OpenCV builds.
                thickness = int(0.5 * (kp_scores[start_p] + kp_scores[end_p]) + 1)
                cv2.line(img, start_xy, end_xy, line_color[i], thickness)
    return img
def get_3rd_point(a, b):
    """Return the third point completing a right angle at `b`: `b` plus the
    90-degree rotation of the vector a - b (as float32)."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return b + np.array([-dy, dx], dtype=np.float32)
def get_dir(src_point, rot_rad):
    """Rotate the 2D vector `src_point` by `rot_rad` radians (counter-
    clockwise) and return the result as a 2-element list."""
    sn = np.sin(rot_rad)
    cs = np.cos(rot_rad)
    return [src_point[0] * cs - src_point[1] * sn,
            src_point[0] * sn + src_point[1] * cs]
| true | true |
1c498364b124248db0499e5d367de8334f74324d | 462 | py | Python | data/scripts/templates/object/draft_schematic/furniture/shared_furniture_chair_elegant.py | obi-two/GameServer | 7d37024e2291a97d49522610cd8f1dbe5666afc2 | [
"MIT"
] | 20 | 2015-02-23T15:11:56.000Z | 2022-03-18T20:56:48.000Z | data/scripts/templates/object/draft_schematic/furniture/shared_furniture_chair_elegant.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | null | null | null | data/scripts/templates/object/draft_schematic/furniture/shared_furniture_chair_elegant.py | apathyboy/swganh | 665128efe9154611dec4cb5efc61d246dd095984 | [
"MIT"
] | 20 | 2015-04-04T16:35:59.000Z | 2022-03-24T14:54:37.000Z | #### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Template factory for the elegant furniture chair draft schematic.

	NOTE: this file is marked autogenerated; comments may be lost if the
	template is regenerated.

	Args:
		kernel: engine kernel supplied by the template loader (unused here).

	Returns:
		Intangible: template object referencing the shared .iff resource.
	"""
	result = Intangible()
	result.template = "object/draft_schematic/furniture/shared_furniture_chair_elegant.iff"
	result.attribute_template_id = -1
	result.stfName("string_id_table","")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
return result | 27.176471 | 88 | 0.735931 | true | true | |
1c4983c64dbb362dcacbdb6c9d607d9aba2da2ce | 509 | py | Python | pythran/tests/euler/euler10.py | artas360/pythran | 66dad52d52be71693043e9a7d7578cfb9cb3d1da | [
"BSD-3-Clause"
] | null | null | null | pythran/tests/euler/euler10.py | artas360/pythran | 66dad52d52be71693043e9a7d7578cfb9cb3d1da | [
"BSD-3-Clause"
] | null | null | null | pythran/tests/euler/euler10.py | artas360/pythran | 66dad52d52be71693043e9a7d7578cfb9cb3d1da | [
"BSD-3-Clause"
] | 1 | 2017-03-12T20:32:36.000Z | 2017-03-12T20:32:36.000Z | #runas solve(2000000)
#pythran export solve(int)
def solve(max):
    '''
    The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
    Find the sum of all the primes below two million.

    Returns the sum of all primes strictly below `max` (0 when max <= 2)
    using a sieve of Eratosthenes.
    '''
    # Fix: the original used Python-2-only `xrange`; `range` behaves
    # identically here and works on both Python 2 and 3.
    sieve = [True] * max # Sieve is faster for 2M primes
    def mark(sieve, x):
        # Knock out every multiple of the prime x, starting at 2x.
        for i in range(x + x, len(sieve), x):
            sieve[i] = False
    # Only candidates up to sqrt(max) need to sieve.
    for x in range(2, int(len(sieve) ** 0.5) + 1):
        if sieve[x]: mark(sieve, x)
    # Indices 0 and 1 are skipped; every surviving index is prime.
    return sum(i for i in range(2, len(sieve)) if sieve[i])
| 25.45 | 60 | 0.563851 |
def solve(max):
    # Project Euler 10 — sum of all primes below `max` via a sieve of
    # Eratosthenes. (This copy is the machine-stripped duplicate column of
    # the commented version in this dump; code kept byte-identical.)
    # NOTE: `xrange` is Python 2 only.
    sieve = [True] * max
    def mark(sieve, x):
        # Knock out every multiple of x, starting at 2x.
        for i in xrange(x+x, len(sieve), x):
            sieve[i] = False
    # Only candidates up to sqrt(max) need to sieve.
    for x in xrange(2, int(len(sieve) ** 0.5) + 1):
        if sieve[x]: mark(sieve, x)
    # Surviving indices >= 2 are prime.
    return sum(i for i in xrange(2, len(sieve)) if sieve[i])
| true | true |
1c4984353c9bf656314d3233f534932929e34855 | 2,833 | py | Python | cvjyo.py | Aravind-Suresh/CVJyo | 6cb324fb538a50939335fd28ee90e23fbb32f2c0 | [
"MIT"
] | null | null | null | cvjyo.py | Aravind-Suresh/CVJyo | 6cb324fb538a50939335fd28ee90e23fbb32f2c0 | [
"MIT"
] | null | null | null | cvjyo.py | Aravind-Suresh/CVJyo | 6cb324fb538a50939335fd28ee90e23fbb32f2c0 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import sys
import math
def markPoints(pts, img):
    """Draw a small filled black dot on `img` at each (x, y) entry of `pts`
    (drawing happens in place)."""
    for point in pts:
        center = (point[0], point[1])
        cv2.circle(img, center, 2, 0, -1)
def contourAreaComparator(cnt1, cnt2):
    """cmp-style comparator ordering contours by ascending area.

    Fix: the original returned -1 for equal areas, violating the cmp
    contract (cmp(a, b) must equal -cmp(b, a)); equal areas now compare
    as 0. NOTE(review): `list.sort(cmp)` is Python 2 only — under Python 3
    pass `key=cv2.contourArea` instead.
    """
    area1 = cv2.contourArea(cnt1)
    area2 = cv2.contourArea(cnt2)
    if area1 > area2:
        return 1
    if area1 < area2:
        return -1
    return 0
def orderClockwise(ptsO, pt):
    """Sort the rows of `ptsO` by angle around the reference point `pt`.

    The angle is atan(y/x) for x > 0 and pi + atan(y/x) otherwise, i.e. a
    continuous sweep over (-pi/2, 3*pi/2); original row order breaks ties.
    NOTE(review): a point with x exactly equal to pt's x triggers a float32
    division by zero (inf/nan angle with a runtime warning) — confirm
    callers never pass such points.
    """
    rel = np.array(ptsO - np.asarray(pt), dtype=np.float32)
    angles = []
    for x, y in rel:
        if x > 0:
            angles.append(math.atan(y / x))
        else:
            angles.append(math.pi + math.atan(y / x))
    order = [idx for _, idx in sorted(zip(angles, range(len(ptsO))))]
    return ptsO[order]
# Main script. Usage: python cvjyo.py <image_path>
# Segments the largest bright region of the image (presumably a hand/palm —
# the repo name suggests palm-line reading; TODO confirm), masks a polygonal
# ROI from its convexity defects, and outlines candidate contours inside it.

# Load as grayscale and smooth before thresholding.
img = cv2.imread(sys.argv[1], 0)
img = cv2.GaussianBlur(img, (5, 5), 0)
height,width = img.shape
# Otsu's method picks the global foreground/background threshold.
_,otsu = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow("img", otsu); cv2.waitKey(0);
imgAnd = cv2.bitwise_and(img, otsu)
cv2.imshow("img", imgAnd); cv2.waitKey(0);
# The largest contour of the binary mask is taken as the object of interest.
_, contours, hierarchy = cv2.findContours(otsu, 1, 2)
area = []
for cnt in contours:
    area.append(cv2.contourArea(cnt))
area = np.array(area)
idx = np.max(area)
idx = np.where(area==idx)[0][0]
cnt = contours[idx]
# Convexity defects (index form) mark the deep valleys of the contour.
hull = cv2.convexHull(cnt, returnPoints = False)
defects = cv2.convexityDefects(cnt, hull)
for d in defects:
    s, e, f, appr = d[0]
    cv2.circle(imgAnd, tuple(cnt[f][0]), 2, 255, -1)
# The distance-transform maximum approximates the region's interior center.
dt = cv2.distanceTransform(otsu, cv2.DIST_L2, 3)
cv2.normalize(dt, dt, 0.0, 1.0, cv2.NORM_MINMAX);
cv2.imshow("img", dt);cv2.waitKey(0)
idx = np.where(dt==np.max(dt))
pt = (idx[1][0], idx[0][0])
# Keep only defect points below 40% of the height and right of 20% of the
# width, discarding defects near the top/left image edges.
defPts = cnt[defects[:, 0, 2]]
defPts = defPts.reshape(-1,2)
thrDistTop = int(0.4*height)
thrDistLeft = int(0.2*width)
defPts = defPts[np.where(defPts[:, 1] > thrDistTop)[0]]
defPts = defPts[np.where(defPts[:, 0] > thrDistLeft)[0]]
#markPoints(defPts, img)
#cv2.imshow("img", img); cv2.waitKey(0)
# Order the kept points by angle around the center so they form a simple
# polygon for fillPoly below.
defPtsC = defPts.copy()
defPts = orderClockwise(defPtsC, pt)
# ii = 0
# for p in defPts:
#     cv2.putText(img, str(ii), (p[0], p[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, 255)
#     ii = ii + 1
# cv2.imshow("img", img); cv2.waitKey(0)
# Build the polygonal ROI mask, adaptively threshold inside it, and erode
# the mask so the polygon border itself is excluded from the result.
boundImg = np.zeros((height,width), np.uint8)
cv2.fillPoly(boundImg, [defPts], 255)
imgRoi = cv2.bitwise_and(img, boundImg)
imgRoi = cv2.adaptiveThreshold(imgRoi, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
kernel = np.ones((5,5),np.uint8)
boundImg = cv2.erode(boundImg,kernel,iterations = 1)
imgRoi = cv2.bitwise_and(imgRoi, boundImg)
cv2.imshow("img", imgRoi); cv2.waitKey(0)
# Trace the ROI's contours and highlight the 2nd..6th largest ones.
# NOTE(review): contours.sort(cmp) is Python 2 only; Python 3 needs
# key=cv2.contourArea instead.
imgRoiC = imgRoi.copy()
_, contours, hierarchy = cv2.findContours(imgRoiC, 1, 2)
contours.sort(contourAreaComparator)
l = len(contours)
ll = np.arange(l-6, l-1)
imgColor = cv2.imread(sys.argv[1])
for idx in ll:
    cv2.drawContours(imgRoi, contours, idx, 127, 3)
    cv2.drawContours(imgColor, contours, idx, (0, 0, 255), 3)
cv2.imshow("img", imgColor); cv2.waitKey(0) | 26.476636 | 101 | 0.678433 | import cv2
import numpy as np
import sys
import math
# NOTE(review): everything below is a machine-stripped duplicate (comments
# removed) of the cvjyo.py source found earlier in this dump — dataset
# residue rather than hand-maintained code. Kept byte-identical; only these
# review comments were added.
def markPoints(pts, img):
    # Draw a small filled black dot on img at each (x, y) in pts.
    for pt in pts:
        cv2.circle(img, tuple((pt[0], pt[1])), 2, 0, -1)
def contourAreaComparator(cnt1, cnt2):
    # Python 2 cmp-style comparator by contour area.
    # NOTE(review): returns -1 for equal areas, violating the cmp contract.
    if cv2.contourArea(cnt1) > cv2.contourArea(cnt2):
        return 1
    else:
        return -1
def orderClockwise(ptsO, pt):
    # Sort points by angle around pt: atan for x > 0, pi + atan otherwise,
    # a continuous sweep over (-pi/2, 3*pi/2).
    pts = ptsO - np.asarray(pt)
    pts = np.array(pts, dtype=np.float32)
    slopes = []
    for p in pts:
        if p[0] > 0:
            slopes.append(math.atan(p[1]/p[0]))
        else:
            slopes.append(math.pi + math.atan(p[1]/p[0]))
    ptsSorted = [y for x, y in sorted(zip(list(slopes), list(np.arange(len(ptsO)))))]
    ptsSorted = ptsO[ptsSorted]
    return ptsSorted
# Main script (duplicate): threshold the image, find the largest contour and
# its convexity defects, mask a polygonal ROI around the distance-transform
# peak, then outline the 2nd..6th largest contours inside it.
img = cv2.imread(sys.argv[1], 0)
img = cv2.GaussianBlur(img, (5, 5), 0)
height,width = img.shape
_,otsu = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow("img", otsu); cv2.waitKey(0);
imgAnd = cv2.bitwise_and(img, otsu)
cv2.imshow("img", imgAnd); cv2.waitKey(0);
_, contours, hierarchy = cv2.findContours(otsu, 1, 2)
area = []
for cnt in contours:
    area.append(cv2.contourArea(cnt))
area = np.array(area)
idx = np.max(area)
idx = np.where(area==idx)[0][0]
cnt = contours[idx]
hull = cv2.convexHull(cnt, returnPoints = False)
defects = cv2.convexityDefects(cnt, hull)
for d in defects:
    s, e, f, appr = d[0]
    cv2.circle(imgAnd, tuple(cnt[f][0]), 2, 255, -1)
dt = cv2.distanceTransform(otsu, cv2.DIST_L2, 3)
cv2.normalize(dt, dt, 0.0, 1.0, cv2.NORM_MINMAX);
cv2.imshow("img", dt);cv2.waitKey(0)
idx = np.where(dt==np.max(dt))
pt = (idx[1][0], idx[0][0])
defPts = cnt[defects[:, 0, 2]]
defPts = defPts.reshape(-1,2)
thrDistTop = int(0.4*height)
thrDistLeft = int(0.2*width)
defPts = defPts[np.where(defPts[:, 1] > thrDistTop)[0]]
defPts = defPts[np.where(defPts[:, 0] > thrDistLeft)[0]]
defPtsC = defPts.copy()
defPts = orderClockwise(defPtsC, pt)
boundImg = np.zeros((height,width), np.uint8)
cv2.fillPoly(boundImg, [defPts], 255)
imgRoi = cv2.bitwise_and(img, boundImg)
imgRoi = cv2.adaptiveThreshold(imgRoi, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
kernel = np.ones((5,5),np.uint8)
boundImg = cv2.erode(boundImg,kernel,iterations = 1)
imgRoi = cv2.bitwise_and(imgRoi, boundImg)
cv2.imshow("img", imgRoi); cv2.waitKey(0)
imgRoiC = imgRoi.copy()
_, contours, hierarchy = cv2.findContours(imgRoiC, 1, 2)
contours.sort(contourAreaComparator)
l = len(contours)
ll = np.arange(l-6, l-1)
imgColor = cv2.imread(sys.argv[1])
for idx in ll:
    cv2.drawContours(imgRoi, contours, idx, 127, 3)
    cv2.drawContours(imgColor, contours, idx, (0, 0, 255), 3)
cv2.imshow("img", imgColor); cv2.waitKey(0) | true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.