repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
slipstream/SlipStreamClient | client/src/main/python/slipstream/Logger.py | 1 | 1610 | import os
import errno
import logging
from logging.handlers import RotatingFileHandler
class Logger(object):
    """Configure and expose a ``logging.Logger`` for the SlipStream client.

    Settings (``log_to_file``, ``log_level``, ``logger_name``, ``log_file``)
    start from the defaults below and may be overridden by
    ``config_holder.assign(self)``.
    """

    LOGGER_NAME = 'SSClient'
    LOGFILE_MAXBYTES = 2 * 1024 * 1024  # rotate after 2 MiB
    LOGFILE_BACKUPCOUNT = 5
    LOGFILE_FORMAT = "%(asctime)s:%(levelname)s:%(name)s:%(message)s"

    # Default location; instances may get a different path assigned by the
    # config holder before the handler is created.
    log_file = '/var/log/slipstream/client/slipstream-node.log'

    def __init__(self, config_holder):
        """Build and configure the logger.

        :param config_holder: object with an ``assign(obj)`` method that
            copies configuration values onto this instance.
        :raises ValueError: if the configured log level name is invalid.
        """
        self.log_to_file = True
        self.log_level = 'info'
        self.logger_name = ''
        config_holder.assign(self)
        self.logger = None
        self._configure_logger()

    def _configure_logger(self):
        """Create the logger, set its level and attach one handler."""
        self.logger = logging.getLogger(self.logger_name or Logger.LOGGER_NAME)
        numeric_level = getattr(logging, self.log_level.upper(), None)
        if not isinstance(numeric_level, int):
            raise ValueError('Invalid log level: %s' % self.log_level)
        self.logger.setLevel(numeric_level)
        formatter = logging.Formatter(self.LOGFILE_FORMAT)
        if self.log_to_file:
            self._create_log_dir()
            handler = RotatingFileHandler(self.log_file,
                                          maxBytes=self.LOGFILE_MAXBYTES,
                                          backupCount=self.LOGFILE_BACKUPCOUNT)
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)

    def _create_log_dir(self):
        """Create the log file's directory; an existing directory is fine."""
        log_dir = os.path.dirname(self.log_file)
        try:
            os.makedirs(log_dir)
        # BUG FIX: original used Python-2-only 'except OSError, ex:' syntax,
        # which is a SyntaxError on Python 3; 'as' works on Py2.6+ and Py3.
        except OSError as ex:
            if ex.errno != errno.EEXIST:
                raise

    def get_logger(self):
        """Return the configured ``logging.Logger`` instance."""
        return self.logger
| apache-2.0 |
HyperBaton/ansible | lib/ansible/modules/network/f5/bigip_asm_policy_manage.py | 38 | 32848 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_asm_policy_manage
short_description: Manage BIG-IP ASM policies
description:
- Manage BIG-IP ASM policies, create from templates and manage global policy settings.
version_added: 2.8
options:
active:
description:
- If C(yes) will apply and activate existing inactive policy. If C(no), it will
deactivate existing active policy. Generally should be C(yes) only in cases where
you want to activate new or existing policy.
default: no
type: bool
name:
description:
- The ASM policy to manage or create.
type: str
required: True
state:
description:
- When C(state) is C(present), and C(template) parameter is provided,
new ASM policy is created from template with the given policy C(name).
- When C(state) is present and no C(template) parameter is provided
new blank ASM policy is created with the given policy C(name).
- When C(state) is C(absent), ensures that the policy is removed, even if it is
currently active.
type: str
choices:
- present
- absent
default: present
template:
description:
- An ASM policy built-in template. If the template does not exist we will raise an error.
- Once the policy has been created, this value cannot change.
- The C(Comprehensive), C(Drupal), C(Fundamental), C(Joomla),
C(Vulnerability Assessment Baseline), and C(Wordpress) templates are only available
on BIG-IP versions >= 13.
type: str
choices:
- ActiveSync v1.0 v2.0 (http)
- ActiveSync v1.0 v2.0 (https)
- Comprehensive
- Drupal
- Fundamental
- Joomla
- LotusDomino 6.5 (http)
- LotusDomino 6.5 (https)
- OWA Exchange 2003 (http)
- OWA Exchange 2003 (https)
- OWA Exchange 2003 with ActiveSync (http)
- OWA Exchange 2003 with ActiveSync (https)
- OWA Exchange 2007 (http)
- OWA Exchange 2007 (https)
- OWA Exchange 2007 with ActiveSync (http)
- OWA Exchange 2007 with ActiveSync (https)
- OWA Exchange 2010 (http)
- OWA Exchange 2010 (https)
- Oracle 10g Portal (http)
- Oracle 10g Portal (https)
- Oracle Applications 11i (http)
- Oracle Applications 11i (https)
- PeopleSoft Portal 9 (http)
- PeopleSoft Portal 9 (https)
- Rapid Deployment Policy
- SAP NetWeaver 7 (http)
- SAP NetWeaver 7 (https)
- SharePoint 2003 (http)
- SharePoint 2003 (https)
- SharePoint 2007 (http)
- SharePoint 2007 (https)
- SharePoint 2010 (http)
- SharePoint 2010 (https)
- Vulnerability Assessment Baseline
- Wordpress
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5
author:
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create ASM policy from template
bigip_asm_policy_manage:
name: new_sharepoint_policy
template: SharePoint 2007 (http)
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Create blank ASM policy
bigip_asm_policy_manage:
name: new_blank_policy
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Create blank ASM policy and activate
bigip_asm_policy_manage:
name: new_blank_policy
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Activate ASM policy
bigip_asm_policy_manage:
name: inactive_policy
active: yes
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
- name: Deactivate ASM policy
bigip_asm_policy_manage:
name: active_policy
state: present
provider:
server: lb.mydomain.com
user: admin
password: secret
delegate_to: localhost
'''
RETURN = r'''
active:
description: Set when activating/deactivating ASM policy
returned: changed
type: bool
sample: yes
state:
description: Action performed on the target device.
returned: changed
type: str
sample: absent
template:
description: Name of the built-in ASM policy template
returned: changed
type: str
sample: OWA Exchange 2007 (https)
name:
description: Name of the ASM policy to be managed/created
returned: changed
type: str
sample: Asm_APP1_Transparent
'''
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from distutils.version import LooseVersion
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.icontrol import tmos_version
from library.module_utils.network.f5.icontrol import module_provisioned
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.icontrol import tmos_version
from ansible.module_utils.network.f5.icontrol import module_provisioned
class Parameters(AnsibleF5Parameters):
    """Shared parameter handling for the ASM policy module.

    The class-level lists drive the generic machinery inherited from
    AnsibleF5Parameters: ``updatables`` feeds the diff engine,
    ``returnables`` selects what ``to_return`` reports, and
    ``api_attributes`` selects what is sent to the REST API.
    """

    updatables = [
        'active',
    ]

    returnables = [
        'name',
        'template',
        'active',
    ]

    api_attributes = [
        'name',
        'active',
    ]

    api_map = {
    }

    @property
    def template_link(self):
        """Return a dict with the selfLink of the chosen policy template.

        Returns the cached value when present; otherwise queries the
        device's policy-templates collection, filtering by the
        upper-cased template name, and returns ``None`` when no template
        matches.
        """
        if self._values['template_link'] is not None:
            return self._values['template_link']
        result = None
        uri = "https://{0}:{1}/mgmt/tm/asm/policy-templates/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        # Template names on the device are upper-case constants, hence
        # the .upper() before filtering.
        query = "?$filter=contains(name,'{0}')".format(self.template.upper())
        resp = self.client.api.get(uri + query)
        try:
            response = resp.json()
        except ValueError as ex:
            # Non-JSON body means something went wrong at the HTTP layer.
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        if 'items' in response and response['items'] != []:
            # First match wins; the filter is a 'contains' match.
            result = dict(link=response['items'][0]['selfLink'])
        return result

    def to_return(self):
        """Return the filtered dict of returnable parameter values."""
        result = {}
        for returnable in self.returnables:
            result[returnable] = getattr(self, returnable)
        result = self._filter_params(result)
        return result
class V1Parameters(Parameters):
    """Parameters for BIG-IP versions < 13.

    Maps the human-readable template names accepted by the module to the
    POLICY_TEMPLATE_* constants used by the API. The v13-only templates
    (Comprehensive, Drupal, Fundamental, Joomla, Vulnerability Assessment
    Baseline, Wordpress) are deliberately absent from this map.
    """

    @property
    def template(self):
        """Translate the friendly template name to its API constant.

        :raises F5ModuleError: when the requested template is not
            available on this (pre-13) BIG-IP version.
        """
        if self._values['template'] is None:
            return None
        template_map = {
            'ActiveSync v1.0 v2.0 (http)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP',
            'ActiveSync v1.0 v2.0 (https)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS',
            'LotusDomino 6.5 (http)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP',
            'LotusDomino 6.5 (https)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS',
            'OWA Exchange 2003 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP',
            'OWA Exchange 2003 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS',
            'OWA Exchange 2003 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP',
            'OWA Exchange 2003 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS',
            'OWA Exchange 2007 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP',
            'OWA Exchange 2007 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS',
            'OWA Exchange 2007 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP',
            'OWA Exchange 2007 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS',
            'OWA Exchange 2010 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP',
            'OWA Exchange 2010 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS',
            'Oracle 10g Portal (http)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP',
            'Oracle 10g Portal (https)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS',
            'Oracle Applications 11i (http)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP',
            'Oracle Applications 11i (https)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS',
            'PeopleSoft Portal 9 (http)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP',
            'PeopleSoft Portal 9 (https)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS',
            'Rapid Deployment Policy': 'POLICY_TEMPLATE_RAPID_DEPLOYMENT',
            'SAP NetWeaver 7 (http)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP',
            'SAP NetWeaver 7 (https)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS',
            'SharePoint 2003 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP',
            'SharePoint 2003 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS',
            'SharePoint 2007 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP',
            'SharePoint 2007 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS',
            'SharePoint 2010 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP',
            'SharePoint 2010 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS'
        }
        if self._values['template'] in template_map:
            return template_map[self._values['template']]
        else:
            raise F5ModuleError(
                "The specified template is not valid for this version of BIG-IP."
            )
class V2Parameters(Parameters):
    """Parameters for BIG-IP versions >= 13.

    Same mapping as V1Parameters plus the templates introduced in v13
    (Comprehensive, Drupal, Fundamental, Joomla, Vulnerability Assessment
    Baseline, Wordpress).
    """

    @property
    def template(self):
        """Translate the friendly template name to its API constant.

        :raises F5ModuleError: when the requested template name is unknown.
        """
        if self._values['template'] is None:
            return None
        template_map = {
            'ActiveSync v1.0 v2.0 (http)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP',
            'ActiveSync v1.0 v2.0 (https)': 'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS',
            'Comprehensive': 'POLICY_TEMPLATE_COMPREHENSIVE',  # v13
            'Drupal': 'POLICY_TEMPLATE_DRUPAL',  # v13
            'Fundamental': 'POLICY_TEMPLATE_FUNDAMENTAL',  # v13
            'Joomla': 'POLICY_TEMPLATE_JOOMLA',  # v13
            'LotusDomino 6.5 (http)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP',
            'LotusDomino 6.5 (https)': 'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS',
            'OWA Exchange 2003 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP',
            'OWA Exchange 2003 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS',
            'OWA Exchange 2003 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP',
            'OWA Exchange 2003 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS',
            'OWA Exchange 2007 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP',
            'OWA Exchange 2007 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS',
            'OWA Exchange 2007 with ActiveSync (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP',
            'OWA Exchange 2007 with ActiveSync (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS',
            'OWA Exchange 2010 (http)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP',
            'OWA Exchange 2010 (https)': 'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS',
            'Oracle 10g Portal (http)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP',
            'Oracle 10g Portal (https)': 'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS',
            'Oracle Applications 11i (http)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP',
            'Oracle Applications 11i (https)': 'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS',
            'PeopleSoft Portal 9 (http)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP',
            'PeopleSoft Portal 9 (https)': 'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS',
            'Rapid Deployment Policy': 'POLICY_TEMPLATE_RAPID_DEPLOYMENT',
            'SAP NetWeaver 7 (http)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP',
            'SAP NetWeaver 7 (https)': 'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS',
            'SharePoint 2003 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP',
            'SharePoint 2003 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS',
            'SharePoint 2007 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP',
            'SharePoint 2007 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS',
            'SharePoint 2010 (http)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP',
            'SharePoint 2010 (https)': 'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS',
            'Vulnerability Assessment Baseline': 'POLICY_TEMPLATE_VULNERABILITY_ASSESSMENT',  # v13
            'Wordpress': 'POLICY_TEMPLATE_WORDPRESS'  # v13
        }
        # CONSISTENCY FIX: the original did a bare dict lookup here, so an
        # unknown template raised an opaque KeyError; V1Parameters raises
        # F5ModuleError for the same situation, so do the same here.
        if self._values['template'] in template_map:
            return template_map[self._values['template']]
        raise F5ModuleError(
            "The specified template is not valid for this version of BIG-IP."
        )
class Changes(Parameters):
    """Holds the computed set of changes to report/apply.

    Its ``template`` property is the inverse of the V1/V2Parameters maps:
    it converts the API's POLICY_TEMPLATE_* constant back to the friendly
    name shown to the user.
    """

    @property
    def template(self):
        """Translate an API template constant back to its friendly name.

        NOTE(review): a bare dict lookup — an unrecognized constant coming
        back from the device would raise KeyError; presumably the device
        only returns values from this known set.
        """
        if self._values['template'] is None:
            return None
        template_map = {
            'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTP': 'ActiveSync v1.0 v2.0 (http)',
            'POLICY_TEMPLATE_ACTIVESYNC_V1_0_V2_0_HTTPS': 'ActiveSync v1.0 v2.0 (https)',
            'POLICY_TEMPLATE_COMPREHENSIVE': 'Comprehensive',
            'POLICY_TEMPLATE_DRUPAL': 'Drupal',
            'POLICY_TEMPLATE_FUNDAMENTAL': 'Fundamental',
            'POLICY_TEMPLATE_JOOMLA': 'Joomla',
            'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTP': 'LotusDomino 6.5 (http)',
            'POLICY_TEMPLATE_LOTUSDOMINO_6_5_HTTPS': 'LotusDomino 6.5 (https)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTP': 'OWA Exchange 2003 (http)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2003_HTTPS': 'OWA Exchange 2003 (https)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTP': 'OWA Exchange 2003 with ActiveSync (http)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2003_WITH_ACTIVESYNC_HTTPS': 'OWA Exchange 2003 with ActiveSync (https)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTP': 'OWA Exchange 2007 (http)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2007_HTTPS': 'OWA Exchange 2007 (https)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTP': 'OWA Exchange 2007 with ActiveSync (http)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2007_WITH_ACTIVESYNC_HTTPS': 'OWA Exchange 2007 with ActiveSync (https)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTP': 'OWA Exchange 2010 (http)',
            'POLICY_TEMPLATE_OWA_EXCHANGE_2010_HTTPS': 'OWA Exchange 2010 (https)',
            'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTP': 'Oracle 10g Portal (http)',
            'POLICY_TEMPLATE_ORACLE_10G_PORTAL_HTTPS': 'Oracle 10g Portal (https)',
            'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTP': 'Oracle Applications 11i (http)',
            'POLICY_TEMPLATE_ORACLE_APPLICATIONS_11I_HTTPS': 'Oracle Applications 11i (https)',
            'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTP': 'PeopleSoft Portal 9 (http)',
            'POLICY_TEMPLATE_PEOPLESOFT_PORTAL_9_HTTPS': 'PeopleSoft Portal 9 (https)',
            'POLICY_TEMPLATE_RAPID_DEPLOYMENT': 'Rapid Deployment Policy',
            'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTP': 'SAP NetWeaver 7 (http)',
            'POLICY_TEMPLATE_SAP_NETWEAVER_7_HTTPS': 'SAP NetWeaver 7 (https)',
            'POLICY_TEMPLATE_SHAREPOINT_2003_HTTP': 'SharePoint 2003 (http)',
            'POLICY_TEMPLATE_SHAREPOINT_2003_HTTPS': 'SharePoint 2003 (https)',
            'POLICY_TEMPLATE_SHAREPOINT_2007_HTTP': 'SharePoint 2007 (http)',
            'POLICY_TEMPLATE_SHAREPOINT_2007_HTTPS': 'SharePoint 2007 (https)',
            'POLICY_TEMPLATE_SHAREPOINT_2010_HTTP': 'SharePoint 2010 (http)',
            'POLICY_TEMPLATE_SHAREPOINT_2010_HTTPS': 'SharePoint 2010 (https)',
            'POLICY_TEMPLATE_VULNERABILITY_ASSESSMENT': 'Vulnerability Assessment Baseline',
            'POLICY_TEMPLATE_WORDPRESS': 'Wordpress',
        }
        return template_map[self._values['template']]
class Difference(object):
    """Compare desired ('want') values against current ('have') values."""

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for *param*, or None when unchanged.

        A property defined on this class (e.g. ``active``) takes priority;
        anything else falls through to the generic attribute comparison.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    def __default(self, param):
        # Generic rule: report the desired value when it differs from the
        # current one, or when the current side lacks the attribute at all.
        desired = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
        except AttributeError:
            return desired
        if desired != current:
            return desired

    @property
    def active(self):
        # Report a change only when want/have explicitly disagree on the
        # activation flag; any other combination means "no change" (None).
        if self.want.active is True and self.have.active is False:
            return True
        if self.want.active is False and self.have.active is True:
            return False
class BaseManager(object):
    """Common create/update/remove/activate logic for ASM policies.

    Subclasses (V1Manager / V2Manager) set ``self.want`` and provide the
    version-specific ``create_from_template`` implementation.
    """

    def __init__(self, *args, **kwargs):
        self.client = kwargs.get('client', None)
        self.module = kwargs.get('module', None)
        self.have = None  # current device state, fetched lazily
        self.changes = Changes()

    def exec_module(self):
        """Dispatch on the desired state and build the module result dict."""
        changed = False
        result = dict()
        state = self.want.state
        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()
        changes = self.changes.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        return result

    def _set_changed_options(self):
        """Seed ``self.changes`` with every non-None desired value (create path)."""
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = Changes(params=changed)

    def should_update(self):
        """Return True when a diff against device state found changes."""
        result = self._update_changed_options()
        if result:
            return True
        return False

    def _update_changed_options(self):
        """Diff want vs have for each updatable and record the changes."""
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue  # None means "no change" for this parameter
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = Changes(params=changed)
            return True
        return False

    def present(self):
        """Ensure the policy exists (update it if present, else create)."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def absent(self):
        """Ensure the policy does not exist."""
        if not self.exists():
            return False
        else:
            return self.remove()

    def create(self):
        """Create the policy (blank or from template), then optionally activate."""
        if self.want.active is None:
            self.want.update(dict(active=False))
        self._set_changed_options()
        if self.module.check_mode:
            return True
        if self.want.template is not None:
            self.create_from_template()
        if self.want.template is None:
            self.create_blank()
        # A freshly created policy always counts as changed; activation is
        # an extra step done via the apply-policy task.
        if self.want.active:
            self.activate()
            return True
        else:
            return True

    def update(self):
        """Patch the existing policy when the diff reports changes."""
        self.have = self.read_current_from_device()
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        if self.changes.active:
            self.activate()
        return True

    def activate(self):
        """Apply (activate) the policy via the apply-policy task."""
        self.have = self.read_current_from_device()
        task_id = self.apply_on_device()
        if self.wait_for_task(task_id):
            return True
        else:
            raise F5ModuleError('Apply policy task failed.')

    def create_blank(self):
        """Create an empty policy and verify it now exists."""
        self.create_on_device()
        if self.exists():
            return True
        else:
            raise F5ModuleError(
                'Failed to create ASM policy: {0}'.format(self.want.name)
            )

    def remove(self):
        """Delete the policy and verify it is gone."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError(
                'Failed to delete ASM policy: {0}'.format(self.want.name)
            )
        return True

    def is_activated(self):
        """Return True when activation was requested."""
        if self.want.active is True:
            return True
        else:
            return False

    def exists(self):
        """Return True when a policy with this name/partition exists on device."""
        uri = 'https://{0}:{1}/mgmt/tm/asm/policies/'.format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,partition".format(
            self.want.name, self.want.partition
        )
        resp = self.client.api.get(uri + query)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'items' in response and response['items'] != []:
            return True
        return False

    def wait_for_task(self, task_id):
        """Poll the apply-policy task until it completes; True on success.

        NOTE(review): polls once per second with no overall timeout — a
        task stuck in a non-terminal state would loop forever.
        """
        uri = "https://{0}:{1}/mgmt/tm/asm/tasks/apply-policy/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            task_id
        )
        while True:
            resp = self.client.api.get(uri)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)
            if response['status'] in ['COMPLETED', 'FAILURE']:
                break
            time.sleep(1)
        if response['status'] == 'FAILURE':
            return False
        if response['status'] == 'COMPLETED':
            return True

    def _get_policy_id(self):
        """Look up the device-side policy id for the want name/partition."""
        policy_id = None
        uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        query = "?$filter=contains(name,'{0}')+and+contains(partition,'{1}')&$select=name,id".format(
            self.want.name, self.want.partition
        )
        resp = self.client.api.get(uri + query)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'items' in response and response['items'] != []:
            policy_id = response['items'][0]['id']
        if not policy_id:
            raise F5ModuleError("The policy was not found")
        return policy_id

    def update_on_device(self):
        """PATCH the changed attributes onto the existing policy.

        Only patches when 'active' is falsy — activation itself goes
        through the apply-policy task (see the comment in
        create_on_device). NOTE(review): assumes 'active' is always
        present in the changed params; a KeyError would result otherwise.
        """
        params = self.changes.api_params()
        policy_id = self._get_policy_id()
        uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            policy_id
        )
        if not params['active']:
            resp = self.client.api.patch(uri, json=params)
            try:
                response = resp.json()
            except ValueError as ex:
                raise F5ModuleError(str(ex))
            if 'code' in response and response['code'] == 400:
                if 'message' in response:
                    raise F5ModuleError(response['message'])
                else:
                    raise F5ModuleError(resp.content)

    def read_current_from_device(self):
        """Fetch the current policy from the device as a Parameters object."""
        policy_id = self._get_policy_id()
        uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            policy_id
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        # Expose the selfLink under the snake_case key the rest of the
        # code expects (used by apply_on_device).
        response.update((dict(self_link=response['selfLink'])))
        return Parameters(params=response)

    def apply_on_device(self):
        """Start an apply-policy task for the current policy; return task id."""
        uri = "https://{0}:{1}/mgmt/tm/asm/tasks/apply-policy/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        params = dict(policyReference={'link': self.have.self_link})
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return response['id']

    def create_from_template_on_device(self):
        """Create the policy from a built-in template via a remote tmsh call."""
        full_name = fq_name(self.want.partition, self.want.name)
        cmd = 'tmsh create asm policy {0} policy-template {1}'.format(full_name, self.want.template)
        uri = "https://{0}:{1}/mgmt/tm/util/bash/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        args = dict(
            command='run',
            utilCmdArgs='-c "{0}"'.format(cmd)
        )
        resp = self.client.api.post(uri, json=args)
        try:
            response = resp.json()
            # tmsh reports failures in commandResult rather than via HTTP
            # status, so inspect it explicitly.
            if 'commandResult' in response:
                if 'Unexpected Error' in response['commandResult']:
                    raise F5ModuleError(response['commandResult'])
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def create_on_device(self):
        """POST a new (blank) policy to the device."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        # we need to remove active from params as API will raise an error if the active is set to True,
        # policies can only be activated via apply-policy task endpoint.
        params.pop('active')
        uri = "https://{0}:{1}/mgmt/tm/asm/policies/".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 401, 403]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        # Brief pause before the follow-up exists() check; presumably gives
        # the device time to register the new policy — TODO confirm.
        time.sleep(2)
        return True

    def remove_from_device(self):
        """DELETE the policy from the device."""
        policy_id = self._get_policy_id()
        uri = "https://{0}:{1}/mgmt/tm/asm/policies/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            policy_id
        )
        response = self.client.api.delete(uri)
        if response.status in [200, 201]:
            return True
        raise F5ModuleError(response.content)
class ModuleManager(object):
    """Entry point: checks provisioning and delegates to a version manager."""

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.kwargs = kwargs

    def exec_module(self):
        """Run the version-appropriate manager and return its result."""
        if not module_provisioned(self.client, 'asm'):
            raise F5ModuleError(
                "ASM must be provisioned to use this module."
            )
        flavor = 'v1' if self.version_is_less_than_13() else 'v2'
        return self.get_manager(flavor).exec_module()

    def get_manager(self, type):
        """Instantiate the manager class registered for *type* ('v1'/'v2')."""
        managers = {'v1': V1Manager, 'v2': V2Manager}
        if type in managers:
            return managers[type](**self.kwargs)

    def version_is_less_than_13(self):
        """True when the target device runs a TMOS version below 13.0.0."""
        return LooseVersion(tmos_version(self.client)) < LooseVersion('13.0.0')
class V1Manager(BaseManager):
    """Manager for BIG-IP versions < 13 (pre-13 template set)."""

    def __init__(self, *args, **kwargs):
        module = kwargs.get('module', None)
        client = F5RestClient(**module.params)
        super(V1Manager, self).__init__(client=client, module=module)
        self.want = V1Parameters(params=module.params, client=client)

    def create_from_template(self):
        """Create the policy from a built-in template (remote tmsh call)."""
        self.create_from_template_on_device()
class V2Manager(BaseManager):
    """Manager for BIG-IP versions >= 13 (adds the v13 template set)."""

    def __init__(self, *args, **kwargs):
        module = kwargs.get('module', None)
        client = F5RestClient(**module.params)
        super(V2Manager, self).__init__(client=client, module=module)
        self.want = V2Parameters(params=module.params, client=client)

    # TODO Include creating ASM policies from custom templates in v13
    def create_from_template(self):
        """Create the policy from a built-in template (remote tmsh call).

        NOTE(review): create_from_template_on_device has no return
        statement, so this always returns False; BaseManager.create
        ignores the return value, so this is harmless in practice.
        """
        if not self.create_from_template_on_device():
            return False
class ArgumentSpec(object):
    """Builds the AnsibleModule argument spec for this module."""

    def __init__(self):
        # Despite the name, this is the *list* of valid template choices
        # (it feeds the 'choices' option of the 'template' argument).
        self.template_map = [
            'ActiveSync v1.0 v2.0 (http)',
            'ActiveSync v1.0 v2.0 (https)',
            'Comprehensive',
            'Drupal',
            'Fundamental',
            'Joomla',
            'LotusDomino 6.5 (http)',
            'LotusDomino 6.5 (https)',
            'OWA Exchange 2003 (http)',
            'OWA Exchange 2003 (https)',
            'OWA Exchange 2003 with ActiveSync (http)',
            'OWA Exchange 2003 with ActiveSync (https)',
            'OWA Exchange 2007 (http)',
            'OWA Exchange 2007 (https)',
            'OWA Exchange 2007 with ActiveSync (http)',
            'OWA Exchange 2007 with ActiveSync (https)',
            'OWA Exchange 2010 (http)',
            'OWA Exchange 2010 (https)',
            'Oracle 10g Portal (http)',
            'Oracle 10g Portal (https)',
            'Oracle Applications 11i (http)',
            'Oracle Applications 11i (https)',
            'PeopleSoft Portal 9 (http)',
            'PeopleSoft Portal 9 (https)',
            'Rapid Deployment Policy',
            'SAP NetWeaver 7 (http)',
            'SAP NetWeaver 7 (https)',
            'SharePoint 2003 (http)',
            'SharePoint 2003 (https)',
            'SharePoint 2007 (http)',
            'SharePoint 2007 (https)',
            'SharePoint 2010 (http)',
            'SharePoint 2010 (https)',
            'Vulnerability Assessment Baseline',
            'Wordpress',
        ]
        self.supports_check_mode = True
        argument_spec = dict(
            name=dict(
                required=True,
            ),
            template=dict(
                choices=self.template_map
            ),
            active=dict(
                type='bool',
                default='no'
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Start from the shared F5 argument spec, then layer on the
        # module-specific arguments.
        self.argument_spec = {}
        self.argument_spec.update(f5_argument_spec)
        self.argument_spec.update(argument_spec)
def main():
    """Module entry point: build the spec, run the manager, report."""
    spec = ArgumentSpec()
    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
    )
    try:
        results = ModuleManager(module=module).exec_module()
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
    else:
        module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
thaumos/ansible | lib/ansible/modules/network/nxos/nxos_vtp_password.py | 19 | 8051 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_vtp_password
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages VTP password configuration.
description:
- Manages VTP password configuration.
author:
- Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- VTP feature must be active on the device to use this module.
- This module is used to manage only VTP passwords.
- Use this in combination with M(nxos_vtp_domain) and M(nxos_vtp_version)
to fully manage VTP operations.
- You can set/remove password only if a VTP domain already exist.
- If C(state=absent) and no C(vtp_password) is provided, it removes the current
VTP password.
- If C(state=absent) and C(vtp_password) is provided, the proposed C(vtp_password)
has to match the existing one in order to remove it.
options:
vtp_password:
description:
- VTP password
state:
description:
- Manage the state of the resource
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ENSURE VTP PASSWORD IS SET
- nxos_vtp_password:
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
# ENSURE VTP PASSWORD IS REMOVED
- nxos_vtp_password:
state: absent
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"vtp_password": "new_ntc"}
existing:
description:
- k/v pairs of existing vtp
returned: always
type: dict
sample: {"domain": "ntc", "version": "1", "vtp_password": "ntc"}
end_state:
description: k/v pairs of vtp after module execution
returned: always
type: dict
sample: {"domain": "ntc", "version": "1", "vtp_password": "new_ntc"}
updates:
description: command sent to the device
returned: always
type: list
sample: ["vtp password new_ntc"]
changed:
description: check to see if a change was made on the device
returned: always
type: bool
sample: true
'''
from ansible.module_utils.network.nxos.nxos import load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
import re
def execute_show_command(command, module, command_type='cli_show'):
    """Run a single show command on the device and return the body list.

    Structured ('json') output is requested unless the command contains
    'status', in which case raw 'text' output is used. The command_type
    parameter is accepted for interface compatibility but not used.
    """
    output = 'text' if 'status' in command else 'json'
    cmds = [{'command': command, 'output': output}]
    return run_commands(module, cmds)
def flatten_list(command_lists):
    """Flatten one level of nesting: inner lists are expanded in place,
    non-list entries are kept as-is."""
    flattened = []
    for entry in command_lists:
        flattened.extend(entry if isinstance(entry, list) else [entry])
    return flattened
def apply_key_map(key_map, table):
    """Rename the keys of *table* according to *key_map*.

    Keys absent from (or mapped to a falsy name in) *key_map* are dropped.
    Truthy values are coerced to str; falsy values are kept unchanged.
    """
    renamed = {}
    for old_key, value in table.items():
        new_key = key_map.get(old_key)
        if not new_key:
            continue
        renamed[new_key] = str(value) if value else value
    return renamed
def get_vtp_config(module):
    """Parse 'show vtp status' output into a dict.

    Returns {} when parsing fails; otherwise a dict with 'domain',
    'version' and 'vtp_password' keys. Note: the command contains
    'status', so execute_show_command returns raw text, which is why
    regexes are used here instead of structured output.
    """
    command = 'show vtp status'
    body = execute_show_command(
        command, module)[0]
    vtp_parsed = {}
    if body:
        version_regex = r'.*VTP version running\s+:\s+(?P<version>\d).*'
        domain_regex = r'.*VTP Domain Name\s+:\s+(?P<domain>\S+).*'
        try:
            # re.match returns None when there is no match; the chained
            # .groupdict() then raises AttributeError, handled below.
            match_version = re.match(version_regex, body, re.DOTALL)
            version = match_version.groupdict()['version']
        except AttributeError:
            version = ''
        try:
            match_domain = re.match(domain_regex, body, re.DOTALL)
            domain = match_domain.groupdict()['domain']
        except AttributeError:
            domain = ''
        # Only report a config at all when both fields parsed successfully.
        if domain and version:
            vtp_parsed['domain'] = domain
            vtp_parsed['version'] = version
            vtp_parsed['vtp_password'] = get_vtp_password(module)
    return vtp_parsed
def get_vtp_password(module):
    """Return the configured VTP password as a string, or '' if unset."""
    command = 'show vtp password'
    body = execute_show_command(command, module)[0]
    try:
        password = body['passwd']
        if password:
            return str(password)
        else:
            return ""
    except TypeError:
        # body was not a dict (e.g. plain-text or empty response);
        # treat that the same as "no password configured".
        return ""
def main():
    """Ansible entry point: converge the NX-OS VTP password to the desired state.

    state=present sets the password (vtp domain must already exist);
    state=absent removes it, but only when the supplied password matches
    the one currently configured (or when none is supplied at all).
    """
    argument_spec = dict(
        vtp_password=dict(type='str', no_log=True),
        state=dict(choices=['absent', 'present'],
                   default='present'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    vtp_password = module.params['vtp_password'] or None
    state = module.params['state']
    existing = get_vtp_config(module)
    end_state = existing
    args = dict(vtp_password=vtp_password)
    changed = False
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    # delta holds proposed key/value pairs that differ from the device.
    delta = dict(set(proposed.items()).difference(existing.items()))
    commands = []
    if state == 'absent':
        # if vtp_password is not set, some devices return '\\' -- treat
        # both cases as "already absent" and do nothing.
        if not existing['vtp_password'] or existing['vtp_password'] == '\\':
            pass
        elif vtp_password is not None:
            # Require the caller to prove they know the current password
            # before it is removed.
            if existing['vtp_password'] == proposed['vtp_password']:
                commands.append(['no vtp password'])
            else:
                module.fail_json(msg="Proposed vtp password doesn't match "
                                     "current vtp password. It cannot be "
                                     "removed when state=absent. If you are "
                                     "trying to change the vtp password, use "
                                     "state=present.")
        else:
            if not existing.get('domain'):
                module.fail_json(msg='Cannot remove a vtp password '
                                     'before vtp domain is set.')
            elif existing['vtp_password'] != ('\\'):
                commands.append(['no vtp password'])
    elif state == 'present':
        if delta:
            if not existing.get('domain'):
                module.fail_json(msg='Cannot set vtp password '
                                     'before vtp domain is set.')
            else:
                commands.append(['vtp password {0}'.format(vtp_password)])
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report what would change without touching the box.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            load_config(module, cmds)
            end_state = get_vtp_config(module)
            if 'configure' in cmds:
                cmds.pop(0)
    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    module.exit_json(**results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
foodszhang/kbengine | kbe/src/lib/python/Tools/clinic/clinic_test.py | 41 | 21082 | # Argument Clinic
# Copyright 2012-2013 by Larry Hastings.
# Licensed to the PSF under a contributor agreement.
#
import builtins
import clinic
from clinic import DSLParser
import collections
import inspect
from test import support
import sys
import unittest
from unittest import TestCase
class FakeConverter:
    """Minimal stand-in converter that just records its name and arguments."""

    def __init__(self, name, args):
        self.name = name
        self.args = args
class FakeConverterFactory:
    """Callable factory producing FakeConverter instances for one converter name."""

    def __init__(self, name):
        self.name = name

    def __call__(self, name, default, **kwargs):
        # The positional name/default are intentionally ignored; only the
        # keyword arguments are captured for later inspection by tests.
        return FakeConverter(self.name, kwargs)
class FakeConvertersDict:
    """Dict-like registry that fabricates a converter factory per requested name."""

    def __init__(self):
        self.used_converters = {}

    def get(self, name, default):
        # Create a factory on first lookup and remember every name ever
        # requested; `default` is deliberately unused.
        return self.used_converters.setdefault(name, FakeConverterFactory(name))
# Disable preset loading and create a throwaway Clinic instance so the
# module under test is importable/configurable for the suites below.
clinic.Clinic.presets_text = ''
c = clinic.Clinic(language='C')
class FakeClinic:
    """Lightweight stand-in for clinic.Clinic used by the DSL parser tests.

    Provides just enough state (converters, destinations, module/class
    registries) for DSLParser to run without a real Clinic instance.
    """

    def __init__(self):
        self.converters = FakeConvertersDict()
        self.legacy_converters = FakeConvertersDict()
        self.language = clinic.CLanguage(None)
        self.filename = None
        self.block_parser = clinic.BlockParser('', self.language)
        self.modules = collections.OrderedDict()
        self.classes = collections.OrderedDict()
        # Monkeypatch: make this fake the module-global clinic instance.
        clinic.clinic = self
        self.name = "FakeClinic"
        self.line_prefix = self.line_suffix = ''
        self.destinations = {}
        self.add_destination("block", "buffer")
        self.add_destination("file", "buffer")
        self.add_destination("suppress", "suppress")
        d = self.destinations.get
        self.field_destinations = collections.OrderedDict((
            ('docstring_prototype', d('suppress')),
            ('docstring_definition', d('block')),
            ('methoddef_define', d('block')),
            ('impl_prototype', d('block')),
            ('parser_prototype', d('suppress')),
            ('parser_definition', d('block')),
            ('impl_definition', d('block')),
        ))

    def get_destination(self, name):
        d = self.destinations.get(name)
        if not d:
            sys.exit("Destination does not exist: " + repr(name))
        return d

    def add_destination(self, name, type, *args):
        if name in self.destinations:
            sys.exit("Destination already exists: " + repr(name))
        self.destinations[name] = clinic.Destination(name, type, self, *args)

    def is_directive(self, name):
        return name == "module"

    def directive(self, name, args):
        # NOTE(review): self.called_directives is never initialized in
        # __init__; this raises AttributeError if ever invoked -- confirm.
        self.called_directives[name] = args

    # Borrow the real implementation for module/class resolution.
    _module_and_class = clinic.Clinic._module_and_class
class ClinicWholeFileTest(TestCase):
    """Tests that run clinic over a complete source string."""

    def test_eol(self):
        # regression test:
        # clinic's block parser didn't recognize
        # the "end line" for the block if it
        # didn't end in "\n" (as in, the last)
        # byte of the file was '/'.
        # so it would spit out an end line for you.
        # and since you really already had one,
        # the last line of the block got corrupted.
        c = clinic.Clinic(clinic.CLanguage(None))
        raw = "/*[clinic]\nfoo\n[clinic]*/"
        cooked = c.parse(raw).splitlines()
        end_line = cooked[2].rstrip()
        # this test is redundant, it's just here explicitly to catch
        # the regression test so we don't forget what it looked like
        self.assertNotEqual(end_line, "[clinic]*/[clinic]*/")
        self.assertEqual(end_line, "[clinic]*/")
class ClinicGroupPermuterTest(TestCase):
    """Tests for clinic.permute_optional_groups (optional-group expansion)."""

    def _test(self, l, m, r, output):
        # l/r are lists of optional groups left/right of the required
        # parameters m; output is the expected tuple of permutations.
        computed = clinic.permute_optional_groups(l, m, r)
        self.assertEqual(output, computed)

    def test_range(self):
        self._test([['start']], ['stop'], [['step']],
          (
            ('stop',),
            ('start', 'stop',),
            ('start', 'stop', 'step',),
          ))

    def test_add_window(self):
        self._test([['x', 'y']], ['ch'], [['attr']],
          (
            ('ch',),
            ('ch', 'attr'),
            ('x', 'y', 'ch',),
            ('x', 'y', 'ch', 'attr'),
          ))

    def test_ludicrous(self):
        self._test([['a1', 'a2', 'a3'], ['b1', 'b2']], ['c1'], [['d1', 'd2'], ['e1', 'e2', 'e3']],
          (
            ('c1',),
            ('b1', 'b2', 'c1'),
            ('b1', 'b2', 'c1', 'd1', 'd2'),
            ('a1', 'a2', 'a3', 'b1', 'b2', 'c1'),
            ('a1', 'a2', 'a3', 'b1', 'b2', 'c1', 'd1', 'd2'),
            ('a1', 'a2', 'a3', 'b1', 'b2', 'c1', 'd1', 'd2', 'e1', 'e2', 'e3'),
          ))

    def test_right_only(self):
        self._test([], [], [['a'],['b'],['c']],
          (
            (),
            ('a',),
            ('a', 'b'),
            ('a', 'b', 'c')
          ))

    def test_have_left_options_but_required_is_empty(self):
        # Left-side optional groups without any required parameters are
        # an unsupported configuration and must assert.
        def fn():
            clinic.permute_optional_groups(['a'], [], [])
        self.assertRaises(AssertionError, fn)
class ClinicLinearFormatTest(TestCase):
    """Tests for clinic.linear_format (line-oriented template substitution)."""

    def _test(self, input, output, **kwargs):
        computed = clinic.linear_format(input, **kwargs)
        self.assertEqual(output, computed)

    def test_empty_strings(self):
        self._test('', '')

    def test_solo_newline(self):
        self._test('\n', '\n')

    def test_no_substitution(self):
        self._test("""
          abc
          """, """
          abc
          """)

    def test_empty_substitution(self):
        # A {name} line whose substitution is empty disappears entirely.
        self._test("""
          abc
          {name}
          def
          """, """
          abc
          def
          """, name='')

    def test_single_line_substitution(self):
        self._test("""
          abc
          {name}
          def
          """, """
          abc
          GARGLE
          def
          """, name='GARGLE')

    def test_multiline_substitution(self):
        self._test("""
          abc
          {name}
          def
          """, """
          abc
          bingle
          bungle
          def
          """, name='bingle\nbungle\n')
class InertParser:
    """Block parser that accepts every block and emits nothing."""

    def __init__(self, clinic):
        pass

    def parse(self, block):
        # Deliberately leave the block untouched.
        return None
class CopyParser:
    """Block parser that copies each block's input verbatim to its output."""

    def __init__(self, clinic):
        pass

    def parse(self, block):
        block.output = block.input
class ClinicBlockParserTest(TestCase):
    """Round-trip and checksum tests for clinic's block parser/printer."""

    def _test(self, input, output):
        language = clinic.CLanguage(None)

        blocks = list(clinic.BlockParser(input, language))
        writer = clinic.BlockPrinter(language)
        for block in blocks:
            writer.print_block(block)
        output = writer.f.getvalue()
        # Parsing then printing must reproduce the input byte-for-byte.
        assert output == input, "output != input!\n\noutput " + repr(output) + "\n\n input " + repr(input)

    def round_trip(self, input):
        return self._test(input, input)

    def test_round_trip_1(self):
        self.round_trip("""
verbatim text here
lah dee dah
""")

    def test_round_trip_2(self):
        self.round_trip("""
verbatim text here
lah dee dah
/*[inert]
abc
[inert]*/
def
/*[inert checksum: 7b18d017f89f61cf17d47f92749ea6930a3f1deb]*/
xyz
""")

    def _test_clinic(self, input, output):
        language = clinic.CLanguage(None)
        c = clinic.Clinic(language)
        c.parsers['inert'] = InertParser(c)
        c.parsers['copy'] = CopyParser(c)
        computed = c.parse(input)
        self.assertEqual(output, computed)

    def test_clinic_1(self):
        # The 'copy' parser replaces the generated section with the input
        # section; checksums in the trailer are recomputed accordingly.
        self._test_clinic("""
verbatim text here
lah dee dah
/*[copy input]
def
[copy start generated code]*/
abc
/*[copy end generated code: output=03cfd743661f0797 input=7b18d017f89f61cf]*/
xyz
""", """
verbatim text here
lah dee dah
/*[copy input]
def
[copy start generated code]*/
def
/*[copy end generated code: output=7b18d017f89f61cf input=7b18d017f89f61cf]*/
xyz
""")
class ClinicParserTest(TestCase):
    """End-to-end tests for the Argument Clinic DSL parser (DSLParser)."""

    def test_trivial(self):
        parser = DSLParser(FakeClinic())
        block = clinic.Block("module os\nos.access")
        parser.parse(block)
        module, function = block.signatures
        self.assertEqual("access", function.name)
        self.assertEqual("os", module.name)

    def test_ignore_line(self):
        # Lines starting with '#' are comments in the DSL.
        block = self.parse("#\nmodule os\nos.access")
        module, function = block.signatures
        self.assertEqual("access", function.name)
        self.assertEqual("os", module.name)

    def test_param(self):
        function = self.parse_function("module os\nos.access\n path: int")
        self.assertEqual("access", function.name)
        # 2 parameters: implicit self/module plus 'path'.
        self.assertEqual(2, len(function.parameters))
        p = function.parameters['path']
        self.assertEqual('path', p.name)
        self.assertIsInstance(p.converter, clinic.int_converter)

    def test_param_default(self):
        function = self.parse_function("module os\nos.access\n follow_symlinks: bool = True")
        p = function.parameters['follow_symlinks']
        self.assertEqual(True, p.default)

    def test_param_with_continuations(self):
        # Backslash continuations are allowed inside a parameter line.
        function = self.parse_function("module os\nos.access\n follow_symlinks: \\\n bool \\\n =\\\n True")
        p = function.parameters['follow_symlinks']
        self.assertEqual(True, p.default)

    def test_param_default_expression(self):
        # A named-constant default requires an explicit c_default.
        function = self.parse_function("module os\nos.access\n follow_symlinks: int(c_default='MAXSIZE') = sys.maxsize")
        p = function.parameters['follow_symlinks']
        self.assertEqual(sys.maxsize, p.default)
        self.assertEqual("MAXSIZE", p.converter.c_default)

        s = self.parse_function_should_fail("module os\nos.access\n follow_symlinks: int = sys.maxsize")
        self.assertEqual(s, "Error on line 0:\nWhen you specify a named constant ('sys.maxsize') as your default value,\nyou MUST specify a valid c_default.\n")

    def test_param_no_docstring(self):
        function = self.parse_function("""
module os
os.access
    follow_symlinks: bool = True
    something_else: str = ''""")
        p = function.parameters['follow_symlinks']
        self.assertEqual(3, len(function.parameters))
        self.assertIsInstance(function.parameters['something_else'].converter, clinic.str_converter)

    def test_param_default_parameters_out_of_order(self):
        s = self.parse_function_should_fail("""
module os
os.access
    follow_symlinks: bool = True
    something_else: str""")
        self.assertEqual(s, """Error on line 0:
Can't have a parameter without a default ('something_else')
after a parameter with a default!
""")

    def disabled_test_converter_arguments(self):
        # Disabled: converter keyword arguments are not parsed yet.
        function = self.parse_function("module os\nos.access\n path: path_t(allow_fd=1)")
        p = function.parameters['path']
        self.assertEqual(1, p.converter.args['allow_fd'])

    def test_function_docstring(self):
        function = self.parse_function("""
module os
os.stat as os_stat_fn

   path: str
       Path to be examined

Perform a stat system call on the given path.""")
        self.assertEqual("""
stat($module, /, path)
--

Perform a stat system call on the given path.

  path
    Path to be examined
""".strip(), function.docstring)

    def test_explicit_parameters_in_docstring(self):
        function = self.parse_function("""
module foo
foo.bar
  x: int
     Documentation for x.
  y: int

This is the documentation for foo.

Okay, we're done here.
""")
        self.assertEqual("""
bar($module, /, x, y)
--

This is the documentation for foo.

  x
    Documentation for x.

Okay, we're done here.
""".strip(), function.docstring)

    def test_parser_regression_special_character_in_parameter_column_of_docstring_first_line(self):
        function = self.parse_function("""
module os
os.stat
    path: str
This/used to break Clinic!
""")
        self.assertEqual("stat($module, /, path)\n--\n\nThis/used to break Clinic!", function.docstring)

    def test_c_name(self):
        function = self.parse_function("module os\nos.stat as os_stat_fn")
        self.assertEqual("os_stat_fn", function.c_basename)

    def test_return_converter(self):
        function = self.parse_function("module os\nos.stat -> int")
        self.assertIsInstance(function.return_converter, clinic.int_return_converter)

    def test_star(self):
        # '*' makes all following parameters keyword-only.
        function = self.parse_function("module os\nos.access\n *\n follow_symlinks: bool = True")
        p = function.parameters['follow_symlinks']
        self.assertEqual(inspect.Parameter.KEYWORD_ONLY, p.kind)
        self.assertEqual(0, p.group)

    def test_group(self):
        function = self.parse_function("module window\nwindow.border\n [\n ls : int\n ]\n /\n")
        p = function.parameters['ls']
        self.assertEqual(1, p.group)

    def test_left_group(self):
        function = self.parse_function("""
module curses
curses.addch
   [
   y: int
     Y-coordinate.
   x: int
     X-coordinate.
   ]
   ch: char
     Character to add.
   [
   attr: long
     Attributes for the character.
   ]
   /
""")
        # Negative group numbers are to the left of the required params,
        # positive to the right.
        for name, group in (
            ('y', -1), ('x', -1),
            ('ch', 0),
            ('attr', 1),
            ):
            p = function.parameters[name]
            self.assertEqual(p.group, group)
            self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)
        self.assertEqual(function.docstring.strip(), """
addch([y, x,] ch, [attr])


  y
    Y-coordinate.
  x
    X-coordinate.
  ch
    Character to add.
  attr
    Attributes for the character.
""".strip())

    def test_nested_groups(self):
        function = self.parse_function("""
module curses
curses.imaginary
   [
   [
   y1: int
     Y-coordinate.
   y2: int
     Y-coordinate.
   ]
   x1: int
     X-coordinate.
   x2: int
     X-coordinate.
   ]
   ch: char
     Character to add.
   [
   attr1: long
     Attributes for the character.
   attr2: long
     Attributes for the character.
   attr3: long
     Attributes for the character.
   [
   attr4: long
     Attributes for the character.
   attr5: long
     Attributes for the character.
   attr6: long
     Attributes for the character.
   ]
   ]
   /
""")
        for name, group in (
            ('y1', -2), ('y2', -2),
            ('x1', -1), ('x2', -1),
            ('ch', 0),
            ('attr1', 1), ('attr2', 1), ('attr3', 1),
            ('attr4', 2), ('attr5', 2), ('attr6', 2),
            ):
            p = function.parameters[name]
            self.assertEqual(p.group, group)
            self.assertEqual(p.kind, inspect.Parameter.POSITIONAL_ONLY)

        self.assertEqual(function.docstring.strip(), """
imaginary([[y1, y2,] x1, x2,] ch, [attr1, attr2, attr3, [attr4, attr5,
          attr6]])


  y1
    Y-coordinate.
  y2
    Y-coordinate.
  x1
    X-coordinate.
  x2
    X-coordinate.
  ch
    Character to add.
  attr1
    Attributes for the character.
  attr2
    Attributes for the character.
  attr3
    Attributes for the character.
  attr4
    Attributes for the character.
  attr5
    Attributes for the character.
  attr6
    Attributes for the character.
""".strip())

    def parse_function_should_fail(self, s):
        # Clinic reports errors via sys.exit(); capture stdout for asserts.
        with support.captured_stdout() as stdout:
            with self.assertRaises(SystemExit):
                self.parse_function(s)
        return stdout.getvalue()

    def test_disallowed_grouping__two_top_groups_on_left(self):
        s = self.parse_function_should_fail("""
module foo
foo.two_top_groups_on_left
    [
    group1 : int
    ]
    [
    group2 : int
    ]
    param: int
""")
        self.assertEqual(s,
            ('Error on line 0:\n'
            'Function two_top_groups_on_left has an unsupported group configuration. (Unexpected state 2.b)\n'))

    def test_disallowed_grouping__two_top_groups_on_right(self):
        self.parse_function_should_fail("""
module foo
foo.two_top_groups_on_right
    param: int
    [
    group1 : int
    ]
    [
    group2 : int
    ]
""")

    def test_disallowed_grouping__parameter_after_group_on_right(self):
        self.parse_function_should_fail("""
module foo
foo.parameter_after_group_on_right
    param: int
    [
    [
    group1 : int
    ]
    group2 : int
    ]
""")

    def test_disallowed_grouping__group_after_parameter_on_left(self):
        self.parse_function_should_fail("""
module foo
foo.group_after_parameter_on_left
    [
    group2 : int
    [
    group1 : int
    ]
    ]
    param: int
""")

    def test_disallowed_grouping__empty_group_on_left(self):
        self.parse_function_should_fail("""
module foo
foo.empty_group
    [
    [
    ]
    group2 : int
    ]
    param: int
""")

    def test_disallowed_grouping__empty_group_on_right(self):
        self.parse_function_should_fail("""
module foo
foo.empty_group
    param: int
    [
    [
    ]
    group2 : int
    ]
""")

    def test_no_parameters(self):
        function = self.parse_function("""
module foo
foo.bar

Docstring

""")
        self.assertEqual("bar($module, /)\n--\n\nDocstring", function.docstring)
        self.assertEqual(1, len(function.parameters)) # self!

    def test_init_with_no_parameters(self):
        function = self.parse_function("""
module foo
class foo.Bar "unused" "notneeded"
foo.Bar.__init__

Docstring

""", signatures_in_block=3, function_index=2)
        # self is not in the signature
        self.assertEqual("Bar()\n--\n\nDocstring", function.docstring)
        # but it *is* a parameter
        self.assertEqual(1, len(function.parameters))

    def test_illegal_module_line(self):
        self.parse_function_should_fail("""
module foo
foo.bar => int
    /
""")

    def test_illegal_c_basename(self):
        self.parse_function_should_fail("""
module foo
foo.bar as 935
    /
""")

    def test_single_star(self):
        # Only one '*' is permitted per signature.
        self.parse_function_should_fail("""
module foo
foo.bar
    *
    *
""")

    def test_parameters_required_after_star_without_initial_parameters_or_docstring(self):
        self.parse_function_should_fail("""
module foo
foo.bar
    *
""")

    def test_parameters_required_after_star_without_initial_parameters_with_docstring(self):
        self.parse_function_should_fail("""
module foo
foo.bar
    *
Docstring here.
""")

    def test_parameters_required_after_star_with_initial_parameters_without_docstring(self):
        self.parse_function_should_fail("""
module foo
foo.bar
    this: int
    *
""")

    def test_parameters_required_after_star_with_initial_parameters_and_docstring(self):
        self.parse_function_should_fail("""
module foo
foo.bar
    this: int
    *
Docstring.
""")

    def test_single_slash(self):
        self.parse_function_should_fail("""
module foo
foo.bar
    /
    /
""")

    def test_mix_star_and_slash(self):
        self.parse_function_should_fail("""
module foo
foo.bar
   x: int
   y: int
   *
   z: int
   /
""")

    def test_parameters_not_permitted_after_slash_for_now(self):
        self.parse_function_should_fail("""
module foo
foo.bar
    /
    x: int
""")

    def test_function_not_at_column_0(self):
        function = self.parse_function("""
  module foo
  foo.bar
    x: int
      Nested docstring here, goeth.
    *
    y: str
  Not at column 0!
""")
        self.assertEqual("""
bar($module, /, x, *, y)
--

Not at column 0!

  x
    Nested docstring here, goeth.
""".strip(), function.docstring)

    # NOTE(review): this method duplicates the identically-named test
    # defined earlier in this class; Python keeps only this second
    # definition, so the earlier copy never runs. Consider removing one.
    def test_parser_regression_special_character_in_parameter_column_of_docstring_first_line(self):
        function = self.parse_function("""
module os
os.stat
    path: str
This/used to break Clinic!
""")
        self.assertEqual("stat($module, /, path)\n--\n\nThis/used to break Clinic!", function.docstring)

    def test_directive(self):
        c = FakeClinic()
        parser = DSLParser(c)
        parser.flag = False
        parser.directives['setflag'] = lambda : setattr(parser, 'flag', True)
        block = clinic.Block("setflag")
        parser.parse(block)
        self.assertTrue(parser.flag)

    def test_legacy_converters(self):
        # Quoted format-unit strings map to the corresponding converter.
        block = self.parse('module os\nos.access\n path: "s"')
        module, function = block.signatures
        self.assertIsInstance((function.parameters['path']).converter, clinic.str_converter)

    def parse(self, text):
        c = FakeClinic()
        parser = DSLParser(c)
        block = clinic.Block(text)
        parser.parse(block)
        return block

    def parse_function(self, text, signatures_in_block=2, function_index=1):
        block = self.parse(text)
        s = block.signatures
        self.assertEqual(len(s), signatures_in_block)
        assert isinstance(s[0], clinic.Module)
        assert isinstance(s[function_index], clinic.Function)
        return s[function_index]

    def test_scaffolding(self):
        # test repr on special values
        self.assertEqual(repr(clinic.unspecified), '<Unspecified>')
        self.assertEqual(repr(clinic.NULL), '<Null>')

        # test that fail fails
        with support.captured_stdout() as stdout:
            with self.assertRaises(SystemExit):
                clinic.fail('The igloos are melting!', filename='clown.txt', line_number=69)
        self.assertEqual(stdout.getvalue(), 'Error in file "clown.txt" on line 69:\nThe igloos are melting!\n')
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| lgpl-3.0 |
heena23/Millionaire | test/functional/p2p-compactblocks.py | 10 | 44276 | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.script import CScript, OP_TRUE
VB_TOP_BITS = 0x20000000
# TestNode: A peer we use to send messages to bitcoind, and store responses.
class TestNode(NodeConnCB):
    """P2P peer that records block announcements (inv/headers/cmpctblock)."""

    def __init__(self):
        super().__init__()
        self.last_sendcmpct = []
        self.block_announced = False
        # Store the hashes of blocks we've seen announced.
        # This is for synchronizing the p2p message traffic,
        # so we can eg wait until a particular block is announced.
        self.announced_blockhashes = set()

    def on_sendcmpct(self, conn, message):
        self.last_sendcmpct.append(message)

    def on_cmpctblock(self, conn, message):
        self.block_announced = True
        self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
        self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)

    def on_headers(self, conn, message):
        self.block_announced = True
        for x in self.last_message["headers"].headers:
            x.calc_sha256()
            self.announced_blockhashes.add(x.sha256)

    def on_inv(self, conn, message):
        for x in self.last_message["inv"].inv:
            # type 2 == MSG_BLOCK
            if x.type == 2:
                self.block_announced = True
                self.announced_blockhashes.add(x.hash)

    # Requires caller to hold mininode_lock
    def received_block_announcement(self):
        return self.block_announced

    def clear_block_announcement(self):
        with mininode_lock:
            self.block_announced = False
            self.last_message.pop("inv", None)
            self.last_message.pop("headers", None)
            self.last_message.pop("cmpctblock", None)

    def get_headers(self, locator, hashstop):
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)

    def send_header_for_blocks(self, new_blocks):
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def request_headers_and_sync(self, locator, hashstop=0):
        self.clear_block_announcement()
        self.get_headers(locator, hashstop)
        assert wait_until(self.received_block_announcement, timeout=30)
        self.clear_block_announcement()

    # Block until a block announcement for a particular block hash is
    # received.
    def wait_for_block_announcement(self, block_hash, timeout=30):
        def received_hash():
            return (block_hash in self.announced_blockhashes)
        return wait_until(received_hash, timeout=timeout)

    def send_await_disconnect(self, message, timeout=30):
        """Sends a message to the node and wait for disconnect.

        This is used when we want to send a message into the node that we expect
        will get us disconnected, eg an invalid block."""
        self.send_message(message)
        success = wait_until(lambda: not self.connected, timeout=timeout)
        if not success:
            logger.error("send_await_disconnect failed!")
            raise AssertionError("send_await_disconnect failed!")
        return success
class CompactBlocksTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
self.extra_args = [["-vbparams=segwit:0:0"], ["-txindex"]]
self.utxos = []
def build_block_on_tip(self, node, segwit=False):
height = node.getblockcount()
tip = node.getbestblockhash()
mtp = node.getblockheader(tip)['mediantime']
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
block.nVersion = VB_TOP_BITS
if segwit:
add_witness_commitment(block)
block.solve()
return block
# Create 10 more anyone-can-spend utxo's for testing.
def make_utxos(self):
# Doesn't matter which node we use, just use node0.
block = self.build_block_on_tip(self.nodes[0])
self.test_node.send_and_ping(msg_block(block))
assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
self.nodes[0].generate(100)
total_value = block.vtx[0].vout[0].nValue
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
for i in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
block2 = self.build_block_on_tip(self.nodes[0])
block2.vtx.append(tx)
block2.hashMerkleRoot = block2.calc_merkle_root()
block2.solve()
self.test_node.send_and_ping(msg_block(block2))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
got_message = wait_until(received_sendcmpct, timeout=30)
assert(received_sendcmpct())
assert(got_message)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert(peer.block_announced)
assert(got_message)
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_message.get("cmpctblock", None), peer.last_message.get("inv", None)))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "inv" in p.last_message)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version-1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" in p.last_message)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: "cmpctblock" not in p.last_message and "headers" in p.last_message)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version-1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: "cmpctblock" in p.last_message)
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
def test_invalid_cmpctblock_message(self):
self.nodes[0].generate(101)
block = self.build_block_on_tip(self.nodes[0])
cmpct_block = P2PHeaderAndShortIDs()
cmpct_block.header = CBlockHeader(block)
cmpct_block.prefilled_txn_length = 1
# This index will be too high
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
cmpct_block.prefilled_txn = [prefilled_txn]
self.test_node.send_await_disconnect(msg_cmpctblock(cmpct_block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
if use_witness_address:
# Want at least one segwit spend, so move all funds to
# a witness address.
address = node.addwitnessaddress(address)
value_to_send = node.getbalance()
node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
node.generate(1)
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert(segwit_tx_generated) # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
assert(test_node.wait_for_block_announcement(tip))
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node, node, version)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False))
[tx.calc_sha256() for tx in block.vtx]
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30)
assert(test_node.received_block_announcement())
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30)
assert(test_node.received_block_announcement())
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert("cmpctblock" in test_node.last_message)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
    """Validate a received HeaderAndShortIDs message against the real block.

    Checks the announced header hash, every prefilled transaction
    (including witness handling appropriate for the compact-block
    version), and every shortid.  `header_and_shortids` must already have
    its prefilled indexes converted to absolute form; its prefilled_txn
    and shortids lists are consumed (popped) by this check.
    """
    # Check that we got the right block!
    header_and_shortids.header.calc_sha256()
    assert_equal(header_and_shortids.header.sha256, block_hash)
    # Make sure the prefilled_txn appears to have included the coinbase
    assert(len(header_and_shortids.prefilled_txn) >= 1)
    assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
    # Check that all prefilled_txn entries match what's in the block.
    for entry in header_and_shortids.prefilled_txn:
        entry.tx.calc_sha256()
        # This checks the non-witness parts of the tx agree
        assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
        # And this checks the witness
        wtxid = entry.tx.calc_sha256(True)
        if version == 2:
            assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
        else:
            # Shouldn't have received a witness
            assert(entry.tx.wit.is_null())
    # Check that the cmpctblock message announced all the transactions.
    assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
    # And now check that all the shortids are as expected as well.
    # Determine the siphash keys to use.
    [k0, k1] = header_and_shortids.get_siphash_keys()
    # Walk the block's tx list; each index is either the next prefilled
    # entry or the next shortid, in order.
    index = 0
    while index < len(block.vtx):
        if (len(header_and_shortids.prefilled_txn) > 0 and
                header_and_shortids.prefilled_txn[0].index == index):
            # Already checked prefilled transactions above
            header_and_shortids.prefilled_txn.pop(0)
        else:
            tx_hash = block.vtx[index].sha256
            if version == 2:
                # Version-2 compact blocks compute shortids over the wtxid
                tx_hash = block.vtx[index].calc_sha256(True)
            shortid = calculate_shortid(k0, k1, tx_hash)
            assert_equal(shortid, header_and_shortids.shortids[0])
            header_and_shortids.shortids.pop(0)
        index += 1
# Test that bitcoind requests compact blocks when we announce new blocks
# via header or inv, and that responding to getblocktxn causes the block
# to be successfully reconstructed.
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
# any cb-version-1-supporting peer.
def test_compactblock_requests(self, node, test_node, version, segwit):
    """Announce a new block via inv and via header; in both cases the node
    should request a compact block, then request the omitted coinbase via
    getblocktxn, and accept the block once the coinbase is delivered."""
    # Try announcing a block with an inv or header, expect a compactblock
    # request
    for announce in ["inv", "header"]:
        block = self.build_block_on_tip(node, segwit=segwit)
        with mininode_lock:
            test_node.last_message.pop("getdata", None)

        if announce == "inv":
            test_node.send_message(msg_inv([CInv(2, block.sha256)]))
            # The node asks for headers first when announced via inv
            success = wait_until(lambda: "getheaders" in test_node.last_message, timeout=30)
            assert(success)
            test_node.send_header_for_blocks([block])
        else:
            test_node.send_header_for_blocks([block])
        success = wait_until(lambda: "getdata" in test_node.last_message, timeout=30)
        assert(success)
        assert_equal(len(test_node.last_message["getdata"].inv), 1)
        # inv type 4 == MSG_CMPCT_BLOCK
        assert_equal(test_node.last_message["getdata"].inv[0].type, 4)
        assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

        # Send back a compactblock message that omits the coinbase
        comp_block = HeaderAndShortIDs()
        comp_block.header = CBlockHeader(block)
        comp_block.nonce = 0
        [k0, k1] = comp_block.get_siphash_keys()
        coinbase_hash = block.vtx[0].sha256
        if version == 2:
            # v2 shortids are computed over the wtxid
            coinbase_hash = block.vtx[0].calc_sha256(True)
        comp_block.shortids = [
            calculate_shortid(k0, k1, coinbase_hash) ]
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
        # Tip must not advance yet: the coinbase is still missing
        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
        # Expect a getblocktxn message.
        with mininode_lock:
            assert("getblocktxn" in test_node.last_message)
            absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, [0])  # should be a coinbase request

        # Send the coinbase, and verify that the tip advances.
        if version == 2:
            msg = msg_witness_blocktxn()
        else:
            msg = msg_blocktxn()
        msg.block_transactions.blockhash = block.sha256
        msg.block_transactions.transactions = [block.vtx[0]]
        test_node.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def build_block_with_transactions(self, node, utxo, num_transactions):
    """Build a block on top of the current tip containing a chain of
    num_transactions transactions, each spending the previous one.

    `utxo` is a [txid, vout_index, value] triple seeding the chain; each
    transaction pays its whole input minus a 100000-satoshi fee to an
    anyone-can-spend output.  Returns the solved block.
    """
    block = self.build_block_on_tip(node)
    spend_hash, spend_index, spend_value = utxo[0], utxo[1], utxo[2]
    for _ in range(num_transactions):
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(spend_hash, spend_index), b''))
        tx.vout.append(CTxOut(spend_value - 100000, CScript([OP_TRUE])))
        tx.rehash()
        # The next transaction in the chain spends this one's output 0.
        spend_hash, spend_index, spend_value = tx.sha256, 0, tx.vout[0].nValue
        block.vtx.append(tx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.solve()
    return block
# Test that we only receive getblocktxn requests for transactions that the
# node needs, and that responding to them causes the block to be
# reconstructed.
def test_getblocktxn_requests(self, node, test_node, version):
    """Verify getblocktxn requests cover exactly the transactions the node
    is missing, across several prefill/mempool combinations."""
    with_witness = (version==2)

    def test_getblocktxn_response(compact_block, peer, expected_result):
        # Send a compact block and check the resulting getblocktxn asks
        # for exactly `expected_result` (absolute indexes).
        msg = msg_cmpctblock(compact_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert("getblocktxn" in peer.last_message)
            absolute_indexes = peer.last_message["getblocktxn"].block_txn_request.to_absolute()
        assert_equal(absolute_indexes, expected_result)

    def test_tip_after_message(node, peer, msg, tip):
        # Send `msg` and check the node's tip becomes `tip`.
        peer.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), tip)

    # First try announcing compactblocks that won't reconstruct, and verify
    # that we receive getblocktxn messages back.
    utxo = self.utxos.pop(0)

    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, use_witness=with_witness)

    test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])

    msg_bt = msg_blocktxn()
    if with_witness:
        msg_bt = msg_witness_blocktxn()  # serialize with witnesses
    msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)

    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])

    # Now try interspersing the prefilled transactions
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
    test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
    msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)

    # Now try giving one transaction ahead of time.
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 5)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    test_node.send_and_ping(msg_tx(block.vtx[1]))
    assert(block.vtx[1].hash in node.getrawmempool())

    # Prefill 4 out of the 6 transactions, and verify that only the one
    # that was not in the mempool is requested.
    comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
    test_getblocktxn_response(comp_block, test_node, [5])

    msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
    test_tip_after_message(node, test_node, msg_bt, block.sha256)

    # Now provide all transactions to the node before the block is
    # announced and verify reconstruction happens immediately.
    utxo = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    for tx in block.vtx[1:]:
        test_node.send_message(msg_tx(tx))
    test_node.sync_with_ping()
    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert(tx.hash in mempool)

    # Clear out last request.
    with mininode_lock:
        test_node.last_message.pop("getblocktxn", None)

    # Send compact block
    comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
    test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
    with mininode_lock:
        # Shouldn't have gotten a request for any transaction
        assert("getblocktxn" not in test_node.last_message)
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
# permanently failed.
def test_incorrect_blocktxn_response(self, node, test_node, version):
    """Answer a getblocktxn with the wrong transactions; the node must
    fall back to a full-block getdata and accept the block when that full
    block is finally delivered."""
    if (len(self.utxos) == 0):
        self.make_utxos()
    utxo = self.utxos.pop(0)

    block = self.build_block_with_transactions(node, utxo, 10)
    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
    # Relay the first 5 transactions from the block in advance
    for tx in block.vtx[1:6]:
        test_node.send_message(msg_tx(tx))
    test_node.sync_with_ping()
    # Make sure all transactions were accepted.
    mempool = node.getrawmempool()
    for tx in block.vtx[1:6]:
        assert(tx.hash in mempool)

    # Send compact block
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
    absolute_indexes = []
    with mininode_lock:
        assert("getblocktxn" in test_node.last_message)
        absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
    # Only the 5 transactions not already in the mempool are requested
    assert_equal(absolute_indexes, [6, 7, 8, 9, 10])

    # Now give an incorrect response.
    # Note that it's possible for bitcoind to be smart enough to know we're
    # lying, since it could check to see if the shortid matches what we're
    # sending, and eg disconnect us for misbehavior.  If that behavior
    # change were made, we could just modify this test by having a
    # different peer provide the block further down, so that we're still
    # verifying that the block isn't marked bad permanently. This is good
    # enough for now.
    msg = msg_blocktxn()
    if version==2:
        msg = msg_witness_blocktxn()
    # Wrong set: repeats vtx[5] and skips vtx[6]
    msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
    test_node.send_and_ping(msg)

    # Tip should not have updated
    assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)

    # We should receive a getdata request
    success = wait_until(lambda: "getdata" in test_node.last_message, timeout=10)
    assert(success)
    assert_equal(len(test_node.last_message["getdata"].inv), 1)
    # Full block request: MSG_BLOCK, possibly with the witness flag set
    assert(test_node.last_message["getdata"].inv[0].type == 2 or test_node.last_message["getdata"].inv[0].type == 2|MSG_WITNESS_FLAG)
    assert_equal(test_node.last_message["getdata"].inv[0].hash, block.sha256)

    # Deliver the block
    if version==2:
        test_node.send_and_ping(msg_witness_block(block))
    else:
        test_node.send_and_ping(msg_block(block))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def test_getblocktxn_handler(self, node, test_node, version):
    """Exercise the node's getblocktxn handler: recent blocks get blocktxn
    replies (witness-stripped for v1 peers); blocks deeper than the
    allowed depth get a full block instead."""
    # bitcoind will not send blocktxn responses for blocks whose height is
    # more than 10 blocks deep.
    MAX_GETBLOCKTXN_DEPTH = 10
    chain_height = node.getblockcount()
    current_height = chain_height
    while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
        block_hash = node.getblockhash(current_height)
        block = FromHex(CBlock(), node.getblock(block_hash, False))

        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
        # Request a random subset of the block's transactions
        num_to_request = random.randint(1, len(block.vtx))
        msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
        test_node.send_message(msg)
        success = wait_until(lambda: "blocktxn" in test_node.last_message, timeout=10)
        assert(success)

        [tx.calc_sha256() for tx in block.vtx]
        with mininode_lock:
            assert_equal(test_node.last_message["blocktxn"].block_transactions.blockhash, int(block_hash, 16))
            all_indices = msg.block_txn_request.to_absolute()
            for index in all_indices:
                tx = test_node.last_message["blocktxn"].block_transactions.transactions.pop(0)
                tx.calc_sha256()
                assert_equal(tx.sha256, block.vtx[index].sha256)
                if version == 1:
                    # Witnesses should have been stripped
                    assert(tx.wit.is_null())
                else:
                    # Check that the witness matches
                    assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
            test_node.last_message.pop("blocktxn", None)
        current_height -= 1

    # Next request should send a full block response, as we're past the
    # allowed depth for a blocktxn response.
    block_hash = node.getblockhash(current_height)
    msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
    with mininode_lock:
        test_node.last_message.pop("block", None)
        test_node.last_message.pop("blocktxn", None)
    test_node.send_and_ping(msg)
    with mininode_lock:
        test_node.last_message["block"].block.calc_sha256()
        assert_equal(test_node.last_message["block"].block.sha256, int(block_hash, 16))
        assert "blocktxn" not in test_node.last_message
def test_compactblocks_not_at_tip(self, node, test_node):
    """Verify compact-block service is limited to recent blocks: old
    cmpctblock getdatas are answered with full blocks, stale compact-block
    announcements are treated as headers-only, and getblocktxn for an
    unknown-content block fails silently (anti-fingerprinting)."""
    # Test that requesting old compactblocks doesn't work.
    MAX_CMPCTBLOCK_DEPTH = 5
    new_blocks = []
    for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
        test_node.clear_block_announcement()
        new_blocks.append(node.generate(1)[0])
        wait_until(test_node.received_block_announcement, timeout=30)

    # At exactly MAX_CMPCTBLOCK_DEPTH the node still serves a cmpctblock
    test_node.clear_block_announcement()
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    success = wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30)
    assert(success)

    # One block deeper, the same request is answered with a full block
    test_node.clear_block_announcement()
    node.generate(1)
    wait_until(test_node.received_block_announcement, timeout=30)
    test_node.clear_block_announcement()
    with mininode_lock:
        test_node.last_message.pop("block", None)
    test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
    success = wait_until(lambda: "block" in test_node.last_message, timeout=30)
    assert(success)
    with mininode_lock:
        test_node.last_message["block"].block.calc_sha256()
        assert_equal(test_node.last_message["block"].block.sha256, int(new_blocks[0], 16))

    # Generate an old compactblock, and verify that it's not accepted.
    cur_height = node.getblockcount()
    hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
    block = self.build_block_on_tip(node)
    block.hashPrevBlock = hashPrevBlock
    block.solve()

    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block)
    test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))

    tips = node.getchaintips()
    found = False
    for x in tips:
        if x["hash"] == block.hash:
            assert_equal(x["status"], "headers-only")
            found = True
            break
    assert(found)

    # Requesting this block via getblocktxn should silently fail
    # (to avoid fingerprinting attacks).
    msg = msg_getblocktxn()
    msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
    with mininode_lock:
        test_node.last_message.pop("blocktxn", None)
    test_node.send_and_ping(msg)
    with mininode_lock:
        assert "blocktxn" not in test_node.last_message
def activate_segwit(self, node):
    """Mine three full retarget periods so the segwit (BIP9) deployment
    reaches the 'active' state, then verify it did."""
    node.generate(3 * 144)
    assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
def test_end_to_end_block_relay(self, node, listeners):
    """Submit a block to `node` and check that every peer in `listeners`
    receives a cmpctblock announcement for it."""
    spend = self.utxos.pop(0)
    block = self.build_block_with_transactions(node, spend, 10)

    for peer in listeners:
        peer.clear_block_announcement()

    # ToHex() won't serialize with witness, but this block has no witnesses
    # anyway. TODO: repeat this test with witness tx's to a segwit node.
    node.submitblock(ToHex(block))

    for peer in listeners:
        wait_until(lambda: peer.received_block_announcement(), timeout=30)
    with mininode_lock:
        for peer in listeners:
            assert "cmpctblock" in peer.last_message
            announced_header = peer.last_message["cmpctblock"].header_and_shortids.header
            announced_header.calc_sha256()
            assert_equal(announced_header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
    """Relay a compact block whose header is valid but whose transaction
    set is invalid (one tx deleted), and verify the node rejects the
    block without disconnecting us.

    The utxo used is deliberately NOT popped, so it stays spendable for
    later tests.
    """
    assert(len(self.utxos))
    utxo = self.utxos[0]

    block = self.build_block_with_transactions(node, utxo, 5)
    # Drop a mid-block transaction so the block is invalid (its child
    # inputs no longer resolve), then recommit the merkle root.
    del block.vtx[3]
    block.hashMerkleRoot = block.calc_merkle_root()
    if use_segwit:
        # If we're testing with segwit, also drop the coinbase witness,
        # but include the witness commitment.
        add_witness_commitment(block)
        block.vtx[0].wit.vtxinwit = []
    block.solve()

    # Now send the compact block with all transactions prefilled, and
    # verify that we don't get disconnected.
    comp_block = HeaderAndShortIDs()
    comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
    msg = msg_cmpctblock(comp_block.to_p2p())
    test_node.send_and_ping(msg)

    # Check that the tip didn't advance.
    # BUGFIX: use '!=' instead of 'is not' -- 'is not' compares object
    # identity, not value, and only passed by accident for large ints.
    assert(int(node.getbestblockhash(), 16) != block.sha256)
    test_node.sync_with_ping()
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
    """Ask `node` (through `peer`) to announce new blocks as compact
    blocks of the given version, after syncing headers to the tip."""
    tip = node.getbestblockhash()
    peer.get_headers(locator=[int(tip, 16)], hashstop=0)

    sendcmpct = msg_sendcmpct()
    sendcmpct.announce = True
    sendcmpct.version = version
    peer.send_and_ping(sendcmpct)
def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
    """Check reconstruction when multiple peers are involved: a second
    peer's cmpctblock can complete a block a stalled peer announced, and a
    bad compact block from one peer doesn't break relay -- the node falls
    back to requesting the transactions from the original announcer."""
    assert(len(self.utxos))

    def announce_cmpct_block(node, peer):
        # Announce a fresh 5-tx block via compact block from `peer` and
        # confirm the node asks for the missing transactions.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)

        cmpct_block = HeaderAndShortIDs()
        cmpct_block.initialize_from_block(block)
        msg = msg_cmpctblock(cmpct_block.to_p2p())
        peer.send_and_ping(msg)
        with mininode_lock:
            assert "getblocktxn" in peer.last_message
        return block, cmpct_block

    block, cmpct_block = announce_cmpct_block(node, stalling_peer)

    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()
    mempool = node.getrawmempool()
    for tx in block.vtx[1:]:
        assert(tx.hash in mempool)

    # With the txs in the mempool, delivery_peer's cmpctblock reconstructs
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)

    self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])

    # Now test that delivering an invalid compact block won't break relay
    block, cmpct_block = announce_cmpct_block(node, stalling_peer)
    for tx in block.vtx[1:]:
        delivery_peer.send_message(msg_tx(tx))
    delivery_peer.sync_with_ping()

    # Corrupt the prefilled coinbase with a bogus witness
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
    cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
    cmpct_block.use_witness = True
    delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
    assert(int(node.getbestblockhash(), 16) != block.sha256)

    # The stalling peer can still complete the block via blocktxn
    msg = msg_blocktxn()
    msg.block_transactions.blockhash = block.sha256
    msg.block_transactions.transactions = block.vtx[1:]
    stalling_peer.send_and_ping(msg)
    assert_equal(int(node.getbestblockhash(), 16), block.sha256)
def run_test(self):
    """Main test driver: run the compact-block test battery against a
    pre-segwit node (version-1 peer) and a segwit node (version-1 and
    version-2 peers), both before and after segwit activation."""
    # Setup the p2p connections and start up the network thread.
    self.test_node = TestNode()
    self.segwit_node = TestNode()
    self.old_node = TestNode()  # version 1 peer <--> segwit node

    connections = []
    connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
    connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
    connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                self.old_node, services=NODE_NETWORK))
    self.test_node.add_connection(connections[0])
    self.segwit_node.add_connection(connections[1])
    self.old_node.add_connection(connections[2])

    NetworkThread().start()  # Start up network handling in another thread

    # Test logic begins here
    self.test_node.wait_for_verack()

    # We will need UTXOs to construct transactions in later tests.
    self.make_utxos()

    self.log.info("Running tests, pre-segwit activation:")

    self.log.info("Testing SENDCMPCT p2p message... ")
    self.test_sendcmpct(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
    sync_blocks(self.nodes)

    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)

    self.log.info("Testing compactblock requests... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
    sync_blocks(self.nodes)
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
    sync_blocks(self.nodes)

    self.log.info("Testing getblocktxn requests...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)

    self.log.info("Testing getblocktxn handler...")
    self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
    sync_blocks(self.nodes)

    self.log.info("Testing compactblock requests/announcements not at chain tip...")
    self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
    sync_blocks(self.nodes)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
    self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
    sync_blocks(self.nodes)

    self.log.info("Testing handling of incorrect blocktxn responses...")
    self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
    sync_blocks(self.nodes)
    self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)

    # End-to-end block relay tests
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])

    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)

    self.log.info("Testing reconstructing compact blocks from all peers...")
    self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
    sync_blocks(self.nodes)

    # Advance to segwit activation
    self.log.info("Advancing to segwit activation")
    self.activate_segwit(self.nodes[1])

    self.log.info("Running tests, post-segwit activation...")

    self.log.info("Testing compactblock construction...")
    self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
    self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
    sync_blocks(self.nodes)

    self.log.info("Testing compactblock requests (unupgraded node)... ")
    self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)

    self.log.info("Testing getblocktxn requests (unupgraded node)...")
    self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)

    # Need to manually sync node0 and node1, because post-segwit activation,
    # node1 will not download blocks from node0.
    self.log.info("Syncing nodes...")
    assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
    while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
        block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
        self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
    assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())

    self.log.info("Testing compactblock requests (segwit node)... ")
    self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)

    self.log.info("Testing getblocktxn requests (segwit node)...")
    self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
    sync_blocks(self.nodes)

    self.log.info("Testing getblocktxn handler (segwit node should return witnesses)...")
    self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
    self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)

    # Test that if we submitblock to node1, we'll get a compact block
    # announcement to all peers.
    # (Post-segwit activation, blocks won't propagate from node0 to node1
    # automatically, so don't bother testing a block announced to node0.)
    self.log.info("Testing end-to-end block relay...")
    self.request_cb_announcements(self.test_node, self.nodes[0], 1)
    self.request_cb_announcements(self.old_node, self.nodes[1], 1)
    self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
    self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])

    self.log.info("Testing handling of invalid compact blocks...")
    self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
    self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)

    self.log.info("Testing invalid index in cmpctblock message...")
    self.test_invalid_cmpctblock_message()
# Script entry point: run the compact blocks functional test.
if __name__ == '__main__':
    CompactBlocksTest().main()
| mit |
ryfeus/lambda-packs | Keras_tensorflow/source/numpy/matrixlib/tests/test_defmatrix.py | 130 | 14801 | from __future__ import division, absolute_import, print_function
import collections
import numpy as np
from numpy import matrix, asmatrix, bmat
from numpy.testing import (
TestCase, run_module_suite, assert_, assert_equal, assert_almost_equal,
assert_array_equal, assert_array_almost_equal, assert_raises
)
from numpy.matrixlib.defmatrix import matrix_power
from numpy.matrixlib import mat
class TestCtor(TestCase):
    """Construction of matrix objects from arrays, strings and bmat()."""

    def test_basic(self):
        """matrix() wraps an array; bmat() builds block matrices from a
        string (names resolved in the caller's frame) or nested lists."""
        A = np.array([[1, 2], [3, 4]])
        mA = matrix(A)
        assert_(np.all(mA.A == A))

        # NOTE: bmat("A,A;A,A") looks up the name 'A' in this function's
        # local namespace -- the local variable names are load-bearing.
        B = bmat("A,A;A,A")
        C = bmat([[A, A], [A, A]])
        D = np.array([[1, 2, 1, 2],
                      [3, 4, 3, 4],
                      [1, 2, 1, 2],
                      [3, 4, 3, 4]])
        assert_(np.all(B.A == D))
        assert_(np.all(C.A == D))

        E = np.array([[5, 6], [7, 8]])
        AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]])
        assert_(np.all(bmat([A, E]) == AEresult))

        # A 1-d input becomes a single-row matrix
        vec = np.arange(5)
        mvec = matrix(vec)
        assert_(mvec.shape == (1, 5))

    def test_exceptions(self):
        # Check for TypeError when called with invalid string data.
        assert_raises(TypeError, matrix, "invalid")

    def test_bmat_nondefault_str(self):
        """Exercise bmat() string mode with explicit ldict/gdict lookups."""
        A = np.array([[1, 2], [3, 4]])
        B = np.array([[5, 6], [7, 8]])
        Aresult = np.array([[1, 2, 1, 2],
                            [3, 4, 3, 4],
                            [1, 2, 1, 2],
                            [3, 4, 3, 4]])
        mixresult = np.array([[1, 2, 5, 6],
                              [3, 4, 7, 8],
                              [5, 6, 1, 2],
                              [7, 8, 3, 4]])
        assert_(np.all(bmat("A,A;A,A") == Aresult))
        assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult))
        # gdict without ldict is rejected
        assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B})
        assert_(
            np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult))

        b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A})
        assert_(np.all(b2 == mixresult))
class TestProperties(TestCase):
    """Reductions, attribute properties (T/H/I/A) and comparisons on
    matrix objects -- all should return matrix-shaped results."""

    def test_sum(self):
        """Test whether matrix.sum(axis=1) preserves orientation.

        Fails in NumPy <= 0.9.6.2127.
        """
        M = matrix([[1, 2, 0, 0],
                    [3, 4, 0, 0],
                    [1, 2, 1, 2],
                    [3, 4, 3, 4]])
        sum0 = matrix([8, 12, 4, 6])
        sum1 = matrix([3, 7, 6, 14]).T
        sumall = 30
        assert_array_equal(sum0, M.sum(axis=0))
        assert_array_equal(sum1, M.sum(axis=1))
        assert_equal(sumall, M.sum())

        assert_array_equal(sum0, np.sum(M, axis=0))
        assert_array_equal(sum1, np.sum(M, axis=1))
        assert_equal(sumall, np.sum(M))

    def test_prod(self):
        """Products along each axis keep matrix orientation."""
        x = matrix([[1, 2, 3], [4, 5, 6]])
        assert_equal(x.prod(), 720)
        assert_equal(x.prod(0), matrix([[4, 10, 18]]))
        assert_equal(x.prod(1), matrix([[6], [120]]))

        assert_equal(np.prod(x), 720)
        assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]]))
        assert_equal(np.prod(x, axis=1), matrix([[6], [120]]))

        y = matrix([0, 1, 3])
        assert_(y.prod() == 0)

    def test_max(self):
        x = matrix([[1, 2, 3], [4, 5, 6]])
        assert_equal(x.max(), 6)
        assert_equal(x.max(0), matrix([[4, 5, 6]]))
        assert_equal(x.max(1), matrix([[3], [6]]))

        assert_equal(np.max(x), 6)
        assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]]))
        assert_equal(np.max(x, axis=1), matrix([[3], [6]]))

    def test_min(self):
        x = matrix([[1, 2, 3], [4, 5, 6]])
        assert_equal(x.min(), 1)
        assert_equal(x.min(0), matrix([[1, 2, 3]]))
        assert_equal(x.min(1), matrix([[1], [4]]))

        assert_equal(np.min(x), 1)
        assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]]))
        assert_equal(np.min(x, axis=1), matrix([[1], [4]]))

    def test_ptp(self):
        # peak-to-peak (max - min), overall and per axis
        x = np.arange(4).reshape((2, 2))
        assert_(x.ptp() == 3)
        assert_(np.all(x.ptp(0) == np.array([2, 2])))
        assert_(np.all(x.ptp(1) == np.array([1, 1])))

    def test_var(self):
        # A matrix view must agree with the underlying array's variance
        x = np.arange(9).reshape((3, 3))
        mx = x.view(np.matrix)
        assert_equal(x.var(ddof=0), mx.var(ddof=0))
        assert_equal(x.var(ddof=1), mx.var(ddof=1))

    def test_basic(self):
        """The .I, .T, .H and .A properties (inverse, transpose,
        conjugate transpose, underlying array)."""
        import numpy.linalg as linalg

        A = np.array([[1., 2.],
                      [3., 4.]])
        mA = matrix(A)
        assert_(np.allclose(linalg.inv(A), mA.I))
        assert_(np.all(np.array(np.transpose(A) == mA.T)))
        # For a real matrix, H (conjugate transpose) equals T
        assert_(np.all(np.array(np.transpose(A) == mA.H)))
        assert_(np.all(A == mA.A))

        B = A + 2j*A
        mB = matrix(B)
        assert_(np.allclose(linalg.inv(B), mB.I))
        assert_(np.all(np.array(np.transpose(B) == mB.T)))
        assert_(np.all(np.array(np.transpose(B).conj() == mB.H)))

    def test_pinv(self):
        # .I on a non-square matrix is the pseudo-inverse
        x = matrix(np.arange(6).reshape(2, 3))
        xpinv = matrix([[-0.77777778,  0.27777778],
                        [-0.11111111,  0.11111111],
                        [ 0.55555556, -0.05555556]])
        assert_almost_equal(x.I, xpinv)

    def test_comparisons(self):
        A = np.arange(100).reshape(10, 10)
        mA = matrix(A)
        mB = matrix(A) + 0.1
        assert_(np.all(mB == A+0.1))
        assert_(np.all(mB == matrix(A+0.1)))
        assert_(not np.any(mB == matrix(A-0.1)))
        assert_(np.all(mA < mB))
        assert_(np.all(mA <= mB))
        assert_(np.all(mA <= mA))
        assert_(not np.any(mA < mA))

        assert_(not np.any(mB < mA))
        assert_(np.all(mB >= mA))
        assert_(np.all(mB >= mB))
        assert_(not np.any(mB > mB))

        assert_(np.all(mA == mA))
        assert_(not np.any(mA == mB))
        assert_(np.all(mB != mA))

        assert_(not np.all(abs(mA) > 0))
        assert_(np.all(abs(mB > 0)))

    def test_asmatrix(self):
        # asmatrix returns a view: mutating the array shows in the matrix
        A = np.arange(100).reshape(10, 10)
        mA = asmatrix(A)
        A[0, 0] = -10
        assert_(A[0, 0] == mA[0, 0])

    def test_noaxis(self):
        A = matrix([[1, 0], [0, 1]])
        assert_(A.sum() == matrix(2))
        assert_(A.mean() == matrix(0.5))

    def test_repr(self):
        A = matrix([[1, 0], [0, 1]])
        assert_(repr(A) == "matrix([[1, 0],\n        [0, 1]])")
class TestCasting(TestCase):
    """Dtype promotion when mixing matrix with float/complex operands."""

    def test_basic(self):
        """Arithmetic with float and complex operands upcasts the result
        matrix to the appropriate dtype."""
        A = np.arange(100).reshape(10, 10)
        mA = matrix(A)

        mB = mA.copy()
        O = np.ones((10, 10), np.float64) * 0.1
        mB = mB + O
        assert_(mB.dtype.type == np.float64)
        assert_(np.all(mA != mB))
        assert_(np.all(mB == mA+0.1))

        mC = mA.copy()
        O = np.ones((10, 10), np.complex128)
        mC = mC * O  # matrix product -> complex128 result
        assert_(mC.dtype.type == np.complex128)
        # BUGFIX: the original repeated the mA != mB check here; the
        # freshly computed complex product mC is what should be compared.
        assert_(np.all(mA != mC))
class TestAlgebra(TestCase):
    """Matrix algebra: ** as repeated matmul, * as matmul, and rejection
    of unsupported operand types."""

    def test_basic(self):
        import numpy.linalg as linalg

        A = np.array([[1., 2.], [3., 4.]])
        mA = matrix(A)

        # mA ** i must equal i repeated matrix products of A
        B = np.identity(2)
        for i in range(6):
            assert_(np.allclose((mA ** i).A, B))
            B = np.dot(B, A)

        # Negative powers use the inverse
        Ainv = linalg.inv(A)
        B = np.identity(2)
        for i in range(6):
            assert_(np.allclose((mA ** -i).A, B))
            B = np.dot(B, Ainv)

        # * is matrix multiplication, + and scalar * are elementwise
        assert_(np.allclose((mA * mA).A, np.dot(A, A)))
        assert_(np.allclose((mA + mA).A, (A + A)))
        assert_(np.allclose((3*mA).A, (3*A)))

        mA2 = matrix(A)
        mA2 *= 3
        assert_(np.allclose(mA2.A, 3*A))

    def test_pow(self):
        """Test raising a matrix to an integer power works as expected."""
        m = matrix("1. 2.; 3. 4.")
        m2 = m.copy()
        m2 **= 2
        mi = m.copy()
        mi **= -1
        m4 = m2.copy()
        m4 **= 2
        assert_array_almost_equal(m2, m**2)
        assert_array_almost_equal(m4, np.dot(m2, m2))
        assert_array_almost_equal(np.dot(mi, m), np.eye(2))

    def test_notimplemented(self):
        '''Check that 'not implemented' operations produce a failure.'''
        A = matrix([[1., 2.],
                    [3., 4.]])

        # __rpow__
        try:
            1.0**A
        except TypeError:
            pass
        else:
            self.fail("matrix.__rpow__ doesn't raise a TypeError")

        # __mul__ with something not a list, ndarray, tuple, or scalar
        try:
            A*object()
        except TypeError:
            pass
        else:
            self.fail("matrix.__mul__ with non-numeric object doesn't raise"
                      "a TypeError")
class TestMatrixReturn(TestCase):
    """Instance methods of matrix must return matrix objects, not plain
    ndarrays (except documented exceptions like nonzero())."""

    def test_instance_methods(self):
        """Call every public, non-excluded method on a 1x1 matrix and
        verify the result type is matrix."""
        a = matrix([1.0], dtype='f8')
        # Extra arguments needed to call some methods meaningfully
        methodargs = {
            'astype': ('intc',),
            'clip': (0.0, 1.0),
            'compress': ([1],),
            'repeat': (1,),
            'reshape': (1,),
            'swapaxes': (0, 0),
            'dot': np.array([1.0]),
            }
        # Methods that either don't return an array or can't be called
        # generically with no/simple arguments
        excluded_methods = [
            'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield',
            'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize',
            'searchsorted', 'setflags', 'setfield', 'sort',
            'partition', 'argpartition',
            'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any',
            'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp',
            'prod', 'std', 'ctypes', 'itemset',
            ]
        for attrib in dir(a):
            if attrib.startswith('_') or attrib in excluded_methods:
                continue
            f = getattr(a, attrib)
            # BUGFIX: use the builtin callable() -- the old
            # isinstance(f, collections.Callable) relied on the
            # collections.Callable alias that was removed in Python 3.10.
            if callable(f):
                # reset contents of a
                a.astype('f8')
                a.fill(1.0)
                if attrib in methodargs:
                    args = methodargs[attrib]
                else:
                    args = ()
                b = f(*args)
                assert_(type(b) is matrix, "%s" % attrib)
        assert_(type(a.real) is matrix)
        assert_(type(a.imag) is matrix)
        # nonzero() is a documented exception: it returns plain ndarrays
        c, d = matrix([0.0]).nonzero()
        assert_(type(c) is np.ndarray)
        assert_(type(d) is np.ndarray)
class TestIndexing(TestCase):
    """Assignment/indexing behaviour of matrix objects."""

    def test_basic(self):
        """Boolean-array assignment into one matrix column works like ndarray."""
        target = asmatrix(np.zeros((3, 2), float))
        col = np.zeros((3, 1), float)
        col[:, 0] = [0.8, 0.2, 0.3]
        target[:, 1] = col > 0.5
        assert_equal(target, [[0, 1], [0, 0], [0, 0]])
class TestNewScalarIndexing(TestCase):
    """Indexing semantics that are specific to the matrix subclass."""

    def setUp(self):
        # Shared 2x2 fixture used by the tests below.
        self.a = matrix([[1, 2], [3, 4]])

    def test_dimesions(self):
        # Selecting a single row of a matrix stays two-dimensional.
        row = self.a[0]
        assert_equal(row.ndim, 2)

    def test_array_from_matrix_list(self):
        stacked = np.array([self.a, self.a])
        assert_equal(stacked.shape, [2, 2, 2])

    def test_array_to_list(self):
        assert_equal(self.a.tolist(), [[1, 2], [3, 4]])

    def test_fancy_indexing(self):
        picked = self.a[1, [0, 1, 0]]
        assert_(isinstance(picked, matrix))
        assert_equal(picked, matrix([[3, 4, 3]]))
        rows = self.a[[1, 0]]
        assert_(isinstance(rows, matrix))
        assert_equal(rows, matrix([[3, 4], [1, 2]]))
        crossed = self.a[[[1], [0]], [[1, 0], [0, 1]]]
        assert_(isinstance(crossed, matrix))
        assert_equal(crossed, matrix([[4, 3], [1, 2]]))

    def test_matrix_element(self):
        m = matrix([[1, 2, 3], [4, 5, 6]])
        assert_equal(m[0][0], matrix([[1, 2, 3]]))
        assert_equal(m[0][0].shape, (1, 3))
        assert_equal(m[0].shape, (1, 3))
        assert_equal(m[:, 0].shape, (2, 1))
        scalar = matrix(0)
        assert_equal(scalar[0, 0], 0)
        assert_equal(scalar[0], 0)
        assert_equal(scalar[:, 0].shape, scalar.shape)

    def test_scalar_indexing(self):
        zeros = asmatrix(np.zeros((3, 2), float))
        assert_equal(zeros[0, 0], zeros[0][0])

    def test_row_column_indexing(self):
        eye = asmatrix(np.eye(2))
        assert_array_equal(eye[0, :], [[1, 0]])
        assert_array_equal(eye[1, :], [[0, 1]])
        assert_array_equal(eye[:, 0], [[1], [0]])
        assert_array_equal(eye[:, 1], [[0], [1]])

    def test_boolean_indexing(self):
        base = np.arange(6)
        base.shape = (3, 2)
        m = asmatrix(base)
        assert_array_equal(m[:, np.array([True, False])], m[:, 0])
        assert_array_equal(m[np.array([True, False, False]), :], m[0, :])

    def test_list_indexing(self):
        base = np.arange(6)
        base.shape = (3, 2)
        m = asmatrix(base)
        assert_array_equal(m[:, [1, 0]], m[:, ::-1])
        assert_array_equal(m[[2, 1, 0], :], m[::-1, :])
class TestPower(TestCase):
    """matrix_power: return types and list input."""

    def test_returntype(self):
        # matrix_power preserves the input's array subclass.
        arr = np.array([[0, 1], [0, 0]])
        assert_(type(matrix_power(arr, 2)) is np.ndarray)
        as_matrix = mat(arr)
        assert_(type(matrix_power(as_matrix, 2)) is matrix)

    def test_list(self):
        # Plain nested lists are accepted as input.
        squared = matrix_power([[0, 1], [0, 0]], 2)
        assert_array_equal(squared, [[0, 0], [0, 0]])
class TestShape(TestCase):
    """Shape/ravel/flatten differences between ndarray and matrix."""

    def setUp(self):
        # The same 2x1 data as a plain array and as a matrix.
        self.a = np.array([[1], [2]])
        self.m = matrix([[1], [2]])

    def test_shape(self):
        assert_equal(self.a.shape, (2, 1))
        assert_equal(self.m.shape, (2, 1))

    def test_numpy_ravel(self):
        # The np.ravel free function flattens both to 1-D.
        assert_equal(np.ravel(self.a).shape, (2,))
        assert_equal(np.ravel(self.m).shape, (2,))

    def test_member_ravel(self):
        # matrix.ravel keeps two dimensions; ndarray.ravel does not.
        assert_equal(self.a.ravel().shape, (2,))
        assert_equal(self.m.ravel().shape, (1, 2))

    def test_member_flatten(self):
        assert_equal(self.a.flatten().shape, (2,))
        assert_equal(self.m.flatten().shape, (1, 2))

    def test_numpy_ravel_order(self):
        arr = np.array([[1, 2, 3], [4, 5, 6]])
        assert_equal(np.ravel(arr), [1, 2, 3, 4, 5, 6])
        assert_equal(np.ravel(arr, order='F'), [1, 4, 2, 5, 3, 6])
        assert_equal(np.ravel(arr.T), [1, 4, 2, 5, 3, 6])
        assert_equal(np.ravel(arr.T, order='A'), [1, 2, 3, 4, 5, 6])
        m = matrix([[1, 2, 3], [4, 5, 6]])
        assert_equal(np.ravel(m), [1, 2, 3, 4, 5, 6])
        assert_equal(np.ravel(m, order='F'), [1, 4, 2, 5, 3, 6])
        assert_equal(np.ravel(m.T), [1, 4, 2, 5, 3, 6])
        assert_equal(np.ravel(m.T, order='A'), [1, 2, 3, 4, 5, 6])

    def test_matrix_ravel_order(self):
        m = matrix([[1, 2, 3], [4, 5, 6]])
        assert_equal(m.ravel(), [[1, 2, 3, 4, 5, 6]])
        assert_equal(m.ravel(order='F'), [[1, 4, 2, 5, 3, 6]])
        assert_equal(m.T.ravel(), [[1, 4, 2, 5, 3, 6]])
        assert_equal(m.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]])

    def test_array_memory_sharing(self):
        # ravel of a contiguous array is a view; flatten always copies.
        assert_(np.may_share_memory(self.a, self.a.ravel()))
        assert_(not np.may_share_memory(self.a, self.a.flatten()))

    def test_matrix_memory_sharing(self):
        assert_(np.may_share_memory(self.m, self.m.ravel()))
        assert_(not np.may_share_memory(self.m, self.m.flatten()))
# Allow running this test module stand-alone.
if __name__ == "__main__":
    run_module_suite()
| mit |
qiubing/linux-3.2.4 | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Long-lived bookkeeping, keyed by thread id (tid).
# BUG FIX: `process_names` was initialized twice (once before and once after
# the other dicts); the duplicate assignment has been removed.
process_names = {}     # long-lived pid-to-execname mapping
thread_thislock = {}   # tid -> futex uaddr the thread is currently blocked on
thread_blocktime = {}  # tid -> timestamp (ns) when the thread entered FUTEX_WAIT
lock_waits = {}        # long-lived stats on (tid,lock) blockage elapsed time
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
                              nr, uaddr, op, val, utime, uaddr2, val3):
    """Record the moment a thread blocks in FUTEX_WAIT."""
    if (op & FUTEX_CMD_MASK) != FUTEX_WAIT:
        # Only FUTEX_WAIT callers matter; originators of WAKE events
        # are not contended and are ignored.
        return
    process_names[tid] = comm
    thread_thislock[tid] = uaddr
    thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
                             nr, ret):
    """On return from futex(2), accumulate how long the thread was blocked.

    Only threads previously seen entering FUTEX_WAIT are accounted for.
    """
    # BUG FIX: dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent and works on both Python 2 and 3.
    if tid in thread_blocktime:
        elapsed = nsecs(s, ns) - thread_blocktime[tid]
        add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
        del thread_blocktime[tid]
        del thread_thislock[tid]
def trace_begin():
    """Called by perf before event processing starts."""
    # print() with a single argument behaves identically on Python 2 and 3;
    # the old `print "..."` statement form is a SyntaxError on Python 3.
    print("Press control+C to stop and show the summary")
def trace_end():
    """Called by perf when processing ends: dump per-(tid, lock) wait stats."""
    for (tid, lock) in lock_waits:
        # Renamed from `min, max` to avoid shadowing the builtins; the first
        # two stats fields are unused here.
        wait_min, wait_max, avg, count = lock_waits[tid, lock]
        # Single-argument print() is valid on both Python 2 and 3.
        print("%s[%d] lock %x contended %d times, %d avg ns" %
              (process_names[tid], tid, lock, count, avg))
| gpl-2.0 |
magicrub/MissionPlanner | Lib/bdb.py | 53 | 21716 | """Debugger basics"""
import fnmatch
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
class BdbQuit(Exception):
    """Exception to give up completely.

    Raised by the dispatch_* methods when `quitting` is set; the Bdb.run*()
    entry points catch it to unwind the debugged program cleanly.
    """
class Bdb:

    """Generic Python debugger base class.

    This class takes care of details of the trace facility;
    a derived class should implement user interaction.
    The standard debugger class (pdb.Pdb) is an example.
    """

    def __init__(self, skip=None):
        # `skip` is an optional collection of glob-style module-name
        # patterns; frames from matching modules are never stopped in.
        self.skip = set(skip) if skip else None
        self.breaks = {}    # canonical filename -> list of breakpoint line numbers
        self.fncache = {}   # raw filename -> canonical filename cache

    def canonic(self, filename):
        """Return the canonical (absolute, normcased) form of `filename`.

        Pseudo-filenames such as "<string>" are returned unchanged; results
        are cached in self.fncache.
        """
        if filename == "<" + filename[1:-1] + ">":
            return filename
        canonic = self.fncache.get(filename)
        if not canonic:
            canonic = os.path.abspath(filename)
            canonic = os.path.normcase(canonic)
            self.fncache[filename] = canonic
        return canonic

    def reset(self):
        """Forget state and prepare for a fresh debugging session."""
        import linecache
        linecache.checkcache()
        self.botframe = None
        self._set_stopinfo(None, None)

    def trace_dispatch(self, frame, event, arg):
        """Trace function installed via sys.settrace(); dispatches on `event`."""
        if self.quitting:
            return # None
        if event == 'line':
            return self.dispatch_line(frame)
        if event == 'call':
            return self.dispatch_call(frame, arg)
        if event == 'return':
            return self.dispatch_return(frame, arg)
        if event == 'exception':
            return self.dispatch_exception(frame, arg)
        # C-level events carry nothing to act on here; keep tracing.
        if event == 'c_call':
            return self.trace_dispatch
        if event == 'c_exception':
            return self.trace_dispatch
        if event == 'c_return':
            return self.trace_dispatch
        print 'bdb.Bdb.dispatch: unknown debugging event:', repr(event)
        return self.trace_dispatch

    def dispatch_line(self, frame):
        """Handle a 'line' event: stop if stepping or a breakpoint fires."""
        if self.stop_here(frame) or self.break_here(frame):
            self.user_line(frame)
            if self.quitting: raise BdbQuit
        return self.trace_dispatch

    def dispatch_call(self, frame, arg):
        """Handle a 'call' event: decide whether to trace into this frame."""
        # XXX 'arg' is no longer used
        if self.botframe is None:
            # First call of dispatch since reset()
            self.botframe = frame.f_back # (CT) Note that this may also be None!
            return self.trace_dispatch
        if not (self.stop_here(frame) or self.break_anywhere(frame)):
            # No need to trace this function
            return # None
        self.user_call(frame, arg)
        if self.quitting: raise BdbQuit
        return self.trace_dispatch

    def dispatch_return(self, frame, arg):
        """Handle a 'return' event: notify user code when appropriate."""
        if self.stop_here(frame) or frame == self.returnframe:
            self.user_return(frame, arg)
            if self.quitting: raise BdbQuit
        return self.trace_dispatch

    def dispatch_exception(self, frame, arg):
        """Handle an 'exception' event: notify user code when appropriate."""
        if self.stop_here(frame):
            self.user_exception(frame, arg)
            if self.quitting: raise BdbQuit
        return self.trace_dispatch

    # Normally derived classes don't override the following
    # methods, but they may if they want to redefine the
    # definition of stopping and breakpoints.

    def is_skipped_module(self, module_name):
        """Return True if `module_name` matches one of the `skip` patterns."""
        for pattern in self.skip:
            if fnmatch.fnmatch(module_name, pattern):
                return True
        return False

    def stop_here(self, frame):
        """Return True if the debugger should stop in `frame` (stepping)."""
        # (CT) stopframe may now also be None, see dispatch_call.
        # (CT) the former test for None is therefore removed from here.
        if self.skip and \
               self.is_skipped_module(frame.f_globals.get('__name__')):
            return False
        if frame is self.stopframe:
            if self.stoplineno == -1:
                return False
            return frame.f_lineno >= self.stoplineno
        while frame is not None and frame is not self.stopframe:
            if frame is self.botframe:
                return True
            frame = frame.f_back
        return False

    def break_here(self, frame):
        """Return True if an effective breakpoint exists at frame's position."""
        filename = self.canonic(frame.f_code.co_filename)
        if not filename in self.breaks:
            return False
        lineno = frame.f_lineno
        if not lineno in self.breaks[filename]:
            # The line itself has no breakpoint, but maybe the line is the
            # first line of a function with breakpoint set by function name.
            lineno = frame.f_code.co_firstlineno
            if not lineno in self.breaks[filename]:
                return False

        # flag says ok to delete temp. bp
        (bp, flag) = effective(filename, lineno, frame)
        if bp:
            self.currentbp = bp.number
            if (flag and bp.temporary):
                self.do_clear(str(bp.number))
            return True
        else:
            return False

    def do_clear(self, arg):
        """Remove a (temporary) breakpoint; must be provided by subclasses."""
        raise NotImplementedError, "subclass of bdb must implement do_clear()"

    def break_anywhere(self, frame):
        """Return True if there is any breakpoint in frame's source file."""
        return self.canonic(frame.f_code.co_filename) in self.breaks

    # Derived classes should override the user_* methods
    # to gain control.

    def user_call(self, frame, argument_list):
        """This method is called when there is the remote possibility
        that we ever need to stop in this function."""
        pass

    def user_line(self, frame):
        """This method is called when we stop or break at this line."""
        pass

    def user_return(self, frame, return_value):
        """This method is called when a return trap is set here."""
        pass

    def user_exception(self, frame, exc_info):
        """This method is called if an exception occurs,
        but only if we are to stop at or just below this level."""
        exc_type, exc_value, exc_traceback = exc_info
        pass

    def _set_stopinfo(self, stopframe, returnframe, stoplineno=0):
        """Record the stepping state used by stop_here()."""
        self.stopframe = stopframe
        self.returnframe = returnframe
        self.quitting = 0
        # stoplineno >= 0 means: stop at line >= the stoplineno
        # stoplineno -1 means: don't stop at all
        self.stoplineno = stoplineno

    # Derived classes and clients can call the following methods
    # to affect the stepping state.

    def set_until(self, frame): #the name "until" is borrowed from gdb
        """Stop when the line with the line no greater than the current one is
        reached or when returning from current frame"""
        self._set_stopinfo(frame, frame, frame.f_lineno+1)

    def set_step(self):
        """Stop after one line of code."""
        self._set_stopinfo(None, None)

    def set_next(self, frame):
        """Stop on the next line in or below the given frame."""
        self._set_stopinfo(frame, None)

    def set_return(self, frame):
        """Stop when returning from the given frame."""
        self._set_stopinfo(frame.f_back, frame)

    def set_trace(self, frame=None):
        """Start debugging from `frame`.

        If frame is not specified, debugging starts from caller's frame.
        """
        if frame is None:
            frame = sys._getframe().f_back
        self.reset()
        # Install the trace function on the whole existing frame stack so
        # already-running frames are debugged too.
        while frame:
            frame.f_trace = self.trace_dispatch
            self.botframe = frame
            frame = frame.f_back
        self.set_step()
        sys.settrace(self.trace_dispatch)

    def set_continue(self):
        """Resume execution, stopping only at breakpoints (or at the end)."""
        # Don't stop except at breakpoints or when finished
        self._set_stopinfo(self.botframe, None, -1)
        if not self.breaks:
            # no breakpoints; run without debugger overhead
            sys.settrace(None)
            frame = sys._getframe().f_back
            while frame and frame is not self.botframe:
                del frame.f_trace
                frame = frame.f_back

    def set_quit(self):
        """Request that the debugged program be aborted (raises BdbQuit soon)."""
        self.stopframe = self.botframe
        self.returnframe = None
        self.quitting = 1
        sys.settrace(None)

    # Derived classes and clients can call the following methods
    # to manipulate breakpoints.  These methods return an
    # error message is something went wrong, None if all is well.
    # Set_break prints out the breakpoint line and file:lineno.
    # Call self.get_*break*() to see the breakpoints or better
    # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().

    def set_break(self, filename, lineno, temporary=0, cond = None,
                  funcname=None):
        """Register a breakpoint; return an error string or None on success."""
        filename = self.canonic(filename)
        import linecache # Import as late as possible
        line = linecache.getline(filename, lineno)
        if not line:
            return 'Line %s:%d does not exist' % (filename,
                                   lineno)
        if not filename in self.breaks:
            self.breaks[filename] = []
        list = self.breaks[filename]
        if not lineno in list:
            list.append(lineno)
        # The Breakpoint constructor registers itself in the class-level
        # Breakpoint.bplist/bpbynumber tables; the local is intentionally
        # unused beyond that side effect.
        bp = Breakpoint(filename, lineno, temporary, cond, funcname)

    def _prune_breaks(self, filename, lineno):
        """Drop bookkeeping for (filename, lineno) once no breakpoints remain."""
        if (filename, lineno) not in Breakpoint.bplist:
            self.breaks[filename].remove(lineno)
        if not self.breaks[filename]:
            del self.breaks[filename]

    def clear_break(self, filename, lineno):
        """Delete all breakpoints at filename:lineno; return error or None."""
        filename = self.canonic(filename)
        if not filename in self.breaks:
            return 'There are no breakpoints in %s' % filename
        if lineno not in self.breaks[filename]:
            return 'There is no breakpoint at %s:%d' % (filename,
                                    lineno)
        # If there's only one bp in the list for that file,line
        # pair, then remove the breaks entry
        for bp in Breakpoint.bplist[filename, lineno][:]:
            bp.deleteMe()
        self._prune_breaks(filename, lineno)

    def clear_bpbynumber(self, arg):
        """Delete the breakpoint with the given number; return error or None."""
        try:
            number = int(arg)
        except:
            return 'Non-numeric breakpoint number (%s)' % arg
        try:
            bp = Breakpoint.bpbynumber[number]
        except IndexError:
            return 'Breakpoint number (%d) out of range' % number
        if not bp:
            return 'Breakpoint (%d) already deleted' % number
        bp.deleteMe()
        self._prune_breaks(bp.file, bp.line)

    def clear_all_file_breaks(self, filename):
        """Delete every breakpoint in `filename`; return error or None."""
        filename = self.canonic(filename)
        if not filename in self.breaks:
            return 'There are no breakpoints in %s' % filename
        for line in self.breaks[filename]:
            blist = Breakpoint.bplist[filename, line]
            for bp in blist:
                bp.deleteMe()
        del self.breaks[filename]

    def clear_all_breaks(self):
        """Delete all breakpoints everywhere; return error or None."""
        if not self.breaks:
            return 'There are no breakpoints'
        for bp in Breakpoint.bpbynumber:
            if bp:
                bp.deleteMe()
        self.breaks = {}

    def get_break(self, filename, lineno):
        """Return True if a breakpoint exists at filename:lineno."""
        filename = self.canonic(filename)
        return filename in self.breaks and \
            lineno in self.breaks[filename]

    def get_breaks(self, filename, lineno):
        """Return the list of Breakpoint objects at filename:lineno (maybe [])."""
        filename = self.canonic(filename)
        return filename in self.breaks and \
            lineno in self.breaks[filename] and \
            Breakpoint.bplist[filename, lineno] or []

    def get_file_breaks(self, filename):
        """Return the list of breakpointed line numbers in `filename`."""
        filename = self.canonic(filename)
        if filename in self.breaks:
            return self.breaks[filename]
        else:
            return []

    def get_all_breaks(self):
        """Return the whole filename -> line-number-list mapping."""
        return self.breaks

    # Derived classes and clients can call the following method
    # to get a data structure representing a stack trace.

    def get_stack(self, f, t):
        """Return (stack, index): a list of (frame, lineno) pairs built from
        frame `f` and traceback `t`, plus the index of the current entry."""
        stack = []
        if t and t.tb_frame is f:
            t = t.tb_next
        while f is not None:
            stack.append((f, f.f_lineno))
            if f is self.botframe:
                break
            f = f.f_back
        stack.reverse()
        i = max(0, len(stack) - 1)
        while t is not None:
            stack.append((t.tb_frame, t.tb_lineno))
            t = t.tb_next
        if f is None:
            i = max(0, len(stack) - 1)
        return stack, i

    #

    def format_stack_entry(self, frame_lineno, lprefix=': '):
        """Return a one-line description of a (frame, lineno) stack entry."""
        # Note: `repr` here is the Python 2 stdlib *module*, not the builtin.
        import linecache, repr
        frame, lineno = frame_lineno
        filename = self.canonic(frame.f_code.co_filename)
        s = '%s(%r)' % (filename, lineno)
        if frame.f_code.co_name:
            s = s + frame.f_code.co_name
        else:
            s = s + "<lambda>"
        if '__args__' in frame.f_locals:
            args = frame.f_locals['__args__']
        else:
            args = None
        if args:
            s = s + repr.repr(args)
        else:
            s = s + '()'
        if '__return__' in frame.f_locals:
            rv = frame.f_locals['__return__']
            s = s + '->'
            s = s + repr.repr(rv)
        line = linecache.getline(filename, lineno, frame.f_globals)
        if line: s = s + lprefix + line.strip()
        return s

    # The following two methods can be called by clients to use
    # a debugger to debug a statement, given as a string.

    def run(self, cmd, globals=None, locals=None):
        """Debug the statement string (or code object) `cmd` via exec."""
        if globals is None:
            import __main__
            globals = __main__.__dict__
        if locals is None:
            locals = globals
        self.reset()
        sys.settrace(self.trace_dispatch)
        if not isinstance(cmd, types.CodeType):
            cmd = cmd+'\n'
        try:
            exec cmd in globals, locals
        except BdbQuit:
            pass
        finally:
            self.quitting = 1
            sys.settrace(None)

    def runeval(self, expr, globals=None, locals=None):
        """Debug the expression string `expr`; return its value."""
        if globals is None:
            import __main__
            globals = __main__.__dict__
        if locals is None:
            locals = globals
        self.reset()
        sys.settrace(self.trace_dispatch)
        if not isinstance(expr, types.CodeType):
            expr = expr+'\n'
        try:
            return eval(expr, globals, locals)
        except BdbQuit:
            pass
        finally:
            self.quitting = 1
            sys.settrace(None)

    def runctx(self, cmd, globals, locals):
        """Deprecated alias for run() kept for backward compatibility."""
        # B/W compatibility
        self.run(cmd, globals, locals)

    # This method is more useful to debug a single function call.

    def runcall(self, func, *args, **kwds):
        """Debug a single call func(*args, **kwds); return its result."""
        self.reset()
        sys.settrace(self.trace_dispatch)
        res = None
        try:
            res = func(*args, **kwds)
        except BdbQuit:
            pass
        finally:
            self.quitting = 1
            sys.settrace(None)
        return res
def set_trace():
    """Start debugging with a fresh Bdb instance from the caller's frame."""
    Bdb().set_trace()
class Breakpoint:

    """Breakpoint class

    Implements temporary breakpoints, ignore counts, disabling and
    (re)-enabling, and conditionals.

    Breakpoints are indexed by number through bpbynumber and by
    the file,line tuple using bplist.  The former points to a
    single instance of class Breakpoint.  The latter points to a
    list of such instances since there may be more than one
    breakpoint per line.
    """

    # XXX Keeping state in the class is a mistake -- this means
    # you cannot have more than one active Bdb instance.

    next = 1 # Next bp to be assigned
    bplist = {} # indexed by (file, lineno) tuple
    bpbynumber = [None] # Each entry is None or an instance of Bpt
    # index 0 is unused, except for marking an
    # effective break .... see effective()

    def __init__(self, file, line, temporary=0, cond=None, funcname=None):
        # `funcname` makes this a break-on-function breakpoint; `cond` is an
        # expression string evaluated in the frame (see effective()).
        self.funcname = funcname
        # Needed if funcname is not None.
        self.func_first_executable_line = None
        self.file = file # This better be in canonical form!
        self.line = line
        self.temporary = temporary
        self.cond = cond
        self.enabled = 1
        self.ignore = 0
        self.hits = 0
        self.number = Breakpoint.next
        Breakpoint.next = Breakpoint.next + 1
        # Build the two lists
        self.bpbynumber.append(self)
        if (file, line) in self.bplist:
            self.bplist[file, line].append(self)
        else:
            self.bplist[file, line] = [self]

    def deleteMe(self):
        """Remove this breakpoint from the class-level registries."""
        index = (self.file, self.line)
        self.bpbynumber[self.number] = None # No longer in list
        self.bplist[index].remove(self)
        if not self.bplist[index]:
            # No more bp for this f:l combo
            del self.bplist[index]

    def enable(self):
        """Mark this breakpoint as active."""
        self.enabled = 1

    def disable(self):
        """Mark this breakpoint as inactive (kept but never triggered)."""
        self.enabled = 0

    def bpprint(self, out=None):
        """Print a human-readable summary of this breakpoint to `out`
        (defaults to sys.stdout)."""
        if out is None:
            out = sys.stdout
        if self.temporary:
            disp = 'del '
        else:
            disp = 'keep '
        if self.enabled:
            disp = disp + 'yes '
        else:
            disp = disp + 'no '
        print >>out, '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
                                                     self.file, self.line)
        if self.cond:
            print >>out, '\tstop only if %s' % (self.cond,)
        if self.ignore:
            print >>out, '\tignore next %d hits' % (self.ignore)
        if (self.hits):
            if (self.hits > 1): ss = 's'
            else: ss = ''
            print >>out, ('\tbreakpoint already hit %d time%s' %
                          (self.hits, ss))
# -----------end of Breakpoint class----------
def checkfuncname(b, frame):
    """Check whether we should break here because of `b.funcname`."""
    if not b.funcname:
        # Line-number breakpoint: fire only on the exact line, so a
        # breakpoint on a `def` line does not fire when the function is
        # merely *called*.
        return b.line == frame.f_lineno

    # Breakpoint set via function name.
    if frame.f_code.co_name != b.funcname:
        # Not a call of the named function (e.g. execution of its def).
        return False

    # We are in the right frame; remember its first executable line the
    # first time the function is entered.
    if not b.func_first_executable_line:
        b.func_first_executable_line = frame.f_lineno

    # Only break on the function's first executable line.
    return b.func_first_executable_line == frame.f_lineno
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns breakpoint number or 0 if none
def effective(file, line, frame):
    """Determine which breakpoint for this file:line is to be acted upon.

    Called only if we know there is a bpt at this
    location.  Returns breakpoint that was triggered and a flag
    that indicates if it is ok to delete a temporary bp.

    Return value is (bp, 1) when the breakpoint fires normally, (bp, 0)
    when its condition could not be evaluated (keep temporaries as a hint
    to the user), and (None, None) when no breakpoint fires.
    """
    possibles = Breakpoint.bplist[file,line]
    for i in range(0, len(possibles)):
        b = possibles[i]
        if b.enabled == 0:
            continue
        if not checkfuncname(b, frame):
            continue
        # Count every hit when bp is enabled
        b.hits = b.hits + 1
        if not b.cond:
            # If unconditional, and ignoring,
            # go on to next, else break
            if b.ignore > 0:
                b.ignore = b.ignore -1
                continue
            else:
                # breakpoint and marker that's ok
                # to delete if temporary
                return (b,1)
        else:
            # Conditional bp.
            # Ignore count applies only to those bpt hits where the
            # condition evaluates to true.
            try:
                val = eval(b.cond, frame.f_globals,
                           frame.f_locals)
                if val:
                    if b.ignore > 0:
                        b.ignore = b.ignore -1
                        # continue
                    else:
                        return (b,1)
                # else:
                #   continue
            except:
                # Bare except is deliberate: *any* failure evaluating the
                # user-supplied condition must not crash the debugger.
                # if eval fails, most conservative
                # thing is to stop on breakpoint
                # regardless of ignore count.
                # Don't delete temporary,
                # as another hint to user.
                return (b,0)
    return (None, None)
# -------------------- testing --------------------
class Tdb(Bdb):
    """Minimal Bdb subclass that just prints every debug event (self-test)."""

    def user_call(self, frame, args):
        """Print the name of each function the debugger traces into."""
        name = frame.f_code.co_name
        if not name: name = '???'
        print '+++ call', name, args

    def user_line(self, frame):
        """Print file, line number, function and source text for each line."""
        import linecache
        name = frame.f_code.co_name
        if not name: name = '???'
        fn = self.canonic(frame.f_code.co_filename)
        line = linecache.getline(fn, frame.f_lineno, frame.f_globals)
        print '+++', fn, frame.f_lineno, name, ':', line.strip()

    def user_return(self, frame, retval):
        """Print each traced return value."""
        print '+++ return', retval

    def user_exception(self, frame, exc_stuff):
        """Print each traced exception, then keep running to breakpoints."""
        print '+++ exception', exc_stuff
        self.set_continue()
def foo(n):
    """Demo function used by test(): calls bar(n*10) and prints its result."""
    print 'foo(', n, ')'
    x = bar(n*10)
    print 'bar returned', x
def bar(a):
    """Demo helper: print the argument and return half of it.

    (This file is Python 2, so integer input gives floor division here.)
    """
    print 'bar(', a, ')'
    return a/2
def test():
    """Run the Tdb tracing demo over foo(10)."""
    t = Tdb()
    t.run('import bdb; bdb.foo(10)')
# end
| gpl-3.0 |
pilwon/selenium-webdriver | py/test/selenium/webdriver/common/webdriverwait_tests.py | 15 | 16413 | #!/usr/bin/python
# Copyright 2011 WebDriver committers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import unittest
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoSuchFrameException
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import StaleElementReferenceException
from selenium.common.exceptions import WebDriverException
from selenium.common.exceptions import InvalidElementStateException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
def not_available_on_remote(func):
    """Decorator apparently meant to skip `func` when running against a
    remote driver.

    NOTE(review): `type(self.driver) == 'remote'` compares a class object to
    a string, so it is always False and the wrapped test always runs --
    confirm the intended check (e.g. inspecting the driver class or name).
    NOTE(review): the print() below looks like leftover debug output.
    """
    def testMethod(self):
        print(self.driver)
        if type(self.driver) == 'remote':
            return lambda x: None
        else:
            return func(self)
    return testMethod
def throwSERE(driver):
    """Wait-condition helper that always raises StaleElementReferenceException;
    used to exercise WebDriverWait's ignored_exceptions handling."""
    raise StaleElementReferenceException("test")
class WebDriverWaitTest(unittest.TestCase):
def testShouldExplicitlyWaitForASingleElement(self):
    """until(presence_of_element_located) returns once the element appears."""
    self._loadPage("dynamic")
    add = self.driver.find_element_by_id("adder")
    add.click();
    WebDriverWait(self.driver, 3).until(EC.presence_of_element_located((By.ID, "box0"))) # All is well if this doesn't throw.
def testShouldStillFailToFindAnElementWithExplicitWait(self):
    """until() raises TimeoutException when the element never appears."""
    self._loadPage("dynamic")
    try:
        WebDriverWait(self.driver, 0.7).until(EC.presence_of_element_located((By.ID, "box0")))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
    except Exception as e:
        self.fail("Expected TimeoutException but got " + str(e))
def testShouldExplicitlyWaituntilAtLeastOneElementIsFoundWhenSearchingForMany(self):
    """presence_of_all_elements_located resolves once at least one matches."""
    self._loadPage("dynamic")
    add = self.driver.find_element_by_id("adder")
    add.click();
    add.click();
    elements = WebDriverWait(self.driver, 2).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))
    self.assertTrue(len(elements) >= 1)
def testShouldFailToFindElementsWhenExplicitWaiting(self):
    """presence_of_all_elements_located times out when nothing matches."""
    self._loadPage("dynamic")
    try:
        elements = WebDriverWait(self.driver, 0.7).until(EC.presence_of_all_elements_located((By.CLASS_NAME, "redbox")))
    except TimeoutException as e:
        pass # we should get a timeout
    except Exception as e:
        self.fail("Expected TimeoutException but got " + str(e))
def testShouldWaitOnlyAsLongAsTimeoutSpecifiedWhenImplicitWaitsAreSet(self):
    """The explicit-wait timeout governs even when an implicit wait is set."""
    self._loadPage("dynamic")
    self.driver.implicitly_wait(0.5)
    try:
        start = time.time()
        try:
            WebDriverWait(self.driver, 1).until(EC.presence_of_element_located((By.ID, "box0")))
            self.fail("Expected TimeoutException to have been thrown")
        except TimeoutException as e:
            pass
        self.assertTrue(time.time() - start < 1.5,
                    "Expected to take just over 1 second to execute, but took %f" %
                    (time.time() - start))
    finally:
        # Always restore the implicit wait so later tests are unaffected.
        self.driver.implicitly_wait(0)
def testShouldWaitAtLeastOnce(self):
    """until() evaluates its condition at least once, even with timeout 0."""
    self._loadPage("simpleTest")
    elements_exists = lambda driver: driver.find_elements_by_tag_name('h1')
    elements = WebDriverWait(self.driver, 0).until(elements_exists)
    self.assertTrue(len(elements) >= 1)
def testWaitUntilNotReturnsIfEvaluatesToFalse(self):
    """until_not() returns immediately when the condition is falsy."""
    falsum = lambda driver: False
    self.assertFalse(WebDriverWait(self.driver, 1).until_not(falsum))
def testWaitShouldStillFailIfProduceIgnoredException(self):
    """A condition that keeps raising an ignored exception still times out."""
    ignored = (InvalidElementStateException, StaleElementReferenceException)
    try:
        WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
def testWaitShouldStillFailIfProduceChildOfIgnoredException(self):
    """Subclasses of an ignored exception type are ignored as well."""
    ignored = (WebDriverException)
    try:
        WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until(throwSERE)
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
def testWaitUntilNotShouldNotFailIfProduceIgnoredException(self):
    """until_not() treats a raised ignored exception as a falsy result."""
    ignored = (InvalidElementStateException, StaleElementReferenceException)
    self.assertTrue(WebDriverWait(self.driver, 1, 0.7, ignored_exceptions=ignored).until_not(throwSERE))
def testExpectedConditionTitleIs(self):
    """EC.title_is matches the exact document title (and times out otherwise)."""
    self._loadPage("blank")
    WebDriverWait(self.driver, 1).until(EC.title_is("blank"))
    self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
    WebDriverWait(self.driver, 1).until(EC.title_is("not blank"))
    self.assertEqual(self.driver.title, 'not blank')
    try:
        WebDriverWait(self.driver, 0.7).until(EC.title_is("blank"))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
def testExpectedConditionTitleContains(self):
    """EC.title_contains matches a substring of the document title."""
    self._loadPage("blank")
    self.driver.execute_script("setTimeout(function(){document.title='not blank'}, 200)")
    WebDriverWait(self.driver, 1).until(EC.title_contains("not"))
    self.assertEqual(self.driver.title, 'not blank')
    try:
        WebDriverWait(self.driver, 0.7).until(EC.title_contains("blanket"))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
def testExpectedConditionVisibilityOfElementLocated(self):
    """visibility_of_element_located waits until the element is displayed."""
    self._loadPage("javascriptPage")
    try:
        WebDriverWait(self.driver, 0.7).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
    self.driver.find_element_by_id('clickToShow').click()
    element = WebDriverWait(self.driver, 5).until(EC.visibility_of_element_located((By.ID, 'clickToHide')))
    self.assertTrue(element.is_displayed())
def testExpectedConditionVisibilityOf(self):
    """visibility_of waits on an already-located WebElement."""
    self._loadPage("javascriptPage")
    hidden = self.driver.find_element_by_id('clickToHide')
    try:
        WebDriverWait(self.driver, 0.7).until(EC.visibility_of(hidden))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
    self.driver.find_element_by_id('clickToShow').click()
    element = WebDriverWait(self.driver, 5).until(EC.visibility_of(hidden))
    self.assertTrue(element.is_displayed())
def testExpectedConditionTextToBePresentInElement(self):
    """text_to_be_present_in_element waits for text inside the element."""
    self._loadPage('booleanAttributes')
    try:
        WebDriverWait(self.driver, 0.7).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
    self.driver.execute_script("setTimeout(function(){var el = document.getElementById('unwrappable'); el.textContent = el.innerText = 'Unwrappable Expected text'}, 200)")
    WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element((By.ID, 'unwrappable'), 'Expected'))
    self.assertEqual('Unwrappable Expected text', self.driver.find_element_by_id('unwrappable').text)
def testExpectedConditionTextToBePresentInElementValue(self):
    """text_to_be_present_in_element_value waits for text in the value attribute."""
    self._loadPage('booleanAttributes')
    try:
        WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element_value((By.ID, 'inputRequired'), 'Expected'))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
    self.driver.execute_script("setTimeout(function(){document.getElementById('inputRequired').value = 'Example Expected text'}, 200)")
    WebDriverWait(self.driver, 1).until(EC.text_to_be_present_in_element_value((By.ID, 'inputRequired'), 'Expected'))
    self.assertEqual('Example Expected text', self.driver.find_element_by_id('inputRequired').get_attribute('value'))
def testExpectedConditionFrameToBeAvailableAndSwitchToItByName(self):
    """frame_to_be_available_and_switch_to_it accepts a frame name."""
    self._loadPage("blank")
    try:
        WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it('myFrame'))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
    self.driver.execute_script("setTimeout(function(){var f = document.createElement('iframe'); f.id='myFrame'; f.src = '"+self._pageURL('iframeWithAlert')+"'; document.body.appendChild(f)}, 200)")
    WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it('myFrame'))
    self.assertEqual('click me', self.driver.find_element_by_id('alertInFrame').text)
def testExpectedConditionFrameToBeAvailableAndSwitchToItByLocator(self):
    """frame_to_be_available_and_switch_to_it accepts a (By, value) locator."""
    self._loadPage("blank")
    try:
        WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it((By.ID, 'myFrame')))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
    self.driver.execute_script("setTimeout(function(){var f = document.createElement('iframe'); f.id='myFrame'; f.src = '"+self._pageURL('iframeWithAlert')+"'; document.body.appendChild(f)}, 200)")
    WebDriverWait(self.driver, 1).until(EC.frame_to_be_available_and_switch_to_it((By.ID, 'myFrame')))
    self.assertEqual('click me', self.driver.find_element_by_id('alertInFrame').text)
def testExpectedConditionInvisiblityOfElementLocated(self):
    """invisibility_of_element_located waits until the element is hidden."""
    self._loadPage("javascriptPage")
    self.driver.execute_script("delayedShowHide(0, true)")
    try:
        WebDriverWait(self.driver, 0.7).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
    self.driver.execute_script("delayedShowHide(200, false)")
    WebDriverWait(self.driver, 0.7).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
    self.assertFalse(self.driver.find_element_by_id('clickToHide').is_displayed())
def testExpectedConditionElementToBeClickable(self):
    """element_to_be_clickable waits for a visible, enabled element."""
    self._loadPage("javascriptPage")
    try:
        WebDriverWait(self.driver, 0.7).until(EC.element_to_be_clickable((By.ID, 'clickToHide')))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException as e:
        pass
    self.driver.execute_script("delayedShowHide(200, true)")
    WebDriverWait(self.driver, 0.7).until(EC.element_to_be_clickable((By.ID, 'clickToHide')))
    element = self.driver.find_element_by_id('clickToHide')
    element.click()
    WebDriverWait(self.driver, 3.5).until(EC.invisibility_of_element_located((By.ID, 'clickToHide')))
    self.assertFalse(element.is_displayed())
def testExpectedConditionStalenessOf(self):
    """staleness_of fires once the element is detached from the DOM."""
    self._loadPage('dynamicallyModifiedPage')
    element = self.driver.find_element_by_id('element-to-remove')
    try:
        # Element is still attached, so the wait must time out.
        WebDriverWait(self.driver, 0.7).until(EC.staleness_of(element))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException:
        # Expected; unused exception binding removed (F841).
        pass
    self.driver.find_element_by_id('buttonDelete').click()
    # The reference is usable right up until the removal is observed.
    self.assertEqual('element', element.text)
    WebDriverWait(self.driver, 0.7).until(EC.staleness_of(element))
    try:
        # Accessing a stale reference must now raise.
        element.text
        self.fail("Expected StaleReferenceException to have been thrown")
    except StaleElementReferenceException:
        # Expected; unused exception binding removed (F841).
        pass
def testExpectedConditionElementToBeSelected(self):
    """element_to_be_selected waits until the given checkbox is checked."""
    self._loadPage("formPage")
    element = self.driver.find_element_by_id('checky')
    try:
        # Checkbox starts unchecked, so the wait must time out.
        WebDriverWait(self.driver, 0.7).until(EC.element_to_be_selected(element))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException:
        # Expected; unused exception binding removed (F841).
        pass
    # Check the box asynchronously after 200 ms; the wait must succeed.
    self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
    WebDriverWait(self.driver, 0.7).until(EC.element_to_be_selected(element))
    self.assertTrue(element.is_selected())
def testExpectedConditionElementLocatedToBeSelected(self):
    """element_located_to_be_selected is the locator-based variant of the selected wait."""
    self._loadPage("formPage")
    element = self.driver.find_element_by_id('checky')
    try:
        # Checkbox starts unchecked, so the wait must time out.
        WebDriverWait(self.driver, 0.7).until(EC.element_located_to_be_selected((By.ID, 'checky')))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException:
        # Expected; unused exception binding removed (F841).
        pass
    # Check the box asynchronously after 200 ms; the wait must succeed.
    self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
    WebDriverWait(self.driver, 0.7).until(EC.element_located_to_be_selected((By.ID, 'checky')))
    self.assertTrue(element.is_selected())
def testExpectedConditionElementSelectionStateToBe(self):
    """element_selection_state_to_be matches an explicit selected/unselected state."""
    self._loadPage("formPage")
    element = self.driver.find_element_by_id('checky')
    # Unchecked state is already true, so this wait returns immediately.
    WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, False))
    self.assertFalse(element.is_selected())
    try:
        # Checked state is not yet true, so the wait must time out.
        WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, True))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException:
        # Expected; unused exception binding removed (F841).
        pass
    # Check the box asynchronously after 200 ms; the wait must succeed.
    self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
    WebDriverWait(self.driver, 0.7).until(EC.element_selection_state_to_be(element, True))
    self.assertTrue(element.is_selected())
def testExpectedConditionElementLocatedSelectionStateToBe(self):
    """element_located_selection_state_to_be is the locator-based selection-state wait."""
    self._loadPage("formPage")
    element = self.driver.find_element_by_id('checky')
    # Unchecked state is already true, so this wait returns immediately.
    WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), False))
    self.assertFalse(element.is_selected())
    try:
        # Checked state is not yet true, so the wait must time out.
        WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), True))
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException:
        # Expected; unused exception binding removed (F841).
        pass
    # Check the box asynchronously after 200 ms; the wait must succeed.
    self.driver.execute_script("setTimeout(function(){document.getElementById('checky').checked = true}, 200)")
    WebDriverWait(self.driver, 0.7).until(EC.element_located_selection_state_to_be((By.ID, 'checky'), True))
    self.assertTrue(element.is_selected())
def testExpectedConditionAlertIsPresent(self):
    """alert_is_present waits for a JavaScript alert to open."""
    self._loadPage('blank')
    try:
        # No alert yet, so the wait must time out.
        WebDriverWait(self.driver, 0.7).until(EC.alert_is_present())
        self.fail("Expected TimeoutException to have been thrown")
    except TimeoutException:
        # Expected; unused exception binding removed (F841).
        pass
    # Fire the alert asynchronously after 200 ms; the wait must succeed.
    self.driver.execute_script("setTimeout(function(){alert('alerty')}, 200)")
    WebDriverWait(self.driver, 0.7).until(EC.alert_is_present())
    alert = self.driver.switch_to.alert
    self.assertEqual('alerty', alert.text)
    alert.dismiss()
def _pageURL(self, name):
return self.webserver.where_is(name + '.html')
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
| apache-2.0 |
gbowerman/azure-quickstart-templates | bosh-cf-crossregion/scripts/setup_env.py | 121 | 9192 | #!/usr/bin/env python
import json
import netaddr
import os
import random
import re
import requests
import sys
from azure.storage.blob import AppendBlobService
from azure.storage.table import TableService
import azure.mgmt.network
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.network import NetworkManagementClient, NetworkManagementClientConfiguration
def _prepare_account(account_name, access_key, endpoint_suffix, public_stemcell, create_table):
    """Create the 'bosh'/'stemcell' blob containers (and optionally the
    'stemcells' metadata table) on one storage account."""
    blob_service = AppendBlobService(account_name=account_name, account_key=access_key, endpoint_suffix=endpoint_suffix)
    blob_service.create_container('bosh')
    if public_stemcell:
        # Stemcell blobs must be publicly readable on the default accounts.
        blob_service.create_container(
            container_name='stemcell',
            public_access='blob'
        )
    else:
        blob_service.create_container('stemcell')
    if create_table:
        # Table for storing metadata of storage accounts and stemcells.
        table_service = TableService(account_name=account_name, account_key=access_key, endpoint_suffix=endpoint_suffix)
        table_service.create_table('stemcells')


def prepare_storage(settings):
    """Initialise all four storage accounts used by the deployment.

    The two default accounts get public stemcell containers plus the
    'stemcells' metadata table; the two premium accounts only get private
    'bosh' and 'stemcell' containers.  Extracted helper removes the 4x
    copy-pasted setup code of the original.
    """
    endpoint_suffix = settings["SERVICE_HOST_BASE"]
    _prepare_account(settings["DEFAULT_STORAGE_ACCOUNT_NAME"],
                     settings["STORAGE_ACCESS_KEY"],
                     endpoint_suffix, True, True)
    _prepare_account(settings["DEFAULT_STORAGE_ACCOUNT_NAME_SECONDARY"],
                     settings["DEFAULT_STORAGE_ACCESS_KEY_SECONDARY"],
                     endpoint_suffix, True, True)
    _prepare_account(settings["STORAGE_ACCOUNT_NAME_PRIMARY"],
                     settings["STORAGE_ACCESS_KEY_PRIMARY"],
                     endpoint_suffix, False, False)
    _prepare_account(settings["STORAGE_ACCOUNT_NAME_SECONDARY"],
                     settings["STORAGE_ACCESS_KEY_SECONDARY"],
                     endpoint_suffix, False, False)
def render_bosh_manifest(settings):
    """Fill in bosh.yml (the bosh-init manifest) from deployment settings.

    Returns the BOSH director's private IP: the 5th address of the BOSH
    subnet (the 2nd address is the gateway).
    """
    with open('bosh.pub', 'r') as tmpfile:
        ssh_public_key = tmpfile.read()

    ip = netaddr.IPNetwork(settings['SUBNET_ADDRESS_RANGE_FOR_BOSH'])
    gateway_ip = str(ip[1])
    bosh_director_ip = str(ip[4])

    # Render the manifest for bosh-init
    bosh_template = 'bosh.yml'
    if os.path.exists(bosh_template):
        with open(bosh_template, 'r') as tmpfile:
            contents = tmpfile.read()
        keys = [
            "SUBNET_ADDRESS_RANGE_FOR_BOSH",
            "VNET_NAME",
            "SUBNET_NAME_FOR_BOSH",
            "SUBSCRIPTION_ID",
            "DEFAULT_STORAGE_ACCOUNT_NAME",
            "RESOURCE_GROUP_NAME",
            "KEEP_UNREACHABLE_VMS",
            "TENANT_ID",
            "CLIENT_ID",
            "CLIENT_SECRET",
            "BOSH_PUBLIC_IP",
            "NSG_NAME_FOR_BOSH",
            "BOSH_RELEASE_URL",
            "BOSH_RELEASE_SHA1",
            "BOSH_AZURE_CPI_RELEASE_URL",
            "BOSH_AZURE_CPI_RELEASE_SHA1",
            "STEMCELL_URL",
            "STEMCELL_SHA1",
            "ENVIRONMENT"
        ]
        # BUGFIX: use literal str.replace() instead of
        # re.compile(re.escape(p)).sub(v, s).  re.sub() interprets
        # backslashes and group references in the *replacement* string, so
        # secrets, URLs or the SSH public key containing '\' would be
        # corrupted (or raise).  Plain replace() has no such processing.
        for k in keys:
            contents = contents.replace("REPLACE_WITH_{0}".format(k), str(settings[k]))
        contents = contents.replace("REPLACE_WITH_SSH_PUBLIC_KEY", ssh_public_key)
        contents = contents.replace("REPLACE_WITH_GATEWAY_IP", gateway_ip)
        contents = contents.replace("REPLACE_WITH_BOSH_DIRECTOR_IP", bosh_director_ip)
        with open(bosh_template, 'w') as tmpfile:
            tmpfile.write(contents)

    return bosh_director_ip
def get_cloud_foundry_configuration(scenario, settings):
    """Build the REPLACE_WITH_* substitution map for the Cloud Foundry manifest."""
    config = {}

    # Settings copied through under their own names.
    for key in ["SUBNET_ADDRESS_RANGE_FOR_CLOUD_FOUNDRY", "VNET_NAME", "VNET_NAME_SECONDARY", "SUBNET_NAME_FOR_CLOUD_FOUNDRY", "CLOUD_FOUNDRY_PUBLIC_IP", "NSG_NAME_FOR_CLOUD_FOUNDRY"]:
        config[key] = settings[key]

    # Concatenate the SSL certificate and key, indenting every line by
    # eight spaces so the block nests correctly inside the YAML manifest.
    with open('cloudfoundry.cert', 'r') as tmpfile:
        ssl_cert = tmpfile.read()
    with open('cloudfoundry.key', 'r') as tmpfile:
        ssl_key = tmpfile.read()
    ssl_cert_and_key = ssl_cert + ssl_key
    config["SSL_CERT_AND_KEY"] = ("\n" + " " * 8).join(ssl_cert_and_key.split('\n'))

    # System domain, e.g. a custom domain mapped to Traffic Manager.
    config["SYSTEM_DOMAIN"] = settings["CUSTOM_SYSTEM_DOMAIN"]

    # More pass-through settings: external endpoints (e.g. MariaDB load
    # balancer, NFS cluster) and the storage accounts / secondary public IP.
    for key in ["EXTERNAL_DATABASE_ENDPOINT",
                "EXTERNAL_NFS_ENDPOINT",
                "STORAGE_ACCOUNT_NAME_PRIMARY",
                "STORAGE_ACCOUNT_NAME_SECONDARY",
                "CLOUD_FOUNDRY_PUBLIC_IP_SECONDARY"]:
        config[key] = settings[key]

    # Addresses derived from the Cloud Foundry subnet range.
    ip = netaddr.IPNetwork(settings['SUBNET_ADDRESS_RANGE_FOR_CLOUD_FOUNDRY'])
    address_offsets = {
        "GATEWAY_IP": 1,
        "RESERVED_IP_FROM": 2,
        "RESERVED_IP_TO": 3,
        "CLOUD_FOUNDRY_INTERNAL_IP": 4,
        "STATIC_IP_FROM": 4,
        "STATIC_IP_TO": 100,
        "HAPROXY_IP": 4,
        "POSTGRES_IP": 11,
        "ROUTER_IP": 12,
        "NATS_IP": 13,
        "ETCD_IP": 14,
        "NFS_IP": 15,
        "CONSUL_IP": 16,
    }
    for key, offset in address_offsets.items():
        config[key] = str(ip[offset])

    return config
def render_cloud_foundry_manifest(settings):
    """Fill in each scenario's Cloud Foundry manifest template in place."""
    for scenario in ["cross"]:
        cloudfoundry_template = "{0}.yml".format(scenario)
        if not os.path.exists(cloudfoundry_template):
            continue
        with open(cloudfoundry_template, 'r') as tmpfile:
            contents = tmpfile.read()
        config = get_cloud_foundry_configuration(scenario, settings)
        # BUGFIX: literal str.replace() instead of re.sub() -- re.sub()
        # interprets backslashes/group refs in the replacement string, which
        # would corrupt values such as the SSL cert/key block.
        for key in config:
            contents = contents.replace("REPLACE_WITH_{0}".format(key), config[key])
        with open(cloudfoundry_template, 'w') as tmpfile:
            tmpfile.write(contents)
def render_cloud_foundry_deployment_cmd(settings):
    """Substitute release/stemcell URLs into deploy_cloudfoundry.sh."""
    cloudfoundry_deployment_cmd = "deploy_cloudfoundry.sh"
    if os.path.exists(cloudfoundry_deployment_cmd):
        with open(cloudfoundry_deployment_cmd, 'r') as tmpfile:
            contents = tmpfile.read()
        # BUGFIX: literal str.replace() instead of re.sub(); URLs may contain
        # characters that re.sub() treats specially in replacement strings.
        for key in ["CF_RELEASE_URL", "STEMCELL_URL"]:
            contents = contents.replace("REPLACE_WITH_{0}".format(key), settings[key])
        with open(cloudfoundry_deployment_cmd, 'w') as tmpfile:
            tmpfile.write(contents)
def get_settings():
    """Load the extension's public settings and merge in the service principal
    credentials passed on the command line (tenant, client id, client secret)."""
    config_file = sys.argv[4]
    with open(config_file) as f:
        handler_settings = json.load(f)["runtimeSettings"][0]["handlerSettings"]
        settings = handler_settings["publicSettings"]
    settings['TENANT_ID'] = sys.argv[1]
    settings['CLIENT_ID'] = sys.argv[2]
    settings['CLIENT_SECRET'] = sys.argv[3]
    return settings
def main():
    """Entry point: persist settings to disk, prime the storage accounts,
    render the BOSH and Cloud Foundry manifests, and emit the director IP."""
    settings = get_settings()
    # Keep a pretty-printed copy of the effective settings for debugging.
    with open('settings', "w") as tmpfile:
        tmpfile.write(json.dumps(settings, indent=4, sort_keys=True))

    prepare_storage(settings)

    bosh_director_ip = render_bosh_manifest(settings)
    # BUGFIX: print() call form -- identical output on Python 2 for a single
    # argument, and keeps the script importable under Python 3 (the original
    # 'print x' statement is a SyntaxError there).
    print(bosh_director_ip)

    render_cloud_foundry_manifest(settings)
    render_cloud_foundry_deployment_cmd(settings)
# Script entry point: invoked by the Azure custom-script extension.
if __name__ == "__main__":
main()
kpreid/shinysdr | shinysdr/plugins/wspr/test_blocks.py | 1 | 3942 | # Copyright 2017 Kevin Reid and the ShinySDR contributors
#
# This file is part of ShinySDR.
#
# ShinySDR is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ShinySDR is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ShinySDR. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function, unicode_literals
from zope.interface import implementer
from zope.interface.verify import verifyObject
from twisted.internet import defer
from twisted.trial import unittest
from twisted.internet import task
from shinysdr.plugins.wspr.blocks import WSPRFilter, WAVIntervalSink
from shinysdr.plugins.wspr.interfaces import IWAVIntervalListener
class TestWSPRFilter(unittest.TestCase):
    """Construction smoke test for the WSPR band-pass filter block."""

    def test_for_smoke(self):
        # Building the filter at a typical 48 kHz rate must not raise;
        # construction alone exercises the GNU Radio block wiring.
        WSPRFilter(48000)
class TestWAVIntervalSink(unittest.TestCase):
    """Clock-driven tests for WAVIntervalSink's interval scheduling.

    A twisted task.Clock replaces real time so the 120-second recording
    interval can be stepped through deterministically.
    """

    def setUp(self):
        self.clock = task.Clock()
        self.listener = FakeListener()
        self.sink = WAVIntervalSink(
            interval=120,
            duration=115,
            listener=self.listener,
            sample_rate=48000,
            _callLater=self.clock.callLater,
            _time=self.clock.seconds,
            _deferToThread=self.deferToThread)

    def deferToThread(self, f, *args, **kwargs):
        """Run synchronously -- no real threads in these tests."""
        return defer.succeed(f(*args, **kwargs))

    def test_listener_interface(self):
        verifyObject(IWAVIntervalListener, self.listener)

    def advance_to_next_interval(self):
        # Jump the fake clock forward to the next 120-second boundary.
        remaining = 120 - (self.clock.seconds() % 120)
        self.clock.advance(remaining)

    def test_time(self):
        self.sink.start_running()

        # Initially nothing has been opened or closed.
        self.assertFalse(self.listener._filesClosed)
        self.assertFalse(self.listener._filesOpened)

        # Start of the first interval: a file named for t=120 opens.
        self.advance_to_next_interval()
        self.assertEqual(self.listener._filesOpened, ['120'])

        # Just before the 115-second duration elapses: still open.
        self.clock.advance(114)
        self.assertEqual(self.listener._filesClosed, [])

        # Duration elapses: the file closes.
        self.clock.advance(1)
        self.assertEqual(self.listener._filesClosed, ['120'])

        # The next interval opens a new file without reopening the old one.
        self.advance_to_next_interval()
        self.assertEqual(self.listener._filesOpened, ['120', '240'])
        self.assertEqual(self.listener._filesClosed, ['120'])

    def test_start(self):
        # Nothing is scheduled before start_running is called.
        self.assertFalse(self.clock.getDelayedCalls())

        self.sink.start_running()
        self.assertEqual(len(self.clock.getDelayedCalls()), 1)

        # A second call must not schedule a duplicate.
        self.sink.start_running()
        self.assertEqual(len(self.clock.getDelayedCalls()), 1)
# More things to test, but so little time.
#
# What if interval == duration? (Currently undefined behavior)
#
# What if there's an error in opening or closing the wav file?
#
# Are the interactions with the wavfile_sink block being done in a thread?
# They block on acquiring locks and file I/O.
#
# Are the internal connections sane?
@implementer(IWAVIntervalListener)
class FakeListener(object):
    """Test double that records fileOpened/fileClosed calls for inspection."""

    def __init__(self):
        self._filesOpened = []
        self._filesClosed = []

    def fileOpened(self, filename):
        self._filesOpened.append(filename)

    def fileClosed(self, filename):
        self._filesClosed.append(filename)

    def filename(self, time):
        # Name each file after its integral start timestamp.
        return str(int(time))
wbchen99/bitcoin-hnote0 | qa/rpc-tests/wallet.py | 5 | 15081 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class WalletTest (BitcoinTestFramework):
def check_fee_amount(self, curr_balance, balance_with_fee, fee_per_byte, tx_size):
    """Return curr_balance after asserting the fee was in range.

    The fee actually paid is (balance_with_fee - curr_balance).  It must be
    at least fee_per_byte * tx_size and at most fee_per_byte * (tx_size + 2);
    the extra 2 bytes allow the node's transaction-size estimate to be
    slightly off.  Raises AssertionError when the fee is out of range.
    """
    fee = balance_with_fee - curr_balance
    target_fee = fee_per_byte * tx_size
    if fee < target_fee:
        # %s already stringifies its argument; the redundant str() calls of
        # the original are dropped.
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (fee, target_fee))
    # allow the node's estimation to be at most 2 bytes off
    if fee > fee_per_byte * (tx_size + 2):
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (fee, target_fee))
    return curr_balance
def __init__(self):
    """Configure the framework: four nodes on a freshly generated chain."""
    super().__init__()
    self.num_nodes = 4
    self.setup_clean_chain = True
def setup_network(self, split=False):
    """Start three nodes connected in a triangle; the fourth node is
    started later inside run_test to exercise rebroadcast behaviour."""
    self.nodes = start_nodes(3, self.options.tmpdir)
    for a, b in ((0, 1), (1, 2), (0, 2)):
        connect_nodes_bi(self.nodes, a, b)
    self.is_network_split = False
    self.sync_all()
def run_test (self):
"""End-to-end wallet exercise: mining rewards, sends with/without
subtracted fees, locked outputs, raw transactions, rebroadcast,
-walletbroadcast=0, string amounts, import of addresses/keys, and
wallet maintenance flags."""
# Check that there are no UTXOs on any of the nodes yet
assert_equal(len(self.nodes[0].listunspent()), 0)
assert_equal(len(self.nodes[1].listunspent()), 0)
assert_equal(len(self.nodes[2].listunspent()), 0)
print("Mining blocks...")
self.nodes[0].generate(1)
# A freshly mined coinbase is immature for 100 blocks.
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 50)
assert_equal(walletinfo['balance'], 0)
self.sync_all()
self.nodes[1].generate(101)
self.sync_all()
# Node0's reward matured; node1's first reward matured too.
assert_equal(self.nodes[0].getbalance(), 50)
assert_equal(self.nodes[1].getbalance(), 50)
assert_equal(self.nodes[2].getbalance(), 0)
# Check that only first and second nodes have UTXOs
assert_equal(len(self.nodes[0].listunspent()), 1)
assert_equal(len(self.nodes[1].listunspent()), 1)
assert_equal(len(self.nodes[2].listunspent()), 0)
# Send 21 BTC from 0 to 2 using sendtoaddress call.
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 11)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 10)
walletinfo = self.nodes[0].getwalletinfo()
assert_equal(walletinfo['immature_balance'], 0)
# Have node0 mine a block, thus it will collect its own fee.
self.nodes[0].generate(1)
self.sync_all()
# Exercise locking of unspent outputs: a locked output cannot be spent.
unspent_0 = self.nodes[2].listunspent()[0]
unspent_0 = {"txid": unspent_0["txid"], "vout": unspent_0["vout"]}
self.nodes[2].lockunspent(False, [unspent_0])
assert_raises(JSONRPCException, self.nodes[2].sendtoaddress, self.nodes[2].getnewaddress(), 20)
assert_equal([unspent_0], self.nodes[2].listlockunspent())
self.nodes[2].lockunspent(True, [unspent_0])
assert_equal(len(self.nodes[2].listlockunspent()), 0)
# Have node1 generate 100 blocks (so node0 can recover the fee)
self.nodes[1].generate(100)
self.sync_all()
# node0 should end up with 100 btc in block rewards plus fees, but
# minus the 21 plus fees sent to node2
assert_equal(self.nodes[0].getbalance(), 100-21)
assert_equal(self.nodes[2].getbalance(), 21)
# Node0 should have two unspent outputs.
# Create a couple of transactions to send them to node2, submit them through
# node1, and make sure both node0 and node2 pick them up properly:
node0utxos = self.nodes[0].listunspent(1)
assert_equal(len(node0utxos), 2)
# create both transactions (each spends one UTXO minus a 3 BTC fee)
txns_to_send = []
for utxo in node0utxos:
inputs = []
outputs = {}
inputs.append({ "txid" : utxo["txid"], "vout" : utxo["vout"]})
outputs[self.nodes[2].getnewaddress("from1")] = utxo["amount"] - 3
raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
txns_to_send.append(self.nodes[0].signrawtransaction(raw_tx))
# Have node 1 (miner) send the transactions
self.nodes[1].sendrawtransaction(txns_to_send[0]["hex"], True)
self.nodes[1].sendrawtransaction(txns_to_send[1]["hex"], True)
# Have node1 mine a block to confirm transactions:
self.nodes[1].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 94)
assert_equal(self.nodes[2].getbalance("from1"), 94-21)
# Send 10 BTC normal (fee paid on top of the amount)
address = self.nodes[0].getnewaddress("test")
fee_per_byte = Decimal('0.001') / 1000
self.nodes[2].settxfee(fee_per_byte * 1000)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", False)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), Decimal('84'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), Decimal('10'))
# Send 10 BTC with subtract fee from amount (recipient pays the fee)
txid = self.nodes[2].sendtoaddress(address, 10, "", "", True)
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), Decimal('20'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Sendmany 10 BTC
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [])
self.nodes[2].generate(1)
self.sync_all()
node_0_bal += Decimal('10')
node_2_bal = self.check_fee_amount(self.nodes[2].getbalance(), node_2_bal - Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
assert_equal(self.nodes[0].getbalance(), node_0_bal)
# Sendmany 10 BTC with subtract fee from amount
txid = self.nodes[2].sendmany('from1', {address: 10}, 0, "", [address])
self.nodes[2].generate(1)
self.sync_all()
node_2_bal -= Decimal('10')
assert_equal(self.nodes[2].getbalance(), node_2_bal)
node_0_bal = self.check_fee_amount(self.nodes[0].getbalance(), node_0_bal + Decimal('10'), fee_per_byte, count_bytes(self.nodes[2].getrawtransaction(txid)))
# Test ResendWalletTransactions:
# Create a couple of transactions, then start up a fourth
# node (nodes[3]) and ask nodes[0] to rebroadcast.
# EXPECT: nodes[3] should have those transactions in its mempool.
txid1 = self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1)
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
sync_mempools(self.nodes)
self.nodes.append(start_node(3, self.options.tmpdir))
connect_nodes_bi(self.nodes, 0, 3)
sync_blocks(self.nodes)
relayed = self.nodes[0].resendwallettransactions()
assert_equal(set(relayed), {txid1, txid2})
sync_mempools(self.nodes)
assert(txid1 in self.nodes[3].getrawmempool())
# Exercise balance rpcs
assert_equal(self.nodes[0].getwalletinfo()["unconfirmed_balance"], 1)
assert_equal(self.nodes[0].getunconfirmedbalance(), 1)
#check if we can list zero value tx as available coins
#1. create rawtx
#2. hex-changed one output to 0.0
#3. sign and send
#4. check if recipient (node0) can list the zero value tx
usp = self.nodes[1].listunspent()
inputs = [{"txid":usp[0]['txid'], "vout":usp[0]['vout']}]
outputs = {self.nodes[1].getnewaddress(): 49.998, self.nodes[0].getnewaddress(): 11.11}
rawTx = self.nodes[1].createrawtransaction(inputs, outputs).replace("c0833842", "00000000") #replace 11.11 with 0.0 (int32)
decRawTx = self.nodes[1].decoderawtransaction(rawTx)
signedRawTx = self.nodes[1].signrawtransaction(rawTx)
decRawTx = self.nodes[1].decoderawtransaction(signedRawTx['hex'])
zeroValueTxid= decRawTx['txid']
sendResp = self.nodes[1].sendrawtransaction(signedRawTx['hex'])
self.sync_all()
self.nodes[1].generate(1) #mine a block
self.sync_all()
unspentTxs = self.nodes[0].listunspent() #zero value tx must be in listunspent output
found = False
for uTx in unspentTxs:
if uTx['txid'] == zeroValueTxid:
found = True
assert_equal(uTx['amount'], Decimal('0'))
assert(found)
#do some -walletbroadcast tests
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
self.sync_all()
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
self.nodes[1].generate(1) #mine a block, tx should not be in there
self.sync_all()
assert_equal(self.nodes[2].getbalance(), node_2_bal) #should not be changed because tx was not broadcast
#now broadcast from another node, mine a block, sync, and check the balance
self.nodes[1].sendrawtransaction(txObjNotBroadcasted['hex'])
self.nodes[1].generate(1)
self.sync_all()
node_2_bal += 2
txObjNotBroadcasted = self.nodes[0].gettransaction(txIdNotBroadcasted)
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#create another tx
txIdNotBroadcasted = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), 2)
#restart the nodes with -walletbroadcast=1
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir)
connect_nodes_bi(self.nodes,0,1)
connect_nodes_bi(self.nodes,1,2)
connect_nodes_bi(self.nodes,0,2)
sync_blocks(self.nodes)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
node_2_bal += 2
#tx should be added to balance because after restarting the nodes the tx should be broadcast
assert_equal(self.nodes[2].getbalance(), node_2_bal)
#send a tx with value in a string (PR#6380 +)
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "2")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-2'))
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "0.0001")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
#check if JSON parser can handle scientific notation in strings
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1e-4")
txObj = self.nodes[0].gettransaction(txId)
assert_equal(txObj['amount'], Decimal('-0.0001'))
# A malformed amount string must be rejected.
try:
txId = self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(), "1f-4")
except JSONRPCException as e:
assert("Invalid amount" in e.error['message'])
else:
raise AssertionError("Must not parse invalid amounts")
# generate() takes an integer, not a numeric string.
try:
self.nodes[0].generate("2")
raise AssertionError("Must not accept strings as numeric")
except JSONRPCException as e:
assert("not an integer" in e.error['message'])
# Import address and private key to check correct behavior of spendable unspents
# 1. Send some coins to generate new UTXO
address_to_import = self.nodes[2].getnewaddress()
txid = self.nodes[0].sendtoaddress(address_to_import, 1)
self.nodes[0].generate(1)
self.sync_all()
# 2. Import address from node2 to node1
self.nodes[1].importaddress(address_to_import)
# 3. Validate that the imported address is watch-only on node1
assert(self.nodes[1].validateaddress(address_to_import)["iswatchonly"])
# 4. Check that the unspents after import are not spendable
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": False})
# 5. Import private key of the previously imported address on node1
priv_key = self.nodes[2].dumpprivkey(address_to_import)
self.nodes[1].importprivkey(priv_key)
# 6. Check that the unspents are now spendable on node1
assert_array_result(self.nodes[1].listunspent(),
{"address": address_to_import},
{"spendable": True})
# Mine a block from node0 to an address from node1
cbAddr = self.nodes[1].getnewaddress()
blkHash = self.nodes[0].generatetoaddress(1, cbAddr)[0]
cbTxId = self.nodes[0].getblock(blkHash)['tx'][0]
self.sync_all()
# Check that the txid and balance is found by node1
self.nodes[1].gettransaction(cbTxId)
# check if wallet or blockchain maintenance changes the balance
self.sync_all()
blocks = self.nodes[0].generate(2)
self.sync_all()
balance_nodes = [self.nodes[i].getbalance() for i in range(3)]
block_count = self.nodes[0].getblockcount()
maintenance = [
'-rescan',
'-reindex',
'-zapwallettxes=1',
'-zapwallettxes=2',
# disabled until issue is fixed: https://github.com/bitcoin/bitcoin/issues/7463
# '-salvagewallet',
]
for m in maintenance:
print("check " + m)
stop_nodes(self.nodes)
wait_bitcoinds()
self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
# reindex will leave rpc warm up "early"; Wait for it to finish
time.sleep(0.1)
assert_equal(balance_nodes, [self.nodes[i].getbalance() for i in range(3)])
# Exercise listsinceblock with the last two blocks
coinbase_tx_1 = self.nodes[0].listsinceblock(blocks[0])
assert_equal(coinbase_tx_1["lastblock"], blocks[1])
assert_equal(len(coinbase_tx_1["transactions"]), 1)
assert_equal(coinbase_tx_1["transactions"][0]["blockhash"], blocks[1])
assert_equal(len(self.nodes[0].listsinceblock(blocks[1])["transactions"]), 0)
# Allow running this integration test directly from the command line.
if __name__ == '__main__':
WalletTest().main()
| mit |
migonzalvar/youtube-dl | youtube_dl/extractor/gamespot.py | 105 | 3168 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urlparse,
)
from ..utils import (
unescapeHTML,
)
class GameSpotIE(InfoExtractor):
# GameSpot video extractor: page URLs end in a numeric id, e.g. .../2300-6410818/.
_VALID_URL = r'http://(?:www\.)?gamespot\.com/.*-(?P<id>\d+)/?'
_TESTS = [{
'url': 'http://www.gamespot.com/videos/arma-3-community-guide-sitrep-i/2300-6410818/',
'md5': 'b2a30deaa8654fcccd43713a6b6a4825',
'info_dict': {
'id': 'gs-2300-6410818',
'ext': 'mp4',
'title': 'Arma 3 - Community Guide: SITREP I',
'description': 'Check out this video where some of the basics of Arma 3 is explained.',
},
}, {
'url': 'http://www.gamespot.com/videos/the-witcher-3-wild-hunt-xbox-one-now-playing/2300-6424837/',
'info_dict': {
'id': 'gs-2300-6424837',
'ext': 'flv',
'title': 'The Witcher 3: Wild Hunt [Xbox ONE] - Now Playing',
'description': 'Join us as we take a look at the early hours of The Witcher 3: Wild Hunt and more.',
},
}]
def _real_extract(self, url):
# Video metadata is embedded in the page as an HTML-escaped JSON blob
# inside a data-video attribute.
page_id = self._match_id(url)
webpage = self._download_webpage(url, page_id)
data_video_json = self._search_regex(
r'data-video=["\'](.*?)["\']', webpage, 'data video')
data_video = json.loads(unescapeHTML(data_video_json))
streams = data_video['videoStreams']
formats = []
f4m_url = streams.get('f4m_stream')
if f4m_url is not None:
# Transform the manifest url to a link to the mp4 files
# they are used in mobile devices.
f4m_path = compat_urlparse.urlparse(f4m_url).path
# The manifest path embeds a comma-separated list of bitrates/qualities.
QUALITIES_RE = r'((,\d+)+,?)'
qualities = self._search_regex(QUALITIES_RE, f4m_path, 'qualities').strip(',').split(',')
http_path = f4m_path[1:].split('/', 1)[1]
# Build one direct-download URL template, then expand it per quality.
http_template = re.sub(QUALITIES_RE, r'%s', http_path)
http_template = http_template.replace('.csmil/manifest.f4m', '')
http_template = compat_urlparse.urljoin(
'http://video.gamespotcdn.com/', http_template)
for q in qualities:
formats.append({
'url': http_template % q,
'ext': 'mp4',
'format_id': q,
})
else:
# No f4m manifest: fall back to the per-quality flv streams.
for quality in ['sd', 'hd']:
# It's actually a link to a flv file
flv_url = streams.get('f4m_{0}'.format(quality))
if flv_url is not None:
formats.append({
'url': flv_url,
'ext': 'flv',
'format_id': quality,
})
return {
'id': data_video['guid'],
'display_id': page_id,
'title': compat_urllib_parse_unquote(data_video['title']),
'formats': formats,
'description': self._html_search_meta('description', webpage),
'thumbnail': self._og_search_thumbnail(webpage),
}
| unlicense |
pigeonflight/strider-plone | docker/appengine/lib/cherrypy/cherrypy/lib/static.py | 83 | 14710 | try:
from io import UnsupportedOperation
except ImportError:
UnsupportedOperation = object()
import logging
import mimetypes
mimetypes.init()
mimetypes.types_map['.dwg']='image/x-dwg'
mimetypes.types_map['.ico']='image/x-icon'
mimetypes.types_map['.bz2']='application/x-bzip2'
mimetypes.types_map['.gz']='application/x-gzip'
import os
import re
import stat
import time
import cherrypy
from cherrypy._cpcompat import ntob, unquote
from cherrypy.lib import cptools, httputil, file_generator_limited
def serve_file(path, content_type=None, disposition=None, name=None, debug=False):
    """Set status, headers, and body in order to serve the given path.

    The Content-Type header will be set to the content_type arg, if provided.
    If not provided, the Content-Type will be guessed by the file extension
    of the 'path' argument.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, it will be set
    to the basename of path. If disposition is None, no Content-Disposition
    header will be written.

    Raises cherrypy.NotFound if the path does not exist or is a directory,
    and ValueError if the path is not absolute.
    """
    response = cherrypy.serving.response

    # If path is relative, users should fix it by making path absolute.
    # That is, CherryPy should not guess where the application root is.
    # It certainly should *not* use cwd (since CP may be invoked from a
    # variety of paths). If using tools.staticdir, you can make your relative
    # paths become absolute by supplying a value for "tools.staticdir.root".
    if not os.path.isabs(path):
        msg = "'%s' is not an absolute path." % path
        if debug:
            cherrypy.log(msg, 'TOOLS.STATICFILE')
        raise ValueError(msg)

    # A missing/unreadable file becomes a 404 rather than leaking the OSError.
    try:
        st = os.stat(path)
    except OSError:
        if debug:
            cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Check if path is a directory.
    if stat.S_ISDIR(st.st_mode):
        # Let the caller deal with it as they like.
        if debug:
            cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Set the Last-Modified response header, so that
    # modified-since validation code can work.
    response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
    # May short-circuit with 304 Not Modified based on the request headers.
    cptools.validate_since()

    if content_type is None:
        # Set content-type based on filename extension
        ext = ""
        i = path.rfind('.')
        if i != -1:
            # Includes the leading dot, matching the mimetypes.types_map keys.
            ext = path[i:].lower()
        content_type = mimetypes.types_map.get(ext, None)
    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        # Default the download filename to the basename of the served file.
        if name is None:
            name = os.path.basename(path)
        cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    content_length = st.st_size
    fileobj = open(path, 'rb')
    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
                  debug=False):
    """Set status, headers, and body in order to serve the given file object.

    The Content-Type header will be set to the content_type arg, if provided.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, 'filename' will
    not be set. If disposition is None, no Content-Disposition header will
    be written.

    CAUTION: If the request contains a 'Range' header, one or more seek()s will
    be performed on the file object. This may cause undesired behavior if
    the file object is not seekable. It could also produce undesired results
    if the caller set the read position of the file object prior to calling
    serve_fileobj(), expecting that the data would be served starting from that
    position.
    """
    response = cherrypy.serving.response

    try:
        st = os.fstat(fileobj.fileno())
    except AttributeError:
        # Not backed by a real file descriptor (e.g. StringIO); size and
        # mtime are unknown, so skip Last-Modified and Content-Length.
        if debug:
            cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
        content_length = None
    except UnsupportedOperation:
        # io-module objects raise this from fileno() when unsupported.
        content_length = None
    else:
        # Set the Last-Modified response header, so that
        # modified-since validation code can work.
        response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
        # May short-circuit with 304 Not Modified based on the request headers.
        cptools.validate_since()
        content_length = st.st_size

    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        # Unlike serve_file(), there is no path here to derive a default
        # filename from, so 'filename' is omitted when name is None.
        if name is None:
            cd = disposition
        else:
            cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, content_type, content_length, debug=False):
    """Internal. Set response.body to the given file object, perhaps ranged.

    Handles HTTP/1.1 Range requests: a single range produces a plain
    206 response; multiple ranges produce a multipart/byteranges body.
    Requests with no (or no valid-protocol) Range fall through to serving
    the whole file object.
    """
    response = cherrypy.serving.response

    # HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
    request = cherrypy.serving.request
    if request.protocol >= (1, 1):
        response.headers["Accept-Ranges"] = "bytes"
        r = httputil.get_ranges(request.headers.get('Range'), content_length)
        # get_ranges() returns [] for a syntactically valid but unsatisfiable
        # Range header -> 416; None when no Range header was sent at all.
        if r == []:
            response.headers['Content-Range'] = "bytes */%s" % content_length
            message = "Invalid Range (first-byte-pos greater than Content-Length)"
            if debug:
                cherrypy.log(message, 'TOOLS.STATIC')
            raise cherrypy.HTTPError(416, message)

        if r:
            if len(r) == 1:
                # Return a single-part response.
                start, stop = r[0]
                if stop > content_length:
                    # Clamp an over-long range to the end of the resource.
                    stop = content_length
                r_len = stop - start
                if debug:
                    cherrypy.log('Single part; start: %r, stop: %r' % (start, stop),
                                 'TOOLS.STATIC')
                response.status = "206 Partial Content"
                # Content-Range uses an inclusive last-byte-pos, hence stop - 1.
                response.headers['Content-Range'] = (
                    "bytes %s-%s/%s" % (start, stop - 1, content_length))
                response.headers['Content-Length'] = r_len
                fileobj.seek(start)
                response.body = file_generator_limited(fileobj, r_len)
            else:
                # Return a multipart/byteranges response.
                response.status = "206 Partial Content"
                try:
                    # Python 3
                    from email.generator import _make_boundary as choose_boundary
                except ImportError:
                    # Python 2
                    from mimetools import choose_boundary
                boundary = choose_boundary()
                ct = "multipart/byteranges; boundary=%s" % boundary
                response.headers['Content-Type'] = ct
                if "Content-Length" in response.headers:
                    # Delete Content-Length header so finalize() recalcs it.
                    del response.headers["Content-Length"]

                def file_ranges():
                    # Generator closing over r/boundary/fileobj: emits each
                    # range as its own MIME part with per-part headers.
                    # Apache compatibility:
                    yield ntob("\r\n")

                    for start, stop in r:
                        if debug:
                            cherrypy.log('Multipart; start: %r, stop: %r' % (start, stop),
                                         'TOOLS.STATIC')
                        yield ntob("--" + boundary, 'ascii')
                        yield ntob("\r\nContent-type: %s" % content_type, 'ascii')
                        yield ntob("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
                                   % (start, stop - 1, content_length), 'ascii')
                        fileobj.seek(start)
                        for chunk in file_generator_limited(fileobj, stop-start):
                            yield chunk
                        yield ntob("\r\n")
                    # Final boundary
                    yield ntob("--" + boundary + "--", 'ascii')

                    # Apache compatibility:
                    yield ntob("\r\n")
                response.body = file_ranges()
            return response.body
        else:
            if debug:
                cherrypy.log('No byteranges requested', 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    response.headers['Content-Length'] = content_length
    response.body = fileobj
    return response.body
def serve_download(path, name=None):
    """Serve 'path' as an application/x-download attachment.

    Thin convenience wrapper around serve_file() that pins the content
    type and disposition so browsers offer a download dialog.
    """
    content_type = "application/x-download"
    disposition = "attachment"
    return serve_file(path, content_type, disposition, name)
def _attempt(filename, content_types, debug=False):
    """Try to serve ``filename``; return True on success, False if missing."""
    if debug:
        cherrypy.log('Attempting %r (content_types %r)' %
                     (filename, content_types), 'TOOLS.STATICDIR')
    # You can set the content types for a complete directory, keyed by
    # extension (without the leading dot).
    ctype = None
    if content_types:
        ext = os.path.splitext(filename)[1]
        ctype = content_types.get(ext[1:], None)
    try:
        serve_file(filename, content_type=ctype, debug=debug)
    except cherrypy.NotFound:
        # If we didn't find the static file, continue handling the
        # request. We might find a dynamic handler instead.
        if debug:
            cherrypy.log('NotFound', 'TOOLS.STATICFILE')
        return False
    return True
def staticdir(section, dir, root="", match="", content_types=None, index="",
              debug=False):
    """Serve a static resource from the given (root +) dir.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").

    index
        If provided, it should be the (relative) name of a file to
        serve for directory requests. For example, if the dir argument is
        '/home/me', the Request-URI is 'myapp', and the index arg is
        'index.html', the file '/home/me/myapp/index.html' will be sought.

    Returns True if a file was served, False otherwise. Raises ValueError
    if dir is relative and no root is given, and HTTPError(403) on a path
    that escapes dir.
    """
    request = cherrypy.serving.request
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
        return False

    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' %
                         (request.path_info, match), 'TOOLS.STATICDIR')
        return False

    # Allow the use of '~' to refer to a user's home directory.
    dir = os.path.expanduser(dir)

    # If dir is relative, make absolute using "root".
    if not os.path.isabs(dir):
        if not root:
            msg = "Static dir requires an absolute dir (or root)."
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICDIR')
            raise ValueError(msg)
        dir = os.path.join(root, dir)

    # Determine where we are in the object tree relative to 'section'
    # (where the static tool was defined).
    if section == 'global':
        section = "/"
    section = section.rstrip(r"\/")
    branch = request.path_info[len(section) + 1:]
    branch = unquote(branch.lstrip(r"\/"))

    # If branch is "", filename will end in a slash
    filename = os.path.join(dir, branch)
    if debug:
        cherrypy.log('Checking file %r to fulfill %r' %
                     (filename, request.path_info), 'TOOLS.STATICDIR')

    # There's a chance that the branch pulled from the URL might
    # have ".." or similar uplevel attacks in it. Check that the final
    # filename is a child of dir. A bare prefix test is not enough: with
    # dir '/static', the path '/static_evil/x' would pass startswith().
    # Compare against the normalized dir plus a trailing separator, and
    # allow the directory itself (the index-file case).
    ndir = os.path.normpath(dir)
    nfilename = os.path.normpath(filename)
    if nfilename != ndir and not nfilename.startswith(ndir + os.sep):
        raise cherrypy.HTTPError(403)  # Forbidden

    # Forward the debug flag so serve attempts are logged consistently
    # (previously it was dropped here, silencing TOOLS.STATICDIR logging).
    handled = _attempt(filename, content_types, debug=debug)
    if not handled:
        # Check for an index file if a folder was requested.
        if index:
            handled = _attempt(os.path.join(filename, index), content_types,
                               debug=debug)
            if handled:
                request.is_index = filename[-1] in (r"\/")
    return handled
def staticfile(filename, root=None, match="", content_types=None, debug=False):
    """Serve a static resource from the given (root +) filename.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").
    """
    request = cherrypy.serving.request
    # Static content only makes sense for safe, idempotent methods.
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
        return False

    # Optional URL filter: bail out when path_info does not match.
    if match:
        if re.search(match, request.path_info) is None:
            if debug:
                cherrypy.log('request.path_info %r does not match pattern %r' %
                             (request.path_info, match), 'TOOLS.STATICFILE')
            return False

    # A relative filename must be resolved against "root"; one way or the
    # other an absolute path is required.
    if not os.path.isabs(filename):
        if not root:
            msg = "Static tool requires an absolute filename (got '%s')." % filename
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICFILE')
            raise ValueError(msg)
        filename = os.path.join(root, filename)

    return _attempt(filename, content_types, debug=debug)
| mit |
benschmaus/catapult | telemetry/telemetry/internal/util/path_set_unittest.py | 32 | 1086 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.internal.util import path_set
class PathSetTest(unittest.TestCase):
    """Unit tests for path_set.PathSet container semantics."""

    def testCreate(self):
        # A freshly created set supports the container protocol but is empty.
        empty = path_set.PathSet()
        self.assertEqual(len(empty), 0)  # Check __len__.
        self.assertFalse(__file__ in empty)
        for _ in empty:  # Check __iter__.
            self.fail('New set is not empty.')

        # Seeding via the constructor stores the real (resolved) path.
        seeded = path_set.PathSet([__file__])
        self.assertEqual(len(seeded), 1)
        self.assertTrue(__file__ in seeded)
        self.assertEqual(seeded.pop(), os.path.realpath(__file__))

    def testAdd(self):
        paths = path_set.PathSet()
        paths.add(__file__)
        self.assertEqual(len(paths), 1)
        self.assertTrue(__file__ in paths)
        self.assertEqual(paths.pop(), os.path.realpath(__file__))

    def testDiscard(self):
        paths = path_set.PathSet([__file__])
        paths.discard(__file__)
        self.assertEqual(len(paths), 0)
        self.assertFalse(__file__ in paths)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| bsd-3-clause |
psychopy/psychopy | psychopy/sound/microphone.py | 1 | 35191 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Audio recording using a microphone.
"""
# Part of the PsychoPy library
# Copyright (C) 2002-2018 Jonathan Peirce (C) 2019-2021 Open Science Tools Ltd.
# Distributed under the terms of the GNU General Public License (GPL).
__all__ = ['Microphone']
import sys
import psychopy.logging as logging
from psychopy.constants import NOT_STARTED, STARTED
from psychopy.preferences import prefs
from .audioclip import *
from .audiodevice import *
from .exceptions import *
import numpy as np
# Whether the psychtoolbox (PTB) audio bindings are importable. Audio capture
# in this module is implemented on top of `psychtoolbox.audio`; when the
# import fails the module still loads, but this flag is left False and
# opening a microphone stream later raises an error.
_hasPTB = True
try:
    import psychtoolbox.audio as audio
except (ImportError, ModuleNotFoundError):
    logging.warning(
        "The 'psychtoolbox' library cannot be loaded but is required for audio "
        "capture (use `pip install psychtoolbox` to get it). Microphone "
        "recording will be unavailable this session. Note that opening a "
        "microphone stream will raise an error.")
    _hasPTB = False
class RecordingBuffer(object):
    """Class for a storing a recording from a stream.

    Think of instances of this class behaving like an audio tape whereas the
    `Microphone` class is the tape recorder. Samples taken from the stream are
    written to the tape which stores the data.

    Used internally by the `Microphone` class, users usually do not create
    instances of this class themselves.

    Parameters
    ----------
    sampleRateHz : int
        Sampling rate for audio recording in Hertz (Hz). By default, 48kHz
        (``sampleRateHz=480000``) is used which is adequate for most consumer
        grade microphones (headsets and built-in).
    channels : int
        Number of channels to record samples to `1=Mono` and `2=Stereo`.
    maxRecordingSize : int
        Maximum recording size in kilobytes (Kb). Since audio recordings tend to
        consume a large amount of system memory, one might want to limit the
        size of the recording buffer to ensure that the application does not run
        out of memory. By default, the recording buffer is set to 24000 KB (or
        24 MB). At a sample rate of 48kHz, this will result in 62.5 seconds of
        continuous audio being recorded before the buffer is full.
    policyWhenFull : str
        What to do when the recording buffer is full and cannot accept any more
        samples. If 'ignore', samples will be silently dropped and the `isFull`
        property will be set to `True`. If 'warn', a warning will be logged and
        the `isFull` flag will be set. Finally, if 'error' the application will
        raise an exception.

    """
    def __init__(self, sampleRateHz=SAMPLE_RATE_48kHz, channels=2,
                 maxRecordingSize=24000, policyWhenFull='ignore'):
        self._channels = channels
        self._sampleRateHz = sampleRateHz
        self._maxRecordingSize = maxRecordingSize
        self._samples = None  # `ndarray` created in `_allocRecBuffer`
        self._offset = 0  # recording offset
        self._lastSample = 0  # offset of the last sample from stream
        self._spaceRemaining = None  # set in `_allocRecBuffer`
        self._totalSamples = None  # set in `_allocRecBuffer`

        # check if the value is valid
        if policyWhenFull not in ['ignore', 'warn', 'error']:
            raise ValueError("Invalid value for `policyWhenFull`.")

        self._policyWhenFull = policyWhenFull
        self._warnedRecBufferFull = False
        self._loops = 0

        self._allocRecBuffer()

    def _allocRecBuffer(self):
        """Allocate the recording buffer. Called internally if properties are
        changed."""
        # allocate another array
        nBytes = self._maxRecordingSize * 1000
        recArraySize = int((nBytes / self._channels) / (np.float32()).itemsize)

        self._samples = np.zeros(
            (recArraySize, self._channels), dtype=np.float32, order='C')

        # Sanity check: the allocation must never exceed what was requested.
        # It can be slightly smaller when `nBytes` is not an exact multiple of
        # the frame size (channels * itemsize); the old `==` assert crashed
        # for such sizes.
        assert self._samples.nbytes <= nBytes

        self._totalSamples = len(self._samples)
        self._spaceRemaining = self._totalSamples

    @property
    def samples(self):
        """Reference to the actual sample buffer (`ndarray`)."""
        return self._samples

    @property
    def bufferSecs(self):
        """Capacity of the recording buffer in seconds (`float`)."""
        return self._totalSamples / self._sampleRateHz

    @property
    def nbytes(self):
        """Number of bytes the recording buffer occupies in memory (`int`)."""
        return self._samples.nbytes

    @property
    def sampleBytes(self):
        """Number of bytes per sample (`int`)."""
        return np.float32().itemsize

    @property
    def spaceRemaining(self):
        """The space remaining in the recording buffer (`int`). Indicates the
        number of samples that the buffer can still add before overflowing.
        """
        return self._spaceRemaining

    @property
    def isFull(self):
        """Is the recording buffer full (`bool`)."""
        return self._spaceRemaining <= 0

    @property
    def totalSamples(self):
        """Total number samples the recording buffer can hold (`int`)."""
        return self._totalSamples

    @property
    def writeOffset(self):
        """Index in the sample buffer where new samples will be written when
        `write()` is called (`int`).
        """
        return self._offset

    @property
    def lastSample(self):
        """Index of the last sample recorded (`int`). This can be used to slice
        the recording buffer, only getting data from the beginning to place
        where the last sample was written to.
        """
        return self._lastSample

    @property
    def loopCount(self):
        """Number of times the recording buffer restarted (`int`). Only valid if
        `loopback` is ``True``."""
        return self._loops

    @property
    def maxRecordingSize(self):
        """Maximum recording size in kilobytes (`int`).

        Since audio recordings tend to consume a large amount of system memory,
        one might want to limit the size of the recording buffer to ensure that
        the application does not run out of memory. By default, the recording
        buffer is set to 24000 KB (or 24 MB). At a sample rate of 48kHz, this
        will result in 62.5 seconds of continuous audio being recorded before
        the buffer is full.

        Setting this value will allocate another recording buffer of appropriate
        size. Avoid doing this in any time sensitive parts of your application.

        """
        return self._maxRecordingSize

    @maxRecordingSize.setter
    def maxRecordingSize(self, value):
        value = int(value)

        # don't do this unless the value changed
        if value == self._maxRecordingSize:
            return

        # if different than last value, update the recording buffer
        self._maxRecordingSize = value
        self._allocRecBuffer()

    def seek(self, offset, absolute=False):
        """Set the write offset.

        Use this to specify where to begin writing samples the next time `write`
        is called. You should call `seek(0)` when starting a new recording.

        Parameters
        ----------
        offset : int
            Position in the sample buffer to set.
        absolute : bool
            Use absolute positioning. Use relative positioning if `False` where
            the value of `offset` will be added to the current offset. Default
            is `False`.

        """
        if absolute:
            # Fixed: previously this assigned the `absolute` flag itself to
            # `_offset` (i.e. `self._offset = absolute`) instead of `offset`.
            self._offset = offset
        else:
            self._offset += offset

        assert 0 <= self._offset < self._totalSamples
        self._spaceRemaining = self._totalSamples - self._offset

    def write(self, samples):
        """Write samples to the recording buffer.

        Parameters
        ----------
        samples : ArrayLike
            Samples to write to the recording buffer, usually of a stream. Must
            have the same number of dimensions as the internal array.

        Returns
        -------
        int
            Number of samples overflowed. If this is zero then all samples have
            been recorded, if not, the number of samples rejected is given.

        """
        nSamples = len(samples)
        if self.isFull:
            # Buffer already full: apply the configured overflow policy. All
            # incoming samples are lost in this case.
            if self._policyWhenFull == 'ignore':
                return nSamples  # samples lost
            elif self._policyWhenFull == 'warn':
                if not self._warnedRecBufferFull:
                    logging.warning(
                        f"Audio recording buffer filled! This means that no "
                        f"samples are saved beyond {round(self.bufferSecs, 6)} "
                        f"seconds. Specify a larger recording buffer next time "
                        f"to avoid data loss.")
                    logging.flush()
                    self._warnedRecBufferFull = True
                return nSamples
            elif self._policyWhenFull == 'error':
                raise AudioRecordingBufferFullError(
                    "Cannot write samples, recording buffer is full.")
            else:
                return nSamples  # whatever

        if not nSamples:  # no samples came out of the stream, just return
            return 0

        # Clamp the write to the space actually available; anything beyond
        # that is dropped and reported back to the caller.
        nToWrite = min(nSamples, self._spaceRemaining)
        self._lastSample = self._offset + nToWrite
        self._samples[self._offset:self._lastSample, :] = samples[:nToWrite, :]

        # Advance the write head only by what was actually stored. The old
        # code advanced by `nSamples` unconditionally, which could push
        # `_offset` past the end of the buffer (breaking `writeOffset` and
        # the assertion in `seek`), and returned a wrong overflow count.
        self._offset += nToWrite
        self._spaceRemaining -= nToWrite

        # Number of samples rejected; zero when everything fit. Next call to
        # `write` will hit the overflow policy if the buffer is now full.
        return nSamples - nToWrite

    def clear(self):
        """Clear the recording buffer and reallocate it."""
        # reset all live attributes
        self._samples = None
        self._offset = 0
        self._lastSample = 0
        self._spaceRemaining = None
        self._totalSamples = None

        # reallocate buffer
        self._allocRecBuffer()

    def getSegment(self, start=0, end=None):
        """Get a segment of recording data as an `AudioClip`.

        Parameters
        ----------
        start : float or int
            Absolute time in seconds for the start of the clip.
        end : float or int
            Absolute time in seconds for the end of the clip. If `None` the time
            at the last sample is used.

        Returns
        -------
        AudioClip
            Audio clip object with samples between `start` and `end`.

        """
        idxStart = int(start * self._sampleRateHz)
        idxEnd = self._lastSample if end is None else int(
            end * self._sampleRateHz)

        # Copy the slice so the clip is independent of the live buffer.
        return AudioClip(
            np.array(self._samples[idxStart:idxEnd, :],
                     dtype=np.float32, order='C'),
            sampleRateHz=self._sampleRateHz)
class Microphone(object):
"""Class for recording audio from a microphone or input stream.
Creating an instance of this class will open a stream using the specified
device. Streams should remain open for the duration of your session. When a
stream is opened, a buffer is allocated to store samples coming off it.
Samples from the input stream will written to the buffer once
:meth:`~Microphone.start()` is called.
Parameters
----------
device : int or `~psychopy.sound.AudioDevice`
Audio capture device to use. You may specify the device either by index
(`int`) or descriptor (`AudioDevice`).
sampleRateHz : int
Sampling rate for audio recording in Hertz (Hz). By default, 48kHz
(``sampleRateHz=480000``) is used which is adequate for most consumer
grade microphones (headsets and built-in).
channels : int
Number of channels to record samples to `1=Mono` and `2=Stereo`.
streamBufferSecs : float
Stream buffer size to pre-allocate for the specified number of seconds.
The default is 2.0 seconds which is usually sufficient.
maxRecordingSize : int
Maximum recording size in kilobytes (Kb). Since audio recordings tend to
consume a large amount of system memory, one might want to limit the
size of the recording buffer to ensure that the application does not run
out of memory. By default, the recording buffer is set to 24000 KB (or
24 MB). At a sample rate of 48kHz, this will result in 62.5 seconds of
continuous audio being recorded before the buffer is full.
audioLatencyMode : int or None
Audio latency mode to use, values range between 0-4. If `None`, the
setting from preferences will be used. Using `3` (exclusive mode) is
adequate for most applications and required if using WASAPI on Windows
for other settings (such audio quality) to take effect. Symbolic
constants `psychopy.sound.audiodevice.AUDIO_PTB_LATENCY_CLASS_` can also
be used.
audioRunMode : int
Run mode for the recording device. Default is standby-mode (`0`) which
allows the system to put the device to sleep. However when the device is
needed, waking the device results in some latency. Using a run mode of
`1` will keep the microphone running (or 'hot') with reduces latency
when th recording is started. Cannot be set when after initialization at
this time.
Examples
--------
Capture 10 seconds of audio from the primary microphone::
import psychopy.core as core
import psychopy.sound.Microphone as Microphone
mic = Microphone(bufferSecs=10.0) # open the microphone
mic.start() # start recording
core.wait(10.0) # wait 10 seconds
mic.stop() # stop recording
audioClip = mic.getRecording()
print(audioClip.duration) # should be ~10 seconds
audioClip.save('test.wav') # save the recorded audio as a 'wav' file
The prescribed method for making long recordings is to poll the stream once
per frame (or every n-th frame)::
mic = Microphone(bufferSecs=2.0)
mic.start() # start recording
# main trial drawing loop
mic.poll()
win.flip() # calling the window flip function
mic.stop() # stop recording
audioClip = mic.getRecording()
"""
# Force the use of WASAPI for audio capture on Windows. If `True`, only
# WASAPI devices will be returned when calling static method
# `Microphone.getDevices()`
enforceWASAPI = True
def __init__(self,
device=None,
sampleRateHz=None,
channels=2,
streamBufferSecs=2.0,
maxRecordingSize=24000,
policyWhenFull='warn',
audioLatencyMode=None,
audioRunMode=0):
if not _hasPTB: # fail if PTB is not installed
raise ModuleNotFoundError(
"Microphone audio capture requires package `psychtoolbox` to "
"be installed.")
# get information about the selected device
devices = Microphone.getDevices()
if isinstance(device, AudioDeviceInfo):
self._device = device
elif isinstance(device, (int, float)):
devicesByIndex = {d.deviceIndex: d for d in devices}
if device in devicesByIndex:
self._device = devicesByIndex[device]
else:
raise AudioInvalidCaptureDeviceError(
'No suitable audio recording devices found matching index '
'{}.'.format(device))
else:
# get default device, first enumerated usually
if not devices:
raise AudioInvalidCaptureDeviceError(
'No suitable audio recording devices found on this system. '
'Check connections and try again.')
self._device = devices[0] # use first
logging.info('Using audio device #{} ({}) for audio capture'.format(
self._device.deviceIndex, self._device.deviceName))
# error if specified device is not suitable for capture
if not self._device.isCapture:
raise AudioInvalidCaptureDeviceError(
'Specified audio device not suitable for audio recording. '
'Has no input channels.')
# get the sample rate
self._sampleRateHz = \
self._device.defaultSampleRate if sampleRateHz is None else int(
sampleRateHz)
logging.debug('Set stream sample rate to {} Hz'.format(
self._sampleRateHz))
# set the audio latency mode
if audioLatencyMode is None:
self._audioLatencyMode = int(prefs.hardware["audioLatencyMode"])
else:
self._audioLatencyMode = audioLatencyMode
logging.debug('Set audio latency mode to {}'.format(
self._audioLatencyMode))
assert 0 <= self._audioLatencyMode <= 4 # sanity check for pref
# set the number of recording channels
self._channels = \
self._device.inputChannels if channels is None else int(channels)
logging.debug('Set recording channels to {} ({})'.format(
self._channels, 'stereo' if self._channels > 1 else 'mono'))
if self._channels > self._device.inputChannels:
raise AudioInvalidDeviceError(
'Invalid number of channels for audio input specified.')
# internal recording buffer size in seconds
assert isinstance(streamBufferSecs, (float, int))
self._streamBufferSecs = float(streamBufferSecs)
# PTB specific stuff
self._mode = 2 # open a stream in capture mode
# Handle for the recording stream, should only be opened once per
# session
logging.debug('Opening audio stream for device #{}'.format(
self._device.deviceIndex))
self._stream = audio.Stream(
device_id=self._device.deviceIndex,
latency_class=self._audioLatencyMode,
mode=self._mode,
freq=self._sampleRateHz,
channels=self._channels)
logging.debug('Stream opened')
assert isinstance(audioRunMode, (float, int)) and \
(audioRunMode == 0 or audioRunMode == 1)
self._audioRunMode = int(audioRunMode)
self._stream.run_mode = self._audioRunMode
logging.debug('Set run mode to `{}`'.format(
self._audioRunMode))
# set latency bias
self._stream.latency_bias = 0.0
logging.debug('Set stream latency bias to {} ms'.format(
self._stream.latency_bias))
# pre-allocate recording buffer, called once
self._stream.get_audio_data(self._streamBufferSecs)
logging.debug(
'Allocated stream buffer to hold {} seconds of data'.format(
self._streamBufferSecs))
# status flag
self._statusFlag = NOT_STARTED
# setup recording buffer
self._recording = RecordingBuffer(
sampleRateHz=self._sampleRateHz,
channels=self._channels,
maxRecordingSize=maxRecordingSize,
policyWhenFull=policyWhenFull
)
# setup clips and transcripts dicts
self.clips = {}
self.lastClip = None
self.scripts = {}
self.lastScript = None
logging.debug('Audio capture device #{} ready'.format(
self._device.deviceIndex))
@staticmethod
def getDevices():
"""Get a `list` of audio capture device (i.e. microphones) descriptors.
On Windows, only WASAPI devices are used.
Returns
-------
list
List of `AudioDevice` descriptors for suitable capture devices. If
empty, no capture devices have been found.
"""
try:
Microphone.enforceWASAPI = bool(prefs.hardware["audioForceWASAPI"])
except KeyError:
pass # use default if option not present in settings
# query PTB for devices
if Microphone.enforceWASAPI and sys.platform == 'win32':
allDevs = audio.get_devices(device_type=13)
else:
allDevs = audio.get_devices()
# make sure we have an array of descriptors
allDevs = [allDevs] if isinstance(allDevs, dict) else allDevs
# create list of descriptors only for capture devices
inputDevices = [desc for desc in [
AudioDeviceInfo.createFromPTBDesc(dev) for dev in allDevs]
if desc.isCapture]
return inputDevices
# def warmUp(self):
# """Warm-/wake-up the audio stream.
#
# On some systems the first time `start` is called incurs additional
# latency, whereas successive calls do not. To deal with this, it is
# recommended that you run this warm-up routine prior to capturing audio
# samples. By default, this routine is called when instancing a new
# microphone object.
#
# """
# # We should put an actual test here to see if timing stabilizes after
# # multiple invocations of this function.
# self._stream.start()
# self._stream.stop()
@property
def recording(self):
"""Reference to the current recording buffer (`RecordingBuffer`)."""
return self._recording
@property
def recBufferSecs(self):
"""Capacity of the recording buffer in seconds (`float`)."""
return self.recording.bufferSecs
@property
def maxRecordingSize(self):
"""Maximum recording size in kilobytes (`int`).
Since audio recordings tend to consume a large amount of system memory,
one might want to limit the size of the recording buffer to ensure that
the application does not run out. By default, the recording buffer is
set to 64000 KB (or 64 MB). At a sample rate of 48kHz, this will result
in about. Using stereo audio (``nChannels == 2``) requires twice the
buffer over mono (``nChannels == 2``) for the same length clip.
Setting this value will allocate another recording buffer of appropriate
size. Avoid doing this in any time sensitive parts of your application.
"""
return self._recording.maxRecordingSize
@maxRecordingSize.setter
def maxRecordingSize(self, value):
self._recording.maxRecordingSize = value
@property
def latencyBias(self):
"""Latency bias to add when starting the microphone (`float`).
"""
return self._stream.latency_bias
@latencyBias.setter
def latencyBias(self, value):
self._stream.latency_bias = float(value)
@property
def audioLatencyMode(self):
"""Audio latency mode in use (`int`). Cannot be set after
initialization.
"""
return self._audioLatencyMode
@property
def streamBufferSecs(self):
"""Size of the internal audio storage buffer in seconds (`float`).
To ensure all data is captured, there must be less time elapsed between
subsequent `getAudioClip` calls than `bufferSecs`.
"""
return self._streamBufferSecs
@property
def status(self):
"""Status flag for the microphone. Value can be one of
``psychopy.constants.STARTED`` or ``psychopy.constants.NOT_STARTED``.
For detailed stream status information, use the
:attr:`~psychopy.sound.microphone.Microphone.streamStatus` property.
"""
if hasattr(self, "_statusFlag"):
return self._statusFlag
@status.setter
def status(self, value):
self._statusFlag = value
    @property
    def streamStatus(self):
        """Status of the audio stream (`AudioDeviceStatus` or `None`).

        See :class:`~psychopy.sound.AudioDeviceStatus` for a complete overview
        of available status fields. This property has a value of `None` if
        the stream is presently closed.

        Examples
        --------
        Get the capture start time of the stream::

            # assumes mic.start() was called
            captureStartTime = mic.status.captureStartTime

        Check if microphone recording is active::

            isActive = mic.status.active

        Get the number of seconds recorded up to this point::

            recordedSecs = mic.status.recordedSecs

        """
        currentStatus = self._stream.status
        # The stream reports -1 when closed; map that to an implicit `None`.
        if currentStatus != -1:
            return AudioDeviceStatus.createFromPTBDesc(currentStatus)
@property
def isRecBufferFull(self):
"""`True` if there is an overflow condition with the recording buffer.
If this is `True`, then `poll()` is still collecting stream samples but
is no longer writing them to anything, causing stream samples to be
lost.
"""
return self._recording.isFull
@property
def isStarted(self):
"""``True`` if stream recording has been started (`bool`)."""
return self.status == STARTED
    def start(self, when=None, waitForStart=0, stopTime=None):
        """Start an audio recording.

        Calling this method will begin capturing samples from the microphone and
        writing them to the buffer.

        Parameters
        ----------
        when : float, int or None
            When to start the stream. If the time specified is a floating point
            (absolute) system time, the device will attempt to begin recording
            at that time. If `None` or zero, the system will try to start
            recording as soon as possible.
        waitForStart : bool
            Wait for sound onset if `True`.
        stopTime : float, int or None
            Number of seconds to record. If `None` or `-1`, recording will
            continue forever until `stop` is called.

        Returns
        -------
        float
            Absolute time the stream was started.

        """
        # check if the stream has been started already
        if self.isStarted:
            raise AudioStreamError(
                "Cannot start a stream, already started.")

        if self._stream is None:
            raise AudioStreamError("Stream not ready.")

        # reset the writing 'head' so new samples overwrite the old recording
        self._recording.seek(0, absolute=True)

        # reset warnings
        # self._warnedRecBufferFull = False

        startTime = self._stream.start(
            repetitions=0,
            when=when,
            wait_for_start=int(waitForStart),
            stop_time=stopTime)

        # recording has begun or is scheduled to do so
        self._statusFlag = STARTED

        logging.debug(
            'Scheduled start of audio capture for device #{} at t={}.'.format(
                self._device.deviceIndex, startTime))

        return startTime
def record(self, when=None, waitForStart=0, stopTime=None):
"""Start an audio recording (alias of `.start()`).
Calling this method will begin capturing samples from the microphone and
writing them to the buffer.
Parameters
----------
when : float, int or None
When to start the stream. If the time specified is a floating point
(absolute) system time, the device will attempt to begin recording
at that time. If `None` or zero, the system will try to start
recording as soon as possible.
waitForStart : bool
Wait for sound onset if `True`.
stopTime : float, int or None
Number of seconds to record. If `None` or `-1`, recording will
continue forever until `stop` is called.
Returns
-------
float
Absolute time the stream was started.
"""
return self.start(
when=when,
waitForStart=waitForStart,
stopTime=stopTime)
    def stop(self, blockUntilStopped=True, stopTime=None):
        """Stop recording audio.

        Call this method to end an audio recording if in progress. This will
        simply halt recording and not close the stream. Any remaining samples
        will be polled automatically and added to the recording buffer.

        Parameters
        ----------
        blockUntilStopped : bool
            Halt script execution until the stream has fully stopped.
        stopTime : float or None
            Scheduled stop time for the stream in system time. If `None`, the
            stream will stop as soon as possible.

        Returns
        -------
        tuple
            Tuple containing `startTime`, `endPositionSecs`, `xruns` and
            `estStopTime`.

        """
        if not self.isStarted:
            raise AudioStreamError(
                "Cannot stop a stream that has not been started.")

        # poll remaining samples, if any
        if not self.isRecBufferFull:
            self.poll()

        # NOTE(review): the keyword is spelled `stopTime` here but `stop_time`
        # in `start()` — confirm against the underlying stream API that this
        # is the correct parameter name.
        startTime, endPositionSecs, xruns, estStopTime = self._stream.stop(
            block_until_stopped=int(blockUntilStopped),
            stopTime=stopTime)
        self._statusFlag = NOT_STARTED

        logging.debug(
            ('Device #{} stopped capturing audio samples at estimated time '
             't={}. Total overruns: {} Total recording time: {}').format(
                self._device.deviceIndex, estStopTime, xruns, endPositionSecs))

        return startTime, endPositionSecs, xruns, estStopTime
def pause(self, blockUntilStopped=True, stopTime=None):
"""Pause a recording (alias of `.stop`).
Call this method to end an audio recording if in progress. This will
simply halt recording and not close the stream. Any remaining samples
will be polled automatically and added to the recording buffer.
Parameters
----------
blockUntilStopped : bool
Halt script execution until the stream has fully stopped.
stopTime : float or None
Scheduled stop time for the stream in system time. If `None`, the
stream will stop as soon as possible.
Returns
-------
tuple
Tuple containing `startTime`, `endPositionSecs`, `xruns` and
`estStopTime`.
"""
return self.stop(blockUntilStopped=blockUntilStopped, stopTime=stopTime)
    def close(self):
        """Close the stream.

        Should not be called until you are certain you're done with it. Ideally,
        you should never close and reopen the same stream within a single
        session.
        """
        # Closing invalidates the stream for the remainder of the session.
        self._stream.close()
        logging.debug('Stream closed')
    def poll(self):
        """Poll audio samples.

        Calling this method adds audio samples collected from the stream buffer
        to the recording buffer that have been captured since the last `poll`
        call. Time between calls of this function should be less than
        `bufferSecs`. You do not need to call this if you call `stop` before
        the time specified by `bufferSecs` elapses since the `start` call.

        Can only be called between called of `start` (or `record`) and `stop`
        (or `pause`).

        Returns
        -------
        int
            Number of overruns in sampling.

        """
        if not self.isStarted:
            raise AudioStreamError(
                "Cannot poll samples from audio device, not started.")

        # figure out what to do with this other information
        audioData, absRecPosition, overflow, cStartTime = \
            self._stream.get_audio_data()

        if overflow:
            logging.warning(
                "Audio stream buffer overflow, some audio samples have been "
                "lost! To prevent this, ensure `Microphone.poll()` is being "
                "called often enough, or increase the size of the audio buffer "
                "with `bufferSecs`.")

        # overruns here are from the *recording* buffer, not the stream buffer
        overruns = self._recording.write(audioData)

        return overruns
def bank(self, tag=None, transcribe=False, **kwargs):
"""Store current buffer as a clip within the microphone object.
This method is used internally by the Microphone component in Builder,
don't use it for other applications. Either `stop()` or `pause()` must
be called before calling this method.
Parameters
----------
tag : str or None
Label for the clip.
transcribe : bool or str
Set to the name of a transcription engine (e.g. "GOOGLE") to
transcribe using that engine, or set as `False` to not transcribe.
kwargs : dict
Additional keyword arguments to pass to
:class:`~psychopy.sound.AudioClip.transcribe()`.
"""
# make sure the tag exists in both clips and transcripts dicts
if tag not in self.clips:
self.clips[tag] = []
if tag not in self.scripts:
self.scripts[tag] = []
# append current recording to clip list according to tag
self.lastClip = self.getRecording()
self.clips[tag].append(self.lastClip)
# append current clip's transcription according to tag
if transcribe:
if transcribe in ('Built-in', True, 'BUILT_IN', 'BUILT-IN',
'Built-In', 'built-in'):
engine = "sphinx"
elif type(transcribe) == str:
engine = transcribe
self.lastScript = self.lastClip.transcribe(
engine=engine, **kwargs)
else:
self.lastScript = "Transcription disabled."
self.scripts[tag].append(self.lastScript)
# clear recording buffer
self._recording.clear()
# return banked items
if transcribe:
return self.lastClip, self.lastScript
else:
return self.lastClip
    def clear(self):
        """Wipe all clips. Deletes previously banked audio clips.
        """
        # clear clips
        self.clips = {}
        # NOTE(review): `self.scripts` (maintained by `bank()`) is not cleared
        # here — confirm whether transcripts should be wiped as well.
        # clear recording
        self._recording.clear()
def flush(self):
"""Get a copy of all banked clips, then clear the clips from storage."""
# get copy of clips dict
clips = self.clips.copy()
# clear
self.clear()
return clips
def getRecording(self):
"""Get audio data from the last microphone recording.
Call this after `stop` to get the recording as an `AudioClip` object.
Raises an error if a recording is in progress.
Returns
-------
AudioClip
Recorded data between the last calls to `start` (or `record`) and
`stop`.
"""
if self.isStarted:
raise AudioStreamError(
"Cannot get audio clip, recording was in progress. Be sure to "
"call `Microphone.stop` first.")
return self._recording.getSegment() # full recording
if __name__ == "__main__":
    pass  # library module; nothing to run when executed directly
| gpl-3.0 |
fyookball/electrum | ios/ElectronCash/app.py | 1 | 2878 | #
# This file is:
# Copyright (C) 2018 Calin Culianu <calin.culianu@gmail.com>
#
# MIT License
#
import os
from electroncash_gui.ios_native.monkeypatches import MonkeyPatches
from electroncash.util import set_verbosity
from electroncash_gui.ios_native import ElectrumGui
from electroncash_gui.ios_native.utils import call_later, get_user_dir, cleanup_tmp_dir, is_debug_build, NSLogSuppress, NSLog
from electroncash.simple_config import SimpleConfig
from electroncash.networks import set_mainnet, set_testnet
# NB: This is called from appdelegate.py "application_didFinishLaunchingWithOptions_"
def main():
    """Build the Electron Cash configuration and launch the native iOS GUI.

    Call order matters here: network selection (`set_testnet`/`set_mainnet`)
    and `MonkeyPatches.patch()` must happen before `SimpleConfig` and the GUI
    are constructed. Returns a short status string for the Objective-C caller.
    """
    cleanup_tmp_dir()

    config_options = {
        'verbose': is_debug_build(),
        'cmd': 'gui',
        'gui': 'ios_native',
        'cwd': os.getcwd(),
        'whitelist_servers_only' : True, # on iOS we force only the whitelist ('preferred') servers only for now as a security measure
        'testnet': 'EC_TESTNET' in os.environ, # You can set the env when testing using Xcode "Scheme" editor
    }

    if config_options.get('testnet'):
        set_testnet()
    else:
        set_mainnet()

    set_verbosity(config_options.get('verbose'), timestamps=False, thread_id=False)
    NSLogSuppress(not config_options.get('verbose'))

    MonkeyPatches.patch()

    config = SimpleConfig(config_options, read_user_dir_function = get_user_dir)

    gui = ElectrumGui(config)
    call_later(0.010, gui.main) # this is required for the activity indicator to actually animate. Switch to a direct call if not using activity indicator on Splash2

    _printStats(config_options) # Prints some startup/debug stats such as Python version and SSL version (this is done in another thread to hopefully not impact startup overhead too much, as importing ssl may be a bit heavy)

    return "Bitcoin Cash FTW!"
def _printStats(config_options):
    """Log interpreter, OpenSSL and Electron Cash version information.

    Runs from a daemon thread so the extra imports (ssl, electroncash
    internals) do not add to app startup time.
    """
    import threading

    def worker(opts):
        # lazy init of SSL and the heavier electroncash modules
        import ssl
        import sys
        from electroncash import version, ecc_fast, schnorr
        NSLog("Electron Cash lib version: %s (using server protocol: %s)", version.PACKAGE_VERSION, version.PROTOCOL_VERSION)
        NSLog("Python version: %s", ' '.join(sys.version.split('\n')))
        NSLog("OpenSSL version: %s", ssl.OPENSSL_VERSION)
        NSLog("Fast ECC: %s Fast Schnorr: %s", str(ecc_fast.is_using_fast_ecc()), str(schnorr.has_fast_sign()))

    # We do this from a thread so as to not delay app startup by importing
    # more stuff we don't strictly need.
    statsThread = threading.Thread(target=worker, args=(config_options,), daemon=True)
    statsThread.start()
| mit |
yograterol/django | django/http/multipartparser.py | 332 | 24331 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import binascii
import cgi
import sys
from django.conf import settings
from django.core.exceptions import SuspiciousMultipartForm
from django.core.files.uploadhandler import (
SkipFile, StopFutureHandlers, StopUpload,
)
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils.six.moves.urllib.parse import unquote
from django.utils.text import unescape_entities
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
    """Raised when the multipart payload or its headers are malformed."""
    pass
class InputStreamExhausted(Exception):
    """
    No more reads are allowed from this device.
    """
    pass
# Tokens identifying what kind of part the parser has encountered.
RAW = "raw"
FILE = "file"
FIELD = "field"

# base64.b64decode raises TypeError on Python 2 but binascii.Error on Python 3.
_BASE64_DECODE_ERROR = TypeError if six.PY2 else binascii.Error
class MultiPartParser(object):
    """
    A rfc2388 multipart/form-data parser.

    ``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
    """
    def __init__(self, META, input_data, upload_handlers, encoding=None):
        """
        Initialize the MultiPartParser object.

        :META:
            The standard ``META`` dictionary in Django request objects.
        :input_data:
            The raw post data, as a file-like object.
        :upload_handlers:
            A list of UploadHandler instances that perform operations on the uploaded
            data.
        :encoding:
            The encoding with which to treat the incoming data.
        """

        #
        # Content-Type should contain multipart and the boundary information.
        #

        content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
        if not content_type.startswith('multipart/'):
            raise MultiPartParserError('Invalid Content-Type: %s' % content_type)

        # Parse the header to get the boundary to split the parts.
        ctypes, opts = parse_header(content_type.encode('ascii'))
        boundary = opts.get('boundary')
        if not boundary or not cgi.valid_boundary(boundary):
            raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)

        # Content-Length should contain the length of the body we are about
        # to receive.
        try:
            content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
        except (ValueError, TypeError):
            content_length = 0

        if content_length < 0:
            # This means we shouldn't continue...raise an error.
            raise MultiPartParserError("Invalid content length: %r" % content_length)

        # Boundaries are matched against the raw byte stream, so keep bytes.
        if isinstance(boundary, six.text_type):
            boundary = boundary.encode('ascii')
        self._boundary = boundary
        self._input_data = input_data

        # For compatibility with low-level network APIs (with 32-bit integers),
        # the chunk size should be < 2^31, but still divisible by 4.
        possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
        self._chunk_size = min([2 ** 31 - 4] + possible_sizes)

        self._meta = META
        self._encoding = encoding or settings.DEFAULT_CHARSET
        self._content_length = content_length
        self._upload_handlers = upload_handlers

    def parse(self):
        """
        Parse the POST data and break it into a FILES MultiValueDict and a POST
        MultiValueDict.

        Returns a tuple containing the POST and FILES dictionary, respectively.
        """
        # We have to import QueryDict down here to avoid a circular import.
        from django.http import QueryDict

        encoding = self._encoding
        handlers = self._upload_handlers

        # HTTP spec says that Content-Length >= 0 is valid
        # handling content-length == 0 before continuing
        if self._content_length == 0:
            return QueryDict('', encoding=self._encoding), MultiValueDict()

        # See if any of the handlers take care of the parsing.
        # This allows overriding everything if need be.
        for handler in handlers:
            result = handler.handle_raw_input(self._input_data,
                                              self._meta,
                                              self._content_length,
                                              self._boundary,
                                              encoding)
            # Check to see if it was handled
            if result is not None:
                return result[0], result[1]

        # Create the data structures to be used later.
        self._post = QueryDict('', mutable=True)
        self._files = MultiValueDict()

        # Instantiate the parser and stream:
        stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))

        # Whether or not to signal a file-completion at the beginning of the loop.
        old_field_name = None
        counters = [0] * len(handlers)

        try:
            for item_type, meta_data, field_stream in Parser(stream, self._boundary):
                if old_field_name:
                    # We run this at the beginning of the next loop
                    # since we cannot be sure a file is complete until
                    # we hit the next boundary/part of the multipart content.
                    self.handle_file_complete(old_field_name, counters)
                    old_field_name = None

                try:
                    disposition = meta_data['content-disposition'][1]
                    field_name = disposition['name'].strip()
                except (KeyError, IndexError, AttributeError):
                    # Parts without a usable Content-Disposition are skipped.
                    continue

                transfer_encoding = meta_data.get('content-transfer-encoding')
                if transfer_encoding is not None:
                    transfer_encoding = transfer_encoding[0].strip()
                field_name = force_text(field_name, encoding, errors='replace')

                if item_type == FIELD:
                    # This is a post field, we can just set it in the post
                    if transfer_encoding == 'base64':
                        raw_data = field_stream.read()
                        try:
                            data = base64.b64decode(raw_data)
                        except _BASE64_DECODE_ERROR:
                            data = raw_data
                    else:
                        data = field_stream.read()

                    self._post.appendlist(field_name,
                                          force_text(data, encoding, errors='replace'))
                elif item_type == FILE:
                    # This is a file, use the handler...
                    file_name = disposition.get('filename')
                    if not file_name:
                        continue
                    file_name = force_text(file_name, encoding, errors='replace')
                    file_name = self.IE_sanitize(unescape_entities(file_name))

                    content_type, content_type_extra = meta_data.get('content-type', ('', {}))
                    content_type = content_type.strip()
                    charset = content_type_extra.get('charset')

                    try:
                        content_length = int(meta_data.get('content-length')[0])
                    except (IndexError, TypeError, ValueError):
                        content_length = None

                    counters = [0] * len(handlers)
                    try:
                        for handler in handlers:
                            try:
                                handler.new_file(field_name, file_name,
                                                 content_type, content_length,
                                                 charset, content_type_extra)
                            except StopFutureHandlers:
                                break

                        for chunk in field_stream:
                            if transfer_encoding == 'base64':
                                # We only special-case base64 transfer encoding
                                # We should always decode base64 chunks by multiple of 4,
                                # ignoring whitespace.

                                stripped_chunk = b"".join(chunk.split())

                                remaining = len(stripped_chunk) % 4
                                while remaining != 0:
                                    over_chunk = field_stream.read(4 - remaining)
                                    stripped_chunk += b"".join(over_chunk.split())
                                    remaining = len(stripped_chunk) % 4

                                try:
                                    chunk = base64.b64decode(stripped_chunk)
                                except Exception as e:
                                    # Since this is only a chunk, any error is an unfixable error.
                                    msg = "Could not decode base64 data: %r" % e
                                    six.reraise(MultiPartParserError, MultiPartParserError(msg), sys.exc_info()[2])

                            for i, handler in enumerate(handlers):
                                chunk_length = len(chunk)
                                chunk = handler.receive_data_chunk(chunk,
                                                                   counters[i])
                                counters[i] += chunk_length
                                if chunk is None:
                                    # If the chunk received by the handler is None, then don't continue.
                                    break
                    except SkipFile:
                        self._close_files()
                        # Just use up the rest of this file...
                        exhaust(field_stream)
                    else:
                        # Handle file upload completions on next iteration.
                        old_field_name = field_name
                else:
                    # If this is neither a FIELD or a FILE, just exhaust the stream.
                    exhaust(stream)
        except StopUpload as e:
            self._close_files()
            if not e.connection_reset:
                exhaust(self._input_data)
        else:
            # Make sure that the request data is all fed
            exhaust(self._input_data)

        # Signal that the upload has completed.
        for handler in handlers:
            retval = handler.upload_complete()
            if retval:
                break

        return self._post, self._files

    def handle_file_complete(self, old_field_name, counters):
        """
        Handle all the signaling that takes place when a file is complete.
        """
        for i, handler in enumerate(self._upload_handlers):
            file_obj = handler.file_complete(counters[i])
            if file_obj:
                # If it returns a file object, then set the files dict.
                self._files.appendlist(
                    force_text(old_field_name, self._encoding, errors='replace'),
                    file_obj)
                break

    def IE_sanitize(self, filename):
        """Cleanup filename from Internet Explorer full paths."""
        return filename and filename[filename.rfind("\\") + 1:].strip()

    def _close_files(self):
        # Free up all file handles.
        # FIXME: this currently assumes that upload handlers store the file as 'file'
        # We should document that... (Maybe add handler.free_file to complement new_file)
        for handler in self._upload_handlers:
            if hasattr(handler, 'file'):
                handler.file.close()
class LazyStream(six.Iterator):
    """
    The LazyStream wrapper allows one to get and "unget" bytes from a stream.

    Given a producer object (an iterator that yields bytestrings), the
    LazyStream object will support iteration, reading, and keeping a "look-back"
    variable in case you need to "unget" some bytes.
    """
    def __init__(self, producer, length=None):
        """
        Every LazyStream must have a producer when instantiated.

        A producer is an iterable that returns a string each time it
        is called.
        """
        self._producer = producer
        self._empty = False
        self._leftover = b''
        self.length = length
        self.position = 0
        self._remaining = length
        self._unget_history = []

    def tell(self):
        # Current read position, accounting for any ungot bytes.
        return self.position

    def read(self, size=None):
        def parts():
            remaining = self._remaining if size is None else size
            # do the whole thing in one shot if no limit was provided.
            if remaining is None:
                yield b''.join(self)
                return

            # otherwise do some bookkeeping to return exactly enough
            # of the stream and stashing any extra content we get from
            # the producer
            while remaining != 0:
                assert remaining > 0, 'remaining bytes to read should never go negative'

                try:
                    chunk = next(self)
                except StopIteration:
                    return
                else:
                    emitting = chunk[:remaining]
                    self.unget(chunk[remaining:])
                    remaining -= len(emitting)
                    yield emitting

        out = b''.join(parts())
        return out

    def __next__(self):
        """
        Used when the exact number of bytes to read is unimportant.

        This procedure just returns whatever is chunk is conveniently returned
        from the iterator instead. Useful to avoid unnecessary bookkeeping if
        performance is an issue.
        """
        if self._leftover:
            # Serve previously ungot bytes before pulling from the producer.
            output = self._leftover
            self._leftover = b''
        else:
            output = next(self._producer)
            self._unget_history = []
        self.position += len(output)
        return output

    def close(self):
        """
        Used to invalidate/disable this lazy stream.

        Replaces the producer with an empty list. Any leftover bytes that have
        already been read will still be reported upon read() and/or next().
        """
        self._producer = []

    def __iter__(self):
        return self

    def unget(self, bytes):
        """
        Places bytes back onto the front of the lazy stream.

        Future calls to read() will return those bytes first. The
        stream position and thus tell() will be rewound.
        """
        if not bytes:
            return
        self._update_unget_history(len(bytes))
        self.position -= len(bytes)
        self._leftover = b''.join([bytes, self._leftover])

    def _update_unget_history(self, num_bytes):
        """
        Updates the unget history as a sanity check to see if we've pushed
        back the same number of bytes in one chunk. If we keep ungetting the
        same number of bytes many times (here, 50), we're mostly likely in an
        infinite loop of some sort. This is usually caused by a
        maliciously-malformed MIME request.
        """
        self._unget_history = [num_bytes] + self._unget_history[:49]
        number_equal = len([current_number for current_number in self._unget_history
                            if current_number == num_bytes])

        if number_equal > 40:
            raise SuspiciousMultipartForm(
                "The multipart parser got stuck, which shouldn't happen with"
                " normal uploaded files. Check for malicious upload activity;"
                " if there is none, report this to the Django developers."
            )
class ChunkIter(six.Iterator):
    """
    An iterable that will yield chunks of data. Given a file-like object as the
    constructor, this object will yield chunks of read operations from that
    object.
    """
    def __init__(self, flo, chunk_size=64 * 1024):
        self.flo = flo
        self.chunk_size = chunk_size

    def __next__(self):
        try:
            data = self.flo.read(self.chunk_size)
        except InputStreamExhausted:
            raise StopIteration()
        # An empty read means the underlying stream is done.
        if not data:
            raise StopIteration()
        return data

    def __iter__(self):
        return self
class InterBoundaryIter(six.Iterator):
    """
    A Producer that will iterate over boundaries.
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary

    def __iter__(self):
        return self

    def __next__(self):
        # Each item is a LazyStream over one encapsulated part; BoundaryIter
        # raises InputStreamExhausted when the outer stream has no data left.
        try:
            return LazyStream(BoundaryIter(self._stream, self._boundary))
        except InputStreamExhausted:
            raise StopIteration()
class BoundaryIter(six.Iterator):
    """
    A Producer that is sensitive to boundaries.

    Will happily yield bytes until a boundary is found. Will yield the bytes
    before the boundary, throw away the boundary bytes themselves, and push the
    post-boundary bytes back on the stream.

    The future calls to next() after locating the boundary will raise a
    StopIteration exception.
    """

    def __init__(self, stream, boundary):
        self._stream = stream
        self._boundary = boundary
        self._done = False
        # rollback an additional six bytes because the format is like
        # this: CRLF<boundary>[--CRLF]
        self._rollback = len(boundary) + 6

        # Try to use mx fast string search if available. Otherwise
        # use Python find. Wrap the latter for consistency.
        # Probe one byte to detect an already-exhausted stream, then push it
        # back so no data is lost.
        unused_char = self._stream.read(1)
        if not unused_char:
            raise InputStreamExhausted()
        self._stream.unget(unused_char)

    def __iter__(self):
        return self

    def __next__(self):
        if self._done:
            raise StopIteration()

        stream = self._stream
        rollback = self._rollback

        bytes_read = 0
        chunks = []
        for bytes in stream:
            bytes_read += len(bytes)
            chunks.append(bytes)
            # Read at least `rollback` bytes so a boundary split across chunk
            # edges can still be detected.
            if bytes_read > rollback:
                break
            if not bytes:
                break
        else:
            self._done = True

        if not chunks:
            raise StopIteration()

        chunk = b''.join(chunks)
        boundary = self._find_boundary(chunk, len(chunk) < self._rollback)

        if boundary:
            end, next = boundary
            stream.unget(chunk[next:])
            self._done = True
            return chunk[:end]
        else:
            # make sure we don't treat a partial boundary (and
            # its separators) as data
            if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
                # There's nothing left, we should just return and mark as done.
                self._done = True
                return chunk
            else:
                stream.unget(chunk[-rollback:])
                return chunk[:-rollback]

    def _find_boundary(self, data, eof=False):
        """
        Finds a multipart boundary in data.

        Should no boundary exist in the data None is returned instead. Otherwise
        a tuple containing the indices of the following are returned:

         * the end of current encapsulation
         * the start of the next encapsulation
        """
        index = data.find(self._boundary)
        if index < 0:
            return None
        else:
            end = index
            next = index + len(self._boundary)
            # backup over CRLF
            last = max(0, end - 1)
            if data[last:last + 1] == b'\n':
                end -= 1
            last = max(0, end - 1)
            if data[last:last + 1] == b'\r':
                end -= 1
            return end, next
def exhaust(stream_or_iterable):
    """
    Completely exhausts an iterator or stream.

    Raise a MultiPartParserError if the argument is not a stream or an iterable.
    """
    try:
        iterator = iter(stream_or_iterable)
    except TypeError:
        # Not iterable; fall back to chunked reads, but only for objects that
        # actually look like streams. Previously the documented
        # MultiPartParserError was unreachable (`iterator` could never be
        # None) and a bogus argument surfaced later as an AttributeError.
        if not hasattr(stream_or_iterable, 'read'):
            raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
        iterator = ChunkIter(stream_or_iterable, 16384)

    for __ in iterator:
        pass
def parse_boundary_stream(stream, max_header_size):
    """
    Parses one and exactly one stream that encapsulates a boundary.

    Returns a ``(part_type, headers_dict, stream)`` triple where ``part_type``
    is one of RAW, FIELD or FILE.
    """
    # Stream at beginning of header, look for end of header
    # and parse it if found. The header must fit within one
    # chunk.
    chunk = stream.read(max_header_size)

    # 'find' returns the top of these four bytes, so we'll
    # need to munch them later to prevent them from polluting
    # the payload.
    header_end = chunk.find(b'\r\n\r\n')

    def _parse_header(line):
        main_value_pair, params = parse_header(line)
        try:
            name, value = main_value_pair.split(':', 1)
        except ValueError:
            raise ValueError("Invalid header: %r" % line)
        return name, (value, params)

    if header_end == -1:
        # we find no header, so we just mark this fact and pass on
        # the stream verbatim
        stream.unget(chunk)
        return (RAW, {}, stream)

    header = chunk[:header_end]

    # here we place any excess chunk back onto the stream, as
    # well as throwing away the CRLFCRLF bytes from above.
    stream.unget(chunk[header_end + 4:])

    TYPE = RAW
    outdict = {}

    # Eliminate blank lines
    for line in header.split(b'\r\n'):
        # This terminology ("main value" and "dictionary of
        # parameters") is from the Python docs.
        try:
            name, (value, params) = _parse_header(line)
        except ValueError:
            continue

        if name == 'content-disposition':
            TYPE = FIELD
            # A filename parameter upgrades the part to a file upload.
            if params.get('filename'):
                TYPE = FILE

        outdict[name] = value, params

    if TYPE == RAW:
        stream.unget(chunk)

    return (TYPE, outdict, stream)
class Parser(object):
    """Iterate over the parts of a multipart stream.

    Yields ``(part_type, headers, part_stream)`` triples as produced by
    ``parse_boundary_stream`` for each encapsulated part.
    """
    def __init__(self, stream, boundary):
        self._stream = stream
        # Boundary lines in the body are prefixed with two dashes.
        self._separator = b'--' + boundary

    def __iter__(self):
        boundarystream = InterBoundaryIter(self._stream, self._separator)
        for sub_stream in boundarystream:
            # Iterate over each part
            yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
    """ Parse the header into a key-value.

    Input (line): bytes, output: unicode for key/name, bytes for value which
    will be decoded later
    """
    plist = _parse_header_params(b';' + line)
    key = plist.pop(0).lower().decode('ascii')
    pdict = {}
    for p in plist:
        i = p.find(b'=')
        if i >= 0:
            has_encoding = False
            name = p[:i].strip().lower().decode('ascii')
            if name.endswith('*'):
                # Lang/encoding embedded in the value (like "filename*=UTF-8''file.ext")
                # http://tools.ietf.org/html/rfc2231#section-4
                name = name[:-1]
                if p.count(b"'") == 2:
                    has_encoding = True
            value = p[i + 1:].strip()
            if has_encoding:
                encoding, lang, value = value.split(b"'")
                if six.PY3:
                    value = unquote(value.decode(), encoding=encoding.decode())
                else:
                    value = unquote(value).decode(encoding)
            # Strip surrounding double quotes and unescape backslash sequences.
            if len(value) >= 2 and value[:1] == value[-1:] == b'"':
                value = value[1:-1]
                value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
            pdict[name] = value
    return key, pdict
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| bsd-3-clause |
sipwise/repoapi | build/test/test_utils.py | 1 | 11161 | # Copyright (C) 2017-2020 The Sipwise Team - http://sipwise.com
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import re
from unittest.mock import patch
from django.test import override_settings
from django.test import SimpleTestCase
from build import exceptions as err
from build.conf import settings
from build.utils import get_common_release
from build.utils import get_simple_release
from build.utils import is_release_trunk
from build.utils import ReleaseConfig
from build.utils import trigger_build
from build.utils import trigger_copy_deps
class SimpleIsReleaseTrunkTest(SimpleTestCase):
    """Checks for build.utils.is_release_trunk()."""

    def test_trunk(self):
        matched, distro = is_release_trunk("trunk")
        self.assertFalse(matched)
        self.assertIsNone(distro)

    def test_mrXX(self):
        matched, distro = is_release_trunk("release-mr8.5")
        self.assertFalse(matched)
        self.assertIsNone(distro)

    def test_release_trunk(self):
        matched, distro = is_release_trunk("release-trunk-buster")
        self.assertTrue(matched)
        self.assertEqual(distro, "buster")
        matched, distro = is_release_trunk("release-trunk-bullseye")
        self.assertTrue(matched)
        self.assertEqual(distro, "bullseye")
class SimpleReleaseTest(SimpleTestCase):
    """Checks for build.utils.get_simple_release()."""

    def test_trunk(self):
        self.assertEqual(get_simple_release("release-trunk-buster"), "trunk")

    def test_branch_release(self):
        self.assertEqual(get_simple_release("release-mr8.0"), "mr8.0")

    def test_release_ok(self):
        self.assertEqual(get_simple_release("release-mr8.1.1"), "mr8.1.1")

    def test_release_update_ok(self):
        self.assertEqual(get_simple_release("release-mr8.1-update"), "mr8.1")

    def test_release_ko(self):
        # Values without the "release-" prefix are rejected.
        self.assertIsNone(get_simple_release("mr8.1.1"))
class CommonReleaseTest(SimpleTestCase):
    """Checks for build.utils.get_common_release()."""

    def test_trunk(self):
        self.assertEqual(get_common_release("release-trunk-buster"), "master")

    def test_branch_release(self):
        self.assertEqual(get_common_release("release-mr8.0"), "mr8.0")

    def test_release_ok(self):
        # Point releases collapse to their minor branch.
        self.assertEqual(get_common_release("mr8.1.1"), "mr8.1")

    def test_release_ko(self):
        self.assertIsNone(get_common_release("whatever-mr8.1.1"))
class ReleaseConfigTestCase(SimpleTestCase):
    """Tests for build.utils.ReleaseConfig.

    Exercises release enumeration, branch/tag resolution and the
    staged build-dependency iterator against the checked-in release
    configuration files.
    """
    # expected build dependencies (in declared order) for trunk
    build_deps = [
        "data-hal",
        "ngcp-schema",
        "libinewrate",
        "libswrate",
        "libtcap",
        "sipwise-base",
        "check-tools",
    ]
    @override_settings(BUILD_RELEASES_SKIP=["mr0.1"])
    def test_supported_releases(self):
        supported = [
            "release-trunk-buster",
            "release-trunk-bullseye",
            "mr8.1.2",
            "mr8.1",
            "mr7.5.3",
            "mr7.5.2",
            "mr7.5.1",
            "mr7.5",
        ]
        res = ReleaseConfig.supported_releases()
        self.assertListEqual(res, supported)
    @patch.object(ReleaseConfig, "supported_releases")
    def test_supported_releases_dict(self, sr):
        # each release is paired with its common base branch
        res_ok = [
            {"release": "release-trunk-buster", "base": "master"},
            {"release": "mr8.0", "base": "mr8.0"},
            {"release": "mr8.0.1", "base": "mr8.0"},
            {"release": "mr7.5.1", "base": "mr7.5"},
        ]
        sr.return_value = [
            "release-trunk-buster",
            "mr8.0",
            "mr8.0.1",
            "mr7.5.1",
        ]
        res = ReleaseConfig.supported_releases_dict()
        self.assertListEqual(res, res_ok)
    def test_no_release_config(self):
        with self.assertRaises(err.NoConfigReleaseFile):
            ReleaseConfig("fake_release")
    def test_no_jenkins_jobs(self):
        with self.assertRaises(err.NoJenkinsJobsInfo):
            ReleaseConfig("mr0.1")
    def test_ok(self):
        rc = ReleaseConfig("trunk")
        self.assertIsNotNone(rc.config)
        self.assertListEqual(list(rc.build_deps.keys()), self.build_deps)
        self.assertEqual(rc.debian_release, "buster")
        self.assertEqual(len(rc.projects), 73)
    def test_debian_release_value(self):
        rc = ReleaseConfig("trunk")
        self.assertEqual(rc.debian_release, "buster")
        rc = ReleaseConfig("release-trunk-bullseye")
        self.assertEqual(rc.debian_release, "bullseye")
        rc = ReleaseConfig("trunk", "bullseye")
        self.assertEqual(rc.debian_release, "bullseye")
        # distribution parameter is only used with trunk
        rc = ReleaseConfig("release-mr8.1-update", "bullseye")
        self.assertEqual(rc.debian_release, "buster")
    def test_release_value(self):
        rc = ReleaseConfig("trunk")
        self.assertEqual(rc.release, "trunk")
    def test_branch_tag_value_trunk(self):
        # trunk builds from master and has no tag
        rc = ReleaseConfig("trunk")
        self.assertEqual(rc.branch, "master")
        self.assertIsNone(rc.tag)
    def test_branch_tag_value_mrXX(self):
        rc = ReleaseConfig("mr8.1")
        self.assertEqual(rc.branch, "mr8.1")
        self.assertIsNone(rc.tag)
    def test_branch_tag_value_mrXXX(self):
        # point releases carry an explicit tag
        rc = ReleaseConfig("mr7.5.2")
        self.assertEqual(rc.branch, "mr7.5.2")
        self.assertEqual(rc.tag, "mr7.5.2.1")
    def test_build_deps(self):
        rc = ReleaseConfig("trunk")
        build_deps = [
            "data-hal",
            "ngcp-schema",
            "libinewrate",
            "libswrate",
            "libtcap",
            "sipwise-base",
            "check-tools",
        ]
        self.assertListEqual(list(rc.build_deps.keys()), build_deps)
    def test_build_deps_iter_step_1(self):
        # first iteration level: everything except ngcp-schema
        rc = ReleaseConfig("trunk")
        build_deps = [
            "data-hal",
            "libinewrate",
            "libswrate",
            "libtcap",
            "sipwise-base",
            "check-tools",
        ]
        values = []
        for prj in rc.wanna_build_deps(0):
            values.append(prj)
        self.assertListEqual(build_deps, values)
    def test_build_deps_iter_step_2(self):
        # second iteration level: only ngcp-schema remains
        rc = ReleaseConfig("trunk")
        values = []
        for prj in rc.wanna_build_deps(1):
            values.append(prj)
        self.assertListEqual(["ngcp-schema"], values)
@patch("build.utils.open_jenkins_url")
class TriggerBuild(SimpleTestCase):
    """Tests for trigger_build()/trigger_copy_deps().

    open_jenkins_url is mocked out, so each test checks the exact
    Jenkins buildWithParameters URL that would have been requested and
    the job URL returned to the caller.
    """
    def test_project_build(self, openurl):
        params = {
            "project": "kamailio-get-code",
            "release_uuid": "UUID_mr8.2",
            "trigger_release": "release-mr8.2",
            "trigger_branch_or_tag": "branch/mr8.2",
            "trigger_distribution": "buster",
            "uuid": "UUID_A",
        }
        url = (
            "{base}/job/{project}/buildWithParameters?"
            "token={token}&cause={trigger_release}&uuid={uuid}&"
            "release_uuid={release_uuid}&"
            "branch=mr8.2&tag=none&"
            "release={trigger_release}&distribution={trigger_distribution}"
        )
        res = trigger_build(**params)
        params["base"] = settings.JENKINS_URL
        params["token"] = settings.JENKINS_TOKEN
        self.assertEqual(res, "{base}/job/{project}/".format(**params))
        openurl.assert_called_once_with(url.format(**params))
    def test_project_build_uuid(self, openurl):
        # no explicit uuid passed: each call must generate a fresh one
        params = {
            "project": "kamailio-get-code",
            "release_uuid": "UUID_mr8.2",
            "trigger_release": "release-mr8.2",
            "trigger_branch_or_tag": "branch/mr8.2",
            "trigger_distribution": "buster",
        }
        res = [trigger_build(**params), trigger_build(**params)]
        params["base"] = settings.JENKINS_URL
        params["token"] = settings.JENKINS_TOKEN
        self.assertEqual(res[0], "{base}/job/{project}/".format(**params))
        self.assertEqual(res[0], res[1])
        uuids = list()
        self.assertEqual(len(openurl.call_args_list), 2)
        for call in openurl.call_args_list:
            # pull the generated uuid out of each requested URL
            m = re.match(r".+&uuid=([^&]+)&.+", str(call))
            self.assertIsNotNone(m)
            uuids.append(m.groups(0))
        self.assertNotEqual(uuids[0], uuids[1])
    def test_copy_debs_build(self, openurl):
        params = {
            "release": "release-mr8.2",
            "internal": True,
            "release_uuid": "UUID_mr8.2",
            "uuid": "UUID_A",
        }
        url = (
            "{base}/job/{project}/buildWithParameters?"
            "token={token}&cause={release}&uuid={uuid}&"
            "release_uuid={release_uuid}&"
            "release=mr8.2&internal=true"
        )
        res = trigger_copy_deps(**params)
        params["project"] = "release-copy-debs-yml"
        params["base"] = settings.JENKINS_URL
        params["token"] = settings.JENKINS_TOKEN
        self.assertEqual(res, "{base}/job/{project}/".format(**params))
        openurl.assert_called_once_with(url.format(**params))
    def test_project_build_trunk(self, openurl):
        # trunk releases build from master with release=trunk
        params = {
            "project": "kamailio-get-code",
            "release_uuid": "UUID_mr8.2",
            "trigger_release": "trunk",
            "trigger_branch_or_tag": "branch/master",
            "trigger_distribution": "buster",
            "uuid": "UUID_A",
        }
        url = (
            "{base}/job/{project}/buildWithParameters?"
            "token={token}&cause={trigger_release}&uuid={uuid}&"
            "release_uuid={release_uuid}&"
            "branch=master&tag=none&"
            "release=trunk&distribution={trigger_distribution}"
        )
        res = trigger_build(**params)
        params["base"] = settings.JENKINS_URL
        params["token"] = settings.JENKINS_TOKEN
        self.assertEqual(res, "{base}/job/{project}/".format(**params))
        openurl.assert_called_once_with(url.format(**params))
    def test_copy_debs_build_trunk(self, openurl):
        # trunk copy-deps keeps the full release-trunk-<debian> name
        params = {
            "release": "release-trunk-buster",
            "internal": True,
            "release_uuid": "UUID_master",
            "uuid": "UUID_B",
        }
        url = (
            "{base}/job/{project}/buildWithParameters?"
            "token={token}&cause={release}&uuid={uuid}&"
            "release_uuid={release_uuid}&"
            "release=release-trunk-buster&internal=true"
        )
        res = trigger_copy_deps(**params)
        params["project"] = "release-copy-debs-yml"
        params["base"] = settings.JENKINS_URL
        params["token"] = settings.JENKINS_TOKEN
        self.assertEqual(res, "{base}/job/{project}/".format(**params))
        openurl.assert_called_once_with(url.format(**params))
| gpl-3.0 |
Stane1983/kernel-aml-m3 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return an infinitely nestable dict: missing keys auto-create sub-dicts."""
    return defaultdict(autodict)
# Registries populated by the define_* callbacks below:
# event name -> field name -> {'values': {bit: name}, 'delim': str}
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used when joining flag names for this field."""
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Register the symbolic name for one flag bit of a field."""
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the symbolic name for one exact value of a field."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Translate the bit-mask *value* of a flag field into its symbolic
    form, joining the individual flag names with the field's registered
    delimiter. Named bits are cleared from *value* as they are emitted."""
    string = ""
    # defaultdict returns the same nested mapping on every access,
    # so hoisting the lookup does not change behavior.
    field = flag_fields[event_name][field_name]
    if field:
        print_delim = 0
        # sorted() works with both Python 2 lists and Python 3 dict
        # views; the previous keys()/list.sort() idiom breaks on Python 3.
        for idx in sorted(field['values'].keys()):
            if not value and not idx:
                # value == 0: emit the dedicated zero-flag name, if any
                string += field['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and field['delim']:
                    string += " " + field['delim'] + " "
                string += field['values'][idx]
                print_delim = 1
                value &= ~idx  # clear bits already rendered
    return string
def symbol_str(event_name, field_name, value):
    """Translate an exact field *value* into its registered symbolic name,
    or return the empty string when the value is unknown."""
    string = ""
    field = symbolic_fields[event_name][field_name]
    if field:
        # sorted() works with both Python 2 lists and Python 3 dict
        # views; the previous keys()/list.sort() idiom breaks on Python 3.
        for idx in sorted(field['values'].keys()):
            if not value and not idx:
                string = field['values'][idx]
                break
            if (value == idx):
                string = field['values'][idx]
                break
    return string
# Bit values of the common trace flags and their symbolic names.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}

def trace_flag_str(value):
    """Render the trace-flags bit-mask *value* as a " | "-joined string."""
    string = ""
    print_delim = 0
    # Iterate in fixed ascending bit order so the rendered string is
    # deterministic on every Python version (plain .keys() order was
    # unspecified on older interpreters).
    for idx in sorted(trace_flags.keys()):
        if not value and not idx:
            string += "NONE"
            break
        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            value &= ~idx
    return string
def taskState(state):
    """Map a numeric scheduler task state to its one-letter code.

    Unrecognized states map to the string "Unknown".
    """
    states = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }
    return states.get(state, "Unknown")
class EventHeaders:
    """Common header fields shared by every traced event."""
    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm
    def ts(self):
        """Full event timestamp in nanoseconds."""
        return self.secs * 10 ** 9 + self.nsecs
    def ts_format(self):
        """Timestamp formatted as 'seconds.microseconds'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
ryfeus/lambda-packs | Pyrestest_wrk/source/pip/_vendor/requests/adapters.py | 205 | 16799 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3 import Retry
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
# Defaults handed to urllib3: do not block when the pool is exhausted,
# cache 10 connection pools / connections, and never retry failed requests.
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
    """The Base Transport Adapter"""
    def __init__(self):
        super(BaseAdapter, self).__init__()
    def send(self):
        # Subclasses must implement actual dispatch of a PreparedRequest.
        raise NotImplementedError
    def close(self):
        # Subclasses must release any pooled resources here.
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        """Set up the retry policy and the urllib3 connection pool manager."""
        if max_retries == DEFAULT_RETRIES:
            # default: retry failed connects but never retry after a read
            # has started (read=False)
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}
        super(HTTPAdapter, self).__init__()
        # keep the pool parameters so the adapter can be pickled/rebuilt
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block
        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
    def __setstate__(self, state):
        """Restore pickled attributes and rebuild the unpicklable pools."""
        # Can't handle by adding 'proxy_manager' to self.__attrs__ because
        # self.poolmanager uses a lambda function, which isn't pickleable.
        self.proxy_manager = {}
        self.config = {}
        for attr, value in state.items():
            setattr(self, attr, value)
        self.init_poolmanager(self._pool_connections, self._pool_maxsize,
                              block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.
        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block
        # strict=True makes urllib3 raise on malformed HTTP status lines
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
if not proxy in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Whether we should actually verify the certificate
            (True/False), or a path to a CA bundle to verify against.
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith('https') and verify:
            cert_loc = None
            # Allow self-specified cert location.
            if verify is not True:
                cert_loc = verify
            if not cert_loc:
                cert_loc = DEFAULT_CA_BUNDLE_PATH
            if not cert_loc:
                raise Exception("Could not find a suitable SSL CA certificate bundle.")
            conn.cert_reqs = 'CERT_REQUIRED'
            conn.ca_certs = cert_loc
        else:
            # plain HTTP, or verification explicitly disabled
            conn.cert_reqs = 'CERT_NONE'
            conn.ca_certs = None
        if cert:
            # cert may be a (cert_file, key_file) pair or a single combined file
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
    def build_response(self, req, resp):
        """Builds a :class:`Response <requests.Response>` object from a urllib3
        response. This should not be called from user code, and is only exposed
        for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
        :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
        :param resp: The urllib3 response object.
        """
        response = Response()
        # Fallback to None if there's no status_code, for whatever reason.
        response.status_code = getattr(resp, 'status', None)
        # Make headers case-insensitive.
        response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
        # Set encoding.
        response.encoding = get_encoding_from_headers(response.headers)
        response.raw = resp
        response.reason = response.raw.reason
        if isinstance(req.url, bytes):
            response.url = req.url.decode('utf-8')
        else:
            response.url = req.url
        # Add new cookies from the server.
        extract_cookies_to_jar(response.cookies, req, resp)
        # Give the Response some context.
        response.request = req
        response.connection = self
        return response
    def get_connection(self, url, proxies=None):
        """Returns a urllib3 connection for the given URL. This should not be
        called from user code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param url: The URL to connect to.
        :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
        """
        proxies = proxies or {}
        # look up a proxy for this URL's scheme (keys are lowercase schemes)
        proxy = proxies.get(urlparse(url.lower()).scheme)
        if proxy:
            proxy = prepend_scheme_if_needed(proxy, 'http')
            proxy_manager = self.proxy_manager_for(proxy)
            conn = proxy_manager.connection_from_url(url)
        else:
            # Only scheme should be lower case
            parsed = urlparse(url)
            url = parsed.geturl()
            conn = self.poolmanager.connection_from_url(url)
        return conn
    def close(self):
        """Disposes of any internal state.
        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        # NOTE: cached proxy managers in self.proxy_manager are not cleared here.
        self.poolmanager.clear()
    def request_url(self, request, proxies):
        """Obtain the url to use when making the final request.
        If the message is being sent through a HTTP proxy, the full URL has to
        be used. Otherwise, we should only use the path portion of the URL.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param proxies: A dictionary of schemes to proxy URLs.
        """
        proxies = proxies or {}
        scheme = urlparse(request.url).scheme
        proxy = proxies.get(scheme)
        if proxy and scheme != 'https':
            # plain-HTTP proxying: send the absolute URL (auth/fragment stripped)
            url = urldefragauth(request.url)
        else:
            # direct connection or CONNECT tunnel: path portion is enough
            url = request.path_url
        return url
    def add_headers(self, request, **kwargs):
        """Add any headers needed by the connection. As of v2.0 this does
        nothing by default, but is left for overriding by users that subclass
        the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
        :param kwargs: The keyword arguments from the call to send().
        """
        # Intentionally a no-op: subclasses override this hook.
        pass
    def proxy_headers(self, proxy):
        """Returns a dictionary of the headers to add to any request sent
        through a proxy. This works with urllib3 magic to ensure that they are
        correctly sent to the proxy, rather than in a tunnelled request if
        CONNECT is being used.
        This should not be called from user code, and is only exposed for use
        when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
        :param proxy: The url of the proxy being used for this request.
        """
        headers = {}
        # credentials embedded in the proxy URL become a Proxy-Authorization header
        username, password = get_auth_from_url(proxy)
        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                             password)
        return headers
    def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
        """Sends PreparedRequest object. Returns Response object.
        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a (`connect timeout, read
            timeout <user/advanced.html#timeouts>`_) tuple.
        :type timeout: float or tuple
        :param verify: (optional) Whether to verify SSL certificates.
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        conn = self.get_connection(request.url, proxies)
        self.cert_verify(conn, request.url, verify, cert)
        url = self.request_url(request, proxies)
        self.add_headers(request)
        # No body or an explicit Content-Length means a regular request;
        # otherwise the body is streamed with chunked transfer-encoding.
        chunked = not (request.body is None or 'Content-Length' in request.headers)
        if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
            except ValueError as e:
                # this may raise a string formatting error.
                err = ("Invalid timeout {0}. Pass a (connect, read) "
                       "timeout tuple, or a single float to set "
                       "both timeouts to the same value".format(timeout))
                raise ValueError(err)
        else:
            # single value applies to both the connect and the read timeout
            timeout = TimeoutSauce(connect=timeout, read=timeout)
        try:
            if not chunked:
                resp = conn.urlopen(
                    method=request.method,
                    url=url,
                    body=request.body,
                    headers=request.headers,
                    redirect=False,
                    assert_same_host=False,
                    preload_content=False,
                    decode_content=False,
                    retries=self.max_retries,
                    timeout=timeout
                )
            # Send the request.
            else:
                # Chunked path: drive the low-level http connection by hand.
                if hasattr(conn, 'proxy_pool'):
                    conn = conn.proxy_pool
                low_conn = conn._get_conn(timeout=timeout)
                try:
                    low_conn.putrequest(request.method,
                                        url,
                                        skip_accept_encoding=True)
                    for header, value in request.headers.items():
                        low_conn.putheader(header, value)
                    low_conn.endheaders()
                    # emit each body piece as a chunk: hex length, CRLF, data, CRLF
                    for i in request.body:
                        low_conn.send(hex(len(i))[2:].encode('utf-8'))
                        low_conn.send(b'\r\n')
                        low_conn.send(i)
                        low_conn.send(b'\r\n')
                    # zero-length chunk terminates the chunked body
                    low_conn.send(b'0\r\n\r\n')
                    r = low_conn.getresponse()
                    resp = HTTPResponse.from_httplib(
                        r,
                        pool=conn,
                        connection=low_conn,
                        preload_content=False,
                        decode_content=False
                    )
                except:
                    # If we hit any problems here, clean up the connection.
                    # Then, reraise so that we can handle the actual exception.
                    low_conn.close()
                    raise
                else:
                    # All is well, return the connection to the pool.
                    conn._put_conn(low_conn)
        except (ProtocolError, socket.error) as err:
            raise ConnectionError(err, request=request)
        except MaxRetryError as e:
            # map urllib3 retry failures onto the requests exception hierarchy
            if isinstance(e.reason, ConnectTimeoutError):
                raise ConnectTimeout(e, request=request)
            if isinstance(e.reason, ResponseError):
                raise RetryError(e, request=request)
            raise ConnectionError(e, request=request)
        except _ProxyError as e:
            raise ProxyError(e)
        except (_SSLError, _HTTPError) as e:
            if isinstance(e, _SSLError):
                raise SSLError(e, request=request)
            elif isinstance(e, ReadTimeoutError):
                raise ReadTimeout(e, request=request)
            else:
                raise
        return self.build_response(request, resp)
| mit |
RNAcentral/rnacentral-import-pipeline | tests/databases/gtrnadb/parser_test.py | 1 | 5743 | # -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import attr
import pytest
from rnacentral_pipeline.databases import data
from rnacentral_pipeline.databases.gtrnadb import parser
@pytest.fixture(scope="module")
def simple():
    # Parse the small GtRNAdb export once per module; yields Entry objects.
    tax_file = Path("taxonomy.db")
    with open("data/gtrnadb/simple.json", "r") as raw:
        yield list(parser.parse(raw, tax_file))
@pytest.fixture(scope="module")
def version2():
    # Same as `simple` but for the GtRNAdb version-2 export format.
    with open("data/gtrnadb/version2.json", "r") as raw:
        tax_file = Path("taxonomy.db")
        yield list(parser.parse(raw, tax_file))
def test_it_can_generate_all_entries(simple):
    # presumably 1000 raw records minus the 16 skipped pseudogenes — see note
    assert len(simple) == 984 # 16 pseudogenes
def test_it_generates_correct_entries(simple):
    # Golden-value check: the first parsed record must match this Entry
    # field-for-field (compared as dicts for readable diffs).
    assert attr.asdict(simple[0]) == attr.asdict(
        data.Entry(
            primary_id="tRNA-Ala-CGC-1-1:CP000828.1:603738-603810",
            accession="CP000828.1:tRNA-Ala-CGC-1-1",
            ncbi_tax_id=329726,
            database="GTRNADB",
            sequence="GGGGAATTAGCTCAGCTGGTAGAGTGCTGCGATCGCACCGCAGAGGTCAGGGGTTCGAATCCCCTATTCTCCA",
            regions=[],
            rna_type="tRNA",
            url="http://gtrnadb.ucsc.edu/genomes/bacteria/Acar_mari_MBIC11017/genes/tRNA-Ala-CGC-1-1.html",
            seq_version="1",
            note_data={
                "anticodon": "CGC",
                "anticodon_positions": [
                    {
                        "relative_start": 34,
                        "relative_stop": 36,
                    }
                ],
                "isotype": "Ala",
                "score": 72.7,
                "url": "http://gtrnadb.ucsc.edu/genomes/bacteria/Acar_mari_MBIC11017/genes/tRNA-Ala-CGC-1-1.html",
            },
            secondary_structure=data.SecondaryStructure.empty(),
            references=[
                data.Reference(
                    authors="Chan P.P., Lowe T.M.",
                    location="Nucl. Acids Res. 37(Database issue)",
                    title=(
                        "GtRNAdb: A database of transfer RNA genes detected in "
                        "genomic sequence"
                    ),
                    pmid=18984615,
                    doi="10.1093/nar/gkn787.",
                )
            ],
            chromosome="chr",
            species="Acaryochloris marina MBIC11017",
            common_name=None,
            anticodon="CGC",
            lineage="Bacteria; Cyanobacteria; Synechococcales; Acaryochloridaceae; Acaryochloris; Acaryochloris marina MBIC11017",
            gene="tRNA-Ala-CGC-1-1",
            optional_id="tRNA-Ala-CGC-1-1",
            product="tRNA-Ala (CGC)",
            parent_accession="CP000828.1",
            description="Acaryochloris marina MBIC11017 tRNA-Ala (CGC)",
            mol_type="genomic DNA",
            location_start=1,
            location_end=73,
            gene_synonyms=["chr.trna27-AlaCGC"],
        )
    )
def test_it_generates_all_entries(version2):
    # every record in the version-2 export parses to an entry
    assert len(version2) == 1000
def test_it_creates_correct_entries(version2):
    # Golden-value check for the version-2 format (fourth parsed record).
    assert attr.asdict(version2[3]) == attr.asdict(
        data.Entry(
            primary_id="tRNA-Arg-CCG-1-1:CP003168.1:421631-421753",
            accession="CP003168.1:tRNA-Arg-CCG-1-1",
            ncbi_tax_id=673860,
            database="GTRNADB",
            sequence="GGGCCCGTGGGGTAGCTTGGATATCCTAGGGGCCTCCGGAGCCCCGGACCCGGGTTCGAATCCCGGCGGGCCCG",
            regions=[],
            rna_type="tRNA",
            url="http://gtrnadb.ucsc.edu/genomes/archaea/Acid_MAR08_339/genes/tRNA-Arg-CCG-1-1.html",
            seq_version="1",
            note_data={
                "anticodon": "CCG",
                "anticodon_positions": [{"relative_start": 36, "relative_stop": 38}],
                "isotype": "Arg",
                "score": 73.6,
                "url": "http://gtrnadb.ucsc.edu/genomes/archaea/Acid_MAR08_339/genes/tRNA-Arg-CCG-1-1.html",
            },
            secondary_structure=data.SecondaryStructure.empty(),
            references=[
                data.Reference(
                    authors="Chan P.P., Lowe T.M.",
                    location="Nucl. Acids Res. 37(Database issue)",
                    title=(
                        "GtRNAdb: A database of transfer RNA genes detected in "
                        "genomic sequence"
                    ),
                    pmid=18984615,
                    doi="10.1093/nar/gkn787.",
                )
            ],
            chromosome="chr",
            species="Aciduliprofundum sp. MAR08-339",
            common_name=None,
            anticodon="CCG",
            lineage="Archaea; Euryarchaeota; Diaforarchaea group; DHVE2 group; Aciduliprofundum; unclassified Aciduliprofundum; Aciduliprofundum sp. MAR08-339",
            gene="tRNA-Arg-CCG-1-1",
            optional_id="tRNA-Arg-CCG-1-1",
            product="tRNA-Arg (CCG)",
            parent_accession="CP003168.1",
            description="Aciduliprofundum sp. MAR08-339 tRNA Arginine with anticodon CCG",
            mol_type="genomic DNA",
            location_start=1,
            location_end=123,
            gene_synonyms=["chr.trna10-ArgCCG"],
        )
    )
| apache-2.0 |
mrquim/mrquimrepo | repo/script.module.youtube.dl/lib/youtube_dl/extractor/amcnetworks.py | 24 | 4634 | # coding: utf-8
from __future__ import unicode_literals
from .theplatform import ThePlatformIE
from ..utils import (
update_url_query,
parse_age_limit,
int_or_none,
)
class AMCNetworksIE(ThePlatformIE):
    """Extractor for AMC Networks sites (amc, bbcamerica, ifc, wetv).

    Pages embed a ThePlatform media URL; metadata, formats and (when the
    page flags auth) an Adobe Pass token are resolved via ThePlatform.
    """
    _VALID_URL = r'https?://(?:www\.)?(?:amc|bbcamerica|ifc|wetv)\.com/(?:movies|shows(?:/[^/]+)+)/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://www.ifc.com/shows/maron/season-04/episode-01/step-1',
        'md5': '',
        'info_dict': {
            'id': 's3MX01Nl4vPH',
            'ext': 'mp4',
            'title': 'Maron - Season 4 - Step 1',
            'description': 'In denial about his current situation, Marc is reluctantly convinced by his friends to enter rehab. Starring Marc Maron and Constance Zimmer.',
            'age_limit': 17,
            'upload_date': '20160505',
            'timestamp': 1462468831,
            'uploader': 'AMCN',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'skip': 'Requires TV provider accounts',
    }, {
        'url': 'http://www.bbcamerica.com/shows/the-hunt/full-episodes/season-1/episode-01-the-hardest-challenge',
        'only_matching': True,
    }, {
        'url': 'http://www.amc.com/shows/preacher/full-episodes/season-01/episode-00/pilot',
        'only_matching': True,
    }, {
        'url': 'http://www.wetv.com/shows/million-dollar-matchmaker/season-01/episode-06-the-dumped-dj-and-shallow-hal',
        'only_matching': True,
    }, {
        'url': 'http://www.ifc.com/movies/chaos',
        'only_matching': True,
    }, {
        'url': 'http://www.bbcamerica.com/shows/doctor-who/full-episodes/the-power-of-the-daleks/episode-01-episode-1-color-version',
        'only_matching': True,
    }, {
        'url': 'http://www.wetv.com/shows/mama-june-from-not-to-hot/full-episode/season-01/thin-tervention',
        'only_matching': True,
    }, {
        'url': 'http://www.wetv.com/shows/la-hair/videos/season-05/episode-09-episode-9-2/episode-9-sneak-peek-3',
        'only_matching': True,
    }]
    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        query = {
            'mbr': 'true',
            'manifest': 'm3u',
        }
        # the page exposes the ThePlatform link URL as a JS variable
        media_url = self._search_regex(
            r'window\.platformLinkURL\s*=\s*[\'"]([^\'"]+)',
            webpage, 'media url')
        theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
            r'link\.theplatform\.com/s/([^?]+)',
            media_url, 'theplatform_path'), display_id)
        info = self._parse_theplatform_metadata(theplatform_metadata)
        video_id = theplatform_metadata['pid']
        title = theplatform_metadata['title']
        rating = theplatform_metadata['ratings'][0]['rating']
        auth_required = self._search_regex(
            r'window\.authRequired\s*=\s*(true|false);',
            webpage, 'auth required')
        if auth_required == 'true':
            # TV-provider protected content: obtain an Adobe Pass token
            requestor_id = self._search_regex(
                r'window\.requestor_id\s*=\s*[\'"]([^\'"]+)',
                webpage, 'requestor id')
            resource = self._get_mvpd_resource(
                requestor_id, title, video_id, rating)
            query['auth'] = self._extract_mvpd_auth(
                url, video_id, requestor_id, resource)
        media_url = update_url_query(media_url, query)
        formats, subtitles = self._extract_theplatform_smil(
            media_url, video_id)
        self._sort_formats(formats)
        info.update({
            'id': video_id,
            'subtitles': subtitles,
            'formats': formats,
            # NOTE(review): parse_age_limit is applied to its own output
            # here — looks redundant; confirm intent before simplifying.
            'age_limit': parse_age_limit(parse_age_limit(rating)),
        })
        # series/season/episode details live under a site-specific XML namespace
        ns_keys = theplatform_metadata.get('$xmlns', {}).keys()
        if ns_keys:
            ns = list(ns_keys)[0]
            series = theplatform_metadata.get(ns + '$show')
            season_number = int_or_none(
                theplatform_metadata.get(ns + '$season'))
            episode = theplatform_metadata.get(ns + '$episodeTitle')
            episode_number = int_or_none(
                theplatform_metadata.get(ns + '$episode'))
            if season_number:
                title = 'Season %d - %s' % (season_number, title)
            if series:
                title = '%s - %s' % (series, title)
            info.update({
                'title': title,
                'series': series,
                'season_number': season_number,
                'episode': episode,
                'episode_number': episode_number,
            })
        return info
| gpl-2.0 |
bavardage/statsmodels | tools/github_stats.py | 3 | 7203 | #!/usr/bin/env python
"""Simple tools to query github.com and gather stats about issues.
Copied from IPython 732be29
https://github.com/ipython/ipython/blob/master/tools/github_stats.py
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import print_function
import json
import re
import sys
from datetime import datetime, timedelta
from subprocess import check_output
from gh_api import get_paged_request, make_auth_header, get_pull_request
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
# Timestamp format used by the Github API, and its max page size.
ISO8601 = "%Y-%m-%dT%H:%M:%SZ"
PER_PAGE = 100
#-----------------------------------------------------------------------------
# Functions
#-----------------------------------------------------------------------------
def get_issues(project="statsmodels/statsmodels", state="closed", pulls=False):
    """Get a list of the issues (or pull requests) from the Github API."""
    which = 'pulls' if pulls else 'issues'
    base = "https://api.github.com/repos/%s/%s?state=%s&per_page=%i"
    url = base % (project, which, state, PER_PAGE)
    return get_paged_request(url, headers=make_auth_header())
def round_hour(dt):
    """Truncate *dt* down to the start of its hour."""
    return dt.replace(minute=0, second=0, microsecond=0)
def _parse_datetime(s):
"""Parse dates in the format returned by the Github API."""
if s:
return datetime.strptime(s, ISO8601)
else:
return datetime.fromtimestamp(0)
def issues2dict(issues):
    """Convert a list of issues to a dict, keyed by issue number."""
    return dict((issue['number'], issue) for issue in issues)
def is_pull_request(issue):
    """Return True if the given issue carries a pull-request URL."""
    html_url = issue.get('pull_request', {}).get('html_url', None)
    return bool(html_url)
def split_pulls(all_issues, project="statsmodels/statsmodels"):
    """Split closed issues into (non-PR issues, full pull-request objects)."""
    pulls, issues = [], []
    for issue in all_issues:
        if not is_pull_request(issue):
            issues.append(issue)
            continue
        # PRs need an extra fetch to get the full pull-request record
        pulls.append(get_pull_request(project, issue['number'], auth=True))
    return issues, pulls
def issues_closed_since(period=timedelta(days=365), project="statsmodels/statsmodels", pulls=False):
    """Return issues (or PRs) closed after a cutoff.

    *period* is either an absolute datetime, or a timedelta measured back
    from the present.
    """
    kind = 'pulls' if pulls else 'issues'
    if isinstance(period, timedelta):
        since = round_hour(datetime.utcnow() - period)
    else:
        since = period
    url = "https://api.github.com/repos/%s/%s?state=closed&sort=updated&since=%s&per_page=%i" % (project, kind, since.strftime(ISO8601), PER_PAGE)
    allclosed = get_paged_request(url, headers=make_auth_header())
    recent = [item for item in allclosed
              if _parse_datetime(item['closed_at']) > since]
    if not pulls:
        return [item for item in recent if not is_pull_request(item)]
    merged = [pr for pr in recent if _parse_datetime(pr['merged_at']) > since]
    # drop PRs whose base branch is not master (backports)
    return [pr for pr in merged if pr['base']['ref'] == 'master']
def sorted_by_field(issues, field='closed_at', reverse=False):
    """Return *issues* ordered by the given field (closing date by default)."""
    return sorted(issues, reverse=reverse, key=lambda issue: issue[field])
def report(issues, show_urls=False):
    """Print a one-line summary (number and title) for each issue."""
    # titles may contain unicode, so encode everything before printing
    for issue in issues:
        title = issue['title'].encode('utf-8')
        if show_urls:
            role = 'ghpull' if 'merged_at' in issue else 'ghissue'
            print('* :%s:`%d`: %s' % (role, issue['number'], title))
        else:
            print('* %d: %s' % (issue['number'], title))
#-----------------------------------------------------------------------------
# Main script
#-----------------------------------------------------------------------------
if __name__ == "__main__":
    # Entry point: summarize issues/PRs closed since a tag or a day count
    # given on the command line, printing a reST report for release notes.
    # deal with unicode
    import codecs
    sys.stdout = codecs.getwriter('utf8')(sys.stdout)
    # Whether to add reST urls for all issues in printout.
    show_urls = True
    # By default, search one month back
    tag = None
    if len(sys.argv) > 1:
        try:
            days = int(sys.argv[1])
        except:
            # NOTE(review): bare except — any non-integer argv[1] (including
            # typos) is silently treated as a tag name.
            tag = sys.argv[1]
    else:
        # no argument: measure since the most recent git tag
        tag = check_output(['git', 'describe', '--abbrev=0']).strip()
    if tag:
        # resolve the tag's commit date, then shift by its UTC offset so
        # `since` is comparable with the API's UTC timestamps
        cmd = ['git', 'log', '-1', '--format=%ai', tag]
        tagday, tz = check_output(cmd).strip().rsplit(' ', 1)
        since = datetime.strptime(tagday, "%Y-%m-%d %H:%M:%S")
        h = int(tz[1:3])
        m = int(tz[3:])
        td = timedelta(hours=h, minutes=m)
        if tz[0] == '-':
            since += td
        else:
            since -= td
    else:
        since = datetime.utcnow() - timedelta(days=days)
    since = round_hour(since)
    print("fetching GitHub stats since %s (tag: %s)" % (since, tag), file=sys.stderr)
    # turn off to play interactively without redownloading, use %run -i
    if 1:
        issues = issues_closed_since(since, pulls=False)
        pulls = issues_closed_since(since, pulls=True)
    # For regular reports, it's nice to show them in reverse chronological order
    issues = sorted_by_field(issues, reverse=True)
    pulls = sorted_by_field(pulls, reverse=True)
    n_issues, n_pulls = map(len, (issues, pulls))
    n_total = n_issues + n_pulls
    # Print summary report we can directly include into release notes.
    print()
    since_day = since.strftime("%Y/%m/%d")
    today = datetime.today().strftime("%Y/%m/%d")
    print("GitHub stats for %s - %s (tag: %s)" % (since_day, today, tag))
    print()
    print("These lists are automatically generated, and may be incomplete or contain duplicates.")
    print()
    if tag:
        # print git info, in addition to GitHub info:
        since_tag = tag+'..'
        cmd = ['git', 'log', '--oneline', since_tag]
        ncommits = len(check_output(cmd).splitlines())
        author_cmd = ['git', 'log', "--format='* %aN'", since_tag]
        all_authors = check_output(author_cmd).decode('utf-8', 'replace').splitlines()
        unique_authors = sorted(set(all_authors), key=lambda s: s.lower())
        print("The following %i authors contributed %i commits." % (len(unique_authors), ncommits))
        print()
        print('\n'.join(unique_authors))
        print()
    print()
    print("We closed a total of %d issues, %d pull requests and %d regular issues;\n"
        "this is the full list (generated with the script \n"
        ":file:`tools/github_stats.py`):" % (n_total, n_pulls, n_issues))
    print()
    print('Pull Requests (%d):\n' % n_pulls)
    report(pulls, show_urls)
    print()
    print('Issues (%d):\n' % n_issues)
    report(issues, show_urls)
| bsd-3-clause |
jmartinezchaine/OpenERP | openerp/addons/mrp/report/mrp_report.py | 9 | 3791 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields,osv
# Read-only reporting model backed by a PostgreSQL view (no table is
# created: _auto = False, and init() issues CREATE OR REPLACE VIEW).
# Aggregates planned hours/cycles per work center and per planned date.
class report_workcenter_load(osv.osv):
    _name="report.workcenter.load"
    _description="Work Center Load"
    _auto = False          # backing storage is the SQL view below
    _log_access = False    # view rows have no create/write audit columns
    _columns = {
        # NOTE(review): label says 'Week' but the view groups by day
        # (to_char ... 'YYYY:mm:dd') — confirm intended granularity.
        'name': fields.char('Week', size=64, required=True),
        'workcenter_id': fields.many2one('mrp.workcenter', 'Work Center', required=True),
        'cycle': fields.float('Nbr of cycle'),
        'hour': fields.float('Nbr of hour'),
    }
    def init(self, cr):
        """(Re)create the SQL view this model reads from."""
        cr.execute("""
            create or replace view report_workcenter_load as (
                SELECT
                    min(wl.id) as id,
                    to_char(p.date_planned,'YYYY:mm:dd') as name,
                    SUM(wl.hour) AS hour,
                    SUM(wl.cycle) AS cycle,
                    wl.workcenter_id as workcenter_id
                FROM
                    mrp_production_workcenter_line wl
                    LEFT JOIN mrp_production p
                        ON p.id = wl.production_id
                GROUP BY
                    wl.workcenter_id,
                    to_char(p.date_planned,'YYYY:mm:dd')
            )""")
# instantiate to register the model with the OpenERP ORM
report_workcenter_load()
# Read-only reporting model backed by a PostgreSQL view (_auto = False).
# Sums the stock value entering minus leaving internal locations for
# pending moves, grouped by ISO week ('YYYY:IW').
class report_mrp_inout(osv.osv):
    _name="report.mrp.inout"
    _description="Stock value variation"
    _auto = False          # backing storage is the SQL view below
    _log_access = False
    _rec_name = 'date'     # display records by their week string
    _columns = {
        'date': fields.char('Week', size=64, required=True),
        'value': fields.float('Stock value', required=True, digits=(16,2)),
    }
    def init(self, cr):
        """(Re)create the SQL view this model reads from."""
        # Value is priced at product_template.standard_price; a move counts
        # positive when its destination is internal, negative when its
        # source is internal. Only not-yet-done moves are included.
        cr.execute("""
            create or replace view report_mrp_inout as (
                select
                    min(sm.id) as id,
                    to_char(sm.date,'YYYY:IW') as date,
                    sum(case when (sl.usage='internal') then
                        pt.standard_price * sm.product_qty
                    else
                        0.0
                    end - case when (sl2.usage='internal') then
                        pt.standard_price * sm.product_qty
                    else
                        0.0
                    end) as value
                from
                    stock_move sm
                    left join product_product pp
                        on (pp.id = sm.product_id)
                    left join product_template pt
                        on (pt.id = pp.product_tmpl_id)
                    left join stock_location sl
                        on ( sl.id = sm.location_id)
                    left join stock_location sl2
                        on ( sl2.id = sm.location_dest_id)
                where
                    sm.state in ('waiting','confirmed','assigned')
                group by
                    to_char(sm.date,'YYYY:IW')
            )""")
# instantiate to register the model with the OpenERP ORM
report_mrp_inout()
| agpl-3.0 |
msrb/freeipa | ipalib/plugins/aci.py | 2 | 34228 | # Authors:
# Rob Crittenden <rcritten@redhat.com>
# Pavel Zuna <pzuna@redhat.com>
#
# Copyright (C) 2009 Red Hat
# see file 'COPYING' for use and warranty information
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Directory Server Access Control Instructions (ACIs)
ACIs are used to allow or deny access to information. This module is
currently designed to allow, not deny, access.
The aci commands are designed to grant permissions that allow updating
existing entries or adding or deleting new ones. The goal of the ACIs
that ship with IPA is to provide a set of low-level permissions that
grant access to special groups called taskgroups. These low-level
permissions can be combined into roles that grant broader access. These
roles are another type of group, roles.
For example, if you have taskgroups that allow adding and modifying users you
could create a role, useradmin. You would assign users to the useradmin
role to allow them to do the operations defined by the taskgroups.
You can create ACIs that delegate permission so users in group A can write
attributes on group B.
The type option is a map that applies to all entries in the users, groups or
host location. It is primarily designed to be used when granting add
permissions (to write new entries).
An ACI consists of three parts:
1. target
2. permissions
3. bind rules
The target is a set of rules that define which LDAP objects are being
targeted. This can include a list of attributes, an area of that LDAP
tree or an LDAP filter.
The targets include:
- attrs: list of attributes affected
- type: an object type (user, group, host, service, etc)
- memberof: members of a group
- targetgroup: grant access to modify a specific group. This is primarily
designed to enable users to add or remove members of a specific group.
- filter: A legal LDAP filter used to narrow the scope of the target.
- subtree: Used to apply a rule across an entire set of objects. For example,
to allow adding users you need to grant "add" permission to the subtree
ldap://uid=*,cn=users,cn=accounts,dc=example,dc=com. The subtree option
is a fail-safe for objects that may not be covered by the type option.
The permissions define what the ACI is allowed to do, and are one or
more of:
1. write - write one or more attributes
2. read - read one or more attributes
3. add - add a new entry to the tree
4. delete - delete an existing entry
5. all - all permissions are granted
Note the distinction between attributes and entries. The permissions are
independent, so being able to add a user does not mean that the user will
be editable.
The bind rule defines who this ACI grants permissions to. The LDAP server
allows this to be any valid LDAP entry but we encourage the use of
taskgroups so that the rights can be easily shared through roles.
For a more thorough description of access controls see
http://www.redhat.com/docs/manuals/dir-server/ag/8.0/Managing_Access_Control.html
EXAMPLES:
NOTE: ACIs are now added via the permission plugin. These examples are to
demonstrate how the various options work but this is done via the permission
command-line now (see last example).
Add an ACI so that the group "secretaries" can update the address on any user:
ipa group-add --desc="Office secretaries" secretaries
ipa aci-add --attrs=streetAddress --memberof=ipausers --group=secretaries --permissions=write --prefix=none "Secretaries write addresses"
Show the new ACI:
ipa aci-show --prefix=none "Secretaries write addresses"
Add an ACI that allows members of the "addusers" permission to add new users:
ipa aci-add --type=user --permission=addusers --permissions=add --prefix=none "Add new users"
Add an ACI that allows members of the editors manage members of the admins group:
ipa aci-add --permissions=write --attrs=member --targetgroup=admins --group=editors --prefix=none "Editors manage admins"
Add an ACI that allows members of the admins group to manage the street and zip code of those in the editors group:
ipa aci-add --permissions=write --memberof=editors --group=admins --attrs=street --attrs=postalcode --prefix=none "admins edit the address of editors"
Add an ACI that allows the admins group manage the street and zipcode of those who work for the boss:
ipa aci-add --permissions=write --group=admins --attrs=street --attrs=postalcode --filter="(manager=uid=boss,cn=users,cn=accounts,dc=example,dc=com)" --prefix=none "Edit the address of those who work for the boss"
Add an entirely new kind of record to IPA that isn't covered by any of the --type options, creating a permission:
ipa permission-add --permissions=add --subtree="cn=*,cn=orange,cn=accounts,dc=example,dc=com" --desc="Add Orange Entries" add_orange
The show command shows the raw 389-ds ACI.
IMPORTANT: When modifying the target attributes of an existing ACI you
must include all existing attributes as well. When doing an aci-mod the
targetattr REPLACES the current attributes, it does not add to them.
"""
from copy import deepcopy
from ipalib import api, crud, errors
from ipalib import Object
from ipalib import Flag, Str, StrEnum, DNParam
from ipalib.aci import ACI
from ipalib import output
from ipalib import _, ngettext
from ipalib.plugable import Registry
from ipalib.plugins.baseldap import gen_pkey_only_option, pkey_to_value
from ipapython.ipa_log_manager import *
from ipapython.dn import DN
# Plugin registry used by the @register() decorators below.
register = Registry()
# Separator between an ACI's prefix (permission/delegation/...) and its name.
ACI_NAME_PREFIX_SEP = ":"
# Maps the --type option to the LDAP target URI covering that object class.
_type_map = {
    'user': 'ldap:///' + str(DN(('uid', '*'), api.env.container_user, api.env.basedn)),
    'group': 'ldap:///' + str(DN(('cn', '*'), api.env.container_group, api.env.basedn)),
    'host': 'ldap:///' + str(DN(('fqdn', '*'), api.env.container_host, api.env.basedn)),
    'hostgroup': 'ldap:///' + str(DN(('cn', '*'), api.env.container_hostgroup, api.env.basedn)),
    'service': 'ldap:///' + str(DN(('krbprincipalname', '*'), api.env.container_service, api.env.basedn)),
    'netgroup': 'ldap:///' + str(DN(('ipauniqueid', '*'), api.env.container_netgroup, api.env.basedn)),
    'dnsrecord': 'ldap:///' + str(DN(('idnsname', '*'), api.env.container_dns, api.env.basedn)),
}
# Permission values accepted by --permissions (see validate_permissions).
_valid_permissions_values = [
    u'read', u'write', u'add', u'delete', u'all'
]
# Recognized ACI name prefixes; u'none' means "no prefix".
_valid_prefix_values = (
    u'permission', u'delegation', u'selfservice', u'none'
)
class ListOfACI(output.Output):
    type = (list, tuple)
    doc = _('A list of ACI values')
    def validate(self, cmd, entries):
        # Every element must be a unicode ACI string.
        assert isinstance(entries, self.type)
        for idx, item in enumerate(entries):
            if isinstance(item, unicode):
                continue
            raise TypeError(
                output.emsg % (cmd.name, self.__class__.__name__,
                               self.name, idx, unicode, type(item), item))
# Standard output spec for commands that return a single raw ACI string.
aci_output = (
    output.Output('result', unicode, 'A string representing the ACI'),
    output.value,
    output.summary,
)
def _make_aci_name(aciprefix, aciname):
"""
Given a name and a prefix construct an ACI name.
"""
if aciprefix == u"none":
return aciname
return aciprefix + ACI_NAME_PREFIX_SEP + aciname
def _parse_aci_name(aciname):
    """Split a raw ACI name into (prefix, name); prefix is u"none" if absent."""
    prefix, _sep, rest = aciname.partition(ACI_NAME_PREFIX_SEP)
    if rest:
        return (prefix, rest)
    # no prefix/name separator found -> the whole string is the name
    return (u"none", prefix)
def _group_from_memberof(memberof):
"""
Pull the group name out of a memberOf filter
"""
st = memberof.find('memberOf=')
if st == -1:
# We have a raw group name, use that
return api.Object['group'].get_dn(memberof)
en = memberof.find(')', st)
return memberof[st+9:en]
def _make_aci(ldap, current, aciname, kw):
    """
    Given a name and a set of keywords construct an ACI.

    :param ldap: ldap2 backend connection, used only to sanity-check
        --filter values with a search
    :param current: existing raw ACI string to start from, or None
    :param aciname: the ACI name without its prefix
    :param kw: option dict (aciprefix, permissions, bind target, ...)

    Raises ValidationError for missing or mutually-exclusive options and
    NotFound for references to nonexistent groups.
    """
    # Do some quick and dirty validation.
    checked_args=['type','filter','subtree','targetgroup','attrs','memberof']
    valid={}
    for arg in checked_args:
        if arg in kw:
            # present but explicitly None still counts as "not given"
            valid[arg]=kw[arg] is not None
        else:
            valid[arg]=False
    if valid['type'] + valid['filter'] + valid['subtree'] + valid['targetgroup'] > 1:
        raise errors.ValidationError(name='target', error=_('type, filter, subtree and targetgroup are mutually exclusive'))
    if 'aciprefix' not in kw:
        raise errors.ValidationError(name='aciprefix', error=_('ACI prefix is required'))
    if sum(valid.values()) == 0:
        raise errors.ValidationError(name='target', error=_('at least one of: type, filter, subtree, targetgroup, attrs or memberof are required'))
    if valid['filter'] + valid['memberof'] > 1:
        raise errors.ValidationError(name='target', error=_('filter and memberof are mutually exclusive'))
    # Exactly one bind target (who the ACI grants to) must be given.
    group = 'group' in kw
    permission = 'permission' in kw
    selfaci = 'selfaci' in kw and kw['selfaci'] == True
    if group + permission + selfaci > 1:
        raise errors.ValidationError(name='target', error=_('group, permission and self are mutually exclusive'))
    elif group + permission + selfaci == 0:
        raise errors.ValidationError(name='target', error=_('One of group, permission or self is required'))
    # Grab the dn of the group we're granting access to. This group may be a
    # permission or a user group.
    entry_attrs = []
    if permission:
        # This will raise NotFound if the permission doesn't exist
        try:
            entry_attrs = api.Command['permission_show'](kw['permission'])['result']
        except errors.NotFound as e:
            # With test mode requested, fabricate the dn instead of failing
            if 'test' in kw and not kw.get('test'):
                raise e
            else:
                entry_attrs = {
                    'dn': DN(('cn', kw['permission']),
                             api.env.container_permission, api.env.basedn),
                }
    elif group:
        # Not so friendly with groups. This will raise
        try:
            group_dn = api.Object['group'].get_dn_if_exists(kw['group'])
            entry_attrs = {'dn': group_dn}
        except errors.NotFound:
            raise errors.NotFound(reason=_("Group '%s' does not exist") % kw['group'])
    try:
        # Build the ACI object, then layer on the requested targets.
        a = ACI(current)
        a.name = _make_aci_name(kw['aciprefix'], aciname)
        a.permissions = kw['permissions']
        if 'selfaci' in kw and kw['selfaci']:
            a.set_bindrule('userdn = "ldap:///self"')
        else:
            dn = entry_attrs['dn']
            a.set_bindrule('groupdn = "ldap:///%s"' % dn)
        if valid['attrs']:
            a.set_target_attr(kw['attrs'])
        if valid['memberof']:
            # verify the group exists before embedding it in the filter
            try:
                api.Object['group'].get_dn_if_exists(kw['memberof'])
            except errors.NotFound:
                api.Object['group'].handle_not_found(kw['memberof'])
            groupdn = _group_from_memberof(kw['memberof'])
            a.set_target_filter('memberOf=%s' % groupdn)
        if valid['filter']:
            # Test the filter by performing a simple search on it. The
            # filter is considered valid if either it returns some entries
            # or it returns no entries, otherwise we let whatever exception
            # happened be raised.
            if kw['filter'] in ('', None, u''):
                raise errors.BadSearchFilter(info=_('empty filter'))
            try:
                entries = ldap.find_entries(filter=kw['filter'])
            except errors.NotFound:
                pass
            a.set_target_filter(kw['filter'])
        if valid['type']:
            target = _type_map[kw['type']]
            a.set_target(target)
        if valid['targetgroup']:
            # Purposely no try here so we'll raise a NotFound
            group_dn = api.Object['group'].get_dn_if_exists(kw['targetgroup'])
            target = 'ldap:///%s' % group_dn
            a.set_target(target)
        if valid['subtree']:
            # See if the subtree is a full URI
            target = kw['subtree']
            if not target.startswith('ldap:///'):
                target = 'ldap:///%s' % target
            a.set_target(target)
    except SyntaxError as e:
        # ACI() parsing errors surface as ValidationError to the caller
        raise errors.ValidationError(name='target', error=_('Syntax Error: %(error)s') % dict(error=str(e)))
    return a
def _aci_to_kw(ldap, a, test=False, pkey_only=False):
    """Convert an ACI into its equivalent keywords.

    This is used for the modify operation so we can merge the
    incoming kw and existing ACI and pass the result to
    _make_aci().

    :param ldap: ldap2 backend, used to resolve the bind group's entry
    :param a: parsed ACI object
    :param test: when True, fabricate a placeholder permission entry for
        bind groups that do not exist
    :param pkey_only: return only the prefix/name keys
    """
    kw = {}
    kw['aciprefix'], kw['aciname'] = _parse_aci_name(a.name)
    if pkey_only:
        return kw
    kw['permissions'] = tuple(a.permissions)
    if 'targetattr' in a.target:
        kw['attrs'] = tuple(unicode(e)
                            for e in a.target['targetattr']['expression'])
    if 'targetfilter' in a.target:
        # a memberOf-style filter maps back to --memberof, anything else
        # maps back to --filter
        target = a.target['targetfilter']['expression']
        if target.startswith('(memberOf=') or target.startswith('memberOf='):
            (junk, memberof) = target.split('memberOf=', 1)
            memberof = DN(memberof)
            kw['memberof'] = memberof['cn']
        else:
            kw['filter'] = unicode(target)
    if 'target' in a.target:
        target = a.target['target']['expression']
        found = False
        # reverse-map a known type URI back to its --type keyword
        for k in _type_map.keys():
            if _type_map[k] == target:
                kw['type'] = unicode(k)
                found = True
                break;
        if not found:
            if target.startswith('('):
                kw['filter'] = unicode(target)
            else:
                # See if the target is a group. If so we set the
                # targetgroup attr, otherwise we consider it a subtree
                try:
                    targetdn = DN(target.replace('ldap:///',''))
                except ValueError as e:
                    raise errors.ValidationError(name='subtree', error=_("invalid DN (%s)") % e.message)
                if targetdn.endswith(DN(api.env.container_group, api.env.basedn)):
                    kw['targetgroup'] = targetdn[0]['cn']
                else:
                    kw['subtree'] = unicode(target)
    # Reverse-map the bind rule: self, anyone, permission or group.
    groupdn = a.bindrule['expression']
    groupdn = groupdn.replace('ldap:///','')
    if groupdn == 'self':
        kw['selfaci'] = True
    elif groupdn == 'anyone':
        pass
    else:
        groupdn = DN(groupdn)
        if len(groupdn) and groupdn[0].attr == 'cn':
            dn = DN()
            entry = ldap.make_entry(dn)
            try:
                entry = ldap.get_entry(groupdn, ['cn'])
            except errors.NotFound as e:
                # FIXME, use real name here
                if test:
                    dn = DN(('cn', 'test'), api.env.container_permission,
                            api.env.basedn)
                    entry = ldap.make_entry(dn, {'cn': [u'test']})
            # entries under the permission container map to --permission,
            # everything else with a cn maps to --group
            if api.env.container_permission in entry.dn:
                kw['permission'] = entry['cn'][0]
            else:
                if 'cn' in entry:
                    kw['group'] = entry['cn'][0]
    return kw
def _convert_strings_to_acis(acistrs):
acis = []
for a in acistrs:
try:
acis.append(ACI(a))
except SyntaxError as e:
root_logger.warning("Failed to parse: %s" % a)
return acis
def _find_aci_by_name(acis, aciprefix, aciname):
    """Return the ACI whose full name matches case-insensitively, or raise
    NotFound."""
    wanted = _make_aci_name(aciprefix, aciname).lower()
    match = next((candidate for candidate in acis
                  if candidate.name.lower() == wanted), None)
    if match is not None:
        return match
    raise errors.NotFound(reason=_('ACI with name "%s" not found') % aciname)
def validate_permissions(ugettext, perm):
    """Return an error message when *perm* is not a recognized permission,
    None when it is valid."""
    normalized = perm.strip().lower()
    if normalized in _valid_permissions_values:
        return None
    return '"%s" is not a valid permission' % normalized
def _normalize_permissions(perm):
valid_permissions = []
perm = perm.strip().lower()
if perm not in valid_permissions:
valid_permissions.append(perm)
return ','.join(valid_permissions)
# Shared --prefix option; every aci command takes it so ACI names can be
# namespaced by the plugin that manages them (see _make_aci_name).
_prefix_option = StrEnum('aciprefix',
    cli_name='prefix',
    label=_('ACI prefix'),
    doc=_('Prefix used to distinguish ACI types ' \
        '(permission, delegation, selfservice, none)'),
    values=_valid_prefix_values,
)
@register()
class aci(Object):
    """
    ACI object.

    ACIs are not stored as their own LDAP entries; each parameter below is
    parsed out of the raw 389-ds aci attribute values on the container
    entry, hence the 'virtual_attribute' flag on every one of them.
    """
    NO_CLI = True
    label = _('ACIs')
    takes_params = (
        Str('aciname',
            cli_name='name',
            label=_('ACI name'),
            primary_key=True,
            flags=('virtual_attribute',),
        ),
        # Bind targets: exactly one of permission, group or self is
        # required by _make_aci().
        Str('permission?',
            cli_name='permission',
            label=_('Permission'),
            doc=_('Permission ACI grants access to'),
            flags=('virtual_attribute',),
        ),
        Str('group?',
            cli_name='group',
            label=_('User group'),
            doc=_('User group ACI grants access to'),
            flags=('virtual_attribute',),
        ),
        Str('permissions+', validate_permissions,
            cli_name='permissions',
            label=_('Permissions'),
            doc=_('Permissions to grant' \
                '(read, write, add, delete, all)'),
            csv=True,
            normalizer=_normalize_permissions,
            flags=('virtual_attribute',),
        ),
        Str('attrs*',
            cli_name='attrs',
            label=_('Attributes to which the permission applies'),
            doc=_('Attributes'),
            csv=True,
            flags=('virtual_attribute',),
        ),
        # Target options: type/filter/subtree/targetgroup are mutually
        # exclusive (enforced in _make_aci).
        StrEnum('type?',
            cli_name='type',
            label=_('Type'),
            doc=_('type of IPA object (user, group, host, hostgroup, service, netgroup)'),
            values=(u'user', u'group', u'host', u'service', u'hostgroup', u'netgroup', u'dnsrecord'),
            flags=('virtual_attribute',),
        ),
        Str('memberof?',
            cli_name='memberof',
            label=_('Member of'),  # FIXME: Does this label make sense?
            doc=_('Member of a group'),
            flags=('virtual_attribute',),
        ),
        Str('filter?',
            cli_name='filter',
            label=_('Filter'),
            doc=_('Legal LDAP filter (e.g. ou=Engineering)'),
            flags=('virtual_attribute',),
        ),
        Str('subtree?',
            cli_name='subtree',
            label=_('Subtree'),
            doc=_('Subtree to apply ACI to'),
            flags=('virtual_attribute',),
        ),
        Str('targetgroup?',
            cli_name='targetgroup',
            label=_('Target group'),
            doc=_('Group to apply ACI to'),
            flags=('virtual_attribute',),
        ),
        Flag('selfaci?',
            cli_name='self',
            label=_('Target your own entry (self)'),
            doc=_('Apply ACI to your own entry (self)'),
            flags=('virtual_attribute',),
        ),
    )
@register()
class aci_add(crud.Create):
    """
    Create new ACI.
    """
    NO_CLI = True
    msg_summary = _('Created ACI "%(value)s"')
    takes_options = (
        _prefix_option,
        Flag('test?',
             doc=_('Test the ACI syntax but don\'t write anything'),
             default=False,
        ),
    )
    def execute(self, aciname, **kw):
        """
        Execute the aci-create operation.

        Returns the entry as it will be created in LDAP.

        :param aciname: The name of the ACI being added.
        :param kw: Keyword arguments for the other LDAP attributes.
        """
        assert 'aciname' not in kw
        ldap = self.api.Backend.ldap2
        newaci = _make_aci(ldap, None, aciname, kw)
        entry = ldap.get_entry(self.api.env.basedn, ['aci'])
        existing = _convert_strings_to_acis(entry.get('aci', []))
        # FIXME: add check for permission_group = permission_group
        if any(old.isequal(newaci) or old.name == newaci.name
               for old in existing):
            raise errors.DuplicateEntry()
        newaci_str = unicode(newaci)
        entry.setdefault('aci', []).append(newaci_str)
        dry_run = kw.get('test', False)
        if not dry_run:
            ldap.update_entry(entry)
        if kw.get('raw', False):
            result = dict(aci=unicode(newaci_str))
        else:
            result = _aci_to_kw(ldap, newaci, dry_run)
        return dict(
            result=result,
            value=pkey_to_value(aciname, kw),
        )
@register()
class aci_del(crud.Delete):
    """
    Delete ACI.
    """
    NO_CLI = True
    has_output = output.standard_boolean
    msg_summary = _('Deleted ACI "%(value)s"')
    takes_options = (_prefix_option,)
    def execute(self, aciname, aciprefix, **options):
        """
        Execute the aci-delete operation.

        :param aciname: The name of the ACI being deleted.
        :param aciprefix: The ACI prefix.
        """
        ldap = self.api.Backend.ldap2
        entry = ldap.get_entry(self.api.env.basedn, ['aci'])
        acistrs = entry.get('aci', [])
        target = _find_aci_by_name(_convert_strings_to_acis(acistrs),
                                   aciprefix, aciname)
        # Drop the first raw string whose parsed form matches the target.
        for idx, raw in enumerate(acistrs):
            if target.isequal(ACI(raw)):
                del acistrs[idx]
                break
        entry['aci'] = acistrs
        ldap.update_entry(entry)
        return dict(
            result=True,
            value=pkey_to_value(aciname, options),
        )
@register()
class aci_mod(crud.Update):
    """
    Modify ACI.
    """
    NO_CLI = True
    has_output_params = (
        Str('aci',
            label=_('ACI'),
        ),
    )
    takes_options = (_prefix_option,)
    internal_options = ['rename']
    msg_summary = _('Modified ACI "%(value)s"')
    def execute(self, aciname, **kw):
        """Modify an existing ACI by delete-then-re-add, restoring the
        original on failure."""
        aciprefix = kw['aciprefix']
        ldap = self.api.Backend.ldap2
        entry = ldap.get_entry(self.api.env.basedn, ['aci'])
        acis = _convert_strings_to_acis(entry.get('aci', []))
        aci = _find_aci_by_name(acis, aciprefix, aciname)
        # The strategy here is to convert the ACI we're updating back into
        # a series of keywords. Then we replace any keywords that have been
        # updated and convert that back into an ACI and write it out.
        oldkw = _aci_to_kw(ldap, aci)
        newkw = deepcopy(oldkw)
        if newkw.get('selfaci', False):
            # selfaci is set in aci_to_kw to True only if the target is self
            kw['selfaci'] = True
        newkw.update(kw)
        for acikw in (oldkw, newkw):
            acikw.pop('aciname', None)
        # _make_aci is what is run in aci_add and validates the input.
        # Do this before we delete the existing ACI.
        newaci = _make_aci(ldap, None, aciname, newkw)
        if aci.isequal(newaci):
            raise errors.EmptyModlist()
        # Not atomic: the old ACI is removed before the new one is written.
        self.api.Command['aci_del'](aciname, aciprefix=aciprefix)
        try:
            result = self.api.Command['aci_add'](aciname, **newkw)['result']
        except Exception as e:
            # ACI could not be added, try to restore the old deleted ACI and
            # report the ADD error back to user
            try:
                self.api.Command['aci_add'](aciname, **oldkw)
            except Exception:
                pass
            raise e
        if kw.get('raw', False):
            result = dict(aci=unicode(newaci))
        else:
            result = _aci_to_kw(ldap, newaci)
        return dict(
            result=result,
            value=pkey_to_value(aciname, kw),
        )
@register()
class aci_find(crud.Search):
    """
    Search for ACIs.
    Returns a list of ACIs
    EXAMPLES:
    To find all ACIs that apply directly to members of the group ipausers:
     ipa aci-find --memberof=ipausers
    To find all ACIs that grant add access:
     ipa aci-find --permissions=add
    Note that the find command only looks for the given text in the set of
    ACIs, it does not evaluate the ACIs to see if something would apply.
    For example, searching on memberof=ipausers will find all ACIs that
    have ipausers as a memberof. There may be other ACIs that apply to
    members of that group indirectly.
    """
    NO_CLI = True
    msg_summary = ngettext('%(count)d ACI matched', '%(count)d ACIs matched', 0)
    takes_options = (_prefix_option.clone_rename("aciprefix?", required=False),
                     gen_pkey_only_option("name"),)
    def execute(self, term, **kw):
        """Filter the parsed ACIs through each given search option in turn.

        Pattern used throughout: iterate a snapshot in ``acis`` while
        removing non-matches from ``results``, then (usually) refresh the
        snapshot with ``acis = list(results)``.
        NOTE(review): several sections below skip the refresh and/or call
        results.remove() without guarding ValueError, so combining certain
        options can operate on stale lists — confirm before relying on
        multi-option searches.
        """
        ldap = self.api.Backend.ldap2
        entry = ldap.get_entry(self.api.env.basedn, ['aci'])
        acis = _convert_strings_to_acis(entry.get('aci', []))
        results = []
        # substring match on the ACI name
        if term:
            term = term.lower()
            for a in acis:
                if a.name.lower().find(term) != -1 and a not in results:
                    results.append(a)
            acis = list(results)
        else:
            results = list(acis)
        if kw.get('aciname'):
            for a in acis:
                prefix, name = _parse_aci_name(a.name)
                if name != kw['aciname']:
                    results.remove(a)
            acis = list(results)
        if kw.get('aciprefix'):
            for a in acis:
                prefix, name = _parse_aci_name(a.name)
                if prefix != kw['aciprefix']:
                    results.remove(a)
            acis = list(results)
        if kw.get('attrs'):
            # every requested attr must appear in the ACI's targetattr set
            for a in acis:
                if not 'targetattr' in a.target:
                    results.remove(a)
                    continue
                alist1 = sorted(
                    [t.lower() for t in a.target['targetattr']['expression']]
                )
                alist2 = sorted([t.lower() for t in kw['attrs']])
                if len(set(alist1) & set(alist2)) != len(alist2):
                    results.remove(a)
            acis = list(results)
        if kw.get('permission'):
            try:
                self.api.Command['permission_show'](
                    kw['permission']
                )
            except errors.NotFound:
                pass
            else:
                # NOTE(review): entry.dn here is the search base entry, not
                # the permission's dn, and the permission_show result is
                # discarded — confirm this comparison is intended.
                for a in acis:
                    uri = 'ldap:///%s' % entry.dn
                    if a.bindrule['expression'] != uri:
                        results.remove(a)
                acis = list(results)
        if kw.get('permissions'):
            # every requested permission must be granted by the ACI
            for a in acis:
                alist1 = sorted(a.permissions)
                alist2 = sorted(kw['permissions'])
                if len(set(alist1) & set(alist2)) != len(alist2):
                    results.remove(a)
            acis = list(results)
        if kw.get('memberof'):
            try:
                dn = _group_from_memberof(kw['memberof'])
            except errors.NotFound:
                pass
            else:
                # NOTE(review): unlike the sections above, this one neither
                # refreshes acis afterwards nor guards results.remove().
                memberof_filter = '(memberOf=%s)' % dn
                for a in acis:
                    if 'targetfilter' in a.target:
                        targetfilter = a.target['targetfilter']['expression']
                        if targetfilter != memberof_filter:
                            results.remove(a)
                    else:
                        results.remove(a)
        if kw.get('type'):
            for a in acis:
                if 'target' in a.target:
                    target = a.target['target']['expression']
                else:
                    results.remove(a)
                    continue
                found = False
                for k in _type_map.keys():
                    if _type_map[k] == target and kw['type'] == k:
                        found = True
                        break;
                if not found:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass
        if kw.get('selfaci', False) is True:
            for a in acis:
                if a.bindrule['expression'] != u'ldap:///self':
                    try:
                        results.remove(a)
                    except ValueError:
                        pass
        if kw.get('group'):
            # match on the cn of the bind-rule group DN
            for a in acis:
                groupdn = a.bindrule['expression']
                groupdn = DN(groupdn.replace('ldap:///',''))
                try:
                    cn = groupdn[0]['cn']
                except (IndexError, KeyError):
                    cn = None
                if cn is None or cn != kw['group']:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass
        if kw.get('targetgroup'):
            # match targets that point at a group container entry
            for a in acis:
                found = False
                if 'target' in a.target:
                    target = a.target['target']['expression']
                    targetdn = DN(target.replace('ldap:///',''))
                    group_container_dn = DN(api.env.container_group, api.env.basedn)
                    if targetdn.endswith(group_container_dn):
                        try:
                            cn = targetdn[0]['cn']
                        except (IndexError, KeyError):
                            cn = None
                        if cn == kw['targetgroup']:
                            found = True
                if not found:
                    try:
                        results.remove(a)
                    except ValueError:
                        pass
        if kw.get('filter'):
            # normalize bare filters to parenthesized form before comparing
            if not kw['filter'].startswith('('):
                kw['filter'] = unicode('('+kw['filter']+')')
            # NOTE(review): results.remove() is unguarded here; an item
            # already removed by an earlier section would raise ValueError.
            for a in acis:
                if 'targetfilter' not in a.target or\
                    not a.target['targetfilter']['expression'] or\
                    a.target['targetfilter']['expression'] != kw['filter']:
                    results.remove(a)
        if kw.get('subtree'):
            for a in acis:
                if 'target' in a.target:
                    target = a.target['target']['expression']
                else:
                    results.remove(a)
                    continue
                if kw['subtree'].lower() != target.lower():
                    try:
                        results.remove(a)
                    except ValueError:
                        pass
        # serialize the surviving ACIs for output
        acis = []
        for result in results:
            if kw.get('raw', False):
                aci = dict(aci=unicode(result))
            else:
                aci = _aci_to_kw(ldap, result,
                                 pkey_only=kw.get('pkey_only', False))
            acis.append(aci)
        return dict(
            result=acis,
            count=len(acis),
            truncated=False,
        )
@register()
class aci_show(crud.Retrieve):
    """
    Display a single ACI given an ACI name.
    """
    NO_CLI = True
    has_output_params = (
        Str('aci',
            label=_('ACI'),
        ),
    )
    takes_options = (
        _prefix_option,
        DNParam('location?',
            label=_('Location of the ACI'),
        )
    )
    def execute(self, aciname, **kw):
        """
        Execute the aci-show operation.

        Returns the matching ACI, either raw or decomposed into keywords.

        :param aciname: The unprefixed name of the ACI to retrieve.
        :param kw: ``aciprefix`` (required) selects the ACI name prefix;
            ``location`` is the DN holding the ACI (defaults to the basedn);
            ``raw`` returns the unparsed ACI string instead of keywords.
        """
        ldap = self.api.Backend.ldap2
        # Read the aci attribute from the requested entry (basedn by default).
        dn = kw.get('location', self.api.env.basedn)
        entry = ldap.get_entry(dn, ['aci'])
        acis = _convert_strings_to_acis(entry.get('aci', []))
        # Raises if no ACI with this prefix+name exists on the entry.
        aci = _find_aci_by_name(acis, kw['aciprefix'], aciname)
        if kw.get('raw', False):
            result = dict(aci=unicode(aci))
        else:
            result = _aci_to_kw(ldap, aci)
        return dict(
            result=result,
            value=pkey_to_value(aciname, kw),
        )
@register()
class aci_rename(crud.Update):
    """
    Rename an ACI.
    """
    NO_CLI = True
    has_output_params = (
        Str('aci',
            label=_('ACI'),
        ),
    )
    takes_options = (
        _prefix_option,
        Str('newname',
            doc=_('New ACI name'),
        ),
    )
    msg_summary = _('Renamed ACI to "%(value)s"')
    def execute(self, aciname, **kw):
        """
        Rename the ACI ``aciname`` (under ``kw['aciprefix']``) to
        ``kw['newname']``.

        There is no atomic ACI rename in the directory server, so the
        rename is implemented as a validated delete of the old ACI
        followed by an add of a new one built from the old keywords.
        """
        ldap = self.api.Backend.ldap2
        entry = ldap.get_entry(self.api.env.basedn, ['aci'])
        acis = _convert_strings_to_acis(entry.get('aci', []))
        aci = _find_aci_by_name(acis, kw['aciprefix'], aciname)
        # Refuse to rename onto a prefixed name that is already taken.
        for a in acis:
            prefix, name = _parse_aci_name(a.name)
            if _make_aci_name(prefix, kw['newname']) == a.name:
                raise errors.DuplicateEntry()
        # The strategy here is to convert the ACI we're updating back into
        # a series of keywords. Then we replace any keywords that have been
        # updated and convert that back into an ACI and write it out.
        newkw = _aci_to_kw(ldap, aci)
        if 'selfaci' in newkw and newkw['selfaci'] == True:
            # selfaci is set in aci_to_kw to True only if the target is self
            kw['selfaci'] = True
        if 'aciname' in newkw:
            del newkw['aciname']
        # _make_aci is what is run in aci_add and validates the input.
        # Do this before we delete the existing ACI.
        newaci = _make_aci(ldap, None, kw['newname'], newkw)
        self.api.Command['aci_del'](aciname, aciprefix=kw['aciprefix'])
        # NOTE(review): aci_add's result is unconditionally replaced below
        # by our own rendering of newaci.
        result = self.api.Command['aci_add'](kw['newname'], **newkw)['result']
        if kw.get('raw', False):
            result = dict(aci=unicode(newaci))
        else:
            result = _aci_to_kw(ldap, newaci)
        return dict(
            result=result,
            value=pkey_to_value(kw['newname'], kw),
        )
| gpl-3.0 |
kaze/paasmaker | paasmaker/pacemaker/controller/role.py | 2 | 17884 | #
# Paasmaker - Platform as a Service
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import json
import logging
import unittest
import uuid

import colander
import sqlalchemy
import tornado
import tornado.testing
import tornado.web

import paasmaker
from paasmaker.common.controller import BaseController, BaseControllerTest
from paasmaker.common.core import constants
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
class RoleSchema(colander.MappingSchema):
	# Validation schema for the role create/edit form.
	name = colander.SchemaNode(colander.String(),
		title="Role name",
		description="A nice name for this role.",
		validator=colander.Length(min=2))
	# Permission names granted by this role; optional, defaults to none.
	permissions = colander.SchemaNode(colander.Sequence(),
		colander.SchemaNode(colander.String()),
		title="Permissions",
		default=[],
		missing=[])
class RoleAllocationAssignSchema(colander.MappingSchema):
	# Validation schema for assigning a role to a user, optionally
	# scoped to a single workspace.
	role_id = colander.SchemaNode(colander.Integer(),
		title="Role ID",
		description="The role ID.")
	user_id = colander.SchemaNode(colander.Integer(),
		title="User ID",
		description="The user ID.")
	# When omitted, the allocation applies globally rather than to
	# one workspace.
	workspace_id = colander.SchemaNode(colander.Integer(),
		title="Optional Workspace ID",
		description="The workspace ID.",
		default=None,
		missing=None)
class RoleAllocationUnAssignSchema(colander.MappingSchema):
	# Validation schema for removing an existing role allocation.
	allocation_id = colander.SchemaNode(colander.Integer(),
		title="Allocation ID",
		description="The allocation ID.")
class RoleListController(BaseController):
	"""Paginated listing of every role known to the pacemaker."""

	AUTH_METHODS = [BaseController.SUPER, BaseController.USER]

	def get(self):
		# Viewing the role list requires the ROLE_LIST permission.
		self.require_permission(constants.PERMISSION.ROLE_LIST)
		role_query = self.session.query(paasmaker.model.Role)
		self._paginate('roles', role_query)
		self.client_side_render()

	@staticmethod
	def get_routes(configuration):
		return [(r"/role/list", RoleListController, configuration)]
class RoleAllocationListController(BaseController):
	"""Paginated listing of live (not soft-deleted) role allocations."""

	AUTH_METHODS = [BaseController.SUPER, BaseController.USER]

	def get(self):
		self.require_permission(constants.PERMISSION.ROLE_ASSIGN)
		# Soft-deleted allocations have a deletion timestamp; show the rest.
		live_allocations = self.session.query(
			paasmaker.model.WorkspaceUserRole
		).filter(
			paasmaker.model.WorkspaceUserRole.deleted == None
		)
		self._paginate('allocations', live_allocations)
		self.client_side_render()

	@staticmethod
	def get_routes(configuration):
		return [(r"/role/allocation/list", RoleAllocationListController, configuration)]
class RoleEditController(BaseController):
	"""Create a new role or edit an existing one.

	GET renders the create/edit form; POST validates and saves the
	role, then rebuilds the flattened permissions table so the change
	takes effect immediately.
	"""
	AUTH_METHODS = [BaseController.SUPER, BaseController.USER]

	def _get_role(self, role_id):
		"""Load the role for role_id, or return None when no ID was given.

		Raises a 404 when an ID was supplied but no such role exists.
		"""
		role = None
		if role_id:
			# Find and load the role.
			role = self.session.query(
				paasmaker.model.Role
			).get(int(role_id))
			if not role:
				# Bug fix: this previously raised the unqualified name
				# HTTPError, which is not imported here, so a missing role
				# caused a NameError instead of the intended 404.
				raise tornado.web.HTTPError(404, "No such role.")
		return role

	def _default_role(self):
		"""Return a fresh, unsaved role with blank values for the create form."""
		role = paasmaker.model.Role()
		role.name = ''
		return role

	def get(self, role_id=None):
		self.require_permission(constants.PERMISSION.ROLE_EDIT)
		role = self._get_role(role_id)
		if not role:
			role = self._default_role()
		self.add_data('role', role)
		# Offer the full, sorted permission list to the form.
		available_permissions = constants.PERMISSION.ALL
		available_permissions.sort()
		self.add_data('available_permissions', available_permissions)
		self.client_side_render()

	def post(self, role_id=None):
		self.require_permission(constants.PERMISSION.ROLE_EDIT)
		role = self._get_role(role_id)
		valid_data = self.validate_data(RoleSchema())
		if not role:
			role = self._default_role()
		role.name = self.params['name']
		# And a special handler - if supplied with "ALL", replace with all
		# possible permissions.
		if 'ALL' in self.params['permissions']:
			role.permissions = constants.PERMISSION.ALL
		else:
			role.permissions = self.params['permissions']
		if valid_data:
			self.session.add(role)
			try:
				# Rebuilding the flat table is also where a duplicate role
				# name surfaces as an IntegrityError.
				paasmaker.model.WorkspaceUserRoleFlat.build_flat_table(self.session)
				self.session.refresh(role)
			except sqlalchemy.exc.IntegrityError as ex:
				# Modernised from the Python 2-only "except X, ex" form;
				# "as" works on Python 2.6+ and 3.
				self.session.rollback()
				self.reload_current_user()
				if 'name is not' in str(ex):
					valid_data = False
					self.add_error('The role name is not unique.')
				else:
					raise ex
		if valid_data:
			self.add_data('role', role)
			self.redirect('/role/list')
		else:
			self.add_data('role', role)
			self.add_data_template('available_permissions', constants.PERMISSION.ALL)
			self.client_side_render()

	@staticmethod
	def get_routes(configuration):
		routes = []
		routes.append((r"/role/create", RoleEditController, configuration))
		routes.append((r"/role/(\d+)", RoleEditController, configuration))
		return routes
class RoleAllocationAssignController(BaseController):
	"""Assign a role to a user, optionally scoped to a single workspace."""

	AUTH_METHODS = [BaseController.SUPER, BaseController.USER]

	def get(self):
		self.require_permission(constants.PERMISSION.ROLE_ASSIGN)
		# List available users, workspaces, and roles.
		# TODO: This won't be efficient at large sets.
		if self.format == 'html':
			# Template-only data - deliberately not exposed via the API.
			self.add_data_template('users', self.session.query(paasmaker.model.User).all())
			self.add_data_template('roles', self.session.query(paasmaker.model.Role).all())
			self.add_data_template('workspaces', self.session.query(paasmaker.model.Workspace).all())
		self.client_side_render()

	def post(self):
		self.require_permission(constants.PERMISSION.ROLE_ASSIGN)
		valid_data = self.validate_data(RoleAllocationAssignSchema())

		# Resolve the role, user, and optional workspace from their IDs.
		role = self.session.query(paasmaker.model.Role).get(int(self.params['role_id']))
		user = self.session.query(paasmaker.model.User).get(int(self.params['user_id']))
		workspace_id = self.params['workspace_id']
		workspace = None
		if workspace_id:
			workspace = self.session.query(paasmaker.model.Workspace).get(int(workspace_id))

		if not role:
			self.add_error("No such role.")
			valid_data = False
		if not user:
			self.add_error("No such user.")
			valid_data = False
		if workspace_id and not workspace:
			self.add_error("No such workspace.")
			valid_data = False

		if not valid_data:
			self.client_side_render()
			return

		# Record the allocation and rebuild the flattened permissions
		# table so it takes effect immediately.
		allocation = paasmaker.model.WorkspaceUserRole()
		allocation.user = user
		allocation.role = role
		allocation.workspace = workspace
		self.session.add(allocation)
		paasmaker.model.WorkspaceUserRoleFlat.build_flat_table(self.session)
		self.session.refresh(allocation)
		self.add_data('allocation', allocation)
		self.redirect('/role/allocation/list')

	@staticmethod
	def get_routes(configuration):
		return [(r"/role/allocation/assign", RoleAllocationAssignController, configuration)]
class RoleAllocationUnAssignController(BaseController):
	"""Remove (soft delete) an existing role allocation."""
	AUTH_METHODS = [BaseController.SUPER, BaseController.USER]

	def post(self):
		self.require_permission(constants.PERMISSION.ROLE_ASSIGN)
		# Validation failures raise inside validate_data; the returned
		# flag was previously stored in an unused local.
		self.validate_data(RoleAllocationUnAssignSchema())
		# Fetch this allocation.
		allocation = self.session.query(
			paasmaker.model.WorkspaceUserRole
		).get(int(self.params['allocation_id']))
		if not allocation:
			# Bug fix: this previously raised tornado.HTTPError, which does
			# not exist (HTTPError lives in tornado.web), so a missing
			# allocation caused an AttributeError rather than a 404.
			raise tornado.web.HTTPError(404, "No such allocation.")
		# Soft-delete and rebuild the flattened permissions table so the
		# revocation takes effect immediately.
		allocation.delete()
		self.session.add(allocation)
		paasmaker.model.WorkspaceUserRoleFlat.build_flat_table(self.session)
		self.add_data('success', True)
		self.redirect('/role/allocation/list')

	@staticmethod
	def get_routes(configuration):
		routes = []
		routes.append((r"/role/allocation/unassign", RoleAllocationUnAssignController, configuration))
		return routes
class RoleEditControllerTest(BaseControllerTest):
	"""Integration tests for role CRUD and role allocation controllers."""
	config_modules = ['pacemaker']
	def get_app(self):
		# Build a tornado application exposing every role-related route.
		self.late_init_configuration(self.io_loop)
		routes = RoleEditController.get_routes({'configuration': self.configuration})
		routes.extend(RoleListController.get_routes({'configuration': self.configuration}))
		routes.extend(RoleAllocationListController.get_routes({'configuration': self.configuration}))
		routes.extend(RoleAllocationAssignController.get_routes({'configuration': self.configuration}))
		routes.extend(RoleAllocationUnAssignController.get_routes({'configuration': self.configuration}))
		application = tornado.web.Application(routes, **self.configuration.get_tornado_configuration())
		return application
	def test_create(self):
		# Creating a role returns its ID and the granted permissions.
		# Create the role.
		request = paasmaker.common.api.role.RoleCreateAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.set_role_params('Test Role', [constants.PERMISSION.USER_EDIT])
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertEquals(len(response.errors), 0, "There were errors.")
		self.assertEquals(len(response.warnings), 0, "There were warnings.")
		self.assertTrue(response.data.has_key('role'), "Missing role object in return data.")
		self.assertTrue(response.data['role'].has_key('id'), "Missing ID in return data.")
		self.assertTrue(response.data['role'].has_key('permissions'), "Missing permissions in return data.")
		self.assertIn(constants.PERMISSION.USER_EDIT, response.data['role']['permissions'])
	def test_create_fail(self):
		# A blank role name must fail schema validation.
		# Send through some bogus data.
		request = paasmaker.common.api.role.RoleCreateAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.set_role_params('', [])
		request.send(self.stop)
		response = self.wait()
		self.failIf(response.success)
		self.assertTrue("Invalid" in response.errors[0], "Missing message in error.")
	def test_edit(self):
		# Editing a role's permissions persists to the database.
		# Create the role.
		request = paasmaker.common.api.role.RoleCreateAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.set_role_params('Test Role', [constants.PERMISSION.USER_EDIT])
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		role_id = response.data['role']['id']
		# Set up the request.
		request = paasmaker.common.api.role.RoleEditAPIRequest(self.configuration)
		request.set_superkey_auth()
		# This loads the role data from the server.
		request.load(role_id, self.stop, self.stop)
		load_response = self.wait()
		# Now attempt to change the role.
		request.set_role_permissions(load_response['permissions'] + [constants.PERMISSION.WORKSPACE_EDIT])
		# Send it along!
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertEquals(len(response.data['role']['permissions']), 2, 'Not enough permissions.')
		self.assertIn(constants.PERMISSION.USER_EDIT, response.data['role']['permissions'])
		self.assertIn(constants.PERMISSION.WORKSPACE_EDIT, response.data['role']['permissions'])
		# Load up the role separately and confirm.
		self.configuration.get_database_session(self.stop, None)
		session = self.wait()
		role = session.query(
			paasmaker.model.Role
		).get(role_id)
		self.assertIn(constants.PERMISSION.USER_EDIT, role.permissions)
		self.assertIn(constants.PERMISSION.WORKSPACE_EDIT, role.permissions)
	def test_edit_fail(self):
		# Renaming a role to a blank name must fail validation.
		# Create the role.
		request = paasmaker.common.api.role.RoleCreateAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.set_role_params('Test Role', [constants.PERMISSION.USER_EDIT])
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		role_id = response.data['role']['id']
		# Set up the request.
		request = paasmaker.common.api.role.RoleEditAPIRequest(self.configuration)
		request.set_superkey_auth()
		# This loads the role data from the server.
		request.load(role_id, self.stop, self.stop)
		load_response = self.wait()
		# Now attempt to change the role.
		request.set_role_name('')
		# Send it along!
		request.send(self.stop)
		response = self.wait()
		self.failIf(response.success)
		self.assertTrue("Invalid" in response.errors[0], "Missing message in error.")
	def test_list(self):
		# The role list endpoint returns previously created roles.
		# Create the role.
		request = paasmaker.common.api.role.RoleCreateAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.set_role_params('Test Role', [constants.PERMISSION.USER_EDIT])
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		request = paasmaker.common.api.role.RoleListAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertTrue(response.data.has_key('roles'), "Missing roles list.")
		self.assertEquals(len(response.data['roles']), 1, "Not enough roles returned.")
		self.assertEquals(response.data['roles'][0]['name'], 'Test Role', "Returned role is not as expected.")
	def test_list_allocation(self):
		# Both global and workspace-scoped allocations appear in the list.
		self.configuration.get_database_session(self.stop, None)
		session = self.wait()
		user = paasmaker.model.User()
		user.login = 'username'
		user.email = 'username@example.com'
		user.name = 'User Name'
		user.password = 'test'
		workspace = paasmaker.model.Workspace()
		workspace.name = 'Test Zone'
		workspace.stub = 'test'
		role = paasmaker.model.Role()
		role.name = "Test"
		role.permissions = []
		session.add(user)
		session.add(workspace)
		session.add(role)
		session.commit()
		# One global allocation...
		allocation = paasmaker.model.WorkspaceUserRole()
		allocation.user = user
		allocation.role = role
		session.add(allocation)
		# ...and one scoped to a workspace.
		other_allocation = paasmaker.model.WorkspaceUserRole()
		other_allocation.user = user
		other_allocation.role = role
		other_allocation.workspace = workspace
		session.add(other_allocation)
		session.commit()
		request = paasmaker.common.api.role.RoleAllocationListAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertTrue(response.data.has_key('allocations'), "Missing allocations list.")
		self.assertEquals(len(response.data['allocations']), 2, "Not enough allocations returned.")
		self.assertEquals(response.data['allocations'][0]['user']['login'], 'username', "Returned allocations is not as expected.")
		# TODO: Test that the workspace is blank in one of the responses.
	def test_allocation(self):
		# Full allocate/unallocate round trip via the API.
		self.configuration.get_database_session(self.stop, None)
		session = self.wait()
		user = paasmaker.model.User()
		user.login = 'username'
		user.email = 'username@example.com'
		user.name = 'User Name'
		user.password = 'test'
		workspace = paasmaker.model.Workspace()
		workspace.name = 'Test Zone'
		workspace.stub = 'test'
		role = paasmaker.model.Role()
		role.name = "Test"
		role.permissions = []
		session.add(user)
		session.add(workspace)
		session.add(role)
		session.commit()
		session.refresh(user)
		session.refresh(workspace)
		session.refresh(role)
		# Global allocation (no workspace).
		request = paasmaker.common.api.role.RoleAllocationAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.set_allocation_params(user.id, role.id)
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertTrue(response.data.has_key('allocation'), "Missing allocation.")
		self.assertEquals(response.data['allocation']['user']['id'], user.id, "User ID not as expected.")
		self.assertEquals(response.data['allocation']['role']['id'], role.id, "Role ID not as expected.")
		self.assertEquals(response.data['allocation']['workspace'], None, "Workspace is not None.")
		first_allocation_id = response.data['allocation']['id']
		# Same again, but apply to a workspace.
		request = paasmaker.common.api.role.RoleAllocationAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.set_allocation_params(user.id, role.id, workspace.id)
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertTrue(response.data.has_key('allocation'), "Missing allocation.")
		self.assertEquals(response.data['allocation']['user']['id'], user.id, "User ID not as expected.")
		self.assertEquals(response.data['allocation']['role']['id'], role.id, "Role ID not as expected.")
		self.assertEquals(response.data['allocation']['workspace']['id'], workspace.id, "Workspace is not None.")
		second_allocation_id = response.data['allocation']['id']
		# Remove the allocations.
		request = paasmaker.common.api.role.RoleUnAllocationAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.set_allocation_id(first_allocation_id)
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertTrue(response.data.has_key('success'), "Missing success flag.")
		# List it.
		request = paasmaker.common.api.role.RoleAllocationListAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertTrue(response.data.has_key('allocations'), "Missing allocations list.")
		self.assertEquals(len(response.data['allocations']), 1, "Not enough allocations returned.")
		self.assertEquals(response.data['allocations'][0]['user']['login'], 'username', "Returned allocations is not as expected.")
		# Remove the other allocation.
		request = paasmaker.common.api.role.RoleUnAllocationAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.set_allocation_id(second_allocation_id)
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertTrue(response.data.has_key('success'), "Missing success flag.")
		# List it. Should now be none.
		request = paasmaker.common.api.role.RoleAllocationListAPIRequest(self.configuration)
		request.set_superkey_auth()
		request.send(self.stop)
		response = self.wait()
		self.failIf(not response.success)
		self.assertTrue(response.data.has_key('allocations'), "Missing allocations list.")
		self.assertEquals(len(response.data['allocations']), 0, "Not the right number of allocations.") | mpl-2.0 |
camilonova/django | tests/model_fields/test_datetimefield.py | 87 | 3393 | import datetime
from django.db import models
from django.test import (
SimpleTestCase, TestCase, override_settings, skipUnlessDBFeature,
)
from django.test.utils import requires_tz_support
from django.utils import timezone
from .models import DateTimeModel
class DateTimeFieldTests(TestCase):
    """Parsing, persistence and __date lookup behaviour of date/time fields."""
    def test_datetimefield_to_python_microseconds(self):
        """DateTimeField.to_python() supports microseconds."""
        f = models.DateTimeField()
        self.assertEqual(f.to_python('2001-01-02 03:04:05.000006'), datetime.datetime(2001, 1, 2, 3, 4, 5, 6))
        self.assertEqual(f.to_python('2001-01-02 03:04:05.999999'), datetime.datetime(2001, 1, 2, 3, 4, 5, 999999))
    def test_timefield_to_python_microseconds(self):
        """TimeField.to_python() supports microseconds."""
        f = models.TimeField()
        self.assertEqual(f.to_python('01:02:03.000004'), datetime.time(1, 2, 3, 4))
        self.assertEqual(f.to_python('01:02:03.999999'), datetime.time(1, 2, 3, 999999))
    @skipUnlessDBFeature('supports_microsecond_precision')
    def test_datetimes_save_completely(self):
        """Date, datetime and time values round-trip without losing precision."""
        dat = datetime.date(2014, 3, 12)
        datetim = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
        tim = datetime.time(21, 22, 23, 240000)
        DateTimeModel.objects.create(d=dat, dt=datetim, t=tim)
        obj = DateTimeModel.objects.first()
        self.assertTrue(obj)
        self.assertEqual(obj.d, dat)
        self.assertEqual(obj.dt, datetim)
        self.assertEqual(obj.t, tim)
    @override_settings(USE_TZ=False)
    def test_lookup_date_without_use_tz(self):
        # With naive datetimes, dt__date matches only rows on that calendar day.
        d = datetime.date(2014, 3, 12)
        dt1 = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000)
        dt2 = datetime.datetime(2014, 3, 11, 21, 22, 23, 240000)
        t = datetime.time(21, 22, 23, 240000)
        m = DateTimeModel.objects.create(d=d, dt=dt1, t=t)
        # Other model with different datetime.
        DateTimeModel.objects.create(d=d, dt=dt2, t=t)
        self.assertEqual(m, DateTimeModel.objects.get(dt__date=d))
    @requires_tz_support
    @skipUnlessDBFeature('has_zoneinfo_database')
    @override_settings(USE_TZ=True, TIME_ZONE='America/Vancouver')
    def test_lookup_date_with_use_tz(self):
        # dt__date is evaluated in the active time zone, so the same rows can
        # match or not depending on TIME_ZONE.
        d = datetime.date(2014, 3, 12)
        # The following is equivalent to UTC 2014-03-12 18:34:23.24000.
        dt1 = datetime.datetime(2014, 3, 12, 10, 22, 23, 240000, tzinfo=timezone.get_current_timezone())
        # The following is equivalent to UTC 2014-03-13 05:34:23.24000.
        dt2 = datetime.datetime(2014, 3, 12, 21, 22, 23, 240000, tzinfo=timezone.get_current_timezone())
        t = datetime.time(21, 22, 23, 240000)
        m1 = DateTimeModel.objects.create(d=d, dt=dt1, t=t)
        m2 = DateTimeModel.objects.create(d=d, dt=dt2, t=t)
        # In Vancouver, we expect both results.
        self.assertQuerysetEqual(
            DateTimeModel.objects.filter(dt__date=d),
            [repr(m1), repr(m2)],
            ordered=False
        )
        with self.settings(TIME_ZONE='UTC'):
            # But in UTC, the __date only matches one of them.
            self.assertQuerysetEqual(DateTimeModel.objects.filter(dt__date=d), [repr(m1)])
class ValidationTest(SimpleTestCase):
    def test_datefield_cleans_date(self):
        # clean() parses an ISO date string into a datetime.date.
        f = models.DateField()
        self.assertEqual(datetime.date(2008, 10, 10), f.clean('2008-10-10', None))
| bsd-3-clause |
grovesdixon/metaTranscriptomes | scripts/parse_codeml_pairwise_outputBACKUP.py | 1 | 6189 | #!/usr/bin/env python
##parse_codeml_pairwise_output.py
##written 6/26/14 by Groves Dixon
ProgramName = 'parse_codeml_pairwise_output.py'
LastUpdated = '6/26/14'
By = 'Groves Dixon'
VersionNumber = '1.0'
print "\nRunning Program {}...".format(ProgramName)
VersionString = '{} version {} Last Updated {} by {}'.format(ProgramName, VersionNumber, LastUpdated, By)
Description = '''
Description:
Parses a list of codeml output files that were generated using pair-wise
dN/dS estimation (runmode -2). Pairs are set up against one base species
(set as spp1) and all other species (a list file)
'''
AdditionalProgramInfo = '''
Additional Program Information:
'''
##Import Modules
import time
import argparse
from sys import argv
from sys import exit
import numpy as np
Start_time = time.time() ##keeps track of how long the script takes to run
##Set Up Argument Parsing
parser = argparse.ArgumentParser(description=Description, epilog=AdditionalProgramInfo) ##create argument parser that will automatically return help texts from global variables above
parser.add_argument('-f', required = True, dest = 'files', nargs="+", help = 'A glob to the codeml output files (probably *.codeml)')
parser.add_argument('-spp1', required = True, dest = 'spp1', help = 'The search tag for species 1')
parser.add_argument('-sppList', required = True, dest = 'sppList', help = 'The List of species to pair with species 1')
parser.add_argument('-o', required = True, dest = 'out', help = 'The desired output file name')
args = parser.parse_args()
#Assign Arguments
FileList = args.files
Spp1 = args.spp1
SppListName = args.sppList
OutfileName = args.out
SppList = []
with open(SppListName, 'r') as infile:
for line in infile:
SppList.append(line.strip("\n"))
def read_files(FileList, Spp1, SppList):
    '''Parse pairwise codeml output files for dN and dS estimates.

    For every species in SppList other than Spp1, scan each codeml output
    file for the pairwise comparison between Spp1 and that species, and
    record the dN and dS values from the "dN/dS=" summary line.

    Returns four parallel lists: gene names (file paths with the .codeml
    extension removed), dN values (strings), dS values (strings) and the
    paired species names.
    '''
    # print() with a single argument is valid on both Python 2 and 3.
    print("\nLooking for data in {} codeml output files...".format(len(FileList)))
    geneList = []
    dNList = []
    dSList = []
    speciesList = []
    for species in SppList:
        if species == Spp1:
            # Never pair the base species with itself.
            continue
        for file in FileList:
            with open(file, 'r') as infile:
                hit = 0
                hitCount = 0  # pairings parsed from this file; must not exceed 1
                for line in infile:
                    if hit == 0:
                        # Look for the header line naming this species pair.
                        if "("+Spp1+")" in line and "("+species+")" in line and "..." in line:
                            hit = 1
                        continue
                    if "dN/dS=" not in line:
                        continue
                    fields = line.split()
                    try:
                        dn = fields[10]
                        ds = fields[13]
                    except IndexError:
                        # Very large estimates fuse tokens together (usually a
                        # bad alignment/ortholog call), shifting field
                        # positions; parse them anyway.
                        try:
                            dn = fields[10]
                            ds = fields[12]
                            # dS >= 10.0 fuses "dS=value"; split off the value.
                            if "=" in ds:
                                ds = ds.split('=')[1]
                        except IndexError:
                            # dN was also >= 10.0, shifting everything again.
                            dn = fields[9].split('=')[1]
                            ds = fields[11].split('=')[1]
                    # Bug fix: the previous file.strip(".codeml") removed any
                    # of those *characters* from both ends, mangling names
                    # such as "model.codeml"; remove the extension as a
                    # suffix instead.
                    if file.endswith(".codeml"):
                        geneName = file[:-len(".codeml")]
                    else:
                        geneName = file
                    geneList.append(geneName)
                    dNList.append(dn)
                    dSList.append(ds)
                    speciesList.append(species)
                    hit = 0
                    hitCount += 1
                    if hitCount > 1:
                        exit("Found more than one instance of pairing in a file. Something is wrong.")
    return geneList, dNList, dSList, speciesList
def output(OutfileName, geneList, dNList, dSList, speciesList):
    """Write the parsed results as a tab-separated table.

    Rows whose dN or dS value is not numeric (codeml emits unparsable
    values when the synonymous rate saturates, e.g. dS > 99.99) or that
    are missing any field are silently skipped, since they would also
    break the downstream upload into R.

    :param OutfileName: path of the table to (over)write
    :param geneList, dNList, dSList, speciesList: parallel result lists
        as returned by read_files()
    """
    with open(OutfileName, 'w') as out:
        out.write("EST\tspecies\tdN\tdS")
        for gene, dn, ds, species in zip(geneList, dNList, dSList, speciesList):
            try:
                # Both rate estimates must parse as numbers.
                float(dn)
                float(ds)
            except ValueError:
                continue
            if "" in (gene, species, dn, ds):
                # Incomplete record; skip it.
                continue
            out.write("\n{}\t{}\t{}\t{}".format(gene, species, dn, ds))
# Parse the codeml outputs and write the combined table.
geneList, dNList, dSList, speciesList = read_files(FileList, Spp1, SppList)
output(OutfileName, geneList, dNList, dSList, speciesList)
#return time to run
Time = time.time() - Start_time
print('\nTime took to run: {}'.format(Time))
| mit |
whitepages/nova | nova/tests/functional/api_sample_tests/test_create_backup.py | 30 | 1937 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import mock
from nova.tests.functional.api_sample_tests import test_servers
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class CreateBackupSamplesJsonTest(test_servers.ServersSampleBase):
    """API sample test for the createBackup server action."""
    extension_name = "os-create-backup"
    def _get_flags(self):
        # Enable the admin_actions extension that provides createBackup.
        f = super(CreateBackupSamplesJsonTest, self)._get_flags()
        f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        f['osapi_compute_extension'].append(
            'nova.api.openstack.compute.contrib.admin_actions.Admin_actions')
        return f
    def setUp(self):
        """setUp method for the CreateBackup api samples tests.
        This method creates the server that will be used in each test.
        """
        super(CreateBackupSamplesJsonTest, self).setUp()
        self.uuid = self._post_server()
    @mock.patch.object(fake._FakeImageService, 'detail', return_value=[])
    def test_post_backup_server(self, mock_method):
        # Get api samples to backup server request.
        response = self._do_post('servers/%s/action' % self.uuid,
                                 'create-backup-req', {})
        self.assertEqual(202, response.status_code)
iRGBit/QGIS | python/plugins/processing/algs/qgis/GeometryConvert.py | 5 | 9974 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Gridify.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
from qgis.core import QGis, QgsFeature, QgsGeometry
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from processing.core.parameters import ParameterVector
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputVector
from processing.tools import dataobjects, vector
class GeometryConvert(GeoAlgorithm):
INPUT = 'INPUT'
TYPE = 'TYPE'
OUTPUT = 'OUTPUT'
TYPES = ['Centroids',
'Nodes',
'Linestrings',
'Multilinestrings',
'Polygons'
]
    def defineCharacteristics(self):
        """Register the algorithm's name, group, parameters and output."""
        self.name, self.i18n_name = self.trAlgorithm('Convert geometry type')
        self.group, self.i18n_group = self.trAlgorithm('Vector geometry tools')
        # Input layer may be of any vector geometry type.
        self.addParameter(ParameterVector(self.INPUT,
                                          self.tr('Input layer'), [ParameterVector.VECTOR_TYPE_ANY]))
        self.addParameter(ParameterSelection(self.TYPE,
                                             self.tr('New geometry type'), self.TYPES))
        self.addOutput(OutputVector(self.OUTPUT, self.tr('Converted')))
def processAlgorithm(self, progress):
layer = dataobjects.getObjectFromUri(
self.getParameterValue(self.INPUT))
index = self.getParameterValue(self.TYPE)
splitNodes = False
if index == 0:
newType = QGis.WKBPoint
elif index == 1:
newType = QGis.WKBPoint
splitNodes = True
elif index == 2:
newType = QGis.WKBLineString
elif index == 3:
newType = QGis.WKBMultiLineString
elif index == 4:
newType = QGis.WKBPolygon
else:
newType = QGis.WKBPoint
writer = self.getOutputFromName(self.OUTPUT).getVectorWriter(
layer.pendingFields(), newType, layer.crs())
features = vector.features(layer)
count = len(features)
total = 100.0 / float(count)
for count, f in enumerate(features):
geom = f.geometry()
geomType = geom.wkbType()
if geomType in [QGis.WKBPoint, QGis.WKBPoint25D]:
if newType == QGis.WKBPoint:
writer.addFeature(f)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
elif geomType in [QGis.WKBMultiPoint, QGis.WKBMultiPoint25D]:
if newType == QGis.WKBPoint and splitNodes:
points = geom.asMultiPoint()
for p in points:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPoint(p))
writer.addFeature(feat)
elif newType == QGis.WKBPoint:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(geom.centroid())
writer.addFeature(feat)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
elif geomType in [QGis.WKBLineString, QGis.WKBLineString25D]:
if newType == QGis.WKBPoint and splitNodes:
points = geom.asPolyline()
for p in points:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPoint(p))
writer.addFeature(feat)
elif newType == QGis.WKBPoint:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(geom.centroid())
writer.addFeature(feat)
elif newType == QGis.WKBLineString:
writer.addFeature(f)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
elif geomType in [QGis.WKBMultiLineString, QGis.WKBMultiLineString25D]:
if newType == QGis.WKBPoint and splitNodes:
lines = geom.asMultiPolyline()
for line in lines:
for p in line:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPoint(p))
writer.addFeature(feat)
elif newType == QGis.WKBPoint:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(geom.centroid())
writer.addFeature(feat)
elif newType == QGis.WKBLineString:
lines = geom.asMultiPolyline()
for line in lines:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPolyline(line))
writer.addFeature(feat)
elif newType == QGis.WKBMultiLineString:
writer.addFeature(f)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
elif geomType in [QGis.WKBPolygon, QGis.WKBPolygon25D]:
if newType == QGis.WKBPoint and splitNodes:
rings = geom.asPolygon()
for ring in rings:
for p in ring:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPoint(p))
writer.addFeature(feat)
elif newType == QGis.WKBPoint:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(geom.centroid())
writer.addFeature(feat)
elif newType == QGis.WKBMultiLineString:
rings = geom.asPolygon()
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromMultiPolyline(rings))
writer.addFeature(feat)
elif newType == QGis.WKBPolygon:
writer.addFeature(f)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
elif geomType in [QGis.WKBMultiPolygon, QGis.WKBMultiPolygon25D]:
if newType == QGis.WKBPoint and splitNodes:
polygons = geom.asMultiPolygon()
for polygon in polygons:
for line in polygon:
for p in line:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPoint(p))
writer.addFeature(feat)
elif newType == QGis.WKBPoint:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(geom.centroid())
writer.addFeature(feat)
elif newType == QGis.WKBLineString:
polygons = geom.asMultiPolygon()
for polygons in polygons:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPolyline(polygon))
writer.addFeature(feat)
elif newType == QGis.WKBPolygon:
polygons = geom.asMultiPolygon()
for polygon in polygons:
feat = QgsFeature()
feat.setAttributes(f.attributes())
feat.setGeometry(QgsGeometry.fromPolygon(polygon))
writer.addFeature(feat)
elif newType in [QGis.WKBMultiLineString, QGis.WKBMultiPolygon]:
writer.addFeature(f)
else:
raise GeoAlgorithmExecutionException(
self.tr('Cannot convert from %s to %s', geomType, newType))
progress.setPercentage(int(count * total))
del writer
| gpl-2.0 |
Wintermute0110/plugin.program.advanced.emulator.launcher | tests/fakes.py | 1 | 8256 | from abc import ABCMeta, abstractmethod
from resources.objects import *
from resources.utils import *
from resources.scrap import *
class FakeRomSetRepository(ROMSetRepository):
    """In-memory ROMSetRepository double holding a single ROM set.

    Every query ignores the launcher argument and operates on the one
    stored collection, which keeps test setup trivial.
    """

    def __init__(self, roms):
        self.roms = roms

    def find_by_launcher(self, launcher):
        # Launcher is ignored; the same canned ROM set is always returned.
        return self.roms

    def save_rom_set(self, launcher, roms):
        # Replace the stored set wholesale.
        self.roms = roms

    def delete_all_by_launcher(self, launcher):
        # Reset to an empty mapping regardless of launcher.
        self.roms = {}
class FakeExecutor(ExecutorABC):
    """Executor double that records what it was asked to launch instead of
    spawning a real process.

    Tests inspect the captured application/arguments afterwards via the
    getActual* accessors.
    """

    def __init__(self):
        self.actualApplication = None
        self.actualArgs = None
        super(FakeExecutor, self).__init__(None)

    def execute(self, application, arguments, non_blocking):
        """Capture the call; nothing is actually executed."""
        self.actualApplication = application
        self.actualArgs = arguments

    def getActualApplication(self):
        return self.actualApplication

    def getActualArguments(self):
        return self.actualArgs
class FakeClass():
    """Minimal callback target used by tests to capture a value."""

    def FakeMethod(self, value, key, launcher):
        """Store *value* on the instance; *key* and *launcher* are ignored."""
        self.value = value
class FakeFile(FileName):
    """FileName double that keeps its 'file contents' in memory.

    All read/write operations act on the fakeContent string so tests can
    run without touching the filesystem.  exists() is patched to always
    report True.
    """

    def __init__(self, pathString):
        self.fakeContent = ''
        self.path_str = pathString
        self.path_tr = pathString
        # Rebind instance attributes so calls through the FileName
        # interface hit the fake implementations below.
        self.exists = self.exists_fake
        self.write = self.write_fake

    def setFakeContent(self, content):
        self.fakeContent = content

    def getFakeContent(self):
        return self.fakeContent

    def loadFileToStr(self, encoding = 'utf-8'):
        # Encoding is ignored; content is returned as stored.
        return self.fakeContent

    def readAllUnicode(self, encoding='utf-8'):
        # NOTE(review): uses the Python-2-only builtin `unicode`; this
        # helper fails on Python 3.
        contents = unicode(self.fakeContent)
        return contents

    def saveStrToFile(self, data_str, encoding = 'utf-8'):
        self.fakeContent = data_str

    def write_fake(self, bytes):
        # Appends rather than overwrites, mimicking sequential writes.
        self.fakeContent = self.fakeContent + bytes

    def open(self, mode):
        pass

    def close(self):
        pass

    def writeAll(self, bytes, flags='w'):
        # NOTE(review): appends even for flags='w'; presumably close enough
        # for the tests that use it.
        self.fakeContent = self.fakeContent + bytes

    def pjoin(self, *args):
        # Child fake shares the parent's content and extends both path
        # variants with the joined components.
        child = FakeFile(self.path_str)
        child.setFakeContent(self.fakeContent)
        for arg in args:
            child.path_str = os.path.join(child.path_str, arg)
            child.path_tr = os.path.join(child.path_tr, arg)
        return child

    def switchExtension(self, targetExt):
        # Delegate path manipulation to the real FileName, then copy the
        # fake content onto the result.
        switched_fake = super(FakeFile, self).switchExtension(targetExt)
        #switched_fake = FakeFile(switched_type.getPath())
        switched_fake.setFakeContent(self.fakeContent)
        return switched_fake

    def exists_fake(self):
        return True

    def scanFilesInPathAsFileNameObjects(self, mask = '*.*'):
        return []

    #backwards compatiblity
    def __create__(self, path):
        return FakeFile(path)
class Fake_Paths:
    """Mirror of AEL's global path layout rooted at a fake base directory.

    Instantiating this gives tests the same attribute names as the real
    addon paths object without touching the actual Kodi profile.
    """

    def __init__(self, fake_base, fake_addon_id = 'ael-tests'):
        # --- Base paths ---
        self.ADDONS_DATA_DIR = FileName(fake_base, isdir = True)
        self.ADDON_DATA_DIR = self.ADDONS_DATA_DIR.pjoin(fake_addon_id, isdir = True)
        self.PROFILE_DIR = self.ADDONS_DATA_DIR.pjoin('profile', isdir = True)
        self.HOME_DIR = self.ADDONS_DATA_DIR.pjoin('home', isdir = True)
        self.ADDONS_DIR = self.HOME_DIR.pjoin('addons', isdir = True)
        self.ADDON_CODE_DIR = self.ADDONS_DIR.pjoin(fake_addon_id, isdir = True)
        self.ICON_FILE_PATH = self.ADDON_CODE_DIR.pjoin('media/icon.png')
        self.FANART_FILE_PATH = self.ADDON_CODE_DIR.pjoin('media/fanart.jpg')

        # --- Databases and reports ---
        self.CATEGORIES_FILE_PATH = self.ADDON_DATA_DIR.pjoin('categories.xml')
        self.FAV_JSON_FILE_PATH = self.ADDON_DATA_DIR.pjoin('favourites.json')
        self.COLLECTIONS_FILE_PATH = self.ADDON_DATA_DIR.pjoin('collections.xml')
        self.VCAT_TITLE_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_title.xml')
        self.VCAT_YEARS_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_years.xml')
        self.VCAT_GENRE_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_genre.xml')
        self.VCAT_DEVELOPER_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_developers.xml')
        self.VCAT_NPLAYERS_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_nplayers.xml')
        self.VCAT_ESRB_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_esrb.xml')
        self.VCAT_RATING_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_rating.xml')
        self.VCAT_CATEGORY_FILE_PATH = self.ADDON_DATA_DIR.pjoin('vcat_category.xml')
        # Launcher app stdout/stderr file
        self.LAUNCH_LOG_FILE_PATH = self.ADDON_DATA_DIR.pjoin('launcher.log')
        self.RECENT_PLAYED_FILE_PATH = self.ADDON_DATA_DIR.pjoin('history.json')
        self.MOST_PLAYED_FILE_PATH = self.ADDON_DATA_DIR.pjoin('most_played.json')
        self.BIOS_REPORT_FILE_PATH = self.ADDON_DATA_DIR.pjoin('report_BIOS.txt')
        self.LAUNCHER_REPORT_FILE_PATH = self.ADDON_DATA_DIR.pjoin('report_Launchers.txt')

        # --- Offline scraper databases ---
        self.GAMEDB_INFO_DIR = self.ADDON_CODE_DIR.pjoin('GameDBInfo', isdir = True)
        self.GAMEDB_JSON_BASE_NOEXT = 'GameDB_info'
        self.LAUNCHBOX_INFO_DIR = self.ADDON_CODE_DIR.pjoin('LaunchBox', isdir = True)
        self.LAUNCHBOX_JSON_BASE_NOEXT = 'LaunchBox_info'

        # --- Artwork and NFO for Categories and Launchers ---
        self.CATEGORIES_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-categories', isdir = True)
        self.COLLECTIONS_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-collections', isdir = True)
        self.LAUNCHERS_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-launchers', isdir = True)
        self.FAVOURITES_ASSET_DIR = self.ADDON_DATA_DIR.pjoin('asset-favourites', isdir = True)
        self.VIRTUAL_CAT_TITLE_DIR = self.ADDON_DATA_DIR.pjoin('db_title', isdir = True)
        self.VIRTUAL_CAT_YEARS_DIR = self.ADDON_DATA_DIR.pjoin('db_year', isdir = True)
        self.VIRTUAL_CAT_GENRE_DIR = self.ADDON_DATA_DIR.pjoin('db_genre', isdir = True)
        self.VIRTUAL_CAT_DEVELOPER_DIR = self.ADDON_DATA_DIR.pjoin('db_developer', isdir = True)
        self.VIRTUAL_CAT_NPLAYERS_DIR = self.ADDON_DATA_DIR.pjoin('db_nplayer', isdir = True)
        self.VIRTUAL_CAT_ESRB_DIR = self.ADDON_DATA_DIR.pjoin('db_esrb', isdir = True)
        self.VIRTUAL_CAT_RATING_DIR = self.ADDON_DATA_DIR.pjoin('db_rating', isdir = True)
        self.VIRTUAL_CAT_CATEGORY_DIR = self.ADDON_DATA_DIR.pjoin('db_category', isdir = True)
        self.ROMS_DIR = self.ADDON_DATA_DIR.pjoin('db_ROMs', isdir = True)
        self.COLLECTIONS_DIR = self.ADDON_DATA_DIR.pjoin('db_Collections', isdir = True)
        self.REPORTS_DIR = self.ADDON_DATA_DIR.pjoin('reports', isdir = True)
class FakeScraper(Scraper):
    """Scraper test double that returns canned metadata instead of doing
    any web lookups."""

    def __init__(self, settings, launcher, rom_data_to_apply = None):
        self.rom_data_to_apply = rom_data_to_apply
        scraper_settings = ScraperSettings(1, 1, False, True)
        super(FakeScraper, self).__init__(scraper_settings, launcher, True, [])

    def getName(self):
        return 'FakeScraper'

    def supports_asset_type(self, asset_info):
        return True

    def _get_candidates(self, searchTerm, romPath, rom):
        return ['fake']

    def _load_metadata(self, candidate, romPath, rom):
        """Fill a gamedata dict from rom_data_to_apply, falling back to the
        ROM file name for the title when no canned data was supplied."""
        gamedata = self._new_gamedata_dic()

        if self.rom_data_to_apply:
            gamedata['title'] = self.rom_data_to_apply['m_name'] if 'm_name' in self.rom_data_to_apply else ''
            gamedata['year'] = self.rom_data_to_apply['m_year'] if 'm_year' in self.rom_data_to_apply else ''
            gamedata['genre'] = self.rom_data_to_apply['m_genre'] if 'm_genre' in self.rom_data_to_apply else ''
            gamedata['developer'] = self.rom_data_to_apply['m_developer'] if 'm_developer' in self.rom_data_to_apply else ''
            gamedata['plot'] = self.rom_data_to_apply['m_plot'] if 'm_plot' in self.rom_data_to_apply else ''
        else:
            gamedata['title'] = romPath.getBase_noext()

        # BUG FIX: the dict was built but never returned, so callers always
        # received None.
        return gamedata

    def _load_assets(self, candidate, romPath, rom):
        pass
| gpl-2.0 |
coolbombom/CouchPotatoServer | couchpotato/core/downloaders/transmission/main.py | 1 | 10725 | from base64 import b64encode
from couchpotato.core.downloaders.base import Downloader, StatusList
from couchpotato.core.helpers.encoding import isInt
from couchpotato.core.logger import CPLog
from couchpotato.environment import Env
from datetime import timedelta
import httplib
import json
import os.path
import re
import traceback
import urllib2
log = CPLog(__name__)
class Transmission(Downloader):
type = ['torrent', 'torrent_magnet']
log = CPLog(__name__)
def download(self, data, movie, filedata = None):
log.info('Sending "%s" (%s) to Transmission.', (data.get('name'), data.get('type')))
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
# Set parameters for Transmission
params = {
'paused': self.conf('paused', default = 0),
}
if len(self.conf('directory', default = '')) > 0:
folder_name = self.createFileName(data, filedata, movie)[:-len(data.get('type')) - 1]
params['download-dir'] = os.path.join(self.conf('directory', default = ''), folder_name).rstrip(os.path.sep)
torrent_params = {}
if self.conf('ratio'):
torrent_params = {
'seedRatioLimit': self.conf('ratio'),
'seedRatioMode': self.conf('ratiomode')
}
if not filedata and data.get('type') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Send request to Transmission
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
if data.get('type') == 'torrent_magnet':
remote_torrent = trpc.add_torrent_uri(data.get('url'), arguments = params)
torrent_params['trackerAdd'] = self.torrent_trackers
else:
remote_torrent = trpc.add_torrent_file(b64encode(filedata), arguments = params)
if not remote_torrent:
return False
# Change settings of added torrents
elif torrent_params:
trpc.set_torrent(remote_torrent['torrent-added']['hashString'], torrent_params)
log.info('Torrent sent to Transmission successfully.')
return self.downloadReturnId(remote_torrent['torrent-added']['hashString'])
except:
log.error('Failed to change settings for transfer: %s', traceback.format_exc())
return False
def getAllDownloadStatus(self):
log.debug('Checking Transmission download status.')
# Load host from config and split out port.
host = self.conf('host').split(':')
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
# Go through Queue
try:
trpc = TransmissionRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return_params = {
'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isFinished', 'downloadDir', 'uploadRatio']
}
queue = trpc.get_alltorrents(return_params)
except Exception, err:
log.error('Failed getting queue: %s', err)
return False
if not queue:
return []
statuses = StatusList(self)
# Get torrents status
# CouchPotato Status
#status = 'busy'
#status = 'failed'
#status = 'completed'
# Transmission Status
#status = 0 => "Torrent is stopped"
#status = 1 => "Queued to check files"
#status = 2 => "Checking files"
#status = 3 => "Queued to download"
#status = 4 => "Downloading"
#status = 4 => "Queued to seed"
#status = 6 => "Seeding"
#To do :
# add checking file
# manage no peer in a range time => fail
for item in queue['torrents']:
log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / eta=%s / uploadRatio=%s / confRatio=%s / isFinished=%s', (item['name'], item['id'], item['downloadDir'], item['hashString'], item['percentDone'], item['status'], item['eta'], item['uploadRatio'], self.conf('ratio'), item['isFinished']))
if not os.path.isdir(Env.setting('from', 'renamer')):
log.error('Renamer "from" folder doesn\'t to exist.')
return
if (item['percentDone'] * 100) >= 100 and (item['status'] == 6 or item['status'] == 0) and item['uploadRatio'] > self.conf('ratio'):
try:
trpc.stop_torrent(item['hashString'], {})
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'completed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
'folder': os.path.join(item['downloadDir'], item['name']),
})
if ((not os.path.isdir(item['downloadDir']))) and (self.conf('from') in item['downloadDir'])):
trpc.remove_torrent(item['id'], "true", {})
except Exception, err:
log.error('Failed to stop and remove torrent "%s" with error: %s', (item['name'], err))
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'failed',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = 0)),
})
else:
statuses.append({
'id': item['hashString'],
'name': item['name'],
'status': 'busy',
'original_status': item['status'],
'timeleft': str(timedelta(seconds = item['eta'])), # Is ETA in seconds??
})
return statuses
class TransmissionRPC(object):

    """Minimal Transmission JSON-RPC client (Python 2).

    Handles basic/digest authentication, the X-Transmission-Session-Id
    CSRF handshake (HTTP 409), and exposes thin wrappers around the
    torrent-add/set/get/stop/remove RPC methods.
    """

    def __init__(self, host = 'localhost', port = 9091, username = None, password = None):
        super(TransmissionRPC, self).__init__()

        self.url = 'http://' + host + ':' + str(port) + '/transmission/rpc'
        self.tag = 0                     # incrementing request tag
        self.session_id = 0              # CSRF token, refreshed on HTTP 409
        self.session = {}
        if username and password:
            # Install a global opener handling both basic and digest auth.
            password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm()
            password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password)
            opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager))
            opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')]
            urllib2.install_opener(opener)
        elif username or password:
            log.debug('User or password missing, not using authentication.')
        self.session = self.get_session()

    def _request(self, ojson):
        """POST one RPC payload and return its 'arguments' dict, or False.

        On HTTP 409 the new X-Transmission-Session-Id is extracted from the
        response body and the request is retried once (recursively).
        """
        self.tag += 1
        headers = {'x-transmission-session-id': str(self.session_id)}
        request = urllib2.Request(self.url, json.dumps(ojson).encode('utf-8'), headers)
        try:
            open_request = urllib2.urlopen(request)
            response = json.loads(open_request.read())
            log.debug('request: %s', json.dumps(ojson))
            log.debug('response: %s', json.dumps(response))
            if response['result'] == 'success':
                log.debug('Transmission action successfull')
                return response['arguments']
            else:
                log.debug('Unknown failure sending command to Transmission. Return text is: %s', response['result'])
                return False
        except httplib.InvalidURL, err:
            log.error('Invalid Transmission host, check your config %s', err)
            return False
        except urllib2.HTTPError, err:
            if err.code == 401:
                log.error('Invalid Transmission Username or Password, check your config')
                return False
            elif err.code == 409:
                msg = str(err.read())
                try:
                    self.session_id = \
                        re.search('X-Transmission-Session-Id:\s*(\w+)', msg).group(1)
                    log.debug('X-Transmission-Session-Id: %s', self.session_id)

                    # #resend request with the updated header
                    return self._request(ojson)
                except:
                    log.error('Unable to get Transmission Session-Id %s', err)
            else:
                log.error('TransmissionRPC HTTPError: %s', err)
        except urllib2.URLError, err:
            log.error('Unable to connect to Transmission %s', err)

    def get_session(self):
        post_data = {'method': 'session-get', 'tag': self.tag}
        return self._request(post_data)

    def add_torrent_uri(self, torrent, arguments):
        # 'filename' also accepts magnet URIs per the Transmission RPC spec.
        arguments['filename'] = torrent
        post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
        return self._request(post_data)

    def add_torrent_file(self, torrent, arguments):
        # 'metainfo' is the base64-encoded .torrent file content.
        arguments['metainfo'] = torrent
        post_data = {'arguments': arguments, 'method': 'torrent-add', 'tag': self.tag}
        return self._request(post_data)

    def set_torrent(self, torrent_id, arguments):
        arguments['ids'] = torrent_id
        post_data = {'arguments': arguments, 'method': 'torrent-set', 'tag': self.tag}
        return self._request(post_data)

    def get_alltorrents(self, arguments):
        post_data = {'arguments': arguments, 'method': 'torrent-get', 'tag': self.tag}
        return self._request(post_data)

    def stop_torrent(self, torrent_id, arguments):
        arguments['ids'] = torrent_id
        post_data = {'arguments': arguments, 'method': 'torrent-stop', 'tag': self.tag}
        return self._request(post_data)

    def remove_torrent(self, torrent_id, remove_local_data, arguments):
        arguments['ids'] = torrent_id
        arguments['delete-local-data'] = remove_local_data
        post_data = {'arguments': arguments, 'method': 'torrent-remove', 'tag': self.tag}
        return self._request(post_data)
| gpl-3.0 |
astroswego/magellanic-structure | src/magstruct/transformations.py | 1 | 1722 | import numpy
from numpy import array, sin, cos
__all__ = [
'Equatorial2Cartesian',
'Rotation3D',
'rotation_matrix_3d'
]
class Equatorial2Cartesian():
    """Transformer converting equatorial coordinates into Cartesian ones.

    Rows of X are (RA, Dec, D); the output rows are (x, y, z) relative to
    the reference point (RA_0, Dec_0, D_0).
    """

    def __init__(self, RA_0, Dec_0, D_0):
        self.RA_0 = RA_0
        self.Dec_0 = Dec_0
        self.D_0 = D_0

    def fit(self, X, y=None):
        """No-op, present for scikit-learn style pipelines."""
        return self

    def transform(self, X, y=None, **params):
        """Return a new array with columns (x, y, z)."""
        RA, Dec, D = X[:, 0], X[:, 1], X[:, 2]
        delta_RA = RA - self.RA_0

        out = numpy.empty_like(X)
        out[:, 0] = -D * sin(delta_RA) * cos(Dec)
        out[:, 1] = D * (sin(Dec) * cos(self.Dec_0) +
                         sin(self.Dec_0) * cos(delta_RA) * cos(Dec))
        # NOTE(review): the z formula below is reproduced exactly as in the
        # original; the cos(RA) term (rather than cos(delta_RA)) and the
        # trailing RA_0*cos(Dec) term look suspicious -- confirm against the
        # intended projection before changing.
        out[:, 2] = self.D_0 \
            - D * (sin(Dec) * sin(self.Dec_0) + cos(RA) * cos(self.Dec_0)) \
            - self.RA_0 * cos(Dec)
        return out
def rotation_matrix_3d(angle, axis):
    """Return the 3x3 rotation matrix for *angle* radians about *axis*.

    Axis is 0, 1 or 2.  The sign convention matches the original flat-index
    implementation: the -sin entry sits at (j, k) with j < k the two axes
    other than *axis* (note this is the transpose of the usual convention
    for axis 1).

    BUG FIX: the original wrote sin(angle) with ``T.flat[i::2]`` which, for
    axis == 1, also overwrote the (2, 2) diagonal entry with sin(angle),
    yielding a non-orthogonal matrix.  Writing only the two off-diagonal
    entries explicitly fixes that while keeping axes 0 and 2 identical.
    """
    assert axis in range(3), 'Axis must be 0, 1, or 2'

    j, k = [i for i in range(3) if i != axis]

    T = numpy.zeros((3, 3), dtype=float)
    T[axis, axis] = 1.0
    T[j, j] = T[k, k] = cos(angle)
    T[j, k] = -sin(angle)
    T[k, j] = sin(angle)

    return T
class Rotation3D():
    """Transformer applying a fixed rotation about one coordinate axis."""

    def __init__(self, angle, axis):
        self.axis = axis
        self.angle = angle
        # Precompute the matrix once; transform() only multiplies.
        self.rotation_matrix = rotation_matrix_3d(angle, axis)

    def fit(self, X, y=None):
        """No-op, present for scikit-learn style pipelines."""
        return self

    def transform(self, X, y=None, **params):
        """Rotate *X* (vectors as columns) by the stored matrix."""
        R = self.rotation_matrix
        return R.dot(X)
| mit |
lindemann09/pyForceDAQ | forceDAQ/data_handling/read_force_data.py | 1 | 2147 | """
Functions to read your force and event data
"""
__author__ = 'Oliver Lindemann'
import os
import sys
import gzip
from collections import OrderedDict
import numpy as np
TAG_COMMENTS = "#"
TAG_UDPDATA = TAG_COMMENTS + "UDP"
TAG_DAQEVENTS = TAG_COMMENTS + "T"
def _csv(line):
return list(map(lambda x: x.strip(), line.split(",")))
def DataFrameDict(data, varnames):
    """data frame: Dict of numpy arrays
    does not require Pandas, but can be easily converted to pandas dataframe
    via pandas.DataFrame(data_frame_dict)
    """
    rtn = OrderedDict((name, []) for name in varnames)
    for row in data:
        for name, value in zip(varnames, row):
            rtn[name].append(value)
    return rtn
def data_frame_to_text(data_frame):
    """Render a DataFrameDict as CSV text: header row, then one line per
    record (columns transposed from the per-variable lists)."""
    lines = [",".join(data_frame.keys())]
    for row in np.array(list(data_frame.values())).T:
        lines.append(",".join(row))
    return "\n".join(lines) + "\n"
def read_raw_data(path):
    """Read a force-recording file (optionally gzipped) and split it up.

    Returns a tuple (data, udp_events, daq_events, comments): the first
    three are DataFrameDicts, comments is the raw text of all '#' lines.

    BUG FIX: previously plain comment lines (starting with '#' but not
    '#UDP' or '#T') fell through to the data branch and were mis-parsed as
    variable names or data rows; comment handling is now exclusive.  The
    file is also closed via try/finally so a parse error cannot leak the
    handle.
    """
    daq_events = []
    udp_events = []
    comments = ""
    data = []
    varnames = None

    # Paths are resolved relative to the directory of the started script.
    app_dir = os.path.split(sys.argv[0])[0]
    path = os.path.abspath(os.path.join(app_dir, path))

    if path.endswith("gz"):
        fl = gzip.open(path, "rt")
    else:
        fl = open(path, "rt")

    try:
        for ln in fl:
            if ln.startswith(TAG_COMMENTS):
                # Every '#' line belongs to the comments blob; UDP and DAQ
                # event lines are additionally parsed below.
                comments += ln
                if ln.startswith(TAG_UDPDATA + ","):
                    udp_events.append(_csv(ln[len(TAG_UDPDATA) + 1:]))
                elif ln.startswith(TAG_DAQEVENTS):
                    daq_events.append(_csv(ln[len(TAG_DAQEVENTS) + 1:]))
            elif varnames is None:
                # First non-comment row contains the variable names.
                varnames = _csv(ln)
            else:
                data.append(_csv(ln))
    finally:
        fl.close()

    return (DataFrameDict(data, varnames),
            DataFrameDict(udp_events, ["time", "value"]),
            DataFrameDict(daq_events, ["time", "value"]),
            comments)
| mit |
alanch-ms/PTVS | Python/Tests/TestData/VirtualEnv/env/Lib/encodings/cp1251.py | 593 | 13617 | """ Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp1251 codec built on the charmap tables defined below."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is context-free, so no state is kept between calls.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is context-free, so no state is kept between calls.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Behaviour fully inherited from Codec and codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Behaviour fully inherited from Codec and codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo record the codecs registry uses for 'cp1251'."""
    return codecs.CodecInfo(
        name='cp1251',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u20ac' # 0x88 -> EURO SIGN
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE
u'\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE
u'\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE
u'\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE
u'\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE
u'\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE
u'\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE
u'\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U
u'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
u'\u2116' # 0xB9 -> NUMERO SIGN
u'\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE
u'\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE
u'\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI
u'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
cemarchi/biosphere | Src/BioAnalyzer/Analysis/GenePrioritization/Steps/DataIntegration/IntermediateRepresentation/Transformers/MicroRnaToGeneTransformer.py | 1 | 4546 | import math
import statistics
from itertools import groupby
from random import randint
from typing import Dict, Tuple, Counter
import pandas as pd
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Generators import \
IntermediateRepresentationGeneratorBase
from Src.BioAnalyzer.Analysis.GenePrioritization.Steps.DataIntegration.IntermediateRepresentation.Transformers.SampleTransformerBase import \
SampleTransformerBase
from Src.BioDataManagement.CrossCutting.DTOs.ExpressionLevelStatusDto import ExpressionLevelStatusDto
class MicroRnaToGeneTransformer(SampleTransformerBase):
    """Transform a microRNA expression sample matrix into a gene-level matrix.

    Each microRNA column is mapped to its target genes; a gene's value for a
    patient is the mean of the non-NaN expression values of every microRNA
    that targets it.  The resulting gene matrix is passed through the
    intermediate-representation generator before being returned.
    """
    def __init__(self,
                 intermediateRepresentationGenerator: IntermediateRepresentationGeneratorBase,
                 get_global_diff_values_action,
                 get_mirna_gene_target_action):
        """Store the collaborators used during transformation.

        intermediateRepresentationGenerator: applied to the gene matrix
            in ``transform`` (via the base class).
        get_global_diff_values_action: callable returning global
            differential-expression values, used by ``__get_gene_status``.
        get_mirna_gene_target_action: callable returning microRNA -> gene
            target mappings, used by ``__get_mirna_gene_targets``.
        """
        super().__init__(intermediateRepresentationGenerator)
        self.__get_mirna_gene_target_action = get_mirna_gene_target_action
        self.__get_global_diff_values_action = get_global_diff_values_action
    def transform(self, from_sample_matrix: pd.DataFrame, is_highly_significant: bool) -> Tuple[pd.DataFrame, Dict[int, ExpressionLevelStatusDto]]:
        """Return ``(gene_matrix, gene_status)`` built from a microRNA matrix.

        ``gene_matrix`` holds, per patient, the mean expression of the
        microRNAs targeting each gene (entrez id); ``gene_status`` maps
        entrez ids to an expression-level status.
        """
        # microRNA symbols are lower-cased so they match the matrix columns
        # (assumes column names are lower case -- TODO confirm upstream).
        mirna_gene_targets = {mirna.lower(): g for mirna, g in
                              self.__get_mirna_gene_targets(from_sample_matrix.columns.tolist()).items()}
        mirna_samples = self.__get_mirna_samples(from_sample_matrix, mirna_gene_targets)
        # All distinct target genes (entrez ids) across every microRNA.
        id_entrez_list = list(set([id_entrez for mirna_symbol, id_entrez_list in mirna_gene_targets.items()
                                   for id_entrez in id_entrez_list]))
        measure_matrix = dict([(g, []) for g in id_entrez_list])
        key_func = lambda gene: gene[0]
        for patient_id, exp_values in mirna_samples.items():
            # Expand each (microRNA, value) into one pair per target gene,
            # then group by gene to average the values.
            gene_values = [(id_entrez,
                            exp_value) for mirna_symbol, exp_value in exp_values.items()
                           for id_entrez in mirna_gene_targets[mirna_symbol]]
            gene_values = sorted(gene_values, key=key_func)
            for id_entrez, measures in groupby(gene_values, key_func):
                # NaN measures are dropped; an all-NaN group yields NaN.
                measures = [measure for id_entrez, measure in list(measures) if not math.isnan(measure)]
                measure_matrix[id_entrez].append(float('NaN') if not measures else statistics.mean(measures))
        # Drop genes with no usable measurement at all (all-NaN columns).
        gene_matrix = pd.DataFrame.from_dict(measure_matrix).dropna(axis=1,how='all')
        gene_matrix = self.intermediateRepresentationGenerator.generate(gene_matrix).dropna(axis=1,how='all')
        return gene_matrix, \
               self.__get_gene_status(mirna_gene_targets, gene_matrix.columns.tolist(), is_highly_significant)
    def __get_mirna_gene_targets(self, mirnas):
        """Return ``{microrna_symbol: [entrez ids]}`` for the given microRNAs."""
        gene_targets = {}
        fe_target = self.__get_mirna_gene_target_action(mirnas)
        # NOTE(review): gene_targets starts empty, so the "already present"
        # branch only matters if result_list repeats a symbol -- verify.
        gene_targets.update(dict([(t.microrna_symbol, list(set(gene_targets[t.microrna_symbol] + t.id_entrez_genes)))
                                  if t.microrna_symbol in gene_targets
                                  else (t.microrna_symbol, t.id_entrez_genes) for t in fe_target.result_list]))
        return gene_targets
    def __get_mirna_samples(self, from_sample_matrix, mirna_gene_targets):
        """Return ``{patient_id: {microrna_symbol: value}}`` for targeted microRNAs."""
        from_sample_matrix = from_sample_matrix[list(mirna_gene_targets.keys()) + ['patient_id']]
        from_sample_matrix.set_index("patient_id", drop=True, inplace=True)
        return from_sample_matrix.to_dict(orient="index")
    def __get_gene_status(self, mirna_gene_targets, genes, is_highly_significant):
        """Map each gene to a single status derived from its microRNAs' statuses."""
        diff_mirna = [diff for diff in self.__get_global_diff_values_action(is_highly_significant).result.values
                      if diff.element_id in mirna_gene_targets]
        genes_status = [(g, diff.status) for diff in diff_mirna
                        for g in mirna_gene_targets[diff.element_id] if g in genes]
        key_func = lambda gene: gene[0]
        genes_status = sorted(genes_status, key=key_func)
        genes_status_dict = {}
        for id_entrez, status in groupby(genes_status, key_func):
            # ``status`` items are (gene, status) tuples; Counter ranks them
            # by frequency (most common first).
            status = list(status)
            status_counter = Counter(status)
            status = [k for k, v in status_counter.most_common()]
            len_status = len(status) - 1
            # NOTE(review): with exactly two distinct statuses the most
            # common wins; otherwise one is picked at random (randint) --
            # the single-status case falls into randint(0, 0), i.e. [0].
            genes_status_dict[id_entrez] = status[0] if len_status == 1 else status[randint(0, len_status)]
        # Each stored value is still a (gene, status) tuple; keep the status.
        return dict([(entrez_id, status[1]) for entrez_id, status in genes_status_dict.items()])
aplanas/kmanga | kmanga/core/models.py | 1 | 21424 | import os.path
from django.conf import settings
from django.db import connection
from django.db import models
from django.db.models import Count
from django.db.models import F
from django.db.models import Q
from django.urls import reverse
from django.utils import timezone
class TimeStampedModel(models.Model):
    """Abstract base model adding creation/last-modification timestamps."""
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    class Meta:
        abstract = True
class Source(TimeStampedModel):
    """A scraping source (site) from which mangas are collected."""
    name = models.CharField(max_length=200)
    # Crawler name for this source; also used as the cover upload
    # subdirectory (see `_cover_path`).
    spider = models.CharField(max_length=80)
    url = models.URLField(unique=True)
    has_footer = models.BooleanField(default=False)
    enabled = models.BooleanField(default=True)
    def __str__(self):
        return self.name
class SourceLanguage(TimeStampedModel):
    """A language in which a `Source` publishes mangas."""
    # Two-letter language codes used across the models.
    GERMAN = 'DE'
    ENGLISH = 'EN'
    SPANISH = 'ES'
    FRENCH = 'FR'
    ITALIAN = 'IT'
    RUSSIAN = 'RU'
    PORTUGUESE = 'PT'
    LANGUAGE_CHOICES = (
        (ENGLISH, 'English'),
        (SPANISH, 'Spanish'),
        (GERMAN, 'German'),
        (FRENCH, 'French'),
        (ITALIAN, 'Italian'),
        (RUSSIAN, 'Russian'),
        (PORTUGUESE, 'Portuguese'),
    )
    language = models.CharField(max_length=2, choices=LANGUAGE_CHOICES)
    source = models.ForeignKey(Source, on_delete=models.CASCADE)
    def __str__(self):
        return '%s (%s)' % (self.get_language_display(), self.language)
class ConsolidateGenre(TimeStampedModel):
    """Canonical genre name used to unify per-source `Genre` entries."""
    name = models.CharField(max_length=200)
    def __str__(self):
        return self.name
class Genre(TimeStampedModel):
    """A genre as named by a specific `Source`."""
    name = models.CharField(max_length=200)
    source = models.ForeignKey(Source, on_delete=models.CASCADE)
    # Link to the consolidated genre is not enabled yet.
    # consolidategenre = models.ForeignKey(ConsolidateGenre,
    #                                      on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class AdvRawQuerySet(models.query.RawQuerySet):
    """RawQuerySet subclass with advanced options.

    Wraps three related queries: `raw_query` (full, unpaged), `paged_query`
    (same query with `LIMIT %s OFFSET %s` placeholders appended) and
    `count_query` (a `SELECT COUNT(*)`), so slicing and `len()` happen at
    the SQL level instead of materializing the whole result set.
    """
    def __init__(self, raw_query, paged_query, count_query,
                 model=None, query=None, params=None,
                 translations=None, using=None, hints=None):
        super(AdvRawQuerySet, self).__init__(raw_query, model=model,
                                             query=query,
                                             params=params,
                                             translations=translations,
                                             using=using, hints=hints)
        self.raw_query = raw_query
        self.paged_query = paged_query
        self.count_query = count_query
    def __getitem__(self, key):
        """Return a page of results using `paged_query`.

        Accepts an int index or a slice.  Bug fix: open-ended slices used
        to crash with TypeError (`key.start`/`key.stop` are None); now
        `qs[:10]` defaults the start to 0 and `qs[10:]` passes NULL to
        LIMIT, which PostgreSQL treats as "no limit".  Negative indices
        are not supported (they would be passed straight to OFFSET).
        """
        if isinstance(key, slice):
            start = key.start or 0
            stop = key.stop
        else:
            start, stop = key, key + 1
        # LIMIT NULL means "no limit" in PostgreSQL (the dialect already
        # assumed by the NULLS LAST clauses elsewhere in this module).
        limit = None if stop is None else stop - start
        # Normalize to a list so the paging params append uniformly
        # whether self.params is a list, a tuple or None (the old code
        # mixed list and tuple results).
        params = list(self.params or []) + [limit, start]
        return models.query.RawQuerySet(self.paged_query,
                                        model=self.model,
                                        params=params,
                                        translations=self.translations,
                                        using=self._db,
                                        hints=self._hints)
    def __len__(self):
        """Return the total (unpaged) number of rows via `count_query`."""
        cursor = connection.cursor()
        cursor.execute(self.count_query, self.params)
        return cursor.fetchone()[0]
class MangaQuerySet(models.QuerySet):
    """QuerySet for `Manga` with raw-SQL helpers for ordering and full-text
    search.  The raw-SQL methods return `AdvRawQuerySet` instances so the
    caller can slice and count them at the SQL level.
    """
    def latests(self):
        """Return the latest mangas with new/updated issues."""
        # The correct annotation expression is the next one, but due
        # to an error in Django ORM, this expression uses a full GROUP
        # BY with the data fields. This produces a slow query.
        #
        # return self.annotate(
        #     models.Max('issue__modified')
        # ).order_by('-issue__modified__max')
        #
        # Alternative (without deferreds)
        #
        # extra_query = '''
        # SELECT MAX(core_issue.modified)
        # FROM core_issue
        # WHERE core_issue.manga_id = core_manga.id
        # '''
        # Manga.objects.extra({
        #     'issue__modified__max': extra_query
        # }).order_by('-issue__modified__max')
        raw_query = '''
      SELECT core_manga.id,
             MAX(core_issue.modified) AS issue__modified__max
      FROM core_manga
      LEFT OUTER JOIN core_issue
           ON (core_manga.id = core_issue.manga_id)
      GROUP BY core_manga.id
      ORDER BY issue__modified__max DESC NULLS LAST,
               core_manga.name ASC,
               core_manga.url ASC;
        '''
        paged_query = '''
      SELECT core_manga.id,
             MAX(core_issue.modified) AS issue__modified__max
      FROM core_manga
      LEFT OUTER JOIN core_issue
           ON (core_manga.id = core_issue.manga_id)
      GROUP BY core_manga.id
      ORDER BY issue__modified__max DESC NULLS LAST,
               core_manga.name ASC,
               core_manga.url ASC
      LIMIT %s
      OFFSET %s;
        '''
        count_query = '''
      SELECT COUNT(*)
      FROM core_manga;
        '''
        return AdvRawQuerySet(raw_query=raw_query,
                              paged_query=paged_query,
                              count_query=count_query,
                              model=self.model,
                              using=self.db)
    def _to_tsquery(self, q):
        """Convert a query to a PostgreSQL tsquery."""
        # Remove special chars (except parens)
        q = ''.join(c if c.isalnum() or c in '()' else ' ' for c in q)
        # Separate parentheses from words
        for token in ('(', ')'):
            q = q.replace(token, ' %s ' % token)
        # Parse the query
        op = {
            'and': '&',
            'or': '|',
            'not': '-',
            '(': '(',
            ')': ')',
        }
        # Join operators
        j = '&|'
        # Operators that expect and join before
        ops_j = '-('
        tsquery = []
        for token in q.split():
            if token in op:
                # Insert an implicit AND before '-' or '(' when the
                # previous token is not already a join operator.
                if tsquery and op[token] in ops_j and tsquery[-1] not in j:
                    tsquery.append(op['and'])
                tsquery.append(op[token])
            else:
                # Plain word: implicit AND with what came before, then
                # append with ':*' for prefix matching.
                if tsquery and tsquery[-1] not in (j + ops_j):
                    tsquery.append(op['and'])
                tsquery.append('%s:*' % token)
        # Add spaces between join operators
        tsquery = [(t if t not in j else ' %s ' % t) for t in tsquery]
        return ''.join(tsquery)
    def is_valid(self, q):
        """Check if the query is a valid (balanced-parentheses) query."""
        q = self._to_tsquery(q)
        # Separate parentheses from words
        for token in ('(', ')'):
            q = q.replace(token, ' %s ' % token)
        # Classic stack-based parenthesis matching.
        s = []
        for token in q.split():
            if token == '(':
                s.append(token)
            elif token == ')':
                try:
                    t = s.pop()
                except IndexError:
                    return False
                if t != '(':
                    return False
        return not len(s)
    def search(self, q):
        """Full-text search over the materialized FTS view, ranked by
        relevance (ts_rank)."""
        q = self._to_tsquery(q)
        raw_query = '''
      SELECT core_manga.*
      FROM (
          SELECT id
          FROM core_manga_fts_view,
               to_tsquery(%s) AS q
          WHERE document @@ q
          ORDER BY ts_rank(document, q) DESC,
                   name ASC,
                   url ASC
      ) AS ids
      INNER JOIN core_manga ON core_manga.id = ids.id;
        '''
        paged_query = '''
      SELECT core_manga.*
      FROM (
          SELECT id
          FROM core_manga_fts_view,
               to_tsquery(%s) AS q
          WHERE document @@ q
          ORDER BY ts_rank(document, q) DESC,
                   name ASC,
                   url ASC
          LIMIT %s
          OFFSET %s
      ) AS ids
      INNER JOIN core_manga ON core_manga.id = ids.id;
        '''
        count_query = '''
      SELECT COUNT(*)
      FROM core_manga_fts_view
      WHERE document @@ to_tsquery(%s);
        '''
        return AdvRawQuerySet(raw_query=raw_query,
                              paged_query=paged_query,
                              count_query=count_query,
                              model=self.model,
                              params=[q],
                              using=self.db)
    def refresh(self):
        """Refresh the materialized view backing `search`."""
        cursor = connection.cursor()
        cursor.execute('REFRESH MATERIALIZED VIEW core_manga_fts_view;')
def _cover_path(instance, filename):
    """Upload path for a manga cover: ``<spider>/<filename>``."""
    spider_dir = instance.source.spider
    return os.path.join(spider_dir, filename)
class Manga(TimeStampedModel):
    """A manga series as published by a `Source`."""
    LEFT_TO_RIGHT = 'LR'
    RIGHT_TO_LEFT = 'RL'
    READING_DIRECTION = (
        (LEFT_TO_RIGHT, 'Left-to-right'),
        (RIGHT_TO_LEFT, 'Right-to-left'),
    )
    ONGOING = 'O'
    COMPLETED = 'C'
    STATUS = (
        (ONGOING, 'Ongoing'),
        (COMPLETED, 'Completed'),
    )
    ASC = 'ASC'
    DESC = 'DESC'
    RANK_ORDER = (
        (ASC, 'Ascending'),
        (DESC, 'Descending'),
    )
    name = models.CharField(max_length=200, db_index=True)
    # slug = models.SlugField(max_length=200)
    # release = models.DateField()
    author = models.CharField(max_length=200)
    artist = models.CharField(max_length=200)
    reading_direction = models.CharField(max_length=2,
                                         choices=READING_DIRECTION,
                                         default=RIGHT_TO_LEFT)
    status = models.CharField(max_length=1,
                              choices=STATUS,
                              default=ONGOING)
    genres = models.ManyToManyField(Genre)
    # `rank` is source-provided; `rank_order` records whether lower or
    # higher values mean "better" for that source.
    rank = models.FloatField(null=True, blank=True)
    rank_order = models.CharField(max_length=4,
                                  choices=RANK_ORDER,
                                  default=ASC)
    description = models.TextField()
    cover = models.ImageField(upload_to=_cover_path)
    url = models.URLField(unique=True, db_index=True)
    source = models.ForeignKey(Source, on_delete=models.CASCADE)
    objects = MangaQuerySet.as_manager()
    def __str__(self):
        return self.name
    def subscribe(self, user, language=None, issues_per_day=4, paused=False):
        """Subscribe an User to the current manga.

        Uses `all_objects` and resets `deleted`, so a previously deleted
        subscription is resurrected instead of violating the
        (manga, user) uniqueness constraint.
        """
        language = language if language else user.userprofile.language
        obj, created = Subscription.all_objects.update_or_create(
            manga=self,
            user=user,
            defaults={
                'language': language,
                'issues_per_day': issues_per_day,
                'paused': paused,
                'deleted': False,
            })
        return obj
    def is_subscribed(self, user):
        """Check if an user is subscribed to this manga."""
        return self.subscription(user).exists()
    def subscription(self, user):
        """Return the users' subscription of this manga."""
        return self.subscription_set.filter(user=user)
    def languages(self):
        """Return the number of issues per language."""
        return self.issue_set\
            .values('language')\
            .order_by('language')\
            .annotate(Count('language'))
class AltName(TimeStampedModel):
    """An alternative title for a `Manga`."""
    name = models.CharField(max_length=200)
    manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
    def __str__(self):
        return self.name
class Issue(TimeStampedModel):
    """A single issue (chapter) of a `Manga` in a given language."""
    name = models.CharField(max_length=200)
    # `number` is free text (e.g. "10.5"); `order` gives the sort key.
    number = models.CharField(max_length=10)
    order = models.IntegerField()
    language = models.CharField(max_length=2,
                                choices=SourceLanguage.LANGUAGE_CHOICES)
    release = models.DateField()
    url = models.URLField(unique=True, max_length=255)
    manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
    class Meta:
        ordering = ('order', 'name')
    def __str__(self):
        return self.name
    def is_sent(self, user):
        """Check if an user has received this issue."""
        return self.result(user, status=Result.SENT).exists()
    def create_result_if_needed(self, user, status, set_send_date=True):
        """Create or update the `Result` for this issue with a status.

        When `set_send_date` is True the send date is stamped with the
        current time.
        """
        defaults = {'status': status}
        if set_send_date:
            defaults['send_date'] = timezone.now()
        subscription = Subscription.objects.get(
            manga=self.manga, user=user)
        result, _ = Result.objects.update_or_create(
            issue=self,
            subscription=subscription,
            defaults=defaults)
        return result
    def result(self, user, status=None):
        """Return the Result for an user for this issue."""
        # XXX TODO - Avoid filtering by subscription__deleted using
        # the Subscription manager.
        query = self.result_set.filter(
            subscription__user=user,
            subscription__deleted=False)
        if status:
            query = query.filter(status=status)
        return query
    def retry_if_failed(self, user):
        """Increment the retry field of `Result` if status is FAILED."""
        self.result(user, status=Result.FAILED).update(retry=F('retry') + 1)
class SubscriptionQuerySet(models.QuerySet):
    """QuerySet for `Subscription` with a raw-SQL latest-activity helper."""
    def latests(self, user):
        """Return the latest subscriptions with changes in Result."""
        # See the notes from `MangaQuerySet.latests()`
        raw_query = '''
      SELECT core_subscription.id,
             MAX(core_result.modified) AS result__modified__max
      FROM core_subscription
      LEFT OUTER JOIN core_result
           ON (core_subscription.id = core_result.subscription_id)
      WHERE core_subscription.deleted = false
        AND core_subscription.user_id = %s
      GROUP BY core_subscription.id
      ORDER BY result__modified__max DESC NULLS LAST,
               core_subscription.id ASC;
        '''
        paged_query = '''
      SELECT core_subscription.id,
             MAX(core_result.modified) AS result__modified__max
      FROM core_subscription
      LEFT OUTER JOIN core_result
           ON (core_subscription.id = core_result.subscription_id)
      WHERE core_subscription.deleted = false
        AND core_subscription.user_id = %s
      GROUP BY core_subscription.id
      ORDER BY result__modified__max DESC NULLS LAST,
               core_subscription.id ASC
      LIMIT %s
      OFFSET %s;
        '''
        count_query = '''
      SELECT COUNT(*)
      FROM core_subscription
      WHERE core_subscription.deleted = false
        AND core_subscription.user_id = %s;
        '''
        return AdvRawQuerySet(raw_query=raw_query,
                              paged_query=paged_query,
                              count_query=count_query,
                              model=self.model,
                              params=[user.id],
                              using=self.db)
class SubscriptionManager(models.Manager):
    """Default manager: hides soft-deleted subscriptions."""
    def get_queryset(self):
        """Exclude deleted subscriptions."""
        return super(SubscriptionManager,
                     self).get_queryset().exclude(deleted=True)
class SubscriptionActiveManager(models.Manager):
    """Manager for active subscriptions: hides paused and soft-deleted ones."""
    def get_queryset(self):
        """Exclude paused and deleted subscriptions."""
        return super(SubscriptionActiveManager,
                     self).get_queryset().exclude(
                         Q(paused=True) | Q(deleted=True))
class Subscription(TimeStampedModel):
    """A user's subscription to a `Manga` in a given language.

    Subscriptions are soft-deleted (`deleted` flag); use `objects` for
    non-deleted rows, `actives` for non-deleted, non-paused rows and
    `all_objects` for everything.
    """
    # Number of retries before giving up in a FAILED result
    RETRY = 3
    manga = models.ForeignKey(Manga, on_delete=models.CASCADE)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
    language = models.CharField(max_length=2,
                                choices=SourceLanguage.LANGUAGE_CHOICES)
    issues_per_day = models.IntegerField(default=4)
    paused = models.BooleanField(default=False)
    deleted = models.BooleanField(default=False)
    objects = SubscriptionManager.from_queryset(SubscriptionQuerySet)()
    actives = SubscriptionActiveManager.from_queryset(SubscriptionQuerySet)()
    all_objects = models.Manager()
    class Meta:
        unique_together = ('manga', 'user')
    def __str__(self):
        return '%s (%d per day)' % (self.manga, self.issues_per_day)
    def issues(self):
        """Return the list of issues in the language of the Subscription."""
        return self.manga.issue_set.filter(language=self.language)
    def issues_to_send(self, retry=None):
        """Return the list of issues to send, ordered by number.

        Issues already processing/sent, or failed more than `retry`
        times, are excluded; the result is capped by the user's
        remaining daily quota.
        """
        if not retry:
            retry = Subscription.RETRY
        already_sent = Result.objects.processed_last_24hs(self.user,
                                                          subscription=self)
        remains = max(0, self.issues_per_day-already_sent)
        return self.manga.issue_set.filter(
            language=self.language
        ).exclude(
            pk__in=self.result_set.filter(
                Q(status__in=(Result.PROCESSING, Result.SENT)) |
                (Q(status=Result.FAILED) & Q(retry__gt=retry))
            ).values('issue__id')
        ).order_by('order')[:remains]
    def issues_to_retry(self, retry=None):
        """Return the list of issues to retry, ordered by number."""
        # This method doesn't take care about the limits of the user
        if not retry:
            retry = Subscription.RETRY
        return self.manga.issue_set.filter(
            language=self.language,
            result__subscription=self,
            result__status=Result.FAILED,
            result__retry__lte=retry
        ).order_by('order')
    def add_sent(self, issue):
        """Add or update a Result to a Subscription, marked as SENT."""
        # XXX TODO - add_sent is deprecated, use
        # Issue.create_result_if_needed, or extend the features inside
        # Subscription.
        return Result.objects.update_or_create(
            issue=issue,
            subscription=self,
            defaults={
                'status': Result.SENT,
                'send_date': timezone.now(),
            })
    def latest_issues(self):
        """Return the list of issues ordered by modified result."""
        return self.issues().filter(
            result__subscription=self
        ).annotate(
            models.Max('result__modified')
        ).order_by('-result__modified')
class ResultQuerySet(models.QuerySet):
    """QuerySet helpers for `Result`: per-status filters and the daily
    send-quota window."""
    # Hours of slack added when computing the "last 24 hours" window.
    TIME_DELTA = 2
    def latests(self, status=None):
        """Return results ordered by most recent change, optionally
        filtered by status."""
        query = self
        if status:
            query = query.filter(status=status)
        return query.order_by('-modified')
    def _processed_last_24hs(self, user, subscription=None):
        """Return the list of `Result` processed during the last 24 hours."""
        today = timezone.now()
        yesterday = today - timezone.timedelta(days=1)
        # XXX TODO - Objects are created / modified always after time
        # T. If the send process is slow, the error margin can be
        # bigger than the one used here.
        yesterday += timezone.timedelta(hours=ResultQuerySet.TIME_DELTA)
        query = self.filter(
            subscription__user=user,
            send_date__range=[yesterday, today],
        )
        if subscription:
            query = query.filter(subscription=subscription)
        return query
    def processed_last_24hs(self, user, subscription=None):
        """Return the number of `Result` processed during the last 24 hours."""
        return self._processed_last_24hs(user, subscription).count()
    def pending(self):
        return self.latests(status=Result.PENDING)
    def processing(self):
        return self.latests(status=Result.PROCESSING)
    def sent(self):
        return self.latests(status=Result.SENT)
    def failed(self):
        return self.latests(status=Result.FAILED)
class Result(TimeStampedModel):
    """Delivery record of an `Issue` for a `Subscription`.

    Tracks the delivery state machine (PENDING -> PROCESSING -> SENT or
    FAILED), the number of retries and the send timestamp used by the
    daily quota computation.
    """
    PENDING = 'PE'
    PROCESSING = 'PR'
    SENT = 'SE'
    FAILED = 'FA'
    STATUS_CHOICES = (
        (PENDING, 'Pending'),
        (PROCESSING, 'Processing'),
        (SENT, 'Sent'),
        (FAILED, 'Failed'),
    )
    issue = models.ForeignKey(Issue, on_delete=models.CASCADE)
    subscription = models.ForeignKey(Subscription, on_delete=models.CASCADE)
    status = models.CharField(max_length=2, choices=STATUS_CHOICES,
                              default=PENDING)
    missing_pages = models.IntegerField(default=0)
    send_date = models.DateTimeField(null=True, blank=True)
    retry = models.IntegerField(default=0)
    objects = ResultQuerySet.as_manager()
    class Meta:
        unique_together = ('issue', 'subscription')
    def __str__(self):
        return '%s (%s)' % (self.issue, self.get_status_display())
    def get_absolute_url(self):
        return reverse('result-detail', kwargs={'pk': self.pk})
    def set_status(self, status):
        """Set and persist the status."""
        self.status = status
        # If the result is marked as FAILED, unset the `send_date`.
        # In this way, if the result is moved to PENDING is not
        # counted as SENT. Also if is not moved, the user can have
        # one more issue for this day.
        if status == Result.FAILED:
            self.send_date = None
        self.save()
    def is_pending(self):
        return self.status == Result.PENDING
    def is_processing(self):
        return self.status == Result.PROCESSING
    def is_sent(self):
        return self.status == Result.SENT
    def is_failed(self):
        return self.status == Result.FAILED
| gpl-3.0 |
jbenden/ansible | test/units/modules/network/nxos/test_nxos_vxlan_vtep.py | 47 | 2418 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vxlan_vtep
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVxlanVtepVniModule(TestNxosModule):
    """Unit tests for the nxos_vxlan_vtep module (NVE interface handling)."""
    module = nxos_vxlan_vtep
    def setUp(self):
        # Patch config load/get so no real device connection is needed.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()
    def load_fixtures(self, commands=None, device=''):
        # The fixture represents the device's current running config;
        # it contains an existing 'interface nve1' but no 'nve2'.
        self.get_config.return_value = load_fixture('nxos_vxlan_vtep', 'config.cfg')
        self.load_config.return_value = None
    def test_nxos_vxlan_vtep(self):
        """Changing the description of an existing NVE interface."""
        set_module_args(dict(interface='nve1', description='simple description'))
        self.execute_module(changed=True, commands=['interface nve1', 'description simple description'])
    def test_nxos_vxlan_vtep_present_no_change(self):
        """Interface already present: no commands, no change."""
        set_module_args(dict(interface='nve1'))
        self.execute_module(changed=False, commands=[])
    def test_nxos_vxlan_vtep_absent(self):
        """Removing an existing interface emits 'no interface'."""
        set_module_args(dict(interface='nve1', state='absent'))
        self.execute_module(changed=True, commands=['no interface nve1'])
    def test_nxos_vxlan_vtep_absent_no_change(self):
        """Removing a non-existent interface: no commands, no change."""
        set_module_args(dict(interface='nve2', state='absent'))
        self.execute_module(changed=False, commands=[])
| gpl-3.0 |
XiaosongWei/blink-crosswalk | Tools/Scripts/webkitpy/layout_tests/models/test_configuration_unittest.py | 42 | 18084 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
from webkitpy.layout_tests.models.test_configuration import *
def make_mock_all_test_configurations_set():
    """Build the full set of mock TestConfiguration objects used in tests:
    every (version, architecture) pair crossed with both build types."""
    version_arch_pairs = (('snowleopard', 'x86'), ('xp', 'x86'), ('win7', 'x86'),
                          ('vista', 'x86'), ('lucid', 'x86'), ('lucid', 'x86_64'))
    return set(TestConfiguration(version, architecture, build_type)
               for version, architecture in version_arch_pairs
               for build_type in ('debug', 'release'))


MOCK_MACROS = {
    'mac': ['snowleopard'],
    'win': ['xp', 'vista', 'win7'],
    'linux': ['lucid'],
}
class TestConfigurationTest(unittest.TestCase):
    """Unit tests for the TestConfiguration value object (mapping protocol,
    string forms, hashing and equality)."""
    def test_items(self):
        config = TestConfiguration('xp', 'x86', 'release')
        result_config_dict = {}
        for category, specifier in config.items():
            result_config_dict[category] = specifier
        self.assertEqual({'version': 'xp', 'architecture': 'x86', 'build_type': 'release'}, result_config_dict)
    def test_keys(self):
        config = TestConfiguration('xp', 'x86', 'release')
        result_config_keys = []
        for category in config.keys():
            result_config_keys.append(category)
        self.assertEqual(set(['version', 'architecture', 'build_type']), set(result_config_keys))
    def test_str(self):
        config = TestConfiguration('xp', 'x86', 'release')
        self.assertEqual('<xp, x86, release>', str(config))
    def test_repr(self):
        config = TestConfiguration('xp', 'x86', 'release')
        self.assertEqual("TestConfig(version='xp', architecture='x86', build_type='release')", repr(config))
    def test_hash(self):
        # Configurations must be usable as dict keys / set members:
        # equal configurations hash equal, distinct ones are distinct keys.
        config_dict = {}
        config_dict[TestConfiguration('xp', 'x86', 'release')] = True
        self.assertIn(TestConfiguration('xp', 'x86', 'release'), config_dict)
        self.assertTrue(config_dict[TestConfiguration('xp', 'x86', 'release')])
        def query_unknown_key():
            return config_dict[TestConfiguration('xp', 'x86', 'debug')]
        self.assertRaises(KeyError, query_unknown_key)
        self.assertIn(TestConfiguration('xp', 'x86', 'release'), config_dict)
        self.assertNotIn(TestConfiguration('xp', 'x86', 'debug'), config_dict)
        configs_list = [TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'), TestConfiguration('xp', 'x86', 'debug')]
        self.assertEqual(len(configs_list), 3)
        self.assertEqual(len(set(configs_list)), 2)
    def test_eq(self):
        self.assertEqual(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'release'))
        self.assertNotEquals(TestConfiguration('xp', 'x86', 'release'), TestConfiguration('xp', 'x86', 'debug'))
    def test_values(self):
        config = TestConfiguration('xp', 'x86', 'release')
        result_config_values = []
        for value in config.values():
            result_config_values.append(value)
        self.assertEqual(set(['xp', 'x86', 'release']), set(result_config_values))
class SpecifierSorterTest(unittest.TestCase):
    """Unit tests for SpecifierSorter: specifier/category registration,
    macro expansion and priority-based sorting."""
    def __init__(self, testFunc):
        # Shared fixture: the full mock configuration set.
        self._all_test_configurations = make_mock_all_test_configurations_set()
        unittest.TestCase.__init__(self, testFunc)
    def test_init(self):
        sorter = SpecifierSorter()
        self.assertIsNone(sorter.category_for_specifier('control'))
        sorter = SpecifierSorter(self._all_test_configurations)
        self.assertEqual(sorter.category_for_specifier('xp'), 'version')
        sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
        self.assertEqual(sorter.category_for_specifier('mac'), 'version')
    def test_add_specifier(self):
        sorter = SpecifierSorter()
        self.assertIsNone(sorter.category_for_specifier('control'))
        sorter.add_specifier('version', 'control')
        self.assertEqual(sorter.category_for_specifier('control'), 'version')
        sorter.add_specifier('version', 'one')
        self.assertEqual(sorter.category_for_specifier('one'), 'version')
        sorter.add_specifier('architecture', 'renaissance')
        self.assertEqual(sorter.category_for_specifier('one'), 'version')
        self.assertEqual(sorter.category_for_specifier('renaissance'), 'architecture')
    def test_add_macros(self):
        sorter = SpecifierSorter(self._all_test_configurations)
        sorter.add_macros(MOCK_MACROS)
        # A macro takes the category of the specifiers it expands to.
        self.assertEqual(sorter.category_for_specifier('mac'), 'version')
        self.assertEqual(sorter.category_for_specifier('win'), 'version')
        self.assertEqual(sorter.category_for_specifier('x86'), 'architecture')
    def test_category_priority(self):
        sorter = SpecifierSorter(self._all_test_configurations)
        self.assertEqual(sorter.category_priority('version'), 0)
        self.assertEqual(sorter.category_priority('build_type'), 2)
    def test_specifier_priority(self):
        sorter = SpecifierSorter(self._all_test_configurations)
        self.assertEqual(sorter.specifier_priority('x86'), 1)
        self.assertEqual(sorter.specifier_priority('snowleopard'), 0)
    def test_sort_specifiers(self):
        # Sorting is by category priority: version, then architecture,
        # then build type.
        sorter = SpecifierSorter(self._all_test_configurations, MOCK_MACROS)
        self.assertEqual(sorter.sort_specifiers(set()), [])
        self.assertEqual(sorter.sort_specifiers(set(['x86'])), ['x86'])
        self.assertEqual(sorter.sort_specifiers(set(['x86', 'win7'])), ['win7', 'x86'])
        self.assertEqual(sorter.sort_specifiers(set(['x86', 'debug', 'win7'])), ['win7', 'x86', 'debug'])
        self.assertEqual(sorter.sort_specifiers(set(['snowleopard', 'x86', 'debug', 'win7'])), ['snowleopard', 'win7', 'x86', 'debug'])
        self.assertEqual(sorter.sort_specifiers(set(['x86', 'mac', 'debug', 'win7'])), ['mac', 'win7', 'x86', 'debug'])
class TestConfigurationConverterTest(unittest.TestCase):
def __init__(self, testFunc):
self._all_test_configurations = make_mock_all_test_configurations_set()
unittest.TestCase.__init__(self, testFunc)
def test_symmetric_difference(self):
self.assertEqual(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c'])]), set(['a', 'c']))
self.assertEqual(TestConfigurationConverter.symmetric_difference([set(['a', 'b']), set(['b', 'c']), set(['b', 'd'])]), set(['a', 'c', 'd']))
def test_to_config_set(self):
converter = TestConfigurationConverter(self._all_test_configurations)
self.assertEqual(converter.to_config_set(set()), self._all_test_configurations)
self.assertEqual(converter.to_config_set(set(['foo'])), set())
self.assertEqual(converter.to_config_set(set(['xp', 'foo'])), set())
errors = []
self.assertEqual(converter.to_config_set(set(['xp', 'foo']), errors), set())
self.assertEqual(errors, ["Unrecognized specifier 'foo'"])
self.assertEqual(converter.to_config_set(set(['xp', 'x86_64'])), set())
configs_to_match = set([
TestConfiguration('xp', 'x86', 'release'),
])
self.assertEqual(converter.to_config_set(set(['xp', 'release'])), configs_to_match)
configs_to_match = set([
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('vista', 'x86', 'release'),
TestConfiguration('win7', 'x86', 'release'),
TestConfiguration('xp', 'x86', 'release'),
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('lucid', 'x86_64', 'release'),
])
self.assertEqual(converter.to_config_set(set(['release'])), configs_to_match)
configs_to_match = set([
TestConfiguration('lucid', 'x86_64', 'release'),
TestConfiguration('lucid', 'x86_64', 'debug'),
])
self.assertEqual(converter.to_config_set(set(['x86_64'])), configs_to_match)
configs_to_match = set([
TestConfiguration('lucid', 'x86_64', 'release'),
TestConfiguration('lucid', 'x86_64', 'debug'),
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('lucid', 'x86', 'debug'),
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'debug'),
])
self.assertEqual(converter.to_config_set(set(['lucid', 'snowleopard'])), configs_to_match)
configs_to_match = set([
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('lucid', 'x86', 'debug'),
TestConfiguration('snowleopard', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'debug'),
])
self.assertEqual(converter.to_config_set(set(['lucid', 'snowleopard', 'x86'])), configs_to_match)
configs_to_match = set([
TestConfiguration('lucid', 'x86_64', 'release'),
TestConfiguration('lucid', 'x86', 'release'),
TestConfiguration('snowleopard', 'x86', 'release'),
])
self.assertEqual(converter.to_config_set(set(['lucid', 'snowleopard', 'release'])), configs_to_match)
def test_macro_expansion(self):
    """Macros in a specifier set expand to every version they cover."""
    converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)
    # (specifier set, expected matching configurations) pairs.
    cases = [
        (('win', 'release'),
         (('xp', 'x86', 'release'),
          ('vista', 'x86', 'release'),
          ('win7', 'x86', 'release'))),
        (('win', 'lucid', 'release'),
         (('xp', 'x86', 'release'),
          ('vista', 'x86', 'release'),
          ('win7', 'x86', 'release'),
          ('lucid', 'x86', 'release'),
          ('lucid', 'x86_64', 'release'))),
        (('win', 'mac', 'release'),
         (('xp', 'x86', 'release'),
          ('vista', 'x86', 'release'),
          ('win7', 'x86', 'release'),
          ('snowleopard', 'x86', 'release'))),
    ]
    for specifiers, expected in cases:
        expected_configs = set(TestConfiguration(*args) for args in expected)
        self.assertEqual(converter.to_config_set(set(specifiers)), expected_configs)
def test_to_specifier_lists(self):
    """Configuration sets convert back to minimal specifier lists."""
    converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)

    # Degenerate inputs: the full set collapses to [[]], the empty set to [].
    self.assertEqual(converter.to_specifiers_list(set(self._all_test_configurations)), [[]])
    self.assertEqual(converter.to_specifiers_list(set()), [])

    # (configuration tuples, expected specifier lists) pairs; duplicate
    # tuples are deliberately kept in some inputs — they collapse in a set.
    cases = [
        ((('xp', 'x86', 'release'),),
         [set(['release', 'xp'])]),
        ((('xp', 'x86', 'release'),
          ('xp', 'x86', 'debug')),
         [set(['xp'])]),
        ((('lucid', 'x86_64', 'debug'),
          ('xp', 'x86', 'release')),
         [set(['release', 'xp']), set(['debug', 'x86_64', 'linux'])]),
        ((('xp', 'x86', 'release'),
          ('xp', 'x86', 'release'),
          ('lucid', 'x86_64', 'debug'),
          ('lucid', 'x86', 'debug'),
          ('lucid', 'x86_64', 'debug'),
          ('lucid', 'x86', 'debug')),
         [set(['release', 'xp']), set(['debug', 'linux'])]),
        ((('xp', 'x86', 'release'),
          ('snowleopard', 'x86', 'release'),
          ('vista', 'x86', 'release'),
          ('win7', 'x86', 'release'),
          ('lucid', 'x86', 'release'),
          ('lucid', 'x86_64', 'release')),
         [set(['release'])]),
        ((('xp', 'x86', 'release'),
          ('snowleopard', 'x86', 'release')),
         [set(['xp', 'mac', 'release'])]),
        ((('xp', 'x86', 'release'),
          ('snowleopard', 'x86', 'release'),
          ('win7', 'x86', 'release'),
          ('win7', 'x86', 'debug'),
          ('lucid', 'x86', 'release')),
         [set(['win7']), set(['release', 'linux', 'x86']), set(['release', 'xp', 'mac'])]),
    ]
    for config_args, expected in cases:
        configs = set(TestConfiguration(*args) for args in config_args)
        self.assertEqual(converter.to_specifiers_list(configs), expected)
def test_macro_collapsing(self):
    """collapse_macros folds fully-present macro expansions in place."""
    macros = {'foo': ['bar', 'baz'], 'people': ['bob', 'alice', 'john']}
    # (input specifier lists, expected lists after collapsing) pairs.
    cases = [
        # A complete expansion collapses to the macro name.
        ([set(['john', 'godzilla', 'bob', 'alice'])],
         [set(['people', 'godzilla'])]),
        # An incomplete expansion (no 'bob') is left untouched.
        ([set(['john', 'godzilla', 'alice'])],
         [set(['john', 'godzilla', 'alice'])]),
        # Several macros can collapse inside one specifier set.
        ([set(['bar', 'godzilla', 'baz', 'bob', 'alice', 'john'])],
         [set(['foo', 'godzilla', 'people'])]),
        # Each specifier set in the list is collapsed independently.
        ([set(['bar', 'godzilla', 'baz', 'bob']),
          set(['bar', 'baz']),
          set(['people', 'alice', 'bob', 'john'])],
         [set(['bob', 'foo', 'godzilla']), set(['foo']), set(['people'])]),
    ]
    for specifiers_list, expected in cases:
        TestConfigurationConverter.collapse_macros(macros, specifiers_list)
        self.assertEqual(specifiers_list, expected)
def test_converter_macro_collapsing(self):
    """to_specifiers_list collapses macro expansions in its output.

    Note: two of the assertion blocks here were exact duplicates of
    earlier ones and were removed; coverage is unchanged.
    """
    converter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS)

    # All Windows versions collapse to the 'win' macro.
    configs_to_match = set([
        TestConfiguration('xp', 'x86', 'release'),
        TestConfiguration('vista', 'x86', 'release'),
        TestConfiguration('win7', 'x86', 'release'),
    ])
    self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'release'])])

    # Multiple macros may collapse in the same specifier set.
    configs_to_match = set([
        TestConfiguration('xp', 'x86', 'release'),
        TestConfiguration('vista', 'x86', 'release'),
        TestConfiguration('win7', 'x86', 'release'),
        TestConfiguration('lucid', 'x86', 'release'),
        TestConfiguration('lucid', 'x86_64', 'release'),
    ])
    self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'linux', 'release'])])

    configs_to_match = set([
        TestConfiguration('xp', 'x86', 'release'),
        TestConfiguration('vista', 'x86', 'release'),
        TestConfiguration('win7', 'x86', 'release'),
        TestConfiguration('snowleopard', 'x86', 'release'),
    ])
    self.assertEqual(converter.to_specifiers_list(configs_to_match), [set(['win', 'mac', 'release'])])
def test_specifier_converter_access(self):
    """The converter exposes its specifier sorter for category lookups."""
    sorter = TestConfigurationConverter(self._all_test_configurations, MOCK_MACROS).specifier_sorter()
    # Both a concrete version and its macro map to the 'version' category.
    for specifier in ('snowleopard', 'mac'):
        self.assertEqual(sorter.category_for_specifier(specifier), 'version')
| bsd-3-clause |
blueboxgroup/neutron | neutron/plugins/sriovnicagent/sriov_nic_agent.py | 1 | 14818 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import sys
import time
import eventlet
eventlet.monkey_patch()
from oslo.config import cfg
from oslo import messaging
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import constants as q_constants
from neutron.common import topics
from neutron.common import utils as q_utils
from neutron import context
from neutron.i18n import _LE, _LI
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.plugins.sriovnicagent.common import config # noqa
from neutron.plugins.sriovnicagent.common import exceptions as exc
from neutron.plugins.sriovnicagent import eswitch_manager as esm
LOG = logging.getLogger(__name__)
class SriovNicSwitchRpcCallbacks(sg_rpc.SecurityGroupAgentRpcCallbackMixin):
    """Server-side RPC endpoint consumed by the SR-IOV NIC switch agent."""

    # Set RPC API version to 1.0 by default.
    # history
    #   1.1 Support Security Group RPC
    target = messaging.Target(version='1.1')

    def __init__(self, context, agent, sg_agent):
        super(SriovNicSwitchRpcCallbacks, self).__init__()
        self.agent = agent
        self.sg_agent = sg_agent
        self.context = context

    def port_update(self, context, **kwargs):
        """Queue a port-update notification for the agent's polling loop."""
        LOG.debug("port_update received")
        updated_port = kwargs.get('port')
        # Only the MAC address is recorded. Storing (and later acting on)
        # the full port details here would risk applying notifications in a
        # different order than the API requests that produced them.
        self.agent.updated_devices.add(updated_port['mac_address'])
        LOG.debug("port_update RPC received for port: %s", updated_port['id'])
class SriovNicSwitchAgent(object):
    """L2 agent managing SR-IOV virtual functions on the embedded switch.

    The agent polls the eswitch manager for assigned VF devices, reconciles
    them with the plugin over RPC, and applies admin-state and
    security-group changes to the devices.
    """

    def __init__(self, physical_devices_mappings, exclude_devices,
                 polling_interval, root_helper):
        """Build the agent: eswitch manager, state report and RPC plumbing.

        :param physical_devices_mappings: physical network -> device mapping
        :param exclude_devices: per-device exclusion map (see
            setup_eswitch_mgr)
        :param polling_interval: seconds between daemon_loop iterations
        :param root_helper: helper used to gain root for device operations
        """
        self.polling_interval = polling_interval
        self.root_helper = root_helper
        self.setup_eswitch_mgr(physical_devices_mappings,
                               exclude_devices)
        configurations = {'device_mappings': physical_devices_mappings}
        self.agent_state = {
            'binary': 'neutron-sriov-nic-agent',
            'host': cfg.CONF.host,
            'topic': q_constants.L2_AGENT_TOPIC,
            'configurations': configurations,
            'agent_type': q_constants.AGENT_TYPE_NIC_SWITCH,
            'start_flag': True}

        # Stores port update notifications for processing in the main loop
        self.updated_devices = set()

        self.context = context.get_admin_context_without_session()
        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
                                                     self.sg_plugin_rpc,
                                                     self.root_helper)
        self._setup_rpc()
        # Initialize iteration counter
        self.iter_num = 0

    def _setup_rpc(self):
        """Wire up RPC: event consumers and the periodic state report."""
        self.agent_id = 'nic-switch-agent.%s' % socket.gethostname()
        LOG.info(_LI("RPC agent_id: %s"), self.agent_id)

        self.topic = topics.AGENT
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)
        # RPC network init
        # Handle updates from service
        self.endpoints = [SriovNicSwitchRpcCallbacks(self.context, self,
                                                     self.sg_agent)]
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.NETWORK, topics.DELETE],
                     [topics.SECURITY_GROUP, topics.UPDATE]]
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers)

        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

    def _report_state(self):
        """Report agent state (including device count) to the plugin.

        Failures are logged rather than raised so the heartbeat loop keeps
        running.
        """
        try:
            devices = len(self.eswitch_mgr.get_assigned_devices())
            self.agent_state.get('configurations')['devices'] = devices
            self.state_rpc.report_state(self.context,
                                        self.agent_state)
            # start_flag is only meaningful on the very first report.
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception(_LE("Failed reporting state!"))

    def setup_eswitch_mgr(self, device_mappings, exclude_devices=None):
        """Create the embedded switch manager.

        :param device_mappings: physical network -> network device mapping
        :param exclude_devices: devices to exclude, keyed by network
            device; defaults to no exclusions. (The former mutable ``{}``
            default argument was replaced with a ``None`` sentinel to avoid
            the shared-mutable-default pitfall.)
        """
        if exclude_devices is None:
            exclude_devices = {}
        self.eswitch_mgr = esm.ESwitchManager(device_mappings,
                                              exclude_devices,
                                              self.root_helper)

    def scan_devices(self, registered_devices, updated_devices):
        """Diff the currently assigned devices against the registered set.

        :param registered_devices: devices known from the previous iteration
        :param updated_devices: devices with pending update notifications
        :returns: dict with 'current', 'added', 'updated' and 'removed' sets
        """
        curr_devices = self.eswitch_mgr.get_assigned_devices()
        device_info = {}
        device_info['current'] = curr_devices
        device_info['added'] = curr_devices - registered_devices
        # we don't want to process updates for devices that don't exist
        device_info['updated'] = updated_devices & curr_devices
        # we need to clean up after devices are removed
        device_info['removed'] = registered_devices - curr_devices
        return device_info

    def _device_info_has_changes(self, device_info):
        """Return a truthy value when there is any device change to apply."""
        return (device_info.get('added')
                or device_info.get('updated')
                or device_info.get('removed'))

    def process_network_devices(self, device_info):
        """Apply security group and admin-state changes for a device diff.

        :returns: True when a resync with the plugin is required
        """
        resync_a = False
        resync_b = False

        self.sg_agent.prepare_devices_filter(device_info.get('added'))

        if device_info.get('updated'):
            self.sg_agent.refresh_firewall()
        # Updated devices are processed the same as new ones, as their
        # admin_state_up may have changed. The set union prevents duplicating
        # work when a device is new and updated in the same polling iteration.
        devices_added_updated = (set(device_info.get('added'))
                                 | set(device_info.get('updated')))
        if devices_added_updated:
            resync_a = self.treat_devices_added_updated(devices_added_updated)

        if device_info.get('removed'):
            resync_b = self.treat_devices_removed(device_info['removed'])
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)

    def treat_device(self, device, pci_slot, admin_state_up):
        """Set a device's admin state and notify the plugin of the result."""
        if self.eswitch_mgr.device_exists(device, pci_slot):
            try:
                self.eswitch_mgr.set_device_state(device, pci_slot,
                                                  admin_state_up)
            except exc.SriovNicError:
                LOG.exception(_LE("Failed to set device %s state"), device)
                return
            if admin_state_up:
                # update plugin about port status
                self.plugin_rpc.update_device_up(self.context,
                                                 device,
                                                 self.agent_id,
                                                 cfg.CONF.host)
            else:
                self.plugin_rpc.update_device_down(self.context,
                                                   device,
                                                   self.agent_id,
                                                   cfg.CONF.host)
        else:
            LOG.info(_LI("No device with MAC %s defined on agent."), device)

    def treat_devices_added_updated(self, devices):
        """Fetch details for added/updated devices and apply their state.

        :returns: True when the plugin could not be reached (resync needed)
        """
        try:
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context, devices, self.agent_id)
        except Exception as e:
            LOG.debug("Unable to get port details for devices "
                      "with MAC address %(devices)s: %(e)s",
                      {'devices': devices, 'e': e})
            # resync is needed
            return True

        for device_details in devices_details_list:
            device = device_details['device']
            LOG.debug("Port with MAC address %s is added", device)

            if 'port_id' in device_details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': device_details})
                profile = device_details['profile']
                self.treat_device(device_details['device'],
                                  profile.get('pci_slot'),
                                  device_details['admin_state_up'])
            else:
                LOG.info(_LI("Device with MAC %s not defined on plugin"),
                         device)
        return False

    def treat_devices_removed(self, devices):
        """Mark removed devices as down on the plugin.

        :returns: True when any removal notification failed (resync needed)
        """
        resync = False
        for device in devices:
            LOG.info(_LI("Removing device with mac_address %s"), device)
            try:
                dev_details = self.plugin_rpc.update_device_down(self.context,
                                                                 device,
                                                                 self.agent_id,
                                                                 cfg.CONF.host)
            except Exception as e:
                LOG.debug("Removing port failed for device %(device)s "
                          "due to %(exc)s", {'device': device, 'exc': e})
                resync = True
                continue
            if dev_details['exists']:
                LOG.info(_LI("Port %s updated."), device)
            else:
                LOG.debug("Device %s not defined on plugin", device)
        return resync

    def daemon_loop(self):
        """Main polling loop: scan devices, apply changes, report, repeat."""
        sync = True
        devices = set()

        LOG.info(_LI("SRIOV NIC Agent RPC Daemon Started!"))

        while True:
            start = time.time()
            LOG.debug("Agent rpc_loop - iteration:%d started",
                      self.iter_num)
            if sync:
                LOG.info(_LI("Agent out of sync with plugin!"))
                devices.clear()
                sync = False
            device_info = {}
            # Save updated devices dict to perform rollback in case
            # resync would be needed, and then clear self.updated_devices.
            # As the greenthread should not yield between these
            # two statements, this will should be thread-safe.
            updated_devices_copy = self.updated_devices
            self.updated_devices = set()
            try:
                device_info = self.scan_devices(devices, updated_devices_copy)
                if self._device_info_has_changes(device_info):
                    LOG.debug("Agent loop found changes! %s", device_info)
                    # If treat devices fails - indicates must resync with
                    # plugin
                    sync = self.process_network_devices(device_info)
                    devices = device_info['current']
            except Exception:
                LOG.exception(_LE("Error in agent loop. Devices info: %s"),
                              device_info)
                sync = True
                # Restore devices that were removed from this set earlier
                # without overwriting ones that may have arrived since.
                self.updated_devices |= updated_devices_copy

            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug("Loop iteration exceeded interval "
                          "(%(polling_interval)s vs. %(elapsed)s)!",
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})
            self.iter_num = self.iter_num + 1
class SriovNicAgentConfigParser(object):
    """Parses and validates the SR-IOV NIC agent device configuration."""

    def __init__(self):
        # physical network name -> network device name
        self.device_mappings = {}
        # network device name -> set of excluded PCI slots
        self.exclude_devices = {}

    def parse(self):
        """Parses device_mappings and exclude_devices.

        Parse and validate the consistency in both mappings
        """
        self.device_mappings = q_utils.parse_mappings(
            cfg.CONF.SRIOV_NIC.physical_device_mappings)
        self.exclude_devices = config.parse_exclude_devices(
            cfg.CONF.SRIOV_NIC.exclude_devices)
        self._validate()

    def _validate(self):
        """Validate configuration.

        Validate that network_device in excluded_device
        exists in device mappings
        """
        # Plain .values() / dict iteration behave identically on Python 2
        # and also work on Python 3, unlike the former itervalues() /
        # iterkeys() calls.
        dev_net_set = set(self.device_mappings.values())
        for dev_name in self.exclude_devices:
            if dev_name not in dev_net_set:
                raise ValueError(_("Device name %(dev_name)s is missing from "
                                   "physical_device_mappings") % {'dev_name':
                                                                  dev_name})
def main():
    """Entry point: load configuration, build the agent and run its loop."""
    common_config.init(sys.argv[1:])
    common_config.setup_logging()

    parser = SriovNicAgentConfigParser()
    try:
        # Parsing may raise ValueError on inconsistent mappings.
        parser.parse()
    except ValueError:
        LOG.exception(_LE("Failed on Agent configuration parse. "
                          "Agent terminated!"))
        raise SystemExit(1)
    device_mappings = parser.device_mappings
    exclude_devices = parser.exclude_devices
    LOG.info(_LI("Physical Devices mappings: %s"), device_mappings)
    LOG.info(_LI("Exclude Devices: %s"), exclude_devices)

    polling_interval = cfg.CONF.AGENT.polling_interval
    root_helper = cfg.CONF.AGENT.root_helper
    try:
        agent = SriovNicSwitchAgent(device_mappings,
                                    exclude_devices,
                                    polling_interval,
                                    root_helper)
    except exc.SriovNicError:
        LOG.exception(_LE("Agent Initialization Failed"))
        raise SystemExit(1)
    # Start everything.
    LOG.info(_LI("Agent initialized successfully, now running... "))
    agent.daemon_loop()
# Run the agent when this module is executed directly.
if __name__ == '__main__':
    main()
| apache-2.0 |
hryamzik/ansible | test/units/modules/network/enos/test_enos_facts.py | 57 | 3233 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from .enos_module import TestEnosModule, load_fixture
from ansible.modules.network.enos import enos_facts
from units.modules.utils import set_module_args
class TestEnosFacts(TestEnosModule):
    """Unit tests for the enos_facts module, using fixture-backed commands."""

    module = enos_facts

    def setUp(self):
        super(TestEnosFacts, self).setUp()
        # Patch run_commands so no real device connection is attempted.
        self.mock_run_commands = patch(
            'ansible.modules.network.enos.enos_facts.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestEnosFacts, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            """Return fixture contents for each command run_commands got."""
            module, commands = args
            output = list()

            for item in commands:
                # Commands may arrive as JSON objects or plain strings.
                try:
                    obj = json.loads(item)
                    command = obj['command']
                except ValueError:
                    command = item
                # Fixture files are named after the command, with spaces
                # replaced by '_' and '/' replaced by '7'.
                filename = str(command).replace(' ', '_')
                filename = filename.replace('/', '7')
                output.append(load_fixture(filename))
            return output

        self.run_commands.side_effect = load_from_file

    def test_enos_facts_gather_subset_default(self):
        """Default gather_subset collects hardware, default and interfaces."""
        set_module_args(dict())
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
        # assertEqual (not the deprecated assertEquals alias, removed in
        # Python 3.12) is used throughout.
        self.assertEqual('test1', ansible_facts['ansible_net_hostname'])
        self.assertIn('MGT', ansible_facts['ansible_net_interfaces'].keys())
        self.assertEqual(3992.75390625, ansible_facts['ansible_net_memtotal_mb'])
        self.assertEqual(3383.109375, ansible_facts['ansible_net_memfree_mb'])

    def test_enos_facts_gather_subset_config(self):
        """Explicit gather_subset=config also collects the device config."""
        set_module_args({'gather_subset': 'config'})
        result = self.execute_module()
        ansible_facts = result['ansible_facts']
        self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
        self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
        self.assertEqual('test1', ansible_facts['ansible_net_hostname'])
        self.assertIn('ansible_net_config', ansible_facts)
| gpl-3.0 |
batxes/4c2vhic | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/SHH_WT_models_highres8464.py | 4 | 88234 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((1322.17, 2091.85, 117.893), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((1366.03, 1972.29, -302.489), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((1406.32, 1945.34, 94.1765), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((1393.22, 1602.42, -122.277), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((1474.51, 1172.09, -288.852), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((1786.56, 1414.14, 175.412), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((2048.96, 1505.73, 651.586), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((1858.61, 994.629, 825.615), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((1721.13, 390.022, 950.258), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((1974.17, 705.491, 1052.41), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2172.49, 1196.85, 930.47), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((2360.28, 1797.96, 837.795), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((2525.86, 2053.27, 530.83), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((2398.85, 1855.89, 311.637), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2529.13, 1918.67, -15.4931), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((2518.29, 2156.03, -441.909), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((2547.41, 2543.93, -883.843), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((2632.25, 3061, -1064.57), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((2609.98, 3638.57, -1254.24), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((2416.33, 4153.87, -1708.64), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((2535.52, 4123.64, -1080.39), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((2912.07, 4191.41, -850.236), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((3267.49, 4466.21, -714.057), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((3293.45, 4656.56, -385.846), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((3163.58, 4845.34, -146.87), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((2937.31, 4864.14, 39.8653), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((3015.31, 4453.44, 86.1654), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((2777.02, 3980.62, 356.208), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((2603.22, 3553.18, 672.938), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((2614.44, 3108.97, 783.009), (0.7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((2738.05, 2866.89, 947.377), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((2826.54, 3135.53, 787.77), (0.7, 0.7, 0.7), 156.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((2556.2, 3209.15, 692.529), (0.7, 0.7, 0.7), 183.244)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((2307.44, 3273.44, 621.299), (0.7, 0.7, 0.7), 181.382)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((2206.33, 3438.49, 672.747), (0.7, 0.7, 0.7), 101.943)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((1987.85, 3664.38, 495.776), (1, 0.7, 0), 138.913)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((1693.37, 2685.46, 170.291), (0.7, 0.7, 0.7), 221.737)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((1204.61, 1986.42, 106.406), (0.7, 0.7, 0.7), 256.38)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((910.936, 1626.54, 568.214), (0.7, 0.7, 0.7), 221.694)
# Auto-generated Chimera marker placement, refactored from ~745 copy-pasted
# five-line stanzas into a data table driven by a single loop.
# Each row is (marker-set name, (x, y, z) center, RGB color, radius).
# Behavior is identical to the original unrolled statements: for every
# particle, the named marker set is created once if absent and cached in
# `marker_sets`, then one sphere marker is placed in it.  After the loop,
# `s` and `mark` are bound to the last set/marker, exactly as before.
# NOTE(review): `marker_sets`, `new_marker_set`, and `place_marker` come from
# earlier in this generated file (Chimera marker API) — not visible here.
_GREY = (0.7, 0.7, 0.7)  # shared default color; immutable, safe to reuse
_PARTICLE_MARKERS = [
    ("particle_39 geometry", (1287.73, 1626.68, 1143.14), _GREY, 259.341),
    ("particle_40 geometry", (1977.4, 1947.92, 1397.14), _GREY, 117.89),
    ("particle_41 geometry", (2512.84, 2593.68, 1362.35), _GREY, 116.071),
    ("particle_42 geometry", (2390.97, 3080.08, 1375.24), _GREY, 268.224),
    ("particle_43 geometry", (2104.86, 2996.42, 1514.48), _GREY, 386.918),
    ("particle_44 geometry", (1875.82, 2476.4, 1772.24), _GREY, 121.316),
    ("particle_45 geometry", (1488.13, 2359.68, 1927.69), _GREY, 138.363),
    ("particle_46 geometry", (1505.11, 2950.1, 1523.11), (1, 0.7, 0), 175.207),
    ("particle_47 geometry", (1017.88, 2561.3, 1868.77), _GREY, 131.468),
    ("particle_48 geometry", (421.615, 2340.12, 2219.44), _GREY, 287.894),
    ("particle_49 geometry", (832.18, 2646.6, 2416.57), _GREY, 88.1109),
    ("particle_50 geometry", (1357.59, 2833.37, 2249.94), _GREY, 145.385),
    ("particle_51 geometry", (1568.66, 2788.75, 2268.89), _GREY, 155.452),
    ("particle_52 geometry", (1168.5, 2563.2, 2686.72), _GREY, 145.512),
    ("particle_53 geometry", (862.587, 2334.38, 3039.5), _GREY, 99.9972),
    ("particle_54 geometry", (666.885, 2034.99, 3303.74), _GREY, 327.529),
    ("particle_55 geometry", (1273.48, 1884.68, 3182.78), _GREY, 137.983),
    ("particle_56 geometry", (1642.28, 2172.13, 2985.21), _GREY, 83.3733),
    ("particle_57 geometry", (1969.22, 2542.92, 2706.66), _GREY, 101.562),
    ("particle_58 geometry", (2169.53, 2879.89, 2357.47), _GREY, 165.689),
    ("particle_59 geometry", (1946.45, 3098.66, 2326.66), _GREY, 136.925),
    ("particle_60 geometry", (1849.76, 3116.06, 2331.82), _GREY, 123.389),
    ("particle_61 geometry", (1787.65, 2895.62, 2723.86), _GREY, 184.47),
    ("particle_62 geometry", (1643.44, 2650.52, 3466.86), _GREY, 148.473),
    ("particle_63 geometry", (1398.21, 2434.66, 4397.82), _GREY, 241.406),
    ("particle_64 geometry", (1613.17, 2135.32, 3871.37), _GREY, 182.736),
    ("particle_65 geometry", (1616.24, 2102.11, 3420.59), _GREY, 166.62),
    ("particle_66 geometry", (1587.38, 2396.05, 3337.87), _GREY, 113.872),
    ("particle_67 geometry", (1628.21, 2540.37, 3045.97), _GREY, 110.065),
    ("particle_68 geometry", (1461.41, 2719.67, 2760.94), _GREY, 150.08),
    ("particle_69 geometry", (1151.14, 2906.51, 2480.21), _GREY, 118.525),
    ("particle_70 geometry", (730.453, 3189.99, 2325.2), _GREY, 163.955),
    ("particle_71 geometry", (452.378, 3243.03, 2552.91), _GREY, 170.131),
    ("particle_72 geometry", (658.188, 2881.34, 3193.56), _GREY, 78.2127),
    ("particle_73 geometry", (928.863, 2390.79, 3811.83), _GREY, 251.896),
    ("particle_74 geometry", (1219.48, 1881.64, 4146.74), _GREY, 167.55),
    ("particle_75 geometry", (1477.32, 1560.96, 4156.81), _GREY, 167.846),
    ("particle_76 geometry", (1468.83, 1974.91, 4476.66), _GREY, 259.68),
    ("particle_77 geometry", (1153.92, 2340.43, 4477.53), _GREY, 80.2854),
    ("particle_78 geometry", (998.731, 2344.9, 4617.16), _GREY, 82.4427),
    ("particle_79 geometry", (1080.79, 2393.43, 4988.13), _GREY, 212.811),
    ("particle_80 geometry", (1796.41, 2632.4, 4954.99), _GREY, 176.391),
    ("particle_81 geometry", (2232.19, 2758.09, 4366.89), _GREY, 99.3204),
    ("particle_82 geometry", (2271.11, 2973.05, 3802.67), _GREY, 166.62),
    ("particle_83 geometry", (2335.41, 3234.77, 3653.74), _GREY, 102.831),
    ("particle_84 geometry", (2267.15, 3154.24, 4552.37), _GREY, 65.0997),
    ("particle_85 geometry", (2011.14, 2729.63, 4380.67), _GREY, 92.1294),
    ("particle_86 geometry", (1858.41, 2414.26, 3923.32), _GREY, 194.791),
    ("particle_87 geometry", (1814.74, 2089.08, 3639.97), _GREY, 120.766),
    ("particle_88 geometry", (1834.95, 1789.21, 4131.21), _GREY, 217.803),
    ("particle_89 geometry", (1589.58, 2098.94, 4149.67), _GREY, 115.775),
    ("particle_90 geometry", (1331.39, 2282.63, 3873.38), _GREY, 115.648),
    ("particle_91 geometry", (1503.36, 2260.18, 3582.56), _GREY, 83.8386),
    ("particle_92 geometry", (1678.42, 1921.55, 3470.3), _GREY, 124.32),
    ("particle_93 geometry", (1712.46, 1522.64, 3238.01), _GREY, 185.993),
    ("particle_94 geometry", (1342.27, 1029.3, 3113.08), _GREY, 238.826),
    ("particle_95 geometry", (814.868, 894.957, 3179.26), _GREY, 128.465),
    ("particle_96 geometry", (730.906, 1529.52, 3270.11), _GREY, 203.209),
    ("particle_97 geometry", (1097.53, 1882.45, 3355.34), _GREY, 160.486),
    ("particle_98 geometry", (1330.31, 1643.73, 3459.36), _GREY, 149.277),
    ("particle_99 geometry", (1027.89, 1418.21, 3846.67), _GREY, 35.7435),
    ("particle_100 geometry", (1303.28, 2272.41, 3406.08), _GREY, 98.3898),
    ("particle_101 geometry", (1810.34, 3038.55, 2888.29), _GREY, 188.404),
    ("particle_102 geometry", (2279.62, 3113.28, 2698.57), _GREY, 110.318),
    ("particle_103 geometry", (2253.89, 2902.59, 3025.04), _GREY, 127.534),
    ("particle_104 geometry", (2062.87, 2668.48, 3266.33), _GREY, 91.368),
    ("particle_105 geometry", (1788.82, 2420.19, 3425.26), _GREY, 131.045),
    ("particle_106 geometry", (1457.69, 2169.71, 3373.97), _GREY, 143.608),
    ("particle_107 geometry", (1485.04, 1780.54, 3291.26), _GREY, 135.783),
    ("particle_108 geometry", (1549.05, 1455.09, 3234.46), _GREY, 92.5947),
    ("particle_109 geometry", (1795.58, 1550.57, 3246.42), _GREY, 150.123),
    ("particle_110 geometry", (2010.63, 1652.11, 3291.92), _GREY, 121.57),
    ("particle_111 geometry", (2184.93, 1430.67, 3467.46), _GREY, 104.777),
    ("particle_112 geometry", (2446.63, 1668.26, 3251.14), _GREY, 114.844),
    ("particle_113 geometry", (2724.26, 1925.56, 3012.09), _GREY, 150.588),
    ("particle_114 geometry", (2616.15, 2325.48, 2953.25), _GREY, 103.55),
    ("particle_115 geometry", (2523.99, 2778.66, 3237.96), _GREY, 215.392),
    ("particle_116 geometry", (2513.7, 3304.4, 3414.48), _GREY, 99.9126),
    ("particle_117 geometry", (2578.06, 3592.66, 4067.96), _GREY, 99.7857),
    ("particle_118 geometry", (2422.27, 3722.1, 4597.19), _GREY, 109.98),
    ("particle_119 geometry", (2587.24, 3369.21, 4261.35), _GREY, 102.831),
    ("particle_120 geometry", (2489.28, 3105.85, 3965.55), _GREY, 103.593),
    ("particle_121 geometry", (2290.98, 2736.54, 3707.29), _GREY, 173.472),
    ("particle_122 geometry", (1912.31, 2341.01, 3826.07), _GREY, 113.575),
    ("particle_123 geometry", (1743.92, 1951.65, 3600.37), _GREY, 128.296),
    ("particle_124 geometry", (1654.17, 1513.17, 3422.62), _GREY, 145.004),
    ("particle_125 geometry", (1679.99, 1088.66, 3055.62), _GREY, 148.261),
    ("particle_126 geometry", (1474.53, 497.386, 2854.5), _GREY, 127.704),
    ("particle_127 geometry", (1105.55, 37.5155, 2729.91), _GREY, 129.607),
    ("particle_128 geometry", (892.595, 399.331, 2922.9), _GREY, 139.759),
    ("particle_129 geometry", (826.614, 994.339, 3044.07), _GREY, 118.567),
    ("particle_130 geometry", (1037.77, 1134.59, 3331.62), _GREY, 136.164),
    ("particle_131 geometry", (1370.28, 1389.15, 3457.64), _GREY, 121.655),
    ("particle_132 geometry", (1775.23, 1534.99, 3545.89), _GREY, 127.492),
    ("particle_133 geometry", (2052.78, 1459.28, 3869.91), _GREY, 138.617),
    ("particle_134 geometry", (2414.72, 1484.68, 3802.49), _GREY, 120.766),
    ("particle_135 geometry", (2552.02, 1710.88, 3736.16), _GREY, 145.893),
    ("particle_136 geometry", (2254.73, 1821.56, 3393.17), _GREY, 185.02),
    ("particle_137 geometry", (2199.63, 1973.7, 2867.08), _GREY, 221.314),
    ("particle_138 geometry", (2352.13, 2102.42, 2387.3), _GREY, 165.139),
    ("particle_139 geometry", (2312.08, 2367.11, 2474.83), _GREY, 179.437),
    ("particle_140 geometry", (2160.13, 2491.11, 2858.57), _GREY, 137.898),
    ("particle_141 geometry", (2068.69, 2580.33, 3207.93), _GREY, 124.658),
    ("particle_142 geometry", (2235.66, 2445.22, 3476.92), _GREY, 97.7553),
    ("particle_143 geometry", (2337.1, 2416.87, 3770.12), _GREY, 92.9331),
    ("particle_144 geometry", (2337.95, 2430.64, 4127.81), _GREY, 123.135),
    ("particle_145 geometry", (2311.11, 2644.32, 3791.47), _GREY, 125.716),
    ("particle_146 geometry", (2312.38, 2623.97, 3484.16), _GREY, 127.534),
    ("particle_147 geometry", (2145.98, 2400.55, 3419.09), _GREY, 94.9212),
    ("particle_148 geometry", (2122.87, 2444.91, 2984.37), _GREY, 137.644),
    ("particle_149 geometry", (2110.24, 2352.57, 2640.69), _GREY, 149.277),
    ("particle_150 geometry", (2394.24, 2157.27, 2739.51), _GREY, 103.677),
    ("particle_151 geometry", (2616.15, 1789.6, 2994.62), _GREY, 99.6588),
    ("particle_152 geometry", (2792.58, 1482.65, 3143.83), _GREY, 134.133),
    ("particle_153 geometry", (2825.46, 1812.91, 3170.11), _GREY, 173.007),
    ("particle_154 geometry", (2663.11, 2233.21, 2811.31), _GREY, 141.028),
    ("particle_155 geometry", (2456.5, 2599.59, 2586.8), _GREY, 161.121),
    ("particle_156 geometry", (2101.04, 2635.57, 2633.13), _GREY, 119.582),
    ("particle_157 geometry", (2000.65, 2497.18, 2996.91), _GREY, 137.094),
    ("particle_158 geometry", (1916.1, 2500.7, 3492.57), _GREY, 149.234),
    ("particle_159 geometry", (2029.39, 2906.98, 3513.92), _GREY, 151.011),
    ("particle_160 geometry", (2238.8, 3267.22, 3232.69), _GREY, 184.216),
    ("particle_161 geometry", (2637.17, 3182.07, 3237.74), _GREY, 170.596),
    ("particle_162 geometry", (2657.89, 2973.75, 3837), _GREY, 215.603),
    ("particle_163 geometry", (2612.99, 2723.2, 4689.15), _GREY, 79.0164),
    ("particle_164 geometry", (2824.31, 2458.64, 4719.86), _GREY, 77.2821),
    ("particle_165 geometry", (2853.38, 2298.04, 4418.3), _GREY, 188.658),
    ("particle_166 geometry", (3152.68, 2275.23, 4327.99), _GREY, 115.437),
    ("particle_167 geometry", (2930.68, 2456.89, 3805.4), _GREY, 88.4916),
    ("particle_168 geometry", (2649.32, 2658.42, 3276.66), _GREY, 108.88),
    ("particle_169 geometry", (2352.48, 2708.34, 3082.58), _GREY, 172.119),
    ("particle_170 geometry", (2324.09, 2569.1, 3545.11), _GREY, 139.505),
    ("particle_171 geometry", (2297.59, 2428.92, 4003.82), _GREY, 92.7639),
    ("particle_172 geometry", (2100.3, 2551.02, 3971.57), _GREY, 89.8452),
    ("particle_173 geometry", (2366.14, 2451.86, 3920.14), _GREY, 149.446),
    ("particle_174 geometry", (2666.58, 2302.01, 4036.74), _GREY, 126.858),
    ("particle_175 geometry", (2530.8, 2144.29, 4285.1), _GREY, 106.046),
    ("particle_176 geometry", (2036.9, 2193.63, 4298.27), _GREY, 156.298),
    ("particle_177 geometry", (1456.1, 2120.8, 4261.22), _GREY, 231.212),
    ("particle_178 geometry", (1133.41, 2430.03, 3919.78), _GREY, 88.4916),
    ("particle_179 geometry", (1259.6, 2639.67, 3529.9), _GREY, 111.334),
    ("particle_180 geometry", (1747.94, 2801.69, 3165.91), _GREY, 127.619),
    ("particle_181 geometry", (2118.03, 2970.88, 2919.07), _GREY, 230.746),
    ("particle_182 geometry", (2104.62, 2910.25, 3303.77), _GREY, 124.573),
    ("particle_183 geometry", (2042.68, 2808.57, 3906.43), _GREY, 124.489),
    ("particle_184 geometry", (2277.21, 2548.41, 4077.19), _GREY, 196.61),
    ("particle_185 geometry", (2347.29, 2892.31, 3962.52), _GREY, 134.049),
    ("particle_186 geometry", (2306.9, 3213.87, 4049.97), _GREY, 141.493),
    ("particle_187 geometry", (2172.29, 3425.23, 4371.58), _GREY, 172.203),
]
for _pname, _center, _color, _radius in _PARTICLE_MARKERS:
    # Create-once semantics: reuse a marker set if an earlier stanza (or a
    # previous run of this loop) already registered it under the same name.
    if _pname not in marker_sets:
        s = new_marker_set(_pname)
        marker_sets[_pname] = s
    s = marker_sets[_pname]
    mark = s.place_marker(_center, _color, _radius)
if "particle_188 geometry" not in marker_sets:
s=new_marker_set('particle_188 geometry')
marker_sets["particle_188 geometry"]=s
s= marker_sets["particle_188 geometry"]
mark=s.place_marker((2530.54, 2941.19, 4103.98), (0.7, 0.7, 0.7), 271.354)
if "particle_189 geometry" not in marker_sets:
s=new_marker_set('particle_189 geometry')
marker_sets["particle_189 geometry"]=s
s= marker_sets["particle_189 geometry"]
mark=s.place_marker((2637.74, 2516.74, 3892.36), (0.7, 0.7, 0.7), 97.0785)
if "particle_190 geometry" not in marker_sets:
s=new_marker_set('particle_190 geometry')
marker_sets["particle_190 geometry"]=s
s= marker_sets["particle_190 geometry"]
mark=s.place_marker((2502.88, 2126.9, 3845.7), (0.7, 0.7, 0.7), 151.857)
if "particle_191 geometry" not in marker_sets:
s=new_marker_set('particle_191 geometry')
marker_sets["particle_191 geometry"]=s
s= marker_sets["particle_191 geometry"]
mark=s.place_marker((2567.78, 1566.45, 3642.44), (0.7, 0.7, 0.7), 199.233)
if "particle_192 geometry" not in marker_sets:
s=new_marker_set('particle_192 geometry')
marker_sets["particle_192 geometry"]=s
s= marker_sets["particle_192 geometry"]
mark=s.place_marker((2652.11, 1501.2, 3057.56), (0.7, 0.7, 0.7), 118.863)
if "particle_193 geometry" not in marker_sets:
s=new_marker_set('particle_193 geometry')
marker_sets["particle_193 geometry"]=s
s= marker_sets["particle_193 geometry"]
mark=s.place_marker((2527.7, 1187.05, 2747.75), (0.7, 0.7, 0.7), 172.415)
if "particle_194 geometry" not in marker_sets:
s=new_marker_set('particle_194 geometry')
marker_sets["particle_194 geometry"]=s
s= marker_sets["particle_194 geometry"]
mark=s.place_marker((2354.51, 691.437, 2734.69), (0.7, 0.7, 0.7), 134.26)
if "particle_195 geometry" not in marker_sets:
s=new_marker_set('particle_195 geometry')
marker_sets["particle_195 geometry"]=s
s= marker_sets["particle_195 geometry"]
mark=s.place_marker((2184.19, -177.32, 3024.73), (0.7, 0.7, 0.7), 139.548)
if "particle_196 geometry" not in marker_sets:
s=new_marker_set('particle_196 geometry')
marker_sets["particle_196 geometry"]=s
s= marker_sets["particle_196 geometry"]
mark=s.place_marker((1754.67, 46.5932, 3304.69), (0.7, 0.7, 0.7), 196.526)
if "particle_197 geometry" not in marker_sets:
s=new_marker_set('particle_197 geometry')
marker_sets["particle_197 geometry"]=s
s= marker_sets["particle_197 geometry"]
mark=s.place_marker((1634.76, 809.834, 3447.58), (0.7, 0.7, 0.7), 136.206)
if "particle_198 geometry" not in marker_sets:
s=new_marker_set('particle_198 geometry')
marker_sets["particle_198 geometry"]=s
s= marker_sets["particle_198 geometry"]
mark=s.place_marker((1660.01, 1708.57, 3102.2), (0.7, 0.7, 0.7), 152.322)
if "particle_199 geometry" not in marker_sets:
s=new_marker_set('particle_199 geometry')
marker_sets["particle_199 geometry"]=s
s= marker_sets["particle_199 geometry"]
mark=s.place_marker((1878.4, 2349.62, 2903.16), (0.7, 0.7, 0.7), 126.054)
if "particle_200 geometry" not in marker_sets:
s=new_marker_set('particle_200 geometry')
marker_sets["particle_200 geometry"]=s
s= marker_sets["particle_200 geometry"]
mark=s.place_marker((2001.52, 2570.94, 3287.64), (0.7, 0.7, 0.7), 164.378)
if "particle_201 geometry" not in marker_sets:
s=new_marker_set('particle_201 geometry')
marker_sets["particle_201 geometry"]=s
s= marker_sets["particle_201 geometry"]
mark=s.place_marker((2267.95, 2601.77, 3668.47), (0.7, 0.7, 0.7), 122.205)
if "particle_202 geometry" not in marker_sets:
s=new_marker_set('particle_202 geometry')
marker_sets["particle_202 geometry"]=s
s= marker_sets["particle_202 geometry"]
mark=s.place_marker((2643.26, 2540.17, 3906.17), (0.7, 0.7, 0.7), 134.979)
if "particle_203 geometry" not in marker_sets:
s=new_marker_set('particle_203 geometry')
marker_sets["particle_203 geometry"]=s
s= marker_sets["particle_203 geometry"]
mark=s.place_marker((2857.41, 2470.44, 3622), (0.7, 0.7, 0.7), 136.375)
if "particle_204 geometry" not in marker_sets:
s=new_marker_set('particle_204 geometry')
marker_sets["particle_204 geometry"]=s
s= marker_sets["particle_204 geometry"]
mark=s.place_marker((2601.9, 2359.71, 3741.76), (0.7, 0.7, 0.7), 151.688)
if "particle_205 geometry" not in marker_sets:
s=new_marker_set('particle_205 geometry')
marker_sets["particle_205 geometry"]=s
s= marker_sets["particle_205 geometry"]
mark=s.place_marker((2604.07, 2358.94, 3865.37), (0.7, 0.7, 0.7), 116.156)
if "particle_206 geometry" not in marker_sets:
s=new_marker_set('particle_206 geometry')
marker_sets["particle_206 geometry"]=s
s= marker_sets["particle_206 geometry"]
mark=s.place_marker((2578.48, 2578.09, 3188.11), (0.7, 0.7, 0.7), 122.839)
if "particle_207 geometry" not in marker_sets:
s=new_marker_set('particle_207 geometry')
marker_sets["particle_207 geometry"]=s
s= marker_sets["particle_207 geometry"]
mark=s.place_marker((2213.95, 2672.38, 2800.1), (0.7, 0.7, 0.7), 164.716)
if "particle_208 geometry" not in marker_sets:
s=new_marker_set('particle_208 geometry')
marker_sets["particle_208 geometry"]=s
s= marker_sets["particle_208 geometry"]
mark=s.place_marker((1783.38, 2180.69, 3297.13), (0.7, 0.7, 0.7), 303.672)
if "particle_209 geometry" not in marker_sets:
s=new_marker_set('particle_209 geometry')
marker_sets["particle_209 geometry"]=s
s= marker_sets["particle_209 geometry"]
mark=s.place_marker((1819.9, 1460.78, 4163.11), (0.7, 0.7, 0.7), 220.298)
if "particle_210 geometry" not in marker_sets:
s=new_marker_set('particle_210 geometry')
marker_sets["particle_210 geometry"]=s
s= marker_sets["particle_210 geometry"]
mark=s.place_marker((2261.65, 1844.78, 4408.8), (0.7, 0.7, 0.7), 175.883)
if "particle_211 geometry" not in marker_sets:
s=new_marker_set('particle_211 geometry')
marker_sets["particle_211 geometry"]=s
s= marker_sets["particle_211 geometry"]
mark=s.place_marker((2391.27, 2488.85, 4595), (0.7, 0.7, 0.7), 233.581)
if "particle_212 geometry" not in marker_sets:
s=new_marker_set('particle_212 geometry')
marker_sets["particle_212 geometry"]=s
s= marker_sets["particle_212 geometry"]
mark=s.place_marker((2036.09, 3163.4, 4425.57), (0.7, 0.7, 0.7), 231.127)
if "particle_213 geometry" not in marker_sets:
s=new_marker_set('particle_213 geometry')
marker_sets["particle_213 geometry"]=s
s= marker_sets["particle_213 geometry"]
mark=s.place_marker((2236.56, 3757.9, 4428.9), (0.7, 0.7, 0.7), 247.413)
if "particle_214 geometry" not in marker_sets:
s=new_marker_set('particle_214 geometry')
marker_sets["particle_214 geometry"]=s
s= marker_sets["particle_214 geometry"]
mark=s.place_marker((2774.63, 4117.81, 4522.07), (0.7, 0.7, 0.7), 200.206)
if "particle_215 geometry" not in marker_sets:
s=new_marker_set('particle_215 geometry')
marker_sets["particle_215 geometry"]=s
s= marker_sets["particle_215 geometry"]
mark=s.place_marker((3154.42, 3950.01, 4563.82), (0.7, 0.7, 0.7), 150.419)
if "particle_216 geometry" not in marker_sets:
s=new_marker_set('particle_216 geometry')
marker_sets["particle_216 geometry"]=s
s= marker_sets["particle_216 geometry"]
mark=s.place_marker((2922.84, 3899.95, 3995.1), (0.7, 0.7, 0.7), 140.14)
if "particle_217 geometry" not in marker_sets:
s=new_marker_set('particle_217 geometry')
marker_sets["particle_217 geometry"]=s
s= marker_sets["particle_217 geometry"]
mark=s.place_marker((2616.43, 4051.76, 3689.89), (0.7, 0.7, 0.7), 132.949)
if "particle_218 geometry" not in marker_sets:
s=new_marker_set('particle_218 geometry')
marker_sets["particle_218 geometry"]=s
s= marker_sets["particle_218 geometry"]
mark=s.place_marker((2415.58, 4027.45, 3362.73), (0.7, 0.7, 0.7), 141.113)
if "particle_219 geometry" not in marker_sets:
s=new_marker_set('particle_219 geometry')
marker_sets["particle_219 geometry"]=s
s= marker_sets["particle_219 geometry"]
mark=s.place_marker((2120.33, 3857.34, 3501.12), (0.7, 0.7, 0.7), 171.526)
if "particle_220 geometry" not in marker_sets:
s=new_marker_set('particle_220 geometry')
marker_sets["particle_220 geometry"]=s
s= marker_sets["particle_220 geometry"]
mark=s.place_marker((1915.33, 3554.6, 4006.39), (0.7, 0.7, 0.7), 326.937)
if "particle_221 geometry" not in marker_sets:
s=new_marker_set('particle_221 geometry')
marker_sets["particle_221 geometry"]=s
s= marker_sets["particle_221 geometry"]
mark=s.place_marker((1967.95, 3006.34, 4243.26), (0.7, 0.7, 0.7), 92.0871)
if "particle_222 geometry" not in marker_sets:
s=new_marker_set('particle_222 geometry')
marker_sets["particle_222 geometry"]=s
s= marker_sets["particle_222 geometry"]
mark=s.place_marker((1836.56, 2699.08, 3873.63), (0.7, 0.7, 0.7), 210.273)
if "particle_223 geometry" not in marker_sets:
s=new_marker_set('particle_223 geometry')
marker_sets["particle_223 geometry"]=s
s= marker_sets["particle_223 geometry"]
mark=s.place_marker((1906.83, 2867.93, 3127.77), (0.7, 0.7, 0.7), 122.628)
if "particle_224 geometry" not in marker_sets:
s=new_marker_set('particle_224 geometry')
marker_sets["particle_224 geometry"]=s
s= marker_sets["particle_224 geometry"]
mark=s.place_marker((1977.83, 3030.01, 2942.87), (0.7, 0.7, 0.7), 109.176)
if "particle_225 geometry" not in marker_sets:
s=new_marker_set('particle_225 geometry')
marker_sets["particle_225 geometry"]=s
s= marker_sets["particle_225 geometry"]
mark=s.place_marker((2120.94, 3037.34, 3222.43), (0.7, 0.7, 0.7), 142.213)
if "particle_226 geometry" not in marker_sets:
s=new_marker_set('particle_226 geometry')
marker_sets["particle_226 geometry"]=s
s= marker_sets["particle_226 geometry"]
mark=s.place_marker((1938.58, 2945.41, 3440.65), (0.7, 0.7, 0.7), 250.078)
if "particle_227 geometry" not in marker_sets:
s=new_marker_set('particle_227 geometry')
marker_sets["particle_227 geometry"]=s
s= marker_sets["particle_227 geometry"]
mark=s.place_marker((2353.05, 2768.74, 3398.7), (0.7, 0.7, 0.7), 123.558)
if "particle_228 geometry" not in marker_sets:
s=new_marker_set('particle_228 geometry')
marker_sets["particle_228 geometry"]=s
s= marker_sets["particle_228 geometry"]
mark=s.place_marker((2697.55, 2730.89, 3074.45), (0.7, 0.7, 0.7), 235.992)
if "particle_229 geometry" not in marker_sets:
s=new_marker_set('particle_229 geometry')
marker_sets["particle_229 geometry"]=s
s= marker_sets["particle_229 geometry"]
mark=s.place_marker((3091.53, 2560.68, 2829.63), (0.7, 0.7, 0.7), 172.373)
if "particle_230 geometry" not in marker_sets:
s=new_marker_set('particle_230 geometry')
marker_sets["particle_230 geometry"]=s
s= marker_sets["particle_230 geometry"]
mark=s.place_marker((3362.15, 2285.42, 3077.86), (0.7, 0.7, 0.7), 152.322)
if "particle_231 geometry" not in marker_sets:
s=new_marker_set('particle_231 geometry')
marker_sets["particle_231 geometry"]=s
s= marker_sets["particle_231 geometry"]
mark=s.place_marker((3472.34, 2111.15, 3311.71), (0.7, 0.7, 0.7), 196.653)
if "particle_232 geometry" not in marker_sets:
s=new_marker_set('particle_232 geometry')
marker_sets["particle_232 geometry"]=s
s= marker_sets["particle_232 geometry"]
mark=s.place_marker((3612.29, 2330.3, 3081.9), (0.7, 0.7, 0.7), 134.091)
if "particle_233 geometry" not in marker_sets:
s=new_marker_set('particle_233 geometry')
marker_sets["particle_233 geometry"]=s
s= marker_sets["particle_233 geometry"]
mark=s.place_marker((3658.17, 2590.41, 2902.22), (0.7, 0.7, 0.7), 180.325)
if "particle_234 geometry" not in marker_sets:
s=new_marker_set('particle_234 geometry')
marker_sets["particle_234 geometry"]=s
s= marker_sets["particle_234 geometry"]
mark=s.place_marker((3197.87, 2570.13, 2986.3), (0.7, 0.7, 0.7), 218.437)
if "particle_235 geometry" not in marker_sets:
s=new_marker_set('particle_235 geometry')
marker_sets["particle_235 geometry"]=s
s= marker_sets["particle_235 geometry"]
mark=s.place_marker((2874.67, 2560.13, 3329.59), (0.7, 0.7, 0.7), 148.008)
if "particle_236 geometry" not in marker_sets:
s=new_marker_set('particle_236 geometry')
marker_sets["particle_236 geometry"]=s
s= marker_sets["particle_236 geometry"]
mark=s.place_marker((2787.2, 2566.49, 3963.93), (0.7, 0.7, 0.7), 191.873)
if "particle_237 geometry" not in marker_sets:
s=new_marker_set('particle_237 geometry')
marker_sets["particle_237 geometry"]=s
s= marker_sets["particle_237 geometry"]
mark=s.place_marker((2725.41, 2821.66, 4442.24), (0.7, 0.7, 0.7), 138.575)
if "particle_238 geometry" not in marker_sets:
s=new_marker_set('particle_238 geometry')
marker_sets["particle_238 geometry"]=s
s= marker_sets["particle_238 geometry"]
mark=s.place_marker((3054.21, 2896.19, 4717.61), (0.7, 0.7, 0.7), 161.205)
if "particle_239 geometry" not in marker_sets:
s=new_marker_set('particle_239 geometry')
marker_sets["particle_239 geometry"]=s
s= marker_sets["particle_239 geometry"]
mark=s.place_marker((2885.05, 2544.18, 4475.2), (0.7, 0.7, 0.7), 288.021)
if "particle_240 geometry" not in marker_sets:
s=new_marker_set('particle_240 geometry')
marker_sets["particle_240 geometry"]=s
s= marker_sets["particle_240 geometry"]
mark=s.place_marker((3280.65, 2595.16, 3891.62), (0.7, 0.7, 0.7), 227.405)
if "particle_241 geometry" not in marker_sets:
s=new_marker_set('particle_241 geometry')
marker_sets["particle_241 geometry"]=s
s= marker_sets["particle_241 geometry"]
mark=s.place_marker((3318.8, 2554.73, 3377.47), (0.7, 0.7, 0.7), 126.519)
if "particle_242 geometry" not in marker_sets:
s=new_marker_set('particle_242 geometry')
marker_sets["particle_242 geometry"]=s
s= marker_sets["particle_242 geometry"]
mark=s.place_marker((3350.96, 2295.13, 3538.93), (0.7, 0.7, 0.7), 117.975)
if "particle_243 geometry" not in marker_sets:
s=new_marker_set('particle_243 geometry')
marker_sets["particle_243 geometry"]=s
s= marker_sets["particle_243 geometry"]
mark=s.place_marker((3035.96, 2349.53, 3284.48), (0.7, 0.7, 0.7), 200.883)
if "particle_244 geometry" not in marker_sets:
s=new_marker_set('particle_244 geometry')
marker_sets["particle_244 geometry"]=s
s= marker_sets["particle_244 geometry"]
mark=s.place_marker((3117.97, 2634, 3069.02), (0.7, 0.7, 0.7), 158.794)
if "particle_245 geometry" not in marker_sets:
s=new_marker_set('particle_245 geometry')
marker_sets["particle_245 geometry"]=s
s= marker_sets["particle_245 geometry"]
mark=s.place_marker((3179.01, 2946.18, 3066.89), (0.7, 0.7, 0.7), 115.86)
if "particle_246 geometry" not in marker_sets:
s=new_marker_set('particle_246 geometry')
marker_sets["particle_246 geometry"]=s
s= marker_sets["particle_246 geometry"]
mark=s.place_marker((3105.22, 2971.79, 2832.3), (0.7, 0.7, 0.7), 133.034)
if "particle_247 geometry" not in marker_sets:
s=new_marker_set('particle_247 geometry')
marker_sets["particle_247 geometry"]=s
s= marker_sets["particle_247 geometry"]
mark=s.place_marker((3110.34, 2615.59, 2512.7), (0.7, 0.7, 0.7), 314.627)
if "particle_248 geometry" not in marker_sets:
s=new_marker_set('particle_248 geometry')
marker_sets["particle_248 geometry"]=s
s= marker_sets["particle_248 geometry"]
mark=s.place_marker((3181.87, 2513.78, 2858.12), (0.7, 0.7, 0.7), 115.352)
if "particle_249 geometry" not in marker_sets:
s=new_marker_set('particle_249 geometry')
marker_sets["particle_249 geometry"]=s
s= marker_sets["particle_249 geometry"]
mark=s.place_marker((3313.54, 2617.3, 3257.35), (0.7, 0.7, 0.7), 180.621)
if "particle_250 geometry" not in marker_sets:
s=new_marker_set('particle_250 geometry')
marker_sets["particle_250 geometry"]=s
s= marker_sets["particle_250 geometry"]
mark=s.place_marker((3339.88, 2981.27, 3280.75), (0.7, 0.7, 0.7), 126.265)
if "particle_251 geometry" not in marker_sets:
s=new_marker_set('particle_251 geometry')
marker_sets["particle_251 geometry"]=s
s= marker_sets["particle_251 geometry"]
mark=s.place_marker((3141.46, 3274.14, 3137.46), (0.7, 0.7, 0.7), 133.541)
if "particle_252 geometry" not in marker_sets:
s=new_marker_set('particle_252 geometry')
marker_sets["particle_252 geometry"]=s
s= marker_sets["particle_252 geometry"]
mark=s.place_marker((2922.34, 3645.01, 3178.18), (0.7, 0.7, 0.7), 171.019)
if "particle_253 geometry" not in marker_sets:
s=new_marker_set('particle_253 geometry')
marker_sets["particle_253 geometry"]=s
s= marker_sets["particle_253 geometry"]
mark=s.place_marker((2758.13, 3948.24, 3370.48), (0.7, 0.7, 0.7), 115.437)
if "particle_254 geometry" not in marker_sets:
s=new_marker_set('particle_254 geometry')
marker_sets["particle_254 geometry"]=s
s= marker_sets["particle_254 geometry"]
mark=s.place_marker((3019.93, 3752.33, 3335.17), (0.7, 0.7, 0.7), 158.583)
if "particle_255 geometry" not in marker_sets:
s=new_marker_set('particle_255 geometry')
marker_sets["particle_255 geometry"]=s
s= marker_sets["particle_255 geometry"]
mark=s.place_marker((2875.05, 3416.54, 3051.31), (0.7, 0.7, 0.7), 192)
if "particle_256 geometry" not in marker_sets:
s=new_marker_set('particle_256 geometry')
marker_sets["particle_256 geometry"]=s
s= marker_sets["particle_256 geometry"]
mark=s.place_marker((2904.55, 3082.16, 2769.45), (0.7, 0.7, 0.7), 150.165)
if "particle_257 geometry" not in marker_sets:
s=new_marker_set('particle_257 geometry')
marker_sets["particle_257 geometry"]=s
s= marker_sets["particle_257 geometry"]
mark=s.place_marker((2715.33, 3249.89, 2681.57), (0.7, 0.7, 0.7), 157.567)
if "particle_258 geometry" not in marker_sets:
s=new_marker_set('particle_258 geometry')
marker_sets["particle_258 geometry"]=s
s= marker_sets["particle_258 geometry"]
mark=s.place_marker((2639.46, 3139.24, 2662.89), (0.7, 0.7, 0.7), 199.36)
if "particle_259 geometry" not in marker_sets:
s=new_marker_set('particle_259 geometry')
marker_sets["particle_259 geometry"]=s
s= marker_sets["particle_259 geometry"]
mark=s.place_marker((2575.11, 3089.69, 3119.83), (0.7, 0.7, 0.7), 105.369)
if "particle_260 geometry" not in marker_sets:
s=new_marker_set('particle_260 geometry')
marker_sets["particle_260 geometry"]=s
s= marker_sets["particle_260 geometry"]
mark=s.place_marker((2479.79, 2924.03, 3313.54), (0.7, 0.7, 0.7), 118.651)
if "particle_261 geometry" not in marker_sets:
s=new_marker_set('particle_261 geometry')
marker_sets["particle_261 geometry"]=s
s= marker_sets["particle_261 geometry"]
mark=s.place_marker((2635.39, 2763.58, 2924.43), (0.7, 0.7, 0.7), 219.664)
if "particle_262 geometry" not in marker_sets:
s=new_marker_set('particle_262 geometry')
marker_sets["particle_262 geometry"]=s
s= marker_sets["particle_262 geometry"]
mark=s.place_marker((2777.98, 2763.41, 2347.96), (0.7, 0.7, 0.7), 196.018)
if "particle_263 geometry" not in marker_sets:
s=new_marker_set('particle_263 geometry')
marker_sets["particle_263 geometry"]=s
s= marker_sets["particle_263 geometry"]
mark=s.place_marker((2844.11, 2719.83, 1863.7), (0.7, 0.7, 0.7), 218.141)
if "particle_264 geometry" not in marker_sets:
s=new_marker_set('particle_264 geometry')
marker_sets["particle_264 geometry"]=s
s= marker_sets["particle_264 geometry"]
mark=s.place_marker((2520.37, 2862.24, 1782.34), (0.7, 0.7, 0.7), 181.636)
if "particle_265 geometry" not in marker_sets:
s=new_marker_set('particle_265 geometry')
marker_sets["particle_265 geometry"]=s
s= marker_sets["particle_265 geometry"]
mark=s.place_marker((2367.22, 2923.14, 2025.64), (0.7, 0.7, 0.7), 195.003)
if "particle_266 geometry" not in marker_sets:
s=new_marker_set('particle_266 geometry')
marker_sets["particle_266 geometry"]=s
s= marker_sets["particle_266 geometry"]
mark=s.place_marker((2527.67, 2940.78, 1824.75), (0.7, 0.7, 0.7), 139.209)
if "particle_267 geometry" not in marker_sets:
s=new_marker_set('particle_267 geometry')
marker_sets["particle_267 geometry"]=s
s= marker_sets["particle_267 geometry"]
mark=s.place_marker((2542.82, 3016.41, 1797.63), (0.7, 0.7, 0.7), 189.885)
if "particle_268 geometry" not in marker_sets:
s=new_marker_set('particle_268 geometry')
marker_sets["particle_268 geometry"]=s
s= marker_sets["particle_268 geometry"]
mark=s.place_marker((2772.46, 3107.17, 1963.08), (0.7, 0.7, 0.7), 267.674)
if "particle_269 geometry" not in marker_sets:
s=new_marker_set('particle_269 geometry')
marker_sets["particle_269 geometry"]=s
s= marker_sets["particle_269 geometry"]
mark=s.place_marker((3231.44, 3194.33, 2287.42), (0.7, 0.7, 0.7), 196.568)
if "particle_270 geometry" not in marker_sets:
s=new_marker_set('particle_270 geometry')
marker_sets["particle_270 geometry"]=s
s= marker_sets["particle_270 geometry"]
mark=s.place_marker((3123.25, 3491.67, 2232.08), (0.7, 0.7, 0.7), 192.423)
if "particle_271 geometry" not in marker_sets:
s=new_marker_set('particle_271 geometry')
marker_sets["particle_271 geometry"]=s
s= marker_sets["particle_271 geometry"]
mark=s.place_marker((2922.11, 3517.76, 1888.46), (1, 0.7, 0), 202.405)
if "particle_272 geometry" not in marker_sets:
s=new_marker_set('particle_272 geometry')
marker_sets["particle_272 geometry"]=s
s= marker_sets["particle_272 geometry"]
mark=s.place_marker((3404.72, 3538.3, 2601.12), (0.7, 0.7, 0.7), 135.529)
if "particle_273 geometry" not in marker_sets:
s=new_marker_set('particle_273 geometry')
marker_sets["particle_273 geometry"]=s
s= marker_sets["particle_273 geometry"]
mark=s.place_marker((3875.63, 3704.8, 3443.14), (0.7, 0.7, 0.7), 114.21)
if "particle_274 geometry" not in marker_sets:
s=new_marker_set('particle_274 geometry')
marker_sets["particle_274 geometry"]=s
s= marker_sets["particle_274 geometry"]
mark=s.place_marker((3642.96, 3577.19, 3626.15), (0.7, 0.7, 0.7), 159.133)
if "particle_275 geometry" not in marker_sets:
s=new_marker_set('particle_275 geometry')
marker_sets["particle_275 geometry"]=s
s= marker_sets["particle_275 geometry"]
mark=s.place_marker((3442.67, 3242.2, 3508.37), (0.7, 0.7, 0.7), 144.412)
if "particle_276 geometry" not in marker_sets:
s=new_marker_set('particle_276 geometry')
marker_sets["particle_276 geometry"]=s
s= marker_sets["particle_276 geometry"]
mark=s.place_marker((3320.64, 2969.35, 3396.31), (0.7, 0.7, 0.7), 70.8525)
if "particle_277 geometry" not in marker_sets:
s=new_marker_set('particle_277 geometry')
marker_sets["particle_277 geometry"]=s
s= marker_sets["particle_277 geometry"]
mark=s.place_marker((3006.92, 2923.59, 2861.29), (0.7, 0.7, 0.7), 141.874)
if "particle_278 geometry" not in marker_sets:
s=new_marker_set('particle_278 geometry')
marker_sets["particle_278 geometry"]=s
s= marker_sets["particle_278 geometry"]
mark=s.place_marker((2745.68, 2966.26, 2306.62), (0.7, 0.7, 0.7), 217.337)
if "particle_279 geometry" not in marker_sets:
s=new_marker_set('particle_279 geometry')
marker_sets["particle_279 geometry"]=s
s= marker_sets["particle_279 geometry"]
mark=s.place_marker((2789.36, 2915.93, 2247.4), (0.7, 0.7, 0.7), 237.641)
if "particle_280 geometry" not in marker_sets:
s=new_marker_set('particle_280 geometry')
marker_sets["particle_280 geometry"]=s
s= marker_sets["particle_280 geometry"]
mark=s.place_marker((3038.22, 2624.59, 2505.24), (0.7, 0.7, 0.7), 229.393)
if "particle_281 geometry" not in marker_sets:
s=new_marker_set('particle_281 geometry')
marker_sets["particle_281 geometry"]=s
s= marker_sets["particle_281 geometry"]
mark=s.place_marker((3193.26, 2406.53, 1961.68), (0.7, 0.7, 0.7), 349.906)
if "particle_282 geometry" not in marker_sets:
s=new_marker_set('particle_282 geometry')
marker_sets["particle_282 geometry"]=s
s= marker_sets["particle_282 geometry"]
mark=s.place_marker((3406.3, 2581.03, 1467.84), (0.7, 0.7, 0.7), 162.347)
if "particle_283 geometry" not in marker_sets:
s=new_marker_set('particle_283 geometry')
marker_sets["particle_283 geometry"]=s
s= marker_sets["particle_283 geometry"]
mark=s.place_marker((3541.07, 2601.43, 1361.73), (0.7, 0.7, 0.7), 194.072)
if "particle_284 geometry" not in marker_sets:
s=new_marker_set('particle_284 geometry')
marker_sets["particle_284 geometry"]=s
s= marker_sets["particle_284 geometry"]
mark=s.place_marker((3654.73, 2464.77, 1401.6), (0.7, 0.7, 0.7), 242.21)
if "particle_285 geometry" not in marker_sets:
s=new_marker_set('particle_285 geometry')
marker_sets["particle_285 geometry"]=s
s= marker_sets["particle_285 geometry"]
mark=s.place_marker((4065.88, 2433.52, 1706.08), (0.7, 0.7, 0.7), 320.93)
if "particle_286 geometry" not in marker_sets:
s=new_marker_set('particle_286 geometry')
marker_sets["particle_286 geometry"]=s
s= marker_sets["particle_286 geometry"]
mark=s.place_marker((4615.54, 2382.93, 1577.74), (0.7, 0.7, 0.7), 226.432)
if "particle_287 geometry" not in marker_sets:
s=new_marker_set('particle_287 geometry')
marker_sets["particle_287 geometry"]=s
s= marker_sets["particle_287 geometry"]
mark=s.place_marker((4399.27, 2268.9, 1243.04), (0.7, 0.7, 0.7), 125.208)
if "particle_288 geometry" not in marker_sets:
s=new_marker_set('particle_288 geometry')
marker_sets["particle_288 geometry"]=s
s= marker_sets["particle_288 geometry"]
mark=s.place_marker((3880.73, 1918.45, 1162.1), (0.7, 0.7, 0.7), 197.837)
if "particle_289 geometry" not in marker_sets:
s=new_marker_set('particle_289 geometry')
marker_sets["particle_289 geometry"]=s
s= marker_sets["particle_289 geometry"]
mark=s.place_marker((3883.74, 1256.16, 1101.6), (0.7, 0.7, 0.7), 167.804)
if "particle_290 geometry" not in marker_sets:
s=new_marker_set('particle_290 geometry')
marker_sets["particle_290 geometry"]=s
s= marker_sets["particle_290 geometry"]
mark=s.place_marker((4208.02, 483.233, 1016.74), (0.7, 0.7, 0.7), 136.84)
if "particle_291 geometry" not in marker_sets:
s=new_marker_set('particle_291 geometry')
marker_sets["particle_291 geometry"]=s
s= marker_sets["particle_291 geometry"]
mark=s.place_marker((4549.87, 536.555, 1299.78), (0.7, 0.7, 0.7), 85.7421)
if "particle_292 geometry" not in marker_sets:
s=new_marker_set('particle_292 geometry')
marker_sets["particle_292 geometry"]=s
s= marker_sets["particle_292 geometry"]
mark=s.place_marker((4314.35, 1920.01, 1285.3), (1, 0.7, 0), 256)
if "particle_293 geometry" not in marker_sets:
s=new_marker_set('particle_293 geometry')
marker_sets["particle_293 geometry"]=s
s= marker_sets["particle_293 geometry"]
mark=s.place_marker((3829.66, 922.024, 1043.36), (0.7, 0.7, 0.7), 138.702)
if "particle_294 geometry" not in marker_sets:
s=new_marker_set('particle_294 geometry')
marker_sets["particle_294 geometry"]=s
s= marker_sets["particle_294 geometry"]
mark=s.place_marker((3586.4, 583.978, 790.976), (0.7, 0.7, 0.7), 140.732)
if "particle_295 geometry" not in marker_sets:
s=new_marker_set('particle_295 geometry')
marker_sets["particle_295 geometry"]=s
s= marker_sets["particle_295 geometry"]
mark=s.place_marker((3821.42, 789.071, 721.996), (0.7, 0.7, 0.7), 81.3006)
if "particle_296 geometry" not in marker_sets:
s=new_marker_set('particle_296 geometry')
marker_sets["particle_296 geometry"]=s
s= marker_sets["particle_296 geometry"]
mark=s.place_marker((4249.66, 715.927, 603.365), (0.7, 0.7, 0.7), 133.837)
if "particle_297 geometry" not in marker_sets:
s=new_marker_set('particle_297 geometry')
marker_sets["particle_297 geometry"]=s
s= marker_sets["particle_297 geometry"]
mark=s.place_marker((4201.76, 1292.84, 847.12), (0.7, 0.7, 0.7), 98.3475)
if "particle_298 geometry" not in marker_sets:
s=new_marker_set('particle_298 geometry')
marker_sets["particle_298 geometry"]=s
s= marker_sets["particle_298 geometry"]
mark=s.place_marker((3821.75, 1973.93, 1069.05), (0.7, 0.7, 0.7), 297.623)
if "particle_299 geometry" not in marker_sets:
s=new_marker_set('particle_299 geometry')
marker_sets["particle_299 geometry"]=s
s= marker_sets["particle_299 geometry"]
mark=s.place_marker((3848.51, 2348.05, 1256.57), (0.7, 0.7, 0.7), 212.938)
if "particle_300 geometry" not in marker_sets:
s=new_marker_set('particle_300 geometry')
marker_sets["particle_300 geometry"]=s
s= marker_sets["particle_300 geometry"]
mark=s.place_marker((3879.35, 2445.82, 1082.54), (0.7, 0.7, 0.7), 154.183)
if "particle_301 geometry" not in marker_sets:
s=new_marker_set('particle_301 geometry')
marker_sets["particle_301 geometry"]=s
s= marker_sets["particle_301 geometry"]
mark=s.place_marker((4247.23, 2599.1, 1219.79), (0.7, 0.7, 0.7), 180.832)
if "particle_302 geometry" not in marker_sets:
s=new_marker_set('particle_302 geometry')
marker_sets["particle_302 geometry"]=s
s= marker_sets["particle_302 geometry"]
mark=s.place_marker((4496.85, 2579.85, 1514.06), (0.7, 0.7, 0.7), 122.332)
if "particle_303 geometry" not in marker_sets:
s=new_marker_set('particle_303 geometry')
marker_sets["particle_303 geometry"]=s
s= marker_sets["particle_303 geometry"]
mark=s.place_marker((4632.33, 2447.31, 1849.97), (0.7, 0.7, 0.7), 209.047)
# Particle markers 304-357 for the Chimera marker display.
#
# The original generated code repeated the same five-line block once per
# particle; it is collapsed here into one table plus a loop with identical
# effect: each marker set is created on demand, cached in ``marker_sets``,
# and a single grey sphere is placed per particle.
# Each entry: (particle index, (x, y, z) center, marker radius).
_PARTICLE_COLOR = (0.7, 0.7, 0.7)
_PARTICLES = [
    (304, (4866.79, 2769.14, 1742.44), 126.985),
    (305, (5261.31, 2814.4, 1556.03), 122.205),
    (306, (5351.67, 2735.41, 1274.47), 107.95),
    (307, (4753.72, 2779.18, 1250.37), 182.567),
    (308, (4150.32, 2630.07, 1315.74), 185.274),
    (309, (3855.72, 2421.33, 1570.7), 413.567),
    (310, (3637.34, 2490.69, 1480.63), 240.01),
    (311, (3674.32, 2488.96, 1516.29), 238.995),
    (312, (3533.07, 2209.29, 1225.96), 203.674),
    (313, (3330.91, 1724.21, 780.404), 266.744),
    (314, (3482.76, 2101.05, 581.125), 147.585),
    (315, (3546.61, 2378.48, 987.139), 249.485),
    (316, (3474.41, 2330.72, 1527.35), 119.371),
    (317, (3774.42, 1961.37, 2147.88), 155.875),
    (318, (4270.76, 1523.53, 2351.58), 189.419),
    (319, (4131.75, 1248.16, 2116.59), 137.475),
    (320, (3744.1, 1214.35, 1976.2), 176.179),
    (321, (3456.95, 994.852, 1806.45), 138.829),
    (322, (3351.41, 687.058, 1620.36), 148.727),
    (323, (3406.89, 218.045, 1464.82), 230.323),
    (324, (3732.74, 750.471, 1617.5), 175.376),
    (325, (3819.98, 1218.24, 1804.57), 161.163),
    (326, (3809.34, 1043.08, 2257.82), 125.885),
    (327, (3984.4, 875.36, 2622.53), 206.635),
    (328, (3566.24, 1022.06, 2570.11), 151.392),
    (329, (3253.12, 1111.33, 2449.61), 173.388),
    (330, (3320.08, 911.634, 2354.63), 135.825),
    (331, (3470.44, 548.572, 2212.26), 186.839),
    (332, (3655.75, 150.619, 2067.29), 121.189),
    (333, (3733.72, 559.068, 2121.19), 102.916),
    (334, (3832.66, 1163.77, 2060.66), 212.769),
    (335, (3690.66, 1806.27, 2083.26), 173.092),
    (336, (3840.02, 2277.39, 2146.49), 264.502),
    (337, (4324.16, 2522.04, 2240.37), 208.666),
    (338, (4760.01, 2669.28, 2062.06), 186.797),
    (339, (4711.74, 3018.27, 1695.23), 255.534),
    (340, (4818.69, 2952.64, 1283.45), 153.126),
    (341, (5164.79, 2982.5, 1477.86), 165.816),
    (342, (4931.53, 3150.83, 1755.67), 134.429),
    (343, (4686.93, 2953.93, 2035.02), 178.971),
    (344, (4659.7, 2439.77, 2192.26), 189.969),
    (345, (5077.54, 2043.76, 2454.5), 121.359),
    (346, (4889.79, 1549.21, 2553.24), 187.262),
    (347, (4295.7, 1292.19, 2565.87), 164.335),
    (348, (4049.69, 1072.47, 2192.99), 138.363),
    (349, (3906.89, 768.373, 2051.58), 138.49),
    (350, (3725.32, 765.173, 2353.84), 116.325),
    (351, (3871.11, 1159.6, 2539.45), 106.511),
    (352, (4124.89, 1632.46, 2454.19), 151.096),
    (353, (4601.77, 2074.19, 2301.58), 240.856),
    (354, (4982.62, 2360.55, 2112.06), 149.7),
    (355, (4976.1, 2693.08, 2106.32), 165.943),
    (356, (4426.02, 2870.24, 1903.5), 178.971),
    (357, (3825.99, 3321.93, 1743.93), 154.945),
]
for _idx, _center, _radius in _PARTICLES:
    _name = 'particle_%d geometry' % _idx
    # Create the marker set only if a previous run has not already done so
    # (``s`` and ``mark`` intentionally stay bound after the loop, matching
    # the original flat script's final state).
    if _name not in marker_sets:
        s = new_marker_set(_name)
        marker_sets[_name] = s
    s = marker_sets[_name]
    mark = s.place_marker(_center, _PARTICLE_COLOR, _radius)
# Register every accumulated surface model with Chimera.
for _surf in surf_sets.values():
    chimera.openModels.add([_surf])
| gpl-3.0 |
bhilburn/gnuradio | gr-digital/python/digital/qa_pfb_clock_sync.py | 23 | 7031 | #!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
import random
import cmath
import time
from gnuradio import gr, gr_unittest, filter, digital, blocks
class test_pfb_clock_sync(gr_unittest.TestCase):
    """QA for digital.pfb_clock_sync_{ccf,fff} (polyphase filterbank
    symbol timing recovery).

    test01/test02 check that an RRC-shaped alternating +/-1 BPSK stream is
    recovered once the timing loop has locked (complex and float block
    variants); test03/test03_f check that the filter taps can be swapped
    with update_taps() while the flowgraph is running.

    The four tests previously duplicated their whole setup; the shared
    machinery now lives in the private helpers below, with the public
    test method names unchanged so gr_unittest still discovers them.
    """

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def _loop_params(self):
        """Return the timing-loop parameters shared by every test:
        (sps, loop_bw, nfilts, init_phase, max_rate_deviation, osps, ntaps).
        """
        sps = 4
        loop_bw = cmath.pi/100.0
        nfilts = 32
        init_phase = nfilts/2
        max_rate_deviation = 0.5
        osps = 1
        ntaps = 11 * int(sps*nfilts)
        return (sps, loop_bw, nfilts, init_phase,
                max_rate_deviation, osps, ntaps)

    def _run_bpsk_sync(self, use_complex):
        """Build src -> RRC pulse shaper -> pfb_clock_sync -> sink, run to
        completion, and compare the tail of the recovered symbols to the
        transmitted +/-1 pattern.

        use_complex selects the ccf (complex) or fff (float) block set.
        """
        excess_bw = 0.35
        (sps, loop_bw, nfilts, init_phase,
         max_rate_deviation, osps, ntaps) = self._loop_params()

        # Matched-filter taps for the polyphase clock-recovery bank.
        taps = filter.firdes.root_raised_cosine(nfilts, nfilts*sps,
                                                1.0, excess_bw, ntaps)
        # Pulse shaping interpolation filter taps.
        rrc_taps = filter.firdes.root_raised_cosine(
            nfilts,          # gain
            nfilts,          # sampling rate based on 32 filters in resampler
            1.0,             # symbol rate
            excess_bw,       # excess bandwidth (roll-off factor)
            ntaps)

        if use_complex:
            data = 10000*[complex(1, 0), complex(-1, 0)]
            self.src = blocks.vector_source_c(data, False)
            self.rrc_filter = filter.pfb_arb_resampler_ccf(sps, rrc_taps)
            self.test = digital.pfb_clock_sync_ccf(sps, loop_bw, taps,
                                                   nfilts, init_phase,
                                                   max_rate_deviation,
                                                   osps)
            self.snk = blocks.vector_sink_c()
        else:
            data = 10000*[1, -1]
            self.src = blocks.vector_source_f(data, False)
            self.rrc_filter = filter.pfb_arb_resampler_fff(sps, rrc_taps)
            self.test = digital.pfb_clock_sync_fff(sps, loop_bw, taps,
                                                   nfilts, init_phase,
                                                   max_rate_deviation,
                                                   osps)
            self.snk = blocks.vector_sink_f()

        self.tb.connect(self.src, self.rrc_filter, self.test, self.snk)
        self.tb.run()

        # Only compare the last Ncmp samples: the start of the stream is
        # consumed while the loop acquires lock.
        Ncmp = 1000
        expected_result = data[-Ncmp:]
        dst_data = self.snk.data()[-Ncmp:]

        if use_complex:
            self.assertComplexTuplesAlmostEqual(expected_result, dst_data, 1)
        else:
            self.assertFloatTuplesAlmostEqual(expected_result, dst_data, 1)

    def _run_tap_update(self, use_complex):
        """Start a flowgraph on null source/sink and call update_taps()
        while the scheduler is running; passes if no error occurs.
        """
        excess_bw0 = 0.35
        excess_bw1 = 0.22
        (sps, loop_bw, nfilts, init_phase,
         max_rate_deviation, osps, ntaps) = self._loop_params()

        taps = filter.firdes.root_raised_cosine(nfilts, nfilts*sps,
                                                1.0, excess_bw0, ntaps)
        if use_complex:
            self.test = digital.pfb_clock_sync_ccf(sps, loop_bw, taps,
                                                   nfilts, init_phase,
                                                   max_rate_deviation,
                                                   osps)
            item_size = gr.sizeof_gr_complex
        else:
            self.test = digital.pfb_clock_sync_fff(sps, loop_bw, taps,
                                                   nfilts, init_phase,
                                                   max_rate_deviation,
                                                   osps)
            item_size = gr.sizeof_float

        self.src = blocks.null_source(item_size)
        self.snk = blocks.null_sink(item_size)

        self.tb.connect(self.src, self.test, self.snk)
        self.tb.start()
        time.sleep(0.1)

        # Swap in a new set of taps while the flowgraph is live.
        taps = filter.firdes.root_raised_cosine(nfilts, nfilts*sps,
                                                1.0, excess_bw1, ntaps)
        self.test.update_taps(taps)

        self.tb.stop()
        self.tb.wait()

        self.assertTrue(True)

    def test01(self):
        # Test complex (ccf) BPSK sync.
        self._run_bpsk_sync(True)

    def test02(self):
        # Test real (fff) BPSK sync.
        self._run_bpsk_sync(False)

    def test03(self):
        # Test resetting of taps on the complex (ccf) block.
        self._run_tap_update(True)

    def test03_f(self):
        # Test resetting of taps on the float (fff) block.
        self._run_tap_update(False)
if __name__ == '__main__':
    # Run this QA suite through GNU Radio's unittest wrapper, which also
    # writes an XML result file for the build system.
    gr_unittest.run(test_pfb_clock_sync, "test_pfb_clock_sync.xml")
| gpl-3.0 |
Azure/azure-sdk-for-python | sdk/appservice/azure-mgmt-web/azure/mgmt/web/v2020_12_01/aio/operations/_certificates_operations.py | 1 | 21769 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
# Type variable for the deserialized response model produced by an operation.
T = TypeVar('T')
# Optional per-call hook passed as the ``cls`` keyword: it receives the raw
# pipeline response, the deserialized model, and the response headers, and
# may transform the value returned to the caller.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class CertificatesOperations:
"""CertificatesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.web.v2020_12_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def list(
        self,
        filter: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.CertificateCollection"]:
        """Get all certificates for a subscription.

        Description for Get all certificates for a subscription.

        :param filter: Return only information specified in the filter (using OData syntax). For
         example: $filter=KeyVaultId eq 'KeyVaultId'.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CertificateCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_12_01.models.CertificateCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateCollection"]
        # Map well-known status codes onto azure-core exception types; the
        # caller may extend/override the mapping via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for one page: the first page uses the
            # templated operation URL plus query parameters; later pages
            # reuse the service-supplied ``next_link`` verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str', skip_quote=True)
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('CertificateCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, raising a typed error on any non-200 response.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        # AsyncItemPaged drives get_next/extract_data lazily as the caller
        # iterates, so no request is issued until iteration starts.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Web/certificates'}  # type: ignore
    def list_by_resource_group(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.CertificateCollection"]:
        """Get all certificates in a resource group.

        Description for Get all certificates in a resource group.

        :param resource_group_name: Name of the resource group to which the resource belongs.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either CertificateCollection or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.web.v2020_12_01.models.CertificateCollection]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateCollection"]
        # Map well-known status codes onto azure-core exception types; the
        # caller may extend/override the mapping via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-12-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Build the GET request for one page: the first page uses the
            # templated operation URL plus query parameters; later pages
            # reuse the service-supplied ``next_link`` verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_resource_group.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (continuation token, items).
            deserialized = self._deserialize('CertificateCollection', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, raising a typed error on any non-200 response.
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        # AsyncItemPaged drives get_next/extract_data lazily as the caller
        # iterates, so no request is issued until iteration starts.
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates'}  # type: ignore
async def get(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> "_models.Certificate":
"""Get a certificate.
Description for Get a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Certificate, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_12_01.models.Certificate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Certificate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
async def create_or_update(
self,
resource_group_name: str,
name: str,
certificate_envelope: "_models.Certificate",
**kwargs: Any
) -> "_models.Certificate":
"""Create or update a certificate.
Description for Create or update a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:param certificate_envelope: Details of certificate, if it exists already.
:type certificate_envelope: ~azure.mgmt.web.v2020_12_01.models.Certificate
:keyword callable cls: A custom type or function that will be passed the direct response
:return: Certificate, or the result of cls(response)
:rtype: ~azure.mgmt.web.v2020_12_01.models.Certificate
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.Certificate"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_or_update.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_envelope, 'Certificate')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('Certificate', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
async def delete(
self,
resource_group_name: str,
name: str,
**kwargs: Any
) -> None:
"""Delete a certificate.
Description for Delete a certificate.
:param resource_group_name: Name of the resource group to which the resource belongs.
:type resource_group_name: str
:param name: Name of the certificate.
:type name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-12-01"
accept = "application/json"
# Construct URL
url = self.delete.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
'name': self._serialize.url("name", name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'} # type: ignore
async def update(
    self,
    resource_group_name: str,
    name: str,
    certificate_envelope: "_models.CertificatePatchResource",
    **kwargs: Any
) -> "_models.Certificate":
    """Create or update a certificate.

    Description for Create or update a certificate.

    :param resource_group_name: Name of the resource group to which the resource belongs.
    :type resource_group_name: str
    :param name: Name of the certificate.
    :type name: str
    :param certificate_envelope: Details of certificate, if it exists already.
    :type certificate_envelope: ~azure.mgmt.web.v2020_12_01.models.CertificatePatchResource
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Certificate, or the result of cls(response)
    :rtype: ~azure.mgmt.web.v2020_12_01.models.Certificate
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.Certificate"]
    # Map ARM error status codes to the azure-core exception types callers catch.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2020-12-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL from the operation's metadata template.
    url = self.update.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+[^\.]$'),
        'name': self._serialize.url("name", name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the patch body and issue an HTTP PATCH (partial update).
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(certificate_envelope, 'CertificatePatchResource')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # Only 200 is a success for this operation; anything else is raised.
    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.DefaultErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('Certificate', pipeline_response)

    # Allow the caller to substitute a custom response transformer.
    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Web/certificates/{name}'}  # type: ignore
| mit |
marcoitur/FreeCAD | src/Mod/Assembly/Init.py | 55 | 1879 | # FreeCAD init script of the Assembly module
# (c) 2001 Juergen Riegel
#***************************************************************************
#* (c) Juergen Riegel (juergen.riegel@web.de) 2002 *
#* *
#* This file is part of the FreeCAD CAx development system. *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* FreeCAD is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with FreeCAD; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#* Juergen Riegel 2002 *
#***************************************************************************/
| lgpl-2.1 |
KL-WLCR/incubator-airflow | tests/contrib/operators/test_dataproc_operator.py | 9 | 13326 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import re
import unittest
from airflow import DAG
from airflow.contrib.operators.dataproc_operator import DataprocClusterCreateOperator
from airflow.contrib.operators.dataproc_operator import DataprocClusterDeleteOperator
from airflow.contrib.operators.dataproc_operator import DataProcHadoopOperator
from airflow.contrib.operators.dataproc_operator import DataProcHiveOperator
from airflow.contrib.operators.dataproc_operator import DataProcPySparkOperator
from airflow.contrib.operators.dataproc_operator import DataProcSparkOperator
from airflow.version import version
from copy import deepcopy
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
from mock import Mock
from mock import patch
# ---------------------------------------------------------------------------
# Fixture values shared by every Dataproc operator test below.
# ---------------------------------------------------------------------------
TASK_ID = 'test-dataproc-operator'
CLUSTER_NAME = 'test-cluster-name'
PROJECT_ID = 'test-project-id'
NUM_WORKERS = 123
ZONE = 'us-central1-a'
NETWORK_URI = '/projects/project_id/regions/global/net'
SUBNETWORK_URI = '/projects/project_id/regions/global/subnet'
TAGS = ['tag1', 'tag2']
STORAGE_BUCKET = 'gs://airflow-test-bucket/'
IMAGE_VERSION = '1.1'
MASTER_MACHINE_TYPE = 'n1-standard-2'
MASTER_DISK_SIZE = 100
WORKER_MACHINE_TYPE = 'n1-standard-2'
WORKER_DISK_SIZE = 100
NUM_PREEMPTIBLE_WORKERS = 2
# Two label fixtures: an empty mapping and a populated one, exercised in turn.
LABEL1 = {}
LABEL2 = {'application':'test', 'year': 2017}
SERVICE_ACCOUNT_SCOPES = [
    'https://www.googleapis.com/auth/bigquery',
    'https://www.googleapis.com/auth/bigtable.data'
]
DEFAULT_DATE = datetime.datetime(2017, 6, 6)
REGION = 'test-region'
MAIN_URI = 'test-uri'
class DataprocClusterCreateOperatorTest(unittest.TestCase):
    # Unit test for the DataprocClusterCreateOperator

    def setUp(self):
        """Build one operator per label fixture so every test runs against
        both an empty and a populated labels dict."""
        # instantiate two different test cases with different labels.
        self.labels = [LABEL1, LABEL2]
        self.dataproc_operators = []
        self.mock_conn = Mock()
        for labels in self.labels:
            self.dataproc_operators.append(
                DataprocClusterCreateOperator(
                    task_id=TASK_ID,
                    cluster_name=CLUSTER_NAME,
                    project_id=PROJECT_ID,
                    num_workers=NUM_WORKERS,
                    zone=ZONE,
                    network_uri=NETWORK_URI,
                    subnetwork_uri=SUBNETWORK_URI,
                    tags=TAGS,
                    storage_bucket=STORAGE_BUCKET,
                    image_version=IMAGE_VERSION,
                    master_machine_type=MASTER_MACHINE_TYPE,
                    master_disk_size=MASTER_DISK_SIZE,
                    worker_machine_type=WORKER_MACHINE_TYPE,
                    worker_disk_size=WORKER_DISK_SIZE,
                    num_preemptible_workers=NUM_PREEMPTIBLE_WORKERS,
                    # deepcopy so the operator cannot mutate the shared fixture
                    labels = deepcopy(labels),
                    service_account_scopes = SERVICE_ACCOUNT_SCOPES
                )
            )
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
            },
            schedule_interval='@daily')

    def test_init(self):
        """Test DataProcClusterOperator instance is properly initialized."""
        for suffix, dataproc_operator in enumerate(self.dataproc_operators):
            self.assertEqual(dataproc_operator.cluster_name, CLUSTER_NAME)
            self.assertEqual(dataproc_operator.project_id, PROJECT_ID)
            self.assertEqual(dataproc_operator.num_workers, NUM_WORKERS)
            self.assertEqual(dataproc_operator.zone, ZONE)
            self.assertEqual(dataproc_operator.network_uri, NETWORK_URI)
            self.assertEqual(dataproc_operator.subnetwork_uri, SUBNETWORK_URI)
            self.assertEqual(dataproc_operator.tags, TAGS)
            self.assertEqual(dataproc_operator.storage_bucket, STORAGE_BUCKET)
            self.assertEqual(dataproc_operator.image_version, IMAGE_VERSION)
            self.assertEqual(dataproc_operator.master_machine_type, MASTER_MACHINE_TYPE)
            self.assertEqual(dataproc_operator.master_disk_size, MASTER_DISK_SIZE)
            self.assertEqual(dataproc_operator.worker_machine_type, WORKER_MACHINE_TYPE)
            self.assertEqual(dataproc_operator.worker_disk_size, WORKER_DISK_SIZE)
            self.assertEqual(dataproc_operator.num_preemptible_workers, NUM_PREEMPTIBLE_WORKERS)
            self.assertEqual(dataproc_operator.labels, self.labels[suffix])
            self.assertEqual(dataproc_operator.service_account_scopes, SERVICE_ACCOUNT_SCOPES)

    def test_build_cluster_data(self):
        """The cluster request body must mirror the constructor arguments and
        carry an auto-generated ``airflow-version`` label."""
        for suffix, dataproc_operator in enumerate(self.dataproc_operators):
            cluster_data = dataproc_operator._build_cluster_data()
            self.assertEqual(cluster_data['clusterName'], CLUSTER_NAME)
            self.assertEqual(cluster_data['projectId'], PROJECT_ID)
            self.assertEqual(cluster_data['config']['softwareConfig'], {'imageVersion': IMAGE_VERSION})
            self.assertEqual(cluster_data['config']['configBucket'], STORAGE_BUCKET)
            self.assertEqual(cluster_data['config']['workerConfig']['numInstances'], NUM_WORKERS)
            self.assertEqual(cluster_data['config']['secondaryWorkerConfig']['numInstances'],
                             NUM_PREEMPTIBLE_WORKERS)
            self.assertEqual(cluster_data['config']['gceClusterConfig']['serviceAccountScopes'],
                             SERVICE_ACCOUNT_SCOPES)
            self.assertEqual(cluster_data['config']['gceClusterConfig']['subnetworkUri'],
                             SUBNETWORK_URI)
            self.assertEqual(cluster_data['config']['gceClusterConfig']['networkUri'],
                             NETWORK_URI)
            self.assertEqual(cluster_data['config']['gceClusterConfig']['tags'],
                             TAGS)
            # test whether the default airflow-version label has been properly
            # set to the dataproc operator.
            merged_labels = {}
            merged_labels.update(self.labels[suffix])
            # dots and plus signs are not valid GCE label characters
            merged_labels.update({'airflow-version': 'v' + version.replace('.', '-').replace('+','-')})
            self.assertTrue(re.match(r'[a-z]([-a-z0-9]*[a-z0-9])?',
                                     cluster_data['labels']['airflow-version']))
            self.assertEqual(cluster_data['labels'], merged_labels)

    def test_cluster_name_log_no_sub(self):
        """A literal cluster name must be logged verbatim on create."""
        with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterCreateOperator(
                task_id=TASK_ID,
                cluster_name=CLUSTER_NAME,
                project_id=PROJECT_ID,
                num_workers=NUM_WORKERS,
                zone=ZONE,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                # execute(None) fails later on the mocked hook; only the log
                # call before the failure is under test here.
                with self.assertRaises(TypeError) as _:
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Creating cluster: %s', CLUSTER_NAME)

    def test_cluster_name_log_sub(self):
        """A templated cluster name must be rendered before being logged."""
        with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterCreateOperator(
                task_id=TASK_ID,
                cluster_name='smoke-cluster-{{ ts_nodash }}',
                project_id=PROJECT_ID,
                num_workers=NUM_WORKERS,
                zone=ZONE,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                context = { 'ts_nodash' : 'testnodash'}
                rendered = dataproc_task.render_template('cluster_name', getattr(dataproc_task,'cluster_name'), context)
                setattr(dataproc_task, 'cluster_name', rendered)
                with self.assertRaises(TypeError) as _:
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Creating cluster: %s', u'smoke-cluster-testnodash')
class DataprocClusterDeleteOperatorTest(unittest.TestCase):
    # Unit test for the DataprocClusterDeleteOperator

    def setUp(self):
        """Build a mock googleapiclient connection whose
        ``projects().regions().operations().get().execute()`` chain reports a
        finished operation, so the operator's polling loop terminates."""
        self.mock_execute = Mock()
        self.mock_execute.execute = Mock(return_value={'done' : True})
        self.mock_get = Mock()
        self.mock_get.get = Mock(return_value=self.mock_execute)
        self.mock_operations = Mock()
        self.mock_operations.get = Mock(return_value=self.mock_get)
        self.mock_regions = Mock()
        self.mock_regions.operations = Mock(return_value=self.mock_operations)
        self.mock_projects = Mock()
        self.mock_projects.regions = Mock(return_value=self.mock_regions)
        self.mock_conn = Mock()
        self.mock_conn.projects = Mock(return_value=self.mock_projects)
        self.dag = DAG(
            'test_dag',
            default_args={
                'owner': 'airflow',
                'start_date': DEFAULT_DATE,
                'end_date': DEFAULT_DATE,
            },
            schedule_interval='@daily')

    def test_cluster_name_log_no_sub(self):
        """A literal cluster name must be logged verbatim on delete."""
        # Fix: patch DataProcHook where it is *used* (the operator module),
        # not where it is defined. The operator imports the name at module
        # load, so patching airflow.contrib.hooks.gcp_dataproc_hook left the
        # operator's reference pointing at the real hook. This also matches
        # every sibling test in this file.
        with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterDeleteOperator(
                task_id=TASK_ID,
                cluster_name=CLUSTER_NAME,
                project_id=PROJECT_ID,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                with self.assertRaises(TypeError) as _:
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Deleting cluster: %s', CLUSTER_NAME)

    def test_cluster_name_log_sub(self):
        """A templated cluster name must be rendered before being logged."""
        with patch('airflow.contrib.operators.dataproc_operator.DataProcHook') as mock_hook:
            mock_hook.return_value.get_conn = self.mock_conn
            dataproc_task = DataprocClusterDeleteOperator(
                task_id=TASK_ID,
                cluster_name='smoke-cluster-{{ ts_nodash }}',
                project_id=PROJECT_ID,
                dag=self.dag
            )
            with patch.object(dataproc_task.log, 'info') as mock_info:
                context = { 'ts_nodash' : 'testnodash'}
                rendered = dataproc_task.render_template('cluster_name', getattr(dataproc_task,'cluster_name'), context)
                setattr(dataproc_task, 'cluster_name', rendered)
                with self.assertRaises(TypeError) as _:
                    dataproc_task.execute(None)
                mock_info.assert_called_with('Deleting cluster: %s', u'smoke-cluster-testnodash')
class DataProcHadoopOperatorTest(unittest.TestCase):
    """Unit tests for DataProcHadoopOperator."""

    def test_hook_correct_region(self):
        """The configured region must be forwarded to DataProcHook.submit()."""
        hook_path = 'airflow.contrib.operators.dataproc_operator.DataProcHook'
        with patch(hook_path) as hook_mock:
            task = DataProcHadoopOperator(task_id=TASK_ID, region=REGION)
            task.execute(None)
            hook_mock.return_value.submit.assert_called_once_with(
                mock.ANY, mock.ANY, REGION)
class DataProcHiveOperatorTest(unittest.TestCase):
    """Unit tests for DataProcHiveOperator."""

    def test_hook_correct_region(self):
        """The configured region must be forwarded to DataProcHook.submit()."""
        hook_path = 'airflow.contrib.operators.dataproc_operator.DataProcHook'
        with patch(hook_path) as hook_mock:
            task = DataProcHiveOperator(task_id=TASK_ID, region=REGION)
            task.execute(None)
            hook_mock.return_value.submit.assert_called_once_with(
                mock.ANY, mock.ANY, REGION)
class DataProcPySparkOperatorTest(unittest.TestCase):
    """Unit tests for DataProcPySparkOperator."""

    def test_hook_correct_region(self):
        """The configured region must be forwarded to DataProcHook.submit()."""
        hook_path = 'airflow.contrib.operators.dataproc_operator.DataProcHook'
        with patch(hook_path) as hook_mock:
            task = DataProcPySparkOperator(task_id=TASK_ID, main=MAIN_URI,
                                           region=REGION)
            task.execute(None)
            hook_mock.return_value.submit.assert_called_once_with(
                mock.ANY, mock.ANY, REGION)
class DataProcSparkOperatorTest(unittest.TestCase):
    """Unit tests for DataProcSparkOperator."""

    def test_hook_correct_region(self):
        """The configured region must be forwarded to DataProcHook.submit()."""
        hook_path = 'airflow.contrib.operators.dataproc_operator.DataProcHook'
        with patch(hook_path) as hook_mock:
            task = DataProcSparkOperator(task_id=TASK_ID, region=REGION)
            task.execute(None)
            hook_mock.return_value.submit.assert_called_once_with(
                mock.ANY, mock.ANY, REGION)
mozilla/popcorn_maker | vendor-local/lib/python/html5lib/treewalkers/__init__.py | 133 | 2416 | """A collection of modules for iterating through different kinds of
tree, generating tokens identical to those produced by the tokenizer
module.
To create a tree walker for a new type of tree, you need to do
implement a tree walker object (called TreeWalker by convention) that
implements a 'serialize' method taking a tree as sole argument and
returning an iterator generating tokens.
"""
treeWalkerCache = {}


def getTreeWalker(treeType, implementation=None, **kwargs):
    """Return a TreeWalker class for the requested kind of tree.

    treeType -- case-insensitive name of the tree flavour. Supported values
        are "simpletree", "dom", "pulldom", "etree", "lxml", "beautifulsoup"
        and "genshi". An unknown name yields None.
    implementation -- for the "etree" type only: a module exposing an
        elementtree-like interface (e.g. xml.etree.ElementTree, cElementTree
        or lxml.etree).

    Results are memoized in treeWalkerCache, except for "etree", whose
    submodule performs its own per-implementation caching.
    """
    kind = treeType.lower()

    # "etree" is resolved per implementation and must never be cached here.
    if kind == "etree":
        import etree
        return etree.getETreeModule(implementation, **kwargs).TreeWalker

    if kind not in treeWalkerCache:
        if kind in ("dom", "pulldom", "simpletree"):
            mod = __import__(kind, globals())
            treeWalkerCache[kind] = mod.TreeWalker
        elif kind == "genshi":
            import genshistream
            treeWalkerCache[kind] = genshistream.TreeWalker
        elif kind == "beautifulsoup":
            import soup
            treeWalkerCache[kind] = soup.TreeWalker
        elif kind == "lxml":
            import lxmletree
            treeWalkerCache[kind] = lxmletree.TreeWalker

    return treeWalkerCache.get(kind)
| bsd-3-clause |
Maspear/odoo | addons/stock/report/__init__.py | 376 | 1088 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product_stock
import report_stock
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
buddyli/private2w | libs/pony/orm/tests/test_queries.py | 2 | 3478 | from __future__ import with_statement
import re, os, os.path, sys, imp
from pony import orm
from pony.orm import core
from pony.orm.tests import testutils
# NOTE: this file is Python 2 (print statements, file() builtin below).
# Matches a directive line such as "Schema:" or "SQLite 3.2:" --
# an identifier optionally followed by a version number, ending in a colon.
directive_re = re.compile(r'(\w+)(\s+[0-9\.]+)?:')
# Parser state shared by the directive handlers below.
directive = module_name = None
statements = []  # ">>>" statements collected for the current test case
lines = []       # expected-SQL (or schema) lines collected so far


def Schema(param):
    # Directive handler for "Schema:": the single collected line names the
    # module defining the test entities.  Bail out if the previously
    # collected statements were never exercised against any database.
    if not statement_used:
        print
        print 'Statement not used:'
        print
        print '\n'.join(statements)
        print
        sys.exit()
    assert len(lines) == 1
    global module_name
    module_name = lines[0].strip()
# Directive handlers: each runs the current test case against one provider.
# The optional version suffix from the directive line is passed through.
def SQLite(server_version):
    do_test('sqlite', server_version)

def MySQL(server_version):
    do_test('mysql', server_version)

def PostgreSQL(server_version):
    do_test('postgres', server_version)

def Oracle(server_version):
    do_test('oracle', server_version)
# Providers whose driver module failed to import; skipped on later directives.
unavailable_providers = set()


def do_test(provider_name, raw_server_version):
    # Execute the collected ">>>" statements against `provider_name` and
    # compare the SQL produced by the last statement with the expected SQL
    # accumulated in the module-level `lines`.
    if provider_name in unavailable_providers: return
    # TestDatabase reads these class attributes to fake the provider/version.
    testutils.TestDatabase.real_provider_name = provider_name
    testutils.TestDatabase.raw_server_version = raw_server_version
    core.Database = orm.Database = testutils.TestDatabase
    # Force a clean re-import so the schema module binds to the test Database.
    sys.modules.pop(module_name, None)
    try: __import__(module_name)
    except ImportError, e:
        print
        print 'ImportError for database provider %s:\n%s' % (provider_name, e)
        print
        unavailable_providers.add(provider_name)
        return
    module = sys.modules[module_name]
    core.debug = orm.debug = False
    globals = vars(module).copy()
    with orm.db_session:
        # All statements but the last are setup; exec them.
        for statement in statements[:-1]:
            code = compile(statement, '<string>', 'exec')
            exec code in globals
        # The last statement is the one whose generated SQL is checked;
        # prefer eval so its result (e.g. a Query) can be materialized.
        statement = statements[-1]
        try: last_code = compile(statement, '<string>', 'eval')
        except SyntaxError:
            last_code = compile(statement, '<string>', 'exec')
            exec last_code in globals
        else:
            result = eval(last_code, globals)
            # Iterating a Query forces SQL generation.
            if isinstance(result, core.Query): result = list(result)
        sql = module.db.sql
    expected_sql = '\n'.join(lines)
    if sql == expected_sql: print '+', provider_name, statements[-1]
    else:
        print '-', provider_name, statements[-1]
        print
        print 'Expected:'
        print expected_sql
        print
        print 'Got:'
        print sql
        print
    global statement_used
    statement_used = True
# The test cases live in queries.txt next to this script.
dirname, fname = os.path.split(__file__)
queries_fname = os.path.join(dirname, 'queries.txt')


def orphan_lines(lines):
    # Expected-SQL lines that follow no explicit provider directive
    # default to SQLite (version unspecified).
    SQLite(None)
    lines[:] = []


statement_used = True
# Parse queries.txt line by line: blank lines and '#' comments are skipped;
# a directive line ("SQLite:", "Schema:", ...) flushes the pending test case
# to the previous directive; a ">>> " line starts/extends a statement list;
# anything else is an expected-SQL (or schema) line.
for raw_line in file(queries_fname):
    line = raw_line.strip()
    if not line: continue
    if line.startswith('#'): continue
    match = directive_re.match(line)
    if match:
        if directive:
            directive(directive_param)
            lines[:] = []
        elif lines: orphan_lines(lines)
        # The directive name resolves to one of the handler functions
        # defined above (Schema, SQLite, MySQL, PostgreSQL, Oracle).
        directive = eval(match.group(1))
        if match.group(2):
            directive_param = match.group(2)
        else: directive_param = None
    elif line.startswith('>>> '):
        if directive:
            directive(directive_param)
            lines[:] = []
            statements[:] = []
        elif lines: orphan_lines(lines)
        directive = None
        directive_param = None
        statements.append(line[4:])
        statement_used = False
    else:
        lines.append(raw_line.rstrip())
# Flush whatever is still pending after the last line of the file.
if directive:
    directive(directive_param)
elif lines:
    orphan_lines(lines)
| apache-2.0 |
HingeChat/HingeChat | src/hingechat/qt/qChatWidget.py | 1 | 8829 | import re
from PyQt5.QtCore import Qt
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QFontMetrics
from PyQt5.QtWidgets import QHBoxLayout
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QPushButton
from PyQt5.QtWidgets import QSplitter
from PyQt5.QtWidgets import QTextBrowser
from PyQt5.QtWidgets import QTextEdit
from PyQt5.QtWidgets import QLineEdit
from PyQt5.QtWidgets import QWidget
from src.hingechat.qt import qtUtils
from src.hinge.utils import *
class QChatWidget(QWidget):
    """One chat tab: a read-only HTML log, a text input and a send button.

    Typing notifications are throttled with a single-shot QTimer; a guard
    flag (``self.cleared``) suppresses the spurious textChanged signal that
    fires when the input box is cleared programmatically after a send.
    """

    def __init__(self, chat_window, nick, parent=None):
        QWidget.__init__(self, parent)
        self.chat_window = chat_window
        self.nick = nick
        self.disabled = False
        # True right after the input box was cleared programmatically, so the
        # resulting textChanged signal is not mistaken for the user typing.
        self.cleared = False
        self.url_regex = re.compile(URL_REGEX)

        self.chat_log = QTextBrowser()
        self.chat_log.setOpenExternalLinks(True)

        self.chat_input = QTextEdit()
        self.chat_input.textChanged.connect(self.chatInputTextChanged)

        self.send_button = QPushButton("Send")
        self.send_button.clicked.connect(self.sendMessage)

        # Set the min height for the chatlog and a matching fixed height for
        # the send button.
        chat_input_font_metrics = QFontMetrics(self.chat_input.font())
        self.chat_input.setMinimumHeight(chat_input_font_metrics.lineSpacing() * 3)
        self.send_button.setFixedHeight(chat_input_font_metrics.lineSpacing() * 3)

        hbox = QHBoxLayout()
        hbox.addWidget(self.chat_input)
        hbox.addWidget(self.send_button)

        # Put the chat input and send button in a wrapper widget so they may
        # be added to the splitter.
        chat_input_wrapper = QWidget()
        chat_input_wrapper.setLayout(hbox)
        # Fix: Qt expects an int here; lineSpacing() * 3.7 is a float and
        # raises TypeError under PyQt5, so truncate explicitly.
        chat_input_wrapper.setMinimumHeight(int(chat_input_font_metrics.lineSpacing() * 3.7))

        # Put the chat log and chat input into a splitter so the user can
        # resize them at will.
        splitter = QSplitter(Qt.Vertical)
        splitter.addWidget(self.chat_log)
        splitter.addWidget(chat_input_wrapper)
        splitter.setSizes([int(parent.height()), 1])

        hbox = QHBoxLayout()
        hbox.addWidget(splitter)
        self.setLayout(hbox)

        # Single-shot timer used to detect that the user stopped typing.
        self.typing_timer = QTimer()
        self.typing_timer.setSingleShot(True)
        self.typing_timer.timeout.connect(self.stoppedTyping)

    def setRemoteNick(self, nick):
        """Update the nick of the remote peer shown in this tab."""
        self.nick = nick

    def chatInputTextChanged(self):
        """React to edits: send on a trailing newline, else emit typing status."""
        # Check if the text changed was the text box being cleared to avoid
        # sending an invalid typing status.
        if self.cleared:
            self.cleared = False
            return

        if str(self.chat_input.toPlainText())[-1:] == '\n':
            self.sendMessage()
        else:
            # Start a timer to check for the user stopping typing.
            self.typing_timer.start(TYPING_TIMEOUT)
            self.sendTypingStatus(TYPING_START)

    def stoppedTyping(self):
        """Timer callback: report whether text remains in the input box."""
        self.typing_timer.stop()
        if str(self.chat_input.toPlainText()) == '':
            self.sendTypingStatus(TYPING_STOP_WITHOUT_TEXT)
        else:
            self.sendTypingStatus(TYPING_STOP_WITH_TEXT)

    def sendMessage(self):
        """Send the current input (minus its trailing newline) to the peer."""
        if self.disabled:
            return

        self.typing_timer.stop()

        # toPlainText() ends with the newline that triggered the send.
        text = str(self.chat_input.toPlainText())[:-1]

        # Don't send empty messages.
        if text == '':
            return

        # Convert URLs into clickable links.
        text = self.__linkify(text)

        # Add the message to the message queue to be sent.
        self.chat_window.client.getSession(self.remote_id).sendChatMessage(text)

        # Clear the chat input; set the guard flag first so the resulting
        # textChanged signal is ignored.  Fix: this previously assigned
        # ``self.wasCleared``, an attribute chatInputTextChanged never reads,
        # so a spurious TYPING_START was sent after every message.
        self.cleared = True
        self.chat_input.clear()

        self.appendMessage(text, MSG_SENDER)

    def sendTypingStatus(self, status):
        """Forward a typing-status code to the session for this peer."""
        self.chat_window.client.getSession(self.remote_id).sendTypingMessage(status)

    def showNowChattingMessage(self, nick):
        """Bind this tab to `nick` and show the introductory service messages."""
        self.nick = nick
        self.remote_id = self.chat_window.client.getClientId(self.nick)

        self.appendMessage("You are now securely chatting with " + self.nick + " :)",
                           MSG_SERVICE, show_timestamp_and_nick=False)

        self.appendMessage("It's a good idea to verify the communcation is secure by selecting "
                           "\"authenticate buddy\" in the options menu.", MSG_SERVICE, show_timestamp_and_nick=False)

        self.addNickButton = QPushButton('Add', self)
        self.addNickButton.setGeometry(584, 8, 31, 23)
        self.addNickButton.clicked.connect(self.addNickScreen)
        self.addNickButton.show()

    def addUser(self, user):
        """Validate the nick typed into the add-user dialog.

        NOTE(review): ``utils`` and ``errors`` are not names this module
        imports (it does ``from src.hinge.utils import *``); unless that
        package re-exports them, every reference below raises NameError at
        runtime -- confirm against src/hinge/utils before shipping.
        """
        nick = str(user.text()).lower()

        # Validate the given nick.
        nickStatus = utils.isValidNick(nick)
        if nickStatus == errors.VALID_NICK:
            # TODO: Group chats
            pass
        elif nickStatus == errors.INVALID_NICK_CONTENT:
            QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_CONTENT)
        elif nickStatus == errors.INVALID_NICK_LENGTH:
            QMessageBox.warning(self, errors.TITLE_INVALID_NICK, errors.INVALID_NICK_LENGTH)
        elif nickStatus == errors.INVALID_EMPTY_NICK:
            QMessageBox.warning(self, errors.TITLE_EMPTY_NICK, errors.EMPTY_NICK)

    def addNickScreen(self):
        """Overlay an inline add-user form on top of the (disabled) chat UI."""
        self.chat_log.setEnabled(False)
        self.chat_input.setEnabled(False)
        self.send_button.setEnabled(False)
        self.addNickButton.hide()

        self.addUserText = QLabel("Enter a username to add a user to the group chat.", self)
        self.addUserText.setGeometry(200, 20, 300, 100)
        self.addUserText.show()

        self.user = QLineEdit(self)
        self.user.setGeometry(200, 120, 240, 20)
        self.user.returnPressed.connect(self.addUser)
        self.user.show()

        self.addUserButton = QPushButton('Add User', self)
        self.addUserButton.setGeometry(250, 150, 150, 25)
        self.addUserButton.clicked.connect(lambda: self.addUser(self.user))
        self.addUserButton.show()

        # Cancel restores the chat UI and tears the form down again.
        self.cancel = QPushButton('Cancel', self)
        self.cancel.setGeometry(298, 210, 51, 23)
        self.cancel.clicked.connect(lambda: self.chat_log.setEnabled(True))
        self.cancel.clicked.connect(lambda: self.chat_input.setEnabled(True))
        self.cancel.clicked.connect(lambda: self.send_button.setEnabled(True))
        self.cancel.clicked.connect(self.addUserText.hide)
        self.cancel.clicked.connect(self.user.hide)
        self.cancel.clicked.connect(self.addUserButton.hide)
        self.cancel.clicked.connect(self.addNickButton.show)
        self.cancel.clicked.connect(self.cancel.hide)
        self.cancel.show()

    def appendMessage(self, message, source, show_timestamp_and_nick=True):
        """Append `message` to the log, colored by source, optionally with a
        timestamp/nick prefix."""
        color = self.__getColor(source)

        if show_timestamp_and_nick:
            timestamp = '<font color="' + color + '">(' + getTimestamp() + ') <strong>' + \
                        (self.chat_window.client.nick if source == MSG_SENDER else self.nick) + \
                        ':</strong></font> '
        else:
            timestamp = ''

        # If the user has scrolled up (current value != maximum), do not move
        # the scrollbar to the bottom after appending the message.
        # Fix: this previously compared against the undefined name
        # ``constants.SENDER`` (NameError); MSG_SENDER is what the rest of
        # this module uses for locally-sent messages.
        shouldScroll = True
        scrollbar = self.chat_log.verticalScrollBar()
        if scrollbar.value() != scrollbar.maximum() and source != MSG_SENDER:
            shouldScroll = False

        self.chat_log.append(timestamp + message)

        # Move the vertical scrollbar to the bottom of the chat log.
        if shouldScroll:
            scrollbar.setValue(scrollbar.maximum())

    def __linkify(self, text):
        """Wrap every URL in `text` in an HTML anchor tag."""
        matches = self.url_regex.findall(text)
        for match in matches:
            text = text.replace(match[0], '<a href="%s">%s</a>' % (match[0], match[0]))
        return text

    def __getColor(self, source):
        """Return the display color for a message source, theme-aware."""
        if source == MSG_SENDER:
            if qtUtils.is_light_theme:
                return '#0000CC'
            else:
                return '#6666FF'
        elif source == MSG_RECEIVER:
            if qtUtils.is_light_theme:
                return '#CC0000'
            else:
                return '#CC3333'
        else:
            if qtUtils.is_light_theme:
                return '#000000'
            else:
                return '#FFFFFF'

    def disable(self):
        """Block sending and make the input read-only."""
        self.disabled = True
        self.chat_input.setReadOnly(True)

    def enable(self):
        """Re-enable sending and editing."""
        self.disabled = False
        self.chat_input.setReadOnly(False)
emanuelschuetze/OpenSlides | tests/integration/topics/test_viewset.py | 2 | 1395 | import pytest
from django.urls import reverse
from rest_framework import status
from openslides.agenda.models import Item
from openslides.topics.models import Topic
from openslides.utils.test import TestCase
from ..helpers import count_queries
@pytest.mark.django_db(transaction=False)
def test_topic_item_db_queries():
    """Fetching all topics must need exactly three queries:
    one for the topic list, one for attachments, one for the agenda items.
    """
    titles = [f"topic-{i}" for i in range(10)]
    for title in titles:
        Topic.objects.create(title=title)

    assert count_queries(Topic.get_elements) == 3
class TopicCreate(TestCase):
    """Tests creation of new topics via the REST API."""

    def setUp(self):
        self.client.login(username="admin", password="admin")

    def test_simple_create(self):
        """POSTing title and text creates a topic plus its agenda item."""
        payload = {
            "title": "test_title_ahyo1uifoo9Aiph2av5a",
            "text": "test_text_chu9Uevoo5choo0Xithe",
        }
        response = self.client.post(reverse("topic-list"), payload)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)

        topic = Topic.objects.get()
        self.assertEqual(topic.title, payload["title"])
        self.assertEqual(topic.text, payload["text"])
        self.assertEqual(Item.objects.get(), topic.agenda_item)
| mit |
hankcs/HanLP | hanlp/components/parsers/biaffine_tf/alg.py | 2 | 10513 | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-26 19:49
# Ported from the PyTorch implementation https://github.com/zysite/biaffine-parser
from typing import List
import numpy as np
import tensorflow as tf
from collections import defaultdict
def nonzero(t: tf.Tensor) -> tf.Tensor:
    # Indices of entries strictly greater than zero.
    # NOTE: despite the name, negative entries are excluded too (t > 0),
    # unlike torch.nonzero.
    return tf.where(t > 0)


def view(t: tf.Tensor, *dims) -> tf.Tensor:
    # PyTorch-style alias: reshape `t` to the given dimensions.
    return tf.reshape(t, dims)


def arange(n: int) -> tf.Tensor:
    # PyTorch-style alias for tf.range(n).
    return tf.range(n)


def randperm(n: int) -> tf.Tensor:
    # Random permutation of 0..n-1, as in torch.randperm.
    return tf.random.shuffle(arange(n))


def tolist(t: tf.Tensor) -> List:
    # Convert an eager tensor to a plain Python list; non-tensors are
    # expected to already expose .tolist() (e.g. numpy arrays).
    if isinstance(t, tf.Tensor):
        t = t.numpy()
    return t.tolist()
def kmeans(x, k, seed=None):
    """1-D k-means over sentence lengths, used to build length buckets.

    See https://github.com/zysite/biaffine-parser/blob/master/parser/utils/alg.py#L7

    Args:
        x(list): Lengths of sentences
        k(int): Number of clusters
        seed: Random seed for the initial centroid shuffle (Default value = None)

    Returns:
        (centroids, clusters): list of k centroid values and, per centroid,
        the list of indices into `x` assigned to it (empty clusters dropped).
    """
    x = tf.constant(x, dtype=tf.float32)
    # count the frequency of each datapoint
    d, indices, f = tf.unique_with_counts(x, tf.int32)
    f = tf.cast(f, tf.float32)
    # calculate the sum of the values of the same datapoints
    total = d * f
    # initialize k centroids randomly
    c, old = tf.random.shuffle(d, seed)[:k], None
    # assign labels to each datapoint based on centroids
    dists = tf.abs(tf.expand_dims(d, -1) - c)
    y = tf.argmin(dists, axis=-1, output_type=tf.int32)
    # distance of each datapoint to its assigned centroid
    dists = tf.gather_nd(dists, tf.transpose(tf.stack([tf.range(tf.shape(dists)[0], dtype=tf.int32), y])))
    # make sure number of datapoints is greater than that of clusters
    assert len(d) >= k, f"unable to assign {len(d)} datapoints to {k} clusters"

    while old is None or not tf.reduce_all(c == old):
        # if an empty cluster is encountered,
        # choose the farthest datapoint from the biggest cluster
        # and move that to the empty one
        for i in range(k):
            if not tf.reduce_any(y == i):
                mask = tf.cast(y == tf.expand_dims(tf.range(k, dtype=tf.int32), -1), tf.float32)
                lens = tf.reduce_sum(mask, axis=-1)
                biggest = view(nonzero(mask[tf.argmax(lens)]), -1)
                farthest = tf.argmax(tf.gather(dists, biggest))
                # Fix: tf.tensor_scatter_nd_update returns a *new* tensor
                # (TF tensors are immutable); the original discarded the
                # result, so the reassignment never took effect.  Assign it
                # back to y, mirroring the in-place index assignment in the
                # PyTorch source this was ported from.
                y = tf.tensor_scatter_nd_update(y, tf.expand_dims(tf.expand_dims(biggest[farthest], -1), -1), [i])
        mask = tf.cast(y == tf.expand_dims(tf.range(k, dtype=tf.int32), -1), tf.float32)
        # update the centroids (frequency-weighted mean of member values)
        c, old = tf.cast(tf.reduce_sum(total * mask, axis=-1), tf.float32) / tf.cast(tf.reduce_sum(f * mask, axis=-1),
                                                                                     tf.float32), c
        # re-assign all datapoints to clusters
        dists = tf.abs(tf.expand_dims(d, -1) - c)
        y = tf.argmin(dists, axis=-1, output_type=tf.int32)
        dists = tf.gather_nd(dists, tf.transpose(tf.stack([tf.range(tf.shape(dists)[0], dtype=tf.int32), y])))
    # assign all datapoints to the new-generated clusters
    # without considering the empty ones
    y, (assigned, _) = tf.gather(y, indices), tf.unique(y)
    # get the centroids of the assigned clusters
    centroids = tf.gather(c, assigned).numpy().tolist()
    # map all values of datapoints to buckets
    clusters = [tf.squeeze(tf.where(y == i), axis=-1).numpy().tolist() for i in assigned]

    return centroids, clusters
# ***************************************************************
class Tarjan:
    """Computes Tarjan's algorithm for finding strongly connected components (cycles) of a graph"""

    def __init__(self, prediction, tokens):
        """Build the dependency graph and immediately run the SCC search.

        Parameters
        ----------
        prediction : numpy.ndarray
            a predicted dependency tree where prediction[dep_idx] = head_idx
        tokens : numpy.ndarray
            the tokens we care about (i.e. exclude _GO, _EOS, and _PAD)
        """
        # head -> set of dependents; vertex 0 is the artificial ROOT
        self._edges = defaultdict(set)
        self._vertices = set((0,))
        # dependent indices are shifted by one because ROOT occupies index 0
        for dep, head in enumerate(prediction[tokens]):
            self._vertices.add(dep + 1)
            self._edges[head].add(dep + 1)
        self._indices = {}    # vertex -> DFS discovery index
        self._lowlinks = {}   # vertex -> smallest index reachable from it
        self._onstack = defaultdict(lambda: False)  # vertex currently on the DFS stack?
        self._SCCs = []       # list of sets, one per strongly connected component
        index = 0
        stack = []
        # start a DFS from every vertex not yet discovered
        for v in self.vertices:
            if v not in self.indices:
                self.strongconnect(v, index, stack)

    # =============================================================
    def strongconnect(self, v, index, stack):
        """Recursive DFS step of Tarjan's algorithm rooted at vertex ``v``.

        Args:
            v: vertex to expand
            index: next DFS discovery index to assign
            stack: shared DFS stack of vertices in the current search

        Returns:
            None; completed components are appended to ``self._SCCs``.

        NOTE(review): ``index`` is passed by value, so sibling subtrees reuse
        discovery indices instead of sharing a single global counter as in
        canonical Tarjan.  This mirrors Dozat's original implementation --
        confirm before changing.
        """
        self._indices[v] = index
        self._lowlinks[v] = index
        index += 1
        stack.append(v)
        self._onstack[v] = True
        for w in self.edges[v]:
            if w not in self.indices:
                # tree edge: recurse, then propagate the child's lowlink
                self.strongconnect(w, index, stack)
                self._lowlinks[v] = min(self._lowlinks[v], self._lowlinks[w])
            elif self._onstack[w]:
                # back edge to a vertex in the current component
                self._lowlinks[v] = min(self._lowlinks[v], self._indices[w])
        # v is the root of an SCC: pop the stack down to (and including) v
        if self._lowlinks[v] == self._indices[v]:
            self._SCCs.append(set())
            while stack[-1] != v:
                w = stack.pop()
                self._onstack[w] = False
                self._SCCs[-1].add(w)
            w = stack.pop()
            self._onstack[w] = False
            self._SCCs[-1].add(w)
        return

    # ======================
    @property
    def edges(self):
        # adjacency map: head vertex -> set of dependent vertices
        return self._edges

    @property
    def vertices(self):
        return self._vertices

    @property
    def indices(self):
        # DFS discovery indices; doubles as the "visited" set
        return self._indices

    @property
    def SCCs(self):
        return self._SCCs
def tarjan(parse_probs, length, tokens_to_keep, ensure_tree=True):
    """Adopted from Timothy Dozat https://github.com/tdozat/Parser/blob/master/lib/models/nn.py

    Decode a head for every token from the arc probability matrix and, when
    ``ensure_tree`` is set, repair the result into a well-formed dependency
    tree: exactly one token attached to ROOT and no cycles.

    Args:
        parse_probs(NDArray): seq_len x seq_len, the probability of arcs
        length(NDArray): sentence length including ROOT
        tokens_to_keep(NDArray): mask matrix
        ensure_tree: (Default value = True)

    Returns:
        NDArray of predicted head indices, one per position.

    NOTE(review): ``parse_probs`` is modified in place during the repair
    steps -- callers that reuse the matrix should pass a copy.
    """
    if ensure_tree:
        I = np.eye(len(tokens_to_keep))
        # block loops and pad heads
        parse_probs = parse_probs * tokens_to_keep * (1 - I)
        parse_preds = np.argmax(parse_probs, axis=1)
        tokens = np.arange(1, length)
        roots = np.where(parse_preds[tokens] == 0)[0] + 1
        # ensure at least one root
        if len(roots) < 1:
            # The current root probabilities
            root_probs = parse_probs[tokens, 0]
            # The current head probabilities
            old_head_probs = parse_probs[tokens, parse_preds[tokens]]
            # Get new potential root probabilities
            new_root_probs = root_probs / old_head_probs
            # Select the most probable root
            new_root = tokens[np.argmax(new_root_probs)]
            # Make the change
            parse_preds[new_root] = 0
        # ensure at most one root
        elif len(roots) > 1:
            # The probabilities of the current heads
            root_probs = parse_probs[roots, 0]
            # Set the probability of depending on the root zero
            parse_probs[roots, 0] = 0
            # Get new potential heads and their probabilities
            new_heads = np.argmax(parse_probs[roots][:, tokens], axis=1) + 1
            new_head_probs = parse_probs[roots, new_heads] / root_probs
            # Select the most probable root (the one losing least by re-attachment)
            new_root = roots[np.argmin(new_head_probs)]
            # Make the change
            parse_preds[roots] = new_heads
            parse_preds[new_root] = 0
        # remove cycles
        tarjan = Tarjan(parse_preds, tokens)
        for SCC in tarjan.SCCs:
            if len(SCC) > 1:
                dependents = set()
                to_visit = set(SCC)
                # collect every node reachable from the cycle; attaching to
                # any of these would re-introduce a cycle
                while len(to_visit) > 0:
                    node = to_visit.pop()
                    if not node in dependents:
                        dependents.add(node)
                        to_visit.update(tarjan.edges[node])
                # The indices of the nodes that participate in the cycle
                cycle = np.array(list(SCC))
                # The probabilities of the current heads
                old_heads = parse_preds[cycle]
                old_head_probs = parse_probs[cycle, old_heads]
                # Set the probability of depending on a non-head to zero
                non_heads = np.array(list(dependents))
                parse_probs[np.repeat(cycle, len(non_heads)), np.repeat([non_heads], len(cycle), axis=0).flatten()] = 0
                # Get new potential heads and their probabilities
                new_heads = np.argmax(parse_probs[cycle][:, tokens], axis=1) + 1
                new_head_probs = parse_probs[cycle, new_heads] / old_head_probs
                # Select the most probable change
                change = np.argmax(new_head_probs)
                changed_cycle = cycle[change]
                old_head = old_heads[change]
                new_head = new_heads[change]
                # Make the change (break the cycle by re-attaching one node)
                parse_preds[changed_cycle] = new_head
                tarjan.edges[new_head].add(changed_cycle)
                tarjan.edges[old_head].remove(changed_cycle)
        return parse_preds
    else:
        # block and pad heads
        parse_probs = parse_probs * tokens_to_keep
        parse_preds = np.argmax(parse_probs, axis=1)
        return parse_preds
def rel_argmax(rel_probs, length, root, ensure_tree=True):
    """Choose a relation label for every token, optionally repairing the
    predictions so that exactly one token carries the ROOT relation.

    Args:
        rel_probs(NDArray): seq_len x rel_size relation label probabilities
        length: real sentence length (position 0 is the ROOT slot)
        root: index of the ROOT relation label
        ensure_tree: enforce the single-ROOT constraint (Default value = True)

    Returns:
        NDArray of predicted relation label indices, one per position.

    Note: the multi-root repair zeroes entries of ``rel_probs`` in place,
    matching the original implementation.
    """
    rel_preds = np.argmax(rel_probs, axis=1)
    if not ensure_tree:
        return rel_preds

    tokens = np.arange(1, length)
    roots = 1 + np.where(rel_preds[tokens] == root)[0]
    n_roots = len(roots)
    if n_roots < 1:
        # no ROOT predicted: promote the token most likely to be ROOT
        rel_preds[1 + np.argmax(rel_probs[tokens, root])] = root
    elif n_roots > 1:
        # several ROOTs: keep the one whose best alternative label is
        # weakest relative to its ROOT score; relabel the rest
        root_probs = rel_probs[roots, root]
        rel_probs[roots, root] = 0
        alt_preds = np.argmax(rel_probs[roots], axis=1)
        alt_probs = rel_probs[roots, alt_preds] / root_probs
        keep = roots[np.argmin(alt_probs)]
        rel_preds[roots] = alt_preds
        rel_preds[keep] = root
    return rel_preds
| apache-2.0 |
AutorestCI/azure-sdk-for-python | azure-mgmt-storage/azure/mgmt/storage/v2017_10_01/models/usage_name.py | 8 | 1317 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UsageName(Model):
    """The usage names that can be used; currently limited to StorageAccount.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar value: Gets a string describing the resource name.
    :vartype value: str
    :ivar localized_value: Gets a localized string describing the resource
     name.
    :vartype localized_value: str
    """

    # Both fields are server-populated, hence marked read-only for msrest.
    _validation = {
        'value': {'readonly': True},
        'localized_value': {'readonly': True},
    }

    # Maps Python attribute names to wire-format keys for (de)serialization.
    _attribute_map = {
        'value': {'key': 'value', 'type': 'str'},
        'localized_value': {'key': 'localizedValue', 'type': 'str'},
    }

    def __init__(self):
        super(UsageName, self).__init__()
        self.value = None
        self.localized_value = None
| mit |
LLNL/spack | var/spack/repos/builtin/packages/ghost/package.py | 4 | 2514 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Ghost(CMakePackage, CudaPackage):
    """GHOST: a General, Hybrid and Optimized Sparse Toolkit.

    This library provides highly optimized building blocks for implementing
    sparse iterative eigenvalue and linear solvers multi- and manycore
    clusters and on heterogenous CPU/GPU machines. For an iterative solver
    library using these kernels, see the phist package.
    """

    homepage = "https://www.bitbucket.org/essex/ghost/"
    git = "https://bitbucket.org/essex/ghost/ghost.git"

    version('develop', branch='devel')

    variant('shared', default=True,
            description='Enables the build of shared libraries')
    variant('mpi', default=True,
            description='enable/disable MPI')
    variant('scotch', default=False,
            description='enable/disable matrix reordering with PT-SCOTCH')
    variant('zoltan', default=False,
            description='enable/disable matrix reordering with Zoltan')

    # ###################### Dependencies ##########################
    # Everything should be compiled position independent (-fpic)
    depends_on('cmake@3.5:')
    depends_on('hwloc')
    depends_on('blas')
    depends_on('mpi', when='+mpi')
    depends_on('scotch', when='+scotch')
    depends_on('zoltan', when='+zoltan')

    def cmake_args(self):
        """Assemble the CMake command-line definitions for this build."""
        spec = self.spec

        def bool_flag(name, enabled):
            # Render a -D<name>:BOOL=ON/OFF switch from a variant test.
            return '-D%s:BOOL=%s' % (name, 'ON' if enabled else 'OFF')

        # note: we require the cblas_include_dir property from the blas
        # provider, this is implemented at least for intel-mkl and
        # netlib-lapack
        return [
            bool_flag('GHOST_ENABLE_MPI', '+mpi' in spec),
            bool_flag('GHOST_USE_CUDA', '+cuda' in spec),
            bool_flag('GHOST_USE_SCOTCH', '+scotch' in spec),
            bool_flag('GHOST_USE_ZOLTAN', '+zoltan' in spec),
            bool_flag('BUILD_SHARED_LIBS', '+shared' in spec),
            '-DCBLAS_INCLUDE_DIR:STRING=%s'
            % format(spec['blas'].headers.directories[0]),
            '-DBLAS_LIBRARIES=%s'
            % spec['blas:c'].libs.joined(';'),
        ]

    def check(self):
        # GHOST ships a 'test' target rather than the default 'check'.
        make('test')
pranjan77/ranjansample | lib/ranjansample/ranjansampleClient.py | 1 | 7092 | ############################################################
#
# Autogenerated by the KBase type compiler -
# any changes made here will be overwritten
#
############################################################
try:
import json as _json
except ImportError:
import sys
sys.path.append('simplejson-2.3.3')
import simplejson as _json
import requests as _requests
import urlparse as _urlparse
import random as _random
import base64 as _base64
from ConfigParser import ConfigParser as _ConfigParser
import os as _os
_CT = 'content-type'
_AJ = 'application/json'
_URL_SCHEME = frozenset(['http', 'https'])
def _get_token(user_id, password,
               auth_svc='https://nexus.api.globusonline.org/goauth/token?' +
                        'grant_type=client_credentials'):
    """Fetch a Globus Online access token via HTTP Basic authentication.

    Args:
        user_id: KBase user name.
        password: matching password.
        auth_svc: token-granting endpoint URL.

    Returns:
        The access token string.

    Raises:
        Exception: on a 403 (bad credentials) or any other non-2xx response.
    """
    # This is bandaid helper function until we get a full
    # KBase python auth client released
    # b64encode, not the legacy encodestring: encodestring inserts a newline
    # every 76 chars plus a trailing one, which corrupts the header value.
    auth = _base64.b64encode(user_id + ':' + password)
    headers = {'Authorization': 'Basic ' + auth}
    ret = _requests.get(auth_svc, headers=headers, allow_redirects=True)
    status = ret.status_code
    if status >= 200 and status <= 299:
        tok = _json.loads(ret.text)
    elif status == 403:
        raise Exception('Authentication failed: Bad user_id/password ' +
                        'combination for user %s' % (user_id))
    else:
        raise Exception(ret.text)
    return tok['access_token']
def _read_rcfile(file=_os.environ['HOME'] + '/.authrc'):  # @ReservedAssignment
    """Best-effort read of the legacy ~/.authrc JSON credentials file.

    Returns:
        A dict restricted to the known credential keys (missing keys map to
        None), or None if the file does not exist or cannot be parsed.
    """
    # Another bandaid to read in the ~/.authrc file if one is present
    authdata = None
    if _os.path.exists(file):
        try:
            with open(file) as authrc:
                rawdata = _json.load(authrc)
            # strip down whatever we read to only what is legit
            authdata = {x: rawdata.get(x) for x in (
                'user_id', 'token', 'client_secret', 'keyfile',
                'keyfile_passphrase', 'password')}
        except Exception as e:  # was the Python-2-only "except Exception, e"
            print("Error while reading authrc file %s: %s" % (file, e))
    return authdata
def _read_inifile(file=_os.environ.get(  # @ReservedAssignment
        'KB_DEPLOYMENT_CONFIG', _os.environ['HOME'] +
        '/.kbase_config')):
    """Best-effort read of the [authentication] section of a KBase INI file.

    Returns:
        A dict restricted to the known credential keys (options missing from
        the section map to None), or None if the file does not exist or
        cannot be parsed.
    """
    # Another bandaid to read in the ~/.kbase_config file if one is present
    authdata = None
    if _os.path.exists(file):
        try:
            config = _ConfigParser()
            config.read(file)
            # strip down whatever we read to only what is legit
            authdata = {x: config.get('authentication', x)
                        if config.has_option('authentication', x)
                        else None for x in ('user_id', 'token',
                                            'client_secret', 'keyfile',
                                            'keyfile_passphrase', 'password')}
        except Exception as e:  # was the Python-2-only "except Exception, e"
            print("Error while reading INI file %s: %s" % (file, e))
    return authdata
class ServerError(Exception):
    """Raised when the JSON-RPC service reports a server-side error.

    Carries the remote error name, numeric code and message; ``data``
    (JSON-RPC 2.0) or ``error`` (1.1) holds any extra payload such as a
    server-side traceback.
    """

    def __init__(self, name, code, message, data=None, error=None):
        self.name = name
        self.code = code
        self.message = message if message is not None else ''
        # data = JSON RPC 2.0, error = 1.1
        self.data = data or error or ''

    def __str__(self):
        return '{}: {}. {}\n{}'.format(
            self.name, self.code, self.message, self.data)
class _JSONObjectEncoder(_json.JSONEncoder):
    """JSON encoder that serializes sets and frozensets as lists."""

    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        # anything else falls through to the stock encoder (raises TypeError)
        return _json.JSONEncoder.default(self, obj)
class ranjansample(object):
    """Auto-generated JSON-RPC 1.1 client for the KBase 'ranjansample' service."""

    def __init__(self, url=None, timeout=30 * 60, user_id=None,
                 password=None, token=None, ignore_authrc=False,
                 trust_all_ssl_certificates=False):
        # Validate the endpoint before doing any credential work.
        if url is None:
            raise ValueError('A url is required')
        scheme, _, _, _, _, _ = _urlparse.urlparse(url)
        if scheme not in _URL_SCHEME:
            raise ValueError(url + " isn't a valid http url")
        self.url = url
        self.timeout = int(timeout)
        self._headers = dict()
        self.trust_all_ssl_certificates = trust_all_ssl_certificates
        # token overrides user_id and password
        if token is not None:
            self._headers['AUTHORIZATION'] = token
        elif user_id is not None and password is not None:
            self._headers['AUTHORIZATION'] = _get_token(user_id, password)
        elif 'KB_AUTH_TOKEN' in _os.environ:
            self._headers['AUTHORIZATION'] = _os.environ.get('KB_AUTH_TOKEN')
        elif not ignore_authrc:
            # Fall back to on-disk credentials: INI config first, then ~/.authrc
            authdata = _read_inifile()
            if authdata is None:
                authdata = _read_rcfile()
            if authdata is not None:
                if authdata.get('token') is not None:
                    self._headers['AUTHORIZATION'] = authdata['token']
                elif (authdata.get('user_id') is not None
                      and authdata.get('password') is not None):
                    self._headers['AUTHORIZATION'] = _get_token(
                        authdata['user_id'], authdata['password'])
        if self.timeout < 1:
            raise ValueError('Timeout value must be at least 1 second')

    def _call(self, method, params, json_rpc_context=None):
        # Build a JSON-RPC 1.1 envelope with a random request id.
        arg_hash = {'method': method,
                    'params': params,
                    'version': '1.1',
                    'id': str(_random.random())[2:]
                    }
        if json_rpc_context:
            arg_hash['context'] = json_rpc_context
        body = _json.dumps(arg_hash, cls=_JSONObjectEncoder)
        ret = _requests.post(self.url, data=body, headers=self._headers,
                             timeout=self.timeout,
                             verify=not self.trust_all_ssl_certificates)
        if ret.status_code == _requests.codes.server_error:
            # On a 500, a JSON body carries the structured RPC error.
            json_header = None
            if _CT in ret.headers:
                json_header = ret.headers[_CT]
            if _CT in ret.headers and ret.headers[_CT] == _AJ:
                err = _json.loads(ret.text)
                if 'error' in err:
                    raise ServerError(**err['error'])
                else:
                    raise ServerError('Unknown', 0, ret.text)
            else:
                raise ServerError('Unknown', 0, ret.text)
        if ret.status_code != _requests.codes.OK:
            ret.raise_for_status()
        ret.encoding = 'utf-8'
        resp = _json.loads(ret.text)
        if 'result' not in resp:
            raise ServerError('Unknown', 0, 'An unknown server error occurred')
        return resp['result']

    def myfunc1(self, wrkspace, myname, json_rpc_context=None):
        # Thin wrapper around the service's 'myfunc1' JSON-RPC method.
        if json_rpc_context and type(json_rpc_context) is not dict:
            raise ValueError('Method myfunc1: argument json_rpc_context is not type dict as required.')
        resp = self._call('ranjansample.myfunc1',
                          [wrkspace, myname], json_rpc_context)
        return resp[0]
| mit |
memoz/ShadowDNS | shadowdns/dnsrelay.py | 79 | 11669 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2014 clowwindy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import socket
import struct
import errno
import logging
from shadowsocks import eventloop, asyncdns, lru_cache, encrypt
from shadowsocks import utils as shadowsocks_utils
from shadowsocks.common import parse_header
BUF_SIZE = 16384
CACHE_TIMEOUT = 10
class DNSRelay(object):
    """Base class for DNS relays: holds config, local/remote addresses and
    the shadowsocks destination header prepended to each forwarded query."""

    def __init__(self, config):
        self._loop = None
        self._config = config
        self._last_time = time.time()
        self._local_addr = (config['local_address'], 53)
        self._remote_addr = (config['server'], config['server_port'])
        dns_addr = config['dns']
        # Resolve the upstream DNS server once at startup.
        addrs = socket.getaddrinfo(dns_addr, 53, 0,
                                   socket.SOCK_DGRAM, socket.SOL_UDP)
        if not addrs:
            raise Exception("can't get addrinfo for DNS address")
        af, socktype, proto, canonname, sa = addrs[0]
        dns_port = struct.pack('>H', 53)
        # Shadowsocks address header: 0x01 = IPv4, 0x04 = IPv6, followed by
        # the packed address and big-endian port.
        if af == socket.AF_INET:
            self._address_to_send = '\x01' + socket.inet_aton(sa[0]) + dns_port
        else:
            self._address_to_send = '\x04' + socket.inet_pton(af, sa[0]) + \
                dns_port

    def add_to_loop(self, loop):
        # A relay may only be attached to one event loop.
        if self._loop:
            raise Exception('already add to loop')
        self._loop = loop
        loop.add_handler(self.handle_events)

    def handle_events(self, events):
        # Subclasses implement the actual packet handling.
        pass
class UDPDNSRelay(DNSRelay):
    """UDP relay: forwards DNS queries through a shadowsocks server and
    routes responses back to clients by DNS transaction id."""

    def __init__(self, config):
        DNSRelay.__init__(self, config)
        # transaction id -> client address, expired after CACHE_TIMEOUT
        self._id_to_addr = lru_cache.LRUCache(CACHE_TIMEOUT)
        self._local_sock = None
        self._remote_sock = None
        sockets = []
        for addr in (self._local_addr, self._remote_addr):
            addrs = socket.getaddrinfo(addr[0], addr[1], 0,
                                       socket.SOCK_DGRAM, socket.SOL_UDP)
            if len(addrs) == 0:
                raise Exception("can't get addrinfo for %s:%d" % addr)
            af, socktype, proto, canonname, sa = addrs[0]
            sock = socket.socket(af, socktype, proto)
            sock.setblocking(False)
            sockets.append(sock)
        self._local_sock, self._remote_sock = sockets
        # Only the client-facing socket binds; the remote one just sends.
        self._local_sock.bind(self._local_addr)

    def add_to_loop(self, loop):
        DNSRelay.add_to_loop(self, loop)
        loop.add(self._local_sock, eventloop.POLL_IN)
        loop.add(self._remote_sock, eventloop.POLL_IN)

    def _handle_local(self, sock):
        # Client query: remember who asked (by transaction id), wrap with
        # the shadowsocks address header, encrypt, and forward upstream.
        data, addr = sock.recvfrom(BUF_SIZE)
        header = asyncdns.parse_header(data)
        if header:
            try:
                req_id = header[0]
                req = asyncdns.parse_response(data)
                self._id_to_addr[req_id] = addr
                data = self._address_to_send + data
                # encrypt_all with op=1 encrypts
                data = encrypt.encrypt_all(self._config['password'],
                                           self._config['method'], 1, data)
                self._remote_sock.sendto(data, self._remote_addr)
                logging.info('request %s', req.hostname)
            except Exception as e:
                import traceback
                traceback.print_exc()
                logging.error(e)

    def _handle_remote(self, sock):
        # Upstream reply: decrypt, strip the shadowsocks header, and send
        # the bare DNS response back to the original client.
        data, addr = sock.recvfrom(BUF_SIZE)
        if data:
            try:
                # encrypt_all with op=0 decrypts
                data = encrypt.encrypt_all(self._config['password'],
                                           self._config['method'], 0, data)
                header_result = parse_header(data)
                if header_result is None:
                    return None, None
                addrtype, dest_addr, dest_port, header_length = header_result
                data = data[header_length:]
                header = asyncdns.parse_header(data)
                if header:
                    req_id = header[0]
                    res = asyncdns.parse_response(data)
                    addr = self._id_to_addr.get(req_id, None)
                    if addr:
                        self._local_sock.sendto(data, addr)
                        del self._id_to_addr[req_id]
                        logging.info('response %s', res)
            except Exception as e:
                import traceback
                traceback.print_exc()
                logging.error(e)

    def handle_events(self, events):
        for sock, fd, event in events:
            if sock == self._local_sock:
                self._handle_local(sock)
            elif sock == self._remote_sock:
                self._handle_remote(sock)
        # Periodically expire stale id -> address mappings.
        # NOTE(review): _last_time is never refreshed here, so after the
        # first interval the cache is swept on every event batch -- confirm
        # whether that is intended.
        now = time.time()
        if now - self._last_time > CACHE_TIMEOUT / 2:
            self._id_to_addr.sweep()
class TCPDNSRelay(DNSRelay):
    """TCP relay: accepts DNS-over-TCP connections and tunnels each one
    through the shadowsocks server with a per-connection encryptor."""

    def __init__(self, config):
        DNSRelay.__init__(self, config)
        # Per-connection state: socket pairing in both directions plus the
        # stream cipher created on the first chunk from the client.
        self._local_to_remote = {}
        self._remote_to_local = {}
        self._local_to_encryptor = {}
        addrs = socket.getaddrinfo(self._local_addr[0], self._local_addr[1], 0,
                                   socket.SOCK_STREAM, socket.SOL_TCP)
        if len(addrs) == 0:
            raise Exception("can't get addrinfo for %s:%d" % self._local_addr)
        af, socktype, proto, canonname, sa = addrs[0]
        self._listen_sock = socket.socket(af, socktype, proto)
        self._listen_sock.setblocking(False)
        self._listen_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._listen_sock.bind(self._local_addr)
        self._listen_sock.listen(1024)

    def _handle_conn(self, sock):
        # Accept a client connection and start a non-blocking connect to
        # the shadowsocks server; pairing state is registered immediately.
        try:
            local, addr = sock.accept()
            addrs = socket.getaddrinfo(self._remote_addr[0],
                                       self._remote_addr[1], 0,
                                       socket.SOCK_STREAM, socket.SOL_TCP)
            if len(addrs) == 0:
                raise Exception("can't get addrinfo for %s:%d" %
                                self._remote_addr)
            af, socktype, proto, canonname, sa = addrs[0]
            remote = socket.socket(af, socktype, proto)
            local.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
            remote.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
            self._local_to_remote[local] = remote
            self._remote_to_local[remote] = local
            self._loop.add(local, 0)
            # POLL_OUT fires once the non-blocking connect completes.
            self._loop.add(remote, eventloop.POLL_OUT)
            try:
                remote.connect(self._remote_addr)
            except (OSError, IOError) as e:
                # EINPROGRESS/EAGAIN are expected for non-blocking connect.
                if eventloop.errno_from_exception(e) in (errno.EINPROGRESS,
                                                         errno.EAGAIN):
                    pass
                else:
                    raise
        except (OSError, IOError) as e:
            logging.error(e)

    def _destroy(self, local, remote):
        # Tear down both sides of a tunneled connection and drop all state.
        if local in self._local_to_remote:
            self._loop.remove(local)
            self._loop.remove(remote)
            del self._local_to_remote[local]
            del self._remote_to_local[remote]
            if local in self._local_to_encryptor:
                del self._local_to_encryptor[local]
            local.close()
            remote.close()
        else:
            logging.error('already destroyed')

    def _handle_local(self, local, event):
        remote = self._local_to_remote[local]
        encryptor = self._local_to_encryptor.get(local, None)
        if event & eventloop.POLL_ERR:
            self._destroy(local, remote)
        elif event & eventloop.POLL_IN:
            try:
                data = local.recv(BUF_SIZE)
                if not data:
                    # empty read == peer closed
                    self._destroy(local, remote)
                else:
                    if not encryptor:
                        # First chunk: log the query (skipping the 2-byte
                        # DNS-over-TCP length prefix), create this
                        # connection's cipher and prepend the shadowsocks
                        # destination header exactly once.
                        try:
                            req = asyncdns.parse_response(data[2:])
                            if req:
                                logging.info('request %s', req.hostname)
                        except Exception as e:
                            logging.error(e)
                        encryptor = \
                            encrypt.Encryptor(self._config['password'],
                                              self._config['method'])
                        self._local_to_encryptor[local] = encryptor
                        data = self._address_to_send + data
                    data = encryptor.encrypt(data)
                    remote.send(data)
            except (OSError, IOError) as e:
                self._destroy(local, self._local_to_remote[local])
                logging.error(e)

    def _handle_remote(self, remote, event):
        local = self._remote_to_local[remote]
        if event & eventloop.POLL_ERR:
            self._destroy(local, remote)
        elif event & eventloop.POLL_OUT:
            # Non-blocking connect finished: start relaying both directions.
            self._loop.modify(remote, eventloop.POLL_IN)
            self._loop.modify(local, eventloop.POLL_IN)
        elif event & eventloop.POLL_IN:
            try:
                data = remote.recv(BUF_SIZE)
                if not data:
                    self._destroy(local, remote)
                else:
                    encryptor = self._local_to_encryptor[local]
                    data = encryptor.decrypt(data)
                    try:
                        res = asyncdns.parse_response(data[2:])
                        if res:
                            logging.info('response %s', res)
                    except Exception as e:
                        logging.error(e)
                    local.send(data)
            except (OSError, IOError) as e:
                self._destroy(local, remote)
                logging.error(e)

    def add_to_loop(self, loop):
        DNSRelay.add_to_loop(self, loop)
        loop.add(self._listen_sock, eventloop.POLL_IN)

    def handle_events(self, events):
        # Dispatch: new connections, client-side traffic, server-side traffic.
        for sock, fd, event in events:
            if sock == self._listen_sock:
                self._handle_conn(sock)
            elif sock in self._local_to_remote:
                self._handle_local(sock, event)
            elif sock in self._remote_to_local:
                self._handle_remote(sock, event)
    # TODO implement timeout
def main():
    """Entry point: load the shadowsocks config and run UDP + TCP DNS
    relays on the same event loop (blocks forever)."""
    shadowsocks_utils.check_python()
    config = shadowsocks_utils.get_config(True)
    encrypt.init_table(config['password'], config['method'])
    logging.info("starting dns at %s:%d" % (config['local_address'], 53))
    # Upstream resolver defaults to Google public DNS.
    config['dns'] = config.get('dns', '8.8.8.8')
    loop = eventloop.EventLoop()
    udprelay = UDPDNSRelay(config)
    udprelay.add_to_loop(loop)
    tcprelay = TCPDNSRelay(config)
    tcprelay.add_to_loop(loop)
    loop.run()
if __name__ == '__main__':
main()
| mit |
namccart/gnuradio | gr-qtgui/apps/plot_base.py | 58 | 5829 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, blocks
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import os, sys
os.environ['GR_CONF_CONTROLPORT_ON'] = 'False'
try:
from gnuradio import qtgui
from PyQt4 import QtGui, QtCore
import sip
except ImportError:
print "Error: Program requires PyQt4 and gr-qtgui."
sys.exit(1)
try:
import scipy
except ImportError:
print "Error: Scipy required (www.scipy.org)."
sys.exit(1)
try:
from gnuradio.qtgui.plot_constellation_form import *
from gnuradio.qtgui.plot_psd_form import *
from gnuradio.qtgui.plot_spectrogram_form import *
from gnuradio.qtgui.plot_time_form import *
from gnuradio.qtgui.plot_time_raster_form import *
except ImportError:
from plot_constellation_form import *
from plot_psd_form import *
from plot_spectrogram_form import *
from plot_time_form import *
from plot_time_raster_form import *
def read_samples(filename, start, in_size, min_size, dtype, dtype_size):
    """Read up to *in_size* samples of *dtype* from a raw binary file.

    Args:
        filename: path to the raw sample file.
        start: offset into the file, in samples (not bytes).
        in_size: number of samples to read.
        min_size: if > 0, zero-pad the result up to this many samples.
        dtype: scalar type of the samples (e.g. scipy.float32).
        dtype_size: size of one sample in bytes.

    Returns:
        (data, data_min, data_max) where data is a plain Python list and
        the min/max are scaled by 1.1 to give plots some headroom.
    """
    # Read in_size number of samples from file.  Binary mode is required:
    # text mode corrupts raw samples on Windows and breaks fromfile on
    # Python 3.
    fhandle = open(filename, 'rb')
    fhandle.seek(start * dtype_size, 0)
    data = scipy.fromfile(fhandle, dtype=dtype, count=in_size)
    data_min = 1.1 * data.min()
    data_max = 1.1 * data.max()
    data = data.tolist()
    fhandle.close()
    if min_size > 0:
        if len(data) < in_size:
            # Short read: warn but (as before) do not pad.
            print("Warning: read in {0} samples but asked for {1} samples.".format(
                len(data), in_size))
        else:
            # If we have to, append 0's to create min_size samples of data
            if len(data) < min_size:
                data += (min_size - len(data)) * [dtype(0)]
    return data, data_min, data_max
def read_samples_f(filename, start, in_size, min_size=0):
    # 32-bit float samples
    return read_samples(filename, start, in_size, min_size,
                        scipy.float32, gr.sizeof_float)
def read_samples_i(filename, start, in_size, min_size=0):
    # 32-bit signed integer samples
    return read_samples(filename, start, in_size, min_size,
                        scipy.int32, gr.sizeof_int)
def read_samples_s(filename, start, in_size, min_size=0):
    # 16-bit signed integer samples
    return read_samples(filename, start, in_size, min_size,
                        scipy.int16, gr.sizeof_short)
def read_samples_b(filename, start, in_size, min_size=0):
    # 8-bit samples: read as signed for the min/max computation, but
    # returned as unsigned for the downstream vector source.
    d, mn, mx = read_samples(filename, start, in_size, min_size,
                             scipy.int8, gr.sizeof_char)
    # Bit of a hack since we want to read the data as signed ints, but
    # the blocks.vector_source_b will only accept unsigned. We read in as
    # signed, do our min/max and things on that, then convert here.
    d = scipy.array(d, dtype=scipy.uint8).tolist()
    return d, mn, mx
def read_samples_c(filename, start, in_size, min_size=0):
    """Read complex64 samples from a raw binary file.

    Returns:
        (data, data_min, data_max) where data is a list of Python complex
        numbers and the min/max span both real and imaginary parts,
        scaled by 1.1 for plot headroom.
    """
    # Complex samples are handled differently.  Binary mode is required:
    # text mode corrupts raw samples on Windows and breaks fromfile on
    # Python 3.
    fhandle = open(filename, 'rb')
    fhandle.seek(start * gr.sizeof_gr_complex, 0)
    data = scipy.fromfile(fhandle, dtype=scipy.complex64, count=in_size)
    data_min = 1.1 * float(min(data.real.min(), data.imag.min()))
    data_max = 1.1 * float(max(data.real.max(), data.imag.max()))
    data = data.tolist()
    fhandle.close()
    if min_size > 0:
        if len(data) < in_size:
            print("Warning: read in {0} samples but asked for {1} samples.".format(
                len(data), in_size))
        else:
            # If we have to, append 0's to create min_size samples of data
            if len(data) < min_size:
                data += (min_size - len(data)) * [complex(0, 0)]
    return data, data_min, data_max
class source_ints_to_float(gr.hier_block2):
    """Hier block: vector source of ints converted to a float stream."""

    def __init__(self, data):
        gr.hier_block2.__init__(self, "ints_to_floats",
                                gr.io_signature(0, 0, 0),
                                gr.io_signature(1, 1, gr.sizeof_float))
        self.src = blocks.vector_source_i(data)
        self.cvt = blocks.int_to_float()
        self.connect(self.src, self.cvt, self)

    def set_data(self, data):
        # Replace the samples emitted by the underlying vector source.
        self.src.set_data(data)
class source_shorts_to_float(gr.hier_block2):
    """Hier block: vector source of shorts converted to a float stream."""

    def __init__(self, data):
        gr.hier_block2.__init__(self, "shorts_to_floats",
                                gr.io_signature(0, 0, 0),
                                gr.io_signature(1, 1, gr.sizeof_float))
        self.src = blocks.vector_source_s(data)
        self.cvt = blocks.short_to_float()
        self.connect(self.src, self.cvt, self)

    def set_data(self, data):
        # Replace the samples emitted by the underlying vector source.
        self.src.set_data(data)
class source_chars_to_float(gr.hier_block2):
    """Hier block: vector source of bytes converted to a float stream."""

    def __init__(self, data):
        gr.hier_block2.__init__(self, "chars_to_floats",
                                gr.io_signature(0, 0, 0),
                                gr.io_signature(1, 1, gr.sizeof_float))
        self.src = blocks.vector_source_b(data)
        self.cvt = blocks.char_to_float()
        self.connect(self.src, self.cvt, self)

    def set_data(self, data):
        # Replace the samples emitted by the underlying vector source.
        self.src.set_data(data)
def find_max_nsamples(filelist, sample_size=None):
    """Return the largest sample count that every existing file can supply.

    Args:
        filelist: iterable of file paths; entries that do not exist are
            skipped, as before.
        sample_size: bytes per sample; defaults to gr.sizeof_gr_complex for
            backward compatibility with the original signature.

    Returns:
        The smallest per-file sample count among the existing files.

    Raises:
        ValueError: if no file in the list exists (the original ``min()`` of
            an empty list also raised ValueError, just less helpfully).
    """
    if sample_size is None:
        sample_size = gr.sizeof_gr_complex
    # Find the smallest number of samples in all files and use that as
    # a maximum value possible.  Floor division keeps integer semantics
    # on Python 3 (plain '/' would return a float).
    filesizes = [os.path.getsize(f) // sample_size
                 for f in filelist if os.path.exists(f)]
    if not filesizes:
        raise ValueError("find_max_nsamples: none of the given files exist")
    return min(filesizes)
| gpl-3.0 |
benburry/carbon | lib/carbon/client.py | 4 | 14604 | from collections import deque
from time import time
from twisted.application.service import Service
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.protocols.basic import Int32StringReceiver
from carbon.conf import settings
from carbon.util import pickle
from carbon import instrumentation, log, pipeline, state
try:
import signal
except ImportError:
log.debug("Couldn't import signal module")
SEND_QUEUE_LOW_WATERMARK = settings.MAX_QUEUE_SIZE * settings.QUEUE_LOW_WATERMARK_PCT
class CarbonClientProtocol(Int32StringReceiver):
    """Length-prefixed (Int32String) pickle protocol to one carbon
    destination.  Drains the factory's queue, pausing under TCP
    back-pressure, and can reset connections it deems too slow."""

    def connectionMade(self):
        log.clients("%s::connectionMade" % self)
        self.paused = False
        self.connected = True
        # Register as a streaming (push) producer so Twisted can apply
        # back-pressure via pauseProducing/resumeProducing.
        self.transport.registerProducer(self, streaming=True)
        # Define internal metric names
        self.lastResetTime = time()
        self.destinationName = self.factory.destinationName
        self.queuedUntilReady = 'destinations.%s.queuedUntilReady' % self.destinationName
        self.sent = 'destinations.%s.sent' % self.destinationName
        self.batchesSent = 'destinations.%s.batchesSent' % self.destinationName
        self.slowConnectionReset = 'destinations.%s.slowConnectionReset' % self.destinationName
        # Fire the factory's connection deferred and re-arm it for the
        # next reconnect.
        self.factory.connectionMade.callback(self)
        self.factory.connectionMade = Deferred()
        self.sendQueued()

    def connectionLost(self, reason):
        log.clients("%s::connectionLost %s" % (self, reason.getErrorMessage()))
        self.connected = False

    def pauseProducing(self):
        # TCP send buffer is full: stop draining the queue.
        self.paused = True

    def resumeProducing(self):
        self.paused = False
        self.sendQueued()

    def stopProducing(self):
        self.disconnect()

    def disconnect(self):
        if self.connected:
            self.transport.unregisterProducer()
            self.transport.loseConnection()
            self.connected = False

    def sendDatapoint(self, metric, datapoint):
        self.factory.enqueue(metric, datapoint)
        # Defer the actual send slightly so datapoints can batch up.
        reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.sendQueued)

    def _sendDatapoints(self, datapoints):
        # pickle protocol -1 = highest available protocol
        self.sendString(pickle.dumps(datapoints, protocol=-1))
        instrumentation.increment(self.sent, len(datapoints))
        instrumentation.increment(self.batchesSent)
        self.factory.checkQueue()

    def sendQueued(self):
        """This should be the only method that will be used to send stats.
        In order to not hold the event loop and prevent stats from flowing
        in while we send them out, this will process
        settings.MAX_DATAPOINTS_PER_MESSAGE stats, send them, and if there
        are still items in the queue, this will invoke reactor.callLater
        to schedule another run of sendQueued after a reasonable enough time
        for the destination to process what it has just received.

        Given a queue size of one million stats, and using a
        chained_invocation_delay of 0.0001 seconds, you'd get 1,000
        sendQueued() invocations/second max.  With a
        settings.MAX_DATAPOINTS_PER_MESSAGE of 100, the rate of stats being
        sent could theoretically be as high as 100,000 stats/sec, or
        6,000,000 stats/minute.  This is probably too high for a typical
        receiver to handle.

        In practice this theoretical max shouldn't be reached because
        network delays should add an extra delay - probably on the order
        of 10ms per send, so the queue should drain with an order of
        minutes, which seems more realistic.
        """
        chained_invocation_delay = 0.0001
        queueSize = self.factory.queueSize
        if self.paused:
            # Track how much is backing up while we're paused.
            instrumentation.max(self.queuedUntilReady, queueSize)
            return
        if not self.factory.hasQueuedDatapoints():
            return
        if settings.USE_RATIO_RESET is True:
            if not self.connectionQualityMonitor():
                self.resetConnectionForQualityReasons("Sent: {0}, Received: {1}".format(
                    instrumentation.prior_stats.get(self.sent, 0),
                    instrumentation.prior_stats.get('metricsReceived', 0)))
        self._sendDatapoints(self.factory.takeSomeFromQueue())
        # Unblock producers waiting on a full queue once it drains enough.
        if (self.factory.queueFull.called and
                queueSize < SEND_QUEUE_LOW_WATERMARK):
            self.factory.queueHasSpace.callback(queueSize)
        if self.factory.hasQueuedDatapoints():
            reactor.callLater(chained_invocation_delay, self.sendQueued)

    def connectionQualityMonitor(self):
        """Checks to see if the connection for this factory appears to
        be delivering stats at a speed close to what we're receiving
        them at.

        This is open to other measures of connection quality.

        Returns a Bool

        True means that quality is good, OR
        True means that the total received is less than settings.MIN_RESET_STAT_FLOW

        False means that quality is bad
        """
        destination_sent = float(instrumentation.prior_stats.get(self.sent, 0))
        total_received = float(instrumentation.prior_stats.get('metricsReceived', 0))
        # Ensure the reset counter exists even when no reset happens.
        instrumentation.increment(self.slowConnectionReset, 0)
        if total_received < settings.MIN_RESET_STAT_FLOW:
            return True
        if (destination_sent / total_received) < settings.MIN_RESET_RATIO:
            return False
        else:
            return True

    def resetConnectionForQualityReasons(self, reason):
        """Only re-sets the connection if it's been
        settings.MIN_RESET_INTERVAL seconds since the last re-set.

        Reason should be a string containing the quality info that led to
        a re-set.
        """
        if (time() - self.lastResetTime) < float(settings.MIN_RESET_INTERVAL):
            return
        else:
            self.factory.connectedProtocol.disconnect()
            self.lastResetTime = time()
            instrumentation.increment(self.slowConnectionReset)
            log.clients("%s:: resetConnectionForQualityReasons: %s" % (self, reason))

    def __str__(self):
        return 'CarbonClientProtocol(%s:%d:%s)' % (self.factory.destination)

    __repr__ = __str__
class CarbonClientFactory(ReconnectingClientFactory):
  """Client factory for a single carbon destination.

  The factory -- not the protocol -- owns the send queue, so datapoints
  queued while the connection is down survive reconnects.  Deferred
  objects signal queue-empty / queue-full / connection transitions and
  are re-armed (replaced with a fresh Deferred) after each firing.
  """
  maxDelay = 5

  def __init__(self, destination):
    # destination is a (host, port, carbon_instance) tuple.
    self.destination = destination
    self.destinationName = ('%s:%d:%s' % destination).replace('.', '_')
    self.host, self.port, self.carbon_instance = destination
    self.addr = (self.host, self.port)
    self.started = False
    # This factory maintains protocol state across reconnects
    self.queue = deque()  # Change to make this the sole source of metrics to be sent.
    self.connectedProtocol = None
    self.queueEmpty = Deferred()
    self.queueFull = Deferred()
    self.queueFull.addCallback(self.queueFullCallback)
    self.queueHasSpace = Deferred()
    self.queueHasSpace.addCallback(self.queueSpaceCallback)
    self.connectFailed = Deferred()
    self.connectionMade = Deferred()
    self.connectionLost = Deferred()
    # Define internal metric names
    self.attemptedRelays = 'destinations.%s.attemptedRelays' % self.destinationName
    self.fullQueueDrops = 'destinations.%s.fullQueueDrops' % self.destinationName
    self.queuedUntilConnected = 'destinations.%s.queuedUntilConnected' % self.destinationName
    self.relayMaxQueueLength = 'destinations.%s.relayMaxQueueLength' % self.destinationName

  def queueFullCallback(self, result):
    """Fired once when the queue first reaches settings.MAX_QUEUE_SIZE."""
    state.events.cacheFull()
    log.clients('%s send queue is full (%d datapoints)' % (self, result))

  def queueSpaceCallback(self, result):
    """Fired when the queue drains below the low watermark again."""
    if self.queueFull.called:
      log.clients('%s send queue has space available' % self.connectedProtocol)
      # Re-arm the queue-full Deferred so the next overflow is reported.
      self.queueFull = Deferred()
      self.queueFull.addCallback(self.queueFullCallback)
      state.events.cacheSpaceAvailable()
    self.queueHasSpace = Deferred()
    self.queueHasSpace.addCallback(self.queueSpaceCallback)

  def buildProtocol(self, addr):
    self.connectedProtocol = CarbonClientProtocol()
    self.connectedProtocol.factory = self
    return self.connectedProtocol

  def startConnecting(self):  # calling this startFactory yields recursion problems
    self.started = True
    self.connector = reactor.connectTCP(self.host, self.port, self)

  def stopConnecting(self):
    self.started = False
    self.stopTrying()
    if self.connectedProtocol and self.connectedProtocol.connected:
      return self.connectedProtocol.disconnect()

  @property
  def queueSize(self):
    return len(self.queue)

  def hasQueuedDatapoints(self):
    return bool(self.queue)

  def takeSomeFromQueue(self):
    """Use self.queue, which is a collections.deque, to pop up to
    settings.MAX_DATAPOINTS_PER_MESSAGE items from the left of the
    queue, returning them as a list.
    """
    def yield_max_datapoints():
      for count in range(settings.MAX_DATAPOINTS_PER_MESSAGE):
        try:
          yield self.queue.popleft()
        except IndexError:
          # BUGFIX: a bare `return` is the correct way to end a generator.
          # `raise StopIteration` inside a generator body is converted to
          # RuntimeError on Python 3.7+ (PEP 479), which would crash the
          # send path whenever the queue drained mid-batch.
          return
    return list(yield_max_datapoints())

  def checkQueue(self):
    """If the queue is empty, fire the queueEmpty Deferred chain with the
    argument 0, then re-arm queueEmpty with a fresh Deferred object so the
    next drain can be observed.
    """
    if not self.queue:
      self.queueEmpty.callback(0)
      self.queueEmpty = Deferred()

  def enqueue(self, metric, datapoint):
    self.queue.append((metric, datapoint))

  def enqueue_from_left(self, metric, datapoint):
    self.queue.appendleft((metric, datapoint))

  def sendDatapoint(self, metric, datapoint):
    """Queue one datapoint for relay, dropping it when the queue is full."""
    instrumentation.increment(self.attemptedRelays)
    instrumentation.max(self.relayMaxQueueLength, self.queueSize)
    if self.queueSize >= settings.MAX_QUEUE_SIZE:
      if not self.queueFull.called:
        self.queueFull.callback(self.queueSize)
      instrumentation.increment(self.fullQueueDrops)
    else:
      self.enqueue(metric, datapoint)
      if self.connectedProtocol:
        reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.connectedProtocol.sendQueued)
      else:
        instrumentation.increment(self.queuedUntilConnected)

  def sendHighPriorityDatapoint(self, metric, datapoint):
    """The high priority datapoint is one relating to the carbon
    daemon itself. It puts the datapoint on the left of the deque,
    ahead of other stats, so that when the carbon-relay, specifically,
    is overwhelmed its stats are more likely to make it through and
    expose the issue at hand.
    In addition, these stats go on the deque even when the max stats
    capacity has been reached. This relies on not creating the deque
    with a fixed max size.
    """
    instrumentation.increment(self.attemptedRelays)
    self.enqueue_from_left(metric, datapoint)
    if self.connectedProtocol:
      reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.connectedProtocol.sendQueued)
    else:
      instrumentation.increment(self.queuedUntilConnected)

  def startedConnecting(self, connector):
    log.clients("%s::startedConnecting (%s:%d)" % (self, connector.host, connector.port))

  def clientConnectionLost(self, connector, reason):
    ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
    log.clients("%s::clientConnectionLost (%s:%d) %s" % (self, connector.host, connector.port, reason.getErrorMessage()))
    self.connectedProtocol = None
    # Fire and re-arm so a later disconnect() can wait on a fresh Deferred.
    self.connectionLost.callback(0)
    self.connectionLost = Deferred()

  def clientConnectionFailed(self, connector, reason):
    ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
    log.clients("%s::clientConnectionFailed (%s:%d) %s" % (self, connector.host, connector.port, reason.getErrorMessage()))
    self.connectFailed.callback(dict(connector=connector, reason=reason))
    self.connectFailed = Deferred()

  def disconnect(self):
    """Stop connecting once the queue drains; return a Deferred that fires
    when the connection is fully torn down (or was never established).
    """
    self.queueEmpty.addCallback(lambda result: self.stopConnecting())
    readyToStop = DeferredList(
      [self.connectionLost, self.connectFailed],
      fireOnOneCallback=True,
      fireOnOneErrback=True)
    self.checkQueue()
    # This can happen if the client is stopped before a connection is ever made
    if (not readyToStop.called) and (not self.started):
      readyToStop.callback(None)
    return readyToStop

  def __str__(self):
    return 'CarbonClientFactory(%s:%d:%s)' % self.destination
  __repr__ = __str__
class CarbonClientManager(Service):
  """Twisted service owning one CarbonClientFactory per destination and
  fanning datapoints out to them according to the router."""

  def __init__(self, router):
    self.router = router
    self.client_factories = {}  # { destination : CarbonClientFactory() }

  def startService(self):
    # Ignore SIGHUP when the signal module is present in this namespace.
    if 'signal' in globals():
      log.debug("Installing SIG_IGN for SIGHUP")
      signal.signal(signal.SIGHUP, signal.SIG_IGN)
    Service.startService(self)
    for factory in self.client_factories.values():
      if not factory.started:
        factory.startConnecting()

  def stopService(self):
    Service.stopService(self)
    self.stopAllClients()

  def startClient(self, destination):
    """Create (if needed) and start the factory for one destination.

    Returns a Deferred that fires when the first connection attempt has
    either succeeded or failed, or None if the destination already exists.
    """
    if destination in self.client_factories:
      return
    log.clients("connecting to carbon daemon at %s:%d:%s" % destination)
    self.router.addDestination(destination)
    factory = CarbonClientFactory(destination)
    self.client_factories[destination] = factory
    connectAttempted = DeferredList(
      [factory.connectionMade, factory.connectFailed],
      fireOnOneCallback=True,
      fireOnOneErrback=True)
    if self.running:
      factory.startConnecting()  # this can trigger & replace connectFailed
    return connectAttempted

  def stopClient(self, destination):
    factory = self.client_factories.get(destination)
    if factory is None:
      return
    self.router.removeDestination(destination)
    stopCompleted = factory.disconnect()
    stopCompleted.addCallback(lambda result: self.disconnectClient(destination))
    return stopCompleted

  def disconnectClient(self, destination):
    factory = self.client_factories.pop(destination)
    connector = factory.connector
    # Abort a half-open connect attempt when nothing is left to flush.
    if connector and connector.state == 'connecting' and not factory.hasQueuedDatapoints():
      connector.stopConnecting()

  def stopAllClients(self):
    return DeferredList(
      [self.stopClient(dest) for dest in list(self.client_factories)])

  def sendDatapoint(self, metric, datapoint):
    for destination in self.router.getDestinations(metric):
      self.client_factories[destination].sendDatapoint(metric, datapoint)

  def sendHighPriorityDatapoint(self, metric, datapoint):
    for destination in self.router.getDestinations(metric):
      self.client_factories[destination].sendHighPriorityDatapoint(metric, datapoint)

  def __str__(self):
    return "<%s[%x]>" % (self.__class__.__name__, id(self))
class RelayProcessor(pipeline.Processor):
  """Pipeline processor that forwards each datapoint to the client manager."""
  # Identifier for this processor; presumably used by the pipeline plugin
  # registry to look it up by name -- confirm in the pipeline module.
  plugin_name = 'relay'
  def process(self, metric, datapoint):
    """Relay (metric, datapoint) via the global client manager.

    Returns NO_OUTPUT: relaying is terminal, so nothing is passed on to
    downstream pipeline stages.
    """
    state.client_manager.sendDatapoint(metric, datapoint)
    return pipeline.Processor.NO_OUTPUT
| apache-2.0 |
hfp/tensorflow-xsmm | tensorflow/contrib/opt/python/training/adamax.py | 39 | 8261 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AdaMax for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import adam
from tensorflow.python.training import training_ops
class AdaMaxOptimizer(adam.AdamOptimizer):
  """Optimizer that implements the AdaMax algorithm.
  Adamax is sometimes superior to adam, specially in models with embeddings,
  see [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
  ([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
  """
  def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
               use_locking=False, name="AdaMax"):
    """Construct a new AdaMax optimizer.
    Initialization:
    ```
    m_0 <- 0 (Initialize initial 1st moment vector)
    v_0 <- 0 (Initialize the exponentially weighted infinity norm)
    t <- 0 (Initialize timestep)
    ```
    The update rule for `variable` with gradient `g` uses an optimization
    described at the end of section 7.1 of the paper:
    ```
    t <- t + 1
    m_t <- beta1 * m_{t-1} + (1 - beta1) * g
    v_t <- max(beta2 * v_{t-1}, abs(g))
    variable <- variable - learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon)
    ```
    Similar to AdamOptimizer, the epsilon is added for numerical stability
    (especially to get rid of division by zero when v_t = 0).
    Contrast to AdamOptimizer, the sparse implementation of this algorithm
    (used when the gradient is an IndexedSlices object, typically because of
    `tf.gather` or an embedding lookup in the forward pass) only updates
    variable slices and corresponding `m_t`, `v_t` terms when that part of
    the variable was used in the forward pass. This means that the sparse
    behavior is contrast to the dense behavior (similar to some momentum
    implementations which ignore momentum unless a variable slice was actually
    used).
    Args:
      learning_rate: A Tensor or a floating point value. The learning rate.
      beta1: A float value or a constant float tensor.
        The exponential decay rate for the 1st moment estimates.
      beta2: A float value or a constant float tensor.
        The exponential decay rate for the exponentially weighted infinity norm.
      epsilon: A small constant for numerical stability.
      use_locking: If True use locks for update operations.
      name: Optional name for the operations created when applying gradients.
        Defaults to "AdaMax".
    """
    super(AdaMaxOptimizer, self).__init__(learning_rate, beta1, beta2,
                                          epsilon, use_locking, name)
  def _get_beta_accumulators(self):
    # The beta1_power non-slot variable tracks beta1**t across apply steps
    # (updated in _finish).  In eager mode non-slot variables are not tied
    # to any graph, so look them up with graph=None.
    if context.executing_eagerly():
      graph = None
    else:
      graph = ops.get_default_graph()
    return self._get_non_slot_variable("beta1_power", graph=graph)
  def _create_slots(self, var_list):
    # Create the beta1 accumulators on the same device as the first
    # variable. Sort the var_list to make sure this device is consistent across
    # workers (these need to go on the same PS, otherwise some updates are
    # silently ignored).
    first_var = min(var_list, key=lambda x: x.name)
    self._create_non_slot_variable(initial_value=self._beta1,
                                   name="beta1_power",
                                   colocate_with=first_var)
    # Create slots for the first and second moments.
    for v in var_list:
      self._zeros_slot(v, "m", self._name)
      self._zeros_slot(v, "v", self._name)
  def _apply_dense(self, grad, var):
    # Dense update delegates to the fused C++ kernel; all hyperparameters
    # are cast to the variable's dtype first.
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    beta1_power = self._get_beta_accumulators()
    return training_ops.apply_ada_max(
        var, m, v,
        math_ops.cast(beta1_power, var.dtype.base_dtype),
        math_ops.cast(self._lr_t, var.dtype.base_dtype),
        math_ops.cast(self._beta1_t, var.dtype.base_dtype),
        math_ops.cast(self._beta2_t, var.dtype.base_dtype),
        math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
        grad, use_locking=self._use_locking).op
  def _resource_apply_dense(self, grad, var):
    # Same as _apply_dense but for ResourceVariables (handle-based ops).
    m = self.get_slot(var, "m")
    v = self.get_slot(var, "v")
    beta1_power = self._get_beta_accumulators()
    return training_ops.resource_apply_ada_max(
        var.handle, m.handle, v.handle,
        math_ops.cast(beta1_power, grad.dtype.base_dtype),
        math_ops.cast(self._lr_t, grad.dtype.base_dtype),
        math_ops.cast(self._beta1_t, grad.dtype.base_dtype),
        math_ops.cast(self._beta2_t, grad.dtype.base_dtype),
        math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
        grad, use_locking=self._use_locking)
  def _apply_sparse_shared(self, grad, var, indices,
                           scatter_add, scatter_update):
    # Shared sparse update: only the rows selected by `indices` are read
    # and written, using the supplied scatter_add/scatter_update callables
    # (which differ between ref and resource variables).
    beta1_power = self._get_beta_accumulators()
    beta1_power = math_ops.cast(beta1_power, var.dtype.base_dtype)
    lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
    epsilon_t = math_ops.cast(self._epsilon_t, var.dtype.base_dtype)
    # m_t = beta1 * m + (1 - beta1) * g_t
    m = self.get_slot(var, "m")
    m_slice = array_ops.gather(m, indices)
    m_t_slice = m_slice * beta1_t + grad * (1 - beta1_t)
    # control_dependencies ensures the new slice is computed before the
    # slot variable is overwritten.
    with ops.control_dependencies([m_t_slice]):
      m_t = scatter_update(m, indices, m_t_slice)
    # u_t = max(beta2 * u, abs(g_t))
    v = self.get_slot(var, "v")
    v_slice = array_ops.gather(v, indices)
    v_t_slice = math_ops.maximum(v_slice * beta2_t, math_ops.abs(grad))
    with ops.control_dependencies([v_t_slice]):
      v_t = scatter_update(v, indices, v_t_slice)
    # theta_t = theta - lr / (1 - beta1^t) * m_t / u_t
    var_slice = -lr_t / (1 - beta1_power) * (m_t_slice /
                                             (v_t_slice + epsilon_t))
    with ops.control_dependencies([var_slice]):
      var_update = scatter_add(var, indices, var_slice)
    return control_flow_ops.group(*[var_update, m_t, v_t])
  def _apply_sparse(self, grad, var):
    # Ref-variable sparse path: bind use_locking into the scatter callables.
    return self._apply_sparse_shared(
        grad.values, var, grad.indices,
        lambda x, i, v: state_ops.scatter_add(  # pylint: disable=g-long-lambda
            x, i, v, use_locking=self._use_locking),
        lambda x, i, v: state_ops.scatter_update(  # pylint: disable=g-long-lambda
            x, i, v, use_locking=self._use_locking))
  def _resource_scatter_update(self, x, i, v):
    # Force the scatter to run before the variable value is read again.
    with ops.control_dependencies(
        [resource_variable_ops.resource_scatter_update(
            x.handle, i, v)]):
      return x.value()
  def _resource_apply_sparse(self, grad, var, indices):
    return self._apply_sparse_shared(
        grad, var, indices,
        self._resource_scatter_add, self._resource_scatter_update)
  def _finish(self, update_ops, name_scope):
    # Update the power accumulators.
    with ops.control_dependencies(update_ops):
      beta1_power = self._get_beta_accumulators()
      with ops.colocate_with(beta1_power):
        # Advance beta1^t after all per-variable updates have run.
        update_beta1 = beta1_power.assign(
            beta1_power * self._beta1_t, use_locking=self._use_locking)
    return control_flow_ops.group(*update_ops + [update_beta1],
                                  name=name_scope)
| apache-2.0 |
pkruskal/scikit-learn | sklearn/cluster/k_means_.py | 128 | 54694 | """K-means clustering"""
# Authors: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Thomas Rueckstiess <ruecksti@in.tum.de>
# James Bergstra <james.bergstra@umontreal.ca>
# Jan Schlueter <scikit-learn@jan-schlueter.de>
# Nelle Varoquaux
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, ClusterMixin, TransformerMixin
from ..metrics.pairwise import euclidean_distances
from ..utils.extmath import row_norms, squared_norm
from ..utils.sparsefuncs_fast import assign_rows_csr
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.fixes import astype
from ..utils import check_array
from ..utils import check_random_state
from ..utils import as_float_array
from ..utils import gen_batches
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.random import choice
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from . import _k_means
###############################################################################
# Initialization heuristic
def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None):
    """Init n_clusters seeds according to k-means++
    Parameters
    -----------
    X: array or sparse matrix, shape (n_samples, n_features)
        The data to pick seeds for. To avoid memory copy, the input data
        should be double precision (dtype=np.float64).
    n_clusters: integer
        The number of seeds to choose
    x_squared_norms: array, shape (n_samples,)
        Squared Euclidean norm of each data point.
    random_state: numpy.RandomState
        The generator used to initialize the centers.
    n_local_trials: integer, optional
        The number of seeding trials for each center (except the first),
        of which the one reducing inertia the most is greedily chosen.
        Set to None to make the number of trials depend logarithmically
        on the number of seeds (2+log(k)); this is the default.
    Notes
    -----
    Selects initial cluster centers for k-means clustering in a smart way
    to speed up convergence. see: Arthur, D. and Vassilvitskii, S.
    "k-means++: the advantages of careful seeding". ACM-SIAM symposium
    on Discrete algorithms. 2007
    Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip,
    which is the implementation used in the aforementioned paper.
    """
    n_samples, n_features = X.shape
    centers = np.empty((n_clusters, n_features))
    assert x_squared_norms is not None, 'x_squared_norms None in _k_init'

    # Set the number of local seeding trials if none is given
    if n_local_trials is None:
        # This is what Arthur/Vassilvitskii tried, but did not report
        # specific results for other than mentioning in the conclusion
        # that it helped.
        n_local_trials = 2 + int(np.log(n_clusters))

    # Pick first center randomly
    center_id = random_state.randint(n_samples)
    if sp.issparse(X):
        centers[0] = X[center_id].toarray()
    else:
        centers[0] = X[center_id]

    # Initialize list of closest distances and calculate current potential
    closest_dist_sq = euclidean_distances(
        centers[0], X, Y_norm_squared=x_squared_norms, squared=True)
    current_pot = closest_dist_sq.sum()

    # Pick the remaining n_clusters-1 points
    for c in range(1, n_clusters):
        # Choose center candidates by sampling with probability proportional
        # to the squared distance to the closest existing center
        rand_vals = random_state.random_sample(n_local_trials) * current_pot
        candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals)
        # BUGFIX: floating point rounding can make cumsum()[-1] slightly
        # smaller than current_pot (== closest_dist_sq.sum()); a rand_val
        # above cumsum()[-1] then makes searchsorted return n_samples,
        # which is out of bounds for X. Clamp to the last valid index.
        np.clip(candidate_ids, None, closest_dist_sq.size - 1,
                out=candidate_ids)

        # Compute distances to center candidates
        distance_to_candidates = euclidean_distances(
            X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True)

        # Decide which candidate is the best
        best_candidate = None
        best_pot = None
        best_dist_sq = None
        for trial in range(n_local_trials):
            # Compute potential when including center candidate
            new_dist_sq = np.minimum(closest_dist_sq,
                                     distance_to_candidates[trial])
            new_pot = new_dist_sq.sum()

            # Store result if it is the best local trial so far
            if (best_candidate is None) or (new_pot < best_pot):
                best_candidate = candidate_ids[trial]
                best_pot = new_pot
                best_dist_sq = new_dist_sq

        # Permanently add best center candidate found in local tries
        if sp.issparse(X):
            centers[c] = X[best_candidate].toarray()
        else:
            centers[c] = X[best_candidate]
        current_pot = best_pot
        closest_dist_sq = best_dist_sq

    return centers
###############################################################################
# K-means batch estimation by EM (expectation maximization)
def _tolerance(X, tol):
"""Return a tolerance which is independent of the dataset"""
if sp.issparse(X):
variances = mean_variance_axis(X, axis=0)[1]
else:
variances = np.var(X, axis=0)
return np.mean(variances) * tol
def k_means(X, n_clusters, init='k-means++', precompute_distances='auto',
            n_init=10, max_iter=300, verbose=False,
            tol=1e-4, random_state=None, copy_x=True, n_jobs=1,
            return_n_iter=False):
    """K-means clustering algorithm.
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    X : array-like or sparse matrix, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters : int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    n_init : int, optional, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, optional
        The relative increment in the results before declaring convergence.
    verbose : boolean, optional
        Verbosity mode.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    copy_x : boolean, optional
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    return_n_iter : bool, optional
        Whether or not to return the number of iterations.
    Returns
    -------
    centroid : float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label : integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia : float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    best_n_iter: int
        Number of iterations corresponding to the best results.
        Returned only if `return_n_iter` is set to True.
    """
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)
    # NOTE(review): this initial value is superseded by the
    # `best_labels, best_inertia, best_centers = None, None, None` below.
    best_inertia = np.infty
    X = as_float_array(X, copy=copy_x)
    # Convert the user tolerance into an absolute, data-scale tolerance.
    tol = _tolerance(X, tol)
    # If the distances are precomputed every job will create a matrix of shape
    # (n_clusters, n_samples). To stop KMeans from eating up memory we only
    # activate this if the created matrix is guaranteed to be under 100MB. 12
    # million entries consume a little under 100MB if they are of type double.
    if precompute_distances == 'auto':
        n_samples = X.shape[0]
        precompute_distances = (n_clusters * n_samples) < 12e6
    elif isinstance(precompute_distances, bool):
        pass
    else:
        raise ValueError("precompute_distances should be 'auto' or True/False"
                         ", but a value of %r was passed" %
                         precompute_distances)
    # subtract of mean of x for more accurate distance computations
    if not sp.issparse(X) or hasattr(init, '__array__'):
        X_mean = X.mean(axis=0)
        if not sp.issparse(X):
            # The copy was already done above
            X -= X_mean
        if hasattr(init, '__array__'):
            # Explicit initial centers: shift them into the centered frame
            # and run only a single init (multiple inits would be identical).
            init = np.asarray(init).copy()
            init -= X_mean
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in k-means instead of n_init=%d'
                    % n_init, RuntimeWarning, stacklevel=2)
                n_init = 1
    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)
    best_labels, best_inertia, best_centers = None, None, None
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = _kmeans_single(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                precompute_distances=precompute_distances, tol=tol,
                x_squared_norms=x_squared_norms, random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter,
                                    init=init, verbose=verbose, tol=tol,
                                    precompute_distances=precompute_distances,
                                    x_squared_norms=x_squared_norms,
                                    # Change seed to ensure variety
                                    random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]
    # Undo the mean shift so returned centers (and X itself, when
    # copy_x=False mutated it in place) are in the original coordinates.
    if not sp.issparse(X):
        if not copy_x:
            X += X_mean
        best_centers += X_mean
    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300,
                   init='k-means++', verbose=False, random_state=None,
                   tol=1e-4, precompute_distances=True):
    """A single run of k-means, assumes preparation completed prior.
    Parameters
    ----------
    X: array-like of floats, shape (n_samples, n_features)
        The observations to cluster.
    n_clusters: int
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter: int, optional, default 300
        Maximum number of iterations of the k-means algorithm to run.
    init: {'k-means++', 'random', or ndarray, or a callable}, optional
        Method for initialization, default to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': generate k centroids from a Gaussian with mean and
        variance estimated from the data.
        If an ndarray is passed, it should be of shape (k, p) and gives
        the initial centers.
        If a callable is passed, it should take arguments X, k and
        and a random state and return an initialization.
    tol: float, optional
        The relative increment in the results before declaring convergence.
    verbose: boolean, optional
        Verbosity mode
    x_squared_norms: array
        Precomputed x_squared_norms.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    Returns
    -------
    centroid: float ndarray with shape (k, n_features)
        Centroids found at the last iteration of k-means.
    label: integer ndarray with shape (n_samples,)
        label[i] is the code or index of the centroid the
        i'th observation is closest to.
    inertia: float
        The final value of the inertia criterion (sum of squared distances to
        the closest centroid for all observations in the training set).
    n_iter : int
        Number of iterations run.
    """
    random_state = check_random_state(random_state)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=np.float64)
    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignment is also called the E-step of EM
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means is also called the M-step of EM
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # Track the best E-step seen so far (inertia can oscillate slightly).
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Converged when the total squared movement of the centers falls
        # below the (absolute, data-scaled) tolerance.
        if squared_norm(centers_old - centers) <= tol:
            if verbose:
                print("Converged at iteration %d" % i)
            break
    return best_labels, best_inertia, best_centers, i + 1
def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances):
    """Assign each sample to its nearest center via a full distance matrix.

    Materializes the complete (n_clusters, n_samples) matrix of squared
    distances, then sweeps over it one center at a time.  When the
    pre-allocated `distances` array has one entry per sample it is
    overwritten in-place with each sample's distance to its closest center.

    Parameters
    ----------
    X : numpy array, shape (n_samples, n_features)
        Input data.
    x_squared_norms : numpy array, shape (n_samples,)
        Precomputed squared norms of X.
    centers : numpy array, shape (n_clusters, n_features)
        Cluster centers which data is assigned to.
    distances : numpy array, shape (n_samples,)
        Pre-allocated array in which distances are stored.

    Returns
    -------
    labels : numpy array, dtype=np.int, shape (n_samples,)
        Index of the closest center for every sample.
    inertia : float
        Sum of squared distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    all_distances = euclidean_distances(centers, X, x_squared_norms,
                                        squared=True)
    # Start every sample unassigned (-1) and infinitely far away, then let
    # each center claim the samples it is strictly closer to.
    labels = np.full(n_samples, -1, dtype=np.int32)
    mindist = np.full(n_samples, np.inf)
    for center_id, dist_row in enumerate(all_distances):
        labels[dist_row < mindist] = center_id
        mindist = np.minimum(dist_row, mindist)
    if n_samples == distances.shape[0]:
        # Caller asked for per-sample distances: update in-place.
        distances[:] = mindist
    return labels, mindist.sum()
def _labels_inertia(X, x_squared_norms, centers,
                    precompute_distances=True, distances=None):
    """E step of the K-means EM algorithm.

    Compute the labels and the inertia of the given samples and centers.
    This will compute the distances in-place.

    Parameters
    ----------
    X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features)
        The input samples to assign to the labels.
    x_squared_norms: array, shape (n_samples,)
        Precomputed squared euclidean norm of each data point, to speed up
        computations.
    centers: float64 array, shape (k, n_features)
        The cluster centers.
    precompute_distances : boolean, default: True
        Precompute distances (faster but takes more memory).
    distances: float64 array, shape (n_samples,)
        Pre-allocated array to be filled in with each sample's distance
        to the closest center.

    Returns
    -------
    labels: int array of shape(n)
        The resulting assignment
    inertia : float
        Sum of distances of samples to their closest cluster center.
    """
    n_samples = X.shape[0]
    is_sparse = sp.issparse(X)
    # The dense precomputed path has its own dedicated implementation.
    if not is_sparse and precompute_distances:
        if distances is None:
            distances = np.zeros(shape=(0,), dtype=np.float64)
        return _labels_inertia_precompute_dense(X, x_squared_norms,
                                                centers, distances)
    # Default every label to -1 so an unassigned sample is easy to detect.
    labels = np.full(n_samples, -1, np.int32)
    if distances is None:
        distances = np.zeros(shape=(0,), dtype=np.float64)
    # Dispatch to the Cython kernel matching the input layout; both fill
    # `labels` (and `distances`, when sized) in-place.
    if is_sparse:
        assign = _k_means._assign_labels_csr
    else:
        assign = _k_means._assign_labels_array
    inertia = assign(X, x_squared_norms, centers, labels,
                     distances=distances)
    return labels, inertia
def _init_centroids(X, k, init, random_state=None, x_squared_norms=None,
                    init_size=None):
    """Compute the initial centroids.

    Parameters
    ----------
    X: array, shape (n_samples, n_features)
    k: int
        number of centroids
    init: {'k-means++', 'random' or ndarray or callable} optional
        Method for initialization
    random_state: integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    x_squared_norms: array, shape (n_samples,), optional
        Squared euclidean norm of each data point. Pass it if you have it at
        hands already to avoid it being recomputed here. Default: None
    init_size : int, optional
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than k.

    Returns
    -------
    centers: array, shape(k, n_features)
    """
    random_state = check_random_state(random_state)
    n_samples = X.shape[0]
    # Optionally restrict the initialization to a random subsample.
    if init_size is not None and init_size < n_samples:
        if init_size < k:
            warnings.warn(
                "init_size=%d should be larger than k=%d. "
                "Setting it to 3*k" % (init_size, k),
                RuntimeWarning, stacklevel=2)
            init_size = 3 * k
        subset = random_state.random_integers(0, n_samples - 1, init_size)
        X = X[subset]
        x_squared_norms = x_squared_norms[subset]
        n_samples = X.shape[0]
    elif n_samples < k:
        raise ValueError(
            "n_samples=%d should be larger than k=%d" % (n_samples, k))
    # Dispatch on the requested initialization scheme.
    if init == 'k-means++':
        centers = _k_init(X, k, random_state=random_state,
                          x_squared_norms=x_squared_norms)
    elif init == 'random':
        permutation = random_state.permutation(n_samples)
        centers = X[permutation[:k]]
    elif hasattr(init, '__array__'):
        # Caller supplied explicit centers.
        centers = init
    elif callable(init):
        centers = init(X, k, random_state=random_state)
    else:
        raise ValueError("the init parameter for the k-means should "
                         "be 'k-means++' or 'random' or an ndarray, "
                         "'%s' (type '%s') was passed." % (init, type(init)))
    if sp.issparse(centers):
        centers = centers.toarray()
    # Sanity-check the result regardless of which scheme produced it.
    if len(centers) != k:
        raise ValueError('The shape of the initial centers (%s) '
                         'does not match the number of clusters %i'
                         % (centers.shape, k))
    return centers
class KMeans(BaseEstimator, ClusterMixin, TransformerMixin):
    """K-Means clustering
    Read more in the :ref:`User Guide <k_means>`.
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init : int, default: 10
        Number of time the k-means algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of inertia.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    precompute_distances : {'auto', True, False}
        Precompute distances (faster but takes more memory).
        'auto' : do not precompute distances if n_samples * n_clusters > 12
        million. This corresponds to about 100MB overhead per job using
        double precision.
        True : always precompute distances
        False : never precompute distances
    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence
    n_jobs : int
        The number of jobs to use for the computation. This works by computing
        each of the n_init runs in parallel.
        If -1 all CPUs are used. If 1 is given, no parallel computing code is
        used at all, which is useful for debugging. For n_jobs below -1,
        (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
        are used.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    verbose : int, default 0
        Verbosity mode.
    copy_x : boolean, default True
        When pre-computing distances it is more numerically accurate to center
        the data first. If copy_x is True, then the original data is not
        modified. If False, the original data is modified, and put back before
        the function returns, but small numerical differences may be introduced
        by subtracting and then adding the data mean.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point
    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    Notes
    ------
    The k-means problem is solved using Lloyd's algorithm.
    The average complexity is given by O(k n T), were n is the number of
    samples and T is the number of iteration.
    The worst case complexity is given by O(n^(k+2/p)) with
    n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii,
    'How slow is the k-means method?' SoCG2006)
    In practice, the k-means algorithm is very fast (one of the fastest
    clustering algorithms available), but it falls in local minima. That's why
    it can be useful to restart it several times.
    See also
    --------
    MiniBatchKMeans:
        Alternative online implementation that does incremental updates
        of the centers positions using mini-batches.
        For large scale learning (say n_samples > 10k) MiniBatchKMeans is
        probably much faster to than the default batch implementation.
    """
    def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300,
                 tol=1e-4, precompute_distances='auto',
                 verbose=0, random_state=None, copy_x=True, n_jobs=1):
        # An explicit array of initial centers fixes the number of clusters
        # and is coerced to float64 once, up front.
        if hasattr(init, '__array__'):
            n_clusters = init.shape[0]
            init = np.asarray(init, dtype=np.float64)
        # Hyper-parameters are stored verbatim; all real work happens in fit.
        self.n_clusters = n_clusters
        self.init = init
        self.max_iter = max_iter
        self.tol = tol
        self.precompute_distances = precompute_distances
        self.n_init = n_init
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs
    def _check_fit_data(self, X):
        """Verify that the number of samples given is larger than k"""
        X = check_array(X, accept_sparse='csr', dtype=np.float64)
        if X.shape[0] < self.n_clusters:
            raise ValueError("n_samples=%d should be >= n_clusters=%d" % (
                X.shape[0], self.n_clusters))
        return X
    def _check_test_data(self, X):
        """Validate X for prediction/transform against the fitted centers."""
        X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES,
                        warn_on_dtype=True)
        n_samples, n_features = X.shape
        # X must carry the same number of features the model was fitted with.
        expected_n_features = self.cluster_centers_.shape[1]
        if not n_features == expected_n_features:
            raise ValueError("Incorrect number of features. "
                             "Got %d features, expected %d" % (
                                 n_features, expected_n_features))
        return X
    def fit(self, X, y=None):
        """Compute k-means clustering.
        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        """
        random_state = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        # Delegate the optimization to the module-level k_means function;
        # only the best of the n_init runs is kept.
        self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \
            k_means(
                X, n_clusters=self.n_clusters, init=self.init,
                n_init=self.n_init, max_iter=self.max_iter,
                verbose=self.verbose, return_n_iter=True,
                precompute_distances=self.precompute_distances,
                tol=self.tol, random_state=random_state, copy_x=self.copy_x,
                n_jobs=self.n_jobs)
        return self
    def fit_predict(self, X, y=None):
        """Compute cluster centers and predict cluster index for each sample.
        Convenience method; equivalent to calling fit(X) followed by
        predict(X).
        """
        # labels_ computed by fit are exactly the predictions for the
        # training data, so no second assignment pass is needed.
        return self.fit(X).labels_
    def fit_transform(self, X, y=None):
        """Compute clustering and transform X to cluster-distance space.
        Equivalent to fit(X).transform(X), but more efficiently implemented.
        """
        # Currently, this just skips a copy of the data if it is not in
        # np.array or CSR format already.
        # XXX This skips _check_test_data, which may change the dtype;
        # we should refactor the input validation.
        X = self._check_fit_data(X)
        return self.fit(X)._transform(X)
    def transform(self, X, y=None):
        """Transform X to a cluster-distance space.
        In the new space, each dimension is the distance to the cluster
        centers. Note that even if X is sparse, the array returned by
        `transform` will typically be dense.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to transform.
        Returns
        -------
        X_new : array, shape [n_samples, k]
            X transformed in the new space.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        return self._transform(X)
    def _transform(self, X):
        """guts of transform method; no input validation"""
        return euclidean_distances(X, self.cluster_centers_)
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # The E-step alone yields the assignment; its inertia is discarded.
        return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0]
    def score(self, X, y=None):
        """Opposite of the value of X on the K-means objective.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data.
        Returns
        -------
        score : float
            Opposite of the value of X on the K-means objective.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        x_squared_norms = row_norms(X, squared=True)
        # Negated so that larger is better, matching the scikit-learn
        # score() convention.
        return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1]
def _mini_batch_step(X, x_squared_norms, centers, counts,
                     old_center_buffer, compute_squared_diff,
                     distances, random_reassign=False,
                     random_state=None, reassignment_ratio=.01,
                     verbose=False):
    """Incremental update of the centers for the Minibatch K-Means algorithm.
    Parameters
    ----------
    X : array, shape (n_samples, n_features)
        The original data array.
    x_squared_norms : array, shape (n_samples,)
        Squared euclidean norm of each data point.
    centers : array, shape (k, n_features)
        The cluster centers. This array is MODIFIED IN PLACE
    counts : array, shape (k,)
        The vector in which we keep track of the numbers of elements in a
        cluster. This array is MODIFIED IN PLACE
    distances : array, dtype float64, shape (n_samples), optional
        If not None, should be a pre-allocated array that will be used to store
        the distances of each sample to its closest center.
        May not be None when random_reassign is True.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    random_reassign : boolean, optional
        If True, centers with very low counts are randomly reassigned
        to observations.
    reassignment_ratio : float, optional
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more likely to be reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose : bool, optional, default False
        Controls the verbosity.
    compute_squared_diff : bool
        If set to False, the squared diff computation is skipped.
    old_center_buffer : int
        Copy of old centers for monitoring convergence.
    Returns
    -------
    inertia : float
        Sum of distances of samples to their closest cluster center.
    squared_diff : numpy array, shape (n_clusters,)
        Squared distances between previous and updated cluster centers.
    """
    # Perform label assignment to nearest centers (the E-step)
    nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers,
                                              distances=distances)
    if random_reassign and reassignment_ratio > 0:
        random_state = check_random_state(random_state)
        # Reassign clusters that have very low counts
        to_reassign = counts < reassignment_ratio * counts.max()
        # pick at most .5 * batch_size samples as new centers
        if to_reassign.sum() > .5 * X.shape[0]:
            # keep the centers with the highest counts untouched
            indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):]
            to_reassign[indices_dont_reassign] = False
        n_reassigns = to_reassign.sum()
        if n_reassigns:
            # Pick new clusters amongst observations with uniform probability
            new_centers = choice(X.shape[0], replace=False, size=n_reassigns,
                                 random_state=random_state)
            if verbose:
                print("[MiniBatchKMeans] Reassigning %i cluster centers."
                      % n_reassigns)
            if sp.issparse(X) and not sp.issparse(centers):
                # copy selected sparse rows into the dense centers array
                assign_rows_csr(X,
                                astype(new_centers, np.intp),
                                astype(np.where(to_reassign)[0], np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        counts[to_reassign] = np.min(counts[~to_reassign])
    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)
    # dense variant in mostly numpy (not as memory efficient though).
    # Each center is updated as a running mean of its assigned samples.
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()
        if count > 0:
            if compute_squared_diff:
                # snapshot the old position to measure movement below
                old_center_buffer[:] = centers[center_idx]
            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]
            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)
            # update the count statistics for this center
            counts[center_idx] += count
            # inplace rescale to compute mean of all points (old and new)
            centers[center_idx] /= counts[center_idx]
            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)
    return inertia, squared_diff
def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
n_samples, centers_squared_diff, batch_inertia,
context, verbose=0):
"""Helper function to encapsulte the early stopping logic"""
# Normalize inertia to be able to compare values when
# batch_size changes
batch_inertia /= model.batch_size
centers_squared_diff /= model.batch_size
# Compute an Exponentially Weighted Average of the squared
# diff to monitor the convergence while discarding
# minibatch-local stochastic variability:
# https://en.wikipedia.org/wiki/Moving_average
ewa_diff = context.get('ewa_diff')
ewa_inertia = context.get('ewa_inertia')
if ewa_diff is None:
ewa_diff = centers_squared_diff
ewa_inertia = batch_inertia
else:
alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
alpha = 1.0 if alpha > 1.0 else alpha
ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha
# Log progress to be able to monitor convergence
if verbose:
progress_msg = (
'Minibatch iteration %d/%d:'
' mean batch inertia: %f, ewa inertia: %f ' % (
iteration_idx + 1, n_iter, batch_inertia,
ewa_inertia))
print(progress_msg)
# Early stopping based on absolute tolerance on squared change of
# centers position (using EWA smoothing)
if tol > 0.0 and ewa_diff <= tol:
if verbose:
print('Converged (small centers change) at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# Early stopping heuristic due to lack of improvement on smoothed inertia
ewa_inertia_min = context.get('ewa_inertia_min')
no_improvement = context.get('no_improvement', 0)
if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
no_improvement = 0
ewa_inertia_min = ewa_inertia
else:
no_improvement += 1
if (model.max_no_improvement is not None
and no_improvement >= model.max_no_improvement):
if verbose:
print('Converged (lack of improvement in inertia)'
' at iteration %d/%d'
% (iteration_idx + 1, n_iter))
return True
# update the convergence context to maintain state across successive calls:
context['ewa_diff'] = ewa_diff
context['ewa_inertia'] = ewa_inertia
context['ewa_inertia_min'] = ewa_inertia_min
context['no_improvement'] = no_improvement
return False
class MiniBatchKMeans(KMeans):
    """Mini-Batch K-Means clustering
    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.
    max_iter : int, optional
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.
    max_no_improvement : int, default: 10
        Control early stopping based on the consecutive number of mini
        batches that does not yield an improvement on the smoothed inertia.
        To disable convergence detection based on inertia, set
        max_no_improvement to None.
    tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized of the mean center
        squared position changes. This early stopping heuristics is
        closer to the one used for the batch variant of the algorithms
        but induces a slight computational and memory overhead over the
        inertia heuristic.
        To disable convergence detection based on normalized center
        change, set tol to 0.0 (default).
    batch_size : int, optional, default: 100
        Size of the mini batches.
    init_size : int, optional, default: 3 * batch_size
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        only algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.
    init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
        Method for initialization, defaults to 'k-means++':
        'k-means++' : selects initial cluster centers for k-mean
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.
        'random': choose k observations (rows) at random from data for
        the initial centroids.
        If an ndarray is passed, it should be of shape (n_clusters, n_features)
        and gives the initial centers.
    n_init : int, default=3
        Number of random initializations that are tried.
        In contrast to KMeans, the algorithm is only run once, using the
        best of the ``n_init`` initializations as measured by inertia.
    compute_labels : boolean, default=True
        Compute label assignment and inertia for the complete dataset
        once the minibatch optimization has converged in fit.
    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.
    reassignment_ratio : float, default: 0.01
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.
    verbose : boolean, optional
        Verbosity mode.
    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers
    labels_ :
        Labels of each point (if compute_labels is set to True).
    inertia_ : float
        The value of the inertia criterion associated with the chosen
        partition (if compute_labels is set to True). The inertia is
        defined as the sum of square distances of samples to their nearest
        neighbor.
    Notes
    -----
    See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
    """
    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, reassignment_ratio=0.01):
        # Shared hyper-parameters are handled by the KMeans base class.
        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol, n_init=n_init)
        # Mini-batch specific knobs.
        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio
    def fit(self, X, y=None):
        """Compute the centroids on X by chunking it into mini-batches.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
        n_samples, n_features = X.shape
        if n_samples < self.n_clusters:
            raise ValueError("Number of samples smaller than number "
                             "of clusters.")
        n_init = self.n_init
        if hasattr(self.init, '__array__'):
            # Explicit initial centers: only a single init makes sense.
            self.init = np.ascontiguousarray(self.init, dtype=np.float64)
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in MiniBatchKMeans instead of '
                    'n_init=%d'
                    % self.n_init, RuntimeWarning, stacklevel=2)
                n_init = 1
        x_squared_norms = row_norms(X, squared=True)
        if self.tol > 0.0:
            tol = _tolerance(X, self.tol)
            # using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high dim data:
            # hence we allocate it outside of the main loop
            old_center_buffer = np.zeros(n_features, np.double)
        else:
            tol = 0.0
            # no need for the center buffer if tol-based early stopping is
            # disabled
            old_center_buffer = np.zeros(0, np.double)
        distances = np.zeros(self.batch_size, dtype=np.float64)
        # max_iter counts passes over the data; convert to batch iterations.
        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_iter = int(self.max_iter * n_batches)
        init_size = self.init_size
        if init_size is None:
            init_size = 3 * self.batch_size
        if init_size > n_samples:
            init_size = n_samples
        self.init_size_ = init_size
        # Fixed validation subset used to compare the independent inits.
        validation_indices = random_state.random_integers(
            0, n_samples - 1, init_size)
        X_valid = X[validation_indices]
        x_squared_norms_valid = x_squared_norms[validation_indices]
        # perform several inits with random sub-sets
        best_inertia = None
        for init_idx in range(n_init):
            if self.verbose:
                print("Init %d/%d with method: %s"
                      % (init_idx + 1, n_init, self.init))
            counts = np.zeros(self.n_clusters, dtype=np.int32)
            # TODO: once the `k_means` function works with sparse input we
            # should refactor the following init to use it instead.
            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans
            cluster_centers = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                init_size=init_size)
            # Compute the label assignment on the init dataset
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X_valid, x_squared_norms[validation_indices],
                cluster_centers, counts, old_center_buffer, False,
                distances=None, verbose=self.verbose)
            # Keep only the best cluster centers across independent inits on
            # the common validation set
            _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                         cluster_centers)
            if self.verbose:
                print("Inertia for init %d/%d: %f"
                      % (init_idx + 1, n_init, inertia))
            if best_inertia is None or inertia < best_inertia:
                self.cluster_centers_ = cluster_centers
                self.counts_ = counts
                best_inertia = inertia
        # Empty context to be used inplace by the convergence check routine
        convergence_context = {}
        # Perform the iterative optimization until the final convergence
        # criterion
        for iteration_idx in range(n_iter):
            # Sample a minibatch from the full dataset
            minibatch_indices = random_state.random_integers(
                0, n_samples - 1, self.batch_size)
            # Perform the actual update step on the minibatch data
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X[minibatch_indices], x_squared_norms[minibatch_indices],
                self.cluster_centers_, self.counts_,
                old_center_buffer, tol > 0.0, distances=distances,
                # Here we randomly choose whether to perform
                # random reassignment: the choice is done as a function
                # of the iteration index, and the minimum number of
                # counts, in order to force this reassignment to happen
                # every once in a while
                random_reassign=((iteration_idx + 1)
                                 % (10 + self.counts_.min()) == 0),
                random_state=random_state,
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose)
            # Monitor convergence and do early stopping if necessary
            if _mini_batch_convergence(
                    self, iteration_idx, n_iter, tol, n_samples,
                    centers_squared_diff, batch_inertia, convergence_context,
                    verbose=self.verbose):
                break
        self.n_iter_ = iteration_idx + 1
        if self.compute_labels:
            # Final full-dataset E-step, done in batches to bound memory.
            self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)
        return self
    def _labels_inertia_minibatch(self, X):
        """Compute labels and inertia using mini batches.
        This is slightly slower than doing everything at once but prevents
        memory errors / segfaults.
        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.
        Returns
        -------
        labels : array, shape (n_samples,)
            Cluster labels for each point.
        inertia : float
            Sum of squared distances of points to nearest cluster.
        """
        if self.verbose:
            print('Computing label assignment and total inertia')
        x_squared_norms = row_norms(X, squared=True)
        slices = gen_batches(X.shape[0], self.batch_size)
        results = [_labels_inertia(X[s], x_squared_norms[s],
                                   self.cluster_centers_) for s in slices]
        # Stitch the per-batch label arrays and sum the partial inertias.
        labels, inertia = zip(*results)
        return np.hstack(labels), np.sum(inertia)
    def partial_fit(self, X, y=None):
        """Update k means estimate on a single mini-batch X.
        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster.
        """
        X = check_array(X, accept_sparse="csr")
        n_samples, n_features = X.shape
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=np.float64)
        if n_samples == 0:
            # Nothing to learn from an empty batch.
            return self
        x_squared_norms = row_norms(X, squared=True)
        # Persist the RNG across partial_fit calls for reproducibility.
        self.random_state_ = getattr(self, "random_state_",
                                     check_random_state(self.random_state))
        if (not hasattr(self, 'counts_')
                or not hasattr(self, 'cluster_centers_')):
            # this is the first call partial_fit on this object:
            # initialize the cluster centers
            self.cluster_centers_ = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=self.random_state_,
                x_squared_norms=x_squared_norms, init_size=self.init_size)
            self.counts_ = np.zeros(self.n_clusters, dtype=np.int32)
            random_reassign = False
            distances = None
        else:
            # The lower the minimum count is, the more we do random
            # reassignment, however, we don't want to do random
            # reassignment too often, to allow for building up counts
            random_reassign = self.random_state_.randint(
                10 * (1 + self.counts_.min())) == 0
            distances = np.zeros(X.shape[0], dtype=np.float64)
        _mini_batch_step(X, x_squared_norms, self.cluster_centers_,
                         self.counts_, np.zeros(0, np.double), 0,
                         random_reassign=random_reassign, distances=distances,
                         random_state=self.random_state_,
                         reassignment_ratio=self.reassignment_ratio,
                         verbose=self.verbose)
        if self.compute_labels:
            self.labels_, self.inertia_ = _labels_inertia(
                X, x_squared_norms, self.cluster_centers_)
        return self
    def predict(self, X):
        """Predict the closest cluster each sample in X belongs to.
        In the vector quantization literature, `cluster_centers_` is called
        the code book and each value returned by `predict` is the index of
        the closest code in the code book.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            New data to predict.
        Returns
        -------
        labels : array, shape [n_samples,]
            Index of the cluster each sample belongs to.
        """
        check_is_fitted(self, 'cluster_centers_')
        X = self._check_test_data(X)
        # Batched assignment; the inertia component is discarded.
        return self._labels_inertia_minibatch(X)[0]
| bsd-3-clause |
tchellomello/home-assistant | tests/components/cloud/test_prefs.py | 13 | 2312 | """Test Cloud preferences."""
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.cloud.prefs import STORAGE_KEY, CloudPreferences
from tests.async_mock import patch
async def test_set_username(hass):
    """Test we clear config if we set different username."""
    prefs = CloudPreferences(hass)
    await prefs.async_initialize()
    # Default preferences have the Google integration enabled.
    assert prefs.google_enabled
    await prefs.async_update(google_enabled=False)
    assert not prefs.google_enabled
    # Logging in with a different username resets preferences to defaults.
    await prefs.async_set_username("new-username")
    assert prefs.google_enabled
async def test_set_username_migration(hass):
    """Test we do not clear config if we had no username."""
    prefs = CloudPreferences(hass)
    # Simulate legacy storage written before the username field existed.
    with patch.object(prefs, "_empty_config", return_value=prefs._empty_config(None)):
        await prefs.async_initialize()
    assert prefs.google_enabled
    await prefs.async_update(google_enabled=False)
    assert not prefs.google_enabled
    # Setting the first username should keep the existing settings intact.
    await prefs.async_set_username("new-username")
    assert not prefs.google_enabled
async def test_load_invalid_cloud_user(hass, hass_storage):
    """Test loading cloud user with invalid storage."""
    # Stored cloud_user references a user id that no longer exists.
    hass_storage[STORAGE_KEY] = {"version": 1, "data": {"cloud_user": "non-existing"}}
    prefs = CloudPreferences(hass)
    await prefs.async_initialize()
    cloud_user_id = await prefs.get_cloud_user()
    # A replacement user should have been created instead of the stale id.
    assert cloud_user_id != "non-existing"
    cloud_user = await hass.auth.async_get_user(
        hass_storage[STORAGE_KEY]["data"]["cloud_user"]
    )
    assert cloud_user
    # The replacement user belongs to the admin group.
    assert cloud_user.groups[0].id == GROUP_ID_ADMIN
async def test_setup_remove_cloud_user(hass, hass_storage):
    """Test creating and removing cloud user."""
    # Start with storage that has no cloud user assigned yet.
    hass_storage[STORAGE_KEY] = {"version": 1, "data": {"cloud_user": None}}
    prefs = CloudPreferences(hass)
    await prefs.async_initialize()
    await prefs.async_set_username("user1")
    cloud_user = await hass.auth.async_get_user(await prefs.get_cloud_user())
    assert cloud_user
    assert cloud_user.groups[0].id == GROUP_ID_ADMIN
    # Switching usernames must provision a brand new admin cloud user.
    await prefs.async_set_username("user2")
    cloud_user2 = await hass.auth.async_get_user(await prefs.get_cloud_user())
    assert cloud_user2
    assert cloud_user2.groups[0].id == GROUP_ID_ADMIN
    assert cloud_user2.id != cloud_user.id
| apache-2.0 |
350dotorg/Django | django/contrib/sessions/backends/file.py | 43 | 5309 | import errno
import os
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase, CreateError
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured
class SessionStore(SessionBase):
"""
Implements a file based session store.
"""
    def __init__(self, session_key=None):
        """Resolve the on-disk storage location, then run normal session init."""
        self.storage_path = getattr(settings, "SESSION_FILE_PATH", None)
        if not self.storage_path:
            # No explicit SESSION_FILE_PATH configured: fall back to the
            # system temporary directory.
            self.storage_path = tempfile.gettempdir()
        # Make sure the storage path is valid.
        if not os.path.isdir(self.storage_path):
            raise ImproperlyConfigured(
                "The session storage path %r doesn't exist. Please set your"
                " SESSION_FILE_PATH setting to an existing directory in which"
                " Django can store session data." % self.storage_path)
        # Session files are named "<cookie name><session key>".
        self.file_prefix = settings.SESSION_COOKIE_NAME
        super(SessionStore, self).__init__(session_key)
def _key_to_file(self, session_key=None):
"""
Get the file associated with this session key.
"""
if session_key is None:
session_key = self.session_key
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if os.path.sep in session_key:
raise SuspiciousOperation(
"Invalid characters (directory components) in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
def load(self):
session_data = {}
try:
session_file = open(self._key_to_file(), "rb")
try:
file_data = session_file.read()
# Don't fail if there is no data in the session file.
# We may have opened the empty placeholder file.
if file_data:
try:
session_data = self.decode(file_data)
except (EOFError, SuspiciousOperation):
self.create()
finally:
session_file.close()
except IOError:
pass
return session_data
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
self._session_cache = {}
return
def save(self, must_create=False):
# Get the session data now, before we start messing
# with the file it is stored within.
session_data = self._get_session(no_load=must_create)
session_file_name = self._key_to_file()
try:
# Make sure the file exists. If it does not already exist, an
# empty placeholder file is created.
flags = os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0)
if must_create:
flags |= os.O_EXCL
fd = os.open(session_file_name, flags)
os.close(fd)
except OSError, e:
if must_create and e.errno == errno.EEXIST:
raise CreateError
raise
# Write the session file without interfering with other threads
# or processes. By writing to an atomically generated temporary
# file and then using the atomic os.rename() to make the complete
# file visible, we avoid having to lock the session file, while
# still maintaining its integrity.
#
# Note: Locking the session file was explored, but rejected in part
# because in order to be atomic and cross-platform, it required a
# long-lived lock file for each session, doubling the number of
# files in the session storage directory at any given time. This
# rename solution is cleaner and avoids any additional overhead
# when reading the session data, which is the more common case
# unless SESSION_SAVE_EVERY_REQUEST = True.
#
# See ticket #8616.
dir, prefix = os.path.split(session_file_name)
try:
output_file_fd, output_file_name = tempfile.mkstemp(dir=dir,
prefix=prefix + '_out_')
renamed = False
try:
try:
os.write(output_file_fd, self.encode(session_data))
finally:
os.close(output_file_fd)
os.rename(output_file_name, session_file_name)
renamed = True
finally:
if not renamed:
os.unlink(output_file_name)
except (OSError, IOError, EOFError):
pass
def exists(self, session_key):
if os.path.exists(self._key_to_file(session_key)):
return True
return False
def delete(self, session_key=None):
if session_key is None:
if self._session_key is None:
return
session_key = self._session_key
try:
os.unlink(self._key_to_file(session_key))
except OSError:
pass
def clean(self):
pass
| bsd-3-clause |
zahodi/ansible | lib/ansible/modules/cloud/openstack/os_keystone_service.py | 27 | 6355 | #!/usr/bin/python
# Copyright 2016 Sam Yaple
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
try:
import shade
HAS_SHADE = True
except ImportError:
HAS_SHADE = False
from distutils.version import StrictVersion
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: os_keystone_service
short_description: Manage OpenStack Identity services
extends_documentation_fragment: openstack
author: "Sam Yaple (@SamYaple)"
version_added: "2.2"
description:
- Create, update, or delete OpenStack Identity service. If a service
with the supplied name already exists, it will be updated with the
new description and enabled attributes.
options:
name:
description:
- Name of the service
required: true
description:
description:
- Description of the service
required: false
default: None
enabled:
description:
- Is the service enabled
required: false
default: True
service_type:
description:
- The type of service
required: true
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
requirements:
- "python >= 2.6"
- "shade"
'''
EXAMPLES = '''
# Create a service for glance
- os_keystone_service:
cloud: mycloud
state: present
name: glance
service_type: image
description: OpenStack Image Service
# Delete a service
- os_keystone_service:
cloud: mycloud
state: absent
name: glance
service_type: image
'''
RETURN = '''
service:
description: Dictionary describing the service.
returned: On success when I(state) is 'present'
type: dictionary
contains:
id:
description: Service ID.
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
name:
description: Service name.
type: string
sample: "glance"
service_type:
description: Service type.
type: string
sample: "image"
description:
description: Service description.
type: string
sample: "OpenStack Image Service"
enabled:
description: Service status.
type: boolean
sample: True
id:
description: The service ID.
returned: On success when I(state) is 'present'
type: string
sample: "3292f020780b4d5baf27ff7e1d224c44"
'''
def _needs_update(module, service):
    """Return True when the existing service differs from the requested params."""
    params = module.params
    if service.enabled != params['enabled']:
        return True
    # Only a non-None stored description is compared against the request.
    has_description = service.description is not None
    return has_description and service.description != params['description']
def _system_state_change(module, service):
    """Predict (for check mode) whether applying the module would change anything."""
    desired = module.params['state']
    if desired == 'absent':
        # Removal changes something only if the service currently exists.
        return bool(service)
    if desired == 'present':
        return True if service is None else _needs_update(module, service)
    return False
def main():
    """Ansible entry point: ensure the Keystone service matches the request.

    Looks the service up by name+type, then creates, updates, or deletes it
    as needed, exiting via module.exit_json / fail_json.
    """
    argument_spec = openstack_full_argument_spec(
        description=dict(default=None),
        enabled=dict(default=True, type='bool'),
        name=dict(required=True),
        service_type=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')
    if StrictVersion(shade.__version__) < StrictVersion('1.6.0'):
        module.fail_json(msg="To utilize this module, the installed version of"
                             "the shade library MUST be >=1.6.0")

    description = module.params['description']
    enabled = module.params['enabled']
    name = module.params['name']
    state = module.params['state']
    service_type = module.params['service_type']

    try:
        cloud = shade.operator_cloud(**module.params)

        # name+type must identify at most one service.
        services = cloud.search_services(name_or_id=name,
                                         filters=dict(type=service_type))

        if len(services) > 1:
            module.fail_json(msg='Service name %s and type %s are not unique' %
                             (name, service_type))
        elif len(services) == 1:
            service = services[0]
        else:
            service = None

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, service))

        if state == 'present':
            if service is None:
                # BUG FIX: previously passed enabled=True unconditionally,
                # ignoring the module's 'enabled' parameter on creation.
                service = cloud.create_service(name=name,
                                               description=description,
                                               type=service_type,
                                               enabled=enabled)
                changed = True
            else:
                if _needs_update(module, service):
                    service = cloud.update_service(
                        service.id, name=name, type=service_type,
                        enabled=enabled, description=description)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, service=service, id=service.id)
        elif state == 'absent':
            if service is None:
                changed = False
            else:
                cloud.delete_service(service.id)
                changed = True
            module.exit_json(changed=changed)
    except shade.OpenStackCloudException as e:
        module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
JiahuiZHONG/Internship_Thread | tests/scripts/thread-cert/Cert_5_6_08_ContextManagement.py | 1 | 4303 | #!/usr/bin/python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import pexpect
import time
import unittest
import node
LEADER = 1
ROUTER = 2
ED = 3
class Cert_5_6_8_ContextManagement(unittest.TestCase):
    """Thread certification test: 6LoWPAN context reuse on the Leader.

    Topology: Leader <-> Router, Leader <-> End Device (whitelisted links).
    The Router repeatedly adds/removes prefixes while the ED pings the
    Leader's on-mesh addresses; the Leader's context reuse delay is
    shortened to 10s so contexts can be recycled within the test.
    """
    def setUp(self):
        self.nodes = {}
        for i in range(1,4):
            self.nodes[i] = node.Node(i)
        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        self.nodes[LEADER].add_whitelist(self.nodes[ROUTER].get_addr64())
        self.nodes[LEADER].add_whitelist(self.nodes[ED].get_addr64())
        self.nodes[LEADER].enable_whitelist()
        # Short reuse delay so released contexts become reusable mid-test.
        self.nodes[LEADER].set_context_reuse_delay(10)
        self.nodes[ROUTER].set_panid(0xface)
        self.nodes[ROUTER].set_mode('rsdn')
        self.nodes[ROUTER].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ROUTER].enable_whitelist()
        self.nodes[ED].set_panid(0xface)
        self.nodes[ED].set_mode('rsn')
        self.nodes[ED].add_whitelist(self.nodes[LEADER].get_addr64())
        self.nodes[ED].enable_whitelist()
    def tearDown(self):
        for node in self.nodes.itervalues():
            node.stop()
        del self.nodes
    def test(self):
        # Bring up the network: Leader first, then Router, then ED.
        self.nodes[LEADER].start()
        self.nodes[LEADER].set_state('leader')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
        self.nodes[ROUTER].start()
        time.sleep(3)
        self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
        self.nodes[ED].start()
        time.sleep(3)
        self.assertEqual(self.nodes[ED].get_state(), 'child')
        # Phase 1: add prefix 2001::/64 and ping the Leader's 200x addresses.
        self.nodes[ROUTER].add_prefix('2001::/64', 'pvcrs')
        self.nodes[ROUTER].register_netdata()
        time.sleep(2)
        addrs = self.nodes[LEADER].get_addrs()
        for addr in addrs:
            if addr[0:3] == '200':
                self.nodes[ED].ping(addr)
        # Phase 2: remove the prefix; pings exercise the released context.
        self.nodes[ROUTER].remove_prefix('2001::/64')
        self.nodes[ROUTER].register_netdata()
        time.sleep(5)
        addrs = self.nodes[LEADER].get_addrs()
        for addr in addrs:
            if addr[0:3] == '200':
                self.nodes[ED].ping(addr)
        # Phase 3: a new prefix before the reuse delay expires.
        self.nodes[ROUTER].add_prefix('2002::/64', 'pvcrs')
        self.nodes[ROUTER].register_netdata()
        time.sleep(5)
        addrs = self.nodes[LEADER].get_addrs()
        for addr in addrs:
            if addr[0:3] == '200':
                self.nodes[ED].ping(addr)
        # Phase 4: wait past the reuse delay, then add yet another prefix.
        time.sleep(5)
        self.nodes[ROUTER].add_prefix('2003::/64', 'pvcrs')
        self.nodes[ROUTER].register_netdata()
        time.sleep(5)
        addrs = self.nodes[LEADER].get_addrs()
        for addr in addrs:
            if addr[0:3] == '200':
                self.nodes[ED].ping(addr)
# Allow running this certification test directly from the command line.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
mvs-live/metaverse | test/test-rpc-v3/TestCase/MVSTestCase.py | 3 | 5796 | import unittest
from utils import mvs_rpc, common, code
from Roles import Alice, Bob, Cindy, Dale, Eric, Frank, Zac
class MVSTestCaseBase(unittest.TestCase):
    """Base fixture for MVS RPC tests: recreates all wallet roles and
    ensures each (except Zac) has a registered DID before every test."""
    # All wallet roles used by the suite; Zac (last) gets no DID.
    roles = (Alice, Bob, Cindy, Dale, Eric, Frank, Zac)
    # Subclasses may disable the pre-test mining step.
    need_mine = True
    def setUp(self):
        # Recreate every role from scratch; delete may fail if the account
        # does not exist yet, so create runs in the finally branch either way.
        for role in self.roles:
            try:
                # check if the role exists by get_balance
                role.delete()
            finally:
                result, message = role.create()
                self.assertEqual(result, 0, message)
        # register did for role A~F, if not registered
        for role in self.roles[:-1]:
            ec, message = mvs_rpc.list_dids(role.name, role.password)
            if ec == 0 and len(message['dids']) > 0:
                pass
            else:
                # Fund the role first (Alice is the miner and funds herself).
                if role != Alice:
                    Alice.send_etp(role.mainaddress(), 10 ** 8)
                    Alice.mining()
                ec, message = role.register_did()
                Alice.mining()
        if self.need_mine:
            # mine to clear the tx pool
            Alice.mining()
            # record current height
            _, (hash, height) = mvs_rpc.getblockheader()
            print( "current block height=[%s], hash=[%s]" % (height, hash) )
    def tearDown(self):
        pass
        # for role in self.roles:
        #     result, message = role.delete()
        #     self.assertEqual(result, 0, message)
    def checkResponseKeys(self, result, expect_keys):
        # Assert a dict response exposes exactly the expected keys.
        # NOTE(review): `!= None` should be `is not None`, and the in-place
        # sort mutates the caller's expect_keys list -- confirm callers
        # don't rely on its order before changing.
        if result != None and isinstance(result, dict):
            expect_keys.sort()
            actual_keys = list(result.keys())
            actual_keys.sort()
            self.assertEqual(actual_keys, expect_keys)
class MultiSigDIDTestCase(MVSTestCaseBase):
    """Fixture that prepares two 2-of-3 multisig addresses (A/B/C and
    D/E/F) and makes sure each has a DID registered to it."""
    def setUp(self):
        MVSTestCaseBase.setUp(self)
        self.group_ABC = [Alice, Bob, Cindy]
        self.group_DEF = [Dale, Eric, Frank]
        # 2-of-3 multisig addresses for each group.
        self.addr_ABC = common.create_multisig_address(self.group_ABC, 2)
        self.addr_DEF = common.create_multisig_address(self.group_DEF, 2)
        # register did if not registered
        self.did_ABC = "Alice.Bob.Cindy@DIID"
        self.did_DEF = "Dale.Eric.Frank@DIID"
        for roles, addr, attr_name in [(self.group_ABC, self.addr_ABC, "did_ABC"), (self.group_DEF, self.addr_DEF, "did_DEF")]:
            ec, message = mvs_rpc.list_dids(roles[-1].name, roles[-1].password)
            self.assertEqual(ec, 0, message)
            # for/else: the else branch runs only when no existing DID
            # was found bound to the multisig address.
            for did_info in message['dids']:
                if did_info["address"] == addr:
                    # Reuse the already-registered symbol.
                    setattr(self, attr_name, did_info["symbol"])
                    break
            else:
                # not issued
                Alice.send_etp(addr, 10**8)
                Alice.mining()
                did_symbol = getattr(self, attr_name)
                # Registering from a 2-of-3 address needs a second signature.
                ec, tx = mvs_rpc.register_did(
                    roles[0].name, roles[0].password, addr, did_symbol)
                self.assertEqual(ec, 0, tx)
                ec, tx = mvs_rpc.sign_multisigtx(
                    roles[1].name, roles[1].password, tx, True)
                self.assertEqual(ec, 0, tx)
                Alice.mining()
class ForkTestCase(MVSTestCaseBase):
    """Fixture for chain-fork tests against a second (remote) node.

    The local node is partitioned from its peers, both sides mine
    independently, and fork() reconnects them and waits for the local
    chain to converge on the longer remote chain.
    """
    # Address of the helper node driven over RPC.
    remote_ip = "10.10.10.92"
    remote_ctrl = None
    @classmethod
    def setUpClass(cls):
        cls.remote_ctrl = mvs_rpc.RemoteCtrl(cls.remote_ip)
    def setUp(self):
        '''import Alice account to the remote'''
        MVSTestCaseBase.setUp(self)
        # Height at which the partition was made; -1 until make_partion().
        self.partion_height = -1
        try:
            with open(Alice.keystore_file) as f:
                ec, message = self.remote_ctrl.import_keyfile(
                    Alice.name, Alice.password, "any", f.read())
            # it still works if the account Alice already exist in remote wallet
            #self.assertEqual(ec, 0, message)
        # NOTE(review): bare except also swallows assertion/logic errors,
        # not just connectivity failures -- consider narrowing.
        except:
            print( 'unable to connect remote url:' + self.remote_ip )
    def tearDown(self):
        try:
            self.remote_ctrl.delete_account(
                Alice.name, Alice.password, Alice.lastword())
            # Re-join the remote node to the local p2p network.
            ec, message = mvs_rpc.add_node(self.remote_ip + ':5251')
            self.assertEqual(ec, 0, message)
        except:
            print( 'unable to connect remote url:' + self.remote_ip )
        MVSTestCaseBase.tearDown(self)
    def make_partion(self):
        '''make the p2p network partion into 2 seperate ones.'''
        # record current block height
        ec, message = mvs_rpc.get_info()
        self.assertEqual(ec, 0, message)
        self.partion_height = message[0]
        # Ban every peer so the local node mines alone.
        ec, peers = mvs_rpc.getpeerinfo()
        for peer in peers:
            ec, message = mvs_rpc.ban_node(peer)
            self.assertEqual(ec, 0, message)
    def remote_ming(self, times):
        # Mine `times` blocks on the remote node and return its new height.
        mining = mvs_rpc.remote_call(self.remote_ip, Alice.mining)
        mining(times)
        get_info = mvs_rpc.remote_call(self.remote_ip, mvs_rpc.get_info)
        ec, message = get_info()
        self.assertEqual(ec, 0, message)
        return message[0] # expect hight
    def fork(self):
        try:
            # Give the remote the longer chain, reconnect, then poll until
            # the local node reorganizes up to the remote height.
            ming_round = 6
            expect_hight = self.remote_ming(ming_round)
            ec, message = mvs_rpc.add_node(self.remote_ip + ':5251')
            self.assertEqual(ec, 0, message)
            import time
            new_height = 0
            # wait until the fork complete
            timeout = 300
            while new_height < expect_hight: # self.partion_height + ming_round:
                time.sleep(1)
                ec, message = mvs_rpc.get_info()
                self.assertEqual(ec, 0, message)
                new_height = message[0]
                timeout -= 1
                self.assertGreater(timeout, 0, "wait fork timeout error!")
        except:
            print( 'unable to connect remote url:' + self.remote_ip )
| agpl-3.0 |
scherroman/mugen | mugen/location_utility.py | 1 | 2143 | from typing import List, Tuple
"""
Module for Location & Interval manipulation
"""
def intervals_from_locations(locations: List[float]) -> List[float]:
    """Convert absolute locations into the gaps between consecutive locations.

    The first interval is measured from zero, so e.g. [5, 10, 15]
    becomes [5, 5, 5].
    """
    if not locations:
        return []
    deltas = [locations[0]]
    deltas.extend(curr - prev for prev, curr in zip(locations, locations[1:]))
    return deltas
def locations_from_intervals(intervals: List[float]) -> List[float]:
    """Convert interval durations into cumulative locations.

    E.g. [5, 5, 5] -> [5, 10, 15].  The original loop guarded each
    append with ``if index < len(intervals)``, which is always true
    inside ``enumerate``; the dead condition has been removed.
    """
    locations = []
    running_duration = 0
    for interval in intervals:
        running_duration += interval
        locations.append(running_duration)
    return locations
def start_end_locations_from_locations(locations: List[float]) -> Tuple[List[float], List[float]]:
    """
    Calculates the start and end times of each location

    Ex) 5, 10, 15
        start_times == 5, 10, 15
        end_times == 10, 15, 15

    Returns
    -------
    A tuple of start and end times
    """
    if not locations:
        return [], []
    starts = list(locations)
    # Each location ends where the next one begins; the final location
    # ends at itself.
    ends = list(locations[1:]) + [locations[-1]]
    return starts, ends
def start_end_locations_from_intervals(intervals: List[float]) -> Tuple[List[float], List[float]]:
    """
    Calculates the start and end times of each interval

    Ex) 5, 10, 15
        start_times == 0, 5, 10
        end_times == 5, 10, 15

    Returns
    -------
    A tuple of start and end times
    """
    starts = []
    ends = []
    elapsed = 0
    for duration in intervals:
        # Each interval starts where the previous one ended.
        starts.append(elapsed)
        elapsed += duration
        ends.append(elapsed)
    return starts, ends
| mit |
iychoi/syndicate | python/syndicate/ag/fs_driver_common/fs_backends/iplant_datastore/irods_client.py | 3 | 7553 | #!/usr/bin/env python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import irods
import logging
from os import O_RDONLY
from io import RawIOBase, BufferedRandom
from irods.session import iRODSSession
from irods.data_object import iRODSDataObject, iRODSDataObjectFileRaw
from retrying import retry
from timeout_decorator import timeout
logger = logging.getLogger('irods_client')
logger.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
fh = logging.FileHandler('irods_client.log')
fh.setLevel(logging.DEBUG)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
MAX_ATTEMPT = 3 # 3 retries
ATTEMPT_INTERVAL = 5000 # 5 sec
TIMEOUT_SECONDS = 20 # 20 sec
"""
Timeout only works at a main thread.
"""
"""
Do not call these functions directly.
These functions are called by irods_client class!
"""
#@timeout(TIMEOUT_SECONDS)
def _getCollection(session, path):
    # Single choke point for collection lookups so the (currently
    # disabled) @timeout decorator can wrap them in one place.
    return session.collections.get(path)
#@timeout(TIMEOUT_SECONDS)
def _readLargeBlock(br):
    # Read in 1 MiB chunks to keep download memory usage bounded.
    return br.read(1024*1024)
"""
Interface class to iRODS
"""
class irods_status(object):
    """Lightweight stat record for an iRODS collection (directory) or
    data object (file)."""

    def __init__(self, directory=False,
                 path=None,
                 name=None,
                 size=0,
                 checksum=0,
                 create_time=0,
                 modify_time=0):
        self.directory = directory
        self.path = path
        self.name = name
        self.size = size
        self.checksum = checksum
        self.create_time = create_time
        self.modify_time = modify_time

    @classmethod
    def fromCollection(cls, col):
        # Collections carry no size/checksum/time metadata here.
        return irods_status(directory=True,
                            path=col.path,
                            name=col.name)

    @classmethod
    def fromDataObject(cls, obj):
        return irods_status(directory=False,
                            path=obj.path,
                            name=obj.name,
                            size=obj.size,
                            checksum=obj.checksum,
                            create_time=obj.create_time,
                            modify_time=obj.modify_time)

    def __eq__(self, other):
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # BUG FIX: Python 2 does not derive __ne__ from __eq__, so
        # without this, `a != b` compared object identity instead of
        # attribute equality.
        return not self.__eq__(other)

    def __repr__(self):
        rep_d = "F"
        if self.directory:
            rep_d = "D"
        return "<irods_status %s %s %d %s>" % (rep_d, self.name, self.size, self.checksum)
class irods_client(object):
    """Thin convenience wrapper around python-irodsclient's iRODSSession,
    exposing list/stat/read/download operations by path."""
    def __init__(self, host=None,
                 port=1247,
                 user=None,
                 password=None,
                 zone=None):
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.zone = zone
        # Set by connect(); None until then.
        self.session = None
    def connect(self):
        self.session = iRODSSession(host=self.host,
                                    port=self.port,
                                    user=self.user,
                                    password=self.password,
                                    zone=self.zone)
    def close(self):
        self.session.cleanup()
    def __enter__(self):
        # Context-manager support: `with irods_client(...) as client:`.
        self.connect()
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        self.close()
    """
    Returns directory entries in string
    """
    #@retry(stop_max_attempt_number=MAX_ATTEMPT, wait_fixed=ATTEMPT_INTERVAL, wrap_exception=True)
    def list(self, path):
        # Names of sub-collections first, then data objects.
        coll = _getCollection(self.session, path)
        entries = []
        for col in coll.subcollections:
            entries.append(col.name)
        for obj in coll.data_objects:
            entries.append(obj.name)
        return entries
    """
    Returns directory entries with status
    """
    #@retry(stop_max_attempt_number=MAX_ATTEMPT, wait_fixed=ATTEMPT_INTERVAL, wrap_exception=True)
    def listStats(self, path):
        coll = _getCollection(self.session, path)
        stats = []
        for col in coll.subcollections:
            stats.append(irods_status.fromCollection(col))
        for obj in coll.data_objects:
            stats.append(irods_status.fromDataObject(obj))
        return stats
    #@retry(stop_max_attempt_number=MAX_ATTEMPT, wait_fixed=ATTEMPT_INTERVAL, wrap_exception=True)
    def isDir(self, path):
        # A path is a directory if the parent lists it as a sub-collection.
        parent = os.path.dirname(path)
        coll = _getCollection(self.session, parent)
        for col in coll.subcollections:
            if col.path == path:
                return True
        return False
    #@retry(stop_max_attempt_number=MAX_ATTEMPT, wait_fixed=ATTEMPT_INTERVAL, wrap_exception=True)
    def isFile(self, path):
        # A path is a file if the parent lists it as a data object.
        parent = os.path.dirname(path)
        coll = _getCollection(self.session, parent)
        for obj in coll.data_objects:
            if obj.path == path:
                return True
        return False
    #@retry(stop_max_attempt_number=MAX_ATTEMPT, wait_fixed=ATTEMPT_INTERVAL, wrap_exception=True)
    def exists(self, path):
        stat = self.getStat(path)
        if stat:
            return True
        return False
    #@retry(stop_max_attempt_number=MAX_ATTEMPT, wait_fixed=ATTEMPT_INTERVAL, wrap_exception=True)
    def getStat(self, path):
        # Returns an irods_status for the path, or None if it doesn't exist.
        parent = os.path.dirname(path)
        coll = _getCollection(self.session, parent)
        for col in coll.subcollections:
            if col.path == path:
                return irods_status.fromCollection(col)
        for obj in coll.data_objects:
            if obj.path == path:
                return irods_status.fromDataObject(obj)
        return None
    #@retry(stop_max_attempt_number=MAX_ATTEMPT, wait_fixed=ATTEMPT_INTERVAL, wrap_exception=True)
    def read(self, path, offset, size):
        # Reads up to `size` bytes at `offset`; returns None if the seek
        # could not land on the requested offset or the open failed.
        buf = None
        br = None
        conn = None
        try:
            conn, desc = self.session.data_objects.open(path, O_RDONLY)
            raw = iRODSDataObjectFileRaw(conn, desc)
            br = BufferedRandom(raw)
            new_offset = br.seek(offset)
            if new_offset == offset:
                buf = br.read(size)
        finally:
            # Close the buffered layer before releasing the connection.
            if br:
                br.close()
            if conn:
                conn.release(True)
        return buf
    #@retry(stop_max_attempt_number=MAX_ATTEMPT, wait_fixed=ATTEMPT_INTERVAL, wrap_exception=True)
    def download(self, path, to):
        # Streams the data object to local file `to` in 1 MiB chunks.
        conn, desc = self.session.data_objects.open(path, O_RDONLY)
        raw = iRODSDataObjectFileRaw(conn, desc)
        br = BufferedRandom(raw)
        try:
            with open(to, 'w') as wf:
                while(True):
                    buf = _readLargeBlock(br)
                    if not buf:
                        break
                    wf.write(buf)
        finally:
            conn.release(True)
            br.close()
        return to
| apache-2.0 |
darkleons/odoo | addons/account/project/project.py | 32 | 2411 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_journal(osv.osv):
    # OpenERP model declaring the analytic journal: a named, typed journal
    # that groups analytic (cost accounting) entry lines per company.
    _name = 'account.analytic.journal'
    _description = 'Analytic Journal'
    _columns = {
        'name': fields.char('Journal Name', required=True),
        'code': fields.char('Journal Code', size=8),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the analytic journal without removing it."),
        'type': fields.selection([('sale','Sale'), ('purchase','Purchase'), ('cash','Cash'), ('general','General'), ('situation','Situation')], 'Type', required=True, help="Gives the type of the analytic journal. When it needs for a document (eg: an invoice) to create analytic entries, Odoo will look for a matching journal of the same type."),
        'line_ids': fields.one2many('account.analytic.line', 'journal_id', 'Lines'),
        'company_id': fields.many2one('res.company', 'Company', required=True),
    }
    _defaults = {
        'active': True,
        'type': 'general',
        # Default to the current user's company.
        'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
    }
class account_journal(osv.osv):
    # Extends the core account.journal with a link to the analytic
    # journal used when posting analytic entries from this journal.
    _inherit="account.journal"
    _columns = {
        'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Zelgadis87/Sick-Beard | lib/jsonrpclib/jsonrpc.py | 86 | 17140 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
============================
JSONRPC Library (jsonrpclib)
============================
This library is a JSON-RPC v.2 (proposed) implementation which
follows the xmlrpclib API for portability between clients. It
uses the same Server / ServerProxy, loads, dumps, etc. syntax,
while providing features not present in XML-RPC like:
* Keyword arguments
* Notifications
* Versioning
* Batches and batch notifications
Eventually, I'll add a SimpleXMLRPCServer compatible library,
and other things to tie the thing off nicely. :)
For a quick-start, just open a console and type the following,
replacing the server address, method, and parameters
appropriately.
>>> import jsonrpclib
>>> server = jsonrpclib.Server('http://localhost:8181')
>>> server.add(5, 6)
11
>>> server._notify.add(5, 6)
>>> batch = jsonrpclib.MultiCall(server)
>>> batch.add(3, 50)
>>> batch.add(2, 3)
>>> batch._notify.add(3, 5)
>>> batch()
[53, 5]
See http://code.google.com/p/jsonrpclib/ for more info.
"""
import types
import sys
from xmlrpclib import Transport as XMLTransport
from xmlrpclib import SafeTransport as XMLSafeTransport
from xmlrpclib import ServerProxy as XMLServerProxy
from xmlrpclib import _Method as XML_Method
import time
import string
import random
# Library includes
import lib.jsonrpclib
from lib.jsonrpclib import config
from lib.jsonrpclib import history
# JSON library importing
cjson = None
json = None
try:
import cjson
except ImportError:
try:
import json
except ImportError:
try:
import lib.simplejson as json
except ImportError:
raise ImportError(
'You must have the cjson, json, or simplejson ' +
'module(s) available.'
)
IDCHARS = string.ascii_lowercase+string.digits
class UnixSocketMissing(Exception):
    """
    Raised when a unix:// URI is used on a platform without Unix
    domain socket support (e.g. Windows).
    """
    pass
#JSON Abstractions
def jdumps(obj, encoding='utf-8'):
    """Serialize obj to JSON, preferring the cjson backend when present."""
    # Do 'serialize' test at some point for other classes
    global cjson
    if not cjson:
        return json.dumps(obj, encoding=encoding)
    return cjson.encode(obj)
def jloads(json_string):
    """Parse a JSON string with whichever JSON backend was imported."""
    global cjson
    if not cjson:
        return json.loads(json_string)
    return cjson.decode(json_string)
# XMLRPClib re-implementations
class ProtocolError(Exception):
    """Raised when a request violates the JSON-RPC protocol rules
    (e.g. mixing positional and keyword arguments in one call)."""
    pass
class TransportMixIn(object):
    """ Just extends the XMLRPC transport where necessary. """
    user_agent = config.user_agent
    # for Python 2.7 support
    _connection = None
    def send_content(self, connection, request_body):
        # Same flow as xmlrpclib's transport, but with the JSON-RPC
        # content type instead of text/xml.
        connection.putheader("Content-Type", "application/json-rpc")
        connection.putheader("Content-Length", str(len(request_body)))
        connection.endheaders()
        if request_body:
            connection.send(request_body)
    def getparser(self):
        # Pair a pass-through parser with an accumulating target,
        # matching the (parser, unmarshaller) contract xmlrpclib expects.
        target = JSONTarget()
        return JSONParser(target), target
class JSONParser(object):
    """Parser facade matching the interface xmlrpclib expects; JSON needs
    no incremental parsing, so data is forwarded verbatim to the target."""
    def __init__(self, target):
        self.target = target
    def feed(self, data):
        self.target.feed(data)
    def close(self):
        # Nothing to finalize -- the target accumulates until read.
        pass
class JSONTarget(object):
    """Accumulates response chunks; close() returns the joined payload."""
    def __init__(self):
        self.data = []
    def feed(self, data):
        self.data.append(data)
    def close(self):
        return ''.join(self.data)
class Transport(TransportMixIn, XMLTransport):
    # Plain HTTP transport with JSON-RPC content handling mixed in.
    pass
class SafeTransport(TransportMixIn, XMLSafeTransport):
    # HTTPS transport with JSON-RPC content handling mixed in.
    pass
from httplib import HTTP, HTTPConnection
from socket import socket
USE_UNIX_SOCKETS = False
try:
from socket import AF_UNIX, SOCK_STREAM
USE_UNIX_SOCKETS = True
except ImportError:
pass
if (USE_UNIX_SOCKETS):
    # Only defined when the platform exposes AF_UNIX (i.e. not Windows).
    class UnixHTTPConnection(HTTPConnection):
        # An HTTPConnection whose "host" is actually a Unix socket path.
        def connect(self):
            self.sock = socket(AF_UNIX, SOCK_STREAM)
            self.sock.connect(self.host)
    class UnixHTTP(HTTP):
        _connection_class = UnixHTTPConnection
    class UnixTransport(TransportMixIn, XMLTransport):
        def make_connection(self, host):
            import httplib
            host, extra_headers, x509 = self.get_host_info(host)
            return UnixHTTP(host)
class ServerProxy(XMLServerProxy):
    """
    Unfortunately, much more of this class has to be copied since
    so much of it does the serialization.

    Accepts http://, https:// and unix:// URIs; attribute access yields
    callable _Method proxies, and the _notify property yields
    notification-style (no-response) calls.
    """
    def __init__(self, uri, transport=None, encoding=None,
                 verbose=0, version=None):
        import urllib
        if not version:
            version = config.version
        self.__version = version
        schema, uri = urllib.splittype(uri)
        if schema not in ('http', 'https', 'unix'):
            raise IOError('Unsupported JSON-RPC protocol.')
        if schema == 'unix':
            if not USE_UNIX_SOCKETS:
                # Don't like the "generic" Exception...
                raise UnixSocketMissing("Unix sockets not available.")
            # For unix sockets the "host" is the socket path.
            self.__host = uri
            self.__handler = '/'
        else:
            self.__host, self.__handler = urllib.splithost(uri)
            if not self.__handler:
                # BUG FIX: this previously read `self.__handler == '/'`
                # (a comparison with no effect), leaving the handler
                # empty. Assign the default request path instead.
                self.__handler = '/'
        if transport is None:
            if schema == 'unix':
                transport = UnixTransport()
            elif schema == 'https':
                transport = SafeTransport()
            else:
                transport = Transport()
        self.__transport = transport
        self.__encoding = encoding
        self.__verbose = verbose
    def _request(self, methodname, params, rpcid=None):
        # Regular call: serialize, send, and return the 'result' member.
        request = dumps(params, methodname, encoding=self.__encoding,
                        rpcid=rpcid, version=self.__version)
        response = self._run_request(request)
        check_for_errors(response)
        return response['result']
    def _request_notify(self, methodname, params, rpcid=None):
        # Notification: no result is expected back from the server.
        request = dumps(params, methodname, encoding=self.__encoding,
                        rpcid=rpcid, version=self.__version, notify=True)
        response = self._run_request(request, notify=True)
        check_for_errors(response)
        return
    def _run_request(self, request, notify=None):
        history.add_request(request)
        response = self.__transport.request(
            self.__host,
            self.__handler,
            request,
            verbose=self.__verbose
        )
        # Here, the XMLRPC library translates a single list
        # response to the single value -- should we do the
        # same, and require a tuple / list to be passed to
        # the response object, or expect the Server to be
        # outputting the response appropriately?
        history.add_response(response)
        if not response:
            return None
        return_obj = loads(response)
        return return_obj
    def __getattr__(self, name):
        # Same as original, just with new _Method reference
        return _Method(self._request, name)
    @property
    def _notify(self):
        # Just like __getattr__, but with notify namespace.
        return _Notify(self._request_notify)
class _Method(XML_Method):
    # Callable proxy for one remote method name; subclasses
    # xmlrpclib._Method. NOTE: this relies on name mangling lining up —
    # both classes are named "_Method", so the mangled self.__send /
    # self.__name attributes set by the parent's __init__ resolve here.
    def __call__(self, *args, **kwargs):
        # JSON-RPC allows positional *or* keyword params, never both.
        if len(args) > 0 and len(kwargs) > 0:
            raise ProtocolError('Cannot use both positional ' +
                'and keyword arguments (according to JSON-RPC spec.)')
        if len(args) > 0:
            return self.__send(self.__name, args)
        else:
            return self.__send(self.__name, kwargs)
    def __getattr__(self, name):
        # Dotted access ("proxy.ns.method") extends the name in place.
        self.__name = '%s.%s' % (self.__name, name)
        return self
        # The old method returned a new instance, but this seemed wasteful.
        # The only thing that changes is the name.
        #return _Method(self.__send, "%s.%s" % (self.__name, name))
class _Notify(object):
    """Attribute namespace producing notification-mode _Method proxies."""

    def __init__(self, request):
        # ``request`` is the proxy's _request_notify bound method.
        self._request = request

    def __getattr__(self, name):
        return _Method(self._request, name)
# Batch implementation
class MultiCallMethod(object):
    """One queued call of a batch request.

    Records the (possibly dotted) method name, the parameters from the
    most recent call, and whether the job is a notification.
    """

    def __init__(self, method, notify=False):
        self.method = method
        self.params = []
        self.notify = notify

    def __call__(self, *args, **kwargs):
        # Positional and keyword parameters are mutually exclusive.
        if len(args) > 0 and len(kwargs) > 0:
            raise ProtocolError('JSON-RPC does not support both ' +
                                'positional and keyword arguments.')
        self.params = kwargs if len(kwargs) > 0 else args

    def request(self, encoding=None, rpcid=None):
        """Serialize this single job as a JSON-RPC 2.0 request string."""
        return dumps(self.params, self.method, version=2.0,
                     encoding=encoding, rpcid=rpcid, notify=self.notify)

    def __repr__(self):
        return '%s' % self.request()

    def __getattr__(self, method):
        # Dotted attribute access extends the method name in place.
        self.method = '%s.%s' % (self.method, method)
        return self
class MultiCallNotify(object):
    """Notification namespace of a MultiCall batch.

    Attribute access queues a notification-mode job on the owning
    MultiCall and returns it for the caller to invoke with parameters.
    """

    def __init__(self, multicall):
        self.multicall = multicall

    def __getattr__(self, name):
        job = MultiCallMethod(name, notify=True)
        self.multicall._job_list.append(job)
        return job
class MultiCallIterator(object):
    """Iterates over the per-call results of a batch response.

    Each item is validated via check_for_errors, so a faulty entry
    raises instead of being yielded.
    """

    def __init__(self, results):
        self.results = results

    def __iter__(self):
        # BUG FIX: the original ended this generator with an explicit
        # ``raise StopIteration``. That was redundant, and under
        # PEP 479 (Python 3.7+) it would surface as a RuntimeError;
        # simply falling off the end terminates the generator cleanly.
        for i in range(0, len(self.results)):
            yield self[i]

    def __getitem__(self, i):
        item = self.results[i]
        check_for_errors(item)
        return item['result']

    def __len__(self):
        return len(self.results)
class MultiCall(object):
    # Collects queued jobs (via attribute access) and sends them as one
    # JSON-RPC 2.0 batch when the MultiCall object itself is called.
    def __init__(self, server):
        self._server = server
        self._job_list = []
    def _request(self):
        # Nothing queued -> nothing to send; returns None.
        if len(self._job_list) < 1:
            # Should we alert? This /is/ pretty obvious.
            return
        request_body = '[ %s ]' % ','.join([job.request() for
                                            job in self._job_list])
        responses = self._server._run_request(request_body)
        # The queue is consumed even if the transport returned nothing.
        del self._job_list[:]
        if not responses:
            responses = []
        return MultiCallIterator(responses)
    @property
    def _notify(self):
        # Notification namespace: jobs queued through it carry notify=True.
        return MultiCallNotify(self)
    def __getattr__(self, name):
        new_job = MultiCallMethod(name)
        self._job_list.append(new_job)
        return new_job
    # Calling the MultiCall object sends the batch.
    __call__ = _request
# These lines conform to xmlrpclib's "compatibility" line.
# Not really sure if we should include these, but oh well.
# Alias kept so code written against xmlrpclib.Server ports over directly.
Server = ServerProxy
class Fault(object):
    """JSON-RPC error object, the JSON counterpart of xmlrpclib.Fault."""

    def __init__(self, code=-32000, message='Server error', rpcid=None):
        self.faultCode = code
        self.faultString = message
        self.rpcid = rpcid

    def error(self):
        """Return the spec's ``error`` member as a dict."""
        return {'code': self.faultCode, 'message': self.faultString}

    def response(self, rpcid=None, version=None):
        """Serialize this fault as a complete JSON-RPC error response."""
        if not version:
            version = config.version
        if rpcid:
            self.rpcid = rpcid
        return dumps(self, methodresponse=True,
                     rpcid=self.rpcid, version=version)

    def __repr__(self):
        return '<Fault %s: %s>' % (self.faultCode, self.faultString)
def random_id(length=8):
    """Return a random request id of ``length`` characters drawn
    (with replacement) from the module-level IDCHARS alphabet."""
    return ''.join(random.choice(IDCHARS) for _ in range(length))
class Payload(dict):
    """Builder for JSON-RPC request/response/error message dicts.

    ``version`` (1.0 or 2.0, defaulting to config.version) decides
    whether messages carry the 2.0 ``jsonrpc`` member or the 1.0
    ``error``/null-id conventions.
    """

    def __init__(self, rpcid=None, version=None):
        if not version:
            version = config.version
        self.id = rpcid
        self.version = float(version)

    def request(self, method, params=None):
        """Return a request dict; a random id is generated when unset.

        BUG FIX: the default for ``params`` was the mutable ``[]``
        (shared across calls); ``None`` behaves identically here since
        the value is only tested for truthiness.
        """
        if type(method) not in types.StringTypes:
            raise ValueError('Method name must be a string.')
        if not self.id:
            self.id = random_id()
        request = {'id': self.id, 'method': method}
        if params:
            request['params'] = params
        if self.version >= 2:
            request['jsonrpc'] = str(self.version)
        return request

    def notify(self, method, params=None):
        """Return a notification dict: 2.0 drops 'id', 1.0 nulls it."""
        request = self.request(method, params)
        if self.version >= 2:
            del request['id']
        else:
            request['id'] = None
        return request

    def response(self, result=None):
        """Return a response dict carrying ``result``."""
        response = {'result': result, 'id': self.id}
        if self.version >= 2:
            response['jsonrpc'] = str(self.version)
        else:
            response['error'] = None
        return response

    def error(self, code=-32000, message='Server error.'):
        """Return an error response for the given code/message."""
        error = self.response()
        if self.version >= 2:
            del error['result']
        else:
            error['result'] = None
        error['error'] = {'code': code, 'message': message}
        return error
def dumps(params=[], methodname=None, methodresponse=None,
          encoding=None, rpcid=None, version=None, notify=None):
    """
    Serialize a request, notification, response or Fault to JSON text.

    This differs from the Python implementation in that it implements
    the rpcid argument since the 2.0 spec requires it for responses.
    """
    # NOTE: ``params=[]`` is a mutable default; it is never mutated
    # here, but treat with care if editing.
    if not version:
        version = config.version
    valid_params = (types.TupleType, types.ListType, types.DictType)
    # NOTE(review): ``methodname in types.StringTypes`` tests membership
    # in the (str, unicode) *type tuple* — it looks like
    # ``type(methodname) in types.StringTypes`` was intended; confirm
    # against upstream before changing.
    if methodname in types.StringTypes and \
        type(params) not in valid_params and \
        not isinstance(params, Fault):
        """
        If a method, and params are not in a listish or a Fault,
        error out.
        """
        raise TypeError('Params must be a dict, list, tuple or Fault ' +
                        'instance.')
    # Begin parsing object
    payload = Payload(rpcid=rpcid, version=version)
    if not encoding:
        encoding = 'utf-8'
    # A Fault serializes to an error response regardless of other args.
    if type(params) is Fault:
        response = payload.error(params.faultCode, params.faultString)
        return jdumps(response, encoding=encoding)
    if type(methodname) not in types.StringTypes and methodresponse != True:
        raise ValueError('Method name must be a string, or methodresponse '+
                         'must be set to True.')
    # Optional rich-object encoding hook.
    if config.use_jsonclass == True:
        from lib.jsonrpclib import jsonclass
        params = jsonclass.dump(params)
    if methodresponse is True:
        if rpcid is None:
            raise ValueError('A method response must have an rpcid.')
        response = payload.response(params)
        return jdumps(response, encoding=encoding)
    request = None
    if notify == True:
        request = payload.notify(methodname, params)
    else:
        request = payload.request(methodname, params)
    return jdumps(request, encoding=encoding)
def loads(data):
    """
    Deserialize JSON-RPC text into Python structures.

    This differs from the Python implementation, in that it returns
    the request structure in Dict format instead of the method, params.
    It will return a list in the case of a batch request / response.
    """
    if data == '':
        # notification
        return None
    result = jloads(data)
    # if the above raises an error, the implementing server code
    # should return something like the following:
    # { 'jsonrpc':'2.0', 'error': fault.error(), id: None }
    # Optional rich-object decoding hook (mirror of dumps()).
    if config.use_jsonclass == True:
        from lib.jsonrpclib import jsonclass
        result = jsonclass.load(result)
    return result
def check_for_errors(result):
    # Validate a decoded response dict and surface server faults.
    # Returns the result unchanged when it is well-formed; raises
    # TypeError / NotImplementedError / ValueError for malformed
    # responses and ProtocolError for a server-reported error member.
    if not result:
        # Notification
        return result
    if type(result) is not types.DictType:
        raise TypeError('Response is not a dict.')
    if 'jsonrpc' in result.keys() and float(result['jsonrpc']) > 2.0:
        raise NotImplementedError('JSON-RPC version not yet supported.')
    if 'result' not in result.keys() and 'error' not in result.keys():
        raise ValueError('Response does not have a result or error key.')
    if 'error' in result.keys() and result['error'] != None:
        # Server reported a fault: re-raise it locally.
        code = result['error']['code']
        message = result['error']['message']
        raise ProtocolError((code, message))
    return result
def isbatch(result):
    # True when the decoded payload is a JSON-RPC 2.0 batch: a
    # non-empty list/tuple whose first entry is a dict carrying a
    # 'jsonrpc' member of version >= 2.
    if type(result) not in (types.ListType, types.TupleType):
        return False
    if len(result) < 1:
        return False
    if type(result[0]) is not types.DictType:
        return False
    if 'jsonrpc' not in result[0].keys():
        return False
    try:
        version = float(result[0]['jsonrpc'])
    except ValueError:
        raise ProtocolError('"jsonrpc" key must be a float(able) value.')
    if version < 2:
        return False
    return True
def isnotification(request):
    """Return True when a parsed request dict is a notification.

    A 2.0 notification omits the 'id' member entirely; a 1.0
    notification carries an explicit null id.
    """
    has_id = 'id' in request.keys()
    if not has_id:
        # 2.0 notification
        return True
    if request['id'] == None:
        # 1.0 notification
        return True
    return False
| gpl-3.0 |
adbrebs/spynet | models/max_pool_3d.py | 1 | 3198 |
from theano import tensor
from theano.tensor.signal.downsample import DownsampleFactorMax
def max_pool_3d(input, ds, ignore_border=False):
    """
    Takes as input a N-D tensor, where N >= 3. It downscales the input by
    the specified factor, by keeping only the maximum value of non-overlapping
    patches of size (ds[0],ds[1],ds[2]) (depth, height, width)

    Implemented as two 2D DownsampleFactorMax passes: first over the
    (rows, cols) frame dimensions, then — after shuffling depth into the
    last axis — over (1, depth).

    Arguments:
        input (N-D theano tensor of input images): input images. Max pooling will be done over the 3 last dimensions.
        ds (tuple of length 3): factor by which to downscale. (2,2,2) will halve the video in each dimension.
        ignore_border (boolean): When True, (5,5,5) input with ds=(2,2,2)
            will generate a (2,2,2) output. (3,3,3) otherwise.
    """
    if input.ndim < 3:
        raise NotImplementedError('max_pool_3d requires a dimension >= 3')
    # extract nr dimensions
    vid_dim = input.ndim
    # max pool in two different steps, so we can use the 2d implementation of
    # downsamplefactormax. First maxpool frames as usual.
    # Then maxpool the depth dimension. Shift the depth dimension to the third
    # position, so rows and cols are in the back
    # extract dimensions
    frame_shape = input.shape[-2:]
    # count the number of "leading" dimensions, store as dmatrix
    batch_size = tensor.prod(input.shape[:-2])
    batch_size = tensor.shape_padright(batch_size,1)
    # store as 4D tensor with shape: (batch_size,1,height,width)
    new_shape = tensor.cast(tensor.join(0, batch_size,
                                        tensor.as_tensor([1,]),
                                        frame_shape), 'int32')
    input_4D = tensor.reshape(input, new_shape, ndim=4)
    # downsample mini-batch of videos in rows and cols
    op = DownsampleFactorMax((ds[1],ds[2]), ignore_border)
    output = op(input_4D)
    # restore to original shape
    outshape = tensor.join(0, input.shape[:-2], output.shape[-2:])
    out = tensor.reshape(output, outshape, ndim=input.ndim)
    # now maxpool depth
    # output (depth, rows, cols), reshape so that depth is in the back
    shufl = (list(range(vid_dim-3)) + [vid_dim-2]+[vid_dim-1]+[vid_dim-3])
    input_depth = out.dimshuffle(shufl)
    # reset dimensions
    vid_shape = input_depth.shape[-2:]
    # count the number of "leading" dimensions, store as dmatrix
    batch_size = tensor.prod(input_depth.shape[:-2])
    batch_size = tensor.shape_padright(batch_size,1)
    # store as 4D tensor with shape: (batch_size,1,width,depth)
    new_shape = tensor.cast(tensor.join(0, batch_size,
                                        tensor.as_tensor([1,]),
                                        vid_shape), 'int32')
    input_4D_depth = tensor.reshape(input_depth, new_shape, ndim=4)
    # downsample mini-batch of videos in depth; pool size (1, ds[0])
    # only reduces the (now-trailing) depth axis.
    op = DownsampleFactorMax((1,ds[0]), ignore_border)
    outdepth = op(input_4D_depth)
    # output
    # restore to original shape (xxx, rows, cols, depth)
    outshape = tensor.join(0, input_depth.shape[:-2], outdepth.shape[-2:])
    # final dimshuffle moves depth back in front of rows/cols.
    shufl = (list(range(vid_dim-3)) + [vid_dim-1]+[vid_dim-3]+[vid_dim-2])
    return tensor.reshape(outdepth, outshape, ndim=input.ndim).dimshuffle(shufl)
zikifer/node-gyp | legacy/tools/gyp/pylib/gyp/easy_xml_test.py | 60 | 3231 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Unit tests for the easy_xml.py file. """
import gyp.easy_xml as easy_xml
import unittest
import StringIO
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
self.stderr = StringIO.StringIO()
def test_EasyXml_simple(self):
self.assertEqual(
easy_xml.XmlToString(['test']),
'<?xml version="1.0" encoding="utf-8"?><test/>')
self.assertEqual(
easy_xml.XmlToString(['test'], encoding='Windows-1252'),
'<?xml version="1.0" encoding="Windows-1252"?><test/>')
def test_EasyXml_simple_with_attributes(self):
self.assertEqual(
easy_xml.XmlToString(['test2', {'a': 'value1', 'b': 'value2'}]),
'<?xml version="1.0" encoding="utf-8"?><test2 a="value1" b="value2"/>')
def test_EasyXml_escaping(self):
original = '<test>\'"\r&\nfoo'
converted = '<test>'"
&
foo'
self.assertEqual(
easy_xml.XmlToString(['test3', {'a': original}, original]),
'<?xml version="1.0" encoding="utf-8"?><test3 a="%s">%s</test3>' %
(converted, converted))
def test_EasyXml_pretty(self):
self.assertEqual(
easy_xml.XmlToString(
['test3',
['GrandParent',
['Parent1',
['Child']
],
['Parent2']
]
],
pretty=True),
'<?xml version="1.0" encoding="utf-8"?>\n'
'<test3>\n'
' <GrandParent>\n'
' <Parent1>\n'
' <Child/>\n'
' </Parent1>\n'
' <Parent2/>\n'
' </GrandParent>\n'
'</test3>\n')
def test_EasyXml_complex(self):
# We want to create:
target = (
'<?xml version="1.0" encoding="utf-8"?>'
'<Project>'
'<PropertyGroup Label="Globals">'
'<ProjectGuid>{D2250C20-3A94-4FB9-AF73-11BC5B73884B}</ProjectGuid>'
'<Keyword>Win32Proj</Keyword>'
'<RootNamespace>automated_ui_tests</RootNamespace>'
'</PropertyGroup>'
'<Import Project="$(VCTargetsPath)\\Microsoft.Cpp.props"/>'
'<PropertyGroup '
'Condition="'$(Configuration)|$(Platform)'=='
''Debug|Win32'" Label="Configuration">'
'<ConfigurationType>Application</ConfigurationType>'
'<CharacterSet>Unicode</CharacterSet>'
'</PropertyGroup>'
'</Project>')
xml = easy_xml.XmlToString(
['Project',
['PropertyGroup', {'Label': 'Globals'},
['ProjectGuid', '{D2250C20-3A94-4FB9-AF73-11BC5B73884B}'],
['Keyword', 'Win32Proj'],
['RootNamespace', 'automated_ui_tests']
],
['Import', {'Project': '$(VCTargetsPath)\\Microsoft.Cpp.props'}],
['PropertyGroup',
{'Condition': "'$(Configuration)|$(Platform)'=='Debug|Win32'",
'Label': 'Configuration'},
['ConfigurationType', 'Application'],
['CharacterSet', 'Unicode']
]
])
self.assertEqual(xml, target)
if __name__ == '__main__':
unittest.main()
| mit |
chrismedrela/scriptcraft | scriptcraft/client.py | 1 | 76856 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
import ConfigParser
try:
import cPickle as pickle
except:
import pickle
import itertools
import math
import os.path
from Queue import Queue
import random
import time
import threading
from Tkinter import *
import tkColorChooser
import tkFileDialog
import tkFont
import tkMessageBox
import tkSimpleDialog
# Imports from PIL
import Image, ImageTk # it overrides Tkinter.Image so it must be after Tkinter
# import statement
# We have to explicitly statically import PIL plugins so py2exe know that they
# are necessary.
import PngImagePlugin
import GifImagePlugin
from scriptcraft import direction
from scriptcraft.gamemap import GameMap
from scriptcraft.gamestate import (actions, Game, DEFAULT_GAME_CONFIGURATION,
Language, Program, STAR_PROGRAM, Unit,
NoFreeStartPosition, Tree, MineralDeposit,
load_game_map, InvalidGameMapData, cmds)
from scriptcraft.gamesession import (GameSession, SystemConfiguration,
AlreadyExecuteGame)
from scriptcraft.utils import *
class GameViewer(Canvas):
"""GameViewer is canvas widget to display a scriptcraft Game
instance. It provides scrolling and zooming the map and selecting
fields.
About selecting:
When a mouse motion is detected and the selection (the selection is the
field under the cursor) changed then <<selection-changed>> event is
emitted. You can find out which field is selected by checking
GameViewer.selection_position which is (x, y) tuple or None.
When a left mouse button is being pressed then <<field-selected>> event is
emitted (it doesn't matter if the mouse is inside or outside map). You can
check position of clicked field by getting
GameViewer.selection_position. If there is a double click, then
<<field-double-clicked>> event is also emitted.
You can set pointer at any valid position by calling
set_pointer_position. Pointer is a special selection.
There is also second pointer. Its color can be changed. Use
set_pointer_2_position and set_pointer_2_color methods.
Layers:
layer-1 -- ground
layer-1.5 -- grid of ground
layer-2 -- arrows
layer-3 -- units, trees, objects on map, texts over units
layer-4 -- pointer
interface
"""
SCROLLING_SENSITIVITY = 2**(1/2.0) # in (1, +inf); greater means faster scrolling
TILE_WIDTH = 64
TILE_HEIGHT = 32
GROUND_TILE_WIDTH = 32
GROUND_TILE_HEIGHT = 32
GROUND_TILES_IN_ROW = 4
GROUND_TILES_IN_COLUMN = 4
GROUND_TYPE_TO_NAME = {
0:'ground-black',
1:'ground-dirt',
2:'ground-grass',
3:'ground-rock',
4:'ground-stones',
5:'ground-flowers',
6:'ground-hardearth',
7:'ground-tiles',
8:'ground-sand',
}
MAX_ZOOM = 1.0
MIN_ZOOM = 1.0/4
CORNER_TEXT_POSITION = (15, 20) # position at screen
CORNER_TEXT_FONT_OPTIONS = {'size':12,
'weight':'bold'}
CORNER_TEXT_COLOR = 'red'
CORNER_TEXT_INITIAL_TEXT = ''
FREQUENCY_OF_UPDATING_ANIMATIONS = 50 # ms
LOADING_INDICATOR_POSITION = (-45, 15)
LOADING_INDICATOR_SPEED = int(-360*1.5) # degrees per second
FREQUENCY_OF_CHECKING_QUERY = 100 # ms
COLOR_OF_GROUND_IMITATION = '#336633'
GRID_COLOR = '#555555'
POINTER_ROTATION_SPEED = int(-360*2.5) # degrees per second
POINTER_SIZE = (80, 40)
POINTER_COLORS = [
('white', '#ffffff'),
('red', '#ff0000'),
('green', '#00ff00'),
('darkblue', '#0000aa'),
]
POINTER_2_ROTATION_SPEED = int(-360*1.2)
POINTER_2_SIZE = (64, 32)
    def __init__(self, master):
        """Build the canvas, bind mouse/resize events, create the
        interface items (corner text, loading indicator, pointers) and
        start the periodic animation/queue-polling callbacks."""
        Canvas.__init__(self, master, width=800, height=600, bg='black')
        self.pack(expand=YES, fill=BOTH)

        # To enable receiving wheel rolling events under windows, we
        # need this action before bindings:
        self.focus_set()

        # bindings
        self.bind('<B1-Motion>',
                  self._mouse_motion_with_button_pressed_callback)
        self.bind('<Motion>', self._mouse_motion_callback)
        self.bind('<ButtonRelease-1>', self._release_callback)
        self.bind('<MouseWheel>', self._roll_wheel_callback)
        self.bind('<Button-4>', self._roll_wheel_callback)
        self.bind('<Button-5>', self._roll_wheel_callback)
        self.bind('<Button-1>', self._click_callback)
        self.bind('<Double-Button-1>', self._double_click_callback)
        self.bind("<Configure>", self._resized_callback)

        # own attributes
        self._zoom = 1.0                # current zoom factor
        self._delta = (-5.0, 0.0)       # scroll offset (map coordinates)
        self._game = None
        self._scaled_images_cache = {}
        self._ground_image_cache = None
        self._ground_tiles_cache = {}
        self._last_mouse_position = None # None unless button pressed
        self._click_position = None
        self.selection_position = None # None or (x, y)
        self._trees_ids_by_position = {}
        self._queue = Queue()           # worker thread -> GUI messages
        self._compute_ground_image_flag = False

        # corner text
        self._corner_text_id = self.create_text(
            GameViewer.CORNER_TEXT_POSITION[0],
            GameViewer.CORNER_TEXT_POSITION[1],
            anchor=NW, text=GameViewer.CORNER_TEXT_INITIAL_TEXT,
            font=tkFont.Font(**GameViewer.CORNER_TEXT_FONT_OPTIONS),
            fill=GameViewer.CORNER_TEXT_COLOR,
            tag=['interface'])

        # loading indicator
        image = self._get_image('loading')
        self._loading_image = ImageTk.PhotoImage(image)
        self._loading_indicator_id = self.create_image(
            self._loading_indicator_position[0],
            self._loading_indicator_position[1],
            image=self._loading_image,
            state=HIDDEN, anchor=NW,
            tags=['interface'])
        self._loading_indicator_turned_on = False
        self._update_loading_indicator()

        # load pointer images
        # NOTE(review): the tints are built from the 'loading' image,
        # while _update_pointer animates the 'pointer' image — confirm
        # this asymmetry is intentional.
        self._pointer_images_by_color = {}
        image = self._get_image('loading')
        alpha = image.split()[-1]
        for color_name, color in GameViewer.POINTER_COLORS:
            colored = Image.new('RGBA', image.size, color)
            colored.putalpha(alpha)
            self._pointer_images_by_color[color_name] = colored

        # pointer
        image = self._pointer_images_by_color['white']
        self._pointer_image = ImageTk.PhotoImage(image)
        self._pointer_position = None
        self._pointer_id = self.create_image(
            0, 0,
            image=self._pointer_image,
            state=HIDDEN, anchor=NW,
            tags=['layer-2', 'game'])
        self._update_pointer()

        # second pointer
        self._pointer_2_image = None
        self._pointer_2_color = 'white'
        self._pointer_2_position = None
        self._pointer_2_id = self.create_image(
            0, 0,
            image=self._pointer_2_image,
            state=HIDDEN, anchor=NW,
            tags=['layer-2', 'game'])
        self._update_pointer_2()

        # run checking queue
        self._check_queue()
    @log_on_enter('set game in game viewer', mode='only time')
    def set_game(self, game):
        """ Attribute game should be scriptcraft game instance or
        None.

        In this method game instance passed during previous
        call is used. The previous game instance cannot be modified
        since the previous call!

        Use set_game(None) and set_game(new_game) to force redrawing
        ground and delete current selection.
        """
        previous_game = self._game
        self._game = game
        if previous_game:
            # Drop items redrawn every frame; cached items (ground,
            # trees) are managed separately.
            self.delete('non-cached')
        if not game:
            # reset queue
            self._queue = Queue()
            self._compute_ground_image_flag = False
            # selection position
            self._set_selection_position(None, force_emitting=True)
            # force redrawing ground during next set_game call
            self._ground_image_cache = None
            if 'ground' in self._scaled_images_cache:
                del self._scaled_images_cache['ground']
            # reset zoom and delta
            self._zoom = 1.0
            self._delta = (-5.0, 0.0)
            # hide loading indicator
            self.show_loading_indicator(False)
            # other stuff
            self._trees_ids_by_position.clear()
            self.delete('tree')
        else:
            # selection position
            self._set_selection_position(self.selection_position,
                                         force_emitting=True)
            # draw game
            self._draw_game(game, old_game=previous_game)
def set_corner_text(self, text):
self.itemconfigure(self._corner_text_id,
text=text)
def show_loading_indicator(self, state):
assert isinstance(state, bool)
self._loading_indicator_turned_on = state
state = NORMAL if state else HIDDEN
self.itemconfig(self._loading_indicator_id,
state=state)
def set_pointer_position(self, position_or_None):
self._pointer_position = position_or_None
def set_pointer_2_position(self, position_or_None):
self._pointer_2_position = position_or_None
def set_pointer_2_color(self, color):
assert color in self._pointer_images_by_color, 'unknown color'
self._pointer_2_color = color
@property
def _loading_indicator_position(self):
x, y = GameViewer.LOADING_INDICATOR_POSITION
width, height = self.winfo_width(), self.winfo_height()
result = (x if x >= 0 else width+x,
y if y >= 0 else height+y)
return result
    def _update_loading_indicator(self):
        """Advance the loading-indicator rotation one animation step,
        then reschedule itself via Tk's ``after``."""
        if self._loading_indicator_turned_on:
            # Angle derives from wall-clock time, so rotation speed is
            # independent of the callback frequency.
            angle = time.time()*GameViewer.LOADING_INDICATOR_SPEED % 360
            image = self._get_image('loading')
            image = image.rotate(angle, resample=Image.BICUBIC)
            # Keep a reference on self so Tk's image isn't GC'd.
            self._loading_image = ImageTk.PhotoImage(image)
            self.itemconfig(self._loading_indicator_id,
                            image=self._loading_image)
        self.master.after(GameViewer.FREQUENCY_OF_UPDATING_ANIMATIONS,
                          self._update_loading_indicator)
    def _update_pointer(self):
        """Animate the primary pointer: rotate it, rescale it to the
        current zoom and move it to the selected field; reschedules
        itself via Tk's ``after``."""
        if self._pointer_position is not None:
            angle = time.time()*GameViewer.POINTER_ROTATION_SPEED % 360
            image = self._get_image('pointer')
            image = image.rotate(angle, resample=Image.BICUBIC)
            size = GameViewer.POINTER_SIZE
            size = (size[0]*self._zoom,
                    size[1]*self._zoom)
            size = tuple(map(int, size))
            image = image.resize(size)
            # Keep a reference on self so Tk's image isn't GC'd.
            self._pointer_image = ImageTk.PhotoImage(image)
            self.itemconfig(self._pointer_id, state=NORMAL,
                            image=self._pointer_image)
            pos = self._to_screen_coordinate(self._pointer_position)
            x, y = self._to_image_position('pointer', pos)
            self.coords(self._pointer_id, x, y)
        else:
            self.itemconfig(self._pointer_id, state=HIDDEN)
        self.master.after(GameViewer.FREQUENCY_OF_UPDATING_ANIMATIONS,
                          self._update_pointer)
    def _update_pointer_2(self):
        """Animate the second (tintable) pointer; same scheme as
        _update_pointer but using the pre-tinted pointer images."""
        if self._pointer_2_position is not None:
            angle = time.time()*GameViewer.POINTER_2_ROTATION_SPEED % 360
            image = self._pointer_images_by_color[self._pointer_2_color]
            image = image.rotate(angle, resample=Image.BICUBIC)
            size = GameViewer.POINTER_2_SIZE
            size = (size[0]*self._zoom,
                    size[1]*self._zoom)
            size = tuple(map(int, size))
            image = image.resize(size)
            # Keep a reference on self so Tk's image isn't GC'd.
            self._pointer_2_image = ImageTk.PhotoImage(image)
            self.itemconfig(self._pointer_2_id, state=NORMAL,
                            image=self._pointer_2_image)
            pos = self._to_screen_coordinate(self._pointer_2_position)
            x, y = self._to_image_position('pointer2', pos)
            self.coords(self._pointer_2_id, x, y)
        else:
            self.itemconfig(self._pointer_2_id, state=HIDDEN)
        self.master.after(GameViewer.FREQUENCY_OF_UPDATING_ANIMATIONS,
                          self._update_pointer_2)
    def _draw_game(self, game, old_game):
        """Redraw the full scene for ``game``: ground imitation, ground
        image, trees (diffed against ``old_game``), mineral deposits,
        units with labels and action arrows, the debug grid — then
        restack the layers."""
        # draw imitation of ground
        size = self._game.game_map.size
        points = [(0, 0), (0, size[1]), (size[0], size[1]), (size[0], 0)]
        points = [self._to_screen_coordinate(pos) for pos in points]
        points = [coord for pos in points for coord in pos]
        self.create_polygon(points,
                            fill=GameViewer.COLOR_OF_GROUND_IMITATION,
                            tags=['game', 'non-cached', 'layer-1'])

        # draw ground
        self._draw_ground()

        # remove deleted trees
        # (trees are cached canvas items, so only the difference between
        # the old and the new game state is applied)
        tree_positions = [position for (position, obj)
                          in self._game.game_map._objs.items()
                          if isinstance(obj, Tree)]
        tree_positions = set(tree_positions)
        if old_game is not None:
            old_tree_positions = [
                position for (position, obj)
                in old_game.game_map._objs.items()
                if isinstance(obj, Tree)]
            old_tree_positions = set(old_tree_positions)
        else:
            old_tree_positions = set()
        deleted_trees = old_tree_positions - tree_positions
        for position in deleted_trees:
            self.delete(self._trees_ids_by_position[position])
            del self._trees_ids_by_position[position]

        # draw objects
        def draw_arrow(source, destination, type='red'):
            # Arrow between two adjacent fields; the unit delta indexes
            # direction.FROM_RAY.
            assert type in ('red', 'blue')
            delta = map(lambda (a, b): a-b, zip(destination,
                                                source))
            d = direction.FROM_RAY[tuple(delta)]
            direction_name = direction.TO_FULL_NAME[d]
            self._draw('arrow-%s-%s' % (type, direction_name),
                       source, layer=2)

        # Paint in increasing x+y so nearer objects overdraw farther
        # ones in the isometric projection.
        objs = sorted(self._game.game_map._objs.items(),
                      key=lambda (pos, obj): pos[0]+pos[1])
        for position, obj in objs:
            if isinstance(obj, Tree): # draw tree
                if position not in old_tree_positions:
                    name = 'tree%s' % obj.type
                    id_ = self._draw(name, position, layer=3, cached=True,
                                     extra_tags=['tree'])
                    self._trees_ids_by_position[position] = id_
                else:
                    pass
                self.tag_raise(self._trees_ids_by_position[position])

            elif isinstance(obj, MineralDeposit): # draw minerals
                if obj.minerals:
                    self._draw('minerals', position, layer=3)
                else:
                    self._draw('minerals-ex', position, layer=3)

            elif isinstance(obj, Unit): # draw unit
                unit = obj

                # build sprite name
                if unit.type.main_name == '4': # base
                    sprite_name = 'base'
                elif unit.type.main_name == '5': # miner
                    storage_state = 'loaded' if unit.minerals else 'empty'
                    direction_name = direction.TO_FULL_NAME[unit.direction]
                    sprite_name = 'miner-%s-%s' % \
                      (storage_state, direction_name)
                elif unit.type.main_name == '6': # tank
                    direction_name = direction.TO_FULL_NAME[unit.direction]
                    sprite_name = 'tank-%s' % direction_name
                else:
                    assert False, 'oops, unknown unit type %r' % unit.type

                # draw the unit
                self._draw(sprite_name, position, layer=3)

                # draw label for the unit
                x, y = self._to_screen_coordinate(position)
                color = '#' + "%02x%02x%02x" % unit.player.color
                font = self._get_font_for_current_zoom()
                # this operation costs a lot [optimization]
                self.create_text(x, y, fill=color, text=unit.player.name,
                                 font=font, tags=['layer-3', 'game', 'text',
                                                  'non-cached'],
                                 state=NORMAL if font else HIDDEN)

                # draw arrows indicating executing action (or fire explosion)
                if isinstance(unit.action, actions.MoveAction):
                    draw_arrow(unit.action.source,
                               unit.action.destination,
                               type='blue')
                if isinstance(unit.action, actions.GatherAction):
                    draw_arrow(unit.position,
                               unit.action.source)
                elif isinstance(unit.action, actions.StoreAction):
                    destination_unit = self._game.units_by_IDs[
                        unit.action.storage_ID]
                    destination = destination_unit.position
                    draw_arrow(unit.position, destination)
                elif isinstance(unit.action, actions.FireAction):
                    self._draw('explosion', unit.action.destination, layer=3)

        # draw lines (debug)
        def draw_grid():
            # One line per map row and per map column, in map coords
            # converted to screen coords.
            line_color = GameViewer.GRID_COLOR
            for x in xrange(0, game.game_map.size[1] + 1):
                start_position = (0, x)
                end_position = (game.game_map.size[0], x)
                start_position = self._to_screen_coordinate(start_position)
                end_position = self._to_screen_coordinate(end_position)
                self.create_line(*(start_position + end_position),
                                 fill=line_color,
                                 tag=['layer-1.5', 'game', 'non-cached'])
            for y in xrange(0, game.game_map.size[0] + 1):
                start_position = (y, 0)
                end_position = (y, game.game_map.size[1])
                start_position = self._to_screen_coordinate(start_position)
                end_position = self._to_screen_coordinate(end_position)
                self.create_line(*(start_position + end_position),
                                 fill=line_color,
                                 tag=['layer-1.5', 'game', 'non-cached'])

        # draw grid
        draw_grid()

        # sort layers
        self._sort_layers()
def _sort_layers(self):
self.tag_raise('layer-1')
self.tag_raise('layer-1.5')
self.tag_raise('layer-2')
self.tag_raise('layer-3')
self.tag_raise('layer-4')
self.tag_raise('interface')
def _draw_ground(self):
if self._ground_image_cache:
self._draw('ground', (0, 0), layer=1)
self.tag_lower('layer-1')
elif not self._compute_ground_image_flag:
target = lambda: self._compute_ground_image_asynch(self._queue)
thread = threading.Thread(target=target)
thread.start()
    def _compute_ground_image_asynch(self, queue):
        """Worker-thread entry point: render (and cache) the ground
        image, then signal the GUI thread through ``queue``."""
        self._get_ground_image()
        queue.put('ready')
        # Allow a future _draw_ground call to start a new computation.
        self._compute_ground_image_flag = False
    def _check_queue(self):
        """Poll the worker-thread queue from the GUI thread; when the
        ground image is ready, draw it. Reschedules itself."""
        if not self._queue.empty():
            command = self._queue.get_nowait()
            # 'ready' is the only message the worker ever sends.
            assert command == 'ready'
            self._draw_ground()
        self.master.after(GameViewer.FREQUENCY_OF_CHECKING_QUERY,
                          self._check_queue)
    @memoized
    def _gradient(self, align):
        """Return a diagonal grayscale gradient (PIL 'L' image) sized to
        one isometric tile, used as a blend mask between neighbouring
        ground tiles.

        ``align`` is 'ns' (north-south) or 'we' (west-east); the result
        is cached per alignment by @memoized.
        """
        assert align in ('ns', 'we')
        # 255x1 left-to-right ramp, stretched to a square, then rotated
        # onto the tile diagonal.
        gradient = Image.new('L', (255, 1))
        for x in range(255):
            gradient.putpixel((254-x, 0), x)
        gradient = gradient.resize((255, 255))
        if align == 'ns':
            gradient = gradient.rotate(45-180, expand=True)
        elif align == 'we':
            gradient = gradient.rotate(-45, expand=True)
        gradient = gradient.resize((GameViewer.TILE_WIDTH+2,
                                    GameViewer.TILE_HEIGHT+2))
        return gradient
    def _draw(self, name, position, layer, state=NORMAL,
              extra_tags=None, cached=False):
        """ Draw sprite with name 'name' at position 'position' in
        game coordinates.

        Returns the canvas item id. Items not marked ``cached`` carry
        the 'non-cached' tag and are wiped on every set_game call.
        """
        extra_tags = extra_tags or []
        tags = [name, 'layer-%s' % layer, 'game']
        if not cached:
            tags.append('non-cached')
        position = self._to_screen_coordinate(position)
        x, y = self._to_image_position(name, position)
        image = self._get_scaled_sprite(name)
        id_ = self.create_image(x, y, image=image, anchor=NW,
                                state=state, tags=tags+extra_tags)
        return id_
def _get_font_for_current_zoom(self):
size = int(12.2*self._zoom)
if size < 9:
if size >= 6:
return tkFont.Font(size=9)
else:
return None
else:
return tkFont.Font(size=size)
    @memoized
    def _get_image(self, name):
        """ Return (PIL.)Image instance. """
        # Loaded once per name (cached by @memoized) from the bundled
        # data directory.
        path = 'graphic/%s.png' % name
        image = Image.open(datafile_path(path))
        return image
    def _get_ground_tile(self, name, (x, y)):
        """Return the isometric tile for terrain texture ``name`` at map
        position (x, y): a square crop of the texture grid, rotated onto
        the diamond and scaled to tile size. Results are cached."""
        # The map position wraps around the texture's tile grid so the
        # pattern repeats seamlessly.
        x %= GameViewer.GROUND_TILES_IN_ROW
        y %= GameViewer.GROUND_TILES_IN_COLUMN
        key = (name, (x, y))
        if key not in self._ground_tiles_cache:
            start_point_x = x*GameViewer.GROUND_TILE_WIDTH
            start_point_y = y*GameViewer.GROUND_TILE_HEIGHT
            image = self._get_image(name) # '.'+name for testing
            image = image.convert('RGBA')
            box = (start_point_x, start_point_y,
                   GameViewer.GROUND_TILE_WIDTH+start_point_x,
                   GameViewer.GROUND_TILE_HEIGHT+start_point_y)
            croped = image.crop(box)
            rotated = croped.rotate(-45, expand=True,
                                    resample=Image.BICUBIC)
            scaled = rotated.resize((GameViewer.TILE_WIDTH+2,
                                     GameViewer.TILE_HEIGHT+2))
            self._ground_tiles_cache[key] = scaled
        return self._ground_tiles_cache[key]
    @log_on_enter('GameViewer._get_ground_image', mode='only time')
    def _get_ground_image(self):
        """ Return (PIL.)Image instance.

        Renders the whole map's ground once, blending the four terrain
        types meeting at each tile corner with diagonal gradients; the
        result is cached on the instance (may run on a worker thread).
        """
        if self._ground_image_cache is None: # then compute it and cache
            log('computing ground image')

            def blend(image_nw, image_ne, image_se, image_sw,
                      gradient_ns, gradient_we):
                # Composite the four corner tiles; skip work when
                # neighbours share the same tile object.
                if image_nw == image_ne == image_se == image_sw:
                    return image_nw
                image_w = (Image.composite(image_nw, image_sw, gradient_ns)
                           if image_nw != image_sw
                           else image_nw)
                image_e = (Image.composite(image_ne, image_se, gradient_ns)
                           if image_ne != image_se
                           else image_ne)
                return Image.composite(image_w, image_e, gradient_we)

            gradient_ns = self._gradient('ns')
            gradient_we = self._gradient('we')
            size = self._game.game_map.size
            # Isometric bounding box of the whole map.
            image_size = (GameViewer.TILE_WIDTH/2.0*(size[0]+size[1]+2),
                          GameViewer.TILE_HEIGHT/2.0*(size[0]+size[1]+2))
            result = Image.new('RGB', map(int, image_size))
            game_map = self._game.game_map
            for (x, y) in itertools.product(xrange(-1, size[0]),
                                            xrange(-1, size[1])):
                # Terrain types at the four corners around this tile;
                # missing ground_type falls back to type 0.
                ground_type_nw = game_map[x, y].ground_type or 0
                ground_type_ne = game_map[x+1, y].ground_type or 0
                ground_type_se = game_map[x+1, y+1].ground_type or 0
                ground_type_sw = game_map[x, y+1].ground_type or 0
                tile_name_nw = GameViewer.GROUND_TYPE_TO_NAME[ground_type_nw]
                tile_name_ne = GameViewer.GROUND_TYPE_TO_NAME[ground_type_ne]
                tile_name_se = GameViewer.GROUND_TYPE_TO_NAME[ground_type_se]
                tile_name_sw = GameViewer.GROUND_TYPE_TO_NAME[ground_type_sw]
                tile_nw = self._get_ground_tile(tile_name_nw, (x, y))
                tile_ne = self._get_ground_tile(tile_name_ne, (x, y))
                tile_se = self._get_ground_tile(tile_name_se, (x, y))
                tile_sw = self._get_ground_tile(tile_name_sw, (x, y))
                tile = blend(tile_nw, tile_ne, tile_se, tile_sw,
                             gradient_ns, gradient_we)
                # Paste position of the tile diamond in image coords.
                box = [GameViewer.TILE_WIDTH/2.0*(x-y+size[1]),
                       GameViewer.TILE_HEIGHT/2.0*(x+y+2)]
                result.paste(tile, tuple(map(int, box)), tile)
            self._ground_image_cache = result
        return self._ground_image_cache
    def _get_scaled_sprite(self, name):
        """ Return (PIL.)ImageTk scaled by self._zoom factor.

        The cache must be kept alive: Tkinter PhotoImages vanish from
        the canvas if Python garbage-collects them (see comment below).
        """
        # if cached, return cached value
        key = name
        image = self._scaled_images_cache.get(key, None)
        if image:
            return image
        # otherwise compute, cache and return
        if name == 'ground':
            image = self._get_ground_image()
        elif name.startswith('pointer-'):
            _, color = name.split('-')
            image = self._pointer_images_by_color[color]
        else:
            image = self._get_image(name)
        width, height = image.size
        # regular sprites get a 2 px margin; the pre-rendered ground
        # image does not (it already includes it per tile)
        delta = 0 if name == 'ground' else 2
        new_width, new_height = (int(width*self._zoom)+delta,
                                 int(height*self._zoom)+delta)
        if width != new_width: # resize if it's necessary
            image = image.resize((new_width, new_height), Image.NEAREST)
        image = ImageTk.PhotoImage(image)
        # no problem with bug connected with reference count --
        # caching keeps image reference
        self._scaled_images_cache[key] = image
        return image
    def _to_screen_coordinate(self, (x, y), delta=None, zoom=None):
        """ From game coordinates.

        'delta' is the view offset in game units, 'zoom' the scale;
        both default to the current view state.  The constants 32/16
        appear to be half of the 64x32 base tile -- TODO confirm they
        match TILE_WIDTH/TILE_HEIGHT. """
        zoom = zoom or self._zoom
        delta = delta or self._delta
        return (32*zoom*(x-y-2*delta[0]),
                16*zoom*(x+y-2*delta[1]))
    def _to_game_coordinate(self, (x, y), delta=None, zoom=None):
        """ From screen coordinates.

        Exact inverse of _to_screen_coordinate (64/32 are the doubled
        32/16 factors used there). """
        zoom = zoom or self._zoom
        delta = delta or self._delta
        return (x/64.0/zoom + y/32.0/zoom \
                + delta[0] + delta[1],
                -x/64.0/zoom + y/32.0/zoom \
                - delta[0] + delta[1])
    def _to_image_position(self, image_name, (x, y)):
        """ From screen coordinates.

        Translates a screen point to the NW anchor of the named image:
        each sprite has its own hot-spot offset, given in unzoomed
        pixels in the table below. """
        if image_name == 'ground':
            dx = GameViewer.TILE_WIDTH/2.0 * (self._game.game_map.size[1]+1)
            dy = GameViewer.TILE_HEIGHT/2.0
        else:
            # per-sprite (dx, dy) hot-spot offsets
            switch = {
                'tank' : (22, 0),
                'miner' : (18, 3),
                'base' : (31, 13),
                'minerals' : (20, 10),
                'tree1' : (10, 45),
                'tree2' : (20, 25),
                'tree3' : (20, 33),
                'tree4' : (15, 25),
                'tree5' : (18, 15),
                'tree6' : (22, 18),
                'arrow' : (32, 0),
                'pointer' : (GameViewer.POINTER_SIZE[0]/2, 4),
                'pointer2' : (GameViewer.POINTER_2_SIZE[0]/2, 0),
                'explosion' : (10, -5),}
            # variant names like 'pointer-red' map to their base entry
            first_part = image_name.split('-', 1)[0]
            dx, dy = switch[first_part]
        return x-dx*self._zoom, y-dy*self._zoom
    def _set_zoom(self, zoom, (XS, YS)):
        """ Set zoom. The point (XS, YS) in screen coordinate doesn't
        move."""
        # bound zoom
        zoom = max(zoom, GameViewer.MIN_ZOOM)
        zoom = min(zoom, GameViewer.MAX_ZOOM)
        if zoom == self._zoom:
            # zoom hasn't been changed
            return
        # It clears cache of scaled images. Due to reference count bug
        # all images will be removed from memory!
        # compute new self._delta and self._zoom
        xS, yS = self._to_game_coordinate((XS, YS))
        delta = [-XS/64.0/zoom + xS/2.0 - yS/2.0,
                 -YS/32.0/zoom + xS/2.0 + yS/2.0]
        # note: self._zoom must be updated BEFORE _clear_delta, which
        # reads it
        self._zoom, old_zoom = zoom, self._zoom
        cleared_delta = self._clear_delta(delta)
        self._delta = cleared_delta
        # how much the clamping moved the view
        delta_delta = (cleared_delta[0]-delta[0],
                       cleared_delta[1]-delta[1])
        # scale all images
        with log_on_enter('GameViewer._set_zoom: rescaling images',
                          mode='only time'):
            names = self._scaled_images_cache.keys()
            self._scaled_images_cache = {} # clear cache
            for name in names:
                image = self._get_scaled_sprite(name)
                self.itemconfigure(name, image=image)
        # scale all texts
        font = self._get_font_for_current_zoom()
        self.itemconfigure('text', font=font,
                           state = NORMAL if font else HIDDEN)
        # move all images
        factor = zoom/old_zoom
        self.scale('game', XS, YS, factor, factor)
        self.move('game',
                  -delta_delta[0]*64.0*self._zoom,
                  -delta_delta[1]*32.0*self._zoom)
    def _clear_delta(self, delta):
        """ Clamp the view offset 'delta' so that the centre of the
        screen stays over the map; return the (possibly corrected)
        delta. """
        if not self._game:
            return delta
        size = self.winfo_width(), self.winfo_height()
        center_of_screen = (size[0]/2, size[1]/2)
        map_width = self._game.game_map.size[0]
        map_height = self._game.game_map.size[1]
        pos = self._to_game_coordinate(center_of_screen, delta=delta)
        if (0 <= pos[0] < map_width and
            0 <= pos[1] < map_height):
            return delta
        # If we are here it means that the delta is invalid.
        # 1. Find valid position
        pos = (min(map_width, max(0, pos[0])),
               min(map_height, max(0, pos[1])))
        # 2. Find delta which fullfils the condition:
        # _to_screen_coordinate(pos) == center_of_screen
        delta = (-(center_of_screen[0]/32.0/self._zoom - pos[0] + pos[1])/2.0,
                 -(center_of_screen[1]/16.0/self._zoom - pos[0] - pos[1])/2.0)
        return delta
def _set_selection_position(self, value, force_emitting=False):
old_selection = self.selection_position
self.selection_position = value
if old_selection != value or force_emitting:
self.event_generate('<<selection-changed>>')
def _roll_wheel_callback(self, event):
if self._game:
delta = 0
if event.num == 5: # respond Linux wheel event
delta -= 1
elif event.num == 4: # -//-
delta += 1
else: # respond Windows wheel event
delta += event.delta // 120
factor = GameViewer.SCROLLING_SENSITIVITY**delta
self._set_zoom(self._zoom*factor, (event.x, event.y))
    def _mouse_motion_with_button_pressed_callback(self, event):
        """ Scroll the map by dragging with a mouse button pressed. """
        # scrolling map
        if self._game and self._last_mouse_position:
            with log_on_enter('moving everything', mode='only time'):
                dx, dy = (event.x - self._last_mouse_position[0],
                          event.y - self._last_mouse_position[1])
                delta = (self._delta[0] - dx/64.0/self._zoom,
                         self._delta[1] - dy/32.0/self._zoom)
                # clamp so the view cannot leave the map
                delta = self._clear_delta(delta)
                # recompute the on-screen shift from the clamped delta
                dx, dy = ((self._delta[0]-delta[0])*64.0*self._zoom,
                          (self._delta[1]-delta[1])*32.0*self._zoom)
                self._delta = delta
                self.move('game', dx, dy)
        self._last_mouse_position = (event.x, event.y)
    def _mouse_motion_callback(self, event):
        """ Track the map field under the mouse and update the current
        selection (None when the pointer is off the map). """
        if not self._game:
            return
        # info about field/unit under mouse -- update corner text
        pos = self._to_game_coordinate((event.x, event.y))
        pos = tuple(map(lambda x: int(math.floor(x)), pos))
        if self._game.game_map[pos].valid_position:
            self._set_selection_position(pos)
        else:
            self._set_selection_position(None)
def _click_callback(self, event):
if self._game:
self._click_position = (event.x, event.y)
    def _double_click_callback(self, event):
        """ Emit the <<field-double-clicked>> virtual event. """
        if self._game:
            self.event_generate('<<field-double-clicked>>')
    def _release_callback(self, event):
        """ Finish a mouse gesture: if press and release happened at
        the very same point, treat it as a single click. """
        self._last_mouse_position = None
        if self._click_position:
            release_position = (event.x, event.y)
            if self._click_position == release_position:
                self._single_click_callback(event)
def _single_click_callback(self, event):
if self._game:
click_position = self._to_game_coordinate((event.x, event.y))
integer_click_position = map(lambda i: int(math.floor(i)),
click_position)
integer_click_position = tuple(integer_click_position)
# generate event even click position is outside map
self.event_generate('<<field-selected>>')
    def _resized_callback(self, event):
        """ Re-clamp the view and re-anchor the loading indicator after
        the widget has been resized. """
        # update delta
        delta = self._clear_delta(self._delta)
        dx, dy = ((self._delta[0]-delta[0])*64.0*self._zoom,
                  (self._delta[1]-delta[1])*32.0*self._zoom)
        self._delta = delta
        self.move('game', dx, dy)
        # update loading indicator's position
        self.coords(
            self._loading_indicator_id,
            self._loading_indicator_position[0],
            self._loading_indicator_position[1])
class Scrolled(Frame):
    """A frame pairing one widget with a vertical scrollbar.

    Example:
    >>> scroll = Scrolled(master)
    >>> label = Label(scroll, text='Label')
    >>> scroll.set_widget(label) # ==> label packed
    >>> scroll.pack()
    """
    def __init__(self, *args, **kwargs):
        Frame.__init__(self, *args, **kwargs)

    def set_widget(self, widget):
        """Pack 'widget' into the frame and wire it to a new vertical
        scrollbar on the right."""
        scrollbar = Scrollbar(self)
        scrollbar.pack(side=RIGHT, fill=Y)
        scrollbar.config(command=widget.yview)
        widget.configure(yscrollcommand=scrollbar.set)
        widget.pack(side=LEFT, fill=BOTH, expand=1)
class UnitInfoWindow(tkSimpleDialog.Dialog):
    """ Modal dialog showing a unit's program source together with the
    output of its last compilation and execution, and allowing the
    user to edit or replace the program.

    Layout: program editor (language list + code textarea) on the left;
    compilation and execution reports on the right. """
    # selectable "programs": no program, the built-in star program, or
    # one entry per supported language
    LANGUAGES = [None, STAR_PROGRAM]
    LANGUAGES += (lang for lang in Language.ALL)
    # display names, index-aligned with LANGUAGES
    LANGUAGE_NAMES = tuple(('brak programu' if lang is None else
                            'star program' if lang is STAR_PROGRAM else
                            Language.TO_NAME[lang])
                           for lang in LANGUAGES)
    FONT_ATTRS = {
        'family':'Courier New',
        'size':8,
    }
    # the code editor uses a slightly bigger font than the report areas
    CODE_FONT_ATTRS = dict(FONT_ATTRS)
    CODE_FONT_ATTRS['size'] = 10
    def __init__(self, master, program,
                 maybe_compilation_status,
                 maybe_run_status,
                 ok_callback):
        """ 'program' is None, STAR_PROGRAM or a Program instance; the
        two status arguments may be None when no information is
        available.  'ok_callback' receives the (possibly edited)
        program when the dialog is accepted. """
        self._program = program
        self._maybe_compilation_status = maybe_compilation_status
        self._maybe_run_status = maybe_run_status
        self._ok_callback = ok_callback
        # Dialog.__init__ builds the body and runs the dialog modally
        tkSimpleDialog.Dialog.__init__(self, master)
    def buttonbox(self):
        tkSimpleDialog.Dialog.buttonbox(self)
        self.unbind('<Return>') # so we can use enter in code textarea
    def body(self, master):
        """ Build the dialog body (called by tkSimpleDialog.Dialog). """
        left_box = Frame(master)
        separator = Frame(master, width=2, bd=1, relief=SUNKEN)
        right_box = Frame(master)
        self._create_program_editor_part(left_box)
        self._create_compilation_part(right_box)
        self._add_horizontal_separator(right_box)
        self._create_execution_part(right_box)
        left_box.pack(side=LEFT, fill=BOTH, expand=1)
        separator.pack(side=LEFT, fill=Y, padx=15, pady=15)
        right_box.pack(side=RIGHT, fill=BOTH, expand=1)
        master.pack(fill=BOTH, expand=1)
        self.geometry('800x600')
        self.attributes('-fullscreen', '1')
    def _add_horizontal_separator(self, master):
        separator = Frame(master, height=2, bd=1, relief=SUNKEN)
        separator.pack(fill=X, padx=15, pady=15)
    def _create_program_editor_part(self, master):
        """ Left pane: language chooser plus code editor. """
        box = Frame(master)
        self._create_language_label(box)
        self._create_language_listbox(box)
        box.pack(fill=BOTH)
        self._create_code_label(master)
        self._create_code_textarea(master)
    def _create_language_label(self, master):
        self._language_label = Label(master, text='Język: ')
        self._language_label.pack(side=LEFT)
    def _create_language_listbox(self, master):
        """ Scrollable single-select list of languages; preselects the
        entry matching the unit's current program. """
        scroll = Scrolled(master)
        language_list = StringVar(value=UnitInfoWindow.LANGUAGE_NAMES)
        self._language_listbox = Listbox(scroll,
            selectmode=SINGLE,
            height=5,
            width=20,
            exportselection=0, # to keep selection highlighted
            listvariable=language_list)
        # preselect: a Program maps to its language, otherwise the
        # program itself (None / STAR_PROGRAM) is the LANGUAGES entry
        language_index = UnitInfoWindow.LANGUAGES.index(
            self._program.language
            if isinstance(self._program, Program)
            else self._program)
        self._language_listbox.select_set(language_index)
        scroll.set_widget(self._language_listbox)
        scroll.pack(side=LEFT, fill=BOTH, expand=1)
    def _create_code_label(self, master):
        self._code_label = Label(master, text='Kod: ', anchor=W)
        self._code_label.pack(fill=BOTH)
    def _create_code_textarea(self, master):
        """ Editable source-code area, prefilled with the current
        program's code (empty for no program / star program). """
        scroll = Scrolled(master)
        self._code_textarea = Text(
            scroll, height=1, width=1,
            font=tkFont.Font(**UnitInfoWindow.CODE_FONT_ATTRS))
        text = (""
                if self._program in (None, STAR_PROGRAM)
                else self._program.code)
        self._code_textarea.insert('1.0', text)
        scroll.set_widget(self._code_textarea)
        scroll.pack(fill=BOTH, expand=1)
    def _create_compilation_part(self, master):
        """ Upper-right pane: compilation report (stdout/stderr). """
        self._create_compilation_label(master)
        box = Frame(master)
        self._create_compilation_output_label(box)
        self._create_compilation_error_output_label(box)
        box.pack(fill=BOTH)
        box = Frame(master)
        self._create_compilation_output_textarea(box)
        self._create_compilation_error_output_textarea(box)
        box.pack(fill=BOTH, expand=1)
    def _create_compilation_label(self, master):
        # summary line: execution time and whether it was killed
        text = u"Kompilacja: "
        if self._maybe_compilation_status is None:
            text += u"brak informacji"
        else:
            text += u"czas %.2f s" % self._maybe_compilation_status.execution_time
            if self._maybe_compilation_status.killed:
                text += u" (zabity)"
            text += u"."
        self._compilation_label = Label(master, text=text, anchor=W)
        self._compilation_label.pack(fill=BOTH)
    def _create_compilation_output_label(self, master):
        label = Label(master, text="Standardowe wyjście: ")
        label.pack(side=LEFT, fill=BOTH, expand=1)
    def _create_compilation_error_output_label(self, master):
        label = Label(master, text="Standardowe wyjście błędów: ")
        label.pack(side=LEFT, fill=BOTH, expand=1)
    def _create_compilation_output_textarea(self, master):
        """ Read-only area with the compiler's standard output. """
        scroll = Scrolled(master)
        self._compilation_output_area = Text(
            scroll, height=1, width=1,
            font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
        text = (self._maybe_compilation_status.output
                if self._maybe_compilation_status
                else "")
        self._compilation_output_area.insert('1.0', text)
        self._compilation_output_area.configure(state=DISABLED)
        scroll.set_widget(self._compilation_output_area)
        scroll.pack(side=LEFT, fill=BOTH, expand=1)
    def _create_compilation_error_output_textarea(self, master):
        """ Read-only area with the compiler's standard error. """
        scroll = Scrolled(master)
        self._compilation_error_output_area = Text(
            scroll, height=1, width=1,
            font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
        text = (self._maybe_compilation_status.error_output
                if self._maybe_compilation_status
                else "")
        self._compilation_error_output_area.insert('1.0', text)
        self._compilation_error_output_area.configure(state=DISABLED)
        scroll.set_widget(self._compilation_error_output_area)
        scroll.pack(side=LEFT, fill=BOTH, expand=1)
    def _create_execution_part(self, master):
        """ Lower-right pane: execution report (stdin/stdout/stderr). """
        self._create_execution_label(master)
        self._create_execution_input_label(master)
        self._create_execution_input_area(master)
        box = Frame(master)
        self._create_execution_output_label(box)
        self._create_execution_error_output_label(box)
        box.pack(fill=BOTH)
        box = Frame(master)
        self._create_execution_output_textarea(box)
        self._create_execution_error_output_textarea(box)
        box.pack(fill=BOTH, expand=1)
    def _create_execution_label(self, master):
        # summary line: execution time and whether it was killed
        text = u"Wykonanie: "
        if self._maybe_run_status is None:
            text += u"program nie został wykonany"
        else:
            text += u"czas %.2f s" % self._maybe_run_status.execution_time
            if self._maybe_run_status.killed:
                text += u" (zabity)"
            text += u"."
        self._execution_label = Label(master, text=text, anchor=W)
        self._execution_label.pack(fill=BOTH)
    def _create_execution_input_label(self, master):
        label = Label(master, text='Standardowe wejście: ')
        label.pack(fill=BOTH)
    def _create_execution_input_area(self, master):
        """ Read-only area with the input fed to the program. """
        scroll = Scrolled(master)
        self._execution_input_area = Text(
            scroll, height=1, width=1,
            font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
        text = (self._maybe_run_status.input
                if self._maybe_run_status
                else "")
        self._execution_input_area.insert('1.0', text)
        self._execution_input_area.configure(state=DISABLED)
        scroll.set_widget(self._execution_input_area)
        scroll.pack(fill=BOTH, expand=1)
    def _create_execution_output_label(self, master):
        label = Label(master, text="Standardowe wyjście: ")
        label.pack(side=LEFT, fill=BOTH, expand=1)
    def _create_execution_error_output_label(self, master):
        label = Label(master, text="Standardowe wyjście błędów: ")
        label.pack(side=LEFT, fill=BOTH, expand=1)
    def _create_execution_output_textarea(self, master):
        """ Read-only area with the program's standard output. """
        scroll = Scrolled(master)
        self._execution_output_area = Text(
            scroll, height=1, width=1,
            font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
        text = (self._maybe_run_status.output
                if self._maybe_run_status
                else "")
        self._execution_output_area.insert('1.0', text)
        self._execution_output_area.configure(state=DISABLED)
        scroll.set_widget(self._execution_output_area)
        scroll.pack(side=LEFT, fill=BOTH, expand=1)
    def _create_execution_error_output_textarea(self, master):
        """ Read-only area with the program's standard error. """
        scroll = Scrolled(master)
        self._execution_error_output_area = Text(
            scroll, height=1, width=1,
            font=tkFont.Font(**UnitInfoWindow.FONT_ATTRS))
        text = (self._maybe_run_status.error_output
                if self._maybe_run_status
                else "")
        self._execution_error_output_area.insert('1.0', text)
        self._execution_error_output_area.configure(state=DISABLED)
        scroll.set_widget(self._execution_error_output_area)
        scroll.pack(side=LEFT, fill=BOTH, expand=1)
    def apply(self):
        """ Accept: build the chosen program and hand it to the
        ok_callback (called by tkSimpleDialog.Dialog on OK). """
        language_index = int(self._language_listbox.curselection()[0])
        language = UnitInfoWindow.LANGUAGES[language_index]
        program = (language if language in (None, STAR_PROGRAM) else
                   Program(language, self._code_textarea.get('1.0', END)))
        self._ok_callback(program)
class ClientApplication(Frame):
CONFIGURATION_FILE = 'configuration.ini'
FREQUENCY_OF_CHECKING_QUERY = 50 # ms
TIME_BETWEEN_TICS = 100 # ms
MAPS_DIRECTORY = 'maps'
GAMES_DIRECTORY = 'games'
MENU_GAME_LABEL = "Gra"
NEW_GAME_LABEL = "Stwórz nową grę..."
SAVE_GAME_LABEL = "Zapisz grę"
LOAD_GAME_LABEL = "Wczytaj grę..."
ADD_PLAYER_LABEL = "Dodaj nowego gracza..."
SET_PROGRAM_LABEL = "Ustaw program zaznaczonej jednostce..."
SET_STAR_PROGRAM_LABEL = "Ustaw star program zaznaczonej jednostce"
DELETE_PROGRAM_LABEL = "Usuń program zaznaczonej jednostce"
TIC_LABEL = "Symuluj jedną turę gry"
TIC_IN_LOOP_LABEL = "Symulacja gry w pętli"
QUIT_LABEL = "Wyjdź"
MENU_ABOUT_LABEL = "O grze"
CHOOSE_MAP_FILE = 'Wybierz mapę'
CHOOSE_DIRECTORY_FOR_NEW_GAME = "Wybierz folder dla nowej gry"
TITLE_CREATE_NEW_GAME = 'Stwórz nową grę'
CANNOT_CREATE_NEW_GAME = 'Nie można stworzyć nowej gry.'
CANNOT_OPEN_FILE = ('Nie można otworzyć pliku '
'(być może nie masz wystarczających uprawnień).')
MAP_FILE_IS_CORRUPTED = 'Plik mapy jest uszkodzony.'
CANNOT_CREATE_FOLDER = 'Nie można utworzyć folderu gry.'
FILE_WITH_THE_SAME_NAME_EXISTS = ('Nie można utworzyć folderu gry ponieważ '
'istnieje już plik o takiej samej nazwie.')
IO_ERROR_DURING_READING = 'Wystąpił błąd podczas czytania pliku.'
TITLE_SAVE_GAME = 'Zapisz grę'
CANNOT_SAVE_GAME = 'Nie można zapisać gry.'
IO_ERROR_DURING_SAVING = 'Wystąpił błąd podczas zapisywania pliku.'
TITLE_LOAD_GAME = 'Wczytaj grę'
CANNOT_LOAD_GAME = 'Nie można wczytać gry.'
TITLE_CREATE_PLAYER = 'Dodaj nowego gracza'
ENTER_NEW_PLAYER_NAME = 'Wpisz nazwę nowego gracza.'
TITLE_CREATE_PLAYER_CHOOSE_COLOR = 'Wybierz kolor dla nowego gracza.'
CANNOT_CREATE_PLAYER = 'Nie można dodać nowego gracza.'
NO_FREE_START_POSITION = \
'Wszystkie pozycje startowe na mapie są już zajęte.'
TITLE_CHOOSE_SOURCE_FILE = 'Wybierz plik źródłowy'
TITLE_SET_PROGRAM = 'Ustaw program'
CANNOT_SET_PROGRAM = 'Nie można ustawić programu.'
UNKNOWN_SOURCE_FILE_EXTENSION = 'Nieznane rozszerzenie pliku źródłowego.'
TITLE_ARE_YOU_SURE = 'Czy jesteś pewien?'
WARNING_CURRENT_GAME_WILL_BE_LOST = \
'Czy jesteś pewien? Aktualna gra zostanie bezpowrotnie utracona.'
TITLE_QUIT_PROGRAM = 'Wyjdź z programu'
QUIT_PROGRAM_QUESTION = 'Czy na pewno chcesz wyjść z programu?'
ABOUT_TITLE = 'O grze'
ABOUT_CONTENT = ('Scriptcraft - gra programistyczna.\n\n'
'Właścicielem grafiki i map jest Marek Szykuła. '
'Nie mogą być one kopiowane ani rozpowszechniane. \n\n'
'Kod źródłowy jest na licencji GPLv3 '
'i może być rozpowszechniany i kopiowany.')
TITLE_INVALID_CONFIGURATION_FILE = 'Niepoprawny plik konfiguracji'
INVALID_CONFIGURATION_FILE = ('Nie można wczytać ustawień z pliku '
'konfiguracji. Aplikacja zostanie '
'zamknięta. Sprawdź zawartość pliku "' + \
CONFIGURATION_FILE + \
'".')
DIRECTION_TO_NAME = {
direction.N : u'północ',
direction.W : u'zachód',
direction.S : u'południe',
direction.E : u'wschód',
}
MAP_FILE_TYPES = (
('Plik mapy', '*.map'),
('Wszystkie pliki', '*')
)
DEFAULT_PLAYER_COLORS = (
(178, 146, 0),
(128, 0, 0),
(0, 255, 220),
(255, 0, 255),
(0, 0, 255),
(0, 200, 0),
(255, 255, 0),
(255, 0, 0), # the last one is get as the first one
)
# initializing --------------------------------------------------------
    def __init__(self, master):
        """ Build the application frame, start the queue poller and,
        with '--test' on the command line, load a testing game. """
        Frame.__init__(self, master)
        self._tic_in_loop = BooleanVar(False)
        self._init_gui()
        self._game = None
        self._game_session = None
        # background work signals completion through this queue,
        # polled periodically by _check_queue
        self._queue = Queue()
        self._master = master
        self._pointed_unit_id = None
        self._check_queue()
        self._load_configuration_file()
        if len(sys.argv) == 2 and sys.argv[1].lower() == '--test':
            self._load_testing_game()
    @log_on_enter('load game for testing')
    def _load_testing_game(self):
        """ Set up an in-memory game for development/testing ('--test'):
        a session in scriptcraft/.tmp with some units programmed. """
        filename = datafile_path('maps/small.map')
        # create game_map
        #game_map = load_game_map(open(filename, 'r').read())
        def generate_simple_map():
            # Builds a 96x96 map with random ground.  NOTE: 'p' is 0.0
            # on every path, so tree generation is effectively disabled.
            import random
            size = 96
            game_map = GameMap((size, size), [(10, 10), (53, 10), (10, 53), (53, 53)])
            number_of_trees = 0
            for x in xrange(size):
                for y in xrange(size):
                    p = 0.0
                    if (6 <= x <= 14 or 49 <= x <= 57 or
                        6 <= y <= 14 or 49 <= y <= 57):
                        p = 0.0
                    if (random.random() < p):
                        number_of_trees += 1
                        game_map[x, y].place_object(Tree())
                    game_map[x, y].change_ground(random.randint(1, 8))
            log('map size: %d, number of fields: %d' % (size, size**2))
            log('number of trees: %d' % number_of_trees)
            return game_map
        game = None
        #game = Game(generate_simple_map(), DEFAULT_GAME_CONFIGURATION)
        # create game and game session
        session = GameSession(
            directory='scriptcraft/.tmp',
            system_configuration=self.system_configuration,
            game=game)
        self.set_game_session(session)
        game = session.game
        # modify game (set programs)
        def set_program(unit_id, filename):
            program = Program(Language.PYTHON,
                              open('scriptcraft/.tmp/'+filename).read())
            game.set_program(game.units_by_IDs[unit_id], program)
        try:
            set_program(8, 'build_tank.py')
            for i in xrange(3,7):
                set_program(i, 'move_randomly.py')
            for i in xrange(9,13):
                set_program(i, 'move_randomly.py')
        except Exception:
            # best effort: missing source files must not abort the test game
            log_exception('cannot set program for testing game')
        self._set_game(game)
    def _check_queue(self):
        """ Poll the inter-thread queue; when a background tic reports
        'ready', refresh the view and either schedule the next tic (if
        looping) or hide the loading indicator.  Re-arms itself. """
        if not self._queue.empty():
            command = self._queue.get_nowait()
            # 'ready' is the only message the worker is expected to send
            assert command == 'ready'
            self._set_game(self._game_session.game)
            if self._tic_in_loop.get():
                self.master.after(ClientApplication.TIME_BETWEEN_TICS,
                                  self._tic)
            else:
                self._game_viewer.show_loading_indicator(False)
        self.master.after(ClientApplication.FREQUENCY_OF_CHECKING_QUERY,
                          self._check_queue)
    def _load_configuration_file(self):
        """ Read the system configuration; on any parse/IO failure warn
        the user and shut the whole application down. """
        try:
            filename = datafile_path(ClientApplication.CONFIGURATION_FILE)
            self.system_configuration = SystemConfiguration(filename)
        except (IOError, ValueError, ConfigParser.Error) as ex:
            log_exception('invalid configuration file')
            self._warning(
                ClientApplication.TITLE_INVALID_CONFIGURATION_FILE,
                ClientApplication.INVALID_CONFIGURATION_FILE
            )
            global root
            root.destroy()
    def _init_gui(self):
        """ Build the main window: game viewer, menubar, shortcuts. """
        self.pack(expand=YES, fill=BOTH)
        global root
        # route the window-manager close button through our quit dialog
        root.protocol("WM_DELETE_WINDOW", self._quit_callback)
        self._game_viewer = GameViewer(self)
        self._game_viewer.bind('<<selection-changed>>',
                               self._selection_changed_callback)
        self._game_viewer.bind('<<field-selected>>',
                               self._field_selected_callback)
        self._game_viewer.bind('<Button-3>',
                               self._command_ordered_callback)
        self._game_viewer.bind('<<field-double-clicked>>',
                               self._field_double_clicked_callback)
        self._create_menubar()
        self._create_keyboard_shortcuts()
    def _create_menubar(self):
        """ Build the 'Gra' menu and the 'O grze' entry.

        Game-dependent items start DISABLED; presumably they are
        enabled elsewhere once a game is loaded -- confirm against
        _refresh_game_menu_items_state. """
        menubar = Menu(self)
        self._game_menu = Menu(menubar, tearoff=0)
        menubar.add_cascade(label=ClientApplication.MENU_GAME_LABEL,
                            menu=self._game_menu)
        self._game_menu.add_command(
            label=ClientApplication.NEW_GAME_LABEL,
            command=self._new_game_callback)
        self._game_menu.add_command(
            label=ClientApplication.SAVE_GAME_LABEL,
            command=self._save_game_callback,
            state=DISABLED)
        self._game_menu.add_command(
            label=ClientApplication.LOAD_GAME_LABEL,
            command=self._load_game_callback)
        self._game_menu.add_separator()
        self._game_menu.add_command(
            label=ClientApplication.ADD_PLAYER_LABEL,
            command=self._add_player_callback,
            state=DISABLED)
        self._game_menu.add_command(
            label=ClientApplication.DELETE_PROGRAM_LABEL,
            command=self._delete_program_callback,
            state=DISABLED)
        self._game_menu.add_command(
            label=ClientApplication.SET_PROGRAM_LABEL,
            command=self._set_program_callback,
            state=DISABLED)
        self._game_menu.add_command(
            label=ClientApplication.SET_STAR_PROGRAM_LABEL,
            command=self._set_star_program_callback,
            state=DISABLED)
        self._game_menu.add_command(
            label=ClientApplication.TIC_LABEL,
            command=self._tic_callback,
            state=DISABLED)
        # checkbutton mirrors the _tic_in_loop BooleanVar; switch=False
        # because Tk has already toggled the variable before the callback
        self._game_menu.add_checkbutton(
            label=ClientApplication.TIC_IN_LOOP_LABEL,
            command=lambda: self._tic_in_loop_callback(switch=False),
            state=DISABLED,
            variable=self._tic_in_loop)
        self._game_menu.add_separator()
        self._game_menu.add_command(
            label=ClientApplication.QUIT_LABEL,
            command=self._quit_callback)
        menubar.add_command(label=ClientApplication.MENU_ABOUT_LABEL,
                            command=self._about_callback)
        global root
        root.config(menu=menubar)
def _create_keyboard_shortcuts(self):
# new game
self._game_menu.entryconfigure(
ClientApplication.NEW_GAME_LABEL,
accelerator="Ctrl+N")
args = ("<Control-n>", lambda w: self._new_game_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# save game
self._game_menu.entryconfigure(
ClientApplication.SAVE_GAME_LABEL,
accelerator="Ctrl+S")
args = ("<Control-s>", lambda w: self._save_game_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# load game
self._game_menu.entryconfigure(
ClientApplication.LOAD_GAME_LABEL,
accelerator="Ctrl+O")
args = ("<Control-o>", lambda w: self._load_game_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# add player
self._game_menu.entryconfigure(
ClientApplication.ADD_PLAYER_LABEL,
accelerator="Ctrl+A")
args = ("<Control-a>", lambda w: self._add_player_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# tic item
self._game_menu.entryconfigure(
ClientApplication.TIC_LABEL,
accelerator="T")
args = ("<t>", lambda w: self._tic_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# tic in loop item
self._game_menu.entryconfigure(
ClientApplication.TIC_IN_LOOP_LABEL,
accelerator='spacja')
args = ("<space>", lambda w: self._tic_in_loop_callback(switch=True))
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# quit program
self._game_menu.entryconfigure(
ClientApplication.QUIT_LABEL,
accelerator="Ctrl+Q")
args = ("<Control-q>", lambda w: self._quit_callback())
self.bind(*args)
self._game_viewer.bind(*args)
self._game_menu.bind(*args)
# callbacks ----------------------------------------------------------
@log_on_enter('use case: new game', lvl='info')
def _new_game_callback(self):
if not self._ask_if_delete_current_game_if_exists():
return
map_filename = tkFileDialog.askopenfilename(
title=ClientApplication.CHOOSE_MAP_FILE,
filetypes=ClientApplication.MAP_FILE_TYPES,
initialdir=datafile_path(ClientApplication.MAPS_DIRECTORY),
parent=self,
)
if not map_filename:
return
directory = tkFileDialog.askdirectory(
title=ClientApplication.CHOOSE_DIRECTORY_FOR_NEW_GAME,
initialdir=datafile_path(ClientApplication.GAMES_DIRECTORY),
mustexist=False,
parent=self,
)
if is_it_py2exe_distribution():
directory = directory.replace('/', '\\')
if not directory:
return
try:
stream = open(map_filename, 'r')
except IOError as ex:
log_exception('io error during opening stream to map file')
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.CANNOT_OPEN_FILE)
return
if not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
log_exception('cannot create directory for a game')
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.CANNOT_CREATE_FOLDER)
return
else:
if not os.path.isdir(directory):
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.FILE_WITH_THE_SAME_NAME_EXISTS)
return
try:
game_map = load_game_map(stream.read())
except InvalidGameMapData as ex:
log_exception('invalid game map data')
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.MAP_FILE_IS_CORRUPTED)
except IOError as ex:
log_exception('io error during loading map file')
self._warning(ClientApplication.TITLE_CREATE_NEW_GAME,
ClientApplication.CANNOT_CREATE_NEW_GAME + ' ' + \
ClientApplication.IO_ERROR_DURING_READING)
else:
game = Game(game_map, DEFAULT_GAME_CONFIGURATION)
game_session = GameSession(directory,
self.system_configuration,
game=game)
self.set_game_session(game_session)
finally:
stream.close()
    @log_on_enter('use case: save game', mode='time', lvl='info')
    def _save_game_callback(self):
        """ Persist the current game session, warning on I/O failure. """
        try:
            self._game_session.save()
        except IOError as ex:
            log_exception('io error duing saving game')
            self._warning(ClientApplication.TITLE_SAVE_GAME,
                          ClientApplication.CANNOT_SAVE_GAME + ' ' + \
                          ClientApplication.IO_ERROR_DURING_SAVING)
    @log_on_enter('use case: load game', lvl='info')
    def _load_game_callback(self):
        """ Load a saved game session from a user-chosen folder,
        replacing the current one after confirmation. """
        if not self._ask_if_delete_current_game_if_exists():
            return
        directory = tkFileDialog.askdirectory(
            title=ClientApplication.TITLE_LOAD_GAME,
            initialdir=datafile_path(ClientApplication.GAMES_DIRECTORY),
            mustexist=True,
            parent=self,
        )
        if is_it_py2exe_distribution():
            # Tk returns forward slashes even on Windows
            directory = directory.replace('/', '\\')
        if not directory:
            return
        try:
            game_session = GameSession(directory, self.system_configuration)
        except IOError as ex:
            log_exception('io error during loading game')
            self._warning(ClientApplication.TITLE_LOAD_GAME,
                          ClientApplication.CANNOT_LOAD_GAME + ' ' + \
                          ClientApplication.IO_ERROR_DURING_READING)
        except pickle.UnpicklingError as ex:
            log_exception('pickle error during loading game')
            self._warning(ClientApplication.TITLE_LOAD_GAME,
                          ClientApplication.CANNOT_LOAD_GAME + ' ' + \
                          ClientApplication.MAP_FILE_IS_CORRUPTED)
        else:
            self.set_game_session(game_session)
    @log_on_enter('use case: add player', lvl='info')
    def _add_player_callback(self):
        """ Ask for a name, pick a color and add a new player with
        starting units; warns when no start position is free. """
        if self._game is None:
            return
        name = tkSimpleDialog.askstring(
            title=ClientApplication.TITLE_CREATE_PLAYER,
            prompt=ClientApplication.ENTER_NEW_PLAYER_NAME,
            parent=self)
        if name is None:
            return
        color = self._reserve_color()
        try:
            self._game_session.new_player_with_units(name, color)
        except NoFreeStartPosition:
            self._warning(ClientApplication.TITLE_CREATE_PLAYER,
                          ClientApplication.CANNOT_CREATE_PLAYER + ' ' + \
                          ClientApplication.NO_FREE_START_POSITION)
        else:
            # refresh the view so the new units appear
            self._set_game(self._game)
@log_on_enter('use case: set program', lvl='info')
def _set_program_callback(self):
stream = tkFileDialog.askopenfile(
title=ClientApplication.TITLE_CHOOSE_SOURCE_FILE,
mode='r',
parent=self)
if stream is None:
return
filename = stream.name
if filename.endswith('.cpp'):
language = Language.CPP
elif filename.endswith('.py'):
language = Language.PYTHON
else:
self._warning(ClientApplication.TITLE_SET_PROGRAM,
ClientApplication.CANNOT_SET_PROGRAM + ' ' + \
ClientApplication.UNKNOWN_SOURCE_FILE_EXTENSION)
return
field = self._game.game_map[self._game_viewer._pointer_position]
unit = field.maybe_object
program = Program(language=language, code=stream.read())
self._game_session.set_program(unit, program)
    @log_on_enter('use case: set star program', lvl='info')
    def _set_star_program_callback(self):
        """ Attach the built-in star program to the unit under the
        pointer. """
        field = self._game.game_map[self._game_viewer._pointer_position]
        unit = field.maybe_object
        self._game_session.set_program(unit, STAR_PROGRAM)
    @log_on_enter('use case: delete program', lvl='info')
    def _delete_program_callback(self):
        """ Remove the program from the unit under the pointer. """
        field = self._game.game_map[self._game_viewer._pointer_position]
        unit = field.maybe_object
        self._game_session.set_program(unit, None)
    @log_on_enter('use case: tic', mode='time', lvl='info')
    def _tic_callback(self):
        """ Simulate a single game turn (menu command / 'T' key). """
        self._tic()
    @log_on_enter('use case: switch tic in loop', lvl='info')
    def _tic_in_loop_callback(self, switch):
        """ When 'switch' is true toggle the loop flag (keyboard path;
        the menu checkbutton toggles the variable itself and passes
        switch=False); then (re)start ticking if the flag is on. """
        if switch:
            self._tic_in_loop.set(not self._tic_in_loop.get())
        if self._tic_in_loop.get():
            self._tic()
    @log_on_enter('use case: quit', lvl='info')
    def _quit_callback(self):
        """ Ask for confirmation, then tear down the root window. """
        if not self._ask_if_quit_program():
            return
        global root
        root.destroy()
    @log_on_enter('use case: about game', lvl='info')
    def _about_callback(self):
        """ Show the 'about' message box. """
        tkMessageBox.showinfo(
            title=ClientApplication.ABOUT_TITLE,
            message=ClientApplication.ABOUT_CONTENT,
            parent=self)
def _selection_changed_callback(self, event):
    """Refresh the corner description text and the secondary pointer
    after the hovered/selected map field changed.

    Bug fixes versus the previous version:
    - the unknown-unit-type assert referenced an undefined name ``unit``
      (raising NameError instead of the intended assertion message);
      it now reports ``obj.type``;
    - an unrecognised command type used to leave ``command_info`` unbound
      and crash with NameError; it now fails fast with an explicit
      assert, matching the style of the other unknown-value branches.
    """
    # first, update the corner text
    pos = self._game_viewer.selection_position
    if pos is None:
        text = u" "
    else:
        field = self._game.game_map[pos]
        obj = field.maybe_object
        if obj is None:
            obj_info = u""
        elif isinstance(obj, Tree):
            obj_info = u"Drzewa."
        elif isinstance(obj, MineralDeposit):
            obj_info = u"Złoża minerałów (%d jednostek minerałów)." % obj.minerals
        elif isinstance(obj, Unit):
            # type of the unit
            if obj.type.main_name == '4':  # base
                obj_info = u"Baza (%d minerałów)" % obj.minerals
            elif obj.type.main_name == '5':  # miner
                state = (u'pełny' if obj.minerals else u'pusty')
                obj_info = u"Zbieracz minerałów (%s)" % state
            elif obj.type.main_name == '6':  # tank
                obj_info = u"Czołg"
            else:
                # BUGFIX: was "% unit.type" -- 'unit' is not defined here
                assert False, 'oops, unknown unit type %r' % obj.type
            # player
            obj_info += u' gracza %s.' % obj.player.name
            # command
            if isinstance(obj.command, cmds.StopCommand):
                command_info = u'stop'
            elif isinstance(obj.command, cmds.MoveCommand):
                d = ClientApplication.DIRECTION_TO_NAME[obj.command.direction]
                command_info = u'idź na %s' % d
            elif isinstance(obj.command, cmds.ComplexMoveCommand):
                command_info = u'idź do (%d, %d)' \
                    % obj.command.destination
            elif isinstance(obj.command, cmds.ComplexGatherCommand):
                command_info = u'zbieraj minerały z (%d, %d)' \
                    % obj.command.destination
            elif isinstance(obj.command, cmds.FireCommand):
                command_info = u'ogień na (%d, %d)' \
                    % obj.command.destination
            elif isinstance(obj.command, cmds.ComplexAttackCommand):
                command_info = u'atak na (%d, %d)' \
                    % obj.command.destination
            elif isinstance(obj.command, cmds.BuildCommand):
                command_info = u'buduj "%s"' \
                    % obj.command.unit_type_name
            else:
                # BUGFIX: previously fell through with command_info unbound
                assert False, 'oops, unknown command %r' % obj.command
            obj_info += u' Komenda: %s.' % command_info
        else:
            assert False, 'oops, unknown object on map %r' % obj
        field_info = u"Pole (%d, %d)." % (pos[0], pos[1])
        text = u" ".join([field_info, obj_info])
    self._game_viewer.set_corner_text(text)
    self._refresh_game_menu_items_state()
    # second, update second pointer
    self._update_pointer_2()
def _field_selected_callback(self, event):
    """React to a click on the map: point at the clicked unit (remembering
    its ID) or clear the pointer."""
    pos = self._game_viewer.selection_position
    if pos is None:
        # click outside the map: hide the pointer; the remembered unit id
        # is intentionally left untouched (matches previous behaviour)
        self._game_viewer.set_pointer_position(None)
    else:
        clicked = self._game.game_map[pos].maybe_object
        is_unit = bool(clicked) and isinstance(clicked, Unit)
        self._pointed_unit_id = clicked.ID if is_unit else None
        self._game_viewer.set_pointer_position(pos if is_unit else None)
    self._update_pointer_2()
def _field_double_clicked_callback(self, event):
    # Double click on the pointed unit opens the unit-info window, which
    # lets the user inspect statuses and replace the unit's program.
    if self._game is None:
        return
    pos = self._game_viewer._pointer_position
    if pos is None:
        return
    unit = self._game.game_map[pos].maybe_object
    assert unit is not None
    # because self._game_viewer._pointer_position is set to sth other than
    # None only if there is an object on pointed field

    def ok_callback(program):
        # the unit may have died between opening the window and pressing
        # OK -- only set the program if it is still in the session
        if unit.ID in self._game_session.units_by_IDs:
            self._game_session.set_program(unit, program)

    window = UnitInfoWindow(self,
                            program=unit.program,
                            maybe_compilation_status=unit.maybe_last_compilation_status,
                            maybe_run_status=unit.maybe_run_status,
                            ok_callback=ok_callback)
def _command_ordered_callback(self, event):
    # Issue the implied command (move/attack/gather) from the pointed
    # (clicked) unit towards the field currently under the cursor.  The
    # command is delivered as a one-line Language.OUTPUT program.
    command = self._command_for_pointed_unit()
    clicked_pos = self._game_viewer._pointer_position
    if clicked_pos is None:
        return
    clicked_obj = self._game.game_map[clicked_pos].maybe_object
    pointed_pos = self._game_viewer.selection_position
    if clicked_obj is None or not isinstance(clicked_obj, Unit):
        return
    unit = clicked_obj
    if command == '':
        # no applicable command: clear by installing an empty program
        self._game_session.set_program(unit, Program(Language.OUTPUT, ''))
        return
    # render the command name into its textual wire form
    command = {
        'move': 'MOVE %(x)d %(y)d',
        'attack': 'ATTACK %(x)d %(y)d',
        'gather': 'GATHER %(x)d %(y)d',
    }[command]
    command = command % {'x': pointed_pos[0], 'y': pointed_pos[1]}
    self._game_session.set_program(unit, Program(Language.OUTPUT, command))
    # forget the unit and hide both pointers once the order is placed
    self._pointed_unit_id = None
    self._game_viewer.set_pointer_2_position(None)
    self._game_viewer.set_pointer_position(None)
def _update_pointer_2(self):
    """Show or hide the secondary pointer and colour it according to the
    command implied for the pointed unit."""
    command = self._command_for_pointed_unit()
    if not command:
        self._game_viewer.set_pointer_2_position(None)
        return
    self._game_viewer.set_pointer_2_position(
        self._game_viewer.selection_position)
    self._game_viewer.set_pointer_2_color(
        {'attack': 'red', 'gather': 'darkblue', 'move': 'green'}[command])
def _command_for_pointed_unit(self):
    """Return the command name implied by the pointed unit and the field
    under the cursor: 'move', 'attack' or 'gather'; '' when no unit is
    pointed or no command applies."""
    clicked_pos = self._game_viewer._pointer_position  # selected (clicked) field
    if clicked_pos is None:
        return ''
    pointed_unit = self._game.game_map[clicked_pos].maybe_object
    if not pointed_unit or not isinstance(pointed_unit, Unit):
        return ''
    hover_pos = self._game_viewer.selection_position  # field under cursor
    if hover_pos is None:
        return ''
    hover_obj = self._game.game_map[hover_pos].maybe_object
    unit_type = pointed_unit.type
    if unit_type.can_attack and unit_type.movable:
        # hovering over a friendly unit means 'move'; anything else is
        # a legitimate target, so 'attack'
        hovering_own_unit = (hover_obj is not None
                             and isinstance(hover_obj, Unit)
                             and hover_obj.player == pointed_unit.player)
        return 'move' if hovering_own_unit else 'attack'
    if (unit_type.has_storage and unit_type.movable
            and hover_obj is not None
            and isinstance(hover_obj, MineralDeposit)):
        return 'gather'
    if unit_type.movable and clicked_pos != hover_pos:
        return 'move'
    return ''
# other methods -------------------------------------------------------
def _tic(self):
    # Request one simulation step from the session; the result arrives
    # asynchronously through self._queue.  If a step is already running,
    # the session raises and we just log it.
    try:
        self._game_viewer.show_loading_indicator(True)
        self._game_session.tic(self._queue)
    except AlreadyExecuteGame as ex:
        log('already execute game')
@log_on_enter('set game session')
def set_game_session(self, game_session):
    # Swap the active session: reset UI state first (no game, looping
    # off, fresh result queue), then install the new session's game.
    self._game_session = game_session
    self._set_game(None)
    self._tic_in_loop.set(False)
    self._queue = Queue()
    if game_session:
        self._set_game(game_session.game)
def _set_game(self, game):
    """ Call it if game instance was changed and you want to make
    the application up to date."""
    # set game.free_colors -- carry the colour pool over from the old
    # game instance when possible, otherwise start from the defaults
    if game is not None and not hasattr(game, 'free_colors'):
        if self._game is None or not hasattr(self._game, 'free_colors'):
            game.free_colors = \
                list(ClientApplication.DEFAULT_PLAYER_COLORS)
        else:
            game.free_colors = self._game.free_colors
    # track pointed unit: re-point at the same unit (by ID) if it still
    # exists in the new game state
    self._game_viewer.set_pointer_position(None)
    if game and self._pointed_unit_id:
        unit = game.units_by_IDs.get(self._pointed_unit_id, None)
        if unit:
            self._game_viewer.set_pointer_position(unit.position)
    # other stuff
    self._game = game
    self._game_viewer.set_game(game)
    self._refresh_game_menu_items_state()
def _reserve_color(self):
    """Pop one of the predefined free colours, or make up a random RGB
    triple when the pool is exhausted."""
    if self._game.free_colors:
        return self._game.free_colors.pop()
    return tuple(random.randint(0, 255) for _ in range(3))
def _print_info_about_field_at(self, position):
    # Debug helper: dump the field at `position` (and any unit on it,
    # including its compilation/run status) to stdout.
    field = self._game.game_map[position]
    print "\nSelected position: (%d, %d)" % position
    print "Field: %s" % str(field)
    if isinstance(field.maybe_object, Unit):
        unit = field.maybe_object
        print "Unit: %s" % (unit,)
        print "Compilation: %s" % (unit.maybe_last_compilation_status,)
        print "Executing: %s" % (unit.maybe_run_status,)
def _refresh_game_menu_items_state(self):
    # Enable/disable menu entries: the game-wide entries need a loaded
    # game; the program entries additionally need a pointed unit.
    has_game = self._game is not None
    obj = (self._game.game_map[self._game_viewer._pointer_position].maybe_object
           if has_game and self._game_viewer._pointer_position is not None
           else None)
    has_unit = (self._game is not None and
                self._game_viewer._pointer_position is not None and
                isinstance(obj, Unit))
    state = NORMAL if has_game else DISABLED
    entries = [ClientApplication.ADD_PLAYER_LABEL,
               ClientApplication.SAVE_GAME_LABEL,
               ClientApplication.TIC_LABEL,
               ClientApplication.TIC_IN_LOOP_LABEL]
    for entry in entries:
        self._game_menu.entryconfigure(entry, state=state)
    state = NORMAL if has_unit else DISABLED
    entries = [ClientApplication.SET_PROGRAM_LABEL,
               ClientApplication.SET_STAR_PROGRAM_LABEL,
               ClientApplication.DELETE_PROGRAM_LABEL]
    for entry in entries:
        self._game_menu.entryconfigure(entry, state=state)
def _warning(self, title, text):
    # Modal warning box anchored to the main window.
    tkMessageBox.showwarning(title, text, parent=self)
def _ask_if_delete_current_game_if_exists(self):
    """Return True when it is OK to discard the current game: either no
    game is loaded, or the user confirmed losing it."""
    if not self._game:
        return True
    return tkMessageBox.askyesno(
        ClientApplication.TITLE_ARE_YOU_SURE,
        ClientApplication.WARNING_CURRENT_GAME_WILL_BE_LOST,
        icon=tkMessageBox.WARNING,
        parent=self)
def _ask_if_quit_program(self):
    # Yes/no confirmation dialog; True means "quit".
    return tkMessageBox.askyesno(
        ClientApplication.TITLE_QUIT_PROGRAM,
        ClientApplication.QUIT_PROGRAM_QUESTION,
        icon=tkMessageBox.WARNING,
        parent=self
    )
def run_with_profiling():
    """Run the client under cProfile, then print the 25 most expensive
    entries sorted by cumulative time (stats also saved to '.stats')."""
    stats_file = '.stats'
    import cProfile
    cProfile.run('run()', stats_file)
    import pstats
    stats = pstats.Stats(stats_file)
    # strip_dirs/sort_stats return the Stats object, so they chain
    stats.strip_dirs().sort_stats('cumulative').dump_stats(stats_file)
    stats.print_stats(25)
def run():
    # Entry point: set up logging, build the Tk UI and run the mainloop,
    # making sure logging is shut down whatever happens.
    # prevent "errors occured" message box in py2exe distribution
    turn_off_standard_streams_if_it_is_py2exe_distribution()
    # run it!
    global root, app
    init_logging('debug')
    try:
        root = Tk()
        # route Tkinter callback exceptions into our logging
        root.report_callback_exception = log_error_callback
        app = ClientApplication(master=root)
        app.mainloop()
    except Exception as ex:
        log_exception('Unhandled exception outside tkinter!')
    finally:
        shutdown_logging()
if __name__ == "__main__":
    # normal entry point
    run()  # replace with run_with_profiling to enable profiling
| gpl-3.0 |
sillydan1/WhatEverEngine | packages/IronPython.StdLib.2.7.5/content/Lib/multiprocessing/__init__.py | 8 | 7897 | #
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__version__ = '0.70a1'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
]
__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
    """Base class of all multiprocessing exceptions."""
    pass

class BufferTooShort(ProcessError):
    """Raised when a supplied buffer is too small for the received message."""
    pass

class TimeoutError(ProcessError):
    """Raised by blocking calls when their timeout expires."""
    pass

class AuthenticationError(ProcessError):
    """Raised when there is an authentication error."""
    pass

# This is down here because _multiprocessing uses BufferTooShort
try:
    # IronPython does not provide _multiprocessing
    import _multiprocessing
except ImportError:
    pass
#
# Definitions not depending on native semaphores
#
def Manager():
    '''
    Returns a manager associated with a running server process

    The managers methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    # start() spawns the manager's server process as a side effect
    m.start()
    return m
def Pipe(duplex=True):
    '''
    Returns two connection object connected by a pipe
    '''
    # lazy import keeps package import cheap; alias avoids shadowing
    from multiprocessing.connection import Pipe as _Pipe
    return _Pipe(duplex)
def cpu_count():
    '''
    Returns the number of CPUs in the system
    '''
    num = 0
    if sys.platform == 'win32':
        # Windows exposes the count through the environment
        try:
            num = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            pass
    elif 'bsd' in sys.platform or sys.platform == 'darwin':
        # BSD family: ask sysctl; on OS X the binary lives under /usr
        command = '/sbin/sysctl -n hw.ncpu'
        if sys.platform == 'darwin':
            command = '/usr' + command
        try:
            with os.popen(command) as pipe:
                num = int(pipe.read())
        except ValueError:
            pass
    else:
        # POSIX: number of processors currently online
        try:
            num = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            pass
    if num < 1:
        raise NotImplementedError('cannot determine number of cpus')
    return num
def freeze_support():
    '''
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.
    '''
    # only relevant for frozen (e.g. py2exe/cx_Freeze) Windows executables,
    # where child processes re-execute the main binary
    if sys.platform == 'win32' and getattr(sys, 'frozen', False):
        from multiprocessing.forking import freeze_support
        freeze_support()
def get_logger():
    '''
    Return package logger -- if it does not already exist then it is created
    '''
    from multiprocessing.util import get_logger
    return get_logger()

def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    from multiprocessing.util import log_to_stderr
    return log_to_stderr(level)

def allow_connection_pickling():
    '''
    Install support for sending connections and sockets between processes
    '''
    # importing the reduction module registers the pickling support as a
    # side effect; the imported name itself is deliberately unused
    from multiprocessing import reduction
#
# Definitions depending on native semaphores
#
# Each factory below lazily imports its implementation module so that
# importing the ``multiprocessing`` package itself stays cheap and does
# not require the native semaphore support until it is actually used.

def Lock():
    '''
    Returns a non-recursive lock object
    '''
    from multiprocessing.synchronize import Lock
    return Lock()

def RLock():
    '''
    Returns a recursive lock object
    '''
    from multiprocessing.synchronize import RLock
    return RLock()

def Condition(lock=None):
    '''
    Returns a condition object
    '''
    from multiprocessing.synchronize import Condition
    return Condition(lock)

def Semaphore(value=1):
    '''
    Returns a semaphore object
    '''
    from multiprocessing.synchronize import Semaphore
    return Semaphore(value)

def BoundedSemaphore(value=1):
    '''
    Returns a bounded semaphore object
    '''
    from multiprocessing.synchronize import BoundedSemaphore
    return BoundedSemaphore(value)

def Event():
    '''
    Returns an event object
    '''
    from multiprocessing.synchronize import Event
    return Event()

def Queue(maxsize=0):
    '''
    Returns a queue object
    '''
    from multiprocessing.queues import Queue
    return Queue(maxsize)

def JoinableQueue(maxsize=0):
    '''
    Returns a queue object
    '''
    from multiprocessing.queues import JoinableQueue
    return JoinableQueue(maxsize)

def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
    '''
    Returns a process pool object
    '''
    from multiprocessing.pool import Pool
    return Pool(processes, initializer, initargs, maxtasksperchild)

def RawValue(typecode_or_type, *args):
    '''
    Returns a shared object
    '''
    from multiprocessing.sharedctypes import RawValue
    return RawValue(typecode_or_type, *args)

def RawArray(typecode_or_type, size_or_initializer):
    '''
    Returns a shared array
    '''
    from multiprocessing.sharedctypes import RawArray
    return RawArray(typecode_or_type, size_or_initializer)

def Value(typecode_or_type, *args, **kwds):
    '''
    Returns a synchronized shared object
    '''
    from multiprocessing.sharedctypes import Value
    return Value(typecode_or_type, *args, **kwds)

def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Returns a synchronized shared array
    '''
    from multiprocessing.sharedctypes import Array
    return Array(typecode_or_type, size_or_initializer, **kwds)
#
#
#
if sys.platform == 'win32':
    # Windows has no fork(): children are started by spawning a new
    # interpreter, so embedders may need to point at an explicit binary.

    def set_executable(executable):
        '''
        Sets the path to a python.exe or pythonw.exe binary used to run
        child processes on Windows instead of sys.executable.
        Useful for people embedding Python.
        '''
        from multiprocessing.forking import set_executable
        set_executable(executable)

    __all__ += ['set_executable']
| apache-2.0 |
mikedh/trimesh | trimesh/path/exchange/load.py | 1 | 2622 | import os
from .dxf import _dxf_loaders
from .svg_io import svg_to_path
from ..path import Path
from . import misc
from ... import util
def load_path(file_obj, file_type=None, **kwargs):
    """
    Load a file to a Path file_object.

    Parameters
    -----------
    file_obj : One of the following:
         - Path, Path2D, or Path3D file_objects
         - open file file_object (dxf or svg)
         - file name (dxf or svg)
         - shapely.geometry.Polygon
         - shapely.geometry.MultiLineString
         - dict with kwargs for Path constructor
         - (n,2,(2|3)) float, line segments
    file_type : str
         Type of file is required if file
         file_object passed.

    Returns
    ---------
    path : Path, Path2D, Path3D file_object
        Data as a native trimesh Path file_object
    """
    if isinstance(file_obj, Path):
        # we have been passed a Path file_object so
        # do nothing and return the passed file_object
        return file_obj
    elif util.is_file(file_obj):
        # for open file file_objects use loaders
        kwargs.update(path_loaders[file_type](
            file_obj, file_type=file_type))
    elif util.is_string(file_obj):
        # strings passed are evaluated as file file_objects
        with open(file_obj, 'rb') as file_file_obj:
            # get the file type from the extension
            file_type = os.path.splitext(file_obj)[-1][1:].lower()
            # call the loader
            kwargs.update(path_loaders[file_type](
                file_file_obj, file_type=file_type))
    elif util.is_instance_named(file_obj, 'Polygon'):
        # convert from shapely polygons to Path2D
        kwargs.update(misc.polygon_to_path(file_obj))
    elif util.is_instance_named(file_obj, 'MultiLineString'):
        # convert from shapely LineStrings to Path2D
        kwargs.update(misc.linestrings_to_path(file_obj))
    elif isinstance(file_obj, dict):
        # load as kwargs
        # NOTE(review): this branch forwards file_obj directly and silently
        # drops any extra **kwargs passed to load_path -- confirm intended
        from ...exchange.load import load_kwargs
        return load_kwargs(file_obj)
    elif util.is_sequence(file_obj):
        # load as lines in space
        kwargs.update(misc.lines_to_path(file_obj))
    else:
        raise ValueError('Not a supported object type!')

    # deferred import, presumably to avoid a circular import at module load
    from ...exchange.load import load_kwargs
    return load_kwargs(kwargs)
def path_formats():
    """
    Get a list of supported path formats.

    Returns
    ------------
    loaders : list of str
        Extensions of loadable formats, ie:
        ['svg', 'dxf']
    """
    # iterating a dict yields its keys
    return list(path_loaders)
# loader registry: file extension (lowercase, no dot) -> loader callable
path_loaders = {'svg': svg_to_path}
# add the DXF loaders provided by the dxf module
path_loaders.update(_dxf_loaders)
| mit |
jakdot/pyactr | tutorials/u7_simplecompilation.py | 1 | 1254 | """
Testing a simple case of production compilation. The compilation also allows for utility learning, shown in the model below, as well.
"""
import warnings
import pyactr as actr
class Compilation1(object):
    """
    Model testing compilation -- basic cases.
    """

    def __init__(self, **kwargs):
        # declare the chunk type used by both rules
        actr.chunktype("state", "starting ending")
        self.m = actr.ACTRModel(**kwargs)
        # initial goal: starting=1, ending unset
        self.m.goal.add(actr.makechunk(nameofchunk="start", typename="state", starting=1))
        # rule "one" fires while ending differs from starting and copies
        # starting into ending; the explicit utility feeds utility learning
        self.m.productionstring(name="one", string="""
=g>
isa state
starting =x
ending ~=x
==>
=g>
isa state
ending =x""", utility=2)
        # rule "two" fires once starting == ending and resets ending to 4,
        # so "one" becomes applicable again -- the one/two cycle is what
        # production compilation collapses into a single rule
        self.m.productionstring(name="two", string="""
=g>
isa state
starting =x
ending =x
==>
=g>
isa state
starting =x
ending 4""")
if __name__ == "__main__":
    warnings.simplefilter("ignore")
    # enable both production compilation and utility learning
    mm = Compilation1(production_compilation=True, utility_learning=True)
    model = mm.m
    sim = model.simulation(realtime=True)
    sim.run(0.5)
    # the compiled rule is named after its parent rules
    print(model.productions["one and two"])
mcfletch/AutobahnPython | autobahn/autobahn/wamp/broker.py | 9 | 8865 | ###############################################################################
##
## Copyright (C) 2013-2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
from autobahn import util
from autobahn.wamp import types
from autobahn.wamp import role
from autobahn.wamp import message
from autobahn.wamp.exception import ApplicationError
from autobahn.wamp.interfaces import IBroker
from autobahn.wamp.message import _URI_PAT_STRICT_NON_EMPTY, _URI_PAT_LOOSE_NON_EMPTY
class Broker:
    """
    Basic WAMP broker, implements :class:`autobahn.wamp.interfaces.IBroker`.

    Three mutually consistent indexes (built in ``__init__``) make
    PUBLISH/SUBSCRIBE/UNSUBSCRIBE and session detach cheap dict/set
    operations.
    """

    def __init__(self, realm, options = None):
        """
        Constructor.

        :param realm: The realm this broker is working for.
        :type realm: str
        :param options: Router options.
        :type options: Instance of :class:`autobahn.wamp.types.RouterOptions`.
        """
        self.realm = realm
        self._options = options or types.RouterOptions()

        ## map: session -> set(subscription)
        ## needed for removeSession
        self._session_to_subscriptions = {}

        ## map: session_id -> session
        ## needed for exclude/eligible
        self._session_id_to_session = {}

        ## map: topic -> (subscription, set(session))
        ## needed for PUBLISH and SUBSCRIBE
        self._topic_to_sessions = {}

        ## map: subscription -> (topic, set(session))
        ## needed for UNSUBSCRIBE
        self._subscription_to_sessions = {}

        ## check all topic URIs with strict rules
        self._option_uri_strict = self._options.uri_check == types.RouterOptions.URI_CHECK_STRICT

        ## supported features from "WAMP Advanced Profile"
        self._role_features = role.RoleBrokerFeatures(publisher_identification = True, subscriber_blackwhite_listing = True, publisher_exclusion = True)

    def attach(self, session):
        """
        Implements :func:`autobahn.wamp.interfaces.IBroker.attach`
        """
        assert(session not in self._session_to_subscriptions)

        self._session_to_subscriptions[session] = set()
        self._session_id_to_session[session._session_id] = session

    def detach(self, session):
        """
        Implements :func:`autobahn.wamp.interfaces.IBroker.detach`
        """
        assert(session in self._session_to_subscriptions)

        # drop the session from every subscription it was part of,
        # pruning index entries that become empty
        for subscription in self._session_to_subscriptions[session]:
            topic, subscribers = self._subscription_to_sessions[subscription]
            subscribers.discard(session)
            if not subscribers:
                del self._subscription_to_sessions[subscription]
            _, subscribers = self._topic_to_sessions[topic]
            subscribers.discard(session)
            if not subscribers:
                del self._topic_to_sessions[topic]
        del self._session_to_subscriptions[session]
        del self._session_id_to_session[session._session_id]

    def processPublish(self, session, publish):
        """
        Implements :func:`autobahn.wamp.interfaces.IBroker.processPublish`
        """
        assert(session in self._session_to_subscriptions)

        ## check topic URI
        ##
        if (not self._option_uri_strict and not _URI_PAT_LOOSE_NON_EMPTY.match(publish.topic)) or \
           (    self._option_uri_strict and not _URI_PAT_STRICT_NON_EMPTY.match(publish.topic)):

            if publish.acknowledge:
                reply = message.Error(message.Publish.MESSAGE_TYPE, publish.request, ApplicationError.INVALID_URI, ["publish with invalid topic URI '{}'".format(publish.topic)])
                session._transport.send(reply)
            return

        if publish.topic in self._topic_to_sessions and self._topic_to_sessions[publish.topic]:

            ## initial list of receivers are all subscribers ..
            ##
            subscription, receivers = self._topic_to_sessions[publish.topic]

            ## filter by "eligible" receivers
            ##
            if publish.eligible:
                eligible = []
                for s in publish.eligible:
                    if s in self._session_id_to_session:
                        eligible.append(self._session_id_to_session[s])
                receivers = set(eligible) & receivers

            ## remove "excluded" receivers
            ##
            if publish.exclude:
                exclude = []
                for s in publish.exclude:
                    if s in self._session_id_to_session:
                        exclude.append(self._session_id_to_session[s])
                if exclude:
                    receivers = receivers - set(exclude)

            ## remove publisher -- done lazily via me_also at dispatch
            ## time, so the shared subscriber set is never mutated here
            ##
            if publish.excludeMe is None or publish.excludeMe:
                # receivers.discard(session) # bad: this would modify our actual subscriber list
                me_also = False
            else:
                me_also = True

        else:
            subscription, receivers, me_also = None, [], False

        publication = util.id()

        ## send publish acknowledge when requested
        ##
        if publish.acknowledge:
            msg = message.Published(publish.request, publication)
            session._transport.send(msg)

        ## if receivers is non-empty, dispatch event ..
        ##
        if receivers:
            if publish.discloseMe:
                publisher = session._session_id
            else:
                publisher = None
            msg = message.Event(subscription,
                                publication,
                                args = publish.args,
                                kwargs = publish.kwargs,
                                publisher = publisher)
            for receiver in receivers:
                if me_also or receiver != session:
                    ## the subscribing session might have been lost in the meantime ..
                    if receiver._transport:
                        receiver._transport.send(msg)

    def processSubscribe(self, session, subscribe):
        """
        Implements :func:`autobahn.wamp.interfaces.IBroker.processSubscribe`
        """
        assert(session in self._session_to_subscriptions)

        ## check topic URI
        ##
        if (not self._option_uri_strict and not _URI_PAT_LOOSE_NON_EMPTY.match(subscribe.topic)) or \
           (    self._option_uri_strict and not _URI_PAT_STRICT_NON_EMPTY.match(subscribe.topic)):

            reply = message.Error(message.Subscribe.MESSAGE_TYPE, subscribe.request, ApplicationError.INVALID_URI, ["subscribe for invalid topic URI '{}'".format(subscribe.topic)])

        else:
            # create the subscription id on first subscribe to this topic;
            # subsequent subscribers share the same id
            if not subscribe.topic in self._topic_to_sessions:
                subscription = util.id()
                self._topic_to_sessions[subscribe.topic] = (subscription, set())

            subscription, subscribers = self._topic_to_sessions[subscribe.topic]

            if not session in subscribers:
                subscribers.add(session)

            if not subscription in self._subscription_to_sessions:
                self._subscription_to_sessions[subscription] = (subscribe.topic, set())

            _, subscribers = self._subscription_to_sessions[subscription]
            if not session in subscribers:
                subscribers.add(session)

            if not subscription in self._session_to_subscriptions[session]:
                self._session_to_subscriptions[session].add(subscription)

            reply = message.Subscribed(subscribe.request, subscription)

        session._transport.send(reply)

    def processUnsubscribe(self, session, unsubscribe):
        """
        Implements :func:`autobahn.wamp.interfaces.IBroker.processUnsubscribe`
        """
        assert(session in self._session_to_subscriptions)

        if unsubscribe.subscription in self._subscription_to_sessions:
            # remove the session from both indexes, pruning entries that
            # become empty (mirror image of detach)
            topic, subscribers = self._subscription_to_sessions[unsubscribe.subscription]
            subscribers.discard(session)
            if not subscribers:
                del self._subscription_to_sessions[unsubscribe.subscription]
            _, subscribers = self._topic_to_sessions[topic]
            subscribers.discard(session)
            if not subscribers:
                del self._topic_to_sessions[topic]
            self._session_to_subscriptions[session].discard(unsubscribe.subscription)
            reply = message.Unsubscribed(unsubscribe.request)
        else:
            reply = message.Error(message.Unsubscribe.MESSAGE_TYPE, unsubscribe.request, ApplicationError.NO_SUCH_SUBSCRIPTION)

        session._transport.send(reply)

# register Broker as a virtual subclass of the IBroker ABC
IBroker.register(Broker)
| apache-2.0 |
hn8841182/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/csv.py | 637 | 16166 |
"""
csv.py - read/write/investigate CSV files
"""
import re
from _csv import Error, __version__, writer, reader, register_dialect, \
unregister_dialect, get_dialect, list_dialects, \
field_size_limit, \
QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
__doc__
from _csv import Dialect as _Dialect
from io import StringIO
__all__ = [ "QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
"Error", "Dialect", "__doc__", "excel", "excel_tab",
"field_size_limit", "reader", "writer",
"register_dialect", "get_dialect", "list_dialects", "Sniffer",
"unregister_dialect", "__version__", "DictReader", "DictWriter" ]
class Dialect:
"""Describe a CSV dialect.
This must be subclassed (see csv.excel). Valid attributes are:
delimiter, quotechar, escapechar, doublequote, skipinitialspace,
lineterminator, quoting.
"""
_name = ""
_valid = False
# placeholders
delimiter = None
quotechar = None
escapechar = None
doublequote = None
skipinitialspace = None
lineterminator = None
quoting = None
def __init__(self):
if self.__class__ != Dialect:
self._valid = True
self._validate()
def _validate(self):
try:
_Dialect(self)
except TypeError as e:
# We do this for compatibility with py2.3
raise Error(str(e))
class excel(Dialect):
"""Describe the usual properties of Excel-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\r\n'
quoting = QUOTE_MINIMAL
register_dialect("excel", excel)
class excel_tab(excel):
"""Describe the usual properties of Excel-generated TAB-delimited files."""
delimiter = '\t'
register_dialect("excel-tab", excel_tab)
class unix_dialect(Dialect):
"""Describe the usual properties of Unix-generated CSV files."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = False
lineterminator = '\n'
quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)
class DictReader:
def __init__(self, f, fieldnames=None, restkey=None, restval=None,
dialect="excel", *args, **kwds):
self._fieldnames = fieldnames # list of keys for the dict
self.restkey = restkey # key to catch long rows
self.restval = restval # default value for short rows
self.reader = reader(f, dialect, *args, **kwds)
self.dialect = dialect
self.line_num = 0
def __iter__(self):
return self
@property
def fieldnames(self):
if self._fieldnames is None:
try:
self._fieldnames = next(self.reader)
except StopIteration:
pass
self.line_num = self.reader.line_num
return self._fieldnames
@fieldnames.setter
def fieldnames(self, value):
self._fieldnames = value
def __next__(self):
if self.line_num == 0:
# Used only for its side effect.
self.fieldnames
row = next(self.reader)
self.line_num = self.reader.line_num
# unlike the basic reader, we prefer not to return blanks,
# because we will typically wind up with a dict full of None
# values
while row == []:
row = next(self.reader)
d = dict(zip(self.fieldnames, row))
lf = len(self.fieldnames)
lr = len(row)
if lf < lr:
d[self.restkey] = row[lf:]
elif lf > lr:
for key in self.fieldnames[lr:]:
d[key] = self.restval
return d
class DictWriter:
def __init__(self, f, fieldnames, restval="", extrasaction="raise",
dialect="excel", *args, **kwds):
self.fieldnames = fieldnames # list of keys for the dict
self.restval = restval # for writing short dicts
if extrasaction.lower() not in ("raise", "ignore"):
raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
% extrasaction)
self.extrasaction = extrasaction
self.writer = writer(f, dialect, *args, **kwds)
def writeheader(self):
header = dict(zip(self.fieldnames, self.fieldnames))
self.writerow(header)
def _dict_to_list(self, rowdict):
if self.extrasaction == "raise":
wrong_fields = [k for k in rowdict if k not in self.fieldnames]
if wrong_fields:
raise ValueError("dict contains fields not in fieldnames: "
+ ", ".join(wrong_fields))
return [rowdict.get(key, self.restval) for key in self.fieldnames]
def writerow(self, rowdict):
return self.writer.writerow(self._dict_to_list(rowdict))
def writerows(self, rowdicts):
rows = []
for rowdict in rowdicts:
rows.append(self._dict_to_list(rowdict))
return self.writer.writerows(rows)
# Guard Sniffer's type checking against builds that exclude complex()
try:
complex
except NameError:
complex = float
class Sniffer:
    '''
    "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
    Returns a Dialect object.
    '''
    def __init__(self):
        # in case there is more than one possible delimiter
        self.preferred = [',', '\t', ';', ' ', ':']

    def sniff(self, sample, delimiters=None):
        """
        Returns a dialect (or None) corresponding to the sample

        *delimiters*, if given, restricts which characters may be proposed
        as the delimiter.  Raises Error when no delimiter can be determined.
        """
        quotechar, doublequote, delimiter, skipinitialspace = \
                   self._guess_quote_and_delimiter(sample, delimiters)
        if not delimiter:
            delimiter, skipinitialspace = self._guess_delimiter(sample,
                                                                delimiters)

        if not delimiter:
            raise Error("Could not determine delimiter")

        class dialect(Dialect):
            _name = "sniffed"
            lineterminator = '\r\n'
            quoting = QUOTE_MINIMAL
            # escapechar = ''

        dialect.doublequote = doublequote
        dialect.delimiter = delimiter
        # _csv.reader won't accept a quotechar of ''
        dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace

        return dialect

    def _guess_quote_and_delimiter(self, data, delimiters):
        """
        Looks for text enclosed between two identical quotes
        (the probable quotechar) which are preceded and followed
        by the same character (the probable delimiter).
        For example:
                         ,'some text',
        The quote with the most wins, same with the delimiter.
        If there is no quotechar the delimiter can't be determined
        this way.
        """
        matches = []
        # Try progressively looser patterns and stop at the first that hits.
        # Bug fix: the third pattern previously read '(?P<delim>>[^\w\n"\'])';
        # the stray '>' required a literal '>' before the delimiter, so a
        # quoted field at end-of-line (e.g.  foo,'bar'\n) was never matched.
        for restr in ('(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   #  ".*?",
                      '(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
                      '(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):                            #  ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(data)
            if matches:
                break

        if not matches:
            # (quotechar, doublequote, delimiter, skipinitialspace)
            return ('', False, None, 0)

        # Tally the quote and delimiter candidates over all matches.
        quotes = {}
        delims = {}
        spaces = 0
        for m in matches:
            n = regexp.groupindex['quote'] - 1
            key = m[n]
            if key:
                quotes[key] = quotes.get(key, 0) + 1
            try:
                n = regexp.groupindex['delim'] - 1
                key = m[n]
            except KeyError:
                continue
            if key and (delimiters is None or key in delimiters):
                delims[key] = delims.get(key, 0) + 1
            try:
                n = regexp.groupindex['space'] - 1
            except KeyError:
                continue
            if m[n]:
                spaces += 1

        # The most frequently seen quote character wins.
        quotechar = max(quotes, key=quotes.get)

        if delims:
            delim = max(delims, key=delims.get)
            skipinitialspace = delims[delim] == spaces
            if delim == '\n': # most likely a file with a single column
                delim = ''
        else:
            # there is *no* delimiter, it's a single column of quoted data
            delim = ''
            skipinitialspace = 0

        # if we see an extra quote between delimiters, we've got a
        # double quoted format
        dq_regexp = re.compile(
                               r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
                               {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)

        if dq_regexp.search(data):
            doublequote = True
        else:
            doublequote = False

        return (quotechar, doublequote, delim, skipinitialspace)

    def _guess_delimiter(self, data, delimiters):
        """
        The delimiter /should/ occur the same number of times on
        each row. However, due to malformed data, it may not. We don't want
        an all or nothing approach, so we allow for small variations in this
        number.
          1) build a table of the frequency of each character on every line.
          2) build a table of frequencies of this frequency (meta-frequency?),
             e.g.  'x occurred 5 times in 10 rows, 6 times in 1000 rows,
             7 times in 2 rows'
          3) use the mode of the meta-frequency to determine the /expected/
             frequency for that character
          4) find out how often the character actually meets that goal
          5) the character that best meets its goal is the delimiter
        For performance reasons, the data is evaluated in chunks, so it can
        try and evaluate the smallest portion of the data possible, evaluating
        additional chunks as necessary.
        """
        data = list(filter(None, data.split('\n')))

        ascii = [chr(c) for c in range(127)] # 7-bit ASCII

        # build frequency tables
        chunkLength = min(10, len(data))
        iteration = 0
        charFrequency = {}
        modes = {}
        delims = {}
        start, end = 0, min(chunkLength, len(data))
        while start < len(data):
            iteration += 1
            for line in data[start:end]:
                for char in ascii:
                    metaFrequency = charFrequency.get(char, {})
                    # must count even if frequency is 0
                    freq = line.count(char)
                    # value is the mode
                    metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
                    charFrequency[char] = metaFrequency

            for char in charFrequency.keys():
                items = list(charFrequency[char].items())
                if len(items) == 1 and items[0][0] == 0:
                    continue
                # get the mode of the frequencies
                if len(items) > 1:
                    modes[char] = max(items, key=lambda x: x[1])
                    # adjust the mode - subtract the sum of all
                    # other frequencies
                    items.remove(modes[char])
                    modes[char] = (modes[char][0], modes[char][1]
                                   - sum(item[1] for item in items))
                else:
                    modes[char] = items[0]

            # build a list of possible delimiters
            modeList = modes.items()
            total = float(chunkLength * iteration)
            # (rows of consistent data) / (number of rows) = 100%
            consistency = 1.0
            # minimum consistency threshold
            threshold = 0.9
            # Lower the consistency bar in 1% steps until some character
            # qualifies or we drop below the threshold.
            while len(delims) == 0 and consistency >= threshold:
                for k, v in modeList:
                    if v[0] > 0 and v[1] > 0:
                        if ((v[1]/total) >= consistency and
                            (delimiters is None or k in delimiters)):
                            delims[k] = v
                consistency -= 0.01

            if len(delims) == 1:
                delim = list(delims.keys())[0]
                skipinitialspace = (data[0].count(delim) ==
                                    data[0].count("%c " % delim))
                return (delim, skipinitialspace)

            # analyze another chunkLength lines
            start = end
            end += chunkLength

        if not delims:
            return ('', 0)

        # if there's more than one, fall back to a 'preferred' list
        if len(delims) > 1:
            for d in self.preferred:
                if d in delims.keys():
                    skipinitialspace = (data[0].count(d) ==
                                        data[0].count("%c " % d))
                    return (d, skipinitialspace)

        # nothing else indicates a preference, pick the character that
        # dominates(?)
        items = [(v,k) for (k,v) in delims.items()]
        items.sort()
        delim = items[-1][1]

        skipinitialspace = (data[0].count(delim) ==
                            data[0].count("%c " % delim))
        return (delim, skipinitialspace)

    def has_header(self, sample):
        # Creates a dictionary of types of data in each column. If any
        # column is of a single type (say, integers), *except* for the first
        # row, then the first row is presumed to be labels. If the type
        # can't be determined, it is assumed to be a string in which case
        # the length of the string is the determining factor: if all of the
        # rows except for the first are the same length, it's a header.
        # Finally, a 'vote' is taken at the end for each column, adding or
        # subtracting from the likelihood of the first row being a header.

        rdr = reader(StringIO(sample), self.sniff(sample))

        header = next(rdr) # assume first row is header

        columns = len(header)
        columnTypes = {}
        for i in range(columns): columnTypes[i] = None

        checked = 0
        for row in rdr:
            # arbitrary number of rows to check, to keep it sane
            if checked > 20:
                break
            checked += 1

            if len(row) != columns:
                continue # skip rows that have irregular number of columns

            for col in list(columnTypes.keys()):

                for thisType in [int, float, complex]:
                    try:
                        thisType(row[col])
                        break
                    except (ValueError, OverflowError):
                        pass
                else:
                    # fallback to length of string
                    thisType = len(row[col])

                if thisType != columnTypes[col]:
                    if columnTypes[col] is None: # add new column type
                        columnTypes[col] = thisType
                    else:
                        # type is inconsistent, remove column from
                        # consideration
                        del columnTypes[col]

        # finally, compare results against first row and "vote"
        # on whether it's a header
        hasHeader = 0
        for col, colType in columnTypes.items():
            if type(colType) == type(0): # it's a length
                if len(header[col]) != colType:
                    hasHeader += 1
                else:
                    hasHeader -= 1
            else: # attempt typecast
                try:
                    colType(header[col])
                except (ValueError, TypeError):
                    hasHeader += 1
                else:
                    hasHeader -= 1

        return hasHeader > 0
| gpl-3.0 |
mottosso/mindbender-setup | bin/windows/python36/Lib/lib2to3/fixes/fix_unicode.py | 136 | 1256 | r"""Fixer for unicode.
* Changes unicode to str and unichr to chr.
* If "...\u..." is not unicode literal change it into "...\\u...".
* Change u"..." into "...".
"""
from ..pgen2 import token
from .. import fixer_base
_mapping = {"unichr" : "chr", "unicode" : "str"}
class FixUnicode(fixer_base.BaseFix):
    # Fixer that:
    #   * renames the builtins 'unicode'/'unichr' to 'str'/'chr';
    #   * drops the u/U prefix from string literals;
    #   * doubles lone \u / \U sequences in unprefixed literals when the
    #     module does NOT use 'unicode_literals', preserving their old
    #     non-escape meaning.
    BM_compatible = True
    PATTERN = "STRING | 'unicode' | 'unichr'"
    def start_tree(self, tree, filename):
        """Record whether the module opted in to unicode_literals."""
        super(FixUnicode, self).start_tree(tree, filename)
        # If unicode_literals is in effect, \u was already a real escape
        # in Python 2, so it must not be doubled below.
        self.unicode_literals = 'unicode_literals' in tree.future_features
    def transform(self, node, results):
        if node.type == token.NAME:
            # 'unicode' -> 'str', 'unichr' -> 'chr'.
            new = node.clone()
            new.value = _mapping[node.value]
            return new
        elif node.type == token.STRING:
            val = node.value
            # val[0] in '\'"' means the literal has no u/b prefix.
            if not self.unicode_literals and val[0] in '\'"' and '\\' in val:
                # Split on literal backslash pairs so only *lone* \u / \U
                # sequences are escaped to \\u / \\U.
                val = r'\\'.join([
                    v.replace('\\u', r'\\u').replace('\\U', r'\\U')
                    for v in val.split(r'\\')
                ])
            if val[0] in 'uU':
                # str literals are unicode in Python 3; the prefix goes.
                val = val[1:]
            if val == node.value:
                # Unchanged -- return the original node to avoid a rewrite.
                return node
            new = node.clone()
            new.value = val
            return new
leo524/7mos-fourm | node_modules/nodebb-plugin-markdown/node_modules/pygmentize-bundled/vendor/pygments/pygments/lexers/hdl.py | 363 | 16209 | # -*- coding: utf-8 -*-
"""
pygments.lexers.hdl
~~~~~~~~~~~~~~~~~~~
Lexers for hardware descriptor languages.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups, include, using, this
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Punctuation, \
Error
__all__ = ['VerilogLexer', 'SystemVerilogLexer', 'VhdlLexer']
class VerilogLexer(RegexLexer):
    """
    For verilog source code with preprocessor directives.

    *New in Pygments 1.4.*
    """
    name = 'verilog'
    aliases = ['verilog', 'v']
    filenames = ['*.v']
    mimetypes = ['text/x-verilog']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'

    tokens = {
        'root': [
            (r'^\s*`define', Comment.Preproc, 'macro'),
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}#@]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),
            (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
            (r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary
            (r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
            (r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
            (r'\'[01xz]', Number),
            (r'\d+[Ll]?', Number.Integer),
            (r'\*/', Error),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;\']', Punctuation),
            (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),
            (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
            (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text),
             'import'),
            (r'(always|always_comb|always_ff|always_latch|and|assign|automatic|'
             r'begin|break|buf|bufif0|bufif1|case|casex|casez|cmos|const|'
             r'continue|deassign|default|defparam|disable|do|edge|else|end|endcase|'
             r'endfunction|endgenerate|endmodule|endpackage|endprimitive|endspecify|'
             r'endtable|endtask|enum|event|final|for|force|forever|fork|function|'
             r'generate|genvar|highz0|highz1|if|initial|inout|input|'
             r'integer|join|large|localparam|macromodule|medium|module|'
             r'nand|negedge|nmos|nor|not|notif0|notif1|or|output|packed|'
             r'parameter|pmos|posedge|primitive|pull0|pull1|pulldown|pullup|rcmos|'
             r'ref|release|repeat|return|rnmos|rpmos|rtran|rtranif0|'
             r'rtranif1|scalared|signed|small|specify|specparam|strength|'
             r'string|strong0|strong1|struct|table|task|'
             r'tran|tranif0|tranif1|type|typedef|'
             r'unsigned|var|vectored|void|wait|weak0|weak1|while|'
             r'xnor|xor)\b', Keyword),

            (r'`(accelerate|autoexpand_vectornets|celldefine|default_nettype|'
             r'else|elsif|endcelldefine|endif|endprotect|endprotected|'
             r'expand_vectornets|ifdef|ifndef|include|noaccelerate|noexpand_vectornets|'
             r'noremove_gatenames|noremove_netnames|nounconnected_drive|'
             r'protect|protected|remove_gatenames|remove_netnames|resetall|'
             r'timescale|unconnected_drive|undef)\b', Comment.Preproc),

            (r'\$(bits|bitstoreal|bitstoshortreal|countdrivers|display|fclose|'
             r'fdisplay|finish|floor|fmonitor|fopen|fstrobe|fwrite|'
             r'getpattern|history|incsave|input|itor|key|list|log|'
             r'monitor|monitoroff|monitoron|nokey|nolog|printtimescale|'
             r'random|readmemb|readmemh|realtime|realtobits|reset|reset_count|'
             r'reset_value|restart|rtoi|save|scale|scope|shortrealtobits|'
             r'showscopes|showvariables|showvars|sreadmemb|sreadmemh|'
             r'stime|stop|strobe|time|timeformat|write)\b', Name.Builtin),

            # Fix: the original ran 'wor' and 'shortreal' together into the
            # single dead alternative 'worshortreal' (missing '|'), so
            # neither keyword was ever highlighted as a type.
            (r'(byte|shortint|int|longint|integer|time|'
             r'bit|logic|reg|'
             r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor|'
             r'shortreal|real|realtime)\b', Keyword.Type),

            ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'import': [
            (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
        ]
    }

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            # Convention: mark all upper case names as constants
            if token is Name:
                if value.isupper():
                    token = Name.Constant
            yield index, token, value
class SystemVerilogLexer(RegexLexer):
    """
    Extends verilog lexer to recognise all SystemVerilog keywords from IEEE
    1800-2009 standard.

    *New in Pygments 1.5.*
    """
    name = 'systemverilog'
    aliases = ['systemverilog', 'sv']
    filenames = ['*.sv', '*.svh']
    mimetypes = ['text/x-systemverilog']

    #: optional Comment or Whitespace
    _ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'

    tokens = {
        'root': [
            (r'^\s*`define', Comment.Preproc, 'macro'),
            (r'^(\s*)(package)(\s+)', bygroups(Text, Keyword.Namespace, Text)),
            (r'^(\s*)(import)(\s+)', bygroups(Text, Keyword.Namespace, Text), 'import'),

            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'/(\\\n)?/(\n|(.|\n)*?[^\\]\n)', Comment.Single),
            (r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
            (r'[{}#@]', Punctuation),
            (r'L?"', String, 'string'),
            (r"L?'(\\.|\\[0-7]{1,3}|\\x[a-fA-F0-9]{1,2}|[^\\\'\n])'", String.Char),

            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+[lL]?', Number.Float),
            (r'(\d+\.\d*|\.\d+|\d+[fF])[fF]?', Number.Float),

            (r'([0-9]+)|(\'h)[0-9a-fA-F]+', Number.Hex),
            (r'([0-9]+)|(\'b)[0-1]+', Number.Hex), # should be binary
            (r'([0-9]+)|(\'d)[0-9]+', Number.Integer),
            (r'([0-9]+)|(\'o)[0-7]+', Number.Oct),
            (r'\'[01xz]', Number),
            (r'\d+[Ll]?', Number.Integer),

            (r'\*/', Error),

            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r'[()\[\],.;\']', Punctuation),

            (r'`[a-zA-Z_][a-zA-Z0-9_]*', Name.Constant),

            (r'(accept_on|alias|always|always_comb|always_ff|always_latch|'
             r'and|assert|assign|assume|automatic|before|begin|bind|bins|'
             r'binsof|bit|break|buf|bufif0|bufif1|byte|case|casex|casez|'
             r'cell|chandle|checker|class|clocking|cmos|config|const|constraint|'
             r'context|continue|cover|covergroup|coverpoint|cross|deassign|'
             r'default|defparam|design|disable|dist|do|edge|else|end|endcase|'
             r'endchecker|endclass|endclocking|endconfig|endfunction|endgenerate|'
             r'endgroup|endinterface|endmodule|endpackage|endprimitive|'
             r'endprogram|endproperty|endsequence|endspecify|endtable|'
             r'endtask|enum|event|eventually|expect|export|extends|extern|'
             r'final|first_match|for|force|foreach|forever|fork|forkjoin|'
             r'function|generate|genvar|global|highz0|highz1|if|iff|ifnone|'
             r'ignore_bins|illegal_bins|implies|import|incdir|include|'
             r'initial|inout|input|inside|instance|int|integer|interface|'
             r'intersect|join|join_any|join_none|large|let|liblist|library|'
             r'local|localparam|logic|longint|macromodule|matches|medium|'
             r'modport|module|nand|negedge|new|nexttime|nmos|nor|noshowcancelled|'
             r'not|notif0|notif1|null|or|output|package|packed|parameter|'
             r'pmos|posedge|primitive|priority|program|property|protected|'
             r'pull0|pull1|pulldown|pullup|pulsestyle_ondetect|pulsestyle_onevent|'
             r'pure|rand|randc|randcase|randsequence|rcmos|real|realtime|'
             r'ref|reg|reject_on|release|repeat|restrict|return|rnmos|'
             r'rpmos|rtran|rtranif0|rtranif1|s_always|s_eventually|s_nexttime|'
             r's_until|s_until_with|scalared|sequence|shortint|shortreal|'
             r'showcancelled|signed|small|solve|specify|specparam|static|'
             r'string|strong|strong0|strong1|struct|super|supply0|supply1|'
             r'sync_accept_on|sync_reject_on|table|tagged|task|this|throughout|'
             r'time|timeprecision|timeunit|tran|tranif0|tranif1|tri|tri0|'
             r'tri1|triand|trior|trireg|type|typedef|union|unique|unique0|'
             r'unsigned|until|until_with|untyped|use|uwire|var|vectored|'
             r'virtual|void|wait|wait_order|wand|weak|weak0|weak1|while|'
             r'wildcard|wire|with|within|wor|xnor|xor)\b', Keyword ),

            (r'(`__FILE__|`__LINE__|`begin_keywords|`celldefine|`default_nettype|'
             r'`define|`else|`elsif|`end_keywords|`endcelldefine|`endif|'
             r'`ifdef|`ifndef|`include|`line|`nounconnected_drive|`pragma|'
             r'`resetall|`timescale|`unconnected_drive|`undef|`undefineall)\b',
             Comment.Preproc ),

            (r'(\$display|\$displayb|\$displayh|\$displayo|\$dumpall|\$dumpfile|'
             r'\$dumpflush|\$dumplimit|\$dumpoff|\$dumpon|\$dumpports|'
             r'\$dumpportsall|\$dumpportsflush|\$dumpportslimit|\$dumpportsoff|'
             r'\$dumpportson|\$dumpvars|\$fclose|\$fdisplay|\$fdisplayb|'
             r'\$fdisplayh|\$fdisplayo|\$feof|\$ferror|\$fflush|\$fgetc|'
             r'\$fgets|\$fmonitor|\$fmonitorb|\$fmonitorh|\$fmonitoro|'
             r'\$fopen|\$fread|\$fscanf|\$fseek|\$fstrobe|\$fstrobeb|\$fstrobeh|'
             r'\$fstrobeo|\$ftell|\$fwrite|\$fwriteb|\$fwriteh|\$fwriteo|'
             r'\$monitor|\$monitorb|\$monitorh|\$monitoro|\$monitoroff|'
             r'\$monitoron|\$plusargs|\$readmemb|\$readmemh|\$rewind|\$sformat|'
             r'\$sformatf|\$sscanf|\$strobe|\$strobeb|\$strobeh|\$strobeo|'
             r'\$swrite|\$swriteb|\$swriteh|\$swriteo|\$test|\$ungetc|'
             r'\$value\$plusargs|\$write|\$writeb|\$writeh|\$writememb|'
             r'\$writememh|\$writeo)\b' , Name.Builtin ),

            (r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),

            # Fix: the original ran 'wor' and 'shortreal' together into the
            # single dead alternative 'worshortreal' (missing '|'), so
            # neither keyword was ever highlighted as a type.
            (r'(byte|shortint|int|longint|integer|time|'
             r'bit|logic|reg|'
             r'supply0|supply1|tri|triand|trior|tri0|tri1|trireg|uwire|wire|wand|wor|'
             r'shortreal|real|realtime)\b', Keyword.Type),

            ('[a-zA-Z_][a-zA-Z0-9_]*:(?!:)', Name.Label),
            ('[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'classname': [
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop'),
        ],
        'string': [
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|x[a-fA-F0-9]{2,4}|[0-7]{1,3})', String.Escape),
            (r'[^\\"\n]+', String), # all other characters
            (r'\\\n', String), # line continuation
            (r'\\', String), # stray backslash
        ],
        'macro': [
            (r'[^/\n]+', Comment.Preproc),
            (r'/[*](.|\n)*?[*]/', Comment.Multiline),
            (r'//.*?\n', Comment.Single, '#pop'),
            (r'/', Comment.Preproc),
            (r'(?<=\\)\n', Comment.Preproc),
            (r'\n', Comment.Preproc, '#pop'),
        ],
        'import': [
            (r'[a-zA-Z0-9_:]+\*?', Name.Namespace, '#pop')
        ]
    }

    def get_tokens_unprocessed(self, text):
        for index, token, value in \
            RegexLexer.get_tokens_unprocessed(self, text):
            # Convention: mark all upper case names as constants
            if token is Name:
                if value.isupper():
                    token = Name.Constant
            yield index, token, value

    def analyse_text(text):
        if text.startswith('//') or text.startswith('/*'):
            return 0.5
class VhdlLexer(RegexLexer):
    """
    For VHDL source code.

    *New in Pygments 1.5.*
    """
    name = 'vhdl'
    aliases = ['vhdl']
    filenames = ['*.vhdl', '*.vhd']
    mimetypes = ['text/x-vhdl']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\n', Text),
            (r'\s+', Text),
            (r'\\\n', Text), # line continuation
            (r'--(?![!#$%&*+./<=>?@\^|_~]).*?$', Comment.Single),
            (r"'(U|X|0|1|Z|W|L|H|-)'", String.Char),
            (r'[~!%^&*+=|?:<>/-]', Operator),
            (r"'[a-zA-Z_][a-zA-Z0-9_]*", Name.Attribute),
            (r'[()\[\],.;\']', Punctuation),
            (r'"[^\n\\]*"', String),

            (r'(library)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(use)(\s+)(entity)', bygroups(Keyword, Text, Keyword)),
            (r'(use)(\s+)([a-zA-Z_][\.a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Namespace)),
            (r'(entity|component)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
             bygroups(Keyword, Text, Name.Class)),
            (r'(architecture|configuration)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)'
             r'(of)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)(\s+)(is)',
             bygroups(Keyword, Text, Name.Class, Text, Keyword, Text,
                      Name.Class, Text, Keyword)),

            (r'(end)(\s+)', bygroups(using(this), Text), 'endblock'),

            include('types'),
            include('keywords'),
            include('numbers'),

            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name),
        ],
        'endblock': [
            include('keywords'),
            (r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class),
            (r'(\s+)', Text),
            (r';', Punctuation, '#pop'),
        ],
        'types': [
            (r'(boolean|bit|character|severity_level|integer|time|delay_length|'
             r'natural|positive|string|bit_vector|file_open_kind|'
             r'file_open_status|std_ulogic|std_ulogic_vector|std_logic|'
             r'std_logic_vector)\b', Keyword.Type),
        ],
        'keywords': [
            # Fix: 'sli' is not a VHDL reserved word; the shift-left-logical
            # operator is 'sll' (IEEE 1076: sla, sll, sra, srl), so 'sll'
            # was never highlighted while the bogus 'sli' was.
            (r'(abs|access|after|alias|all|and|'
             r'architecture|array|assert|attribute|begin|block|'
             r'body|buffer|bus|case|component|configuration|'
             r'constant|disconnect|downto|else|elsif|end|'
             r'entity|exit|file|for|function|generate|'
             r'generic|group|guarded|if|impure|in|'
             r'inertial|inout|is|label|library|linkage|'
             r'literal|loop|map|mod|nand|new|'
             r'next|nor|not|null|of|on|'
             r'open|or|others|out|package|port|'
             r'postponed|procedure|process|pure|range|record|'
             r'register|reject|return|rol|ror|select|'
             r'severity|signal|shared|sla|sll|sra|'
             r'srl|subtype|then|to|transport|type|'
             r'units|until|use|variable|wait|when|'
             r'while|with|xnor|xor)\b', Keyword),
        ],
        'numbers': [
            (r'\d{1,2}#[0-9a-fA-F_]+#?', Number.Integer),
            (r'[0-1_]+(\.[0-1_])', Number.Integer),
            (r'\d+', Number.Integer),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d+', Number.Float),
            (r'H"[0-9a-fA-F_]+"', Number.Oct),
            (r'O"[0-7_]+"', Number.Oct),
            (r'B"[0-1_]+"', Number.Oct),
        ],
    }
| gpl-3.0 |
er1iang/hfut_stu_lib | hfut/value.py | 3 | 1841 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import re
# Campus codes used as keys into ENV.
HF = 'HF'
XC = 'XC'
# Global runtime configuration defaults.
ENV = {
    # Base URLs of the academic-affairs systems for the two campuses.
    HF: 'http://bkjw.hfut.edu.cn/',
    XC: 'http://222.195.8.201/',
    # Xuancheng-campus academic-affairs hosts; only the first is reachable
    # from outside the campus network.
    'XC_HOSTS': [
        'http://222.195.8.201/',
        'http://172.18.6.93/',
        'http://172.18.6.94/',
        'http://172.18.6.95/',
        'http://172.18.6.96/',
        'http://172.18.6.97/',
        'http://172.18.6.98/',
        'http://172.18.6.99/'
    ],
    # Default HTTP request headers.
    'DEFAULT_HEADERS': {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) '
                      'AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/45.0.2454.101 Safari/537.36'
    },
    # Maximum number of history records kept per session.
    'MAX_HISTORIES': 10,
    # Whether to raise an error on abnormal HTTP status codes.
    'RAISE_FOR_STATUS': False,
    # Character encoding of the academic-affairs pages.
    'SITE_ENCODING': 'GBK',
    # BeautifulSoup feature used globally; change this value to switch the
    # HTML parser.
    'SOUP_FEATURES': 'html.parser',
    # Whether request-argument checking is enabled.
    'REQUEST_ARGUMENTS_CHECK': True,
    # Regex matching characters that are not allowed in request values.
    'ILLEGAL_CHARACTERS_PATTERN': re.compile(r'[,;*@$]'),
    # Parameters for detecting an IP-banned response: minimum body length,
    # maximum body length, and a matching regex.
    'IP_BANNED_RESPONSE': (320, 800, re.compile(r'SQL通用防注入')),
    # Regex for term (semester) names.
    'TERM_PATTERN': re.compile(r'(\d{4})(?:|学年)-\d{4}学年\s*第(一|二|二/三)学期(|/暑期)', flags=re.UNICODE),
    # Regex for login student IDs.
    'ACCOUNT_PATTERN': re.compile(r'^\d{10}$'),
    # Regex for Xuancheng-campus login passwords.
    'XC_PASSWORD_PATTERN': re.compile(r'^[\da-z]{6,12}$'),
    # Regex for Hefei-campus login passwords.
    'HF_PASSWORD_PATTERN': re.compile(r'^[^\s,;*_?@#$%&()+=><]{6,16}$'),
}
| mit |
denis-pitul/django | tests/select_related/models.py | 99 | 3320 | """
Tests for select_related()
``select_related()`` follows all relationships and pre-caches any foreign key
values so that complex trees can be fetched in a single query. However, this
isn't always a good idea, so the ``depth`` argument control how many "levels"
the select-related behavior will traverse.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Who remembers high school biology?
# Root of the taxonomy FK chain: Domain -> Kingdom -> ... -> Species.
@python_2_unicode_compatible
class Domain(models.Model):
    name = models.CharField(max_length=50)
    def __str__(self):
        return self.name
# One FK level below Domain in the taxonomy chain.
@python_2_unicode_compatible
class Kingdom(models.Model):
    name = models.CharField(max_length=50)
    domain = models.ForeignKey(Domain)
    def __str__(self):
        return self.name
# One FK level below Kingdom in the taxonomy chain.
@python_2_unicode_compatible
class Phylum(models.Model):
    name = models.CharField(max_length=50)
    kingdom = models.ForeignKey(Kingdom)
    def __str__(self):
        return self.name
# 'Klass' avoids shadowing the reserved word 'class'; below Phylum.
@python_2_unicode_compatible
class Klass(models.Model):
    name = models.CharField(max_length=50)
    phylum = models.ForeignKey(Phylum)
    def __str__(self):
        return self.name
# One FK level below Klass in the taxonomy chain.
@python_2_unicode_compatible
class Order(models.Model):
    name = models.CharField(max_length=50)
    klass = models.ForeignKey(Klass)
    def __str__(self):
        return self.name
# One FK level below Order in the taxonomy chain.
@python_2_unicode_compatible
class Family(models.Model):
    name = models.CharField(max_length=50)
    order = models.ForeignKey(Order)
    def __str__(self):
        return self.name
# One FK level below Family in the taxonomy chain.
@python_2_unicode_compatible
class Genus(models.Model):
    name = models.CharField(max_length=50)
    family = models.ForeignKey(Family)
    def __str__(self):
        return self.name
# Deepest level of the taxonomy FK chain exercised by select_related tests.
@python_2_unicode_compatible
class Species(models.Model):
    name = models.CharField(max_length=50)
    genus = models.ForeignKey(Genus)
    def __str__(self):
        return self.name
# and we'll invent a new thing so we have a model with two foreign keys
# Model with two FKs to the same target (distinct related_names required).
@python_2_unicode_compatible
class HybridSpecies(models.Model):
    name = models.CharField(max_length=50)
    parent_1 = models.ForeignKey(Species, related_name='child_1')
    parent_2 = models.ForeignKey(Species, related_name='child_2')
    def __str__(self):
        return self.name
# Target of the Pizza.toppings many-to-many relation.
@python_2_unicode_compatible
class Topping(models.Model):
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
# Model with a many-to-many relation (not followed by select_related).
@python_2_unicode_compatible
class Pizza(models.Model):
    name = models.CharField(max_length=100)
    toppings = models.ManyToManyField(Topping)
    def __str__(self):
        return self.name
# Generic tagging model: content_type + object_id form a GenericForeignKey.
@python_2_unicode_compatible
class TaggedItem(models.Model):
    tag = models.CharField(max_length=30)
    content_type = models.ForeignKey(ContentType, related_name='select_related_tagged_items')
    object_id = models.PositiveIntegerField()
    content_object = GenericForeignKey('content_type', 'object_id')
    def __str__(self):
        return self.tag
# Model with a reverse generic relation to TaggedItem.
@python_2_unicode_compatible
class Bookmark(models.Model):
    url = models.URLField()
    tags = GenericRelation(TaggedItem)
    def __str__(self):
        return self.url
| bsd-3-clause |
duyet-website/api.duyet.net | lib/docs/conf.py | 1 | 8187 | # -*- coding: utf-8 -*-
#
# Faker documentation build configuration file, created by
# sphinx-quickstart on Tue Mar 11 11:25:48 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'faker.build_docs',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Faker'
copyright = u'2014, Daniele Faraglia'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.7.5'
# The full version, including alpha/beta/rc tags.
release = '0.7.5'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# -- Sphinx configuration (continued): HTML / LaTeX / man / Texinfo output --
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'Fakerdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'Faker.tex', u'Faker Documentation',
     u'Daniele Faraglia', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'faker', u'Faker Documentation',
     [u'Daniele Faraglia'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Faker', u'Faker Documentation',
     u'Daniele Faraglia', 'Faker', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
coyotevz/nobix-app | nbs/models/misc.py | 1 | 3865 | # -*- coding: utf-8 -*-
from datetime import datetime
from sqlalchemy.ext.declarative import declared_attr
from nbs.models import db
class TimestampMixin(object):
    """Mixin adding ``created``/``modified`` timestamps to a model.

    ``modified`` is refreshed by a ``before_update`` listener, and only
    when the session reports that the object actually has pending changes.
    """
    created = db.Column(db.DateTime, default=datetime.now)
    modified = db.Column(db.DateTime, default=datetime.now,
                         onupdate=datetime.now)

    @staticmethod
    def stamp_modified(mapper, connection, target):
        # Only bump the timestamp when real changes are pending; plain
        # flushes of unmodified objects keep their previous value.
        if db.object_session(target).is_modified(target):
            target.modified = datetime.now()

    @classmethod
    def __declare_last__(cls):
        # Hook invoked by SQLAlchemy's declarative system after mapper
        # configuration completes -- a safe point to attach event listeners.
        db.event.listen(cls, 'before_update', cls.stamp_modified)
class RefEntityMixin(object):
    """Mixin linking a row to an ``Entity`` through a required foreign key.

    Provides an ``entity_id`` column plus an ``entity`` relationship whose
    backref is named after the concrete subclass, lower-cased (e.g.
    ``Address`` -> ``entity.address``).
    """
    @declared_attr
    def entity_id(cls):
        return db.Column('entity_id', db.Integer, db.ForeignKey('entity.id'),
                         nullable=False)

    @declared_attr
    def entity(cls):
        # Backref name derived from the subclass so each mixin user gets
        # its own collection on Entity.
        name = cls.__name__.lower()
        return db.relationship('Entity',
                               backref=db.backref(name, lazy='joined'),
                               lazy='joined')
class Address(RefEntityMixin, db.Model):
    """Stores address information attached to an entity.

    String form: ``street[, city], province[ (postal_code)]``.
    """
    __tablename__ = 'address'
    id = db.Column(db.Integer, primary_key=True)
    address_type = db.Column(db.Unicode)
    street = db.Column(db.Unicode(128), nullable=False)
    city = db.Column(db.Unicode(64))
    province = db.Column(db.Unicode(32), nullable=False)
    postal_code = db.Column(db.Unicode(32))

    def __str__(self):
        # BUG FIX: the first parameter was misspelled "eslf" while the body
        # referenced ``self``, so calling str() on an Address raised
        # NameError. Renamed to the conventional ``self``.
        retval = self.street
        if self.city:
            retval += ", {}".format(self.city)
        retval += ", {}".format(self.province)
        if self.postal_code:
            retval += " ({})".format(self.postal_code)
        return retval

    def __repr__(self):
        return "<Address '{}' of '{}: {}'>".format(
            str(self),
            self.entity.entity_type,
            self.entity.full_name
        )
class Phone(RefEntityMixin, db.Model):
    """Phone number attached to an entity.

    String form: ``[type: ][(prefix)]number[ ext: extension]``.
    """
    __tablename__ = 'phone'
    id = db.Column(db.Integer, primary_key=True)
    phone_type = db.Column(db.Unicode)
    prefix = db.Column(db.Unicode(8))
    number = db.Column(db.Unicode, nullable=False)
    extension = db.Column(db.Unicode(5))

    def __str__(self):
        # Assemble the optional pieces around the mandatory number.
        parts = []
        if self.phone_type:
            parts.append(self.phone_type + ': ')
        if self.prefix:
            parts.append("({})".format(self.prefix))
        parts.append(self.number)
        if self.extension:
            parts.append(" ext: {}".format(self.extension))
        return ''.join(parts)

    def __repr__(self):
        owner = self.entity
        return "<Phone '{}' of '{}: {}'>".format(
            self, owner.entity_type, owner.full_name)
class Email(RefEntityMixin, db.Model):
    """E-mail address attached to an entity.

    String form: ``[type: ]email``.
    """
    __tablename__ = 'email'
    id = db.Column(db.Integer, primary_key=True)
    email_type = db.Column(db.Unicode(50))
    email = db.Column(db.Unicode(50), nullable=False)

    def __str__(self):
        label = '{}: '.format(self.email_type) if self.email_type else ''
        return label + self.email

    def __repr__(self):
        owner = self.entity
        return "<Email '{}' of '{}: {}'>".format(
            self, owner.entity_type, owner.full_name)
class ExtraField(RefEntityMixin, db.Model):
    """Arbitrary key/value data attached to an entity.

    String form: ``field_name: field_value``.
    """
    __tablename__ = 'extra_field'
    id = db.Column(db.Integer, primary_key=True)
    field_name = db.Column(db.Unicode(50), nullable=False)
    field_value = db.Column(db.Unicode(50), nullable=False)

    def __str__(self):
        return '{}: {}'.format(self.field_name, self.field_value)

    def __repr__(self):
        owner = self.entity
        return "<ExtraField '{}' of '{}: {}'>".format(
            self, owner.entity_type, owner.full_name)
| mit |
pablodiguerero/asterisk.api | migrations/versions/4_add_physical_users_.py | 1 | 1968 | """empty message
Revision ID: a374e36d0888
Revises: 4a6559da7594
Create Date: 2017-05-21 22:53:53.490856
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import Session
from models import physical
from models import user
# revision identifiers, used by Alembic.
# NOTE(review): the module docstring above says "Revision ID: a374e36d0888"
# but the value below is '4_add_physical_users' -- confirm which identifier
# is canonical for this migration chain.
revision = '4_add_physical_users'
down_revision = '3_modify_user_fields'
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``crm_physical``/``crm_users`` tables and seed an admin.

    Also links ``crm_users.sip_id`` to ``asterisk_sip_users`` with
    ``ON DELETE SET NULL``.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('crm_physical',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('fam', sa.String(length=255), nullable=True),
    sa.Column('name', sa.String(length=255), nullable=False),
    sa.Column('otch', sa.String(length=255), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('crm_users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('login', sa.String(length=255), nullable=False),
    sa.Column('password', sa.LargeBinary(), nullable=False),
    sa.Column('access_level', sa.Integer(), server_default='10', nullable=False),
    sa.Column('is_active', sa.Boolean(), server_default='f', nullable=False),
    # crm_users.id doubles as a FK to crm_physical (one-to-one).
    sa.ForeignKeyConstraint(['id'], ['crm_physical.id'], ondelete='CASCADE'),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('login')
    )
    op.add_column('crm_users', sa.Column('sip_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'crm_users', 'asterisk_sip_users', ['sip_id'], ['id'], ondelete='SET NULL')
    # ### end Alembic commands ###

    # Seed a default administrator account via the ORM models.
    # NOTE(review): this creates default credentials "admin"/"admin" --
    # deployments should force a password change immediately.
    bind = op.get_bind()
    session = Session(bind)

    phys = physical.Physical("Администратор")
    phys.user = user.User("admin", "admin")
    phys.user.access_level = user.User.BOSS

    session.add(phys)
    session.commit()
def downgrade():
    """Drop the tables created by :func:`upgrade`.

    ``crm_users`` is dropped first so its foreign keys to ``crm_physical``
    and ``asterisk_sip_users`` are removed before the referenced table goes.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('crm_users')
    op.drop_table('crm_physical')
    # ### end Alembic commands ###
| mit |
gkoelln/youtube-dl | youtube_dl/extractor/livestream.py | 10 | 13739 | from __future__ import unicode_literals
import re
import itertools
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urlparse,
)
from ..utils import (
find_xpath_attr,
xpath_attr,
xpath_with_ns,
xpath_text,
orderedSet,
update_url_query,
int_or_none,
float_or_none,
parse_iso8601,
determine_ext,
)
class LivestreamIE(InfoExtractor):
    """Extractor for the current livestream.com site.

    Handles single videos (``.../videos/<id>``), whole events (returned as
    playlists of their videos) and live broadcasts attached to an event.
    """
    IE_NAME = 'livestream'
    _VALID_URL = r'https?://(?:new\.)?livestream\.com/(?:accounts/(?P<account_id>\d+)|(?P<account_name>[^/]+))/(?:events/(?P<event_id>\d+)|(?P<event_name>[^/]+))(?:/videos/(?P<id>\d+))?'
    _TESTS = [{
        'url': 'http://new.livestream.com/CoheedandCambria/WebsterHall/videos/4719370',
        'md5': '53274c76ba7754fb0e8d072716f2292b',
        'info_dict': {
            'id': '4719370',
            'ext': 'mp4',
            'title': 'Live from Webster Hall NYC',
            'timestamp': 1350008072,
            'upload_date': '20121012',
            'duration': 5968.0,
            'like_count': int,
            'view_count': int,
            'thumbnail': r're:^http://.*\.jpg$'
        }
    }, {
        'url': 'http://new.livestream.com/tedx/cityenglish',
        'info_dict': {
            'title': 'TEDCity2.0 (English)',
            'id': '2245590',
        },
        'playlist_mincount': 4,
    }, {
        'url': 'http://new.livestream.com/chess24/tatasteelchess',
        'info_dict': {
            'title': 'Tata Steel Chess',
            'id': '3705884',
        },
        'playlist_mincount': 60,
    }, {
        'url': 'https://new.livestream.com/accounts/362/events/3557232/videos/67864563/player?autoPlay=false&height=360&mute=false&width=640',
        'only_matching': True,
    }, {
        'url': 'http://livestream.com/bsww/concacafbeachsoccercampeonato2015',
        'only_matching': True,
    }]
    # Filled with (account, event) identifiers (ids or names).
    _API_URL_TEMPLATE = 'http://livestream.com/api/accounts/%s/events/%s'

    def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
        # Override of InfoExtractor._parse_smil_formats: Livestream's SMIL
        # carries an Akamai "httpBase" <meta> that video src paths are
        # relative to, and each URL needs Flash-player query arguments.
        base_ele = find_xpath_attr(
            smil, self._xpath_ns('.//meta', namespace), 'name', 'httpBase')
        base = base_ele.get('content') if base_ele is not None else 'http://livestreamvod-f.akamaihd.net/'

        formats = []
        video_nodes = smil.findall(self._xpath_ns('.//video', namespace))

        for vn in video_nodes:
            tbr = int_or_none(vn.attrib.get('system-bitrate'), 1000)
            furl = (
                update_url_query(compat_urlparse.urljoin(base, vn.attrib['src']), {
                    'v': '3.0.3',
                    'fp': 'WIN% 14,0,0,145',
                }))
            if 'clipBegin' in vn.attrib:
                furl += '&ssek=' + vn.attrib['clipBegin']
            formats.append({
                'url': furl,
                'format_id': 'smil_%d' % tbr,
                'ext': 'flv',
                # Lowest priority: SMIL/flv is only a fallback.
                'preference': -1000,
                'tbr': tbr,
            })
        return formats

    def _extract_video_info(self, video_data):
        """Build an info dict for a single (non-live) video."""
        video_id = compat_str(video_data['id'])

        FORMAT_KEYS = (
            ('sd', 'progressive_url'),
            ('hd', 'progressive_url_hd'),
        )

        formats = []
        for format_id, key in FORMAT_KEYS:
            video_url = video_data.get(key)
            if video_url:
                ext = determine_ext(video_url)
                if ext == 'm3u8':
                    continue
                # The bitrate is encoded in the file name, e.g. ..._1500.mp4.
                bitrate = int_or_none(self._search_regex(
                    r'(\d+)\.%s' % ext, video_url, 'bitrate', default=None))
                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'tbr': bitrate,
                    'ext': ext,
                })

        smil_url = video_data.get('smil_url')
        if smil_url:
            formats.extend(self._extract_smil_formats(smil_url, video_id, fatal=False))

        m3u8_url = video_data.get('m3u8_url')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))

        f4m_url = video_data.get('f4m_url')
        if f4m_url:
            formats.extend(self._extract_f4m_formats(
                f4m_url, video_id, f4m_id='hds', fatal=False))
        self._sort_formats(formats)

        comments = [{
            'author_id': comment.get('author_id'),
            'author': comment.get('author', {}).get('full_name'),
            'id': comment.get('id'),
            'text': comment['text'],
            'timestamp': parse_iso8601(comment.get('created_at')),
        } for comment in video_data.get('comments', {}).get('data', [])]

        return {
            'id': video_id,
            'formats': formats,
            'title': video_data['caption'],
            'description': video_data.get('description'),
            'thumbnail': video_data.get('thumbnail_url'),
            # API reports milliseconds; convert to seconds.
            'duration': float_or_none(video_data.get('duration'), 1000),
            'timestamp': parse_iso8601(video_data.get('publish_at')),
            'like_count': video_data.get('likes', {}).get('total'),
            'comment_count': video_data.get('comments', {}).get('total'),
            'view_count': video_data.get('views'),
            'comments': comments,
        }

    def _extract_stream_info(self, stream_info):
        """Build an info dict for a (possibly live) broadcast."""
        broadcast_id = compat_str(stream_info['broadcast_id'])
        is_live = stream_info.get('is_live')

        formats = []
        smil_url = stream_info.get('play_url')
        if smil_url:
            formats.extend(self._extract_smil_formats(smil_url, broadcast_id))

        m3u8_url = stream_info.get('m3u8_url')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, broadcast_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))

        rtsp_url = stream_info.get('rtsp_url')
        if rtsp_url:
            formats.append({
                'url': rtsp_url,
                'format_id': 'rtsp',
            })
        self._sort_formats(formats)

        return {
            'id': broadcast_id,
            'formats': formats,
            'title': self._live_title(stream_info['stream_title']) if is_live else stream_info['stream_title'],
            'thumbnail': stream_info.get('thumbnail_url'),
            'is_live': is_live,
        }

    def _extract_event(self, event_data):
        """Return a live-stream info dict or a playlist of the event's videos."""
        event_id = compat_str(event_data['id'])
        account_id = compat_str(event_data['owner_account_id'])
        feed_root_url = self._API_URL_TEMPLATE % (account_id, event_id) + '/feed.json'

        stream_info = event_data.get('stream_info')
        if stream_info:
            # The event is currently broadcasting.
            return self._extract_stream_info(stream_info)

        last_video = None
        entries = []
        # Page through the event feed; each request resumes from the last
        # video id returned by the previous page, until a page comes back
        # empty.
        for i in itertools.count(1):
            if last_video is None:
                info_url = feed_root_url
            else:
                info_url = '{root}?&id={id}&newer=-1&type=video'.format(
                    root=feed_root_url, id=last_video)
            videos_info = self._download_json(
                info_url, event_id, 'Downloading page {0}'.format(i))['data']
            videos_info = [v['data'] for v in videos_info if v['type'] == 'video']
            if not videos_info:
                break
            for v in videos_info:
                v_id = compat_str(v['id'])
                entries.append(self.url_result(
                    'http://livestream.com/accounts/%s/events/%s/videos/%s' % (account_id, event_id, v_id),
                    'Livestream', v_id, v.get('caption')))
            last_video = videos_info[-1]['id']
        return self.playlist_result(entries, event_id, event_data['full_name'])

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        event = mobj.group('event_id') or mobj.group('event_name')
        account = mobj.group('account_id') or mobj.group('account_name')
        api_url = self._API_URL_TEMPLATE % (account, event)
        if video_id:
            video_data = self._download_json(
                api_url + '/videos/%s' % video_id, video_id)
            return self._extract_video_info(video_data)
        else:
            # No video id in the URL: extract the whole event.
            event_data = self._download_json(api_url, video_id)
            return self._extract_event(event_data)
# The original version of Livestream uses a different system
class LivestreamOriginalIE(InfoExtractor):
    """Extractor for the legacy original.livestream.com site.

    Supports single clips (``/video``), folders of clips (``/folder``,
    returned as playlists) and account-level live streams.
    """
    IE_NAME = 'livestream:original'
    _VALID_URL = r'''(?x)https?://original\.livestream\.com/
        (?P<user>[^/\?#]+)(?:/(?P<type>video|folder)
        (?:(?:\?.*?Id=|/)(?P<id>.*?)(&|$))?)?
        '''
    _TESTS = [{
        'url': 'http://original.livestream.com/dealbook/video?clipId=pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
        'info_dict': {
            'id': 'pla_8aa4a3f1-ba15-46a4-893b-902210e138fb',
            'ext': 'mp4',
            'title': 'Spark 1 (BitCoin) with Cameron Winklevoss & Tyler Winklevoss of Winklevoss Capital',
            'duration': 771.301,
            'view_count': int,
        },
    }, {
        'url': 'https://original.livestream.com/newplay/folder?dirId=a07bf706-d0e4-4e75-a747-b021d84f2fd3',
        'info_dict': {
            'id': 'a07bf706-d0e4-4e75-a747-b021d84f2fd3',
        },
        'playlist_mincount': 4,
    }, {
        # live stream
        'url': 'http://original.livestream.com/znsbahamas',
        'only_matching': True,
    }]

    def _extract_video_info(self, user, video_id):
        """Fetch clip metadata from the XML clipdetails API."""
        api_url = 'http://x%sx.api.channel.livestream.com/2.0/clipdetails?extendedInfo=true&id=%s' % (user, video_id)
        info = self._download_xml(api_url, video_id)

        item = info.find('channel').find('item')
        title = xpath_text(item, 'title')
        # Media RSS namespace for thumbnail/duration.
        media_ns = {'media': 'http://search.yahoo.com/mrss'}
        thumbnail_url = xpath_attr(
            item, xpath_with_ns('media:thumbnail', media_ns), 'url')
        duration = float_or_none(xpath_attr(
            item, xpath_with_ns('media:content', media_ns), 'duration'))
        # Livestream's own namespace for the view counter.
        ls_ns = {'ls': 'http://api.channel.livestream.com/2.0'}
        view_count = int_or_none(xpath_text(
            item, xpath_with_ns('ls:viewsCount', ls_ns)))

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail_url,
            'duration': duration,
            'view_count': view_count,
        }

    def _extract_video_formats(self, video_data, video_id):
        formats = []

        progressive_url = video_data.get('progressiveUrl')
        if progressive_url:
            formats.append({
                'url': progressive_url,
                'format_id': 'http',
            })

        m3u8_url = video_data.get('httpUrl')
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', 'm3u8_native',
                m3u8_id='hls', fatal=False))

        rtsp_url = video_data.get('rtspUrl')
        if rtsp_url:
            formats.append({
                'url': rtsp_url,
                'format_id': 'rtsp',
            })

        self._sort_formats(formats)
        return formats

    def _extract_folder(self, url, folder_id):
        webpage = self._download_webpage(url, folder_id)
        # Collect clip links both from folder list items and from
        # livestre.am short links, preserving first-seen order.
        paths = orderedSet(re.findall(
            r'''(?x)(?:
                <li\s+class="folder">\s*<a\s+href="|
                <a\s+href="(?=https?://livestre\.am/)
            )([^"]+)"''', webpage))

        entries = [{
            '_type': 'url',
            'url': compat_urlparse.urljoin(url, p),
        } for p in paths]

        return self.playlist_result(entries, folder_id)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user = mobj.group('user')
        url_type = mobj.group('type')
        content_id = mobj.group('id')
        if url_type == 'folder':
            return self._extract_folder(url, content_id)
        else:
            # this url is used on mobile devices
            stream_url = 'http://x%sx.api.channel.livestream.com/3.0/getstream.json' % user
            info = {}
            if content_id:
                stream_url += '?id=%s' % content_id
                info = self._extract_video_info(user, content_id)
            else:
                # Account-level live stream: scrape metadata from the page.
                content_id = user
                webpage = self._download_webpage(url, content_id)
                info = {
                    'title': self._og_search_title(webpage),
                    'description': self._og_search_description(webpage),
                    'thumbnail': self._search_regex(r'channelLogo\.src\s*=\s*"([^"]+)"', webpage, 'thumbnail', None),
                }
            video_data = self._download_json(stream_url, content_id)
            is_live = video_data.get('isLive')
            info.update({
                'id': content_id,
                'title': self._live_title(info['title']) if is_live else info['title'],
                'formats': self._extract_video_formats(video_data, content_id),
                'is_live': is_live,
            })
            return info
class LivestreamShortenerIE(InfoExtractor):
    """Resolves livestre.am short links to their target URL.

    The server doesn't support HEAD requests, so the generic extractor
    cannot detect the redirection; instead the page is downloaded and the
    target is read from its ``og:url`` meta tag.
    """
    IE_NAME = 'livestream:shortener'
    IE_DESC = False  # Do not list
    _VALID_URL = r'https?://livestre\.am/(?P<id>.+)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        # Renamed from ``id``, which shadowed the builtin of the same name.
        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        return {
            '_type': 'url',
            'url': self._og_search_url(webpage),
        }
| unlicense |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/tensorflow/python/framework/cpp_shape_inference_pb2.py | 9 | 5959 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/python/framework/cpp_shape_inference.proto
# NOTE(review): this module is machine-generated by the protocol buffer
# compiler (protoc) -- regenerate it from the .proto file instead of
# editing by hand.

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2


# File descriptor embedding the serialized .proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tensorflow/python/framework/cpp_shape_inference.proto',
  package='tensorflow',
  syntax='proto3',
  serialized_pb=_b('\n5tensorflow/python/framework/cpp_shape_inference.proto\x12\ntensorflow\x1a%tensorflow/core/framework/types.proto\x1a,tensorflow/core/framework/tensor_shape.proto\"\xa6\x01\n\x17\x43ppShapeInferenceResult\x12+\n\x05shape\x18\x01 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\x32\n\x0chandle_shape\x18\x02 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12*\n\x0chandle_dtype\x18\x03 \x01(\x0e\x32\x14.tensorflow.DataType\"e\n\x1d\x43ppShapeInferenceInputsNeeded\x12\x1c\n\x14input_tensors_needed\x18\x01 \x03(\x05\x12&\n\x1einput_tensors_as_shapes_needed\x18\x02 \x03(\x05\x42\x03\xf8\x01\x01\x62\x06proto3')
  ,
  dependencies=[tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)




_CPPSHAPEINFERENCERESULT = _descriptor.Descriptor(
  name='CppShapeInferenceResult',
  full_name='tensorflow.CppShapeInferenceResult',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='shape', full_name='tensorflow.CppShapeInferenceResult.shape', index=0,
      number=1, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='handle_shape', full_name='tensorflow.CppShapeInferenceResult.handle_shape', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='handle_dtype', full_name='tensorflow.CppShapeInferenceResult.handle_dtype', index=2,
      number=3, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=155,
  serialized_end=321,
)


_CPPSHAPEINFERENCEINPUTSNEEDED = _descriptor.Descriptor(
  name='CppShapeInferenceInputsNeeded',
  full_name='tensorflow.CppShapeInferenceInputsNeeded',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='input_tensors_needed', full_name='tensorflow.CppShapeInferenceInputsNeeded.input_tensors_needed', index=0,
      number=1, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='input_tensors_as_shapes_needed', full_name='tensorflow.CppShapeInferenceInputsNeeded.input_tensors_as_shapes_needed', index=1,
      number=2, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=323,
  serialized_end=424,
)

# Resolve cross-file message/enum references.
_CPPSHAPEINFERENCERESULT.fields_by_name['shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_CPPSHAPEINFERENCERESULT.fields_by_name['handle_shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_CPPSHAPEINFERENCERESULT.fields_by_name['handle_dtype'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
DESCRIPTOR.message_types_by_name['CppShapeInferenceResult'] = _CPPSHAPEINFERENCERESULT
DESCRIPTOR.message_types_by_name['CppShapeInferenceInputsNeeded'] = _CPPSHAPEINFERENCEINPUTSNEEDED

CppShapeInferenceResult = _reflection.GeneratedProtocolMessageType('CppShapeInferenceResult', (_message.Message,), dict(
  DESCRIPTOR = _CPPSHAPEINFERENCERESULT,
  __module__ = 'tensorflow.python.framework.cpp_shape_inference_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.CppShapeInferenceResult)
  ))
_sym_db.RegisterMessage(CppShapeInferenceResult)

CppShapeInferenceInputsNeeded = _reflection.GeneratedProtocolMessageType('CppShapeInferenceInputsNeeded', (_message.Message,), dict(
  DESCRIPTOR = _CPPSHAPEINFERENCEINPUTSNEEDED,
  __module__ = 'tensorflow.python.framework.cpp_shape_inference_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.CppShapeInferenceInputsNeeded)
  ))
_sym_db.RegisterMessage(CppShapeInferenceInputsNeeded)


DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\370\001\001'))
# @@protoc_insertion_point(module_scope)
| agpl-3.0 |
wesm/ibis | dev/merge-pr.py | 1 | 8184 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Utility for creating well-formed pull request merges and pushing them to
# Apache.
# usage: ./apache-pr-merge.py (see config env vars below)
#
# Lightly modified from version of this script in incubator-parquet-format
from __future__ import print_function
from requests.auth import HTTPBasicAuth
import requests
import os
import subprocess
import sys
import textwrap
from six.moves import input
import six
# Repository root: two path components up from this script (dev/merge-pr.py).
IBIS_HOME = os.path.abspath(__file__).rsplit("/", 2)[0]
PROJECT_NAME = 'ibis'
print("IBIS_HOME = " + IBIS_HOME)

# Remote name with the PR
PR_REMOTE_NAME = os.environ.get("PR_REMOTE_NAME", "upstream")
# Remote name where results pushed
PUSH_REMOTE_NAME = os.environ.get("PUSH_REMOTE_NAME", "upstream")

GITHUB_BASE = "https://github.com/cloudera/" + PROJECT_NAME + "/pull"
GITHUB_API_BASE = "https://api.github.com/repos/cloudera/" + PROJECT_NAME

# Prefix added to temporary branches
BRANCH_PREFIX = "PR_TOOL"

# All git commands below assume the CWD is the repository root.
os.chdir(IBIS_HOME)
# Select the GitHub API fetcher: authenticated access raises the API rate
# limit and is needed for private repositories; anonymous access suffices
# for public ones.
auth_required = False

if auth_required:
    GITHUB_USERNAME = os.environ['GITHUB_USER']
    import getpass
    GITHUB_PASSWORD = getpass.getpass('Enter github.com password for %s:'
                                      % GITHUB_USERNAME)

    def get_json_auth(url):
        # GET *url* with HTTP basic auth and return the decoded JSON body.
        auth = HTTPBasicAuth(GITHUB_USERNAME, GITHUB_PASSWORD)
        req = requests.get(url, auth=auth)
        return req.json()

    get_json = get_json_auth
else:
    def get_json_no_auth(url):
        # Anonymous GET of *url*, returning the decoded JSON body.
        req = requests.get(url)
        return req.json()

    get_json = get_json_no_auth
def fail(msg):
    """Print *msg*, restore the original git state and exit non-zero."""
    print(msg)
    clean_up()
    sys.exit(-1)
def run_cmd(cmd):
    """Run *cmd* (string or argv list) and return its decoded stdout.

    A string command is split on single spaces, so arguments that contain
    spaces must be passed as a list. Raises CalledProcessError on a
    non-zero exit status after echoing the child's captured output.
    """
    if isinstance(cmd, six.string_types):
        cmd = cmd.split(' ')
    try:
        output = subprocess.check_output(cmd)
    except subprocess.CalledProcessError as e:
        # this avoids hiding the stdout / stderr of failed processes
        print('Command failed: %s' % cmd)
        print('With output:')
        print('--------------')
        print(e.output)
        print('--------------')
        raise e

    if isinstance(output, six.binary_type):
        output = output.decode('utf-8')
    return output
def continue_maybe(prompt):
    """Show *prompt*; abort the whole script unless the user answers 'y'."""
    result = input("\n%s (y/n): " % prompt)
    if result.lower() != "y":
        fail("Okay, exiting")
# Abbreviated hash of HEAD at script start; used to restore state later.
original_head = run_cmd("git rev-parse HEAD")[:8]


def clean_up():
    """Check out the original HEAD and delete temporary PR_TOOL branches."""
    print("Restoring head pointer to %s" % original_head)
    run_cmd("git checkout %s" % original_head)

    branches = run_cmd("git branch").replace(" ", "").split("\n")

    for branch in filter(lambda x: x.startswith(BRANCH_PREFIX), branches):
        print("Deleting local branch %s" % branch)
        run_cmd("git branch -D %s" % branch)
# merge the requested PR and return the merge hash
def merge_pr(pr_num, target_ref):
    """Squash-merge pull request *pr_num* into *target_ref* and push it.

    Uses the module-level globals ``title``, ``body`` and ``pr_repo_desc``
    (assigned in the script body below this function) when composing the
    commit message. Returns the abbreviated hash of the pushed commit.
    """
    pr_branch_name = "%s_MERGE_PR_%s" % (BRANCH_PREFIX, pr_num)
    target_branch_name = "%s_MERGE_PR_%s_%s" % (BRANCH_PREFIX, pr_num,
                                                target_ref.upper())
    run_cmd("git fetch %s pull/%s/head:%s" % (PR_REMOTE_NAME, pr_num,
                                              pr_branch_name))
    run_cmd("git fetch %s %s:%s" % (PUSH_REMOTE_NAME, target_ref,
                                    target_branch_name))
    run_cmd("git checkout %s" % target_branch_name)

    had_conflicts = False
    try:
        run_cmd(['git', 'merge', pr_branch_name, '--squash'])
    except Exception as e:
        # Give the operator a chance to resolve conflicts by hand.
        msg = ("Error merging: %s\nWould you like to "
               "manually fix-up this merge?" % e)
        continue_maybe(msg)
        msg = ("Okay, please fix any conflicts and 'git add' "
               "conflicting files... Finished?")
        continue_maybe(msg)
        had_conflicts = True

    # Authors ordered by commit count, most active first; the top one is
    # credited as the squash commit's author.
    commit_authors = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                              '--pretty=format:%an <%ae>']).split("\n")
    distinct_authors = sorted(set(commit_authors),
                              key=lambda x: commit_authors.count(x),
                              reverse=True)
    primary_author = distinct_authors[0]
    # NOTE(review): "--pretty=format:" separates entries with a single
    # newline, so splitting on "\n\n" likely yields one big chunk rather
    # than one entry per commit -- confirm intended behavior.
    commits = run_cmd(['git', 'log', 'HEAD..%s' % pr_branch_name,
                       '--pretty=format:%h [%an] %s']).split("\n\n")

    merge_message_flags = []

    merge_message_flags += ["-m", title]
    if body is not None:
        merge_message_flags += ["-m", '\n'.join(textwrap.wrap(body))]

    authors = "\n".join(["Author: %s" % a for a in distinct_authors])

    merge_message_flags += ["-m", authors]

    if had_conflicts:
        committer_name = run_cmd("git config --get user.name").strip()
        committer_email = run_cmd("git config --get user.email").strip()
        message = ("This patch had conflicts when merged, "
                   "resolved by\nCommitter: %s <%s>" %
                   (committer_name, committer_email))
        merge_message_flags += ["-m", message]

    # The string "Closes #%s" string is required for GitHub to correctly close
    # the PR
    merge_message_flags += [
        "-m",
        "Closes #%s from %s and squashes the following commits:"
        % (pr_num, pr_repo_desc)]
    for c in commits:
        merge_message_flags += ["-m", c]

    run_cmd(['git', 'commit',
             '--no-verify',  # do not run commit hooks
             '--author="%s"' % primary_author] +
            merge_message_flags)

    continue_maybe("Merge complete (local ref %s). Push to %s?" % (
        target_branch_name, PUSH_REMOTE_NAME))
    try:
        run_cmd('git push %s %s:%s' % (PUSH_REMOTE_NAME, target_branch_name,
                                       target_ref))
    except Exception as e:
        # NOTE(review): fail() also calls clean_up(), so cleanup runs twice
        # here -- harmless but redundant.
        clean_up()
        fail("Exception while pushing: %s" % e)

    merge_hash = run_cmd("git rev-parse %s" % target_branch_name)[:8]
    clean_up()
    print("Pull request #%s merged!" % pr_num)
    print("Merge hash: %s" % merge_hash)
    return merge_hash
# ---- Main script flow ---------------------------------------------------
branches = get_json("%s/branches" % GITHUB_API_BASE)
# NOTE(review): ``branch_names`` is computed but never used below; on
# Python 3 ``filter`` is lazy, so the predicate never even runs.
branch_names = filter(lambda x: x.startswith("branch-"),
                      [x['name'] for x in branches])

pr_num = input("Which pull request would you like to merge? (e.g. 34): ")
pr = get_json("%s/pulls/%s" % (GITHUB_API_BASE, pr_num))

url = pr["url"]
title = pr["title"]
body = pr["body"]
target_ref = pr["base"]["ref"]
user_login = pr["user"]["login"]
base_ref = pr["head"]["ref"]
pr_repo_desc = "%s/%s" % (user_login, base_ref)

# Already-merged PRs are treated as a backport lookup: find the existing
# squash commit and report it instead of merging again.
if pr["merged"] is True:
    print("Pull request {0} has already been merged, assuming "
          "you want to backport".format(pr_num))
    merge_commit_desc = run_cmd([
        'git', 'log', '--merges', '--first-parent',
        '--grep=pull request #%s' % pr_num, '--oneline']).split("\n")[0]
    if merge_commit_desc == "":
        fail("Couldn't find any merge commit for #{0}"
             ", you may need to update HEAD.".format(pr_num))

    merge_hash = merge_commit_desc[:7]
    message = merge_commit_desc[8:]

    print("Found: %s" % message)
    sys.exit(0)

if not bool(pr["mergeable"]):
    # NOTE(review): GitHub returns null for "mergeable" while its merge
    # check is still running; that case also lands here.
    msg = ("Pull request {0} is not mergeable in its current form.\n"
           "Continue? (experts only!)".format(pr_num))
    continue_maybe(msg)

print("\n=== Pull Request #%s ===" % pr_num)
print("title\t%s\nsource\t%s\ntarget\t%s\nurl\t%s" % (
    title, pr_repo_desc, target_ref, url))
continue_maybe("Proceed with merging pull request #%s?" % pr_num)

merged_refs = [target_ref]

merge_hash = merge_pr(pr_num, target_ref)
| apache-2.0 |
itsjeyd/edx-platform | lms/djangoapps/verify_student/tests/test_services.py | 16 | 8259 | """
Tests of re-verification service.
"""
import ddt
from opaque_keys.edx.keys import CourseKey
from course_modes.models import CourseMode
from course_modes.tests.factories import CourseModeFactory
from student.models import CourseEnrollment
from student.tests.factories import UserFactory
from lms.djangoapps.verify_student.models import VerificationCheckpoint, VerificationStatus, SkippedReverification
from lms.djangoapps.verify_student.services import ReverificationService
from openedx.core.djangoapps.credit.api import get_credit_requirement_status, set_credit_requirements
from openedx.core.djangoapps.credit.models import CreditCourse
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
@ddt.ddt
class TestReverificationService(ModuleStoreTestCase):
    """
    Tests for the re-verification service.

    NOTE: Python 2 module (uses ``unicode``).  Each test works against a
    freshly created course with a "verified" course mode and a verified
    enrollment set up in ``setUp``.
    """
    def setUp(self):
        super(TestReverificationService, self).setUp()
        self.user = UserFactory.create(username="rusty", password="test")
        self.course = CourseFactory.create(org='Robot', number='999', display_name='Test Course')
        self.course_id = self.course.id
        # A "verified" mode must exist for the course so the enrollment
        # below can be made in CourseMode.VERIFIED.
        CourseModeFactory.create(
            mode_slug="verified",
            course_id=self.course_id,
            min_price=100,
        )
        self.course_key = CourseKey.from_string(unicode(self.course_id))
        self.item = ItemFactory.create(parent=self.course, category='chapter', display_name='Test Section')
        self.final_checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/final_uuid'.format(
            org=self.course_id.org, course=self.course_id.course
        )
        # Enroll in a verified mode
        self.enrollment = CourseEnrollment.enroll(self.user, self.course_id, mode=CourseMode.VERIFIED)
    @ddt.data('final', 'midterm')
    def test_start_verification(self, checkpoint_name):
        """Test the 'start_verification' service method.
        Check that if a reverification checkpoint exists for a specific course
        then 'start_verification' method returns that checkpoint otherwise it
        creates that checkpoint.
        """
        reverification_service = ReverificationService()
        checkpoint_location = u'i4x://{org}/{course}/edx-reverification-block/{checkpoint}'.format(
            org=self.course_id.org, course=self.course_id.course, checkpoint=checkpoint_name
        )
        expected_url = (
            '/verify_student/reverify'
            '/{course_key}'
            '/{checkpoint_location}/'
        ).format(course_key=unicode(self.course_id), checkpoint_location=checkpoint_location)
        self.assertEqual(
            reverification_service.start_verification(unicode(self.course_id), checkpoint_location),
            expected_url
        )
    def test_get_status(self):
        """Test the verification statuses of a user for a given 'checkpoint'
        and 'course_id'.
        """
        reverification_service = ReverificationService()
        # No checkpoint/status rows yet -> no status.
        self.assertIsNone(
            reverification_service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        )
        checkpoint_obj = VerificationCheckpoint.objects.create(
            course_id=unicode(self.course_id),
            checkpoint_location=self.final_checkpoint_location
        )
        VerificationStatus.objects.create(checkpoint=checkpoint_obj, user=self.user, status='submitted')
        self.assertEqual(
            reverification_service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location),
            'submitted'
        )
        # The most recent status row wins.
        VerificationStatus.objects.create(checkpoint=checkpoint_obj, user=self.user, status='approved')
        self.assertEqual(
            reverification_service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location),
            'approved'
        )
    def test_skip_verification(self):
        """
        Test adding skip attempt of a user for a reverification checkpoint.
        """
        reverification_service = ReverificationService()
        VerificationCheckpoint.objects.create(
            course_id=unicode(self.course_id),
            checkpoint_location=self.final_checkpoint_location
        )
        reverification_service.skip_verification(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        self.assertEqual(
            SkippedReverification.objects.filter(user=self.user, course_id=self.course_id).count(),
            1
        )
        # now test that a user can have only one entry for a skipped
        # reverification for a course
        reverification_service.skip_verification(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        self.assertEqual(
            SkippedReverification.objects.filter(user=self.user, course_id=self.course_id).count(),
            1
        )
        # testing service for skipped attempt.
        self.assertEqual(
            reverification_service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location),
            'skipped'
        )
    @ddt.data(
        *CourseMode.CREDIT_ELIGIBLE_MODES
    )
    def test_declined_verification_on_skip(self, mode):
        """Test that status with value 'declined' is added in credit
        requirement status model when a user skip's an ICRV.
        """
        reverification_service = ReverificationService()
        checkpoint = VerificationCheckpoint.objects.create(
            course_id=unicode(self.course_id),
            checkpoint_location=self.final_checkpoint_location
        )
        # Create credit course and set credit requirements.
        CreditCourse.objects.create(course_key=self.course_key, enabled=True)
        self.enrollment.update_enrollment(mode=mode)
        set_credit_requirements(
            self.course_key,
            [
                {
                    "namespace": "reverification",
                    "name": checkpoint.checkpoint_location,
                    "display_name": "Assessment 1",
                    "criteria": {},
                }
            ]
        )
        reverification_service.skip_verification(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        requirement_status = get_credit_requirement_status(
            self.course_key, self.user.username, 'reverification', checkpoint.checkpoint_location
        )
        self.assertEqual(SkippedReverification.objects.filter(user=self.user, course_id=self.course_id).count(), 1)
        self.assertEqual(len(requirement_status), 1)
        self.assertEqual(requirement_status[0].get('name'), checkpoint.checkpoint_location)
        self.assertEqual(requirement_status[0].get('status'), 'declined')
    def test_get_attempts(self):
        """Check verification attempts count against a user for a given
        'checkpoint' and 'course_id'.
        """
        reverification_service = ReverificationService()
        course_id = unicode(self.course_id)
        self.assertEqual(
            reverification_service.get_attempts(self.user.id, course_id, self.final_checkpoint_location),
            0
        )
        # now create a checkpoint and add user's entry against it then test
        # that the 'get_attempts' service method returns correct count
        checkpoint_obj = VerificationCheckpoint.objects.create(
            course_id=course_id,
            checkpoint_location=self.final_checkpoint_location
        )
        VerificationStatus.objects.create(checkpoint=checkpoint_obj, user=self.user, status='submitted')
        self.assertEqual(
            reverification_service.get_attempts(self.user.id, course_id, self.final_checkpoint_location),
            1
        )
    def test_not_in_verified_track(self):
        # No longer enrolled in a verified track
        self.enrollment.update_enrollment(mode=CourseMode.HONOR)
        # Should be marked as "skipped" (opted out)
        service = ReverificationService()
        status = service.get_status(self.user.id, unicode(self.course_id), self.final_checkpoint_location)
        self.assertEqual(status, service.NON_VERIFIED_TRACK)
| agpl-3.0 |
hsaputra/tensorflow | tensorflow/python/kernel_tests/sparse_reorder_op_test.py | 94 | 5261 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import sparse_ops
import tensorflow.python.ops.sparse_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
class SparseReorderTest(test.TestCase):
  """Tests for sparse_ops.sparse_reorder (canonical row-major index order)."""

  def _SparseTensorPlaceholder(self):
    # SparseTensor backed by placeholders so values can be fed at run time.
    return sparse_tensor.SparseTensor(
        array_ops.placeholder(dtypes.int64),
        array_ops.placeholder(dtypes.float64),
        array_ops.placeholder(dtypes.int64))
  def _SparseTensorValue_5x6(self, permutation):
    # Fixed 5x6 sparse value; `permutation` shuffles the nonzero entries so
    # tests can exercise out-of-order input.
    ind = np.array([[0, 0], [1, 0], [1, 3], [1, 4], [3, 2],
                    [3, 3]]).astype(np.int64)
    val = np.array([0, 10, 13, 14, 32, 33]).astype(np.float64)
    ind = ind[permutation]
    val = val[permutation]
    shape = np.array([5, 6]).astype(np.int64)
    return sparse_tensor.SparseTensorValue(ind, val, shape)
  def testStaticShapeInfoPreserved(self):
    sp_input = sparse_tensor.SparseTensor.from_value(
        self._SparseTensorValue_5x6(np.arange(6)))
    self.assertAllEqual((5, 6), sp_input.get_shape())
    sp_output = sparse_ops.sparse_reorder(sp_input)
    self.assertAllEqual((5, 6), sp_output.get_shape())
  def testAlreadyInOrder(self):
    with self.test_session(use_gpu=False) as sess:
      input_val = self._SparseTensorValue_5x6(np.arange(6))
      sp_output = sparse_ops.sparse_reorder(input_val)
      output_val = sess.run(sp_output)
      # Already sorted input must pass through unchanged.
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
  def testFeedAlreadyInOrder(self):
    with self.test_session(use_gpu=False) as sess:
      sp_input = self._SparseTensorPlaceholder()
      input_val = self._SparseTensorValue_5x6(np.arange(6))
      sp_output = sparse_ops.sparse_reorder(sp_input)
      output_val = sess.run(sp_output, {sp_input: input_val})
      self.assertAllEqual(output_val.indices, input_val.indices)
      self.assertAllEqual(output_val.values, input_val.values)
      self.assertAllEqual(output_val.dense_shape, input_val.dense_shape)
  def testOutOfOrder(self):
    expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
    with self.test_session(use_gpu=False) as sess:
      for _ in range(5):  # To test various random permutations
        input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
        sp_output = sparse_ops.sparse_reorder(input_val)
        output_val = sess.run(sp_output)
        self.assertAllEqual(output_val.indices, expected_output_val.indices)
        self.assertAllEqual(output_val.values, expected_output_val.values)
        self.assertAllEqual(output_val.dense_shape,
                            expected_output_val.dense_shape)
  def testFeedOutOfOrder(self):
    expected_output_val = self._SparseTensorValue_5x6(np.arange(6))
    with self.test_session(use_gpu=False) as sess:
      for _ in range(5):  # To test various random permutations
        sp_input = self._SparseTensorPlaceholder()
        input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
        sp_output = sparse_ops.sparse_reorder(sp_input)
        output_val = sess.run(sp_output, {sp_input: input_val})
        self.assertAllEqual(output_val.indices, expected_output_val.indices)
        self.assertAllEqual(output_val.values, expected_output_val.values)
        self.assertAllEqual(output_val.dense_shape,
                            expected_output_val.dense_shape)
  def testGradients(self):
    with self.test_session(use_gpu=False):
      for _ in range(5):  # To test various random permutations
        input_val = self._SparseTensorValue_5x6(np.random.permutation(6))
        sp_input = sparse_tensor.SparseTensor(input_val.indices,
                                              input_val.values,
                                              input_val.dense_shape)
        sp_output = sparse_ops.sparse_reorder(sp_input)
        # Numeric gradient check on the values; reorder is a permutation,
        # so its gradient should be exact up to float noise.
        err = gradient_checker.compute_gradient_error(
            sp_input.values,
            input_val.values.shape,
            sp_output.values,
            input_val.values.shape,
            x_init_value=input_val.values)
        self.assertLess(err, 1e-11)
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
vinoth3v/In | In/core/valuator.py | 1 | 7873 | import re
from In.core.object_meta import ObjectMetaBase
class ValuatorContainer(dict):
	'''dict of valuator instances, created lazily by name on first access.'''
	def __missing__(self, key):
		# Look up the registered Valuator class for this key and cache a
		# single instance of it for all future lookups.
		instance = IN.register.get_class(key, 'Valuator')()
		self[key] = instance
		return instance
class ValuatorEngine:
	'''Valuator class that valuate values based on validation rules.
	Instance available as IN.valuator

	A rule is either ``['Type', arg, ..., message]`` or a list of such
	rules ``[[...], [...]]``, which are ANDed together.
	'''

	# dict of all Valuator instances (lazily populated by key)
	valuators = ValuatorContainer()

	def validate(self, value, rule): # rule is ['type', args] or [[], [], []]
		'''Validate *value* against *rule*.

		Returns ``[True]`` on success or ``[False, message]`` for the first
		failing sub-rule.  Any exception raised by a valuator is reported
		as a failure with the stringified exception as the message.

		#TODO: allow per false error message
		rule = [
			'And', [
				['Length', '>', 6, 'The value length should be greater than 6.'],
				['Not', [['Num']],
				['Or', [
					['Email', 'Invalid email address.'],
					['Domain'],
					['Url', 'Invalid Url.'],
				]],
			]],
		]
		'''
		if not rule: # empty list: nothing to check
			return [True]
		try:
			firstitem = rule[0]
			item_type = type(firstitem)
			if item_type is str: # ['type', args]
				args = rule[1:]
				result = self.valuators[firstitem].validate(value, *args)
				if not result[0]:
					#return [False, args[-1]] # last item is error message
					return result
			elif item_type is list: # [[], [], []] - implicit AND
				for subrule in rule:
					result = self.validate(value, subrule) # recursive
					if not result[0]:
						return result
		except Exception as e:
			IN.logger.debug()
			return [False, str(e)]
		return [True]

	def __getattr__(self, key):
		# Bug fix: the previous code did ``self.key = ...`` which stored the
		# valuator under the literal attribute name 'key' - it never cached
		# anything (so __getattr__ fired on every access) and clobbered any
		# real attribute called 'key'.  Cache under the requested name so
		# later accesses bypass __getattr__ entirely.
		valuator = self.valuators[key]
		setattr(self, key, valuator)
		return valuator
class ValuatorMeta(ObjectMetaBase):
	# Metaclass wiring for the In object system; these names tell
	# ObjectMetaBase which class-type this hierarchy belongs to
	# (presumably used for registry lookup - TODO confirm against
	# In.core.object_meta).
	__class_type_base_name__ = 'ValuatorBase'
	__class_type_name__ = 'Valuator'
class ValuatorBase(dict, metaclass = ValuatorMeta):
	'''Base class of all IN ValuatorBase.
	'''
	__allowed_children__ = None
	__default_child__ = None
	# Comparison table shared by Length/Equal.  Each entry takes
	# (l, al, ml): the tested value, the primary bound and a second bound
	# (the second bound matters only for the two range operators).
	ops = {
		'=' : lambda l, al, ml: l == al,
		'==' : lambda l, al, ml: l == al,
		'!=' : lambda l, al, ml: l != al,
		'>' : lambda l, al, ml: l > al,
		'<' : lambda l, al, ml: l < al,
		'>=' : lambda l, al, ml: l >= al,
		'<=' : lambda l, al, ml: l <= al,
		'<>' : lambda l, al, ml: al < l > ml, # chained: l > al and l > ml
		'><' : lambda l, al, ml: al > l < ml, # chained: l < al and l < ml
	}
	def validate(self, value):
		'''return value should be a list like [False, 'Error message.'] or [True]
		'''
		return [True]
@IN.register('Valuator', type = 'Valuator')
class Valuator(ValuatorBase):
	'''Base class of all IN Valuators.
	Registered with the framework; concrete rule classes subclass this
	and override validate().
	'''
	pass
class And(Valuator):
	'''Composite valuator: every sub-rule must pass.

	Rule form: ``['And', [rule1, rule2, ...], message]`` (the format shown
	in the ValuatorEngine.validate docstring).  Previously this class had
	no validate() of its own, so the inherited ``validate(self, value)``
	raised TypeError on the extra sub-rule argument and 'And' rules always
	failed with a stringified TypeError as the message.
	'''
	def validate(self, value, rules, message = ''):
		for rule in rules:
			result = IN.valuator.validate(value, rule)
			if not result[0]:
				return result # first failure wins
		return [True]
class Or(Valuator):
	'''Composite valuator: at least one sub-rule must pass.

	Rule form: ``['Or', [rule1, rule2, ...], message]`` (the format shown
	in the ValuatorEngine.validate docstring).  Previously this class had
	no validate() of its own, so the inherited ``validate(self, value)``
	raised TypeError on the extra sub-rule argument and 'Or' rules always
	failed with a stringified TypeError as the message.
	'''
	def validate(self, value, rules, message = ''):
		for rule in rules:
			result = IN.valuator.validate(value, rule)
			if result[0]:
				return [True] # first success wins
		return [False, message]
class Not(Valuator):
	def validate(self, value, rule, message = ''):
		'''Invert the outcome of the wrapped sub-rule.'''
		inner = IN.valuator.validate(value, rule[0])
		return [not inner[0], message]
class Empty(Valuator):
	'''Pass only when the value is empty/falsy.'''
	def validate(self, value, message = ''):
		# Explicit list forms: a bare truthy return would be treated as a
		# rule and evaluated again by the engine.
		if value:
			return [False, message]
		return [True]
class NotEmpty(Valuator):
	'''Pass only when the value is non-empty/truthy.'''
	def validate(self, value, message = ''):
		if not value:
			return [False, message]
		return [True]
class Length(Valuator):
	'''Compare len(value) against one or two bounds via the ops table.'''
	def validate(self, value, length = 0, op = '=', mlength = 0, message = ''):
		try:
			passed = self.ops[op](len(value), length, mlength)
			return [passed or False, message]
		except KeyError:
			# Unknown operator string: log and treat as always false.
			IN.logger.debug()
			return [False, message]
class Equal(Valuator):
	'''Compare the value itself against a target via the ops table.'''
	def validate(self, value, tvalue, op = '=', mvalue = 0, message = ''):
		try:
			passed = self.ops[op](value, tvalue, mvalue)
			return [passed or False, message]
		except KeyError:
			# Unknown operator string: log and treat as always false.
			IN.logger.debug()
			return [False, message]
class Regx(Valuator):
	'''Valuator rule class that matches values against a regex.'''
	re_compiled = {} # shared cache: each pattern is compiled only once
	def get_regx(self, regx):
		cached = self.re_compiled.get(regx)
		if cached is None:
			cached = re.compile(regx)
			self.re_compiled[regx] = cached
		return cached
	def validate(self, value, regx, message = ''):
		# A Match object is truthy, None is falsy - the engine only checks
		# truthiness of element 0.
		return [self.get_regx(regx).match(value), message]
class Domain(Regx):
	'''Validate a DNS host name (IDNA aware).'''
	# One DNS label: alphanumerics, hyphens allowed inside but not at ends.
	regex_host = r'(?:(?:[a-zA-Z0-9][a-zA-Z0-9\-]*)?[a-zA-Z0-9])'
	def validate(self, domain, message = ''):
		failure = [False, message]
		# Cheap structural checks before the IDNA round trip.
		if not 4 <= len(domain) <= 255:
			return failure
		if domain.endswith('.') or '.' not in domain:
			return failure
		try:
			# Punycode-encode, then make sure the result round-trips.
			domain = domain.encode('idna').decode('ascii')
			domain.encode('ascii').decode('idna')
		except Exception:
			return failure
		pattern = ''.join((self.regex_host, r'(?:\.', self.regex_host, r')*$'))
		if re.match(pattern, domain):
			return [True]
		return failure
class Email(Regx):
	'''Validate an email address: local part, then delegate the domain.'''
	regex = re.compile(r'^[A-Za-z0-9\.\+_-]')
	atext = r'a-zA-Z0-9_\.\-' # !#\$%&\'\*\+/=\?\^`\{\|\}~
	atext_utf8 = atext + r"\u0080-\U0010FFFF"
	regex_local = re.compile(''.join(('[', atext, ']+(?:\\.[', atext, ']+)*$')))
	regex_local_utf8 = re.compile(''.join(('[', atext_utf8, ']+(?:\\.[', atext_utf8, ']+)*$')))
	def validate(self, value, message = ''):
		'''Return [True] for a valid address, else [False, message].'''
		parts = value.split('@')
		if len(parts) != 2: # exactly one '@' required
			return [False, message]
		local, domain = parts
		if not self.validate_local(local):
			return [False, message]
		# Hand the host part to the Domain valuator.
		domain_result = IN.valuator.validate(domain, ['Domain', message])
		if not domain_result[0]:
			return domain_result
		return [True] # valid
	def validate_local(self, local):
		'''True when the part before '@' is acceptable (ASCII or UTF-8).'''
		if not local or len(local) > 64 or '..' in local:
			return False
		if re.match(self.regex_local, local): # plain ASCII local part
			return True
		# Fall back to the UTF-8 variant of the pattern.
		return re.match(self.regex_local_utf8, local) is not None
class Url(Regx):
	'''Stub URL valuator - real URL checking not implemented yet.'''
	def validate(self, value, message = ''):
		# Bug fix: returning a bare ``True`` violates the ValuatorBase
		# contract ("return value should be a list"); the engine indexes
		# result[0], so ``True[0]`` raised TypeError and every URL failed
		# validation.  Return the list form instead.
		return [True]
class Alpha(Valuator):
	'''Pass when every character in the value is alphabetic.'''
	def validate(self, value, message = ''):
		return [str.isalpha(value), message]
class AlphaNum(Valuator):
	'''Pass when every character in the value is alphanumeric.'''
	def validate(self, value, message = ''):
		return [str.isalnum(value), message]
class Digit(Valuator):
	'''Pass when every character in the value is a digit.'''
	def validate(self, value, message = ''):
		return [str.isdigit(value), message]
class Decimal(Valuator):
	'''Pass when every character in the value is a decimal character.'''
	def validate(self, value, message = ''):
		return [str.isdecimal(value), message]
class Lower(Valuator):
	'''Pass when all cased characters in the value are lowercase.'''
	def validate(self, value, message = ''):
		return [str.islower(value), message]
class Upper(Valuator):
	'''Pass when all cased characters in the value are uppercase.'''
	def validate(self, value, message = ''):
		return [str.isupper(value), message]
class Numeric(Valuator):
	'''Pass when every character in the value is numeric.'''
	def validate(self, value, message = ''):
		return [str.isnumeric(value), message]
class Space(Valuator):
	'''Pass when the value consists only of whitespace characters.
	(str.isspace checks whitespace, not "non printable" in general.)'''
	def validate(self, value, message = ''):
		return [str.isspace(value), message]
class Startswith(Valuator):
	'''Pass when str(value) starts with the given prefix.'''
	def validate(self, value, start, message = ''):
		return [str(value).startswith(start), message]
class Endswith(Valuator):
	'''Pass when str(value) ends with the given suffix.
	NOTE(review): the parameter is named ``start`` (copied from
	Startswith); kept as-is for backward compatibility with any
	keyword-argument callers.
	'''
	def validate(self, value, start, message = ''):
		return [str(value).endswith(start), message]
class In(Valuator):
	'''Pass when the value is a member of the given iterable.'''
	def validate(self, value, itr, message = ''):
		return [value in itr, message]
class INPath(Valuator):
	'''Check whether this string is a valid IN route.'''
	def validate(self, value, message = ''):
		# Stub: accept everything for now.  Bug fix: a bare ``True`` is not
		# subscriptable, so the engine's ``result[0]`` raised TypeError and
		# every path failed validation; return the contract's list form.
		return [True]
class NabarRole(Valuator):
	'''Check whether nabar has this role.'''
	def validate(self, value, message = ''):
		# Stub: accept everything for now, in the list form required by the
		# ValuatorBase contract (a bare ``True`` broke the engine's
		# ``result[0]`` indexing and made the rule always fail).
		return [True]
class NabarAccess(Valuator):
	'''Check whether nabar has this access permissions.'''
	def validate(self, value):
		# Stub: accept everything for now, in the list form required by the
		# ValuatorBase contract (a bare ``True`` broke the engine's
		# ``result[0]`` indexing and made the rule always fail).
		return [True]
class Callback(Valuator):
	'''call the Callback to valuate.'''
	def validate(self, value, message = ''):
		# TODO: actually invoke the configured callback.  Until then accept
		# everything, in the list form required by the ValuatorBase
		# contract (a bare ``True`` made the engine raise TypeError).
		return [True]
#@IN.hook
#def __In_app_init__(app):
### set the valuator
#IN.valuator = ValuatorEngine()
| apache-2.0 |
ku3o/coala-bears | tests/hypertext/BootLintBearTest.py | 7 | 2754 | from bears.hypertext.BootLintBear import BootLintBear
from tests.LocalBearTestHelper import verify_local_bear
good_file = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Test</title>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/
html5shiv.min.js">
</script>
<script src="https://oss.maxcdn.com/respond/1.4.2/
respond.min.js">
</script>
<![endif]-->
<script src="../../lib/jquery.min.js"></script>
<link rel="stylesheet" href="../../lib/qunit.css">
<script src="../../lib/qunit.js"></script>
<script src="../../../dist/browser/bootlint.js"></script>
<script src="../generic-qunit.js"></script>
</head>
<body>
<button type="submit">Submit</button>
<button type="reset">Reset</button>
<button type="button">Button</button>
<div id="qunit"></div>
<ol id="bootlint"></ol>
</body>
</html>
"""
bad_file = """
<html lang="en">
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>Test</title>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.2/
html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/
respond.min.js"></script>
<![endif]-->
<script src="../../lib/jquery.min.js"></script>
<link rel="stylesheet" href="../../lib/qunit.css">
<script src="../../lib/qunit.js"></script>
<script src="../../../dist/browser/bootlint.js"></script>
<script src="../generic-qunit.js"></script>
</head>
<body>
<button>No type set</button>
<div>
<div class="panel-body">
<p>Something</p>
</div>
</div>
<div id="qunit"></div>
<ol id="bootlint">
<li data-lint="Found one or more `<button>`s
missing a `type` attribute."></li>
</ol>
</body>
</html>
"""
# There's a missing type in <button> tag, missing DOCTYPE
# and panel has no body.
BootLintBearTest = verify_local_bear(BootLintBear,
valid_files=(good_file,),
invalid_files=(bad_file,))
BootLintBearDisableTest = verify_local_bear(
BootLintBear,
valid_files=(good_file, bad_file),
invalid_files=(),
settings={'bootlint_ignore': 'W001,W007,E001,E023'})
| agpl-3.0 |
ledtvavs/repository.ledtv | script.tvguide.Vader/resources/lib/dateutil/parser/isoparser.py | 9 | 12845 | # -*- coding: utf-8 -*-
"""
This module offers a parser for ISO-8601 strings
It is intended to support all valid date, time and datetime formats per the
ISO-8601 specification.
"""
from datetime import datetime, timedelta, time, date
import calendar
from dateutil import tz
from functools import wraps
import re
import six
__all__ = ["isoparse", "isoparser"]
def _takes_ascii(f):
    """Decorator: normalize a method's first argument to ASCII ``bytes``.

    Accepts a str, bytes, or file-like object (anything with ``read``);
    raises ValueError when the text contains non-ASCII characters.
    """
    @wraps(f)
    def func(self, str_in, *args, **kwargs):
        # If it's a stream, read the whole thing
        str_in = getattr(str_in, 'read', lambda: str_in)()
        # If it's unicode, turn it into bytes, since ISO-8601 only covers ASCII
        if isinstance(str_in, six.text_type):
            # ASCII is the same in UTF-8
            try:
                str_in = str_in.encode('ascii')
            except UnicodeEncodeError as e:
                msg = 'ISO-8601 strings should contain only ASCII characters'
                six.raise_from(ValueError(msg), e)
        return f(self, str_in, *args, **kwargs)
    return func
class isoparser(object):
    def __init__(self, sep=None):
        """
        :param sep:
            A single character that separates date and time portions. If
            ``None``, the parser will accept any single character.
            For strict ISO-8601 adherence, pass ``'T'``.
        """
        if sep is not None:
            if (len(sep) != 1 or ord(sep) >= 128 or sep in '0123456789'):
                raise ValueError('Separator must be a single, non-numeric ' +
                                 'ASCII character')
            sep = sep.encode('ascii')
        # Stored as bytes (or None): all parsing below operates on ASCII bytes.
        self._sep = sep
    @_takes_ascii
    def isoparse(self, dt_str):
        """
        Parse an ISO-8601 datetime string into a :class:`datetime.datetime`.
        An ISO-8601 datetime string consists of a date portion, followed
        optionally by a time portion - the date and time portions are separated
        by a single character separator, which is ``T`` in the official
        standard. Incomplete date formats (such as ``YYYY-MM``) may *not* be
        combined with a time portion.
        Supported date formats are:
        Common:
        - ``YYYY``
        - ``YYYY-MM`` or ``YYYYMM``
        - ``YYYY-MM-DD`` or ``YYYYMMDD``
        Uncommon:
        - ``YYYY-Www`` or ``YYYYWww`` - ISO week (day defaults to 0)
        - ``YYYY-Www-D`` or ``YYYYWwwD`` - ISO week and day
        The ISO week and day numbering follows the same logic as
        :func:`datetime.date.isocalendar`.
        Supported time formats are:
        - ``hh``
        - ``hh:mm`` or ``hhmm``
        - ``hh:mm:ss`` or ``hhmmss``
        - ``hh:mm:ss.sss`` or ``hh:mm:ss.ssssss`` (3-6 sub-second digits)
        Midnight is a special case for `hh`, as the standard supports both
        00:00 and 24:00 as a representation.
        .. caution::
            Support for fractional components other than seconds is part of the
            ISO-8601 standard, but is not currently implemented in this parser.
        Supported time zone offset formats are:
        - `Z` (UTC)
        - `±HH:MM`
        - `±HHMM`
        - `±HH`
        Offsets will be represented as :class:`dateutil.tz.tzoffset` objects,
        with the exception of UTC, which will be represented as
        :class:`dateutil.tz.tzutc`. Time zone offsets equivalent to UTC (such
        as `+00:00`) will also be represented as :class:`dateutil.tz.tzutc`.
        :param dt_str:
            A string or stream containing only an ISO-8601 datetime string
        :return:
            Returns a :class:`datetime.datetime` representing the string.
            Unspecified components default to their lowest value.
        .. warning::
            As of version 2.7.0, the strictness of the parser should not be
            considered a stable part of the contract. Any valid ISO-8601 string
            that parses correctly with the default settings will continue to
            parse correctly in future versions, but invalid strings that
            currently fail (e.g. ``2017-01-01T00:00+00:00:00``) are not
            guaranteed to continue failing in future versions if they encode
            a valid date.
        """
        components, pos = self._parse_isodate(dt_str)
        if len(dt_str) > pos:
            # Anything after the date must be the separator plus a time part.
            if self._sep is None or dt_str[pos:pos + 1] == self._sep:
                components += self._parse_isotime(dt_str[pos + 1:])
            else:
                raise ValueError('String contains unknown ISO components')
        return datetime(*components)
    @_takes_ascii
    def parse_isodate(self, datestr):
        """
        Parse the date portion of an ISO string.
        :param datestr:
            The string portion of an ISO string, without a separator
        :return:
            Returns a :class:`datetime.date` object
        """
        components, pos = self._parse_isodate(datestr)
        if pos < len(datestr):
            raise ValueError('String contains unknown ISO ' +
                             'components: {}'.format(datestr))
        return date(*components)
    @_takes_ascii
    def parse_isotime(self, timestr):
        """
        Parse the time portion of an ISO string.
        :param timestr:
            The time portion of an ISO string, without a separator
        :return:
            Returns a :class:`datetime.time` object
        """
        return time(*self._parse_isotime(timestr))
    @_takes_ascii
    def parse_tzstr(self, tzstr, zero_as_utc=True):
        """
        Parse a valid ISO time zone string.
        See :func:`isoparser.isoparse` for details on supported formats.
        :param tzstr:
            A string representing an ISO time zone offset
        :param zero_as_utc:
            Whether to return :class:`dateutil.tz.tzutc` for zero-offset zones
        :return:
            Returns :class:`dateutil.tz.tzoffset` for offsets and
            :class:`dateutil.tz.tzutc` for ``Z`` and (if ``zero_as_utc`` is
            specified) offsets equivalent to UTC.
        """
        return self._parse_tzstr(tzstr, zero_as_utc=zero_as_utc)
    # Constants
    _MICROSECOND_END_REGEX = re.compile(b'[-+Z]+')
    _DATE_SEP = b'-'
    _TIME_SEP = b':'
    _MICRO_SEP = b'.'
    def _parse_isodate(self, dt_str):
        # Try the common YYYY[-MM[-DD]] forms first; fall back to the ISO
        # week / ordinal-day forms.  Returns ([year, month, day], pos).
        try:
            return self._parse_isodate_common(dt_str)
        except ValueError:
            return self._parse_isodate_uncommon(dt_str)
    def _parse_isodate_common(self, dt_str):
        # Parse YYYY, YYYY-MM/YYYYMM or YYYY-MM-DD/YYYYMMDD.
        # Unspecified month/day default to 1.
        len_str = len(dt_str)
        components = [1, 1, 1]
        if len_str < 4:
            raise ValueError('ISO string too short')
        # Year
        components[0] = int(dt_str[0:4])
        pos = 4
        if pos >= len_str:
            return components, pos
        has_sep = dt_str[pos:pos + 1] == self._DATE_SEP
        if has_sep:
            pos += 1
        # Month
        if len_str - pos < 2:
            raise ValueError('Invalid common month')
        components[1] = int(dt_str[pos:pos + 2])
        pos += 2
        if pos >= len_str:
            if has_sep:
                return components, pos
            else:
                # YYYYMM without a separator is ambiguous per ISO-8601.
                raise ValueError('Invalid ISO format')
        if has_sep:
            if dt_str[pos:pos + 1] != self._DATE_SEP:
                raise ValueError('Invalid separator in ISO string')
            pos += 1
        # Day
        if len_str - pos < 2:
            raise ValueError('Invalid common day')
        components[2] = int(dt_str[pos:pos + 2])
        return components, pos + 2
    def _parse_isodate_uncommon(self, dt_str):
        # Parse the ISO week (YYYY-Www[-D]) and ordinal day (YYYY-DDD) forms.
        if len(dt_str) < 4:
            raise ValueError('ISO string too short')
        # All ISO formats start with the year
        year = int(dt_str[0:4])
        has_sep = dt_str[4:5] == self._DATE_SEP
        pos = 4 + has_sep  # Skip '-' if it's there
        if dt_str[pos:pos + 1] == b'W':
            # YYYY-?Www-?D?
            pos += 1
            weekno = int(dt_str[pos:pos + 2])
            pos += 2
            dayno = 1
            if len(dt_str) > pos:
                if (dt_str[pos:pos + 1] == self._DATE_SEP) != has_sep:
                    raise ValueError('Inconsistent use of dash separator')
                pos += has_sep
                dayno = int(dt_str[pos:pos + 1])
                pos += 1
            base_date = self._calculate_weekdate(year, weekno, dayno)
        else:
            # YYYYDDD or YYYY-DDD
            if len(dt_str) - pos < 3:
                raise ValueError('Invalid ordinal day')
            ordinal_day = int(dt_str[pos:pos + 3])
            pos += 3
            if ordinal_day < 1 or ordinal_day > (365 + calendar.isleap(year)):
                raise ValueError('Invalid ordinal day' +
                                 ' {} for year {}'.format(ordinal_day, year))
            base_date = date(year, 1, 1) + timedelta(days=ordinal_day - 1)
        components = [base_date.year, base_date.month, base_date.day]
        return components, pos
    def _calculate_weekdate(self, year, week, day):
        """
        Calculate the day of corresponding to the ISO year-week-day calendar.
        This function is effectively the inverse of
        :func:`datetime.date.isocalendar`.
        :param year:
            The year in the ISO calendar
        :param week:
            The week in the ISO calendar - range is [1, 53]
        :param day:
            The day in the ISO calendar - range is [1 (MON), 7 (SUN)]
        :return:
            Returns a :class:`datetime.date`
        """
        if not 0 < week < 54:
            raise ValueError('Invalid week: {}'.format(week))
        if not 0 < day < 8:     # Range is 1-7
            raise ValueError('Invalid weekday: {}'.format(day))
        # Get week 1 for the specific year:
        jan_4 = date(year, 1, 4)   # Week 1 always has January 4th in it
        week_1 = jan_4 - timedelta(days=jan_4.isocalendar()[2] - 1)
        # Now add the specific number of weeks and days to get what we want
        week_offset = (week - 1) * 7 + (day - 1)
        return week_1 + timedelta(days=week_offset)
    def _parse_isotime(self, timestr):
        # Returns [hour, minute, second, microsecond, tzinfo-or-None].
        len_str = len(timestr)
        components = [0, 0, 0, 0, None]
        pos = 0
        comp = -1
        if len(timestr) < 2:
            raise ValueError('ISO time too short')
        has_sep = len_str >= 3 and timestr[2:3] == self._TIME_SEP
        while pos < len_str and comp < 5:
            comp += 1
            if timestr[pos:pos + 1] in b'-+Z':
                # Detect time zone boundary
                components[-1] = self._parse_tzstr(timestr[pos:])
                pos = len_str
                break
            if comp < 3:
                # Hour, minute, second
                components[comp] = int(timestr[pos:pos + 2])
                pos += 2
                if (has_sep and pos < len_str and
                        timestr[pos:pos + 1] == self._TIME_SEP):
                    pos += 1
            if comp == 3:
                # Microsecond
                if timestr[pos:pos + 1] != self._MICRO_SEP:
                    continue
                pos += 1
                # Take up to 6 fractional digits and scale to microseconds.
                us_str = self._MICROSECOND_END_REGEX.split(timestr[pos:pos + 6],
                                                           1)[0]
                components[comp] = int(us_str) * 10**(6 - len(us_str))
                pos += len(us_str)
        if pos < len_str:
            raise ValueError('Unused components in ISO string')
        if components[0] == 24:
            # Standard supports 00:00 and 24:00 as representations of midnight
            if any(component != 0 for component in components[1:4]):
                raise ValueError('Hour may only be 24 at 24:00:00.000')
            components[0] = 0
        return components
    def _parse_tzstr(self, tzstr, zero_as_utc=True):
        # Accepts b'Z', ±HH, ±HHMM or ±HH:MM (as bytes).
        if tzstr == b'Z':
            return tz.tzutc()
        if len(tzstr) not in {3, 5, 6}:
            raise ValueError('Time zone offset must be 1, 3, 5 or 6 characters')
        if tzstr[0:1] == b'-':
            mult = -1
        elif tzstr[0:1] == b'+':
            mult = 1
        else:
            raise ValueError('Time zone offset requires sign')
        hours = int(tzstr[1:3])
        if len(tzstr) == 3:
            minutes = 0
        else:
            minutes = int(tzstr[(4 if tzstr[3:4] == self._TIME_SEP else 3):])
        if zero_as_utc and hours == 0 and minutes == 0:
            return tz.tzutc()
        else:
            if minutes > 59:
                raise ValueError('Invalid minutes in time zone offset')
            if hours > 23:
                raise ValueError('Invalid hours in time zone offset')
            return tz.tzoffset(None, mult * (hours * 60 + minutes) * 60)
# Module-level convenience instance using the default (any) separator;
# ``isoparse`` is exposed as a plain function bound to it.
DEFAULT_ISOPARSER = isoparser()
isoparse = DEFAULT_ISOPARSER.isoparse
| gpl-3.0 |
onecloud/neutron | neutron/tests/unit/vmware/test_nsx_plugin.py | 2 | 55152 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import mock
import netaddr
from oslo.config import cfg
from sqlalchemy import exc as sql_exc
import webob.exc
from neutron.api.v2 import attributes
from neutron.common import constants
from neutron.common import exceptions as ntn_exc
import neutron.common.test_lib as test_lib
from neutron import context
from neutron.extensions import external_net
from neutron.extensions import l3
from neutron.extensions import l3_ext_gw_mode
from neutron.extensions import portbindings
from neutron.extensions import providernet as pnet
from neutron.extensions import securitygroup as secgrp
from neutron import manager
from neutron.openstack.common.db import exception as db_exc
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.vmware.api_client import exception as api_exc
from neutron.plugins.vmware.api_client import version as version_module
from neutron.plugins.vmware.common import exceptions as nsx_exc
from neutron.plugins.vmware.common import sync
from neutron.plugins.vmware.common import utils
from neutron.plugins.vmware.dbexts import db as nsx_db
from neutron.plugins.vmware.extensions import distributedrouter as dist_router
from neutron.plugins.vmware import nsxlib
from neutron.tests.unit import _test_extension_portbindings as test_bindings
import neutron.tests.unit.test_db_plugin as test_plugin
import neutron.tests.unit.test_extension_ext_gw_mode as test_ext_gw_mode
import neutron.tests.unit.test_extension_security_group as ext_sg
import neutron.tests.unit.test_l3_plugin as test_l3_plugin
from neutron.tests.unit import testlib_api
from neutron.tests.unit import vmware
from neutron.tests.unit.vmware.apiclient import fake
# Module logger for the NSX plugin tests.
LOG = log.getLogger(__name__)
class NsxPluginV2TestCase(test_plugin.NeutronDbPluginV2TestCase):
    """Base test case wiring the NSX plugin against a fake NSX API client."""

    def _create_network(self, fmt, name, admin_state_up,
                        arg_list=None, providernet_args=None, **kwargs):
        """Build and submit a network-create request.

        Accepts extension attributes whose names contain ':' by spelling
        them with '__' in **kwargs (e.g. ``router__external``).
        """
        data = {'network': {'name': name,
                            'admin_state_up': admin_state_up,
                            'tenant_id': self._tenant_id}}
        # Fix to allow the router:external attribute and any other
        # attributes containing a colon to be passed with
        # a double underscore instead
        kwargs = dict((k.replace('__', ':'), v) for k, v in kwargs.items())
        if external_net.EXTERNAL in kwargs:
            arg_list = (external_net.EXTERNAL, ) + (arg_list or ())
        attrs = kwargs
        if providernet_args:
            attrs.update(providernet_args)
        for arg in (('admin_state_up', 'tenant_id', 'shared') +
                    (arg_list or ())):
            # Arg must be present and not empty
            if arg in kwargs and kwargs[arg]:
                data['network'][arg] = kwargs[arg]
        network_req = self.new_create_request('networks', data, fmt)
        if (kwargs.get('set_context') and 'tenant_id' in kwargs):
            # create a specific auth context for this request
            network_req.environ['neutron.context'] = context.Context(
                '', kwargs['tenant_id'])
        return network_req.get_response(self.api)
    def setUp(self,
              plugin=vmware.PLUGIN_NAME,
              ext_mgr=None,
              service_plugins=None):
        """Patch the NSX API client with a fake and start the plugin."""
        test_lib.test_config['config_files'] = [
            vmware.get_fake_conf('nsx.ini.test')]
        # mock api client
        self.fc = fake.FakeClient(vmware.STUBS_PATH)
        self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
        self.mock_instance = self.mock_nsx.start()
        # Avoid runs of the synchronizer looping call
        patch_sync = mock.patch.object(sync, '_start_loopingcall')
        patch_sync.start()
        # Emulate tests against NSX 2.x
        self.mock_instance.return_value.get_version.return_value = (
            version_module.Version("2.9"))
        self.mock_instance.return_value.request.side_effect = (
            self.fc.fake_request)
        super(NsxPluginV2TestCase, self).setUp(plugin=plugin,
                                               ext_mgr=ext_mgr)
        # Newly created port's status is always 'DOWN' till NSX wires them.
        self.port_create_status = constants.PORT_STATUS_DOWN
        cfg.CONF.set_override('metadata_mode', None, 'NSX')
        self.addCleanup(self.fc.reset_all)
class TestBasicGet(test_plugin.TestBasicGet, NsxPluginV2TestCase):
    """Run the generic basic-GET API tests against the NSX plugin setup."""
    pass
class TestV2HTTPResponse(test_plugin.TestV2HTTPResponse, NsxPluginV2TestCase):
    """Run the generic HTTP response-code tests against the NSX plugin."""
    pass
class TestPortsV2(NsxPluginV2TestCase,
                  test_plugin.TestPortsV2,
                  test_bindings.PortBindingsTestCase,
                  test_bindings.PortBindingsHostTestCaseMixin):
    """Port API tests plus NSX-specific limit, error and rollback cases."""

    VIF_TYPE = portbindings.VIF_TYPE_OVS
    HAS_PORT_FILTER = True

    def test_exhaust_ports_overlay_network(self):
        """Exceeding max_lp_per_overlay_ls must fail port creation (400)."""
        cfg.CONF.set_override('max_lp_per_overlay_ls', 1, group='NSX')
        with self.network(name='testnet',
                          arg_list=(pnet.NETWORK_TYPE,
                                    pnet.PHYSICAL_NETWORK,
                                    pnet.SEGMENTATION_ID)) as net:
            with self.subnet(network=net) as sub:
                with self.port(subnet=sub):
                    # creating another port should see an exception
                    self._create_port('json', net['network']['id'], 400)

    def test_exhaust_ports_bridged_network(self):
        """Exceeding max_lp_per_bridged_ls spawns a second lswitch."""
        cfg.CONF.set_override('max_lp_per_bridged_ls', 1, group="NSX")
        providernet_args = {pnet.NETWORK_TYPE: 'flat',
                            pnet.PHYSICAL_NETWORK: 'tzuuid'}
        with self.network(name='testnet',
                          providernet_args=providernet_args,
                          arg_list=(pnet.NETWORK_TYPE,
                                    pnet.PHYSICAL_NETWORK,
                                    pnet.SEGMENTATION_ID)) as net:
            with self.subnet(network=net) as sub:
                with self.port(subnet=sub):
                    with self.port(subnet=sub):
                        plugin = manager.NeutronManager.get_plugin()
                        ls = nsxlib.switch.get_lswitches(plugin.cluster,
                                                         net['network']['id'])
                        self.assertEqual(len(ls), 2)

    def test_update_port_delete_ip(self):
        # This test case overrides the default because the nsx plugin
        # implements port_security/security groups and it is not allowed
        # to remove an ip address from a port unless the security group
        # is first removed.
        with self.subnet() as subnet:
            with self.port(subnet=subnet) as port:
                data = {'port': {'admin_state_up': False,
                                 'fixed_ips': [],
                                 secgrp.SECURITYGROUPS: []}}
                req = self.new_update_request('ports',
                                              data, port['port']['id'])
                res = self.deserialize('json', req.get_response(self.api))
                self.assertEqual(res['port']['admin_state_up'],
                                 data['port']['admin_state_up'])
                self.assertEqual(res['port']['fixed_ips'],
                                 data['port']['fixed_ips'])

    def test_create_port_name_exceeds_40_chars(self):
        """Names longer than the NSX 40-char limit stay intact in Neutron."""
        name = 'this_is_a_port_whose_name_is_longer_than_40_chars'
        with self.port(name=name) as port:
            # Assert the neutron name is not truncated
            self.assertEqual(name, port['port']['name'])

    def _verify_no_orphan_left(self, net_id):
        """Assert rollback left neither Neutron nor NSX ports behind."""
        # Verify no port exists on net
        # ie: cleanup on db was successful
        query_params = "network_id=%s" % net_id
        self._test_list_resources('port', [],
                                  query_params=query_params)
        # Also verify no orphan port was left on nsx
        # no port should be there at all
        self.assertFalse(self.fc._fake_lswitch_lport_dict)

    def test_create_port_nsx_error_no_orphan_left(self):
        """An NSX API failure during port create must roll back cleanly."""
        with mock.patch.object(nsxlib.switch, 'create_lport',
                               side_effect=api_exc.NsxApiException):
            with self.network() as net:
                net_id = net['network']['id']
                self._create_port(self.fmt, net_id,
                                  webob.exc.HTTPInternalServerError.code)
                self._verify_no_orphan_left(net_id)

    def test_create_port_neutron_error_no_orphan_left(self):
        """A Neutron DB-mapping failure during port create rolls back."""
        with mock.patch.object(nsx_db, 'add_neutron_nsx_port_mapping',
                               side_effect=ntn_exc.NeutronException):
            with self.network() as net:
                net_id = net['network']['id']
                self._create_port(self.fmt, net_id,
                                  webob.exc.HTTPInternalServerError.code)
                self._verify_no_orphan_left(net_id)

    def test_create_port_db_error_no_orphan_left(self):
        """An integrity error on the NSX mapping table rolls back."""
        db_exception = db_exc.DBError(
            inner_exception=sql_exc.IntegrityError(mock.ANY,
                                                   mock.ANY,
                                                   mock.ANY))
        with mock.patch.object(nsx_db, 'add_neutron_nsx_port_mapping',
                               side_effect=db_exception):
            with self.network() as net:
                with self.port(device_owner=constants.DEVICE_OWNER_DHCP):
                    self._verify_no_orphan_left(net['network']['id'])

    def test_create_port_maintenance_returns_503(self):
        """Backend maintenance mode maps to HTTP 503 on port create."""
        with self.network() as net:
            with mock.patch.object(nsxlib, 'do_request',
                                   side_effect=nsx_exc.MaintenanceInProgress):
                data = {'port': {'network_id': net['network']['id'],
                                 'admin_state_up': False,
                                 'fixed_ips': [],
                                 'tenant_id': self._tenant_id}}
                plugin = manager.NeutronManager.get_plugin()
                with mock.patch.object(plugin, 'get_network',
                                       return_value=net['network']):
                    port_req = self.new_create_request('ports', data, self.fmt)
                    res = port_req.get_response(self.api)
                    self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
                                     res.status_int)
class TestNetworksV2(test_plugin.TestNetworksV2, NsxPluginV2TestCase):
    """Network API tests plus NSX provider-network specific scenarios."""

    def _test_create_bridge_network(self, vlan_id=0):
        """Create a flat (vlan_id=0) or vlan bridged network and verify
        the returned attributes match the request.
        """
        net_type = vlan_id and 'vlan' or 'flat'
        name = 'bridge_net'
        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
                    ('status', 'ACTIVE'), ('shared', False),
                    (pnet.NETWORK_TYPE, net_type),
                    (pnet.PHYSICAL_NETWORK, 'tzuuid'),
                    (pnet.SEGMENTATION_ID, vlan_id)]
        providernet_args = {pnet.NETWORK_TYPE: net_type,
                            pnet.PHYSICAL_NETWORK: 'tzuuid'}
        if vlan_id:
            providernet_args[pnet.SEGMENTATION_ID] = vlan_id
        with self.network(name=name,
                          providernet_args=providernet_args,
                          arg_list=(pnet.NETWORK_TYPE,
                                    pnet.PHYSICAL_NETWORK,
                                    pnet.SEGMENTATION_ID)) as net:
            for k, v in expected:
                self.assertEqual(net['network'][k], v)

    def test_create_bridge_network(self):
        self._test_create_bridge_network()

    def test_create_bridge_vlan_network(self):
        self._test_create_bridge_network(vlan_id=123)

    def test_create_bridge_vlan_network_outofrange_returns_400(self):
        # 5000 is outside the valid 802.1Q VLAN id range
        with testlib_api.ExpectedException(
                webob.exc.HTTPClientError) as ctx_manager:
            self._test_create_bridge_network(vlan_id=5000)
        self.assertEqual(ctx_manager.exception.code, 400)

    def test_list_networks_filter_by_id(self):
        # We add this unit test to cover some logic specific to the
        # nsx plugin
        with contextlib.nested(self.network(name='net1'),
                               self.network(name='net2')) as (net1, net2):
            query_params = 'id=%s' % net1['network']['id']
            self._test_list_resources('network', [net1],
                                      query_params=query_params)
            query_params += '&id=%s' % net2['network']['id']
            self._test_list_resources('network', [net1, net2],
                                      query_params=query_params)

    def test_delete_network_after_removing_subet(self):
        """Network delete succeeds once its last subnet is removed."""
        gateway_ip = '10.0.0.1'
        cidr = '10.0.0.0/24'
        fmt = 'json'
        # Create new network
        res = self._create_network(fmt=fmt, name='net',
                                   admin_state_up=True)
        network = self.deserialize(fmt, res)
        subnet = self._make_subnet(fmt, network, gateway_ip,
                                   cidr, ip_version=4)
        req = self.new_delete_request('subnets', subnet['subnet']['id'])
        sub_del_res = req.get_response(self.api)
        self.assertEqual(sub_del_res.status_int, 204)
        req = self.new_delete_request('networks', network['network']['id'])
        net_del_res = req.get_response(self.api)
        self.assertEqual(net_del_res.status_int, 204)

    def test_list_networks_with_shared(self):
        """Shared networks are visible to other tenants; private are not."""
        with self.network(name='net1'):
            with self.network(name='net2', shared=True):
                req = self.new_list_request('networks')
                res = self.deserialize('json', req.get_response(self.api))
                self.assertEqual(len(res['networks']), 2)
                req_2 = self.new_list_request('networks')
                req_2.environ['neutron.context'] = context.Context('',
                                                                   'somebody')
                res = self.deserialize('json', req_2.get_response(self.api))
                # tenant must see a single network
                self.assertEqual(len(res['networks']), 1)

    def test_create_network_name_exceeds_40_chars(self):
        name = 'this_is_a_network_whose_name_is_longer_than_40_chars'
        with self.network(name=name) as net:
            # Assert neutron name is not truncated
            self.assertEqual(net['network']['name'], name)

    def test_create_network_maintenance_returns_503(self):
        """Backend maintenance mode maps to HTTP 503 on network create."""
        data = {'network': {'name': 'foo',
                            'admin_state_up': True,
                            'tenant_id': self._tenant_id}}
        with mock.patch.object(nsxlib, 'do_request',
                               side_effect=nsx_exc.MaintenanceInProgress):
            net_req = self.new_create_request('networks', data, self.fmt)
            res = net_req.get_response(self.api)
            self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
                             res.status_int)

    def test_update_network_with_admin_false(self):
        """The NSX plugin rejects setting admin_state_up=False on update."""
        data = {'network': {'admin_state_up': False}}
        with self.network() as net:
            plugin = manager.NeutronManager.get_plugin()
            self.assertRaises(NotImplementedError,
                              plugin.update_network,
                              context.get_admin_context(),
                              net['network']['id'], data)

    def test_update_network_with_name_calls_nsx(self):
        """Renaming a network must propagate the new name to the backend."""
        with mock.patch.object(
                nsxlib.switch, 'update_lswitch') as update_lswitch_mock:
            # don't worry about deleting this network, do not use
            # context manager
            ctx = context.get_admin_context()
            plugin = manager.NeutronManager.get_plugin()
            net = plugin.create_network(
                ctx, {'network': {'name': 'xxx',
                                  'admin_state_up': True,
                                  'shared': False,
                                  'port_security_enabled': True}})
            plugin.update_network(ctx, net['id'],
                                  {'network': {'name': 'yyy'}})
            update_lswitch_mock.assert_called_once_with(
                mock.ANY, mock.ANY, 'yyy')
class SecurityGroupsTestCase(ext_sg.SecurityGroupDBTestCase):
    """Security-group test base wired to the fake NSX backend.

    Patching order matters: the API client and synchronizer must be
    replaced before super().setUp() loads the plugin.
    """

    def setUp(self):
        test_lib.test_config['config_files'] = [
            vmware.get_fake_conf('nsx.ini.test')]
        # mock nsx api client
        self.fc = fake.FakeClient(vmware.STUBS_PATH)
        self.mock_nsx = mock.patch(vmware.NSXAPI_NAME, autospec=True)
        instance = self.mock_nsx.start()
        instance.return_value.login.return_value = "the_cookie"
        # Avoid runs of the synchronizer looping call
        patch_sync = mock.patch.object(sync, '_start_loopingcall')
        patch_sync.start()
        instance.return_value.request.side_effect = self.fc.fake_request
        super(SecurityGroupsTestCase, self).setUp(vmware.PLUGIN_NAME)
class TestSecurityGroup(ext_sg.TestSecurityGroups, SecurityGroupsTestCase):
    """Security-group API tests with NSX-specific additions."""

    def test_create_security_group_name_exceeds_40_chars(self):
        """Names longer than the NSX 40-char limit must not be truncated."""
        long_name = 'this_is_a_secgroup_whose_name_is_longer_than_40_chars'
        with self.security_group(name=long_name) as sg:
            self.assertEqual(sg['security_group']['name'], long_name)

    def test_create_security_group_rule_bad_input(self):
        """An out-of-range protocol/port combination is rejected with 400."""
        with self.security_group('foo security group',
                                 'foo description') as sg:
            # protocol 200 with port range 32-4343 is invalid input
            bad_rule = self._build_security_group_rule(
                sg['security_group']['id'], 'ingress', 200, 32, 4343)
            res = self._create_security_group_rule(self.fmt, bad_rule)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 400)
class TestL3ExtensionManager(object):
    """Fake extension manager exposing the L3 resources.

    NOTE(review): get_resources mutates the module-level
    l3.RESOURCE_ATTRIBUTE_MAP in place; callers must back it up and
    restore it afterwards (see backup_l3_attribute_map /
    restore_l3_attribute_map below).
    """

    def get_resources(self):
        # Simulate extension of L3 attribute map
        # First apply attribute extensions
        for key in l3.RESOURCE_ATTRIBUTE_MAP.keys():
            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
                l3_ext_gw_mode.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
            l3.RESOURCE_ATTRIBUTE_MAP[key].update(
                dist_router.EXTENDED_ATTRIBUTES_2_0.get(key, {}))
        # Finally add l3 resources to the global attribute map
        attributes.RESOURCE_ATTRIBUTE_MAP.update(
            l3.RESOURCE_ATTRIBUTE_MAP)
        return l3.L3.get_resources()

    def get_actions(self):
        # No extra controller actions are exposed by this fake manager.
        return []

    def get_request_extensions(self):
        # No request extensions are exposed by this fake manager.
        return []
class TestL3SecGrpExtensionManager(TestL3ExtensionManager):
    """A fake extension manager for L3 and Security Group extensions.

    Includes also NSX specific L3 attributes.
    """

    def get_resources(self):
        """Return the L3 resources plus the security-group resources."""
        combined = super(TestL3SecGrpExtensionManager, self).get_resources()
        combined.extend(secgrp.Securitygroup.get_resources())
        return combined
def backup_l3_attribute_map():
    """Return a backup of the original l3 attribute map.

    Each resource's attribute dict is shallow-copied so that the in-place
    updates performed by the fake extension managers do not leak into the
    backup.
    """
    # dict.items() (instead of the Python-2-only iteritems()) behaves
    # identically here and keeps the helper portable to Python 3.
    return dict((res, attrs.copy()) for
                (res, attrs) in l3.RESOURCE_ATTRIBUTE_MAP.items())
def restore_l3_attribute_map(map_to_restore):
    """Ensure changes made by fake ext mgrs are reverted."""
    # Rebinds the module-level map wholesale; registered as a test
    # cleanup to undo the in-place mutation done in get_resources().
    l3.RESOURCE_ATTRIBUTE_MAP = map_to_restore
class L3NatTest(test_l3_plugin.L3BaseForIntTests, NsxPluginV2TestCase):
    """Base for L3/NAT tests: loads the NSX API extensions and protects
    the shared L3 attribute map from cross-test pollution.
    """

    def _restore_l3_attribute_map(self):
        # Restore the per-instance backup captured in setUp.
        l3.RESOURCE_ATTRIBUTE_MAP = self._l3_attribute_map_bk

    def setUp(self, plugin=vmware.PLUGIN_NAME, ext_mgr=None,
              service_plugins=None):
        # Keep a per-instance shallow-copied backup in addition to the
        # module-level one registered with addCleanup below.
        self._l3_attribute_map_bk = {}
        for item in l3.RESOURCE_ATTRIBUTE_MAP:
            self._l3_attribute_map_bk[item] = (
                l3.RESOURCE_ATTRIBUTE_MAP[item].copy())
        cfg.CONF.set_override('api_extensions_path', vmware.NSXEXT_PATH)
        l3_attribute_map_bk = backup_l3_attribute_map()
        self.addCleanup(restore_l3_attribute_map, l3_attribute_map_bk)
        ext_mgr = ext_mgr or TestL3ExtensionManager()
        super(L3NatTest, self).setUp(
            plugin=plugin, ext_mgr=ext_mgr, service_plugins=service_plugins)
        plugin_instance = manager.NeutronManager.get_plugin()
        # Cache the dotted plugin path and class for mock.patch.object use
        # in the subclasses' tests.
        self._plugin_name = "%s.%s" % (
            plugin_instance.__module__,
            plugin_instance.__class__.__name__)
        self._plugin_class = plugin_instance.__class__

    def _create_l3_ext_network(self, vlan_id=None):
        """Return a context manager yielding an external (l3_ext) network."""
        name = 'l3_ext_net'
        net_type = utils.NetworkTypes.L3_EXT
        providernet_args = {pnet.NETWORK_TYPE: net_type,
                            pnet.PHYSICAL_NETWORK: 'l3_gw_uuid'}
        if vlan_id:
            providernet_args[pnet.SEGMENTATION_ID] = vlan_id
        return self.network(name=name,
                            router__external=True,
                            providernet_args=providernet_args,
                            arg_list=(pnet.NETWORK_TYPE,
                                      pnet.PHYSICAL_NETWORK,
                                      pnet.SEGMENTATION_ID))
class TestL3NatTestCase(L3NatTest,
                        test_l3_plugin.L3NatDBIntTestCase,
                        NsxPluginV2TestCase):
    """L3/NAT tests plus NSX-specific gateway, distributed-router,
    rollback and metadata-network scenarios.
    """

    def _test_create_l3_ext_network(self, vlan_id=0):
        """Create an l3_ext network and verify the returned attributes."""
        name = 'l3_ext_net'
        net_type = utils.NetworkTypes.L3_EXT
        expected = [('subnets', []), ('name', name), ('admin_state_up', True),
                    ('status', 'ACTIVE'), ('shared', False),
                    (external_net.EXTERNAL, True),
                    (pnet.NETWORK_TYPE, net_type),
                    (pnet.PHYSICAL_NETWORK, 'l3_gw_uuid'),
                    (pnet.SEGMENTATION_ID, vlan_id)]
        with self._create_l3_ext_network(vlan_id) as net:
            for k, v in expected:
                self.assertEqual(net['network'][k], v)

    def _nsx_validate_ext_gw(self, router_id, l3_gw_uuid, vlan_id):
        """Verify data on fake NSX API client in order to validate
        plugin did set them properly
        """
        # First find the NSX router ID
        ctx = context.get_admin_context()
        nsx_router_id = nsx_db.get_nsx_router_id(ctx.session, router_id)
        ports = [port for port in self.fc._fake_lrouter_lport_dict.values()
                 if (port['lr_uuid'] == nsx_router_id and
                     port['att_type'] == "L3GatewayAttachment")]
        self.assertEqual(len(ports), 1)
        self.assertEqual(ports[0]['attachment_gwsvc_uuid'], l3_gw_uuid)
        self.assertEqual(ports[0].get('vlan_id'), vlan_id)

    def test_create_l3_ext_network_without_vlan(self):
        self._test_create_l3_ext_network()

    def _test_router_create_with_gwinfo_and_l3_ext_net(self, vlan_id=None,
                                                       validate_ext_gw=True):
        with self._create_l3_ext_network(vlan_id) as net:
            with self.subnet(network=net) as s:
                data = {'router': {'tenant_id': 'whatever'}}
                data['router']['name'] = 'router1'
                data['router']['external_gateway_info'] = {
                    'network_id': s['subnet']['network_id']}
                router_req = self.new_create_request('routers', data,
                                                     self.fmt)
                # NOTE(review): if get_response/deserialize raise, 'router'
                # is unbound and the finally clause fails with NameError,
                # masking the original error — confirm before relying on it.
                try:
                    res = router_req.get_response(self.ext_api)
                    router = self.deserialize(self.fmt, res)
                    self.assertEqual(
                        s['subnet']['network_id'],
                        (router['router']['external_gateway_info']
                         ['network_id']))
                    if validate_ext_gw:
                        self._nsx_validate_ext_gw(router['router']['id'],
                                                  'l3_gw_uuid', vlan_id)
                finally:
                    self._delete('routers', router['router']['id'])

    def test_router_create_with_gwinfo_and_l3_ext_net(self):
        self._test_router_create_with_gwinfo_and_l3_ext_net()

    def test_router_create_with_gwinfo_and_l3_ext_net_with_vlan(self):
        self._test_router_create_with_gwinfo_and_l3_ext_net(444)

    def _test_router_create_with_distributed(self, dist_input, dist_expected,
                                             version='3.1', return_code=201):
        """Create a router with the given 'distributed' flag against an
        emulated NSX version and verify status code and resulting flag.
        """
        self.mock_instance.return_value.get_version.return_value = (
            version_module.Version(version))
        data = {'tenant_id': 'whatever'}
        data['name'] = 'router1'
        data['distributed'] = dist_input
        router_req = self.new_create_request(
            'routers', {'router': data}, self.fmt)
        # NOTE(review): if get_response itself raises, 'res' is unbound and
        # the finally clause fails with NameError — verify before reuse.
        try:
            res = router_req.get_response(self.ext_api)
            self.assertEqual(return_code, res.status_int)
            if res.status_int == 201:
                router = self.deserialize(self.fmt, res)
                self.assertIn('distributed', router['router'])
                self.assertEqual(dist_expected,
                                 router['router']['distributed'])
        finally:
            if res.status_int == 201:
                self._delete('routers', router['router']['id'])

    def test_router_create_distributed_with_3_1(self):
        self._test_router_create_with_distributed(True, True)

    def test_router_create_distributed_with_new_nsx_versions(self):
        with mock.patch.object(nsxlib.router, 'create_explicit_route_lrouter'):
            self._test_router_create_with_distributed(True, True, '3.2')
            self._test_router_create_with_distributed(True, True, '4.0')
            self._test_router_create_with_distributed(True, True, '4.1')

    def test_router_create_not_distributed(self):
        self._test_router_create_with_distributed(False, False)

    def test_router_create_distributed_unspecified(self):
        # Unspecified 'distributed' defaults to a non-distributed router.
        self._test_router_create_with_distributed(None, False)

    def test_router_create_distributed_returns_400(self):
        # Distributed routers are not supported before NSX 3.1.
        self._test_router_create_with_distributed(True, None, '3.0', 400)

    def test_router_create_on_obsolete_platform(self):

        def obsolete_response(*args, **kwargs):
            # Simulate a platform whose response lacks the 'distributed' key.
            response = (nsxlib.router.
                        _create_implicit_routing_lrouter(*args, **kwargs))
            response.pop('distributed')
            return response

        with mock.patch.object(
                nsxlib.router, 'create_lrouter', new=obsolete_response):
            self._test_router_create_with_distributed(None, False, '2.2')

    def _create_router_with_gw_info_for_test(self, subnet):
        """Issue a router-create request with an external gateway on the
        given subnet's network and return the raw response.
        """
        data = {'router': {'tenant_id': 'whatever',
                           'name': 'router1',
                           'external_gateway_info':
                           {'network_id': subnet['subnet']['network_id']}}}
        router_req = self.new_create_request(
            'routers', data, self.fmt)
        return router_req.get_response(self.ext_api)

    def test_router_create_nsx_error_returns_500(self, vlan_id=None):
        with mock.patch.object(nsxlib.router,
                               'create_router_lport',
                               side_effect=api_exc.NsxApiException):
            with self._create_l3_ext_network(vlan_id) as net:
                with self.subnet(network=net) as s:
                    res = self._create_router_with_gw_info_for_test(s)
                    self.assertEqual(
                        webob.exc.HTTPInternalServerError.code,
                        res.status_int)

    def test_router_add_gateway_invalid_network_returns_404(self):
        # NOTE(salv-orlando): This unit test has been overridden
        # as the nsx plugin support the ext_gw_mode extension
        # which mandates a uuid for the external network identifier
        with self.router() as r:
            self._add_external_gateway_to_router(
                r['router']['id'],
                uuidutils.generate_uuid(),
                expected_code=webob.exc.HTTPNotFound.code)

    def _verify_router_rollback(self):
        # Check that nothing is left on DB
        # TODO(salv-orlando): Verify whehter this is thread-safe
        # w.r.t. sqllite and parallel testing
        self._test_list_resources('router', [])
        # Check that router is not in NSX
        self.assertFalse(self.fc._fake_lrouter_dict)

    def test_router_create_with_gw_info_neutron_fail_does_rollback(self):
        # Simulate get subnet error while building list of ips with prefix
        with mock.patch.object(self._plugin_class,
                               '_build_ip_address_list',
                               side_effect=ntn_exc.SubnetNotFound(
                                   subnet_id='xxx')):
            with self._create_l3_ext_network() as net:
                with self.subnet(network=net) as s:
                    res = self._create_router_with_gw_info_for_test(s)
                    self.assertEqual(
                        webob.exc.HTTPNotFound.code,
                        res.status_int)
                    self._verify_router_rollback()

    def test_router_create_with_gw_info_nsx_fail_does_rollback(self):
        # Simulate error while fetching nsx router gw port
        with mock.patch.object(self._plugin_class,
                               '_find_router_gw_port',
                               side_effect=api_exc.NsxApiException):
            with self._create_l3_ext_network() as net:
                with self.subnet(network=net) as s:
                    res = self._create_router_with_gw_info_for_test(s)
                    self.assertEqual(
                        webob.exc.HTTPInternalServerError.code,
                        res.status_int)
                    self._verify_router_rollback()

    def _test_router_update_gateway_on_l3_ext_net(self, vlan_id=None,
                                                  validate_ext_gw=True):
        """Set the router gateway to a plain external net, then move it to
        an l3_ext network and verify the switch took effect.
        """
        with self.router() as r:
            with self.subnet() as s1:
                with self._create_l3_ext_network(vlan_id) as net:
                    with self.subnet(network=net) as s2:
                        self._set_net_external(s1['subnet']['network_id'])
                        try:
                            self._add_external_gateway_to_router(
                                r['router']['id'],
                                s1['subnet']['network_id'])
                            body = self._show('routers', r['router']['id'])
                            net_id = (body['router']
                                      ['external_gateway_info']['network_id'])
                            self.assertEqual(net_id,
                                             s1['subnet']['network_id'])
                            # Plug network with external mapping
                            self._set_net_external(s2['subnet']['network_id'])
                            self._add_external_gateway_to_router(
                                r['router']['id'],
                                s2['subnet']['network_id'])
                            body = self._show('routers', r['router']['id'])
                            net_id = (body['router']
                                      ['external_gateway_info']['network_id'])
                            self.assertEqual(net_id,
                                             s2['subnet']['network_id'])
                            if validate_ext_gw:
                                self._nsx_validate_ext_gw(
                                    body['router']['id'],
                                    'l3_gw_uuid', vlan_id)
                        finally:
                            # Cleanup
                            self._remove_external_gateway_from_router(
                                r['router']['id'],
                                s2['subnet']['network_id'])

    def test_router_update_gateway_on_l3_ext_net(self):
        self._test_router_update_gateway_on_l3_ext_net()

    def test_router_update_gateway_on_l3_ext_net_with_vlan(self):
        self._test_router_update_gateway_on_l3_ext_net(444)

    def test_router_list_by_tenant_id(self):
        with contextlib.nested(self.router(tenant_id='custom'),
                               self.router(),
                               self.router()
                               ) as routers:
            self._test_list_resources('router', [routers[0]],
                                      query_params="tenant_id=custom")

    def test_create_l3_ext_network_with_vlan(self):
        self._test_create_l3_ext_network(666)

    def test_floatingip_with_assoc_fails(self):
        self._test_floatingip_with_assoc_fails(self._plugin_name)

    def test_floatingip_with_invalid_create_port(self):
        self._test_floatingip_with_invalid_create_port(self._plugin_name)

    def _metadata_setup(self):
        # Enable the metadata-access-network mode for the tests below.
        cfg.CONF.set_override('metadata_mode', 'access_network', 'NSX')

    def _metadata_teardown(self):
        cfg.CONF.set_override('metadata_mode', None, 'NSX')

    def test_create_router_name_exceeds_40_chars(self):
        name = 'this_is_a_router_whose_name_is_longer_than_40_chars'
        with self.router(name=name) as rtr:
            # Assert Neutron name is not truncated
            self.assertEqual(rtr['router']['name'], name)

    # The following wrappers re-run the standard L3 tests with the
    # metadata access network enabled.

    def test_router_add_interface_subnet_with_metadata_access(self):
        self._metadata_setup()
        self.test_router_add_interface_subnet()
        self._metadata_teardown()

    def test_router_add_interface_port_with_metadata_access(self):
        self._metadata_setup()
        self.test_router_add_interface_port()
        self._metadata_teardown()

    def test_router_add_interface_dupsubnet_returns_400_with_metadata(self):
        self._metadata_setup()
        self.test_router_add_interface_dup_subnet1_returns_400()
        self._metadata_teardown()

    def test_router_add_interface_overlapped_cidr_returns_400_with(self):
        self._metadata_setup()
        self.test_router_add_interface_overlapped_cidr_returns_400()
        self._metadata_teardown()

    def test_router_remove_interface_inuse_returns_409_with_metadata(self):
        self._metadata_setup()
        self.test_router_remove_interface_inuse_returns_409()
        self._metadata_teardown()

    def test_router_remove_iface_wrong_sub_returns_400_with_metadata(self):
        self._metadata_setup()
        self.test_router_remove_interface_wrong_subnet_returns_400()
        self._metadata_teardown()

    def test_router_delete_with_metadata_access(self):
        self._metadata_setup()
        self.test_router_delete()
        self._metadata_teardown()

    def test_router_delete_with_port_existed_returns_409_with_metadata(self):
        self._metadata_setup()
        self.test_router_delete_with_port_existed_returns_409()
        self._metadata_teardown()

    def test_metadatata_network_created_with_router_interface_add(self):
        self._metadata_setup()
        with mock.patch.object(self._plugin_class, 'schedule_network') as f:
            with self.router() as r:
                with self.subnet() as s:
                    self._router_interface_action('add',
                                                  r['router']['id'],
                                                  s['subnet']['id'],
                                                  None)
                    r_ports = self._list('ports')['ports']
                    self.assertEqual(len(r_ports), 2)
                    ips = []
                    for port in r_ports:
                        ips.extend([netaddr.IPAddress(fixed_ip['ip_address'])
                                    for fixed_ip in port['fixed_ips']])
                    meta_cidr = netaddr.IPNetwork('169.254.0.0/16')
                    self.assertTrue(any([ip in meta_cidr for ip in ips]))
                    # Needed to avoid 409
                    self._router_interface_action('remove',
                                                  r['router']['id'],
                                                  s['subnet']['id'],
                                                  None)
                    # Verify that the metadata network gets scheduled first,
                    # so that an active dhcp agent can pick it up
                    expected_meta_net = {
                        'status': 'ACTIVE',
                        'subnets': [],
                        'name': 'meta-%s' % r['router']['id'],
                        'admin_state_up': True,
                        'tenant_id': '',
                        'port_security_enabled': False,
                        'shared': False,
                        'id': mock.ANY
                    }
                    f.assert_called_once_with(mock.ANY, expected_meta_net)
        self._metadata_teardown()

    def test_metadata_network_create_rollback_on_create_subnet_failure(self):
        self._metadata_setup()
        with self.router() as r:
            with self.subnet() as s:
                # Raise a NeutronException (eg: NotFound)
                with mock.patch.object(self._plugin_class,
                                       'create_subnet',
                                       side_effect=ntn_exc.NotFound):
                    self._router_interface_action(
                        'add', r['router']['id'], s['subnet']['id'], None)
                # Ensure metadata network was removed
                nets = self._list('networks')['networks']
                self.assertEqual(len(nets), 1)
                # Needed to avoid 409
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
        self._metadata_teardown()

    def test_metadata_network_create_rollback_on_add_rtr_iface_failure(self):
        self._metadata_setup()
        with self.router() as r:
            with self.subnet() as s:
                # Raise a NeutronException when adding metadata subnet
                # to router
                # save function being mocked
                real_func = self._plugin_class.add_router_interface
                plugin_instance = manager.NeutronManager.get_plugin()

                def side_effect(*args):
                    if args[-1]['subnet_id'] == s['subnet']['id']:
                        # do the real thing
                        return real_func(plugin_instance, *args)
                    # otherwise raise
                    raise api_exc.NsxApiException()

                with mock.patch.object(self._plugin_class,
                                       'add_router_interface',
                                       side_effect=side_effect):
                    self._router_interface_action(
                        'add', r['router']['id'], s['subnet']['id'], None)
                # Ensure metadata network was removed
                nets = self._list('networks')['networks']
                self.assertEqual(len(nets), 1)
                # Needed to avoid 409
                self._router_interface_action('remove',
                                              r['router']['id'],
                                              s['subnet']['id'],
                                              None)
        self._metadata_teardown()

    def test_metadata_network_removed_with_router_interface_remove(self):
        self._metadata_setup()
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add', r['router']['id'],
                                              s['subnet']['id'], None)
                subnets = self._list('subnets')['subnets']
                self.assertEqual(len(subnets), 2)
                meta_cidr = netaddr.IPNetwork('169.254.0.0/16')
                for subnet in subnets:
                    cidr = netaddr.IPNetwork(subnet['cidr'])
                    if meta_cidr == cidr or meta_cidr in cidr.supernet(16):
                        meta_sub_id = subnet['id']
                        meta_net_id = subnet['network_id']
                ports = self._list(
                    'ports',
                    query_params='network_id=%s' % meta_net_id)['ports']
                self.assertEqual(len(ports), 1)
                meta_port_id = ports[0]['id']
                self._router_interface_action('remove', r['router']['id'],
                                              s['subnet']['id'], None)
                # Removing the last interface must delete the metadata
                # network, its port and its subnet.
                self._show('networks', meta_net_id,
                           webob.exc.HTTPNotFound.code)
                self._show('ports', meta_port_id,
                           webob.exc.HTTPNotFound.code)
                self._show('subnets', meta_sub_id,
                           webob.exc.HTTPNotFound.code)
        self._metadata_teardown()

    def test_metadata_network_remove_rollback_on_failure(self):
        self._metadata_setup()
        with self.router() as r:
            with self.subnet() as s:
                self._router_interface_action('add', r['router']['id'],
                                              s['subnet']['id'], None)
                networks = self._list('networks')['networks']
                for network in networks:
                    if network['id'] != s['subnet']['network_id']:
                        meta_net_id = network['id']
                ports = self._list(
                    'ports',
                    query_params='network_id=%s' % meta_net_id)['ports']
                meta_port_id = ports[0]['id']
                # Raise a NeutronException when removing
                # metadata subnet from router
                # save function being mocked
                real_func = self._plugin_class.remove_router_interface
                plugin_instance = manager.NeutronManager.get_plugin()

                def side_effect(*args):
                    if args[-1].get('subnet_id') == s['subnet']['id']:
                        # do the real thing
                        return real_func(plugin_instance, *args)
                    # otherwise raise
                    raise api_exc.NsxApiException()

                with mock.patch.object(self._plugin_class,
                                       'remove_router_interface',
                                       side_effect=side_effect):
                    self._router_interface_action('remove', r['router']['id'],
                                                  s['subnet']['id'], None)
                # Metadata network and subnet should still be there
                self._show('networks', meta_net_id,
                           webob.exc.HTTPOk.code)
                self._show('ports', meta_port_id,
                           webob.exc.HTTPOk.code)
        self._metadata_teardown()

    def test_metadata_dhcp_host_route(self):
        cfg.CONF.set_override('metadata_mode', 'dhcp_host_route', 'NSX')
        subnets = self._list('subnets')['subnets']
        with self.subnet() as s:
            with self.port(subnet=s, device_id='1234',
                           device_owner=constants.DEVICE_OWNER_DHCP):
                subnets = self._list('subnets')['subnets']
                self.assertEqual(len(subnets), 1)
                self.assertEqual(subnets[0]['host_routes'][0]['nexthop'],
                                 '10.0.0.2')
                self.assertEqual(subnets[0]['host_routes'][0]['destination'],
                                 '169.254.169.254/32')
            subnets = self._list('subnets')['subnets']
            # Test that route is deleted after dhcp port is removed
            self.assertEqual(len(subnets[0]['host_routes']), 0)

    def test_floatingip_disassociate(self):
        with self.port() as p:
            private_sub = {'subnet': {'id':
                                      p['port']['fixed_ips'][0]['subnet_id']}}
            with self.floatingip_no_assoc(private_sub) as fip:
                port_id = p['port']['id']
                body = self._update('floatingips', fip['floatingip']['id'],
                                    {'floatingip': {'port_id': port_id}})
                self.assertEqual(body['floatingip']['port_id'], port_id)
                # Disassociate
                body = self._update('floatingips', fip['floatingip']['id'],
                                    {'floatingip': {'port_id': None}})
                body = self._show('floatingips', fip['floatingip']['id'])
                self.assertIsNone(body['floatingip']['port_id'])
                self.assertIsNone(body['floatingip']['fixed_ip_address'])

    def test_create_router_maintenance_returns_503(self):
        """Backend maintenance mode maps to HTTP 503 on router create."""
        with self._create_l3_ext_network() as net:
            with self.subnet(network=net) as s:
                with mock.patch.object(
                        nsxlib,
                        'do_request',
                        side_effect=nsx_exc.MaintenanceInProgress):
                    data = {'router': {'tenant_id': 'whatever'}}
                    data['router']['name'] = 'router1'
                    data['router']['external_gateway_info'] = {
                        'network_id': s['subnet']['network_id']}
                    router_req = self.new_create_request(
                        'routers', data, self.fmt)
                    res = router_req.get_response(self.ext_api)
                    self.assertEqual(webob.exc.HTTPServiceUnavailable.code,
                                     res.status_int)
class ExtGwModeTestCase(NsxPluginV2TestCase,
                        test_ext_gw_mode.ExtGwModeIntTestCase):
    """Run the generic ext-gw-mode extension tests against the NSX plugin."""
    pass
class NeutronNsxOutOfSync(NsxPluginV2TestCase,
                          test_l3_plugin.L3NatTestCaseMixin,
                          ext_sg.SecurityGroupsTestCase):
    """Verify plugin behaviour when backend NSX state has gone missing.

    Each test creates a resource through the Neutron API, wipes the
    matching entry from the fake NSX controller (``self.fc``), and then
    checks that the follow-up API operation either still succeeds
    (deletes) or surfaces an ERROR status (shows/creates/updates)
    instead of failing unexpectedly.
    """

    def setUp(self):
        # Back up the module-level L3 attribute map and restore it once
        # the test finishes, so tests do not leak attribute changes.
        l3_attribute_map_bk = backup_l3_attribute_map()
        self.addCleanup(restore_l3_attribute_map, l3_attribute_map_bk)
        super(NeutronNsxOutOfSync, self).setUp(
            ext_mgr=TestL3SecGrpExtensionManager())

    def test_delete_network_not_in_nsx(self):
        """Deleting a network missing from NSX still returns 204."""
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        self.fc._fake_lswitch_dict.clear()
        req = self.new_delete_request('networks', net1['network']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)

    def test_show_network_not_in_nsx(self):
        """Showing a network missing from NSX reports ERROR status."""
        res = self._create_network('json', 'net1', True)
        net = self.deserialize('json', res)
        self.fc._fake_lswitch_dict.clear()
        req = self.new_show_request('networks', net['network']['id'],
                                    fields=['id', 'status'])
        net = self.deserialize('json', req.get_response(self.api))
        self.assertEqual(net['network']['status'],
                         constants.NET_STATUS_ERROR)

    def test_delete_port_not_in_nsx(self):
        """Deleting a port missing from NSX still returns 204."""
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.fc._fake_lswitch_lport_dict.clear()
        req = self.new_delete_request('ports', port['port']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)

    def test_show_port_not_in_nsx(self):
        """Showing a port missing from NSX reports ERROR status."""
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.fc._fake_lswitch_lport_dict.clear()
        self.fc._fake_lswitch_lportstatus_dict.clear()
        req = self.new_show_request('ports', port['port']['id'],
                                    fields=['id', 'status'])
        net = self.deserialize('json', req.get_response(self.api))
        self.assertEqual(net['port']['status'],
                         constants.PORT_STATUS_ERROR)

    def test_create_port_on_network_not_in_nsx(self):
        """Creating a port on a network missing from NSX yields ERROR."""
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        self.fc._fake_lswitch_dict.clear()
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR)

    def test_update_port_not_in_nsx(self):
        """Updating a port missing from NSX applies the change in the
        DB but marks the port status as ERROR."""
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.fc._fake_lswitch_lport_dict.clear()
        data = {'port': {'name': 'error_port'}}
        req = self.new_update_request('ports', data, port['port']['id'])
        port = self.deserialize('json', req.get_response(self.api))
        self.assertEqual(port['port']['status'], constants.PORT_STATUS_ERROR)
        self.assertEqual(port['port']['name'], 'error_port')

    def test_delete_port_and_network_not_in_nsx(self):
        """Deleting a port and its network, both missing from NSX,
        succeeds for each resource."""
        res = self._create_network('json', 'net1', True)
        net1 = self.deserialize('json', res)
        res = self._create_port('json', net1['network']['id'])
        port = self.deserialize('json', res)
        self.fc._fake_lswitch_dict.clear()
        self.fc._fake_lswitch_lport_dict.clear()
        req = self.new_delete_request('ports', port['port']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)
        req = self.new_delete_request('networks', net1['network']['id'])
        res = req.get_response(self.api)
        self.assertEqual(res.status_int, 204)

    def test_delete_router_not_in_nsx(self):
        """Deleting a router missing from NSX still returns 204."""
        res = self._create_router('json', 'tenant')
        router = self.deserialize('json', res)
        self.fc._fake_lrouter_dict.clear()
        req = self.new_delete_request('routers', router['router']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 204)

    def test_show_router_not_in_nsx(self):
        """Showing a router missing from NSX reports ERROR status."""
        res = self._create_router('json', 'tenant')
        router = self.deserialize('json', res)
        self.fc._fake_lrouter_dict.clear()
        req = self.new_show_request('routers', router['router']['id'],
                                    fields=['id', 'status'])
        router = self.deserialize('json', req.get_response(self.ext_api))
        self.assertEqual(router['router']['status'],
                         constants.NET_STATUS_ERROR)

    def _create_network_and_subnet(self, cidr, external=False):
        """Create a network (optionally flagged external) plus one
        subnet with the given CIDR; return (network_id, subnet_id)."""
        net_res = self._create_network('json', 'ext_net', True)
        net = self.deserialize('json', net_res)
        net_id = net['network']['id']
        if external:
            self._update('networks', net_id,
                         {'network': {external_net.EXTERNAL: True}})
        sub_res = self._create_subnet('json', net_id, cidr)
        sub = self.deserialize('json', sub_res)
        return net_id, sub['subnet']['id']

    def test_clear_gateway_nat_rule_not_in_nsx(self):
        """Clearing a gateway whose NAT rule vanished from NSX still
        succeeds."""
        # Create external network and subnet
        ext_net_id = self._create_network_and_subnet('1.1.1.0/24', True)[0]
        # Create internal network and subnet
        int_sub_id = self._create_network_and_subnet('10.0.0.0/24')[1]
        res = self._create_router('json', 'tenant')
        router = self.deserialize('json', res)
        # Add interface to router (needed to generate NAT rule)
        req = self.new_action_request(
            'routers',
            {'subnet_id': int_sub_id},
            router['router']['id'],
            "add_router_interface")
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 200)
        # Set gateway for router
        req = self.new_update_request(
            'routers',
            {'router': {'external_gateway_info':
                        {'network_id': ext_net_id}}},
            router['router']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 200)
        # Delete NAT rule from NSX, clear gateway
        # and verify operation still succeeds
        self.fc._fake_lrouter_nat_dict.clear()
        req = self.new_update_request(
            'routers',
            {'router': {'external_gateway_info': {}}},
            router['router']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 200)

    def test_remove_router_interface_not_in_nsx(self):
        """Removing an interface from a router missing in NSX still
        succeeds."""
        # Create internal network and subnet
        int_sub_id = self._create_network_and_subnet('10.0.0.0/24')[1]
        res = self._create_router('json', 'tenant')
        router = self.deserialize('json', res)
        # Add interface to router (needed to generate NAT rule)
        req = self.new_action_request(
            'routers',
            {'subnet_id': int_sub_id},
            router['router']['id'],
            "add_router_interface")
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 200)
        self.fc._fake_lrouter_dict.clear()
        req = self.new_action_request(
            'routers',
            {'subnet_id': int_sub_id},
            router['router']['id'],
            "remove_router_interface")
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 200)

    def test_update_router_not_in_nsx(self):
        """Updating a router missing from NSX fails with 500 and the
        router is subsequently reported in ERROR status."""
        res = self._create_router('json', 'tenant')
        router = self.deserialize('json', res)
        self.fc._fake_lrouter_dict.clear()
        req = self.new_update_request(
            'routers',
            {'router': {'name': 'goo'}},
            router['router']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 500)
        req = self.new_show_request('routers', router['router']['id'])
        router = self.deserialize('json', req.get_response(self.ext_api))
        self.assertEqual(router['router']['status'],
                         constants.NET_STATUS_ERROR)

    def test_delete_security_group_not_in_nsx(self):
        """Deleting a security group missing from NSX still returns 204."""
        res = self._create_security_group('json', 'name', 'desc')
        sec_group = self.deserialize('json', res)
        self.fc._fake_securityprofile_dict.clear()
        req = self.new_delete_request(
            'security-groups',
            sec_group['security_group']['id'])
        res = req.get_response(self.ext_api)
        self.assertEqual(res.status_int, 204)
| apache-2.0 |
mihaip/delicious2google | app/openid/yadis/accept.py | 184 | 3742 | """Functions for generating and parsing HTTP Accept: headers for
supporting server-directed content negotiation.
"""
def generateAcceptHeader(*elements):
    """Generate an HTTP Accept header value.

    Each element is either a MIME type string (implying quality 1.0)
    or a (mime_type, quality) pair with 0 < quality <= 1.

    [str or (str, float)] -> str

    Raises ValueError if a quality factor is outside (0, 1].
    """
    parts = []
    for element in elements:
        # isinstance (not `type(...) is str`) so str subclasses are
        # accepted as plain media types too.
        if isinstance(element, str):
            qs = "1.0"
            mtype = element
        else:
            mtype, q = element
            q = float(q)
            if q > 1 or q <= 0:
                raise ValueError('Invalid preference factor: %r' % q)

            # Quality factors are rendered with one decimal place.
            qs = '%0.1f' % (q,)

        parts.append((qs, mtype))

    # Sort by (quality string, media type) so output is deterministic.
    parts.sort()
    chunks = []
    for q, mtype in parts:
        if q == '1.0':
            # q=1.0 is the HTTP default, so it may be omitted.
            chunks.append(mtype)
        else:
            chunks.append('%s; q=%s' % (mtype, q))

    return ', '.join(chunks)
def parseAcceptHeader(value):
    """Parse an HTTP Accept header value.

    Accept-extensions other than ``q`` are ignored.  Returns a list of
    (main_type, sub_type, quality) tuples ordered from most to least
    preferred.

    str -> [(str, str, float)]
    """
    entries = []
    for chunk in value.split(','):
        pieces = [piece.strip() for piece in chunk.strip().split(';')]
        media_type = pieces[0]
        if '/' not in media_type:
            # Not a MIME type at all; skip the malformed chunk.
            continue
        main_type, sub_type = media_type.split('/', 1)

        quality = 1.0
        for extension in pieces[1:]:
            if '=' not in extension:
                continue
            key, _, raw_q = extension.partition('=')
            if key == 'q':
                try:
                    quality = float(raw_q)
                except ValueError:
                    # Ignore poorly formed q-values.
                    continue
                break

        entries.append((quality, main_type, sub_type))

    entries.sort()
    entries.reverse()
    return [(main, sub, q) for (q, main, sub) in entries]
def matchTypes(accept_types, have_types):
    """Given the result of parsing an Accept: header, and the
    available MIME types, return the acceptable types with their
    quality markdowns.

    For example:

    >>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
    >>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
    [('text/html', 1.0), ('text/plain', 0.5)]


    Type signature: ([(str, str, float)], [str]) -> [(str, float)]
    """
    # An empty Accept header means everything is acceptable.
    wildcard_q = 1 if not accept_types else 0

    main_q = {}   # quality for 'main/*' patterns, keyed on main type
    exact_q = {}  # quality for exact 'main/sub' entries
    for main, sub, q in accept_types:
        if main == '*':
            wildcard_q = max(wildcard_q, q)
        elif sub == '*':
            main_q[main] = max(main_q.get(main, 0), q)
        else:
            exact_q[(main, sub)] = max(exact_q.get((main, sub), 0), q)

    matches = []
    for mtype in have_types:
        main, sub = mtype.split('/')
        if (main, sub) in exact_q:
            q = exact_q[(main, sub)]
        else:
            q = main_q.get(main, wildcard_q)
        # q == 0 means "not acceptable"; drop it entirely.
        if q:
            matches.append((mtype, q))

    # Highest quality first; Python's stable sort keeps ties in the
    # original have_types order.
    matches.sort(key=lambda pair: 1 - pair[1])
    return matches
def getAcceptable(accept_header, have_types):
    """Parse the accept header and return the available types in
    preferred order.  Types that are not acceptable are omitted.

    This is a convenience wrapper around matchTypes and
    parseAcceptHeader.

    (str, [str]) -> [str]
    """
    preferred = matchTypes(parseAcceptHeader(accept_header), have_types)
    return [mtype for (mtype, _) in preferred]
Harmonic/laradock | jupyterhub/jupyterhub_config.py | 10 | 5035 | # Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
# Configuration file for JupyterHub
import os
# `get_config()` is injected into the namespace by JupyterHub when it
# exec()s this file; `c` is the global traitlets config object.
c = get_config()

# create system users that don't exist yet
c.LocalAuthenticator.create_system_users = True
def create_dir_hook(spawner):
    """Pre-spawn hook: ensure the per-user data directory exists.

    Creates /user-data/<username> with mode 0o755 (further reduced by
    the process umask) and hands ownership to UID 1000 / GID 100 so the
    single-user notebook container, which runs with that UID, can write
    to it while the directory stays readable by other users.
    """
    username = spawner.user.name  # JupyterHub username == directory name
    volume_path = os.path.join('/user-data', username)
    try:
        # Create unconditionally instead of the racy
        # exists()-then-mkdir pattern: a concurrent spawn creating the
        # same directory can no longer make this hook crash.
        os.mkdir(volume_path, 0o755)
    except FileExistsError:
        # Directory already present: leave its ownership untouched,
        # matching the previous behaviour.
        pass
    else:
        # Only chown freshly created directories; hub and container
        # user must share this UID for the volume to be writeable.
        os.chown(volume_path, 1000, 100)
# attach the hook function to the spawner
c.Spawner.pre_spawn_hook = create_dir_hook

# We rely on environment variables to configure JupyterHub so that we
# avoid having to rebuild the JupyterHub container every time we change a
# configuration parameter.

# Spawn single-user servers as Docker containers
c.JupyterHub.spawner_class = 'dockerspawner.DockerSpawner'
# Spawn containers from this image
c.DockerSpawner.image = os.environ['JUPYTERHUB_LOCAL_NOTEBOOK_IMAGE']
# JupyterHub requires a single-user instance of the Notebook server, so we
# default to using the `start-singleuser.sh` script included in the
# jupyter/docker-stacks *-notebook images as the Docker run command when
# spawning containers. Optionally, you can override the Docker run command
# using the DOCKER_SPAWN_CMD environment variable.
spawn_cmd = os.environ.get('JUPYTERHUB_DOCKER_SPAWN_CMD', "start-singleuser.sh")
c.DockerSpawner.extra_create_kwargs.update({ 'command': spawn_cmd })
# Connect containers to this Docker network
network_name = os.environ.get('JUPYTERHUB_NETWORK_NAME','laradock_backend')
c.DockerSpawner.use_internal_ip = True
c.DockerSpawner.network_name = network_name
enable_nvidia = os.environ.get('JUPYTERHUB_ENABLE_NVIDIA','false')
# Pass the network name as argument to spawned containers
c.DockerSpawner.extra_host_config = { 'network_mode': network_name }
if 'true' == enable_nvidia:
    # NOTE(review): assumes the host Docker daemon provides the
    # 'nvidia' runtime -- confirm before enabling.
    c.DockerSpawner.extra_host_config = { 'network_mode': network_name, 'runtime': 'nvidia' }
    pass
# c.DockerSpawner.extra_host_config = { 'network_mode': network_name, "devices":["/dev/nvidiactl","/dev/nvidia-uvm","/dev/nvidia0"] }

# Explicitly set notebook directory because we'll be mounting a host volume to
# it. Most jupyter/docker-stacks *-notebook images run the Notebook server as
# user `jovyan`, and set the notebook directory to `/home/jovyan/work`.
# We follow the same convention.
# notebook_dir = os.environ.get('JUPYTERHUB_DOCKER_NOTEBOOK_DIR') or '/home/jovyan/work'
notebook_dir = '/notebooks'
c.DockerSpawner.notebook_dir = notebook_dir
# Mount the real user's Docker volume on the host to the notebook user's
# notebook directory in the container
user_data = os.environ.get('JUPYTERHUB_USER_DATA','/jupyterhub')
c.DockerSpawner.volumes = {
    user_data+'/{username}': notebook_dir
}
c.DockerSpawner.extra_create_kwargs.update({ 'user': 'root'})
# volume_driver is no longer a keyword argument to create_container()
# c.DockerSpawner.extra_create_kwargs.update({ 'volume_driver': 'local' })
# Remove containers once they are stopped
c.DockerSpawner.remove_containers = True
# For debugging arguments passed to spawned containers
c.DockerSpawner.debug = True

# User containers will access hub by container name on the Docker network
c.JupyterHub.hub_ip = 'jupyterhub'
c.JupyterHub.hub_port = 8000

# TLS config
c.JupyterHub.port = 80
# c.JupyterHub.ssl_key = os.environ['SSL_KEY']
# c.JupyterHub.ssl_cert = os.environ['SSL_CERT']

# Authenticate users with GitHub OAuth
c.JupyterHub.authenticator_class = 'oauthenticator.GitHubOAuthenticator'
c.GitHubOAuthenticator.oauth_callback_url = os.environ['JUPYTERHUB_OAUTH_CALLBACK_URL']
c.GitHubOAuthenticator.client_id = os.environ['JUPYTERHUB_OAUTH_CLIENT_ID']
c.GitHubOAuthenticator.client_secret = os.environ['JUPYTERHUB_OAUTH_CLIENT_SECRET']

# Persist hub data on volume mounted inside container
data_dir = '/data'

c.JupyterHub.cookie_secret_file = os.path.join(data_dir,
                                               'jupyterhub_cookie_secret')

# NOTE(review): this dumps the entire environment -- including the OAuth
# client secret and the Postgres password -- into the hub log.  Consider
# removing it once debugging is done.
print(os.environ)

c.JupyterHub.db_url = 'postgresql://{user}:{password}@{host}/{db}'.format(
    user=os.environ['JUPYTERHUB_POSTGRES_USER'],
    host=os.environ['JUPYTERHUB_POSTGRES_HOST'],
    password=os.environ['JUPYTERHUB_POSTGRES_PASSWORD'],
    db=os.environ['JUPYTERHUB_POSTGRES_DB'],
)

# Whitelist users and admins
c.Authenticator.whitelist = whitelist = set()
c.Authenticator.admin_users = admin = set()
c.JupyterHub.admin_access = True
pwd = os.path.dirname(__file__)
# One username per line in ./userlist; an optional second column 'admin'
# grants admin rights to that user.
with open(os.path.join(pwd, 'userlist')) as f:
    for line in f:
        if not line:
            continue
        parts = line.split()
        name = parts[0]
        print(name)
        whitelist.add(name)
        if len(parts) > 1 and parts[1] == 'admin':
            admin.add(name)

admin.add('laradock')
| mit |
roadmapper/ansible | lib/ansible/module_utils/facts/system/service_mgr.py | 38 | 6617 | # Collect facts related to system service manager and init.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import platform
import re
from ansible.module_utils._text import to_native
from ansible.module_utils.facts.utils import get_file_content
from ansible.module_utils.facts.collector import BaseFactCollector
# The distutils module is not shipped with SUNWPython on Solaris.
# It's in the SUNWPython-devel package which also contains development files
# that don't belong on production boxes. Since our Solaris code doesn't
# depend on LooseVersion, do not import it on Solaris.
if platform.system() != 'SunOS':
from distutils.version import LooseVersion
class ServiceMgrFactCollector(BaseFactCollector):
    """Detect the host's service manager / init system and expose it as
    the ``service_mgr`` fact."""

    name = 'service_mgr'
    _fact_ids = set()
    required_facts = set(['platform', 'distribution'])

    @staticmethod
    def is_systemd_managed(module):
        """Return True when systemd booted this (running) system."""
        # tools must be installed
        if module.get_bin_path('systemctl'):

            # this should show if systemd is the boot init system, if checking init failed to mark as systemd
            # these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
            for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
                if os.path.exists(canary):
                    return True
        return False

    @staticmethod
    def is_systemd_managed_offline(module):
        """Return True when /sbin/init is a symlink to systemd (covers
        chroots/containers where systemd is installed but not running)."""
        # tools must be installed
        if module.get_bin_path('systemctl'):
            # check if /sbin/init is a symlink to systemd
            # on SUSE, /sbin/init may be missing if systemd-sysvinit package is not installed.
            if os.path.islink('/sbin/init') and os.path.basename(os.readlink('/sbin/init')) == 'systemd':
                return True
        return False

    def collect(self, module=None, collected_facts=None):
        """Return {'service_mgr': <name>} for the current host.

        Detection order: query PID 1 for its command name, then fall
        back to platform-specific heuristics, then to generic 'service'.
        """
        facts_dict = {}

        if not module:
            return facts_dict

        collected_facts = collected_facts or {}
        service_mgr_name = None

        # TODO: detect more custom init setups like bootscripts, dmd, s6, Epoch, etc
        # also other OSs other than linux might need to check across several possible candidates

        # Mapping of proc_1 values to more useful names
        proc_1_map = {
            'procd': 'openwrt_init',
            'runit-init': 'runit',
            'svscan': 'svc',
            'openrc-init': 'openrc',
        }

        # try various forms of querying pid 1
        proc_1 = get_file_content('/proc/1/comm')
        if proc_1 is None:
            # FIXME: return code isn't checked
            # FIXME: if stdout is empty string, odd things
            # FIXME: other code seems to think we could get proc_1 == None past this point
            rc, proc_1, err = module.run_command("ps -p 1 -o comm|tail -n 1", use_unsafe_shell=True)

            # If the output of the command starts with what looks like a PID, then the 'ps' command
            # probably didn't work the way we wanted, probably because it's busybox
            if re.match(r' *[0-9]+ ', proc_1):
                proc_1 = None

            # The ps command above may return "COMMAND" if the user cannot read /proc, e.g. with grsecurity
            if proc_1 == "COMMAND\n":
                proc_1 = None

        # FIXME: empty string proc_1 stays empty string
        if proc_1 is not None:
            proc_1 = os.path.basename(proc_1)
            proc_1 = to_native(proc_1)
            proc_1 = proc_1.strip()

        if proc_1 is not None and (proc_1 == 'init' or proc_1.endswith('sh')):
            # many systems return init, so this cannot be trusted, if it ends in 'sh' it probably is a shell in a container
            proc_1 = None

        # if not init/None it should be an identifiable or custom init, so we are done!
        if proc_1 is not None:
            # Lookup proc_1 value in map and use proc_1 value itself if no match
            # FIXME: empty string still falls through
            service_mgr_name = proc_1_map.get(proc_1, proc_1)

        # FIXME: replace with a system->service_mgr_name map?
        # start with the easy ones
        elif collected_facts.get('ansible_distribution', None) == 'MacOSX':
            # FIXME: find way to query executable, version matching is not ideal
            if LooseVersion(platform.mac_ver()[0]) >= LooseVersion('10.4'):
                service_mgr_name = 'launchd'
            else:
                service_mgr_name = 'systemstarter'
        elif 'BSD' in collected_facts.get('ansible_system', '') or collected_facts.get('ansible_system') in ['Bitrig', 'DragonFly']:
            # FIXME: we might want to break out to individual BSDs or 'rc'
            service_mgr_name = 'bsdinit'
        elif collected_facts.get('ansible_system') == 'AIX':
            service_mgr_name = 'src'
        elif collected_facts.get('ansible_system') == 'SunOS':
            service_mgr_name = 'smf'
        elif collected_facts.get('ansible_distribution') == 'OpenWrt':
            service_mgr_name = 'openwrt_init'
        elif collected_facts.get('ansible_system') == 'Linux':
            # FIXME: mv is_systemd_managed
            if self.is_systemd_managed(module=module):
                service_mgr_name = 'systemd'
            elif module.get_bin_path('initctl') and os.path.exists("/etc/init/"):
                service_mgr_name = 'upstart'
            elif os.path.exists('/sbin/openrc'):
                service_mgr_name = 'openrc'
            elif self.is_systemd_managed_offline(module=module):
                service_mgr_name = 'systemd'
            elif os.path.exists('/etc/init.d/'):
                service_mgr_name = 'sysvinit'

        if not service_mgr_name:
            # if we cannot detect, fallback to generic 'service'
            service_mgr_name = 'service'

        facts_dict['service_mgr'] = service_mgr_name
        return facts_dict
| gpl-3.0 |
plotly/plotly.py | packages/python/plotly/plotly/graph_objs/heatmapgl/legendgrouptitle/_font.py | 1 | 8487 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Font styling for the legend group title of a heatmapgl trace
    (``heatmapgl.legendgrouptitle.font``).

    Valid properties are ``color``, ``family`` and ``size``; see the
    individual property docstrings below.
    """

    # class properties
    # --------------------
    _parent_path_str = "heatmapgl.legendgrouptitle"
    _path_str = "heatmapgl.legendgrouptitle.font"
    _valid_props = {"color", "family", "size"}

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
        One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
        Narrow", "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        """

    def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
        """
        Construct a new Font object

        Sets this legend group's title font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.heatmapgl.lege
            ndgrouptitle.Font`
        color

        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        size

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.heatmapgl.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmapgl.legendgrouptitle.Font`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
| mit |
hkariti/ansible-modules-core | windows/win_template.py | 40 | 2343 | # this is a virtual module that is entirely implemented server side
DOCUMENTATION = '''
---
module: win_template
version_added: "1.9.2"
short_description: Templates a file out to a remote server.
description:
- Templates are processed by the Jinja2 templating language
(U(http://jinja.pocoo.org/docs/)) - documentation on the template
formatting can be found in the Template Designer Documentation
(U(http://jinja.pocoo.org/docs/templates/)).
- "Six additional variables can be used in templates: C(ansible_managed)
(configurable via the C(defaults) section of C(ansible.cfg)) contains a string
which can be used to describe the template name, host, modification time of the
template file and the owner uid, C(template_host) contains the node name of
the template's machine, C(template_uid) the owner, C(template_path) the
absolute path of the template, C(template_fullpath) is the absolute path of the
template, and C(template_run_date) is the date that the template was rendered. Note that including
a string that uses a date in the template will result in the template being marked 'changed'
each time."
options:
src:
description:
- Path of a Jinja2 formatted template on the local server. This can be a relative or absolute path.
required: true
dest:
description:
- Location to render the template to on the remote machine.
required: true
notes:
- "templates are loaded with C(trim_blocks=True)."
- By default, windows line endings are not created in the generated file.
- "In order to ensure windows line endings are in the generated file, add the following header
as the first line of your template: #jinja2: newline_sequence:'\r\n' and ensure each line
of the template ends with \r\n"
- Beware fetching files from windows machines when creating templates
because certain tools, such as Powershell ISE, and regedit's export facility
add a Byte Order Mark as the first character of the file, which can cause tracebacks.
- Use "od -cx" to examine your templates for Byte Order Marks.
author: "Jon Hawkesworth (@jhawkesworth)"
'''
EXAMPLES = '''
# Playbook Example (win_template can only be run inside a playbook)
- win_template: src=/mytemplates/file.conf.j2 dest=C:\\temp\\file.conf
'''
| gpl-3.0 |
e9wifi-dev/android_kernel_lge_e9wifi | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on access."""
    return defaultdict(autodict)

# event_name -> field_name -> {'delim': str, 'values': {int: str}}
flag_fields = autodict()
# event_name -> field_name -> {'values': {int: str}}
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    # Register the delimiter used when joining multiple flag names.
    flag_fields[event_name][field_name]['delim'] = delim

def define_flag_value(event_name, field_name, value, field_str):
    # Map one numeric flag bit to its symbolic name.
    flag_fields[event_name][field_name]['values'][value] = field_str

def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass

def define_symbolic_value(event_name, field_name, value, field_str):
    # Map one numeric field value to its symbolic name.
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Render a flags bitmask as its symbolic names, joined by the
    delimiter registered via define_flag_field()."""
    string = ""

    if flag_fields[event_name][field_name]:
        print_delim = 0
        # sorted() instead of list-ifying and calling .sort(), so this
        # also works on Python 3 where dict.keys() returns a view that
        # has no sort() method.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                # A zero value maps to the flag registered for 0 (if any).
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear the bit so leftover bits are simply ignored.
                value &= ~idx

    return string
def symbol_str(event_name, field_name, value):
    """Translate a numeric field value into its registered symbolic
    name; return "" when no mapping exists."""
    string = ""

    if symbolic_fields[event_name][field_name]:
        # sorted() instead of list-ifying and calling .sort(), so this
        # also works on Python 3 where dict.keys() returns a view that
        # has no sort() method.
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break

    return string
# Bits of the common_flags trace field; names mirror the kernel's
# enum trace_flag_type.
trace_flags = { 0x00: "NONE", \
                0x01: "IRQS_OFF", \
                0x02: "IRQS_NOSUPPORT", \
                0x04: "NEED_RESCHED", \
                0x08: "HARDIRQ", \
                0x10: "SOFTIRQ" }
def trace_flag_str(value):
    """Render the common_flags bitmask as "A | B | ..." using the
    names registered in trace_flags."""
    string = ""
    print_delim = 0

    # Iterate keys in ascending bit order so the output is
    # deterministic even on interpreters whose dict ordering is
    # arbitrary (e.g. Python 2).
    for idx in sorted(trace_flags.keys()):
        if not value and not idx:
            string += "NONE"
            break

        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            # Clear the bit; any unknown leftover bits are ignored.
            value &= ~idx

    return string
def taskState(state):
    """Map a scheduler task-state value to its one-letter display name
    ("Unknown" for anything not in the table)."""
    state_names = {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD"
    }
    return state_names.get(state, "Unknown")
class EventHeaders:
    """Common per-event header fields shared by every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        # Store the fields without their "common_" prefix.
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Event timestamp in nanoseconds."""
        return self.secs * 1000000000 + self.nsecs

    def ts_format(self):
        """Event timestamp as a "seconds.microseconds" string."""
        usecs = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, usecs)
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.