repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringclasses 981 values | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15 values |
|---|---|---|---|---|---|
awkspace/ansible | lib/ansible/modules/storage/purestorage/purefb_network.py | 14 | 5908 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Simon Dodsley (simon@purestorage.com)
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: purefb_network
version_added: "2.8"
short_description: Manage network interfaces in a Pure Storage FlashBlade
description:
- This module manages network interfaces on Pure Storage FlashBlade.
- When creating a network interface a subnet must already exist with
a network prefix that covers the IP address of the interface being
created.
author: Simon Dodsley (@sdodsley)
options:
name:
description:
- Interface Name.
required: true
state:
description:
- Create, delete or modifies a network interface.
required: false
default: present
choices: [ "present", "absent" ]
address:
description:
- IP address of interface.
required: false
services:
description:
- Define which services are configured for the interfaces.
required: false
choices: [ "data" ]
default: data
itype:
description:
- Type of interface.
required: false
choices: [ "vip" ]
default: vip
extends_documentation_fragment:
- purestorage.fb
'''
EXAMPLES = '''
- name: Create new network interface named foo
purefb_network:
name: foo
address: 10.21.200.23
state: present
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Change IP address of network interface named foo
purefb_network:
name: foo
state: present
address: 10.21.200.123
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641
- name: Delete network interface named foo
purefb_network:
name: foo
state: absent
fb_url: 10.10.10.2
api_token: T-55a68eb5-c785-4720-a2ca-8b03903bf641'''
RETURN = '''
'''
HAS_PURITY_FB = True
try:
from purity_fb import NetworkInterface
except ImportError:
HAS_PURITY_FB = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_blade, purefb_argument_spec
MINIMUM_API_VERSION = '1.3'
def get_iface(module, blade):
    """Look up the named network interface; return it, or None when absent."""
    names = [module.params['name']]
    try:
        res = blade.network_interfaces.list_network_interfaces(names=names)
        return res.items[0]
    except Exception:
        # Any API failure (including "not found") is treated as absence.
        return None
def create_iface(module, blade):
    """Create a network interface from the module parameters.

    Fails the module when the FlashBlade rejects the request (typically
    because no subnet covers the requested IP address); otherwise exits
    with changed=True.
    """
    names = [module.params['name']]
    services = [module.params['services']]
    try:
        blade.network_interfaces.create_network_interfaces(
            names=names,
            network_interface=NetworkInterface(
                address=module.params['address'],
                services=services,
                type=module.params['itype']
            )
        )
    except Exception:
        module.fail_json(msg='Interface creation failed. Check valid subnet exists for IP address {0}'.format(module.params['address']))
    # fail_json() raises, so reaching this point means the create succeeded.
    # (The original's `changed = False` after fail_json was unreachable.)
    module.exit_json(changed=True)
def modify_iface(module, blade):
    """Update the interface's IP address when it differs from the request."""
    changed = False
    current = get_iface(module, blade)
    names = [module.params['name']]
    wanted = module.params['address']
    if wanted != current.address:
        try:
            blade.network_interfaces.update_network_interfaces(
                names=names,
                network_interface=NetworkInterface(address=wanted))
            changed = True
        except Exception:
            # Update rejected by the array: report no change.
            changed = False
    module.exit_json(changed=changed)
def delete_iface(module, blade):
    """Delete the named network interface and report whether anything changed."""
    names = [module.params['name']]
    try:
        blade.network_interfaces.delete_network_interfaces(names=names)
        changed = True
    except Exception:
        # Deletion failed (or interface already gone): report no change.
        changed = False
    module.exit_json(changed=changed)
def main():
    """Module entry point: validate arguments, then dispatch on state."""
    argument_spec = purefb_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
        address=dict(),
        services=dict(default='data', choices=['data']),
        itype=dict(default='vip', choices=['vip']),
    ))
    module = AnsibleModule(
        argument_spec,
        required_if=[["state", "present", ["address"]]],
        supports_check_mode=False)

    if not HAS_PURITY_FB:
        module.fail_json(msg='purity_fb sdk is required for this module')

    blade = get_blade(module)
    versions = blade.api_version.list_versions().versions
    if MINIMUM_API_VERSION not in versions:
        module.fail_json(msg='Upgrade Purity//FB to enable this module')

    state = module.params['state']
    iface = get_iface(module, blade)
    if state == 'present':
        if iface:
            modify_iface(module, blade)
        else:
            create_iface(module, blade)
    else:  # state == 'absent'
        if iface:
            delete_iface(module, blade)
        else:
            module.exit_json(changed=False)
| gpl-3.0 |
dominikl/openmicroscopy | build.py | 10 | 4021 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# $Id$
#
# Copyright 2009 Glencoe Software, Inc. All rights reserved.
# Use is subject to license terms supplied in LICENSE.txt
#
# General build scripts.
import os
import sys
import subprocess
BUILD_PY = "-Dbuild.py=true"
def popen(args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE):
    """Spawn *args* as a child process, passing a copy of the current environment."""
    env = os.environ.copy()
    return subprocess.Popen(
        args, env=env, stdin=stdin, stdout=stdout, stderr=stderr)
def execute(args):
    """Run *args*, streaming output to our stdout/stderr; exit on failure."""
    proc = popen(args, stdout=sys.stdout, stderr=sys.stderr)
    code = proc.wait()
    if code != 0:
        sys.exit(code)
def notification(msg, prio):
    """
    Provides UI notification.
    """
    # May want to revert this to be OMERO_BUILD_NOTIFICATION, or whatever.
    # Notifications are skipped in quiet mode and on Windows (growlnotify
    # is a macOS/Linux tool).
    if "OMERO_QUIET" in os.environ or sys.platform == "win32":
        return
    try:
        # NOTE(review): p.communicate(msg) passes a str into a binary pipe;
        # this is fine on Python 2 (which this file targets, see the
        # `except SystemExit, se` form at the bottom) but would need bytes
        # on Python 3 -- confirm before porting.
        p = popen(["growlnotify", "-t", "OMERO Build Status", "-p",
                   str(prio)], stdin=subprocess.PIPE)
        p.communicate(msg)
        rc = p.wait()
        if rc != 0:
            pass  # growl didn't work
    except OSError:
        pass  # No growlnotify found, may want to use another tool
def java_omero(args):
    """Invoke the "omero" build target via java with the standard options."""
    command = [find_java()]
    log4j = os.path.join(os.path.curdir, "lib", "log4j-build.xml")
    command.append("-Dlog4j.configuration=%s" % log4j)
    command.append(BUILD_PY)
    command.extend(calculate_memory_args())
    command.append("omero")
    # Accept either a single argument string or a sequence of arguments.
    if isinstance(args, str):
        command.append(args)
    else:
        command.extend(args)
    execute(command)
def find_java():
    """Return the java executable to use (resolved via PATH at exec time)."""
    return "java"
def calculate_memory_args():
    """JVM memory flags for the build; unrecognized options are ignored by the VM."""
    return (
        "-Xmx600M",
        "-XX:MaxPermSize=256m",
        "-XX:+IgnoreUnrecognizedVMOptions",
    )
def handle_tools(args):
    """Translate leading tool flags (-py, -web, ...) into ant `-f` invocations.

    Consumes recognized flags from the front of *args* and returns the
    expanded argument list; "-perf" attaches an ant performance listener.
    """
    _ = os.path.sep.join
    additions = []
    mappings = {
        "-top": _(["build.xml"]),
        "-cpp": _(["components", "tools", "OmeroCpp", "build.xml"]),
        "-fs": _(["components", "tools", "OmeroFS", "build.xml"]),
        "-java": _(["components", "tools", "OmeroJava", "build.xml"]),
        "-py": _(["components", "tools", "OmeroPy", "build.xml"]),
        "-web": _(["components", "tools", "OmeroWeb", "build.xml"]),
    }
    # NB: the original `mappings.keys() + ["-perf"]` only works on Python 2
    # (dict views don't concatenate on Python 3); test membership directly.
    while args and (args[0] in mappings or args[0] == "-perf"):
        if args[0] == "-perf":
            args.pop(0)
            additions.extend(["-listener",
                              "net.sf.antcontrib.perf.AntPerformanceListener"])
        else:
            additions.extend(["-f", mappings[args.pop(0)]])
    return additions + args
def handle_relative(args):
    """
    If no other specific file has been requested,
    then use whatever relative path is needed to
    specify build.xml in the local directory.
    Regardless, os.chdir is called to the top.
    """
    extra = []
    here = os.path.abspath(__file__)
    top_dir = os.path.abspath(os.path.join(here, os.pardir))
    cwd = os.path.abspath(os.getcwd())
    os.chdir(top_dir)
    if "-f" not in args:
        candidate = os.path.join(cwd, "build.xml")
        if os.path.exists(candidate):
            extra.append("-f")
            extra.append(candidate)
    return extra + args
if __name__ == "__main__":
    #
    # use java_omero which will specially configure the build system.
    #
    args = list(sys.argv)
    args.pop(0)

    # Unset CLASSPATH, since this breaks the build
    if os.environ.get('CLASSPATH'):
        del os.environ['CLASSPATH']

    try:
        args = handle_tools(args)
        args = handle_relative(args)
        java_omero(args)
        notification(""" Finished: %s """ % " ".join(args), 0)
    except KeyboardInterrupt:
        sys.stderr.write("\nCancelled by user\n")
        sys.exit(2)
    # `except SystemExit, se` is Python-2-only syntax; the `as` form below
    # is valid on Python 2.6+ and Python 3.
    except SystemExit as se:
        notification(""" Failed: %s """ % " ".join(args), 100)
        sys.exit(se.code)
| gpl-2.0 |
newrocknj/horizon | openstack_dashboard/test/test_data/keystone_data.py | 30 | 16443 | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import timedelta # noqa
from django.conf import settings
from django.utils import datetime_safe
from keystoneclient import access
from keystoneclient.v2_0 import ec2
from keystoneclient.v2_0 import roles
from keystoneclient.v2_0 import tenants
from keystoneclient.v2_0 import users
from keystoneclient.v3 import domains
from keystoneclient.v3 import groups
from keystoneclient.v3 import role_assignments
from openstack_auth import user as auth_user
from openstack_dashboard.test.test_data import utils
# Dummy service catalog with all service.
# All endpoint URLs should point to example.com.
# Try to keep them as accurate to real data as possible (ports, URIs, etc.)
# Consumers must deep-copy this (see data() below) before mutating it.
SERVICE_CATALOG = [
    {"type": "compute",
     "name": "nova",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.nova.example.com:8774/v2",
          "internalURL": "http://int.nova.example.com:8774/v2",
          "publicURL": "http://public.nova.example.com:8774/v2"},
         {"region": "RegionTwo",
          "adminURL": "http://admin.nova2.example.com:8774/v2",
          "internalURL": "http://int.nova2.example.com:8774/v2",
          "publicURL": "http://public.nova2.example.com:8774/v2"}]},
    {"type": "volume",
     "name": "cinder",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.nova.example.com:8776/v1",
          "internalURL": "http://int.nova.example.com:8776/v1",
          "publicURL": "http://public.nova.example.com:8776/v1"},
         {"region": "RegionTwo",
          "adminURL": "http://admin.nova.example.com:8776/v1",
          "internalURL": "http://int.nova.example.com:8776/v1",
          "publicURL": "http://public.nova.example.com:8776/v1"}]},
    {"type": "volumev2",
     "name": "cinderv2",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.nova.example.com:8776/v2",
          "internalURL": "http://int.nova.example.com:8776/v2",
          "publicURL": "http://public.nova.example.com:8776/v2"},
         {"region": "RegionTwo",
          "adminURL": "http://admin.nova.example.com:8776/v2",
          "internalURL": "http://int.nova.example.com:8776/v2",
          "publicURL": "http://public.nova.example.com:8776/v2"}]},
    {"type": "image",
     "name": "glance",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.glance.example.com:9292/v1",
          "internalURL": "http://int.glance.example.com:9292/v1",
          "publicURL": "http://public.glance.example.com:9292/v1"}]},
    {"type": "identity",
     "name": "keystone",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.keystone.example.com:35357/v2.0",
          "internalURL": "http://int.keystone.example.com:5000/v2.0",
          "publicURL": "http://public.keystone.example.com:5000/v2.0"}]},
    {"type": "object-store",
     "name": "swift",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.swift.example.com:8080/",
          "internalURL": "http://int.swift.example.com:8080/",
          "publicURL": "http://public.swift.example.com:8080/"}]},
    {"type": "network",
     "name": "neutron",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.neutron.example.com:9696/",
          "internalURL": "http://int.neutron.example.com:9696/",
          "publicURL": "http://public.neutron.example.com:9696/"}]},
    {"type": "ec2",
     "name": "EC2 Service",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.nova.example.com:8773/services/Admin",
          "publicURL": "http://public.nova.example.com:8773/services/Cloud",
          "internalURL": "http://int.nova.example.com:8773/services/Cloud"}]},
    {"type": "metering",
     "name": "ceilometer",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.ceilometer.example.com:8777",
          "publicURL": "http://public.ceilometer.example.com:8777",
          "internalURL": "http://int.ceilometer.example.com:8777"}]},
    {"type": "orchestration",
     "name": "Heat",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.heat.example.com:8004/v1",
          "publicURL": "http://public.heat.example.com:8004/v1",
          "internalURL": "http://int.heat.example.com:8004/v1"}]},
    {"type": "database",
     "name": "Trove",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.trove.example.com:8779/v1.0",
          "publicURL": "http://public.trove.example.com:8779/v1.0",
          "internalURL": "http://int.trove.example.com:8779/v1.0"}]},
    {"type": "data-processing",
     "name": "Sahara",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.sahara.example.com:8386/v1.1",
          "publicURL": "http://public.sahara.example.com:8386/v1.1",
          "internalURL": "http://int.sahara.example.com:8386/v1.1"}]}
]
def data(TEST):
    """Populate *TEST* with keystone fixtures.

    Fills the test-data containers with roles, domains, users, groups,
    role assignments, tenants, tokens and EC2 credentials, and marks one
    "current" instance of each (TEST.domain, TEST.user, TEST.tenant,
    TEST.token).
    """
    # Make a deep copy of the catalog to avoid persisting side-effects
    # when tests modify the catalog.
    TEST.service_catalog = copy.deepcopy(SERVICE_CATALOG)
    TEST.tokens = utils.TestDataContainer()
    TEST.domains = utils.TestDataContainer()
    TEST.users = utils.TestDataContainer()
    TEST.groups = utils.TestDataContainer()
    TEST.tenants = utils.TestDataContainer()
    TEST.role_assignments = utils.TestDataContainer()
    TEST.roles = utils.TestDataContainer()
    TEST.ec2 = utils.TestDataContainer()

    # Roles: one admin role plus the configured default member role.
    admin_role_dict = {'id': '1',
                       'name': 'admin'}
    admin_role = roles.Role(roles.RoleManager, admin_role_dict)
    member_role_dict = {'id': "2",
                        'name': settings.OPENSTACK_KEYSTONE_DEFAULT_ROLE}
    member_role = roles.Role(roles.RoleManager, member_role_dict)
    TEST.roles.add(admin_role, member_role)
    TEST.roles.admin = admin_role
    TEST.roles.member = member_role

    # Domains: one enabled, one disabled.
    domain_dict = {'id': "1",
                   'name': 'test_domain',
                   'description': "a test domain.",
                   'enabled': True}
    domain_dict_2 = {'id': "2",
                     'name': 'disabled_domain',
                     'description': "a disabled test domain.",
                     'enabled': False}
    domain = domains.Domain(domains.DomainManager, domain_dict)
    disabled_domain = domains.Domain(domains.DomainManager, domain_dict_2)
    TEST.domains.add(domain, disabled_domain)
    TEST.domain = domain  # Your "current" domain

    # Users: five users spread across the two domains/projects.
    user_dict = {'id': "1",
                 'name': 'test_user',
                 'description': 'test_decription',
                 'email': 'test@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user = users.User(None, user_dict)
    user_dict = {'id': "2",
                 'name': 'user_two',
                 'description': 'test_decription',
                 'email': 'two@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user2 = users.User(None, user_dict)
    user_dict = {'id': "3",
                 'name': 'user_three',
                 'description': 'test_decription',
                 'email': 'three@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user3 = users.User(None, user_dict)
    user_dict = {'id': "4",
                 'name': 'user_four',
                 'description': 'test_decription',
                 'email': 'four@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '2',
                 'enabled': True,
                 'domain_id': "2"}
    user4 = users.User(None, user_dict)
    user_dict = {'id': "5",
                 'name': 'user_five',
                 'description': 'test_decription',
                 'email': None,  # deliberately email-less user
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '2',
                 'enabled': True,
                 'domain_id': "1"}
    user5 = users.User(None, user_dict)
    TEST.users.add(user, user2, user3, user4, user5)
    TEST.user = user  # Your "current" user
    TEST.user.service_catalog = copy.deepcopy(SERVICE_CATALOG)

    # Groups: four groups, the last belonging to the disabled domain.
    group_dict = {'id': "1",
                  'name': 'group_one',
                  'description': 'group one description',
                  'project_id': '1',
                  'domain_id': '1'}
    group = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "2",
                  'name': 'group_two',
                  'description': 'group two description',
                  'project_id': '1',
                  'domain_id': '1'}
    group2 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "3",
                  'name': 'group_three',
                  'description': 'group three description',
                  'project_id': '1',
                  'domain_id': '1'}
    group3 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "4",
                  'name': 'group_four',
                  'description': 'group four description',
                  'project_id': '2',
                  'domain_id': '2'}
    group4 = groups.Group(groups.GroupManager(None), group_dict)
    TEST.groups.add(group, group2, group3, group4)

    # Role assignments: four project-scoped, four domain-scoped.
    role_assignments_dict = {'user': {'id': '1'},
                             'role': {'id': '1'},
                             'scope': {'project': {'id': '1'}}}
    proj_role_assignment1 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'user': {'id': '2'},
                             'role': {'id': '2'},
                             'scope': {'project': {'id': '1'}}}
    proj_role_assignment2 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'group': {'id': '1'},
                             'role': {'id': '2'},
                             'scope': {'project': {'id': '1'}}}
    proj_role_assignment3 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'user': {'id': '3'},
                             'role': {'id': '2'},
                             'scope': {'project': {'id': '1'}}}
    proj_role_assignment4 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'user': {'id': '1'},
                             'role': {'id': '1'},
                             'scope': {'domain': {'id': '1'}}}
    domain_role_assignment1 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'user': {'id': '2'},
                             'role': {'id': '2'},
                             'scope': {'domain': {'id': '1'}}}
    domain_role_assignment2 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'group': {'id': '1'},
                             'role': {'id': '2'},
                             'scope': {'domain': {'id': '1'}}}
    domain_role_assignment3 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'user': {'id': '3'},
                             'role': {'id': '2'},
                             'scope': {'domain': {'id': '1'}}}
    domain_role_assignment4 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    TEST.role_assignments.add(proj_role_assignment1,
                              proj_role_assignment2,
                              proj_role_assignment3,
                              proj_role_assignment4,
                              domain_role_assignment1,
                              domain_role_assignment2,
                              domain_role_assignment3,
                              domain_role_assignment4)

    # Tenants: enabled, disabled, and one with a unicode name.
    tenant_dict = {'id': "1",
                   'name': 'test_tenant',
                   'description': "a test tenant.",
                   'enabled': True,
                   'domain_id': '1',
                   'domain_name': 'test_domain'}
    tenant_dict_2 = {'id': "2",
                     'name': 'disabled_tenant',
                     'description': "a disabled test tenant.",
                     'enabled': False,
                     'domain_id': '2',
                     'domain_name': 'disabled_domain'}
    tenant_dict_3 = {'id': "3",
                     'name': u'\u4e91\u89c4\u5219',
                     'description': "an unicode-named tenant.",
                     'enabled': True,
                     'domain_id': '2',
                     'domain_name': 'disabled_domain'}
    tenant = tenants.Tenant(tenants.TenantManager, tenant_dict)
    disabled_tenant = tenants.Tenant(tenants.TenantManager, tenant_dict_2)
    tenant_unicode = tenants.Tenant(tenants.TenantManager, tenant_dict_3)
    TEST.tenants.add(tenant, disabled_tenant, tenant_unicode)
    TEST.tenant = tenant  # Your "current" tenant

    # Tokens: one scoped to the current tenant, one unscoped; both expire
    # tomorrow so they are valid for the duration of any test run.
    tomorrow = datetime_safe.datetime.now() + timedelta(days=1)
    expiration = tomorrow.isoformat()

    scoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration,
                'tenant': tenant_dict,
                'tenants': [tenant_dict]},
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]},
            'serviceCatalog': TEST.service_catalog
        }
    }
    scoped_access_info = access.AccessInfo.factory(resp=None,
                                                   body=scoped_token_dict)

    unscoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration},
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]},
            'serviceCatalog': TEST.service_catalog
        }
    }
    unscoped_access_info = access.AccessInfo.factory(resp=None,
                                                     body=unscoped_token_dict)

    scoped_token = auth_user.Token(scoped_access_info)
    unscoped_token = auth_user.Token(unscoped_access_info)
    TEST.tokens.add(scoped_token, unscoped_token)
    TEST.token = scoped_token  # your "current" token.
    TEST.tokens.scoped_token = scoped_token
    TEST.tokens.unscoped_token = unscoped_token

    # EC2 credential pair tied to the current tenant.
    access_secret = ec2.EC2(ec2.CredentialsManager, {"access": "access",
                                                     "secret": "secret",
                                                     "tenant_id": tenant.id})
    TEST.ec2.add(access_secret)
| apache-2.0 |
waltervh/BornAgain | dev-tools/analyze/baloc/history_collector.py | 3 | 4186 | """
Process gitlog and create a file with number of lines of code.
"""
from datetime import datetime
from .file_types import FileTypes, filetype
import re
from email.utils import parsedate
import subprocess
def gitlog():
    """
    Execute gitlog command and make generator over lines in the log
    """
    proc = subprocess.Popen(['git', 'log', 'develop', '--reverse', '-p'],
                            stdout=subprocess.PIPE)
    for raw in iter(proc.stdout.readline, b''):
        text = raw.decode('latin1')
        # Skip empty reads; yield each log line stripped of whitespace.
        if text:
            yield text.strip()
class Commit:
    """
    One commit's metadata plus accumulated line-of-code counters
    (broken down by file type).
    """
    def __init__(self):
        # Identification / authorship (filled in while parsing the log).
        self.hsh = None
        self.who = None
        self.cmt = None
        self.date = datetime.today()
        # Line counters.
        self.added_lines = 0
        self.removed_lines = 0
        self.loc_for_type = FileTypes.loc_for_type()

    def increment_loc(self, file_type):
        """Record one added line of *file_type*."""
        self.added_lines += 1
        self.loc_for_type[file_type] += 1

    def decrement_loc(self, file_type):
        """Record one removed line of *file_type*."""
        self.removed_lines += 1
        self.loc_for_type[file_type] -= 1
class DayHistory:
    """
    Snapshot of per-file-type line counts for a single calendar day.
    """
    def __init__(self, date, locs):
        self.date = date
        self.loc_for_type = locs
class HistoryCollector:
    """Consumes `git log -p` output and aggregates LOC statistics per day."""

    def __init__(self):
        self.last_commit = Commit()
        self.data = []
        self.locs = 0  # running total of code lines (file types below PYAPI)
        # fc is a tiny state machine for grabbing the commit subject:
        # 1 = "Date:" header seen, 2 = next line is the subject.
        self.fc = 0
        self.file_type_ppp = FileTypes.UNDEF  # type from current '+++' diff header
        self.file_type_mmm = FileTypes.UNDEF  # type from current '---' diff header
        self.start_date = datetime(2012, 4, 1)
        self.days_history = {}  # DayHistory vs. number of days since beginning of coding

    def pop(self):
        """Finalize the current commit: print a summary line and record it.

        Commits with no added lines are skipped. The per-type LOC counters
        carry over into the next Commit so totals keep accumulating.
        """
        if not self.last_commit.added_lines:
            return
        pstr = "%s %8u %5s %5s %7s %s" % (self.last_commit.date, self.locs, '+' + str(self.last_commit.added_lines), '-' + str(self.last_commit.removed_lines), self.last_commit.hsh, self.last_commit.who)
        print(pstr)
        # Index the day by its offset from the project start date; later
        # commits on the same day overwrite earlier ones.
        delta = (self.last_commit.date - self.start_date).days
        self.days_history[delta] = DayHistory(self.last_commit.date, self.last_commit.loc_for_type)
        tmp = list(self.last_commit.loc_for_type)
        self.last_commit = Commit()
        self.last_commit.loc_for_type = tmp

    def run(self):
        """Parse the whole git log, delegating per-commit totals to pop()."""
        nnn = 0
        for x in gitlog():
            nnn += 1
            if x.startswith('commit'):
                # New commit begins: flush the previous one.
                self.pop()
                self.last_commit.hsh = x[7:14]
            if x.startswith('Author'):
                # Reduce "Author: Name <email>" to the bare email address.
                self.last_commit.who = x.replace("Author: ", '').replace('\n', '')
                self.last_commit.who = re.sub(">.*", "", self.last_commit.who)
                self.last_commit.who = re.sub(".*<", "", self.last_commit.who)
            if x.startswith('Date'):
                self.fc = 1
                self.last_commit.date = datetime(*parsedate(x[5:])[:7])
            if self.fc == 2:
                # First line after the blank separator is the subject.
                self.last_commit.cmt = x[:-1]
                self.fc = 0
            if self.fc == 1:
                if len(x) == 1:
                    self.fc = 2
            if x.startswith('+++'):
                self.file_type_ppp = filetype(x)
            if x.startswith('---'):
                self.file_type_mmm = filetype(x)
            if x.startswith('+') and not x.startswith('+++'):
                self.last_commit.increment_loc(self.file_type_ppp)
                if self.file_type_ppp < FileTypes.PYAPI:
                    self.locs += 1
            if x.startswith('-') and not x.startswith('---'):
                self.last_commit.decrement_loc(self.file_type_mmm)
                if self.file_type_mmm < FileTypes.PYAPI:
                    self.locs -= 1
            # if nnn>1000000:
            #     break
        # Flush the final commit.
        self.pop()

    def save_report(self, filename):
        """Write one line per recorded day: date plus per-file-type LOC counts."""
        print("Saving report in {0}".format(filename))
        with open(filename, 'w') as the_file:
            for key in self.days_history:
                dayhist = self.days_history[key]
                pstr = "%s %s \n" % (str(dayhist.date), ' '.join(str(e) for e in dayhist.loc_for_type))
                the_file.write(pstr)
| gpl-3.0 |
ikcalB/linuxcnc-mirror | lib/python/gladevcp/hal_actions.py | 6 | 22004 | #!/usr/bin/env python
# vim: sts=4 sw=4 et
# GladeVcp actions
#
# Copyright (c) 2010 Pavel Shramov <shramov@mexmat.net>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import gobject
import gtk
import os
import time
import re, string
from hal_widgets import _HalWidgetBase
import linuxcnc
from hal_glib import GStat
_ = lambda x: x  # no-op translation stub (stands in for gettext)
class _EMCStaticHolder:
    """Lazily creates and caches the shared linuxcnc command/status objects."""
    def __init__(self):
        # Delay init...
        self.linuxcnc = None
        self.stat = None
        self.gstat = None

    def get(self):
        """Return (command, stat, GStat), constructing them on first use."""
        if not self.linuxcnc:
            self.linuxcnc = linuxcnc.command()
        if not self.gstat:
            self.gstat = GStat()
        # NOTE(review): self.stat is never assigned; the stat object is
        # always taken from the GStat wrapper.
        return self.linuxcnc, self.gstat.stat, self.gstat
class _EMCStatic:
    """Class-level singleton access to the shared linuxcnc objects."""
    holder = _EMCStaticHolder()  # shared across all instances

    def get(self):
        return self.holder.get()
class _EMC_ActionBase(_HalWidgetBase):
    """Shared behavior for all EMC GTK actions: linuxcnc access, status
    queries, and re-entrancy-safe signal handling."""
    _gproperties = {'name': (gobject.TYPE_STRING, 'Name', 'Action name', "",
                             gobject.PARAM_READWRITE|gobject.PARAM_CONSTRUCT)
                    }
    linuxcnc_static = _EMCStatic()

    def _hal_init(self):
        self.linuxcnc, self.stat, self.gstat = self.linuxcnc_static.get()
        self._stop_emission = False
        # if 'NO_FORCE_HOMING' is true, MDI commands are allowed before homing.
        inifile = os.environ.get('INI_FILE_NAME', '/dev/null')
        ini = linuxcnc.ini(inifile)
        self.no_f_home = int(ini.find("TRAJ", "NO_FORCE_HOMING") or 0)

    def machine_on(self):
        """True when the machine is past ESTOP (power on or estop-reset)."""
        self.stat.poll()
        return self.stat.task_state > linuxcnc.STATE_OFF

    def is_auto_mode(self):
        """True when the task is in AUTO (program) mode."""
        self.stat.poll()
        print self.stat.task_mode, linuxcnc.MODE_AUTO
        return self.stat.task_mode == linuxcnc.MODE_AUTO

    def is_file_loaded(self):
        """True when a program file is currently loaded."""
        self.stat.poll()
        print "file name:",self.stat.file
        if self.stat.file:
            return True
        else:
            return False

    def is_all_homed(self):
        """True when every axis present in axis_mask reports homed."""
        # NOTE(review): indentation reconstructed -- the mask check and the
        # axis_count increment are taken to be at loop level; confirm
        # against the upstream linuxcnc source.
        self.stat.poll()
        axis_count = homed_count = 0
        for i,h in enumerate(self.stat.homed):
            if h:
                if self.stat.axis_mask & (1<<i): homed_count +=1
            if self.stat.axis_mask & (1<<i) == 0: continue
            axis_count += 1
        if homed_count == axis_count:
            return True
        return False

    def no_home_required(self):
        """Non-zero when the INI allows MDI/runs without homing."""
        return self.no_f_home

    def safe_handler(self, f):
        """Wrap handler f so it is suppressed while _stop_emission is set."""
        def _f(self, *a, **kw):
            if self._stop_emission:
                return
            return f(self, *a, **kw)
        return _f

    def set_active_safe(self, active):
        """Change the action state without re-triggering its own handler."""
        self._stop_emission = True
        self.set_active(active)
        self._stop_emission = False

    def do_get_property(self, property):
        """GObject property getter for name/label/tooltip/stock_id."""
        name = property.name.replace('-', '_')
        if name == 'name':
            return self.get_name()
        elif name == 'label':
            return self.get_label()
        elif name == 'tooltip':
            return self.get_tooltip()
        elif name == 'stock_id':
            return self.get_stock_id()
        else:
            raise AttributeError("Unknown property: %s" % property.name)

    def do_set_property(self, property, value):
        """GObject property setter for name/label/tooltip/stock_id."""
        name = property.name.replace('-', '_')
        if name == 'name':
            if value:
                self.set_name(value)
        elif name == 'label':
            self.set_label(value)
        elif name == 'tooltip':
            self.set_tooltip(value)
        elif name == 'stock_id':
            self.set_stock_id(value)
        else:
            raise AttributeError("Unknown property: %s" % property.name)
        return True
class _EMC_Action(gtk.Action, _EMC_ActionBase):
    """Base for one-shot EMC actions; subclasses override on_activate()."""
    __gproperties__ = _EMC_ActionBase._gproperties

    def __init__(self, name=None):
        gtk.Action.__init__(self, None, None, None, None)
        self._stop_emission = False
        self.connect('activate', self.safe_handler(self.on_activate))

    def set_active_safe(self, a): return #XXX: Override set_active with nop

    def on_activate(self, w):
        # Default no-op; subclasses perform the actual linuxcnc call.
        return True
class _EMC_ToggleAction(gtk.ToggleAction, _EMC_ActionBase):
    """Base for two-state EMC actions; subclasses override on_toggled()."""
    __gproperties__ = _EMC_ActionBase._gproperties

    def __init__(self, name=None):
        gtk.ToggleAction.__init__(self, None, None, None, None)
        self._stop_emission = False
        self.connect('toggled', self.safe_handler(self.on_toggled))

    # XXX: Override nop in _EMC_Action
    set_active_safe = _EMC_ActionBase.set_active_safe

    def on_toggled(self, w):
        return True
class _EMC_RadioAction(gtk.RadioAction, _EMC_ToggleAction):
    """Radio-group variant: only the item becoming active runs on_activate()."""
    __gproperties__ = _EMC_ToggleAction._gproperties

    def __init__(self, name=None):
        gtk.RadioAction.__init__(self, None, None, None, None, 0)
        self._stop_emission = False
        self.connect('toggled', self.safe_handler(self.on_toggled))

    def on_toggled(self, w):
        # Ignore the deactivation half of a radio switch.
        if not w.get_active():
            return
        return self.on_activate(w)
class EMC_Stat(GStat, _EMC_ActionBase):
    """GStat wrapper bound to the process-wide shared linuxcnc status object."""
    __gtype_name__ = 'EMC_Stat'

    def __init__(self):
        stat = self.linuxcnc_static.get()[1]
        GStat.__init__(self, stat)

    def _hal_init(self):
        # Nothing to initialize: status handling lives in GStat itself.
        pass
def _action(klass, f, *a, **kw):
    """Factory: build an _EMC_Action subclass registered under GType name
    *klass* whose activation runs f(self, *a, **kw)."""
    class _C(_EMC_Action):
        __gtype_name__ = klass
        def on_activate(self, w):
            print klass
            f(self, *a, **kw)
    return _C
# One-shot actions that switch the machine state (estop / estop-reset / on / off).
EMC_Action_ESTOP = _action('EMC_Action_ESTOP', lambda s: s.linuxcnc.state(linuxcnc.STATE_ESTOP))
EMC_Action_ESTOP_RESET = _action('EMC_Action_ESTOP_RESET', lambda s: s.linuxcnc.state(linuxcnc.STATE_ESTOP_RESET))
EMC_Action_ON = _action('EMC_Action_ON', lambda s: s.linuxcnc.state(linuxcnc.STATE_ON))
EMC_Action_OFF = _action('EMC_Action_OFF', lambda s: s.linuxcnc.state(linuxcnc.STATE_OFF))
class EMC_ToggleAction_ESTOP(_EMC_ToggleAction):
    """Toggle that both tracks and drives the machine ESTOP state."""
    __gtype_name__ = 'EMC_ToggleAction_ESTOP'

    def _hal_init(self):
        _EMC_ToggleAction._hal_init(self)
        # Start active (machine assumed in ESTOP) and follow status signals.
        self.set_active_safe(True)
        self.gstat.connect('state-estop', lambda w: self.set_active_safe(True))
        self.gstat.connect('state-estop-reset', lambda w: self.set_active_safe(False))

    def on_toggled(self, w):
        if self.get_active():
            print 'Issuing ESTOP'
            self.linuxcnc.state(linuxcnc.STATE_ESTOP)
        else:
            print 'Issuing ESTOP RESET'
            self.linuxcnc.state(linuxcnc.STATE_ESTOP_RESET)
class EMC_ToggleAction_Power(_EMC_ToggleAction):
    """Toggle that tracks and drives machine power; disabled while in ESTOP."""
    __gtype_name__ = 'EMC_ToggleAction_Power'

    def _hal_init(self):
        _EMC_ToggleAction._hal_init(self)
        # Start inactive and insensitive until ESTOP is reset.
        self.set_active_safe(False)
        self.set_sensitive(False)
        self.gstat.connect('state-on', lambda w: self.set_active_safe(True))
        self.gstat.connect('state-off', lambda w: self.set_active_safe(False))
        self.gstat.connect('state-estop', lambda w: self.set_sensitive(False))
        self.gstat.connect('state-estop-reset', lambda w: self.set_sensitive(True))

    def on_toggled(self, w):
        if self.get_active():
            print 'Issuing ON'
            self.linuxcnc.state(linuxcnc.STATE_ON)
        else:
            print 'Issuing OFF'
            self.linuxcnc.state(linuxcnc.STATE_OFF)
class EMC_RadioAction_ESTOP(_EMC_RadioAction):
    """Radio item selecting the ESTOP machine state."""
    __gtype_name__ = 'EMC_RadioAction_ESTOP'

    def _hal_init(self):
        _EMC_RadioAction._hal_init(self)
        self.set_active_safe(True)
        self.gstat.connect('state-estop', lambda w: self.set_active_safe(True))

    def on_activate(self, w):
        self.linuxcnc.state(linuxcnc.STATE_ESTOP)
class EMC_RadioAction_ESTOP_RESET(_EMC_RadioAction):
    """Radio item selecting the ESTOP-RESET machine state."""
    __gtype_name__ = 'EMC_RadioAction_ESTOP_RESET'

    def _hal_init(self):
        _EMC_RadioAction._hal_init(self)
        self.set_active_safe(False)
        self.gstat.connect('state-estop-reset', lambda w: self.set_active_safe(True))

    def on_activate(self, w):
        self.linuxcnc.state(linuxcnc.STATE_ESTOP_RESET)
class EMC_RadioAction_ON(_EMC_RadioAction):
    """Radio item selecting machine power ON; insensitive while in ESTOP."""
    __gtype_name__ = 'EMC_RadioAction_ON'

    def _hal_init(self):
        _EMC_RadioAction._hal_init(self)
        self.set_active_safe(True)
        self.gstat.connect('state-on', lambda w: self.set_active_safe(True))
        self.gstat.connect('state-estop', lambda w: self.set_sensitive(False))
        self.gstat.connect('state-estop-reset', lambda w: self.set_sensitive(True))

    def on_activate(self, w):
        self.linuxcnc.state(linuxcnc.STATE_ON)
class EMC_RadioAction_OFF(_EMC_RadioAction):
    """Radio item selecting machine power OFF; insensitive while in ESTOP."""
    __gtype_name__ = 'EMC_RadioAction_OFF'

    def _hal_init(self):
        _EMC_RadioAction._hal_init(self)
        self.set_active_safe(False)
        self.gstat.connect('state-off', lambda w: self.set_active_safe(True))
        self.gstat.connect('state-estop', lambda w: self.set_sensitive(False))
        self.gstat.connect('state-estop-reset', lambda w: self.set_sensitive(True))

    def on_activate(self, w):
        self.linuxcnc.state(linuxcnc.STATE_OFF)
def running(s, do_poll=True):
    """Return True while a program is executing (AUTO mode, interpreter busy).

    s is a linuxcnc.stat object; set do_poll=False if the caller already
    polled it.
    """
    if do_poll:
        s.poll()
    return (s.task_mode == linuxcnc.MODE_AUTO
            and s.interp_state != linuxcnc.INTERP_IDLE)
def ensure_mode(s, c, *modes):
    """Ensure the task is in one of *modes*, switching to the first if needed.

    Returns True if the task already was (or was switched) into a requested
    mode; False when no mode was given or a program is running, in which
    case mode changes are refused.
    """
    s.poll()
    if not modes:
        return False
    if s.task_mode in modes:
        return True
    if running(s, do_poll=False):
        return False
    # Switch and wait for the command to be acknowledged.
    c.mode(modes[0])
    c.wait_complete()
    return True
class EMC_Action_Run(_EMC_Action):
    """Action that starts (or restarts) program execution in AUTO mode."""
    __gtype_name__ = 'EMC_Action_Run'
    program_start_line = gobject.property(type=int, default=0, minimum=0, nick='Restart line',
                        blurb='Restart line number-Usually 0 - program start')
    reset_line = gobject.property(type=int, default=0, minimum=0, nick='Restart line after restarting once',
                        blurb='Line number that will be set afterthe next restart. -usually 0 - program start')

    def set_restart_line(self, line, resetline=0):
        """Start the next run at *line*; later runs start at *resetline*."""
        self.program_start_line = line
        self.reset_line = resetline

    def on_activate(self, w):
        ensure_mode(self.stat, self.linuxcnc, linuxcnc.MODE_AUTO)
        self.linuxcnc.auto(linuxcnc.AUTO_RUN, self.program_start_line)
        # One-shot restart line: fall back to reset_line for subsequent runs.
        self.program_start_line = self.reset_line
class EMC_Action_Step(_EMC_Action):
    """Action that single-steps the loaded program."""
    __gtype_name__ = 'EMC_Action_Step'

    def _hal_init(self):
        _EMC_Action._hal_init(self)
        # Stepping only makes sense when powered on and the interpreter is idle.
        self.gstat.connect('state-off', lambda _w: self.set_sensitive(False))
        self.gstat.connect('state-estop', lambda _w: self.set_sensitive(False))
        self.gstat.connect('interp-idle', lambda _w: self.set_sensitive(self.machine_on()))

    def on_activate(self, w):
        ensure_mode(self.stat, self.linuxcnc, linuxcnc.MODE_AUTO)
        self.linuxcnc.auto(linuxcnc.AUTO_STEP)
class EMC_Action_Pause(_EMC_Action):
    """Action that pauses a running program."""
    __gtype_name__ = 'EMC_Action_Pause'

    def on_activate(self, w):
        self.stat.poll()
        # Pausing is only valid while AUTO is actually executing a program.
        executing = (self.stat.task_mode == linuxcnc.MODE_AUTO and
                     self.stat.interp_state in (linuxcnc.INTERP_READING,
                                                linuxcnc.INTERP_WAITING))
        if not executing:
            return
        ensure_mode(self.stat, self.linuxcnc, linuxcnc.MODE_AUTO)
        self.linuxcnc.auto(linuxcnc.AUTO_PAUSE)
class EMC_Action_Resume(_EMC_Action):
    """Action that resumes a paused program."""
    __gtype_name__ = 'EMC_Action_Resume'

    def on_activate(self, w):
        print('RESUME')
        self.stat.poll()
        # Only resume when actually paused in AUTO or MDI mode.
        if not self.stat.paused:
            return
        if self.stat.task_mode not in (linuxcnc.MODE_AUTO, linuxcnc.MODE_MDI):
            return
        ensure_mode(self.stat, self.linuxcnc, linuxcnc.MODE_AUTO, linuxcnc.MODE_MDI)
        self.linuxcnc.auto(linuxcnc.AUTO_RESUME)
class EMC_Action_Stop(_EMC_Action):
    """Action that aborts whatever the task is currently doing."""
    __gtype_name__ = 'EMC_Action_Stop'

    def on_activate(self, w):
        self.linuxcnc.abort()
        self.linuxcnc.wait_complete()
class EMC_ToggleAction_Run(_EMC_ToggleAction, EMC_Action_Run):
    """Toggle-button variant of EMC_Action_Run.

    Shows as pressed while a program runs; pressing it starts the program
    (releasing it is ignored).  Only sensitive when the machine is on,
    homed (or homing not required) and a file is loaded.
    """
    __gtype_name__ = 'EMC_ToggleAction_Run'
    # Properties re-declared here (in addition to EMC_Action_Run) so they
    # appear on this GObject type as well.
    program_start_line = gobject.property(type=int, default=0, minimum=0, nick='Restart line',
                        blurb='Restart line number-Usually 0 - program start')
    reset_line = gobject.property(type=int, default=0, minimum=0, nick='Restart line after restarting once',
                        blurb='Line number that will be set afterthe next restart. -usually 0 - program start')
    def _hal_init(self):
        _EMC_ToggleAction._hal_init(self)
        self.set_active_safe(False)
        self.set_sensitive(False)
        # Sensitivity is gated by power, homing and file-loaded state; the
        # active (pressed) state mirrors whether the interpreter is running.
        self.gstat.connect('state-off', lambda w: self.set_sensitive(False))
        self.gstat.connect('state-estop', lambda w: self.set_sensitive(False))
        self.gstat.connect( 'interp-idle', lambda w: self.set_sensitive( self.machine_on() and ( self.is_all_homed() or self.no_home_required() ) and self.is_file_loaded() ) )
        self.gstat.connect('interp-idle', lambda w: self.set_active_safe(False))
        self.gstat.connect('interp-run', lambda w: self.set_sensitive(False))
        self.gstat.connect('interp-run', lambda w: self.set_active_safe(True))
        self.gstat.connect('all-homed', lambda w: self.set_sensitive( self.machine_on() and self.is_file_loaded() ))
        self.gstat.connect('file-loaded', self.file_loaded_check)
    def file_loaded_check(self,widget,filename):
        # A newly loaded file only enables Run on a powered-on, homed machine.
        self.set_sensitive( self.machine_on() and (self.is_all_homed() or self.no_home_required()) )
    def set_restart_line(self,line,resetline=0):
        # Next run starts at 'line'; subsequent runs start at 'resetline'.
        self.program_start_line = line
        self.reset_line = resetline
    def on_toggled(self, w):
        # Only react to being toggled on; delegates to EMC_Action_Run.on_activate.
        if self.get_active():
            return self.on_activate(w)
class EMC_ToggleAction_Stop(_EMC_ToggleAction, EMC_Action_Stop):
    """Toggle variant of Stop: pressed while the task is idle, clickable while running."""
    __gtype_name__ = "EMC_ToggleAction_Stop"

    def _hal_init(self):
        _EMC_ToggleAction._hal_init(self)
        self.set_active_safe(True)
        self.set_sensitive(False)
        # Clickable only while a program is running on a powered-on machine;
        # the pressed state mirrors "interpreter idle".
        self.gstat.connect('state-off', lambda _w: self.set_sensitive(False))
        self.gstat.connect('state-estop', lambda _w: self.set_sensitive(False))
        self.gstat.connect('interp-idle', lambda _w: self.set_sensitive(False))
        self.gstat.connect('interp-idle', lambda _w: self.set_active_safe(True))
        self.gstat.connect('interp-run', lambda _w: self.set_sensitive(self.machine_on()))
        self.gstat.connect('interp-run', lambda _w: self.set_active_safe(False))

    def on_toggled(self, w):
        # Toggling on triggers EMC_Action_Stop.on_activate (abort).
        if self.get_active():
            return self.on_activate(w)
class EMC_ToggleAction_Pause(_EMC_ToggleAction, EMC_Action_Pause):
    """Toggle that pauses when pressed and resumes when released."""
    __gtype_name__ = "EMC_ToggleAction_Pause"

    def _hal_init(self):
        _EMC_ToggleAction._hal_init(self)
        # Releasing the toggle delegates to a private Resume action.
        self.resume = EMC_Action_Resume()
        self.resume._hal_init()
        self.set_active_safe(True)
        self.set_sensitive(False)
        self.gstat.connect('state-off', lambda _w: self.set_sensitive(False))
        self.gstat.connect('state-estop', lambda _w: self.set_sensitive(False))
        self.gstat.connect('interp-idle', lambda _w: self.set_sensitive(False))
        self.gstat.connect('interp-idle', lambda _w: self.set_active_safe(False))
        self.gstat.connect('interp-run', lambda _w: self.set_sensitive(self.machine_on()))
        self.gstat.connect('interp-run', lambda _w: self.set_active_safe(False))
        # Pressed state follows the interpreter's paused/waiting transitions.
        self.gstat.connect('interp-paused', lambda _w: self.set_active_safe(True))
        self.gstat.connect('interp-waiting', lambda _w: self.set_active_safe(False))

    def on_toggled(self, w):
        if not self.get_active():
            # Released: resume execution.
            return self.resume.on_activate(self.resume)
        return self.on_activate(w)
class HalTemplate(string.Template):
    """string.Template whose placeholders may contain '-' and '.'.

    HAL pin names such as 'motion.spindle-speed' are legal identifiers here,
    so '${motion.spindle-speed}' substitutes correctly.
    """
    idpattern = '[_a-z][-._a-z0-9]*'
class FloatComp:
    """Mapping adapter that renders a HAL component's pin values as '%f' strings.

    Used together with HalTemplate so that '${pin}' expands to the pin's
    current value formatted as a float.
    """

    def __init__(self, comp):
        self.comp = comp

    def __getitem__(self, key):
        return "%f" % float(self.comp[key])
class EMC_Action_MDI(_EMC_Action):
    """Action that sends an MDI command with ${pin} placeholders substituted.

    Placeholders are expanded via HalTemplate/FloatComp to the current HAL
    pin values formatted as floats.
    """
    __gtype_name__ = 'EMC_Action_MDI'
    command = gobject.property(type=str, default='', nick='MDI Command')

    def _hal_init(self):
        _EMC_Action._hal_init(self)
        self.set_sensitive(False)
        # MDI needs a powered-on, homed (or home-not-required), idle machine.
        self.gstat.connect('state-off', lambda _w: self.set_sensitive(False))
        self.gstat.connect('state-estop', lambda _w: self.set_sensitive(False))
        self.gstat.connect('interp-idle', lambda _w: self.set_sensitive(self.machine_on() and ( self.is_all_homed() or self.no_home_required() ) ))
        self.gstat.connect('interp-run', lambda _w: self.set_sensitive(False))
        self.gstat.connect('all-homed', lambda _w: self.set_sensitive(self.machine_on()))

    def on_activate(self, w):
        ensure_mode(self.stat, self.linuxcnc, linuxcnc.MODE_MDI)
        # Expand ${pin} references with current HAL values, then send.
        cmd = HalTemplate(self.command).substitute(FloatComp(self.hal))
        self.linuxcnc.mdi(cmd)
class EMC_ToggleAction_MDI(_EMC_ToggleAction, EMC_Action_MDI):
    """Toggle variant of EMC_Action_MDI.

    Stays pressed (and insensitive) while the MDI command executes, polling
    linuxcnc for completion, then releases itself.  Emits
    'mdi-command-start' / 'mdi-command-stop' around the command.
    """
    __gtype_name__ = 'EMC_ToggleAction_MDI'
    __gsignals__ = {
        'mdi-command-start': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ()),
        'mdi-command-stop': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ()),
    }
    command = gobject.property(type=str, default='', nick='MDI Command')
    def _hal_init(self):
        _EMC_ToggleAction._hal_init(self)
        EMC_Action_MDI._hal_init(self)
    def on_toggled(self, w):
        # Only act on being toggled on.
        if not self.get_active():
            return
        # Lock the button while the command runs.
        self.set_sensitive(False)
        self.emit('mdi-command-start')
        self.on_activate(w)
        # Poll every 100 ms until the command completes.
        gobject.timeout_add(100, self.wait_complete)
    def wait_complete(self):
        # Returning True keeps the gobject timeout alive (still executing).
        if self.linuxcnc.wait_complete(0) in [-1, linuxcnc.RCS_EXEC]:
            return True
        self.emit('mdi-command-stop')
        self.set_active_safe(False)
        self.set_sensitive(self.machine_on())
        # Returning False cancels the timeout.
        return False
class EMC_Action_Unhome(_EMC_Action):
    """Action that unhomes one axis, or all axes when axis is -1.

    NOTE: this class was previously (mis)named EMC_Action_Home, which
    collided with the real homing action defined later in this module; the
    later definition silently shadowed this one, making the unhome action
    unreachable by its Python name.  The class name now matches its
    __gtype_name__ and its behaviour (it calls unhome()).
    """
    __gtype_name__ = 'EMC_Action_Unhome'
    axis = gobject.property(type=int, default=-1, minimum=-1, nick='Axis',
                        blurb='Axis to unhome. -1 to unhome all')

    def _hal_init(self):
        _EMC_Action._hal_init(self)
        # Unhoming needs a powered-on machine with an idle interpreter.
        self.set_sensitive(False)
        self.gstat.connect('state-off', lambda w: self.set_sensitive(False))
        self.gstat.connect('state-estop', lambda w: self.set_sensitive(False))
        self.gstat.connect('interp-idle', lambda w: self.set_sensitive(self.machine_on()))
        self.gstat.connect('interp-run', lambda w: self.set_sensitive(False))

    def on_activate(self, w):
        # Unhoming requires MANUAL mode.
        ensure_mode(self.stat, self.linuxcnc, linuxcnc.MODE_MANUAL)
        self.linuxcnc.unhome(self.axis)
def prompt_areyousure(type, message, secondary=None):
    """Pop up a modal yes/no dialog; return True iff the user answered Yes.

    'type' is a gtk message type constant (kept as-is for compatibility
    even though it shadows the builtin).
    """
    dialog = gtk.MessageDialog(None, 0, type, gtk.BUTTONS_YES_NO, message)
    if secondary:
        dialog.format_secondary_text(secondary)
    response = dialog.run()
    dialog.destroy()
    return response == gtk.RESPONSE_YES
class EMC_Action_Home(_EMC_Action):
    """Action that homes one axis (or all axes), optionally confirming a re-home."""
    __gtype_name__ = 'EMC_Action_Home'
    axis = gobject.property(type=int, default=-1, minimum=-1, nick='Axis',
                        blurb='Axis to home. -1 to home all')
    confirm_homed = gobject.property(type=bool, default=False, nick='Confirm rehoming',
                        blurb='Ask user if axis is already homed')

    def _hal_init(self):
        _EMC_Action._hal_init(self)
        self.set_sensitive(False)
        # Homing needs a powered-on machine with an idle interpreter.
        self.gstat.connect('state-off', lambda _w: self.set_sensitive(False))
        self.gstat.connect('state-estop', lambda _w: self.set_sensitive(False))
        self.gstat.connect('interp-idle', lambda _w: self.set_sensitive(self.machine_on()))
        self.gstat.connect('interp-run', lambda _w: self.set_sensitive(False))

    def homed(self):
        """Return truthy if the configured axis (or any enabled axis) is homed."""
        if self.axis != -1:
            return self.stat.homed[self.axis]
        for axis_no, axis_homed in enumerate(self.stat.homed):
            if axis_homed and self.stat.axis_mask & (1 << axis_no):
                return True

    def on_activate(self, w):
        #if not manual_ok(): return
        ensure_mode(self.stat, self.linuxcnc, linuxcnc.MODE_MANUAL)
        if self.confirm_homed and self.homed():
            if not prompt_areyousure(gtk.MESSAGE_WARNING,
                                     _("Axis is already homed, are you sure you want to re-home?")):
                return
        self.linuxcnc.home(self.axis)
class State_Sensitive_Table(gtk.Table, _EMC_ActionBase):
    """gtk.Table whose sensitivity follows LinuxCNC machine state.

    The is_homed / is_on / is_idle properties select which machine
    conditions must hold for the table's children to accept input.
    """
    __gtype_name__ = "State_Sensitive_Table"
    is_homed = gobject.property(type=bool, default=True, nick='Must Be Homed',
                        blurb='Machine Must be homed for widgets to be sensitive to input')
    is_on = gobject.property(type=bool, default=True, nick='Must Be On',
                        blurb='Machine Must be On for widgets to be sensitive to input')
    is_idle = gobject.property(type=bool, default=True, nick='Must Be Idle',
                        blurb='Machine Must be Idle for widgets to be sensitive to input')
    def _hal_init(self):
        _EMC_ActionBase._hal_init(self)
        self.set_sensitive(False)
        # ESTOP always disables the table regardless of the property flags.
        self.gstat.connect('state-estop', lambda w: self.set_sensitive(False))
        if self.is_on:
            self.gstat.connect('state-off', lambda w: self.set_sensitive(False))
        if self.is_homed:
            # Idle + on + homed (or homing not required) enables the table.
            self.gstat.connect('interp-idle', lambda w: self.set_sensitive(self.machine_on() and ( self.is_all_homed() or self.no_home_required() ) ))
        else:
            self.gstat.connect('interp-idle', lambda w: self.set_sensitive(self.machine_on()) )
        if self.is_idle:
            self.gstat.connect('interp-run', lambda w: self.set_sensitive(False))
        if self.is_homed:
            self.gstat.connect('all-homed', lambda w: self.set_sensitive(self.machine_on()))
| lgpl-2.1 |
marissazhou/django | django/utils/archive.py | 562 | 7070 | """
Based on "python-archive" -- http://pypi.python.org/pypi/python-archive/
Copyright (c) 2010 Gary Wilson Jr. <gary.wilson@gmail.com> and contributors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import os
import shutil
import tarfile
import zipfile
from django.utils import six
class ArchiveException(Exception):
    """Base exception class for all archive errors."""


class UnrecognizedArchiveFormat(ArchiveException):
    """Error raised when passed file is not a recognized archive format."""
def extract(path, to_path=''):
    """Unpack the tar or zip archive at *path* into the directory *to_path*.

    The archive type is chosen from the file extension; see Archive.
    """
    with Archive(path) as archive:
        archive.extract(to_path)
class Archive(object):
    """External API class that delegates to a concrete archive implementation.

    The implementation (TarArchive or ZipArchive) is selected from the file
    extension; usable as a context manager.
    """

    def __init__(self, file):
        self._archive = self._archive_cls(file)(file)

    @staticmethod
    def _archive_cls(file):
        """Return the archive class for *file* (a path or a file object)."""
        if isinstance(file, six.string_types):
            filename = file
        else:
            try:
                filename = file.name
            except AttributeError:
                raise UnrecognizedArchiveFormat(
                    "File object not a recognized archive format.")
        base, tail_ext = os.path.splitext(filename.lower())
        cls = extension_map.get(tail_ext)
        if cls is None:
            # Retry with the inner extension, e.g. '.tar' of 'x.tar.gz'.
            cls = extension_map.get(os.path.splitext(base)[1])
        if cls is None:
            raise UnrecognizedArchiveFormat(
                "Path not a recognized archive format: %s" % filename)
        return cls

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def extract(self, to_path=''):
        self._archive.extract(to_path)

    def list(self):
        self._archive.list()

    def close(self):
        self._archive.close()
class BaseArchive(object):
    """Base Archive class. Implementations should inherit this class."""

    def split_leading_dir(self, path):
        """Split *path* into (first component, remainder).

        Handles both '/' and '\\' separators; leading separators are
        stripped first.
        """
        path = str(path).lstrip('/').lstrip('\\')
        has_slash = '/' in path
        has_backslash = '\\' in path
        if has_slash and (not has_backslash or path.find('/') < path.find('\\')):
            return path.split('/', 1)
        if has_backslash:
            return path.split('\\', 1)
        return path, ''

    def has_leading_dir(self, paths):
        """Return True if all the paths share the same leading path name.

        (i.e., everything in the archive sits in one subdirectory.)
        """
        common_prefix = None
        for path in paths:
            prefix, rest = self.split_leading_dir(path)
            if not prefix:
                return False
            if common_prefix is None:
                common_prefix = prefix
            elif prefix != common_prefix:
                return False
        return True

    def extract(self):
        raise NotImplementedError('subclasses of BaseArchive must provide an extract() method')

    def list(self):
        raise NotImplementedError('subclasses of BaseArchive must provide a list() method')
class TarArchive(BaseArchive):
    """Archive implementation backed by the tarfile module."""

    def __init__(self, file):
        self._archive = tarfile.open(file)

    def list(self, *args, **kwargs):
        self._archive.list(*args, **kwargs)

    def extract(self, to_path):
        """Extract all members below *to_path*, stripping a common leading dir."""
        # note: python<=2.5 doesn't seem to know about pax headers, filter them
        members = [member for member in self._archive.getmembers()
                   if member.name != 'pax_global_header']
        leading = self.has_leading_dir(x.name for x in members)
        for member in members:
            name = member.name
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            if member.isdir():
                if filename and not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                # BUGFIX: initialize before the try block.  If extractfile()
                # raises, the finally clause below previously referenced an
                # unbound 'extracted' (NameError on the first member) or a
                # stale handle from an earlier iteration.
                extracted = None
                try:
                    extracted = self._archive.extractfile(member)
                except (KeyError, AttributeError) as exc:
                    # Some corrupt tar files seem to produce this
                    # (specifically bad symlinks)
                    print("In the tar file %s the member %s is invalid: %s" %
                          (name, member.name, exc))
                else:
                    dirname = os.path.dirname(filename)
                    if dirname and not os.path.exists(dirname):
                        os.makedirs(dirname)
                    with open(filename, 'wb') as outfile:
                        shutil.copyfileobj(extracted, outfile)
                finally:
                    if extracted:
                        extracted.close()

    def close(self):
        self._archive.close()
class ZipArchive(BaseArchive):
    """Archive implementation backed by the zipfile module."""

    def __init__(self, file):
        self._archive = zipfile.ZipFile(file)

    def list(self, *args, **kwargs):
        self._archive.printdir(*args, **kwargs)

    def extract(self, to_path):
        """Extract every entry below *to_path*, stripping a common leading dir."""
        names = self._archive.namelist()
        leading = self.has_leading_dir(names)
        for name in names:
            # Read using the original entry name, before any stripping.
            contents = self._archive.read(name)
            if leading:
                name = self.split_leading_dir(name)[1]
            filename = os.path.join(to_path, name)
            parent = os.path.dirname(filename)
            if parent and not os.path.exists(parent):
                os.makedirs(parent)
            if filename.endswith(('/', '\\')):
                # A directory entry.
                if not os.path.exists(filename):
                    os.makedirs(filename)
            else:
                with open(filename, 'wb') as outfile:
                    outfile.write(contents)

    def close(self):
        self._archive.close()
# Maps a lowercased file extension to the Archive implementation that
# handles it.  Double extensions such as '.tar.gz' are resolved by
# Archive._archive_cls, which retries with the inner extension.
extension_map = {
    '.tar': TarArchive,
    '.tar.bz2': TarArchive,
    '.tar.gz': TarArchive,
    '.tgz': TarArchive,
    '.tz2': TarArchive,
    '.zip': ZipArchive,
}
| bsd-3-clause |
APM602/APM602 | Tools/LogAnalyzer/tests/TestIMUMatch.py | 61 | 3781 | from LogAnalyzer import Test,TestResult
import DataflashLog
from math import sqrt
class TestIMUMatch(Test):
    '''test for consistency between the two IMUs' accelerometer data'''

    def __init__(self):
        Test.__init__(self)
        self.name = "IMU Mismatch"

    def run(self, logdata, verbose):
        """Low-pass-filter the IMU1/IMU2 accel difference and flag mismatches."""
        # tuning parameters:
        warn_threshold = .75
        fail_threshold = 1.5
        filter_tc = 5.0

        self.result = TestResult()
        self.result.status = TestResult.StatusType.GOOD

        if ("IMU" in logdata.channels) and (not "IMU2" in logdata.channels):
            # Single-IMU board: nothing to compare.
            self.result.status = TestResult.StatusType.NA
            self.result.statusMessage = "No IMU2"
            return

        if (not "IMU" in logdata.channels) or (not "IMU2" in logdata.channels):
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No IMU log data"
            return

        imu1 = logdata.channels["IMU"]
        imu2 = logdata.channels["IMU2"]

        imu1_timems = imu1["TimeMS"].listData
        imu1_accx = imu1["AccX"].listData
        imu1_accy = imu1["AccY"].listData
        imu1_accz = imu1["AccZ"].listData

        imu2_timems = imu2["TimeMS"].listData
        imu2_accx = imu2["AccX"].listData
        imu2_accy = imu2["AccY"].listData
        imu2_accz = imu2["AccZ"].listData

        # Re-pack the per-axis (line, value) lists into time-sorted samples.
        imu1 = []
        imu2 = []

        for i in range(len(imu1_timems)):
            imu1.append({ 't': imu1_timems[i][1]*1.0E-3, 'x': imu1_accx[i][1], 'y': imu1_accy[i][1], 'z': imu1_accz[i][1]})

        for i in range(len(imu2_timems)):
            imu2.append({ 't': imu2_timems[i][1]*1.0E-3, 'x': imu2_accx[i][1], 'y': imu2_accy[i][1], 'z': imu2_accz[i][1]})

        imu1.sort(key=lambda x: x['t'])
        imu2.sort(key=lambda x: x['t'])

        imu2_index = 0

        last_t = None

        xdiff_filtered = 0
        ydiff_filtered = 0
        zdiff_filtered = 0
        max_diff_filtered = 0

        for i in range(len(imu1)):
            # find closest imu2 value
            t = imu1[i]['t']
            dt = 0 if last_t is None else t-last_t
            dt = min(dt, .1)

            next_imu2 = None
            # BUGFIX: this inner search loop previously reused 'i' as its
            # index, clobbering the outer loop variable so the diffs below
            # compared the wrong IMU1 sample.  It now uses its own index 'j'.
            for j in range(imu2_index, len(imu2)):
                next_imu2 = imu2[j]
                imu2_index = j
                if next_imu2['t'] >= t:
                    break
            prev_imu2 = imu2[imu2_index-1]
            closest_imu2 = next_imu2 if abs(next_imu2['t']-t) < abs(prev_imu2['t']-t) else prev_imu2

            xdiff = imu1[i]['x']-closest_imu2['x']
            ydiff = imu1[i]['y']-closest_imu2['y']
            zdiff = imu1[i]['z']-closest_imu2['z']

            # First-order low-pass filter with time constant filter_tc.
            xdiff_filtered += (xdiff-xdiff_filtered)*dt/filter_tc
            ydiff_filtered += (ydiff-ydiff_filtered)*dt/filter_tc
            zdiff_filtered += (zdiff-zdiff_filtered)*dt/filter_tc

            diff_filtered = sqrt(xdiff_filtered**2+ydiff_filtered**2+zdiff_filtered**2)
            max_diff_filtered = max(max_diff_filtered, diff_filtered)
            last_t = t

        if max_diff_filtered > fail_threshold:
            self.result.statusMessage = "Check vibration or accelerometer calibration. (Mismatch: %.2f, WARN: %.2f, FAIL: %.2f)" % (max_diff_filtered,warn_threshold,fail_threshold)
            self.result.status = TestResult.StatusType.FAIL
        elif max_diff_filtered > warn_threshold:
            self.result.statusMessage = "Check vibration or accelerometer calibration. (Mismatch: %.2f, WARN: %.2f, FAIL: %.2f)" % (max_diff_filtered,warn_threshold,fail_threshold)
            self.result.status = TestResult.StatusType.WARN
        else:
            self.result.statusMessage = "(Mismatch: %.2f, WARN: %.2f, FAIL: %.2f)" % (max_diff_filtered,warn_threshold, fail_threshold)
| gpl-3.0 |
Nexenta/cinder | cinder/tests/unit/backup/drivers/test_backup_ceph.py | 1 | 50666 | # Copyright 2013 Canonical Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for Ceph backup service."""
import hashlib
import os
import tempfile
import uuid
import mock
from oslo_concurrency import processutils
from oslo_serialization import jsonutils
import six
from six.moves import range
from cinder.backup import driver
from cinder.backup.drivers import ceph
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import test
from cinder.volume.drivers import rbd as rbddriver
# This is used to collect raised exceptions so that tests may check what was
# raised.
# NOTE: this must be initialised in test setUp().
RAISED_EXCEPTIONS = []
class MockException(Exception):
def __init__(self, *args, **kwargs):
RAISED_EXCEPTIONS.append(self.__class__)
class MockImageNotFoundException(MockException):
"""Used as mock for rbd.ImageNotFound."""
class MockImageBusyException(MockException):
"""Used as mock for rbd.ImageBusy."""
class MockObjectNotFoundException(MockException):
"""Used as mock for rados.MockObjectNotFoundException."""
def common_mocks(f):
    """Decorator to set mocks common to all tests.

    The point of doing these mocks here is so that we don't accidentally set
    mocks that can't/don't get unset.
    """
    def _common_inner_inner1(inst, *args, **kwargs):
        # NOTE(dosaboy): mock Popen to, by default, raise Exception in order to
        # ensure that any test ending up in a subprocess fails
        # if not properly mocked.
        @mock.patch('subprocess.Popen', spec=True)
        # NOTE(dosaboy): mock out eventlet.sleep() so that it does nothing.
        @mock.patch('eventlet.sleep', spec=True)
        @mock.patch('time.time', spec=True)
        # NOTE(dosaboy): set spec to empty object so that hasattr calls return
        # False by default.
        @mock.patch('cinder.backup.drivers.ceph.rbd')
        @mock.patch('cinder.backup.drivers.ceph.rados')
        def _common_inner_inner2(mock_rados, mock_rbd, mock_time, mock_sleep,
                                 mock_popen):
            # mock.patch decorators apply bottom-up, so the argument order
            # here is the reverse of the decorator stack above.
            mock_time.side_effect = inst.time_inc
            mock_popen.side_effect = Exception
            inst.mock_rados = mock_rados
            inst.mock_rbd = mock_rbd
            inst.mock_rbd.ImageBusy = MockImageBusyException
            inst.mock_rbd.ImageNotFound = MockImageNotFoundException
            inst.service.rbd = inst.mock_rbd
            inst.service.rados = inst.mock_rados
            return f(inst, *args, **kwargs)
        # Invoke immediately: the patches are active only for this call.
        return _common_inner_inner2()
    return _common_inner_inner1
class BackupCephTestCase(test.TestCase):
"""Test case for ceph backup driver."""
def _create_volume_db_entry(self, id, size):
vol = {'id': id, 'size': size, 'status': 'available'}
return db.volume_create(self.ctxt, vol)['id']
def _create_backup_db_entry(self, backupid, volid, size,
userid=str(uuid.uuid4()),
projectid=str(uuid.uuid4())):
backup = {'id': backupid, 'size': size, 'volume_id': volid,
'user_id': userid, 'project_id': projectid}
return db.backup_create(self.ctxt, backup)['id']
def time_inc(self):
self.counter += 1
return self.counter
def _get_wrapped_rbd_io(self, rbd_image):
rbd_meta = rbddriver.RBDImageMetadata(rbd_image, 'pool_foo',
'user_foo', 'conf_foo')
return rbddriver.RBDImageIOWrapper(rbd_meta)
    def _setup_mock_popen(self, mock_popen, retval=None, p1hook=None,
                          p2hook=None):
        """Install a fake Popen class on *mock_popen* that records calls.

        Every construct/close/communicate is appended to self.callstack.
        p1hook fires during the first Popen() construction and p2hook during
        the second; communicate() returns *retval*.
        """
        class MockPopen(object):
            # Hooks are popped from the end, so p1hook runs for the first
            # instantiation and p2hook for the second.
            hooks = [p2hook, p1hook]

            def __init__(mock_inst, cmd, *args, **kwargs):
                self.callstack.append('popen_init')
                mock_inst.stdout = mock.Mock()
                mock_inst.stdout.close = mock.Mock()
                mock_inst.stdout.close.side_effect = \
                    lambda *args: self.callstack.append('stdout_close')
                mock_inst.returncode = 0
                hook = mock_inst.__class__.hooks.pop()
                if hook is not None:
                    hook()

            def communicate(mock_inst):
                self.callstack.append('communicate')
                return retval

        mock_popen.side_effect = MockPopen
    def setUp(self):
        """Create volume/backup DB rows and a random data file for the tests."""
        global RAISED_EXCEPTIONS
        RAISED_EXCEPTIONS = []
        super(BackupCephTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

        # Create volume.
        self.volume_size = 1
        self.volume_id = str(uuid.uuid4())
        self._create_volume_db_entry(self.volume_id, self.volume_size)
        self.volume = db.volume_get(self.ctxt, self.volume_id)

        # Create backup of volume.
        self.backup_id = str(uuid.uuid4())
        self._create_backup_db_entry(self.backup_id, self.volume_id,
                                     self.volume_size)
        self.backup = objects.Backup.get_by_id(self.ctxt, self.backup_id)

        # Create alternate volume.
        self.alt_volume_id = str(uuid.uuid4())
        self._create_volume_db_entry(self.alt_volume_id, self.volume_size)
        self.alt_volume = db.volume_get(self.ctxt, self.alt_volume_id)

        self.chunk_size = 1024
        self.num_chunks = 128
        self.data_length = self.num_chunks * self.chunk_size
        self.checksum = hashlib.sha256()

        # Create a file with some data in it.
        self.volume_file = tempfile.NamedTemporaryFile()
        self.addCleanup(self.volume_file.close)
        for _i in range(0, self.num_chunks):
            data = os.urandom(self.chunk_size)
            self.checksum.update(data)
            self.volume_file.write(data)
        self.volume_file.seek(0)

        # Always trigger an exception if a command is executed since it should
        # always be dealt with gracefully. At time of writing on rbd
        # export/import-diff is executed and if they fail we expect to find
        # alternative means of backing up.
        mock_exec = mock.Mock()
        mock_exec.side_effect = processutils.ProcessExecutionError
        self.service = ceph.CephBackupDriver(self.ctxt, execute=mock_exec)

        # Ensure that time.time() always returns more than the last time it was
        # called to avoid div by zero errors.
        self.counter = float(0)

        # Records mocked subprocess interactions for assertion in tests.
        self.callstack = []
    @common_mocks
    def test_get_rbd_support(self):
        """_get_rbd_support() derives (old_format, feature bits) from librbd."""
        # With no feature constants present, old-format with no features.
        del self.service.rbd.RBD_FEATURE_LAYERING
        del self.service.rbd.RBD_FEATURE_STRIPINGV2

        self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_LAYERING'))
        self.assertFalse(hasattr(self.service.rbd, 'RBD_FEATURE_STRIPINGV2'))

        oldformat, features = self.service._get_rbd_support()
        self.assertTrue(oldformat)
        self.assertEqual(0, features)

        # Feature constants accumulate into the feature bitmask.
        self.service.rbd.RBD_FEATURE_LAYERING = 1

        oldformat, features = self.service._get_rbd_support()
        self.assertFalse(oldformat)
        self.assertEqual(1, features)

        self.service.rbd.RBD_FEATURE_STRIPINGV2 = 2

        oldformat, features = self.service._get_rbd_support()
        self.assertFalse(oldformat)
        self.assertEqual(1 | 2, features)
    @common_mocks
    def test_get_most_recent_snap(self):
        """_get_most_recent_snap() returns the newest backup snapshot."""
        last = 'backup.%s.snap.9824923.1212' % (uuid.uuid4())

        image = self.mock_rbd.Image.return_value
        # 'last' has the highest timestamp component and should win.
        image.list_snaps.return_value = \
            [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())},
             {'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())},
             {'name': last},
             {'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}]

        snap = self.service._get_most_recent_snap(image)
        self.assertEqual(last, snap)
    @common_mocks
    def test_get_backup_snap_name(self):
        """_get_backup_snap_name() returns the snap matching our backup id."""
        snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4())

        def get_backup_snaps(inst, *args):
            # One unrelated snapshot plus the one tied to self.backup_id.
            return [{'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4()),
                     'backup_id': str(uuid.uuid4())},
                    {'name': snap_name,
                     'backup_id': self.backup_id}]

        # get_backup_snaps mocked with no side effect -> no matching snap.
        with mock.patch.object(self.service, 'get_backup_snaps'):
            name = self.service._get_backup_snap_name(self.service.rbd.Image(),
                                                      'base_foo',
                                                      self.backup_id)
            self.assertIsNone(name)

        with mock.patch.object(self.service, 'get_backup_snaps') as \
                mock_get_backup_snaps:
            mock_get_backup_snaps.side_effect = get_backup_snaps
            name = self.service._get_backup_snap_name(self.service.rbd.Image(),
                                                      'base_foo',
                                                      self.backup_id)
            self.assertEqual(snap_name, name)
            self.assertTrue(mock_get_backup_snaps.called)
    @common_mocks
    def test_get_backup_snaps(self):
        """get_backup_snaps() only counts names matching the backup pattern."""
        image = self.mock_rbd.Image.return_value
        # Only the three 'backup.<uuid>.snap.*' names should be recognised;
        # the 'wambam' and 'bbbackup' entries must be filtered out.
        image.list_snaps.return_value = [
            {'name': 'backup.%s.snap.6423868.2342' % (uuid.uuid4())},
            {'name': 'backup.%s.wambam.6423868.2342' % (uuid.uuid4())},
            {'name': 'backup.%s.snap.1321319.3235' % (uuid.uuid4())},
            {'name': 'bbbackup.%s.snap.1321319.3235' % (uuid.uuid4())},
            {'name': 'backup.%s.snap.3824923.1412' % (uuid.uuid4())}]
        snaps = self.service.get_backup_snaps(image)
        self.assertEqual(3, len(snaps))
    @common_mocks
    def test_transfer_data_from_rbd_to_file(self):
        """_transfer_data() copies an RBD image's bytes into a local file."""
        def fake_read(offset, length):
            # Serve reads from the random data file created in setUp().
            self.volume_file.seek(offset)
            return self.volume_file.read(length)

        self.mock_rbd.Image.return_value.read.side_effect = fake_read
        self.mock_rbd.Image.return_value.size.return_value = self.data_length

        with tempfile.NamedTemporaryFile() as test_file:
            self.volume_file.seek(0)

            rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
            self.service._transfer_data(rbd_io, 'src_foo', test_file,
                                        'dest_foo', self.data_length)

            checksum = hashlib.sha256()
            test_file.seek(0)
            for _c in range(0, self.num_chunks):
                checksum.update(test_file.read(self.chunk_size))

            # Ensure the files are equal
            self.assertEqual(checksum.digest(), self.checksum.digest())
    @common_mocks
    def test_transfer_data_from_rbd_to_rbd(self):
        """_transfer_data() copies bytes between two wrapped RBD images."""
        def fake_read(offset, length):
            # Source image serves reads from the setUp() data file.
            self.volume_file.seek(offset)
            return self.volume_file.read(length)

        def mock_write_data(data, offset):
            # Destination image records what was written for verification.
            checksum.update(data)
            test_file.write(data)

        rbd1 = mock.Mock()
        rbd1.read.side_effect = fake_read
        rbd1.size.return_value = os.fstat(self.volume_file.fileno()).st_size

        rbd2 = mock.Mock()
        rbd2.write.side_effect = mock_write_data

        with tempfile.NamedTemporaryFile() as test_file:
            self.volume_file.seek(0)
            checksum = hashlib.sha256()

            src_rbd_io = self._get_wrapped_rbd_io(rbd1)
            dest_rbd_io = self._get_wrapped_rbd_io(rbd2)
            self.service._transfer_data(src_rbd_io, 'src_foo', dest_rbd_io,
                                        'dest_foo', self.data_length)

            # Ensure the files are equal
            self.assertEqual(checksum.digest(), self.checksum.digest())
    @common_mocks
    def test_transfer_data_from_file_to_rbd(self):
        """_transfer_data() copies a local file's bytes into an RBD image."""
        def mock_write_data(data, offset):
            # Record everything written to the mocked image.
            checksum.update(data)
            test_file.write(data)

        self.mock_rbd.Image.return_value.write.side_effect = mock_write_data

        with tempfile.NamedTemporaryFile() as test_file:
            self.volume_file.seek(0)
            checksum = hashlib.sha256()

            rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
            self.service._transfer_data(self.volume_file, 'src_foo',
                                        rbd_io, 'dest_foo', self.data_length)

            # Ensure the files are equal
            self.assertEqual(checksum.digest(), self.checksum.digest())
    @common_mocks
    def test_transfer_data_from_file_to_file(self):
        """_transfer_data() copies bytes between two ordinary files."""
        with tempfile.NamedTemporaryFile() as test_file:
            self.volume_file.seek(0)
            checksum = hashlib.sha256()

            self.service._transfer_data(self.volume_file, 'src_foo', test_file,
                                        'dest_foo', self.data_length)

            # Re-hash the destination to compare with setUp()'s checksum.
            checksum = hashlib.sha256()
            test_file.seek(0)
            for _c in range(0, self.num_chunks):
                checksum.update(test_file.read(self.chunk_size))

            # Ensure the files are equal
            self.assertEqual(checksum.digest(), self.checksum.digest())
    @common_mocks
    def test_backup_volume_from_file(self):
        """backup() of a file-backed volume writes identical data via rbd."""
        checksum = hashlib.sha256()

        def mock_write_data(data, offset):
            checksum.update(data)
            test_file.write(data)

        self.service.rbd.Image.return_value.write.side_effect = mock_write_data

        # Metadata backup and discard are out of scope here; stub them out.
        with mock.patch.object(self.service, '_backup_metadata'):
            with mock.patch.object(self.service, '_discard_bytes'):
                with tempfile.NamedTemporaryFile() as test_file:
                    self.service.backup(self.backup, self.volume_file)

                    # Ensure the files are equal
                    self.assertEqual(checksum.digest(), self.checksum.digest())

        self.assertTrue(self.service.rbd.Image.return_value.write.called)
    @common_mocks
    def test_get_backup_base_name(self):
        """_get_backup_base_name() formats names; requires backup_id or diff_format."""
        name = self.service._get_backup_base_name(self.volume_id,
                                                  diff_format=True)
        self.assertEqual("volume-%s.backup.base" % (self.volume_id), name)
        # Neither a backup id nor diff_format is invalid.
        self.assertRaises(exception.InvalidParameterValue,
                          self.service._get_backup_base_name,
                          self.volume_id)
        name = self.service._get_backup_base_name(self.volume_id, '1234')
        self.assertEqual("volume-%s.backup.%s" % (self.volume_id, '1234'),
                         name)
    @common_mocks
    @mock.patch('fcntl.fcntl', spec=True)
    @mock.patch('subprocess.Popen', spec=True)
    def test_backup_volume_from_rbd(self, mock_popen, mock_fnctl):
        """Incremental path: rbd export-diff piped into rbd import-diff.

        The two Popen invocations are faked; the hooks copy the volume data
        so the result can be checksummed, and the callstack proves the
        expected subprocess sequence ran.
        """
        backup_name = self.service._get_backup_base_name(self.backup_id,
                                                         diff_format=True)

        def mock_write_data():
            self.volume_file.seek(0)
            data = self.volume_file.read(self.data_length)
            self.callstack.append('write')
            checksum.update(data)
            test_file.write(data)

        def mock_read_data():
            self.callstack.append('read')
            return self.volume_file.read(self.data_length)

        # First Popen (export-diff) reads, second (import-diff) writes.
        self._setup_mock_popen(mock_popen,
                               ['out', 'err'],
                               p1hook=mock_read_data,
                               p2hook=mock_write_data)

        # Pretend the diff-format base image already exists.
        self.mock_rbd.RBD.list = mock.Mock()
        self.mock_rbd.RBD.list.return_value = [backup_name]

        with mock.patch.object(self.service, '_backup_metadata'):
            with mock.patch.object(self.service, 'get_backup_snaps') as \
                    mock_get_backup_snaps:
                with mock.patch.object(self.service, '_full_backup') as \
                        mock_full_backup:
                    with mock.patch.object(self.service,
                                           '_try_delete_base_image'):
                        with tempfile.NamedTemporaryFile() as test_file:
                            checksum = hashlib.sha256()
                            image = self.service.rbd.Image()
                            meta = rbddriver.RBDImageMetadata(image,
                                                              'pool_foo',
                                                              'user_foo',
                                                              'conf_foo')
                            rbdio = rbddriver.RBDImageIOWrapper(meta)

                            self.service.backup(self.backup, rbdio)

                            self.assertEqual(['popen_init',
                                              'read',
                                              'popen_init',
                                              'write',
                                              'stdout_close',
                                              'communicate'], self.callstack)

                            # The diff path must be used, not a full backup.
                            self.assertFalse(mock_full_backup.called)
                            self.assertTrue(mock_get_backup_snaps.called)

                            # Ensure the files are equal
                            self.assertEqual(checksum.digest(),
                                             self.checksum.digest())
    @common_mocks
    @mock.patch('fcntl.fcntl', spec=True)
    @mock.patch('subprocess.Popen', spec=True)
    def test_backup_volume_from_rbd_fail(self, mock_popen, mock_fnctl):
        """Test of when an exception occurs in an exception handler.
        In _backup_rbd(), after an exception.BackupRBDOperationFailed
        occurs in self._rbd_diff_transfer(), we want to check the
        process when the second exception occurs in
        self._try_delete_base_image().
        """
        backup_name = self.service._get_backup_base_name(self.backup_id,
                                                         diff_format=True)
        def mock_write_data():
            self.volume_file.seek(0)
            data = self.volume_file.read(self.data_length)
            self.callstack.append('write')
            checksum.update(data)
            test_file.write(data)
        def mock_read_data():
            self.callstack.append('read')
            return self.volume_file.read(self.data_length)
        self._setup_mock_popen(mock_popen,
                               ['out', 'err'],
                               p1hook=mock_read_data,
                               p2hook=mock_write_data)
        self.mock_rbd.RBD.list = mock.Mock()
        self.mock_rbd.RBD.list.return_value = [backup_name]
        with mock.patch.object(self.service, 'get_backup_snaps'), \
                mock.patch.object(self.service, '_rbd_diff_transfer') as \
                mock_rbd_diff_transfer:
            def mock_rbd_diff_transfer_side_effect(src_name, src_pool,
                                                   dest_name, dest_pool,
                                                   src_user, src_conf,
                                                   dest_user, dest_conf,
                                                   src_snap, from_snap):
                raise exception.BackupRBDOperationFailed(_('mock'))
            # Raise a pseudo exception.BackupRBDOperationFailed.
            mock_rbd_diff_transfer.side_effect \
                = mock_rbd_diff_transfer_side_effect
            with mock.patch.object(self.service, '_full_backup'), \
                    mock.patch.object(self.service,
                                      '_try_delete_base_image') as \
                    mock_try_delete_base_image:
                def mock_try_delete_base_image_side_effect(backup_id,
                                                           volume_id,
                                                           base_name):
                    raise self.service.rbd.ImageNotFound(_('mock'))
                # Raise a pseudo exception rbd.ImageNotFound.
                mock_try_delete_base_image.side_effect \
                    = mock_try_delete_base_image_side_effect
                with mock.patch.object(self.service, '_backup_metadata'):
                    with tempfile.NamedTemporaryFile() as test_file:
                        checksum = hashlib.sha256()
                        image = self.service.rbd.Image()
                        meta = rbddriver.RBDImageMetadata(image,
                                                          'pool_foo',
                                                          'user_foo',
                                                          'conf_foo')
                        rbdio = rbddriver.RBDImageIOWrapper(meta)
                        # We expect that the second exception is
                        # notified.
                        self.assertRaises(
                            self.service.rbd.ImageNotFound,
                            self.service.backup,
                            self.backup, rbdio)
    @common_mocks
    @mock.patch('fcntl.fcntl', spec=True)
    @mock.patch('subprocess.Popen', spec=True)
    def test_backup_volume_from_rbd_fail2(self, mock_popen, mock_fnctl):
        """Test of when an exception occurs in an exception handler.
        In backup(), after an exception.BackupOperationError occurs in
        self._backup_metadata(), we want to check the process when the
        second exception occurs in self.delete().
        """
        backup_name = self.service._get_backup_base_name(self.backup_id,
                                                         diff_format=True)
        def mock_write_data():
            self.volume_file.seek(0)
            data = self.volume_file.read(self.data_length)
            self.callstack.append('write')
            checksum.update(data)
            test_file.write(data)
        def mock_read_data():
            self.callstack.append('read')
            return self.volume_file.read(self.data_length)
        self._setup_mock_popen(mock_popen,
                               ['out', 'err'],
                               p1hook=mock_read_data,
                               p2hook=mock_write_data)
        self.mock_rbd.RBD.list = mock.Mock()
        self.mock_rbd.RBD.list.return_value = [backup_name]
        with mock.patch.object(self.service, 'get_backup_snaps'), \
                mock.patch.object(self.service, '_rbd_diff_transfer'), \
                mock.patch.object(self.service, '_full_backup'), \
                mock.patch.object(self.service, '_backup_metadata') as \
                mock_backup_metadata:
            def mock_backup_metadata_side_effect(backup):
                raise exception.BackupOperationError(_('mock'))
            # Raise a pseudo exception.BackupOperationError.
            mock_backup_metadata.side_effect = mock_backup_metadata_side_effect
            with mock.patch.object(self.service, 'delete') as mock_delete:
                def mock_delete_side_effect(backup):
                    raise self.service.rbd.ImageBusy()
                # Raise a pseudo exception rbd.ImageBusy.
                mock_delete.side_effect = mock_delete_side_effect
                with tempfile.NamedTemporaryFile() as test_file:
                    checksum = hashlib.sha256()
                    image = self.service.rbd.Image()
                    meta = rbddriver.RBDImageMetadata(image,
                                                      'pool_foo',
                                                      'user_foo',
                                                      'conf_foo')
                    rbdio = rbddriver.RBDImageIOWrapper(meta)
                    # We expect that the second exception is
                    # notified.
                    self.assertRaises(
                        self.service.rbd.ImageBusy,
                        self.service.backup,
                        self.backup, rbdio)
    @common_mocks
    def test_backup_vol_length_0(self):
        """A zero-length volume must be rejected with InvalidParameterValue."""
        volume_id = str(uuid.uuid4())
        # Size 0 GB is what triggers the rejection below.
        self._create_volume_db_entry(volume_id, 0)
        backup_id = str(uuid.uuid4())
        self._create_backup_db_entry(backup_id, volume_id, 1)
        backup = objects.Backup.get_by_id(self.ctxt, backup_id)
        self.assertRaises(exception.InvalidParameterValue, self.service.backup,
                          backup, self.volume_file)
@common_mocks
def test_restore(self):
backup_name = self.service._get_backup_base_name(self.backup_id,
diff_format=True)
self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
def mock_read_data(offset, length):
return self.volume_file.read(self.data_length)
self.mock_rbd.Image.return_value.read.side_effect = mock_read_data
self.mock_rbd.Image.return_value.size.return_value = \
self.chunk_size * self.num_chunks
with mock.patch.object(self.service, '_restore_metadata') as \
mock_restore_metadata:
with mock.patch.object(self.service, '_discard_bytes') as \
mock_discard_bytes:
with tempfile.NamedTemporaryFile() as test_file:
self.volume_file.seek(0)
self.service.restore(self.backup, self.volume_id,
test_file)
checksum = hashlib.sha256()
test_file.seek(0)
for _c in range(0, self.num_chunks):
checksum.update(test_file.read(self.chunk_size))
# Ensure the files are equal
self.assertEqual(checksum.digest(), self.checksum.digest())
self.assertTrue(mock_restore_metadata.called)
self.assertTrue(mock_discard_bytes.called)
self.assertTrue(mock_discard_bytes.called)
self.assertTrue(self.service.rbd.Image.return_value.read.called)
    @common_mocks
    def test_discard_bytes(self):
        """Cover both discard paths: native rbd discard vs. zero-writes."""
        # Lower the chunksize to a memory manageable number
        self.service.chunk_size = 1024
        image = self.mock_rbd.Image.return_value
        wrapped_rbd = self._get_wrapped_rbd_io(image)
        self.service._discard_bytes(wrapped_rbd, 0, 0)
        # A zero-length discard must be a no-op.
        self.assertEqual(0, image.discard.call_count)
        self.service._discard_bytes(wrapped_rbd, 0, 1234)
        self.assertEqual(1, image.discard.call_count)
        image.reset_mock()
        # Test discard with no remainder
        with mock.patch.object(self.service, '_file_is_rbd') as \
                mock_file_is_rbd:
            # Non-rbd destination: zeroes are written chunk by chunk
            # instead of calling discard.
            mock_file_is_rbd.return_value = False
            self.service._discard_bytes(wrapped_rbd, 0,
                                        self.service.chunk_size * 2)
            self.assertEqual(2, image.write.call_count)
            self.assertEqual(2, image.flush.call_count)
            self.assertFalse(image.discard.called)
            zeroes = '\0' * self.service.chunk_size
            image.write.assert_has_calls([mock.call(zeroes, 0),
                                          mock.call(zeroes, self.chunk_size)])
        image.reset_mock()
        image.write.reset_mock()
        # Now test with a remainder.
        with mock.patch.object(self.service, '_file_is_rbd') as \
                mock_file_is_rbd:
            mock_file_is_rbd.return_value = False
            self.service._discard_bytes(wrapped_rbd, 0,
                                        (self.service.chunk_size * 2) + 1)
            # Two full chunks plus a single trailing zero byte.
            self.assertEqual(3, image.write.call_count)
            self.assertEqual(3, image.flush.call_count)
            self.assertFalse(image.discard.called)
            image.write.assert_has_calls([mock.call(zeroes,
                                                    self.chunk_size * 2),
                                          mock.call(zeroes,
                                                    self.chunk_size * 3),
                                          mock.call('\0',
                                                    self.chunk_size * 4)])
    @common_mocks
    def test_delete_backup_snapshot(self):
        """Deleting the backup snapshot reports the snap name and a count
        of zero remaining snapshots when none are left."""
        snap_name = 'backup.%s.snap.3824923.1412' % (uuid.uuid4())
        base_name = self.service._get_backup_base_name(self.volume_id,
                                                       diff_format=True)
        self.mock_rbd.RBD.remove_snap = mock.Mock()
        with mock.patch.object(self.service, '_get_backup_snap_name') as \
                mock_get_backup_snap_name:
            mock_get_backup_snap_name.return_value = snap_name
            with mock.patch.object(self.service, 'get_backup_snaps') as \
                    mock_get_backup_snaps:
                # No snapshots remain after deletion.
                mock_get_backup_snaps.return_value = None
                rem = self.service._delete_backup_snapshot(self.mock_rados,
                                                           base_name,
                                                           self.backup_id)
                self.assertTrue(mock_get_backup_snap_name.called)
                self.assertTrue(mock_get_backup_snaps.called)
                self.assertEqual((snap_name, 0), rem)
    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_try_delete_base_image_diff_format(self, mock_meta_backup):
        """Delete of a diff-format backup removes its snapshot then the
        base image once no snapshots remain."""
        backup_name = self.service._get_backup_base_name(self.volume_id,
                                                         diff_format=True)
        self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
        with mock.patch.object(self.service, '_delete_backup_snapshot') as \
                mock_del_backup_snap:
            snap_name = self.service._get_new_snap_name(self.backup_id)
            # Report zero remaining snapshots so the base image is removed.
            mock_del_backup_snap.return_value = (snap_name, 0)
            self.service.delete(self.backup)
            self.assertTrue(mock_del_backup_snap.called)
        self.assertTrue(self.mock_rbd.RBD.return_value.list.called)
        self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)
    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_try_delete_base_image(self, mock_meta_backup):
        """Delete of a non-diff backup removes the per-backup base image."""
        backup_name = self.service._get_backup_base_name(self.volume_id,
                                                         self.backup_id)
        self.mock_rbd.RBD.return_value.list.return_value = [backup_name]
        with mock.patch.object(self.service, 'get_backup_snaps'):
            self.service.delete(self.backup)
            self.assertTrue(self.mock_rbd.RBD.return_value.remove.called)
    @common_mocks
    def test_try_delete_base_image_busy(self):
        """This should induce retries then raise rbd.ImageBusy."""
        backup_name = self.service._get_backup_base_name(self.volume_id,
                                                         self.backup_id)
        rbd = self.mock_rbd.RBD.return_value
        rbd.list.return_value = [backup_name]
        # Every remove attempt fails with ImageBusy, exhausting the retries.
        rbd.remove.side_effect = self.mock_rbd.ImageBusy
        with mock.patch.object(self.service, 'get_backup_snaps') as \
                mock_get_backup_snaps:
            self.assertRaises(self.mock_rbd.ImageBusy,
                              self.service._try_delete_base_image,
                              self.backup['id'], self.backup['volume_id'])
            self.assertTrue(mock_get_backup_snaps.called)
        self.assertTrue(rbd.list.called)
        self.assertTrue(rbd.remove.called)
        self.assertIn(MockImageBusyException, RAISED_EXCEPTIONS)
    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_delete(self, mock_meta_backup):
        """Successful delete raises nothing and records no exceptions."""
        with mock.patch.object(self.service, '_try_delete_base_image'):
            self.service.delete(self.backup)
            self.assertEqual([], RAISED_EXCEPTIONS)
    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_delete_image_not_found(self, mock_meta_backup):
        """A missing image must not abort delete()."""
        with mock.patch.object(self.service, '_try_delete_base_image') as \
                mock_del_base:
            mock_del_base.side_effect = self.mock_rbd.ImageNotFound
            # ImageNotFound exception is caught so that db entry can be cleared
            self.service.delete(self.backup)
            self.assertEqual([MockImageNotFoundException], RAISED_EXCEPTIONS)
    @common_mocks
    def test_diff_restore_allowed_with_image_not_exists(self):
        """Test diff restore not allowed when backup not diff-format."""
        not_allowed = (False, None)
        backup_base = 'backup.base'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]
        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            # Base image does not exist -> diff restore is impossible.
            mock_rbd_image_exists.return_value = (False, backup_base)
            resp = self.service._diff_restore_allowed(*args_vols_different)
            self.assertEqual(not_allowed, resp)
            mock_rbd_image_exists.assert_called_once_with(
                backup_base,
                self.backup['volume_id'],
                self.mock_rados)
    @common_mocks
    def test_diff_restore_allowed_with_no_restore_point(self):
        """Test diff restore not allowed when no restore point found.
        Detail conditions:
          1. backup base is diff-format
          2. restore point does not exist
        """
        not_allowed = (False, None)
        backup_base = 'backup.base'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]
        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                # No snapshot to restore from.
                mock_get_restore_point.return_value = None
                args = args_vols_different
                resp = self.service._diff_restore_allowed(*args)
                self.assertEqual(not_allowed, resp)
                self.assertTrue(mock_rbd_image_exists.called)
                mock_get_restore_point.assert_called_once_with(
                    backup_base,
                    self.backup['id'])
    @common_mocks
    def test_diff_restore_allowed_with_not_rbd(self):
        """Test diff restore not allowed when destination volume is not rbd.
        Detail conditions:
          1. backup base is diff-format
          2. restore point exists
          3. destination volume is not an rbd.
        """
        backup_base = 'backup.base'
        restore_point = 'backup.snap.1'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]
        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                mock_get_restore_point.return_value = restore_point
                with mock.patch.object(self.service, '_file_is_rbd') as \
                        mock_file_is_rbd:
                    # Destination is a plain file -> diff restore disallowed,
                    # but the restore point is still reported.
                    mock_file_is_rbd.return_value = False
                    args = args_vols_different
                    resp = self.service._diff_restore_allowed(*args)
                    self.assertEqual((False, restore_point), resp)
                    self.assertTrue(mock_rbd_image_exists.called)
                    self.assertTrue(mock_get_restore_point.called)
                    mock_file_is_rbd.assert_called_once_with(
                        rbd_io)
    @common_mocks
    def test_diff_restore_allowed_with_same_volume(self):
        """Test diff restore not allowed when volumes are same.
        Detail conditions:
          1. backup base is diff-format
          2. restore point exists
          3. destination volume is an rbd
          4. source and destination volumes are the same
        """
        backup_base = 'backup.base'
        restore_point = 'backup.snap.1'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        # Note: restoring onto self.volume (the original), not alt_volume.
        args_vols_same = [backup_base, self.backup, self.volume, rbd_io,
                          self.mock_rados]
        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                mock_get_restore_point.return_value = restore_point
                with mock.patch.object(self.service, '_file_is_rbd') as \
                        mock_file_is_rbd:
                    mock_file_is_rbd.return_value = True
                    resp = self.service._diff_restore_allowed(*args_vols_same)
                    self.assertEqual((False, restore_point), resp)
                    self.assertTrue(mock_rbd_image_exists.called)
                    self.assertTrue(mock_get_restore_point.called)
                    self.assertTrue(mock_file_is_rbd.called)
    @common_mocks
    def test_diff_restore_allowed_with_has_extents(self):
        """Test diff restore not allowed when destination volume has data.
        Detail conditions:
          1. backup base is diff-format
          2. restore point exists
          3. destination volume is an rbd
          4. source and destination volumes are different
          5. destination volume has data on it - full copy is mandated
        """
        backup_base = 'backup.base'
        restore_point = 'backup.snap.1'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]
        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                mock_get_restore_point.return_value = restore_point
                with mock.patch.object(self.service, '_file_is_rbd') as \
                        mock_file_is_rbd:
                    mock_file_is_rbd.return_value = True
                    with mock.patch.object(self.service, '_rbd_has_extents') \
                            as mock_rbd_has_extents:
                        # Destination already has data written to it.
                        mock_rbd_has_extents.return_value = True
                        args = args_vols_different
                        resp = self.service._diff_restore_allowed(*args)
                        self.assertEqual((False, restore_point), resp)
                        self.assertTrue(mock_rbd_image_exists.called)
                        self.assertTrue(mock_get_restore_point.called)
                        self.assertTrue(mock_file_is_rbd.called)
                        mock_rbd_has_extents.assert_called_once_with(
                            rbd_io.rbd_image)
    @common_mocks
    def test_diff_restore_allowed_with_no_extents(self):
        """Test diff restore allowed when no data in destination volume.
        Detail conditions:
          1. backup base is diff-format
          2. restore point exists
          3. destination volume is an rbd
          4. source and destination volumes are different
          5. destination volume no data on it
        """
        backup_base = 'backup.base'
        restore_point = 'backup.snap.1'
        rbd_io = self._get_wrapped_rbd_io(self.service.rbd.Image())
        args_vols_different = [backup_base, self.backup, self.alt_volume,
                               rbd_io, self.mock_rados]
        with mock.patch.object(self.service, '_rbd_image_exists') as \
                mock_rbd_image_exists:
            mock_rbd_image_exists.return_value = (True, backup_base)
            with mock.patch.object(self.service, '_get_restore_point') as \
                    mock_get_restore_point:
                mock_get_restore_point.return_value = restore_point
                with mock.patch.object(self.service, '_file_is_rbd') as \
                        mock_file_is_rbd:
                    mock_file_is_rbd.return_value = True
                    with mock.patch.object(self.service, '_rbd_has_extents') \
                            as mock_rbd_has_extents:
                        # Empty destination -> the only allowed case.
                        mock_rbd_has_extents.return_value = False
                        args = args_vols_different
                        resp = self.service._diff_restore_allowed(*args)
                        self.assertEqual((True, restore_point), resp)
                        self.assertTrue(mock_rbd_image_exists.called)
                        self.assertTrue(mock_get_restore_point.called)
                        self.assertTrue(mock_file_is_rbd.called)
                        self.assertTrue(mock_rbd_has_extents.called)
    @common_mocks
    @mock.patch('fcntl.fcntl', spec=True)
    @mock.patch('subprocess.Popen', spec=True)
    def test_piped_execute(self, mock_popen, mock_fcntl):
        """Two processes are spawned and piped in the expected order."""
        mock_fcntl.return_value = 0
        self._setup_mock_popen(mock_popen, ['out', 'err'])
        self.service._piped_execute(['foo'], ['bar'])
        self.assertEqual(['popen_init', 'popen_init',
                          'stdout_close', 'communicate'], self.callstack)
    @common_mocks
    def test_restore_metdata(self):
        # NOTE(review): method name has a typo ("metdata"); left unchanged
        # to avoid renaming a public test identifier.
        """Metadata restore succeeds for a supported version and fails
        with BackupOperationError for an unsupported one."""
        version = 2
        def mock_read(*args):
            base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META
            glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META
            return jsonutils.dumps({base_tag: {'image_name': 'image.base'},
                                    glance_tag: {'image_name': 'image.glance'},
                                    'version': version})
        self.mock_rados.Object.return_value.read.side_effect = mock_read
        self.service._restore_metadata(self.backup, self.volume_id)
        self.assertTrue(self.mock_rados.Object.return_value.stat.called)
        self.assertTrue(self.mock_rados.Object.return_value.read.called)
        # mock_read closes over `version`; bump it to an unsupported value.
        version = 3
        try:
            self.service._restore_metadata(self.backup, self.volume_id)
        except exception.BackupOperationError as exc:
            msg = _("Metadata restore failed due to incompatible version")
            self.assertEqual(msg, six.text_type(exc))
        else:
            # Force a test failure
            self.assertFalse(True)
    @common_mocks
    @mock.patch('cinder.backup.drivers.ceph.VolumeMetadataBackup', spec=True)
    def test_backup_metadata_already_exists(self, mock_meta_backup):
        """An existing metadata object surfaces as BackupOperationError."""
        def mock_set(json_meta):
            msg = (_("Metadata backup object '%s' already exists") %
                   ("backup.%s.meta" % (self.backup_id)))
            raise exception.VolumeMetadataBackupExists(msg)
        mock_meta_backup.return_value.set = mock.Mock()
        mock_meta_backup.return_value.set.side_effect = mock_set
        with mock.patch.object(self.service, 'get_metadata') as \
                mock_get_metadata:
            mock_get_metadata.return_value = "some.json.metadata"
            try:
                self.service._backup_metadata(self.backup)
            except exception.BackupOperationError as e:
                msg = (_("Failed to backup volume metadata - Metadata backup "
                         "object 'backup.%s.meta' already exists") %
                       (self.backup_id))
                self.assertEqual(msg, six.text_type(e))
            else:
                # Make the test fail
                self.assertFalse(True)
        self.assertFalse(mock_meta_backup.set.called)
    @common_mocks
    def test_backup_metata_error(self):
        """Ensure that delete() is called if the metadata backup fails.
        Also ensure that the exception is propagated to the caller.
        """
        # NOTE(review): method name has a typo ("metata"); left unchanged
        # to avoid renaming a public test identifier.
        with mock.patch.object(self.service, '_backup_metadata') as \
                mock_backup_metadata:
            mock_backup_metadata.side_effect = exception.BackupOperationError
            with mock.patch.object(self.service, '_get_volume_size_gb'):
                with mock.patch.object(self.service, '_file_is_rbd',
                                       return_value=False):
                    with mock.patch.object(self.service, '_full_backup'):
                        with mock.patch.object(self.service, 'delete') as \
                                mock_delete:
                            self.assertRaises(exception.BackupOperationError,
                                              self.service.backup, self.backup,
                                              mock.Mock(),
                                              backup_metadata=True)
                            self.assertTrue(mock_delete.called)
    @common_mocks
    def test_restore_invalid_metadata_version(self):
        """An unsupported metadata version must abort the restore."""
        def mock_read(*args):
            base_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_BASE_META
            glance_tag = driver.BackupMetadataAPI.TYPE_TAG_VOL_GLANCE_META
            # Version 3 is not supported by the driver under test.
            return jsonutils.dumps({base_tag: {'image_name': 'image.base'},
                                    glance_tag: {'image_name': 'image.glance'},
                                    'version': 3})
        self.mock_rados.Object.return_value.read.side_effect = mock_read
        with mock.patch.object(ceph.VolumeMetadataBackup, '_exists') as \
                mock_exists:
            mock_exists.return_value = True
            self.assertRaises(exception.BackupOperationError,
                              self.service._restore_metadata,
                              self.backup, self.volume_id)
            self.assertTrue(mock_exists.called)
            self.assertTrue(self.mock_rados.Object.return_value.read.called)
def common_meta_backup_mocks(f):
    """Decorator to set mocks common to all metadata backup tests.
    The point of doing these mocks here is so that we don't accidentally set
    mocks that can't/don't get unset.
    """
    def _common_inner_inner1(inst, *args, **kwargs):
        # The patches are applied inside the call so they are torn down
        # automatically when _common_inner_inner2 returns.
        @mock.patch('cinder.backup.drivers.ceph.rbd')
        @mock.patch('cinder.backup.drivers.ceph.rados')
        def _common_inner_inner2(mock_rados, mock_rbd):
            # Expose the mocks on the test instance for assertions.
            inst.mock_rados = mock_rados
            inst.mock_rbd = mock_rbd
            inst.mock_rados.ObjectNotFound = MockObjectNotFoundException
            return f(inst, *args, **kwargs)
        return _common_inner_inner2()
    return _common_inner_inner1
class VolumeMetadataBackupTestCase(test.TestCase):
    """Tests for ceph.VolumeMetadataBackup backed by mocked rados objects."""
    def setUp(self):
        global RAISED_EXCEPTIONS
        RAISED_EXCEPTIONS = []
        super(VolumeMetadataBackupTestCase, self).setUp()
        self.backup_id = str(uuid.uuid4())
        self.mb = ceph.VolumeMetadataBackup(mock.Mock(), self.backup_id)
    @common_meta_backup_mocks
    def test_name(self):
        """The metadata object name embeds the backup id."""
        self.assertEqual('backup.%s.meta' % (self.backup_id), self.mb.name)
    @common_meta_backup_mocks
    def test_exists(self):
        """exists reflects whether the rados object stat succeeds."""
        # True
        self.assertTrue(self.mb.exists)
        self.assertTrue(self.mock_rados.Object.return_value.stat.called)
        self.mock_rados.Object.return_value.reset_mock()
        # False
        self.mock_rados.Object.return_value.stat.side_effect = (
            self.mock_rados.ObjectNotFound)
        self.assertFalse(self.mb.exists)
        self.assertTrue(self.mock_rados.Object.return_value.stat.called)
        self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)
    @common_meta_backup_mocks
    def test_set(self):
        """set() stores metadata once and refuses to overwrite it."""
        obj_data = []
        called = []
        def mock_read(*args):
            called.append('read')
            self.assertEqual(1, len(obj_data))
            return obj_data[0]
        def _mock_write(data):
            obj_data.append(data)
            called.append('write')
        self.mb.get = mock.Mock()
        self.mb.get.side_effect = mock_read
        with mock.patch.object(ceph.VolumeMetadataBackup, 'set') as mock_write:
            mock_write.side_effect = _mock_write
            self.mb.set({'foo': 'bar'})
            self.assertEqual({'foo': 'bar'}, self.mb.get())
            self.assertTrue(self.mb.get.called)
        self.mb._exists = mock.Mock()
        self.mb._exists.return_value = True
        # use the unmocked set() method.
        self.assertRaises(exception.VolumeMetadataBackupExists, self.mb.set,
                          {'doo': 'dah'})
        # check the meta obj state has not changed.
        self.assertEqual({'foo': 'bar'}, self.mb.get())
        self.assertEqual(['write', 'read', 'read'], called)
    @common_meta_backup_mocks
    def test_get(self):
        """get() returns None for a missing object, its data otherwise."""
        self.mock_rados.Object.return_value.stat.side_effect = (
            self.mock_rados.ObjectNotFound)
        self.mock_rados.Object.return_value.read.return_value = 'meta'
        self.assertIsNone(self.mb.get())
        self.mock_rados.Object.return_value.stat.side_effect = None
        self.assertEqual('meta', self.mb.get())
    @common_meta_backup_mocks
    def test_remove_if_exists(self):
        """remove_if_exists swallows ObjectNotFound and otherwise removes.

        Renamed from remove_if_exists: without the test_ prefix this test
        was never discovered or executed by the test runner.
        """
        with mock.patch.object(self.mock_rados.Object, 'remove') as \
                mock_remove:
            mock_remove.side_effect = self.mock_rados.ObjectNotFound
            self.mb.remove_if_exists()
            self.assertEqual([MockObjectNotFoundException], RAISED_EXCEPTIONS)
        # A successful remove must record no exception.
        self.mock_rados.Object.remove.side_effect = None
        self.mb.remove_if_exists()
        self.assertEqual([], RAISED_EXCEPTIONS)
"""CGI-savvy HTTP Server.
This module builds on SimpleHTTPServer by implementing GET and POST
requests to cgi-bin scripts.
If the os.fork() function is not present (e.g. on Windows),
os.popen2() is used as a fallback, with slightly altered semantics; if
that function is not present either (e.g. on Macintosh), only Python
scripts are supported, and they are executed by the current process.
In all cases, the implementation is intentionally naive -- all
requests are executed synchronously.
SECURITY WARNING: DON'T USE THIS CODE UNLESS YOU ARE INSIDE A FIREWALL
-- it may execute arbitrary Python code or external programs.
Note that status code 200 is sent prior to execution of a CGI script, so
scripts cannot send other status codes such as 302 (redirect).
"""
__version__ = "0.4"
__all__ = ["CGIHTTPRequestHandler"]
import os
import sys
import urllib
import BaseHTTPServer
import SimpleHTTPServer
import select
class CGIHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Complete HTTP server with GET, HEAD and POST commands.
    GET and HEAD also support running CGI scripts.
    The POST command is *only* implemented for CGI scripts.
    """
    # Determine platform specifics
    have_fork = hasattr(os, 'fork')
    have_popen2 = hasattr(os, 'popen2')
    have_popen3 = hasattr(os, 'popen3')
    # Make rfile unbuffered -- we need to read one line and then pass
    # the rest to a subprocess, so we can't use buffered input.
    rbufsize = 0
    def do_POST(self):
        """Serve a POST request.
        This is only implemented for CGI scripts.
        """
        if self.is_cgi():
            self.run_cgi()
        else:
            self.send_error(501, "Can only POST to CGI scripts")
    def send_head(self):
        """Version of send_head that support CGI scripts"""
        if self.is_cgi():
            return self.run_cgi()
        else:
            return SimpleHTTPServer.SimpleHTTPRequestHandler.send_head(self)
    def is_cgi(self):
        """Test whether self.path corresponds to a CGI script.
        Return a tuple (dir, rest) if self.path requires running a
        CGI script, None if not.  Note that rest begins with a
        slash if it is not empty.
        The default implementation tests whether the path
        begins with one of the strings in the list
        self.cgi_directories (and the next character is a '/'
        or the end of the string).
        """
        path = self.path
        for x in self.cgi_directories:
            i = len(x)
            if path[:i] == x and (not path[i:] or path[i] == '/'):
                # Side effect: remember the split for run_cgi().
                self.cgi_info = path[:i], path[i+1:]
                return True
        return False
    cgi_directories = ['/cgi-bin', '/htbin']
    def is_executable(self, path):
        """Test whether argument path is an executable file."""
        return executable(path)
    def is_python(self, path):
        """Test whether argument path is a Python script."""
        head, tail = os.path.splitext(path)
        return tail.lower() in (".py", ".pyw")
    def run_cgi(self):
        """Execute a CGI script."""
        path = self.path
        dir, rest = self.cgi_info
        # Walk forward through 'rest' as long as each prefix maps to a
        # real directory, accumulating it into 'dir'.
        i = path.find('/', len(dir) + 1)
        while i >= 0:
            nextdir = path[:i]
            nextrest = path[i+1:]
            scriptdir = self.translate_path(nextdir)
            if os.path.isdir(scriptdir):
                dir, rest = nextdir, nextrest
                i = path.find('/', len(dir) + 1)
            else:
                break
        # find an explicit query string, if present.
        i = rest.rfind('?')
        if i >= 0:
            rest, query = rest[:i], rest[i+1:]
        else:
            query = ''
        # dissect the part after the directory name into a script name &
        # a possible additional path, to be stored in PATH_INFO.
        i = rest.find('/')
        if i >= 0:
            script, rest = rest[:i], rest[i:]
        else:
            script, rest = rest, ''
        scriptname = dir + '/' + script
        scriptfile = self.translate_path(scriptname)
        if not os.path.exists(scriptfile):
            self.send_error(404, "No such CGI script (%r)" % scriptname)
            return
        if not os.path.isfile(scriptfile):
            self.send_error(403, "CGI script is not a plain file (%r)" %
                            scriptname)
            return
        ispy = self.is_python(scriptname)
        if not ispy:
            # Non-Python scripts need a subprocess mechanism to run.
            if not (self.have_fork or self.have_popen2 or self.have_popen3):
                self.send_error(403, "CGI script is not a Python script (%r)" %
                                scriptname)
                return
            if not self.is_executable(scriptfile):
                self.send_error(403, "CGI script is not executable (%r)" %
                                scriptname)
                return
        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
        # XXX Much of the following could be prepared ahead of time!
        env = {}
        env['SERVER_SOFTWARE'] = self.version_string()
        env['SERVER_NAME'] = self.server.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PROTOCOL'] = self.protocol_version
        env['SERVER_PORT'] = str(self.server.server_port)
        env['REQUEST_METHOD'] = self.command
        uqrest = urllib.unquote(rest)
        env['PATH_INFO'] = uqrest
        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
        env['SCRIPT_NAME'] = scriptname
        if query:
            env['QUERY_STRING'] = query
        host = self.address_string()
        if host != self.client_address[0]:
            env['REMOTE_HOST'] = host
        env['REMOTE_ADDR'] = self.client_address[0]
        authorization = self.headers.getheader("authorization")
        if authorization:
            authorization = authorization.split()
            if len(authorization) == 2:
                import base64, binascii
                env['AUTH_TYPE'] = authorization[0]
                if authorization[0].lower() == "basic":
                    try:
                        authorization = base64.decodestring(authorization[1])
                    except binascii.Error:
                        pass
                    else:
                        authorization = authorization.split(':')
                        if len(authorization) == 2:
                            env['REMOTE_USER'] = authorization[0]
        # XXX REMOTE_IDENT
        if self.headers.typeheader is None:
            env['CONTENT_TYPE'] = self.headers.type
        else:
            env['CONTENT_TYPE'] = self.headers.typeheader
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        accept = []
        for line in self.headers.getallmatchingheaders('accept'):
            if line[:1] in "\t\n\r ":
                # Continuation line of a folded Accept header.
                accept.append(line.strip())
            else:
                accept = accept + line[7:].split(',')
        env['HTTP_ACCEPT'] = ','.join(accept)
        ua = self.headers.getheader('user-agent')
        if ua:
            env['HTTP_USER_AGENT'] = ua
        co = filter(None, self.headers.getheaders('cookie'))
        if co:
            env['HTTP_COOKIE'] = ', '.join(co)
        # XXX Other HTTP_* headers
        # Since we're setting the env in the parent, provide empty
        # values to override previously set values
        for k in ('QUERY_STRING', 'REMOTE_HOST', 'CONTENT_LENGTH',
                  'HTTP_USER_AGENT', 'HTTP_COOKIE'):
            env.setdefault(k, "")
        # NOTE(review): this mutates the parent process environment and is
        # not restored afterwards.
        os.environ.update(env)
        self.send_response(200, "Script output follows")
        decoded_query = query.replace('+', ' ')
        if self.have_fork:
            # Unix -- fork as we should
            args = [script]
            if '=' not in decoded_query:
                args.append(decoded_query)
            nobody = nobody_uid()
            self.wfile.flush() # Always flush before forking
            pid = os.fork()
            if pid != 0:
                # Parent
                pid, sts = os.waitpid(pid, 0)
                # throw away additional data [see bug #427345]
                while select.select([self.rfile], [], [], 0)[0]:
                    if not self.rfile.read(1):
                        break
                if sts:
                    self.log_error("CGI script exit status %#x", sts)
                return
            # Child
            try:
                try:
                    os.setuid(nobody)
                except os.error:
                    pass
                os.dup2(self.rfile.fileno(), 0)
                os.dup2(self.wfile.fileno(), 1)
                os.execve(scriptfile, args, os.environ)
            except:
                self.server.handle_error(self.request, self.client_address)
                os._exit(127)
        elif self.have_popen2 or self.have_popen3:
            # Windows -- use popen2 or popen3 to create a subprocess
            import shutil
            if self.have_popen3:
                popenx = os.popen3
            else:
                popenx = os.popen2
            cmdline = scriptfile
            if self.is_python(scriptfile):
                interp = sys.executable
                if interp.lower().endswith("w.exe"):
                    # On Windows, use python.exe, not pythonw.exe
                    interp = interp[:-5] + interp[-4:]
                cmdline = "%s -u %s" % (interp, cmdline)
            if '=' not in query and '"' not in query:
                cmdline = '%s "%s"' % (cmdline, query)
            self.log_message("command: %s", cmdline)
            try:
                nbytes = int(length)
            except (TypeError, ValueError):
                nbytes = 0
            files = popenx(cmdline, 'b')
            fi = files[0]
            fo = files[1]
            if self.have_popen3:
                fe = files[2]
            if self.command.lower() == "post" and nbytes > 0:
                data = self.rfile.read(nbytes)
                fi.write(data)
            # throw away additional data [see bug #427345]
            while select.select([self.rfile._sock], [], [], 0)[0]:
                if not self.rfile._sock.recv(1):
                    break
            fi.close()
            shutil.copyfileobj(fo, self.wfile)
            if self.have_popen3:
                errors = fe.read()
                fe.close()
                if errors:
                    self.log_error('%s', errors)
            sts = fo.close()
            if sts:
                self.log_error("CGI script exit status %#x", sts)
            else:
                self.log_message("CGI script exited OK")
        else:
            # Other O.S. -- execute script in this process
            # Swap this process's stdio/argv for the script's duration.
            save_argv = sys.argv
            save_stdin = sys.stdin
            save_stdout = sys.stdout
            save_stderr = sys.stderr
            try:
                save_cwd = os.getcwd()
                try:
                    sys.argv = [scriptfile]
                    if '=' not in decoded_query:
                        sys.argv.append(decoded_query)
                    sys.stdout = self.wfile
                    sys.stdin = self.rfile
                    execfile(scriptfile, {"__name__": "__main__"})
                finally:
                    sys.argv = save_argv
                    sys.stdin = save_stdin
                    sys.stdout = save_stdout
                    sys.stderr = save_stderr
                    os.chdir(save_cwd)
            except SystemExit, sts:
                self.log_error("CGI script exit status %s", str(sts))
            else:
                self.log_message("CGI script exited OK")
nobody = None  # cached uid of the 'nobody' account (resolved lazily)

def nobody_uid():
    """Return the uid of the 'nobody' account, caching the result.

    Falls back to -1 when the pwd module is unavailable (non-Unix), and
    to one greater than the highest uid in the password database when no
    'nobody' entry exists.
    """
    global nobody
    if nobody:
        return nobody
    try:
        import pwd
    except ImportError:
        return -1
    try:
        entry = pwd.getpwnam('nobody')
        nobody = entry[2]
    except KeyError:
        highest = max(pwent[2] for pwent in pwd.getpwall())
        nobody = highest + 1
    return nobody
def executable(path):
    """Test for executable file.

    Returns True if *path* exists and has any execute permission bit
    (user, group or other) set; False if it does not exist or is not
    executable by anyone.
    """
    try:
        st = os.stat(path)
    except os.error:
        return False
    # 0o111 masks the three execute bits. The bare `0111` form is a
    # Python-2-only octal literal; `0o111` is valid on 2.6+ and 3.x.
    return st.st_mode & 0o111 != 0
def test(HandlerClass = CGIHTTPRequestHandler,
         ServerClass = BaseHTTPServer.HTTPServer):
    """Run a CGI-capable HTTP server from the command line.

    Delegates to SimpleHTTPServer.test, substituting the CGI request
    handler so that scripts under cgi-bin are executed rather than
    served as static files.
    """
    SimpleHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
    # Allow running this module directly as a simple CGI-enabled server.
    test()
| apache-2.0 |
DrOctogon/Satchmo | satchmo/apps/payment/modules/google/notifications.py | 6 | 3767 | '''
Created on 3 Mar 2009
@author: dalore
'''
from django.utils.translation import ugettext as _
from livesettings import config_get_group, config_value
from payment.utils import get_processor_by_key
from satchmo_store.shop.models import Cart, Order, OrderPayment
import re
def find_order(data):
    """Look up the satchmo Order matching a Google order number.

    *data* is the parsed notification payload; its 'google-order-number'
    value was stored earlier as an OrderPayment transaction id.
    """
    google_id = data['google-order-number']
    matching = OrderPayment.objects.filter(transaction_id__exact=google_id)
    return matching[0].order
def notify_neworder(request, data):
    """
    Called when google reports a new order.

    Looks up the Order referenced in the merchant private data, records a
    pending payment keyed by the Google order number (so that later
    notifications can find the order again), empties and deletes the
    customer's carts, and sets the order status to 'New'.
    """
    # The order pk was embedded in the cart's merchant-private-data as
    # <satchmo-order id="..."/> when the checkout was submitted. Raw
    # string avoids the invalid '\d' escape in a plain literal.
    private_data = data['shopping-cart.merchant-private-data']
    order_id = re.search(r'satchmo-order id="(\d+)"', private_data).group(1)
    order = Order.objects.get(pk=order_id)
    processor = get_processor_by_key('PAYMENT_GOOGLE')
    # record pending payment
    pending_payment = processor.create_pending_payment(order)
    # save transaction id so we can find this order later
    pending_payment.capture.transaction_id = data['google-order-number']
    pending_payment.capture.save()
    # delete cart
    for cart in Cart.objects.filter(customer=order.contact):
        cart.empty()
        cart.delete()
    # set status
    order.add_status(status='New', notes=_("Received through Google Checkout."))
def do_charged(request, data):
    """
    Called when google sends a charged status update.
    Note that the charged amount comes in a separate call
    (see notify_chargeamount).
    """
    # find order from google id
    order = find_order(data)
    # Added to track total sold for each product; inventory is only
    # decremented when the TRACK_INVENTORY config flag is enabled.
    for item in order.orderitem_set.all():
        product = item.product
        product.total_sold += item.quantity
        if config_value('PRODUCT','TRACK_INVENTORY'):
            product.items_in_stock -= item.quantity
        product.save()
    # NOTE(review): 'processor' is never used below; the lookup is kept
    # as-is — confirm get_processor_by_key has no needed side effects
    # before removing it.
    processor = get_processor_by_key('PAYMENT_GOOGLE')
    # Mark the order as billed (actual payment recording happens in
    # notify_chargeamount).
    order.add_status(status='Billed', notes=_("Paid through Google Checkout."))
def do_shipped(request, data):
    """
    Called when you use the google checkout console to mark order has been shipped
    """
    # find order from google id
    order = find_order(data)
    # NOTE(review): 'processor' is never used below; the lookup is kept
    # as-is — confirm get_processor_by_key has no needed side effects
    # before removing it.
    processor = get_processor_by_key('PAYMENT_GOOGLE')
    # Mark the order as shipped.
    order.add_status(status='Shipped', notes=_("Shipped through Google Checkout."))
def notify_statechanged(request, data):
    """Dispatch Google Checkout order-state-change notifications.

    Only transitions whose financial state is CHARGED are acted on:
    PROCESSING fulfilment triggers the charged handler, DELIVERED the
    shipped handler; every other combination is ignored.
    """
    if data['new-financial-order-state'] != 'CHARGED':
        return
    fulfillment = data['new-fulfillment-order-state']
    if fulfillment == 'PROCESSING':
        do_charged(request, data)
    elif fulfillment == 'DELIVERED':
        do_shipped(request, data)
def notify_chargeamount(request, data):
    """Record a charge amount reported by Google Checkout.

    Logs the latest charge against the payment processor and, once the
    cumulative charged amount covers the order total, marks the order as
    successfully paid.
    """
    order = find_order(data)
    google_id = data['google-order-number']
    processor = get_processor_by_key('PAYMENT_GOOGLE')
    processor.record_payment(
        amount=data['latest-charge-amount'],
        transaction_id=google_id,
        order=order)
    if data['total-charge-amount'] >= order.total:
        order.order_success()
| bsd-3-clause |
ujenmr/ansible | test/units/modules/storage/netapp/test_na_ontap_interface.py | 23 | 10173 | # (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import print_function
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_interface \
import NetAppOntapInterface as interface_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
    """Stash *args* where AnsibleModule will find them at construction time."""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)  # pylint: disable=protected-access
class AnsibleExitJson(Exception):
    """Raised in place of module.exit_json so tests can capture its kwargs."""
class AnsibleFailJson(Exception):
    """Raised in place of module.fail_json so tests can capture its kwargs."""
def exit_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for module.exit_json: wrap the result kwargs in AnsibleExitJson."""
    kwargs.setdefault('changed', False)
    raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs):  # pylint: disable=unused-argument
    """Stand-in for module.fail_json: wrap the failure kwargs in AnsibleFailJson."""
    kwargs.update(failed=True)
    raise AnsibleFailJson(kwargs)
class MockONTAPConnection(object):
    ''' mock server connection to ONTAP host '''

    def __init__(self, kind=None, data=None):
        ''' save arguments '''
        # kind selects which canned response build to return; data is the
        # dict of interface attributes used to fabricate that response.
        self.type = kind
        self.params = data
        self.xml_in = None   # last request element received
        self.xml_out = None  # last response element returned

    def invoke_successfully(self, xml, enable_tunneling):  # pylint: disable=unused-argument
        ''' mock invoke_successfully returning xml data '''
        self.xml_in = xml
        # Only the 'interface' kind fabricates a response; any other kind
        # echoes the request back, which the module treats as "not found".
        if self.type == 'interface':
            xml = self.build_interface_info(self.params)
        self.xml_out = xml
        return xml

    @staticmethod
    def build_interface_info(data):
        ''' build xml data for net-interface-info '''
        xml = netapp_utils.zapi.NaElement('xml')
        # Mirrors the structure of a real net-interface-get-iter reply
        # with a single matching record.
        attributes = {
            'num-records': 1,
            'attributes-list': {
                'net-interface-info': {
                    'interface-name': data['name'],
                    'administrative-status': data['administrative-status'],
                    'failover-policy': data['failover-policy'],
                    'firewall-policy': data['firewall-policy'],
                    'is-auto-revert': data['is-auto-revert'],
                    'home-node': data['home_node'],
                    'home-port': data['home_port'],
                    'address': data['address'],
                    'netmask': data['netmask'],
                    'role': data['role'],
                    'protocols': data['protocols'] if data.get('protocols') else None
                }
            }
        }
        xml.translate_struct(attributes)
        return xml
class TestMyModule(unittest.TestCase):
    ''' a group of related Unit Tests '''

    def setUp(self):
        # Patch exit_json/fail_json so module termination surfaces as
        # catchable exceptions instead of sys.exit.
        self.mock_module_helper = patch.multiple(basic.AnsibleModule,
                                                 exit_json=exit_json,
                                                 fail_json=fail_json)
        self.mock_module_helper.start()
        self.addCleanup(self.mock_module_helper.stop)
        # Attribute values the mock connection reports for the LIF.
        self.mock_interface = {
            'name': 'test_lif',
            'administrative-status': 'up',
            'failover-policy': 'up',
            'firewall-policy': 'up',
            'is-auto-revert': 'true',
            'home_node': 'node',
            'role': 'test',
            'home_port': 'e0c',
            'address': '2.2.2.2',
            'netmask': '1.1.1.1',
        }

    def mock_args(self):
        ''' Minimal valid module arguments matching self.mock_interface. '''
        return {
            'vserver': 'vserver',
            'interface_name': self.mock_interface['name'],
            'home_node': self.mock_interface['home_node'],
            'role': self.mock_interface['role'],
            'home_port': self.mock_interface['home_port'],
            'address': self.mock_interface['address'],
            'netmask': self.mock_interface['netmask'],
            'hostname': 'hostname',
            'username': 'username',
            'password': 'password',
        }

    def get_interface_mock_object(self, kind=None):
        """
        Helper method to return an na_ontap_interface object
        :param kind: passes this param to MockONTAPConnection()
        :return: na_ontap_interface object
        """
        interface_obj = interface_module()
        interface_obj.autosupport_log = Mock(return_value=None)
        if kind is None:
            interface_obj.server = MockONTAPConnection()
        else:
            interface_obj.server = MockONTAPConnection(kind=kind, data=self.mock_interface)
        return interface_obj

    def test_module_fail_when_required_args_missing(self):
        ''' required arguments are reported as errors '''
        with pytest.raises(AnsibleFailJson) as exc:
            set_module_args({})
            interface_module()
        print('Info: %s' % exc.value.args[0]['msg'])

    def test_create_error_missing_param(self):
        ''' Test if create throws an error if required param 'role' is not specified'''
        data = self.mock_args()
        del data['role']
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_interface_mock_object('interface').create_interface()
        msg = 'Error: Missing one or more required parameters for creating interface: ' \
              'home_port, netmask, role, home_node, address'
        # BUGFIX: was sorted(','.split(msg)), which splits the literal ','
        # by msg and always yields [','] on both sides, making the assert
        # vacuously true. Split the messages by ',' instead.
        expected = sorted(msg.split(','))
        received = sorted(exc.value.args[0]['msg'].split(','))
        assert expected == received

    def test_get_nonexistent_interface(self):
        ''' Test if get_interface returns None for non-existent interface '''
        set_module_args(self.mock_args())
        result = self.get_interface_mock_object().get_interface()
        assert result is None

    def test_get_existing_interface(self):
        ''' Test if get_interface returns details for an existing interface '''
        set_module_args(self.mock_args())
        result = self.get_interface_mock_object(kind='interface').get_interface()
        assert result['interface_name'] == self.mock_interface['name']

    def test_successful_create(self):
        ''' Test successful create '''
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_interface_mock_object().apply()
        assert exc.value.args[0]['changed']

    def test_successful_create_for_NVMe(self):
        ''' Test successful create for NVMe protocol'''
        data = self.mock_args()
        data['protocols'] = 'fc-nvme'
        del data['address']
        del data['netmask']
        del data['home_port']
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_interface_mock_object().apply()
        assert exc.value.args[0]['changed']

    def test_create_idempotency_for_NVMe(self):
        ''' Test create idempotency for NVMe protocol '''
        data = self.mock_args()
        data['protocols'] = 'fc-nvme'
        del data['address']
        del data['netmask']
        del data['home_port']
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_interface_mock_object('interface').apply()
        assert not exc.value.args[0]['changed']

    def test_create_error_for_NVMe(self):
        ''' Test if create throws an error if required param 'protocols' uses NVMe'''
        data = self.mock_args()
        data['protocols'] = 'fc-nvme'
        set_module_args(data)
        with pytest.raises(AnsibleFailJson) as exc:
            self.get_interface_mock_object('interface').create_interface()
        msg = 'Error: Following parameters for creating interface are not supported for data-protocol fc-nvme: ' \
              'netmask, firewall_policy, address'
        # BUGFIX: same vacuous sorted(','.split(msg)) pattern as above.
        expected = sorted(msg.split(','))
        received = sorted(exc.value.args[0]['msg'].split(','))
        assert expected == received

    def test_create_idempotency(self):
        ''' Test create idempotency '''
        set_module_args(self.mock_args())
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_interface_mock_object('interface').apply()
        assert not exc.value.args[0]['changed']

    def test_successful_delete(self):
        ''' Test delete existing interface '''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_interface_mock_object('interface').apply()
        assert exc.value.args[0]['changed']

    def test_delete_idempotency(self):
        ''' Test delete idempotency '''
        data = self.mock_args()
        data['state'] = 'absent'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_interface_mock_object().apply()
        assert not exc.value.args[0]['changed']

    def test_successful_modify(self):
        ''' Test successful modify interface_minutes '''
        data = self.mock_args()
        data['home_port'] = 'new_port'
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            interface_obj = self.get_interface_mock_object('interface')
            interface_obj.apply()
        assert exc.value.args[0]['changed']

    def test_modify_idempotency(self):
        ''' Test modify idempotency '''
        data = self.mock_args()
        set_module_args(data)
        with pytest.raises(AnsibleExitJson) as exc:
            self.get_interface_mock_object('interface').apply()
        assert not exc.value.args[0]['changed']
| gpl-3.0 |
sdcooke/django | tests/admin_changelist/models.py | 276 | 2890 | from django.db import models
from django.utils.encoding import python_2_unicode_compatible
class Event(models.Model):
    # Oracle can have problems with a column named "date"
    date = models.DateField(db_column="event_date")


class Parent(models.Model):
    name = models.CharField(max_length=128)


class Child(models.Model):
    # Hidden from admin forms; nulled out when the parent row is deleted.
    parent = models.ForeignKey(Parent, models.SET_NULL, editable=False, null=True)
    name = models.CharField(max_length=30, blank=True)
    age = models.IntegerField(null=True, blank=True)


class Genre(models.Model):
    name = models.CharField(max_length=20)


class Band(models.Model):
    name = models.CharField(max_length=20)
    nr_of_members = models.PositiveIntegerField()
    genres = models.ManyToManyField(Genre)
@python_2_unicode_compatible
class Musician(models.Model):
    name = models.CharField(max_length=30)

    def __str__(self):
        return self.name


@python_2_unicode_compatible
class Group(models.Model):
    name = models.CharField(max_length=30)
    # Membership is the explicit 'through' model for this m2m relation.
    members = models.ManyToManyField(Musician, through='Membership')

    def __str__(self):
        return self.name


class Concert(models.Model):
    name = models.CharField(max_length=30)
    group = models.ForeignKey(Group, models.CASCADE)


class Membership(models.Model):
    # Intermediate model joining Musician and Group with a role label.
    music = models.ForeignKey(Musician, models.CASCADE)
    group = models.ForeignKey(Group, models.CASCADE)
    role = models.CharField(max_length=15)
# Multi-table inheritance variants used to exercise changelist behaviour
# with inherited models.
class Quartet(Group):
    pass


class ChordsMusician(Musician):
    pass


class ChordsBand(models.Model):
    name = models.CharField(max_length=30)
    members = models.ManyToManyField(ChordsMusician, through='Invitation')


class Invitation(models.Model):
    player = models.ForeignKey(ChordsMusician, models.CASCADE)
    band = models.ForeignKey(ChordsBand, models.CASCADE)
    instrument = models.CharField(max_length=15)
class Swallow(models.Model):
    origin = models.CharField(max_length=255)
    load = models.FloatField()
    speed = models.FloatField()

    class Meta:
        # Deliberate default ordering, relied on by ordering tests.
        ordering = ('speed', 'load')


class SwallowOneToOne(models.Model):
    swallow = models.OneToOneField(Swallow, models.CASCADE)


class UnorderedObject(models.Model):
    """
    Model without any defined `Meta.ordering`.
    Refs #17198.
    """
    # NOTE: field name deliberately shadows the builtin 'bool' as part of
    # the test fixture.
    bool = models.BooleanField(default=True)
class OrderedObjectManager(models.Manager):
    # Default manager that imposes an ordering even though the model
    # itself declares no Meta.ordering.
    def get_queryset(self):
        return super(OrderedObjectManager, self).get_queryset().order_by('number')


class OrderedObject(models.Model):
    """
    Model with Manager that defines a default order.
    Refs #17198.
    """
    name = models.CharField(max_length=255)
    bool = models.BooleanField(default=True)
    # Stored under a different column name to exercise db_column handling.
    number = models.IntegerField(default=0, db_column='number_val')

    objects = OrderedObjectManager()


class CustomIdUser(models.Model):
    # Non-standard primary key name (not 'id').
    uuid = models.AutoField(primary_key=True)
| bsd-3-clause |
CHT5/program-y | src/programy/parser/template/nodes/sr.py | 3 | 1984 | """
Copyright (c) 2016 Keith Sterling
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
from programy.parser.template.nodes.base import TemplateNode
class TemplateSrNode(TemplateNode):
    """AIML <sr/> template node: shorthand for <srai><star/></srai>."""

    def __init__(self):
        TemplateNode.__init__(self)

    def resolve(self, bot, clientid):
        """Re-ask the first matched wildcard as a new (srai) question.

        Returns the empty string when no star is available, or when any
        error occurs during resolution (errors are logged, not raised).
        """
        try:
            question = bot.get_conversation(clientid).current_question()
            sentence = question.current_sentence()
            star = sentence.matched_context.star(1)
            if star is None:
                logging.error("Sr node has no stars available")
                resolved = ""
            else:
                resolved = bot.ask_question(clientid, star, srai=True)
            logging.debug("[%s] resolved to [%s]", self.to_string(), resolved)
            return resolved
        except Exception as excep:
            logging.exception(excep)
            return ""

    def to_string(self):
        return "SR"

    def to_xml(self, bot, clientid):
        return "<sr />"
lewiskan/heron | integration-test/src/python/integration_test/topology/fields_grouping/fields_grouping.py | 8 | 1311 | # copyright 2016 twitter. all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=missing-docstring
from heron.pyheron.src.python import Grouping
from ...core import TestTopologyBuilder
from ...common.bolt import CountAggregatorBolt, WordCountBolt
from ...common.spout import ABSpout
def fields_grouping_builder(topology_name, http_server_url):
    """Build the fields-grouping integration-test topology.

    An AB spout feeds a fields-grouped word counter (2 instances), whose
    counts are aggregated by a single summing bolt.
    """
    builder = TestTopologyBuilder(topology_name, http_server_url)
    spout = builder.add_spout("ab-spout", ABSpout, 1, max_executions=400)
    counter = builder.add_bolt(
        "count-bolt", WordCountBolt,
        inputs={spout: Grouping.fields('word')}, par=2)
    builder.add_bolt(
        "sum-bolt", CountAggregatorBolt,
        inputs={counter: Grouping.NONE}, par=1)
    return builder.create_topology()
| apache-2.0 |
silentfuzzle/calibre | src/calibre/ebooks/oeb/polish/container.py | 2 | 55024 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import os, logging, sys, hashlib, uuid, re, shutil, unicodedata
from collections import defaultdict
from io import BytesIO
from urlparse import urlparse
from future_builtins import zip
from lxml import etree
from cssutils import replaceUrls, getUrls
from calibre import CurrentDir
from calibre.customize.ui import (plugin_for_input_format, plugin_for_output_format)
from calibre.ebooks.chardet import xml_to_unicode
from calibre.ebooks.conversion.plugins.epub_input import (
ADOBE_OBFUSCATION, IDPF_OBFUSCATION, decrypt_font_data)
from calibre.ebooks.conversion.preprocess import HTMLPreProcessor, CSSPreProcessor as cssp
from calibre.ebooks.mobi import MobiError
from calibre.ebooks.mobi.reader.headers import MetadataHeader
from calibre.ebooks.mobi.tweak import set_cover
from calibre.ebooks.oeb.base import (
serialize, OEB_DOCS, OEB_STYLES, OPF2_NS, DC11_NS, OPF, Manifest,
rewrite_links, iterlinks, itercsslinks, urlquote, urlunquote)
from calibre.ebooks.oeb.polish.errors import InvalidBook, DRMError
from calibre.ebooks.oeb.polish.parsing import parse as parse_html_tweak
from calibre.ebooks.oeb.polish.utils import PositionFinder, CommentFinder, guess_type, parse_css
from calibre.ebooks.oeb.parse_utils import NotHTML, parse_html, RECOVER_PARSER
from calibre.ptempfile import PersistentTemporaryDirectory, PersistentTemporaryFile
from calibre.utils.filenames import nlinks_file, hardlink_file
from calibre.utils.ipc.simple_worker import fork_job, WorkerError
from calibre.utils.logging import default_log
from calibre.utils.zipfile import ZipFile
exists, join, relpath = os.path.exists, os.path.join, os.path.relpath
OEB_FONTS = {guess_type('a.ttf'), guess_type('b.otf'), guess_type('a.woff'), 'application/x-font-ttf', 'application/x-font-otf', 'application/font-sfnt'}
OPF_NAMESPACES = {'opf':OPF2_NS, 'dc':DC11_NS}
class CSSPreProcessor(cssp):
    # Narrow the conversion pipeline's CSS preprocessor to only the
    # Microsoft-specific cleanup substitution (MS_PAT/ms_sub are
    # inherited from the base class).
    def __call__(self, data):
        return self.MS_PAT.sub(self.ms_sub, data)
def clone_dir(src, dest):
    ''' Clone a directory using hard links for the files, dest must already exist '''
    for x in os.listdir(src):
        dpath = os.path.join(dest, x)
        spath = os.path.join(src, x)
        if os.path.isdir(spath):
            os.mkdir(dpath)
            clone_dir(spath, dpath)
        else:
            try:
                hardlink_file(spath, dpath)
            except Exception:
                # Hard linking can fail (cross-device links, unsupported
                # filesystems); fall back to a plain copy. Catch only
                # Exception (not a bare except:) so KeyboardInterrupt and
                # SystemExit still propagate.
                shutil.copy2(spath, dpath)
def clone_container(container, dest_dir):
    ''' Efficiently clone a container using hard links '''
    # The base Container constructor takes (rootpath, opfpath, log) while
    # subclasses take (path, log), hence the two call shapes below.
    target = os.path.abspath(os.path.realpath(dest_dir))
    data = container.clone_data(target)
    cls = type(container)
    if cls is Container:
        return cls(None, None, container.log, clone_data=data)
    return cls(None, container.log, clone_data=data)
def name_to_abspath(name, root):
    """Convert a book-internal name (POSIX separators) to an absolute filesystem path."""
    parts = name.split('/')
    return os.path.abspath(os.path.join(root, *parts))
def abspath_to_name(path, root):
    """Convert an absolute filesystem path to a book-internal name relative to *root*."""
    rel = os.path.relpath(os.path.abspath(path), root)
    return rel.replace(os.sep, '/')
def name_to_href(name, root, base=None, quote=urlquote):
    """Convert a book-internal name to a (quoted) href relative to *base*,
    or to the book root when *base* is None."""
    target = name_to_abspath(name, root)
    if base is None:
        start = root
    else:
        start = os.path.dirname(name_to_abspath(base, root))
    href = relpath(target, start).replace(os.sep, '/')
    return quote(href)
def href_to_name(href, root, base=None):
    # Convert an href (as found in a file at *base*, or at the book root
    # when base is None) into a canonical book name. Returns None when the
    # href cannot correspond to a file inside the book.
    base = root if base is None else os.path.dirname(name_to_abspath(base, root))
    purl = urlparse(href)
    # External URLs (any scheme) and fragment-only/empty hrefs have no name.
    if purl.scheme or not purl.path:
        return None
    href = urlunquote(purl.path)
    if href.startswith('/') or (len(href) > 1 and href[1] == ':' and 'a' <= href[0].lower() <= 'z'):
        # For paths that start with drive letter os.path.join(base, href)
        # will discard base and return href on windows, so we assume that
        # such paths are also absolute paths, on all platforms.
        return None
    fullpath = os.path.join(base, *href.split('/'))
    return abspath_to_name(fullpath, root)
class Container(object): # {{{
'''
A container represents an Open EBook as a directory full of files and an
opf file. There are two important concepts:
* The root directory. This is the base of the ebook. All the ebooks
files are inside this directory or in its sub-directories.
* Names: These are paths to the books' files relative to the root
directory. They always contain POSIX separators and are unquoted. They
can be thought of as canonical identifiers for files in the book.
Most methods on the container object work with names. Names are always
in the NFC unicode normal form.
* Clones: the container object supports efficient on-disk cloning, which is used to
implement checkpoints in the ebook editor. In order to make this work, you should
never access files on the filesystem directly. Instead, use :meth:`raw_data` or
:meth:`open` to read/write to component files in the book.
When converting between hrefs and names use the methods provided by this
class, they assume all hrefs are quoted.
'''
#: The type of book (epub for EPUB files and azw3 for AZW3 files)
book_type = 'oeb'
SUPPORTS_TITLEPAGES = True
SUPPORTS_FILENAMES = True
def __init__(self, rootpath, opfpath, log, clone_data=None):
self.root = clone_data['root'] if clone_data is not None else os.path.abspath(rootpath)
self.log = log
self.html_preprocessor = HTMLPreProcessor()
self.css_preprocessor = CSSPreProcessor()
self.tweak_mode = False
self.parsed_cache = {}
self.mime_map = {}
self.name_path_map = {}
self.dirtied = set()
self.encoding_map = {}
self.pretty_print = set()
self.cloned = False
self.cache_names = ('parsed_cache', 'mime_map', 'name_path_map', 'encoding_map', 'dirtied', 'pretty_print')
if clone_data is not None:
self.cloned = True
for x in ('name_path_map', 'opf_name', 'mime_map', 'pretty_print', 'encoding_map', 'tweak_mode'):
setattr(self, x, clone_data[x])
self.opf_dir = os.path.dirname(self.name_path_map[self.opf_name])
return
# Map of relative paths with '/' separators from root of unzipped ePub
# to absolute paths on filesystem with os-specific separators
opfpath = os.path.abspath(os.path.realpath(opfpath))
for dirpath, _dirnames, filenames in os.walk(self.root):
for f in filenames:
path = join(dirpath, f)
name = self.abspath_to_name(path)
# OS X silently changes all file names to NFD form. The EPUB
# spec requires all text including filenames to be in NFC form.
# The proper fix is to implement a VFS that maps between
# canonical names and their file system representation, however,
# I dont have the time for that now. Note that the container
# ensures that all text files are normalized to NFC when
# decoding them anyway, so there should be no mismatch between
# names in the text and NFC canonical file names.
name = unicodedata.normalize('NFC', name)
self.name_path_map[name] = path
self.mime_map[name] = guess_type(path)
# Special case if we have stumbled onto the opf
if path == opfpath:
self.opf_name = name
self.opf_dir = os.path.dirname(path)
self.mime_map[name] = guess_type('a.opf')
if not hasattr(self, 'opf_name'):
raise InvalidBook('Could not locate opf file: %r'%opfpath)
# Update mime map with data from the OPF
self.refresh_mime_map()
def refresh_mime_map(self):
for item in self.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
href = item.get('href')
name = self.href_to_name(href, self.opf_name)
if name in self.mime_map and name != self.opf_name:
# some epubs include the opf in the manifest with an incorrect mime type
self.mime_map[name] = item.get('media-type')
def clone_data(self, dest_dir):
Container.commit(self, keep_parsed=True)
self.cloned = True
clone_dir(self.root, dest_dir)
return {
'root': dest_dir,
'opf_name': self.opf_name,
'mime_map': self.mime_map.copy(),
'pretty_print': set(self.pretty_print),
'encoding_map': self.encoding_map.copy(),
'tweak_mode': self.tweak_mode,
'name_path_map': {
name:os.path.join(dest_dir, os.path.relpath(path, self.root))
for name, path in self.name_path_map.iteritems()}
}
def guess_type(self, name):
' Return the expected mimetype for the specified file name based on its extension. '
# epubcheck complains if the mimetype for text documents is set to
# text/html in EPUB 2 books. Sigh.
ans = guess_type(name)
if ans == 'text/html':
ans = 'application/xhtml+xml'
return ans
def add_name_to_manifest(self, name):
' Add an entry to the manifest for a file with the specified name. Returns the manifest id. '
all_ids = {x.get('id') for x in self.opf_xpath('//*[@id]')}
c = 0
item_id = 'id'
while item_id in all_ids:
c += 1
item_id = 'id' + '%d'%c
manifest = self.opf_xpath('//opf:manifest')[0]
href = self.name_to_href(name, self.opf_name)
item = manifest.makeelement(OPF('item'),
id=item_id, href=href)
item.set('media-type', self.mime_map[name])
self.insert_into_xml(manifest, item)
self.dirty(self.opf_name)
return item_id
def manifest_has_name(self, name):
''' Return True if the manifest has an entry corresponding to name '''
href = self.name_to_href(name, self.opf_name)
all_hrefs = {x.get('href') for x in self.opf_xpath('//opf:manifest/opf:item[@href]')}
return href in all_hrefs
def add_file(self, name, data, media_type=None, spine_index=None):
''' Add a file to this container. Entries for the file are
automatically created in the OPF manifest and spine
(if the file is a text document) '''
if self.has_name(name):
raise ValueError('A file with the name %s already exists' % name)
if '..' in name:
raise ValueError('Names are not allowed to have .. in them')
href = self.name_to_href(name, self.opf_name)
all_hrefs = {x.get('href') for x in self.opf_xpath('//opf:manifest/opf:item[@href]')}
if href in all_hrefs:
raise ValueError('An item with the href %s already exists in the manifest' % href)
path = self.name_to_abspath(name)
base = os.path.dirname(path)
if not os.path.exists(base):
os.makedirs(base)
with open(path, 'wb') as f:
f.write(data)
mt = media_type or self.guess_type(name)
self.name_path_map[name] = path
self.mime_map[name] = mt
if self.ok_to_be_unmanifested(name):
return
item_id = self.add_name_to_manifest(name)
if mt in OEB_DOCS:
manifest = self.opf_xpath('//opf:manifest')[0]
spine = self.opf_xpath('//opf:spine')[0]
si = manifest.makeelement(OPF('itemref'), idref=item_id)
self.insert_into_xml(spine, si, index=spine_index)
def rename(self, current_name, new_name):
    ''' Renames a file from current_name to new_name. It automatically
    rebases all links inside the file if the directory the file is in
    changes. Note however, that links are not updated in the other files
    that could reference this file. This is for performance, such updates
    should be done once, in bulk. '''
    if current_name in self.names_that_must_not_be_changed:
        raise ValueError('Renaming of %s is not allowed' % current_name)
    if self.exists(new_name) and (new_name == current_name or new_name.lower() != current_name.lower()):
        # The destination exists and does not differ from the current name only by case
        raise ValueError('Cannot rename %s to %s as %s already exists' % (current_name, new_name, new_name))
    new_path = self.name_to_abspath(new_name)
    base = os.path.dirname(new_path)
    if os.path.isfile(base):
        raise ValueError('Cannot rename %s to %s as %s is a file' % (current_name, new_name, base))
    if not os.path.exists(base):
        os.makedirs(base)
    old_path = parent_dir = self.name_to_abspath(current_name)
    # Write out any pending in-memory changes before touching the file on disk
    self.commit_item(current_name)
    os.rename(old_path, new_path)
    # Remove empty directories left behind by the move, walking upwards
    # until a non-empty (or undeletable) directory stops the loop
    while parent_dir:
        parent_dir = os.path.dirname(parent_dir)
        try:
            os.rmdir(parent_dir)
        except EnvironmentError:
            break

    # Carry over per-name metadata to the new name
    for x in ('mime_map', 'encoding_map'):
        x = getattr(self, x)
        if current_name in x:
            x[new_name] = x[current_name]
    self.name_path_map[new_name] = new_path
    # Invalidate all caches for the old name (caches may be dicts or sets)
    for x in self.cache_names:
        x = getattr(self, x)
        try:
            x.pop(current_name, None)
        except TypeError:
            x.discard(current_name)
    if current_name == self.opf_name:
        self.opf_name = new_name
    if os.path.dirname(old_path) != os.path.dirname(new_path):
        # Directory changed: rebase all relative links inside the file
        from calibre.ebooks.oeb.polish.replace import LinkRebaser
        repl = LinkRebaser(self, current_name, new_name)
        self.replace_links(new_name, repl)
    self.dirty(new_name)
def replace_links(self, name, replace_func):
    ''' Replace all links in name using replace_func, which must be a
    callable that accepts a URL and returns the replaced URL. It must also
    have a 'replaced' attribute that is set to True if any actual
    replacement is done. Convenient ways of creating such callables are
    using the :class:`LinkReplacer` and :class:`LinkRebaser` classes. '''
    media_type = self.mime_map.get(name, guess_type(name))
    if name == self.opf_name:
        # In the OPF, every link lives in an href attribute
        for elem in self.opf_xpath('//*[@href]'):
            elem.set('href', replace_func(elem.get('href')))
    elif media_type.lower() in OEB_DOCS:
        rewrite_links(self.parsed(name), replace_func)
    elif media_type.lower() in OEB_STYLES:
        replaceUrls(self.parsed(name), replace_func)
    elif media_type.lower() == guess_type('toc.ncx'):
        # NCX tables of contents keep their links in src attributes
        for elem in self.parsed(name).xpath('//*[@src]'):
            elem.set('src', replace_func(elem.get('src')))

    if replace_func.replaced:
        self.dirty(name)
    return replace_func.replaced
def iterlinks(self, name, get_line_numbers=True):
    ''' Iterate over all links in name. If get_line_numbers is True the
    yields results of the form (link, line_number, offset). Where
    line_number is the line_number at which the link occurs and offset is
    the number of characters from the start of the line. Note that offset
    could actually encompass several lines if not zero. '''
    media_type = self.mime_map.get(name, guess_type(name))
    if name == self.opf_name:
        for elem in self.opf_xpath('//*[@href]'):
            yield (elem.get('href'), elem.sourceline, 0) if get_line_numbers else elem.get('href')
    elif media_type.lower() in OEB_DOCS:
        for el, attr, link, pos in iterlinks(self.parsed(name)):
            yield (link, el.sourceline, pos) if get_line_numbers else link
    elif media_type.lower() in OEB_STYLES:
        if get_line_numbers:
            with self.open(name, 'rb') as f:
                # Normalize line endings so line/column positions are stable
                raw = self.decode(f.read()).replace('\r\n', '\n').replace('\r', '\n')
                position = PositionFinder(raw)
                is_in_comment = CommentFinder(raw)
                for link, offset in itercsslinks(raw):
                    # Links inside CSS comments are not real links
                    if not is_in_comment(offset):
                        lnum, col = position(offset)
                        yield link, lnum, col
        else:
            for link in getUrls(self.parsed(name)):
                yield link
    elif media_type.lower() == guess_type('toc.ncx'):
        for elem in self.parsed(name).xpath('//*[@src]'):
            yield (elem.get('src'), elem.sourceline, 0) if get_line_numbers else elem.get('src')
def abspath_to_name(self, fullpath, root=None):
    '''
    Convert an absolute path to a canonical name relative to :attr:`root`

    :param root: The base directory. By default the root for this container object is used.
    '''
    base = root or self.root
    return abspath_to_name(fullpath, base)
def name_to_abspath(self, name):
    ' Convert a canonical name to an absolute OS dependant path '
    root = self.root
    return name_to_abspath(name, root)
def exists(self, name):
    ''' True iff a file corresponding to the canonical name exists. Note
    that this function suffers from the limitations of the underlying OS
    filesystem, in particular case (in)sensitivity. So on a case
    insensitive filesystem this will return True even if the case of name
    is different from the case of the underlying filesystem file. See also :meth:`has_name`'''
    path = self.name_to_abspath(name)
    return os.path.exists(path)
def href_to_name(self, href, base=None):
    '''
    Convert an href (relative to base) to a name. base must be a name or
    None, in which case self.root is used.
    '''
    root = self.root
    return href_to_name(href, root, base=base)
def name_to_href(self, name, base=None):
    '''Convert a name to a href relative to base, which must be a name or
    None in which case self.root is used as the base'''
    root = self.root
    return name_to_href(name, root, base=base)
def opf_xpath(self, expr):
    ' Convenience method to evaluate an XPath expression on the OPF file, has the opf: and dc: namespace prefixes pre-defined. '
    opf_root = self.opf
    return opf_root.xpath(expr, namespaces=OPF_NAMESPACES)
def has_name(self, name):
    ''' Return True iff a file with the same canonical name as that specified exists. Unlike :meth:`exists` this method is always case-sensitive. '''
    if not name:
        # Preserve falsy inputs ('' or None) as-is, like `name and ...`
        return name
    return name in self.name_path_map
def relpath(self, path, base=None):
    '''Convert an absolute path (with os separators) to a path relative to
    base (defaults to self.root). The relative path is *not* a name. Use
    :meth:`abspath_to_name` for that.'''
    anchor = base or self.root
    return relpath(path, anchor)
def decode(self, data, normalize_to_nfc=True):
    """
    Automatically decode ``data`` into a ``unicode`` object.

    :param normalize_to_nfc: Normalize returned unicode to the NFC normal form as is required by both the EPUB and AZW3 formats.
    """
    def fix_data(d):
        # Normalize all line endings to \n
        return d.replace('\r\n', '\n').replace('\r', '\n')
    # Python 2: already-decoded text is passed through untouched
    if isinstance(data, unicode):
        return fix_data(data)
    bom_enc = None
    # Detect and strip a Unicode BOM, remembering the encoding it implies
    if data[:4] in {b'\0\0\xfe\xff', b'\xff\xfe\0\0'}:
        bom_enc = {b'\0\0\xfe\xff':'utf-32-be',
                   b'\xff\xfe\0\0':'utf-32-le'}[data[:4]]
        data = data[4:]
    elif data[:2] in {b'\xff\xfe', b'\xfe\xff'}:
        bom_enc = {b'\xff\xfe':'utf-16-le', b'\xfe\xff':'utf-16-be'}[data[:2]]
        data = data[2:]
    elif data[:3] == b'\xef\xbb\xbf':
        bom_enc = 'utf-8'
        data = data[3:]
    # NOTE(review): the BOM and UTF-8 fast paths below return without
    # applying NFC normalization even when normalize_to_nfc=True — only the
    # xml_to_unicode fallback normalizes. Confirm whether intended.
    if bom_enc is not None:
        try:
            self.used_encoding = bom_enc
            return fix_data(data.decode(bom_enc))
        except UnicodeDecodeError:
            pass
    try:
        self.used_encoding = 'utf-8'
        return fix_data(data.decode('utf-8'))
    except UnicodeDecodeError:
        pass
    # Fall back to calibre's encoding sniffer
    data, self.used_encoding = xml_to_unicode(data)
    if normalize_to_nfc:
        data = unicodedata.normalize('NFC', data)
    return fix_data(data)
def ok_to_be_unmanifested(self, name):
    ' Return True iff name is allowed to be absent from the OPF manifest '
    allowed = self.names_that_need_not_be_manifested
    return name in allowed
@property
def names_that_need_not_be_manifested(self):
    ' Set of names that are allowed to be missing from the manifest. Depends on the ebook file format. '
    # Only the OPF itself is exempt at the base-container level
    return set([self.opf_name])
@property
def names_that_must_not_be_removed(self):
    ' Set of names that must never be deleted from the container. Depends on the ebook file format. '
    # Deleting the OPF would leave the book unreadable
    return set([self.opf_name])
@property
def names_that_must_not_be_changed(self):
    ' Set of names that must never be renamed. Depends on the ebook file format. '
    # No restrictions at the base container level; subclasses override
    return set()
def parse_xml(self, data):
    ' Parse raw bytes as XML, using a recovering parser; sets used_encoding as a side effect '
    text, self.used_encoding = xml_to_unicode(
        data, strip_encoding_pats=True, assume_utf8=True, resolve_entities=True)
    text = unicodedata.normalize('NFC', text)
    return etree.fromstring(text, parser=RECOVER_PARSER)
def parse_xhtml(self, data, fname='<string>', force_html5_parse=False):
    ' Parse raw data as (X)HTML, falling back to plain XML parsing for non-HTML files such as NCX '
    if self.tweak_mode:
        return parse_html_tweak(data, log=self.log, decoder=self.decode, force_html5_parse=force_html5_parse)
    try:
        return parse_html(
            data, log=self.log, decoder=self.decode,
            preprocessor=self.html_preprocessor, filename=fname,
            non_html_file_tags={'ncx'})
    except NotHTML:
        return self.parse_xml(data)
def parse(self, path, mime):
    ' Read the file at path and parse it according to its mime type; unknown types are returned as raw bytes '
    with open(path, 'rb') as src:
        raw = src.read()
    if mime in OEB_DOCS:
        return self.parse_xhtml(raw, self.relpath(path))
    if mime[-4:] in {'+xml', '/xml'}:
        return self.parse_xml(raw)
    if mime in OEB_STYLES:
        return self.parse_css(raw, self.relpath(path))
    return raw
def raw_data(self, name, decode=True, normalize_to_nfc=True):
    '''
    Return the raw data corresponding to the file specified by name

    :param decode: If True and the file has a text based mimetype, decode it and return a unicode object instead of raw bytes.
    :param normalize_to_nfc: If True the returned unicode object is normalized to the NFC normal form as is required for the EPUB and AZW3 file formats.
    '''
    data = self.open(name).read()
    mime = self.mime_map.get(name, guess_type(name))
    is_text = (mime in OEB_STYLES or mime in OEB_DOCS or
               mime == 'text/plain' or mime[-4:] in {'+xml', '/xml'})
    if decode and is_text:
        data = self.decode(data, normalize_to_nfc=normalize_to_nfc)
    return data
def parse_css(self, data, fname='<string>', is_declaration=False):
    ' Parse raw CSS data into a cssutils object '
    # In tweak mode the CSS is left untouched by the preprocessor
    preprocessor = None if self.tweak_mode else self.css_preprocessor
    return parse_css(data, fname=fname, is_declaration=is_declaration,
                     decode=self.decode, log_level=logging.WARNING,
                     css_preprocessor=preprocessor)
def parsed(self, name):
    ''' Return a parsed representation of the file specified by name. For
    HTML and XML files an lxml tree is returned. For CSS files a cssutils
    stylesheet is returned. Note that parsed objects are cached for
    performance. If you make any changes to the parsed object, you must
    call :meth:`dirty` so that the container knows to update the cache. See also :meth:`replace`.'''
    ans = self.parsed_cache.get(name, None)
    if ans is None:
        # used_encoding is set as a side effect of parse()
        self.used_encoding = None
        mime = self.mime_map.get(name, guess_type(name))
        ans = self.parse(self.name_path_map[name], mime)
        self.parsed_cache[name] = ans
        self.encoding_map[name] = self.used_encoding
    return ans
def replace(self, name, obj):
    '''
    Replace the parsed object corresponding to name with obj, which must be
    a similar object, i.e. an lxml tree for HTML/XML or a cssutils
    stylesheet for a CSS file.
    '''
    self.parsed_cache[name] = obj
    # Mark dirty so the new object is written out on the next commit
    self.dirty(name)
@property
def opf(self):
    ' The parsed OPF file '
    # Served from the parse cache when available
    return self.parsed(self.opf_name)
@property
def mi(self):
    ''' The metadata of this book as a Metadata object. Note that this
    object is constructed on the fly every time this property is requested,
    so use it sparingly. '''
    from calibre.ebooks.metadata.opf2 import OPF as O
    # Serialize the in-memory OPF and re-parse it into a Metadata object
    mi = self.serialize_item(self.opf_name)
    return O(BytesIO(mi), basedir=self.opf_dir, unquote_urls=False,
             populate_spine=False).to_book_metadata()
@property
def opf_version(self):
    ' The version set on the OPF\'s <package> element '
    matches = self.opf_xpath('//opf:package/@version')
    # Books without a version attribute yield the empty string
    return matches[0] if matches else ''
@property
def manifest_id_map(self):
    ' Mapping of manifest id to canonical names '
    ans = {}
    for item in self.opf_xpath('//opf:manifest/opf:item[@href and @id]'):
        ans[item.get('id')] = self.href_to_name(item.get('href'), self.opf_name)
    return ans
@property
def manifest_type_map(self):
    ' Mapping of manifest media-type to list of canonical names of that media-type '
    ans = defaultdict(list)
    for item in self.opf_xpath('//opf:manifest/opf:item[@href and @media-type]'):
        ans[item.get('media-type').lower()].append(self.href_to_name(
            item.get('href'), self.opf_name))
    # Freeze the per-type lists into tuples (Python 2 iteritems)
    return {mt:tuple(v) for mt, v in ans.iteritems()}
@property
def guide_type_map(self):
    ' Mapping of guide type to canonical name '
    ans = {}
    for item in self.opf_xpath('//opf:guide/opf:reference[@href and @type]'):
        ans[item.get('type', '')] = self.href_to_name(item.get('href'), self.opf_name)
    return ans
@property
def spine_iter(self):
    ''' An iterator that yields item, name is_linear for every item in the
    books' spine. item is the lxml element, name is the canonical file name
    and is_linear is True if the item is linear. See also: :attr:`spine_names` and :attr:`spine_items`. '''
    manifest_id_map = self.manifest_id_map
    # Non-linear items are collected and yielded after all linear ones
    non_linear = []
    for item in self.opf_xpath('//opf:spine/opf:itemref[@idref]'):
        idref = item.get('idref')
        name = manifest_id_map.get(idref, None)
        path = self.name_path_map.get(name, None)
        # Skip itemrefs whose idref does not resolve to an existing file
        if path:
            if item.get('linear', 'yes') == 'yes':
                yield item, name, True
            else:
                non_linear.append((item, name))
    for item, name in non_linear:
        yield item, name, False
@property
def spine_names(self):
    ''' An iterator yielding name and is_linear for every item in the
    books' spine. See also: :attr:`spine_iter` and :attr:`spine_items`. '''
    for _, name, is_linear in self.spine_iter:
        yield name, is_linear
@property
def spine_items(self):
    ''' An iterator yielding the absolute filesystem path for every item in
    the books' spine. Note: despite the name, this yields paths (looked up
    via name_path_map), not canonical names — use :attr:`spine_names` for
    names. See also: :attr:`spine_iter`. '''
    for name, linear in self.spine_names:
        yield self.name_path_map[name]
def remove_from_spine(self, spine_items, remove_if_no_longer_in_spine=True):
    '''
    Remove the specified items (by canonical name) from the spine. If ``remove_if_no_longer_in_spine``
    is True, the items are also deleted from the book, not just from the spine.
    '''
    nixed = set()
    # spine_items must be (name, remove) pairs in the same order as the spine
    for (name, remove), (item, xname, linear) in zip(spine_items, self.spine_iter):
        if remove and name == xname:
            self.remove_from_xml(item)
            nixed.add(name)
    if remove_if_no_longer_in_spine:
        # Remove from the book only items that no longer appear anywhere in
        # the spine (an item can be referenced more than once)
        nixed -= {name for name, linear in self.spine_names}
        for name in nixed:
            self.remove_item(name)
def set_spine(self, spine_items):
    ''' Set the spine to be spine_items where spine_items is an iterable of
    the form (name, linear). Will raise an error (KeyError) if one of the
    names is not present in the manifest. '''
    imap = self.manifest_id_map
    # Invert: canonical name -> manifest id
    imap = {name:item_id for item_id, name in imap.iteritems()}
    items = [item for item, name, linear in self.spine_iter]
    # Preserve the existing indentation (tail text) of the old itemrefs
    tail, last_tail = (items[0].tail, items[-1].tail) if items else ('\n ', '\n ')
    # Remove all existing itemrefs. Use an explicit loop rather than
    # map(): map() is used only for its side effects here, which is
    # non-idiomatic and silently becomes a no-op under Python 3.
    for item in items:
        self.remove_from_xml(item)
    spine = self.opf_xpath('//opf:spine')[0]
    spine.text = tail
    for name, linear in spine_items:
        i = spine.makeelement('{%s}itemref' % OPF_NAMESPACES['opf'], nsmap={'opf':OPF_NAMESPACES['opf']})
        i.tail = tail
        i.set('idref', imap[name])
        spine.append(i)
        if not linear:
            # The spec treats a missing linear attribute as linear="yes"
            i.set('linear', 'no')
    if len(spine) > 0:
        spine[-1].tail = last_tail
    self.dirty(self.opf_name)
def remove_item(self, name, remove_from_guide=True):
    '''
    Remove the item identified by name from this container. This removes all
    references to the item in the OPF manifest, guide and spine as well as from
    any internal caches.
    '''
    removed = set()
    # Collect the manifest ids of all entries that point at this name
    for elem in self.opf_xpath('//opf:manifest/opf:item[@href]'):
        if self.href_to_name(elem.get('href'), self.opf_name) == name:
            id_ = elem.get('id', None)
            if id_ is not None:
                removed.add(id_)
            self.remove_from_xml(elem)
            self.dirty(self.opf_name)
    if removed:
        # Drop the spine's toc attribute if it referenced a removed id
        for spine in self.opf_xpath('//opf:spine'):
            tocref = spine.attrib.get('toc', None)
            if tocref and tocref in removed:
                spine.attrib.pop('toc', None)
                self.dirty(self.opf_name)

        # Drop spine itemrefs pointing at removed ids
        for item in self.opf_xpath('//opf:spine/opf:itemref[@idref]'):
            idref = item.get('idref')
            if idref in removed:
                self.remove_from_xml(item)
                self.dirty(self.opf_name)

        # Drop legacy cover meta entries pointing at removed ids
        for meta in self.opf_xpath('//opf:meta[@name="cover" and @content]'):
            if meta.get('content') in removed:
                self.remove_from_xml(meta)
                self.dirty(self.opf_name)

    if remove_from_guide:
        for item in self.opf_xpath('//opf:guide/opf:reference[@href]'):
            if self.href_to_name(item.get('href'), self.opf_name) == name:
                self.remove_from_xml(item)
                self.dirty(self.opf_name)

    # Finally delete the file itself and purge all per-name caches
    path = self.name_path_map.pop(name, None)
    if path and os.path.exists(path):
        os.remove(path)
    self.mime_map.pop(name, None)
    self.parsed_cache.pop(name, None)
    self.dirtied.discard(name)
def dirty(self, name):
    ''' Mark the parsed object corresponding to name as dirty. See also: :meth:`parsed`. '''
    self.dirtied.add(name)
def remove_from_xml(self, item):
    'Removes item from parent, fixing indentation (works only with self closing items)'
    parent = item.getparent()
    idx = parent.index(item)
    if idx == 0:
        # We are removing the first item - only care about adjusting
        # the tail if this was the only child
        if len(parent) == 1:
            parent.text = item.tail
    else:
        # Make sure the preceding item has this tail
        parent[idx-1].tail = item.tail
    parent.remove(item)
    return item
def insert_into_xml(self, parent, item, index=None):
    '''Insert item into parent (or append if index is None), fixing
    indentation. Only works with self closing items.'''
    if index is None:
        parent.append(item)
    else:
        parent.insert(index, item)
    idx = parent.index(item)
    if idx == 0:
        item.tail = parent.text
        # If this is the only child of this parent element, we need a
        # little extra work as we have gone from a self-closing <foo />
        # element to <foo><item /></foo>
        if len(parent) == 1:
            sibling = parent.getprevious()
            if sibling is None:
                # Give up!
                return
            parent.text = sibling.text
            item.tail = sibling.tail
    else:
        item.tail = parent[idx-1].tail
        # The last child takes over the indentation that used to precede
        # the parent's closing tag
        if idx == len(parent)-1:
            parent[idx-1].tail = parent.text
def opf_get_or_create(self, name):
    ''' Convenience method to either return the first XML element with the
    specified name or create it under the opf:package element and then
    return it, if it does not already exist. '''
    ans = self.opf_xpath('//opf:'+name)
    if ans:
        return ans[0]
    # Element missing: create it under <package> and mark the OPF dirty
    self.dirty(self.opf_name)
    package = self.opf_xpath('//opf:package')[0]
    item = package.makeelement(OPF(name))
    item.tail = '\n'
    package.append(item)
    return item
def generate_item(self, name, id_prefix=None, media_type=None, unique_href=True):
    '''Add an item to the manifest with href derived from the given
    name. Ensures uniqueness of href and id automatically. Returns
    generated item.'''
    id_prefix = id_prefix or 'id'
    media_type = media_type or guess_type(name)
    href = self.name_to_href(name, self.opf_name)
    base, ext = href.rpartition('.')[0::2]
    all_ids = {x.get('id') for x in self.opf_xpath('//*[@id]')}
    c = 0
    item_id = id_prefix
    # Append a numeric suffix until the id is unique in the whole OPF
    while item_id in all_ids:
        c += 1
        item_id = id_prefix + '%d'%c
    all_names = {x.get('href') for x in self.opf_xpath(
        '//opf:manifest/opf:item[@href]')}

    def exists(h):
        return self.exists(self.href_to_name(h, self.opf_name))

    if unique_href:
        c = 0
        # The href must clash with neither manifest entries nor on-disk files
        while href in all_names or exists(href):
            c += 1
            href = '%s_%d.%s'%(base, c, ext)
    manifest = self.opf_xpath('//opf:manifest')[0]
    item = manifest.makeelement(OPF('item'),
                                id=item_id, href=href)
    item.set('media-type', media_type)
    self.insert_into_xml(manifest, item)
    self.dirty(self.opf_name)
    name = self.href_to_name(href, self.opf_name)
    self.name_path_map[name] = path = self.name_to_abspath(name)
    self.mime_map[name] = media_type
    # Ensure that the file corresponding to the newly created item exists
    # otherwise cloned containers will fail when they try to get the number
    # of links to the file
    base = os.path.dirname(path)
    if not os.path.exists(base):
        os.makedirs(base)
    open(path, 'wb').close()
    return item
def format_opf(self):
    ' Tidy whitespace and element ordering inside the OPF metadata section before serialization '
    try:
        mdata = self.opf_xpath('//opf:metadata')[0]
    except IndexError:
        pass
    else:
        mdata.text = '\n '
        remove = set()
        for child in mdata:
            child.tail = '\n '
            try:
                # Drop empty calibre:* meta entries
                if (child.get('name', '').startswith('calibre:') and
                    child.get('content', '').strip() in {'{}', ''}):
                    remove.add(child)
            except AttributeError:
                continue  # Happens for XML comments
        for child in remove:
            mdata.remove(child)
        if len(mdata) > 0:
            mdata[-1].tail = '\n '
    # Ensure name comes before content, needed for Nooks
    for meta in self.opf_xpath('//opf:meta[@name="cover"]'):
        if 'content' in meta.attrib:
            meta.set('content', meta.attrib.pop('content'))
def serialize_item(self, name):
    ''' Convert a parsed object (identified by canonical name) into a bytestring. See :meth:`parsed`. '''
    data = self.parsed(name)
    if name == self.opf_name:
        # Normalize OPF whitespace/ordering before writing it out
        self.format_opf()
    data = serialize(data, self.mime_map[name], pretty_print=name in
                     self.pretty_print)
    if name == self.opf_name:
        # Needed as I can't get lxml to output opf:role and
        # not output <opf:metadata> as well
        # NOTE(review): bytes pattern with a str replacement — valid only
        # on Python 2; would raise TypeError on Python 3
        data = re.sub(br'(<[/]{0,1})opf:', r'\1', data)
    return data
def commit_item(self, name, keep_parsed=False):
    ''' Commit a parsed object to disk (it is serialized and written to the
    underlying file). If ``keep_parsed`` is True the parsed representation
    is retained in the cache. See also: :meth:`parsed` '''
    # Nothing to do if the item was never parsed
    if name not in self.parsed_cache:
        return
    data = self.serialize_item(name)
    self.dirtied.discard(name)
    if not keep_parsed:
        self.parsed_cache.pop(name)
    dest = self.name_path_map[name]
    if self.cloned and nlinks_file(dest) > 1:
        # Decouple this file from its links
        os.unlink(dest)
    with open(dest, 'wb') as f:
        f.write(data)
def filesize(self, name):
    ''' Return the size in bytes of the file represented by the specified
    canonical name. Automatically handles dirtied parsed objects. See also:
    :meth:`parsed` '''
    if name in self.dirtied:
        # Flush pending in-memory changes so the on-disk size is current
        self.commit_item(name, keep_parsed=True)
    return os.path.getsize(self.name_to_abspath(name))
def open(self, name, mode='rb'):
    ''' Open the file pointed to by name for direct read/write. Note that
    this will commit the file if it is dirtied and remove it from the parse
    cache. You must finish with this file before accessing the parsed
    version of it again, or bad things will happen. '''
    if name in self.dirtied:
        self.commit_item(name)
    self.parsed_cache.pop(name, False)
    path = self.name_to_abspath(name)
    base = os.path.dirname(path)
    if not os.path.exists(base):
        os.makedirs(base)
    else:
        # When opening a hard-linked file for writing in a cloned
        # container, copy it first so the clone's siblings are unaffected
        if self.cloned and mode not in {'r', 'rb'} and os.path.exists(path) and nlinks_file(path) > 1:
            # Decouple this file from its links
            temp = path + 'xxx'
            shutil.copyfile(path, temp)
            os.unlink(path)
            os.rename(temp, path)
    return open(path, mode)
def commit(self, outpath=None, keep_parsed=False):
    '''
    Commit all dirtied parsed objects to the filesystem and write out the ebook file at outpath.

    :param outpath: The path to write the saved ebook file to. If None, the path of the original book file is used.
    :param keep_parsed: If True the parsed representations of committed items are kept in the cache.
    '''
    # Snapshot the set first: commit_item() mutates self.dirtied
    for name in list(self.dirtied):
        self.commit_item(name, keep_parsed=keep_parsed)
def compare_to(self, other):
    ''' Compare the files in this container to those in other, returning a
    report of mismatches. Note: returns an empty string (not None) when
    the containers match, so callers should test truthiness of the
    result rather than comparing against None. '''
    if set(self.name_path_map) != set(other.name_path_map):
        return 'Set of files is not the same'
    mismatches = []
    for name, path in self.name_path_map.iteritems():
        opath = other.name_path_map[name]
        with open(path, 'rb') as f1, open(opath, 'rb') as f2:
            if f1.read() != f2.read():
                mismatches.append('The file %s is not the same'%name)
    return '\n'.join(mismatches)
# }}}
# EPUB {{{
# Raised when the EPUB container is structurally invalid (e.g. missing
# META-INF/container.xml or a broken OPF reference)
class InvalidEpub(InvalidBook):
    pass
# Raised when an obfuscated font is present but no usable de-obfuscation
# key can be derived from the book's metadata
class ObfuscationKeyMissing(InvalidEpub):
    pass
OCF_NS = 'urn:oasis:names:tc:opendocument:xmlns:container'
class EpubContainer(Container):
    ''' Container for EPUB books: extracts the zip archive into a temporary
    directory, locates the OPF via META-INF/container.xml and handles
    de-obfuscation of embedded fonts. '''

    book_type = 'epub'

    # Known META-INF member files; the boolean marks whether the file is
    # required for a valid EPUB
    META_INF = {
        'container.xml': True,
        'manifest.xml': False,
        'encryption.xml': False,
        'metadata.xml': False,
        'signatures.xml': False,
        'rights.xml': False,
    }

    def __init__(self, pathtoepub, log, clone_data=None, tdir=None):
        # When cloning, restore state from clone_data instead of re-extracting
        if clone_data is not None:
            super(EpubContainer, self).__init__(None, None, log, clone_data=clone_data)
            for x in ('pathtoepub', 'obfuscated_fonts'):
                setattr(self, x, clone_data[x])
            return

        self.pathtoepub = pathtoepub
        if tdir is None:
            tdir = PersistentTemporaryDirectory('_epub_container')
        tdir = os.path.abspath(os.path.realpath(tdir))
        self.root = tdir
        with open(self.pathtoepub, 'rb') as stream:
            try:
                zf = ZipFile(stream)
                zf.extractall(tdir)
            except:
                log.exception('EPUB appears to be invalid ZIP file, trying a'
                        ' more forgiving ZIP parser')
                from calibre.utils.localunzip import extractall
                stream.seek(0)
                extractall(stream, path=tdir)
        # The mimetype file is regenerated on commit; remove the extracted copy
        try:
            os.remove(join(tdir, 'mimetype'))
        except EnvironmentError:
            pass
        container_path = join(self.root, 'META-INF', 'container.xml')
        if not exists(container_path):
            raise InvalidEpub('No META-INF/container.xml in epub')
        container = etree.fromstring(open(container_path, 'rb').read())
        # Locate the OPF rootfile entry in container.xml
        opf_files = container.xpath((
            r'child::ocf:rootfiles/ocf:rootfile'
            '[@media-type="%s" and @full-path]'%guess_type('a.opf')
            ), namespaces={'ocf':OCF_NS}
        )
        if not opf_files:
            raise InvalidEpub('META-INF/container.xml contains no link to OPF file')
        opf_path = os.path.join(self.root, *(urlunquote(opf_files[0].get('full-path')).split('/')))
        if not exists(opf_path):
            raise InvalidEpub('OPF file does not exist at location pointed to'
                    ' by META-INF/container.xml')

        super(EpubContainer, self).__init__(tdir, opf_path, log)

        self.obfuscated_fonts = {}
        if 'META-INF/encryption.xml' in self.name_path_map:
            self.process_encryption()
        self.parsed_cache['META-INF/container.xml'] = container

    def clone_data(self, dest_dir):
        # Extend the base clone payload with EPUB-specific state
        ans = super(EpubContainer, self).clone_data(dest_dir)
        ans['pathtoepub'] = self.pathtoepub
        ans['obfuscated_fonts'] = self.obfuscated_fonts.copy()
        return ans

    def rename(self, old_name, new_name):
        is_opf = old_name == self.opf_name
        super(EpubContainer, self).rename(old_name, new_name)
        if is_opf:
            # Keep container.xml's rootfile pointer in sync with the new OPF name
            for elem in self.parsed('META-INF/container.xml').xpath((
                r'child::ocf:rootfiles/ocf:rootfile'
                '[@media-type="%s" and @full-path]'%guess_type('a.opf')
                ), namespaces={'ocf':OCF_NS}
            ):
                # The asinine epubcheck cannot handle quoted filenames in
                # container.xml
                elem.set('full-path', self.opf_name)
            self.dirty('META-INF/container.xml')
        if old_name in self.obfuscated_fonts:
            # Keep the obfuscated-fonts map and encryption.xml in sync
            self.obfuscated_fonts[new_name] = self.obfuscated_fonts.pop(old_name)
            enc = self.parsed('META-INF/encryption.xml')
            for cr in enc.xpath('//*[local-name()="CipherReference" and @URI]'):
                if self.href_to_name(cr.get('URI')) == old_name:
                    cr.set('URI', self.name_to_href(new_name))
                    self.dirty('META-INF/encryption.xml')

    @property
    def names_that_need_not_be_manifested(self):
        # META-INF files are never listed in the OPF manifest
        return super(EpubContainer, self).names_that_need_not_be_manifested | {'META-INF/' + x for x in self.META_INF}

    def ok_to_be_unmanifested(self, name):
        return name in self.names_that_need_not_be_manifested or name.startswith('META-INF/')

    @property
    def names_that_must_not_be_removed(self):
        return super(EpubContainer, self).names_that_must_not_be_removed | {'META-INF/container.xml'}

    @property
    def names_that_must_not_be_changed(self):
        return super(EpubContainer, self).names_that_must_not_be_changed | {'META-INF/' + x for x in self.META_INF}

    def remove_item(self, name, remove_from_guide=True):
        # Handle removal of obfuscated fonts
        if name == 'META-INF/encryption.xml':
            self.obfuscated_fonts.clear()
        if name in self.obfuscated_fonts:
            self.obfuscated_fonts.pop(name, None)
            # Also drop the matching EncryptionMethod entry from encryption.xml
            enc = self.parsed('META-INF/encryption.xml')
            for em in enc.xpath('//*[local-name()="EncryptionMethod" and @Algorithm]'):
                alg = em.get('Algorithm')
                if alg not in {ADOBE_OBFUSCATION, IDPF_OBFUSCATION}:
                    continue
                try:
                    cr = em.getparent().xpath('descendant::*[local-name()="CipherReference" and @URI]')[0]
                except (IndexError, ValueError, KeyError):
                    continue
                if name == self.href_to_name(cr.get('URI')):
                    self.remove_from_xml(em.getparent())
                    self.dirty('META-INF/encryption.xml')
        super(EpubContainer, self).remove_item(name, remove_from_guide=remove_from_guide)

    def process_encryption(self):
        ''' Read META-INF/encryption.xml, de-obfuscate any obfuscated fonts
        found and record them in self.obfuscated_fonts so commit() can
        re-obfuscate them. Raises DRMError for real (non-font) encryption. '''
        fonts = {}
        enc = self.parsed('META-INF/encryption.xml')
        for em in enc.xpath('//*[local-name()="EncryptionMethod" and @Algorithm]'):
            alg = em.get('Algorithm')
            # Anything other than font obfuscation means actual DRM
            if alg not in {ADOBE_OBFUSCATION, IDPF_OBFUSCATION}:
                raise DRMError()
            try:
                cr = em.getparent().xpath('descendant::*[local-name()="CipherReference" and @URI]')[0]
            except (IndexError, ValueError, KeyError):
                continue
            name = self.href_to_name(cr.get('URI'))
            path = self.name_path_map.get(name, None)
            if path is not None:
                fonts[name] = alg
        if not fonts:
            return

        # Derive the IDPF key from the package unique-identifier
        package_id = raw_unique_identifier = idpf_key = None
        for attrib, val in self.opf.attrib.iteritems():
            if attrib.endswith('unique-identifier'):
                package_id = val
                break
        if package_id is not None:
            for elem in self.opf_xpath('//*[@id=%r]'%package_id):
                if elem.text:
                    raw_unique_identifier = elem.text
                    break
        if raw_unique_identifier is not None:
            idpf_key = raw_unique_identifier
            # NOTE(review): this pattern has no [ ] character class, so it
            # removes only the literal 4-char sequence space/tab/CR/LF, not
            # each whitespace character individually as the IDPF font
            # obfuscation algorithm requires — verify against the spec
            idpf_key = re.sub(u'\u0020\u0009\u000d\u000a', u'', idpf_key)
            idpf_key = hashlib.sha1(idpf_key.encode('utf-8')).digest()

        # Derive the Adobe key from a uuid-scheme identifier, if present
        key = None
        for item in self.opf_xpath('//*[local-name()="metadata"]/*'
                                   '[local-name()="identifier"]'):
            scheme = None
            for xkey in item.attrib.keys():
                if xkey.endswith('scheme'):
                    scheme = item.get(xkey)
            if (scheme and scheme.lower() == 'uuid') or \
                    (item.text and item.text.startswith('urn:uuid:')):
                try:
                    key = bytes(item.text).rpartition(':')[-1]
                    key = uuid.UUID(key).bytes
                except:
                    self.log.exception('Failed to parse obfuscation key')
                    key = None

        for font, alg in fonts.iteritems():
            tkey = key if alg == ADOBE_OBFUSCATION else idpf_key
            if not tkey:
                raise ObfuscationKeyMissing('Failed to find obfuscation key')
            raw = self.raw_data(font, decode=False)
            raw = decrypt_font_data(tkey, raw, alg)
            with self.open(font, 'wb') as f:
                f.write(raw)
            self.obfuscated_fonts[font] = (alg, tkey)

    def commit(self, outpath=None, keep_parsed=False):
        super(EpubContainer, self).commit(keep_parsed=keep_parsed)
        # Re-obfuscate fonts for the zipped output, remembering the plain
        # data so the working copy can be restored afterwards
        restore_fonts = {}
        for name in self.obfuscated_fonts:
            if name not in self.name_path_map:
                continue
            alg, key = self.obfuscated_fonts[name]
            # Decrypting and encrypting are the same operation (XOR with key)
            restore_fonts[name] = data = self.raw_data(name, decode=False)
            with self.open(name, 'wb') as f:
                f.write(decrypt_font_data(key, data, alg))
        if outpath is None:
            outpath = self.pathtoepub
        from calibre.ebooks.tweak import zip_rebuilder
        # Recreate the mimetype file, which must be the first zip member
        with open(join(self.root, 'mimetype'), 'wb') as f:
            f.write(guess_type('a.epub'))
        zip_rebuilder(self.root, outpath)
        # Restore the de-obfuscated fonts in the working directory
        for name, data in restore_fonts.iteritems():
            with self.open(name, 'wb') as f:
                f.write(data)

    @dynamic_property
    def path_to_ebook(self):
        # Read/write property aliasing the on-disk EPUB path
        def fget(self):
            return self.pathtoepub

        def fset(self, val):
            self.pathtoepub = val
        return property(fget=fget, fset=fset)
# }}}
# AZW3 {{{
# Raised when a MOBI file cannot be edited (not MOBI, DRMed, or no KF8 data)
class InvalidMobi(InvalidBook):
    pass
def do_explode(path, dest):
    ''' Explode the KF8 MOBI file at path into the directory dest.
    Returns the absolute path to the generated OPF and the list of
    encrypted (obfuscated) fonts. Run in a worker process via fork_job. '''
    from calibre.ebooks.mobi.reader.mobi6 import MobiReader
    from calibre.ebooks.mobi.reader.mobi8 import Mobi8Reader

    with open(path, 'rb') as stream:
        mobi6 = MobiReader(stream, default_log, None, None)
        with CurrentDir(dest):
            mobi8 = Mobi8Reader(mobi6, default_log, for_tweak=True)
            opf = os.path.abspath(mobi8())
            fonts = mobi8.encrypted_fonts
    return opf, fonts
def opf_to_azw3(opf, outpath, container):
    ''' Run the conversion pipeline in passthrough mode to rebuild an AZW3
    file at outpath from the exploded book rooted at opf. '''
    from calibre.ebooks.conversion.plumber import Plumber, create_oebbook

    class Item(Manifest.Item):

        def _parse_css(self, data):
            # The default CSS parser used by oeb.base inserts the h namespace
            # and resolves all @import rules. We dont want that.
            return container.parse_css(data)

    def specialize(oeb):
        # Swap in our CSS-preserving manifest item class
        oeb.manifest.Item = Item

    plumber = Plumber(opf, outpath, container.log)
    plumber.setup_options()
    inp = plugin_for_input_format('azw3')
    outp = plugin_for_output_format('azw3')
    # Passthrough avoids re-running the full conversion transforms
    plumber.opts.mobi_passthrough = True
    oeb = create_oebbook(container.log, opf, plumber.opts, specialize=specialize)
    set_cover(oeb)
    outp.convert(oeb, outpath, inp, plumber.opts, container.log)
def epub_to_azw3(epub, outpath=None):
    ''' Convert the EPUB at epub to an AZW3 file written to outpath
    (defaults to the same name with an .azw3 extension). '''
    container = get_container(epub, tweak_mode=True)
    if not outpath:
        outpath = epub.rpartition('.')[0] + '.azw3'
    opf = container.name_to_abspath(container.opf_name)
    opf_to_azw3(opf, outpath, container)
class AZW3Container(Container):
    ''' Container for AZW3 (KF8 MOBI) books: the book is exploded into OEB
    form in a temporary directory for editing and rebuilt on commit. '''

    book_type = 'azw3'
    SUPPORTS_TITLEPAGES = False
    SUPPORTS_FILENAMES = False

    def __init__(self, pathtoazw3, log, clone_data=None, tdir=None):
        # When cloning, restore state from clone_data instead of re-exploding
        if clone_data is not None:
            super(AZW3Container, self).__init__(None, None, log, clone_data=clone_data)
            for x in ('pathtoazw3', 'obfuscated_fonts'):
                setattr(self, x, clone_data[x])
            return

        self.pathtoazw3 = pathtoazw3
        if tdir is None:
            tdir = PersistentTemporaryDirectory('_azw3_container')
        tdir = os.path.abspath(os.path.realpath(tdir))
        self.root = tdir
        with open(pathtoazw3, 'rb') as stream:
            raw = stream.read(3)
            # Topaz files share the .azw extension but are a different format
            if raw == b'TPZ':
                raise InvalidMobi(_('This is not a MOBI file. It is a Topaz file.'))

            try:
                header = MetadataHeader(stream, default_log)
            except MobiError:
                raise InvalidMobi(_('This is not a MOBI file.'))

            if header.encryption_type != 0:
                raise DRMError()

            kf8_type = header.kf8_type

            if kf8_type is None:
                raise InvalidMobi(_('This MOBI file does not contain a KF8 format '
                        'book. KF8 is the new format from Amazon. calibre can '
                        'only edit MOBI files that contain KF8 books. Older '
                        'MOBI files without KF8 are not editable.'))

            if kf8_type == 'joint':
                raise InvalidMobi(_('This MOBI file contains both KF8 and '
                    'older Mobi6 data. calibre can only edit MOBI files '
                    'that contain only KF8 data.'))

        # Explode in a worker process to isolate the (heavy) MOBI reader
        try:
            opf_path, obfuscated_fonts = fork_job(
                'calibre.ebooks.oeb.polish.container', 'do_explode',
                args=(pathtoazw3, tdir), no_output=True)['result']
        except WorkerError as e:
            log(e.orig_tb)
            raise InvalidMobi('Failed to explode MOBI')
        super(AZW3Container, self).__init__(tdir, opf_path, log)
        # Normalize font paths to canonical (forward slash) names
        self.obfuscated_fonts = {x.replace(os.sep, '/') for x in obfuscated_fonts}

    def clone_data(self, dest_dir):
        # Extend the base clone payload with AZW3-specific state
        ans = super(AZW3Container, self).clone_data(dest_dir)
        ans['pathtoazw3'] = self.pathtoazw3
        ans['obfuscated_fonts'] = self.obfuscated_fonts.copy()
        return ans

    def commit(self, outpath=None, keep_parsed=False):
        super(AZW3Container, self).commit(keep_parsed=keep_parsed)
        if outpath is None:
            outpath = self.pathtoazw3
        # Rebuild the AZW3 file from the exploded OEB form
        opf_to_azw3(self.name_path_map[self.opf_name], outpath, self)

    @dynamic_property
    def path_to_ebook(self):
        # Read/write property aliasing the on-disk AZW3 path
        def fget(self):
            return self.pathtoazw3

        def fset(self, val):
            self.pathtoazw3 = val
        return property(fget=fget, fset=fset)

    @property
    def names_that_must_not_be_changed(self):
        # Renaming anything would break the MOBI passthrough rebuild
        return set(self.name_path_map)
# }}}
def get_container(path, log=None, tdir=None, tweak_mode=False):
    ' Return a Container (AZW3 or EPUB, chosen by file extension) for the ebook at path '
    if log is None:
        log = default_log
    ext = path.rpartition('.')[-1].lower()
    if ext in {'azw3', 'mobi', 'original_azw3', 'original_mobi'}:
        container_class = AZW3Container
    else:
        container_class = EpubContainer
    ebook = container_class(path, log, tdir=tdir)
    ebook.tweak_mode = tweak_mode
    return ebook
def test_roundtrip():
    # Smoke test: open the ebook named on the command line, commit it to a
    # temporary file, re-open the copy twice and diff the two re-opened
    # containers. A non-None diff means the roundtrip was not lossless.
    ebook = get_container(sys.argv[-1])
    p = PersistentTemporaryFile(suffix='.'+sys.argv[-1].rpartition('.')[-1])
    p.close()
    ebook.commit(outpath=p.name)
    ebook2 = get_container(p.name)
    ebook3 = get_container(p.name)
    diff = ebook3.compare_to(ebook2)
    if diff is not None:
        print (diff)
if __name__ == '__main__':
    test_roundtrip()
| gpl-3.0 |
2013Commons/HUE-SHARK | desktop/core/ext-py/Django-1.2.3/build/lib.linux-i686-2.7/django/utils/tree.py | 310 | 5778 | """
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
from django.utils.copycompat import deepcopy
class Node(object):
    """
    A single internal node in the tree graph. A Node should be viewed as a
    connection (the root) with the children being either leaf nodes or other
    Node instances.
    """
    # Standard connector type. Clients usually won't use this at all and
    # subclasses will usually override the value.
    default = 'DEFAULT'
    def __init__(self, children=None, connector=None, negated=False):
        """
        Constructs a new Node. If no connector is given, the default will be
        used.
        Warning: You probably don't want to pass in the 'negated' parameter. It
        is NOT the same as constructing a node and calling negate() on the
        result.
        """
        # Copy the children list so the caller's list is never shared/mutated.
        self.children = children and children[:] or []
        self.connector = connector or self.default
        self.subtree_parents = []
        self.negated = negated
    # We need this because of django.db.models.query_utils.Q. Q. __init__() is
    # problematic, but it is a natural Node subclass in all other respects.
    def _new_instance(cls, children=None, connector=None, negated=False):
        """
        This is called to create a new instance of this class when we need new
        Nodes (or subclasses) in the internal code in this class. Normally, it
        just shadows __init__(). However, subclasses with an __init__ signature
        that is not an extension of Node.__init__ might need to implement this
        method to allow a Node to create a new instance of them (if they have
        any extra setting up to do).
        """
        # Build a plain Node, then retarget its class so subclass __init__
        # (e.g. Q.__init__) is bypassed entirely.
        obj = Node(children, connector, negated)
        obj.__class__ = cls
        return obj
    _new_instance = classmethod(_new_instance)
    def __str__(self):
        if self.negated:
            return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c
                    in self.children]))
        return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in
                self.children]))
    def __deepcopy__(self, memodict):
        """
        Utility method used by copy.deepcopy().
        """
        obj = Node(connector=self.connector, negated=self.negated)
        obj.__class__ = self.__class__
        obj.children = deepcopy(self.children, memodict)
        obj.subtree_parents = deepcopy(self.subtree_parents, memodict)
        return obj
    def __len__(self):
        """
        The size of a node if the number of children it has.
        """
        return len(self.children)
    def __nonzero__(self):
        """
        For truth value testing.
        """
        # Python 2 truth protocol: a node is truthy iff it has children.
        return bool(self.children)
    def __contains__(self, other):
        """
        Returns True is 'other' is a direct child of this instance.
        """
        return other in self.children
    def add(self, node, conn_type):
        """
        Adds a new node to the tree. If the conn_type is the same as the root's
        current connector type, the node is added to the first level.
        Otherwise, the whole tree is pushed down one level and a new root
        connector is created, connecting the existing tree and the new node.
        """
        # Skip exact duplicates at the current level.
        if node in self.children and conn_type == self.connector:
            return
        # With fewer than two children the connector is meaningless, so it can
        # simply be overwritten.
        if len(self.children) < 2:
            self.connector = conn_type
        if self.connector == conn_type:
            # Flatten compatible sub-nodes into this level instead of nesting.
            if isinstance(node, Node) and (node.connector == conn_type or
                    len(node) == 1):
                self.children.extend(node.children)
            else:
                self.children.append(node)
        else:
            # Push the existing tree down one level under a new root connector.
            obj = self._new_instance(self.children, self.connector,
                    self.negated)
            self.connector = conn_type
            self.children = [obj, node]
    def negate(self):
        """
        Negate the sense of the root connector. This reorganises the children
        so that the current node has a single child: a negated node containing
        all the previous children. This slightly odd construction makes adding
        new children behave more intuitively.
        Interpreting the meaning of this negate is up to client code. This
        method is useful for implementing "not" arrangements.
        """
        self.children = [self._new_instance(self.children, self.connector,
                not self.negated)]
        self.connector = self.default
    def start_subtree(self, conn_type):
        """
        Sets up internal state so that new nodes are added to a subtree of the
        current node. The conn_type specifies how the sub-tree is joined to the
        existing children.
        """
        if len(self.children) == 1:
            self.connector = conn_type
        elif self.connector != conn_type:
            self.children = [self._new_instance(self.children, self.connector,
                    self.negated)]
            self.connector = conn_type
            self.negated = False
        # Save the current state on the parents stack, then reset this node to
        # an empty default so subsequent add() calls build the subtree.
        self.subtree_parents.append(self.__class__(self.children,
                self.connector, self.negated))
        self.connector = self.default
        self.negated = False
        self.children = []
    def end_subtree(self):
        """
        Closes off the most recently unmatched start_subtree() call.
        This puts the current state into a node of the parent tree and returns
        the current instances state to be the parent.
        """
        obj = self.subtree_parents.pop()
        node = self.__class__(self.children, self.connector)
        # Restore the saved parent state and attach the finished subtree.
        self.connector = obj.connector
        self.negated = obj.negated
        self.children = obj.children
        self.children.append(node)
| apache-2.0 |
patrickcurl/ztruck | dj/lib/python2.7/site-packages/django/http/request.py | 86 | 21287 | from __future__ import unicode_literals
import copy
import os
import re
import sys
from io import BytesIO
from itertools import chain
from pprint import pformat
from django.conf import settings
from django.core import signing
from django.core.exceptions import DisallowedHost, ImproperlyConfigured
from django.core.files import uploadhandler
from django.http.multipartparser import MultiPartParser, MultiPartParserError
from django.utils import six
from django.utils.datastructures import ImmutableList, MultiValueDict
from django.utils.encoding import (
escape_uri_path, force_bytes, force_str, force_text, iri_to_uri,
)
from django.utils.six.moves.urllib.parse import (
parse_qsl, quote, urlencode, urljoin, urlsplit,
)
RAISE_ERROR = object()
host_validation_re = re.compile(r"^([a-z0-9.-]+|\[[a-f0-9]*:[a-f0-9:]+\])(:\d+)?$")
class UnreadablePostError(IOError):
    # Raised in place of a plain IOError when reading the request body fails
    # (re-raised from HttpRequest.body/read/readline below).
    pass
class RawPostDataException(Exception):
    """
    You cannot access raw_post_data from a request that has
    multipart/* POST data if it has been accessed via POST,
    FILES, etc.
    """
    pass
class HttpRequest(object):
    """A basic HTTP request."""
    # The encoding used in GET/POST dicts. None means use default setting.
    _encoding = None
    _upload_handlers = []
    def __init__(self):
        # WARNING: The `WSGIRequest` subclass doesn't call `super`.
        # Any variable assignment made here should also happen in
        # `WSGIRequest.__init__()`.
        self.GET = QueryDict(mutable=True)
        self.POST = QueryDict(mutable=True)
        self.COOKIES = {}
        self.META = {}
        self.FILES = MultiValueDict()
        self.path = ''
        self.path_info = ''
        self.method = None
        self.resolver_match = None
        self._post_parse_error = False
    def __repr__(self):
        # Before the request is fully populated (no method or path yet),
        # fall back to just the class name.
        if self.method is None or not self.get_full_path():
            return force_str('<%s>' % self.__class__.__name__)
        return force_str(
            '<%s: %s %r>' % (self.__class__.__name__, self.method, force_str(self.get_full_path()))
        )
    def get_host(self):
        """Returns the HTTP host using the environment or request headers."""
        # We try three options, in order of decreasing preference.
        if settings.USE_X_FORWARDED_HOST and (
                'HTTP_X_FORWARDED_HOST' in self.META):
            host = self.META['HTTP_X_FORWARDED_HOST']
        elif 'HTTP_HOST' in self.META:
            host = self.META['HTTP_HOST']
        else:
            # Reconstruct the host using the algorithm from PEP 333.
            host = self.META['SERVER_NAME']
            server_port = str(self.META['SERVER_PORT'])
            if server_port != ('443' if self.is_secure() else '80'):
                host = '%s:%s' % (host, server_port)
        # There is no hostname validation when DEBUG=True
        if settings.DEBUG:
            return host
        domain, port = split_domain_port(host)
        if domain and validate_host(domain, settings.ALLOWED_HOSTS):
            return host
        else:
            msg = "Invalid HTTP_HOST header: %r." % host
            if domain:
                msg += " You may need to add %r to ALLOWED_HOSTS." % domain
            else:
                msg += " The domain name provided is not valid according to RFC 1034/1035."
            raise DisallowedHost(msg)
    def get_full_path(self):
        # RFC 3986 requires query string arguments to be in the ASCII range.
        # Rather than crash if this doesn't happen, we encode defensively.
        return '%s%s' % (
            escape_uri_path(self.path),
            ('?' + iri_to_uri(self.META.get('QUERY_STRING', ''))) if self.META.get('QUERY_STRING', '') else ''
        )
    def get_signed_cookie(self, key, default=RAISE_ERROR, salt='', max_age=None):
        """
        Attempts to return a signed cookie. If the signature fails or the
        cookie has expired, raises an exception... unless you provide the
        default argument in which case that value will be returned instead.
        """
        try:
            cookie_value = self.COOKIES[key]
        except KeyError:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        try:
            value = signing.get_cookie_signer(salt=key + salt).unsign(
                cookie_value, max_age=max_age)
        except signing.BadSignature:
            if default is not RAISE_ERROR:
                return default
            else:
                raise
        return value
    def build_absolute_uri(self, location=None):
        """
        Builds an absolute URI from the location and the variables available in
        this request. If no ``location`` is specified, the absolute URI is
        built on ``request.get_full_path()``. Anyway, if the location is
        absolute, it is simply converted to an RFC 3987 compliant URI and
        returned and if location is relative or is scheme-relative (i.e.,
        ``//example.com/``), it is urljoined to a base URL constructed from the
        request variables.
        """
        if location is None:
            # Make it an absolute url (but schemeless and domainless) for the
            # edge case that the path starts with '//'.
            location = '//%s' % self.get_full_path()
        bits = urlsplit(location)
        if not (bits.scheme and bits.netloc):
            current_uri = '{scheme}://{host}{path}'.format(scheme=self.scheme,
                                                           host=self.get_host(),
                                                           path=self.path)
            # Join the constructed URL with the provided location, which will
            # allow the provided ``location`` to apply query strings to the
            # base path as well as override the host, if it begins with //
            location = urljoin(current_uri, location)
        return iri_to_uri(location)
    def _get_scheme(self):
        return 'https' if os.environ.get("HTTPS") == "on" else 'http'
    @property
    def scheme(self):
        # First, check the SECURE_PROXY_SSL_HEADER setting.
        if settings.SECURE_PROXY_SSL_HEADER:
            try:
                header, value = settings.SECURE_PROXY_SSL_HEADER
            except ValueError:
                raise ImproperlyConfigured(
                    'The SECURE_PROXY_SSL_HEADER setting must be a tuple containing two values.'
                )
            if self.META.get(header, None) == value:
                return 'https'
        # Failing that, fall back to _get_scheme(), which is a hook for
        # subclasses to implement.
        return self._get_scheme()
    def is_secure(self):
        return self.scheme == 'https'
    def is_ajax(self):
        return self.META.get('HTTP_X_REQUESTED_WITH') == 'XMLHttpRequest'
    @property
    def encoding(self):
        return self._encoding
    @encoding.setter
    def encoding(self, val):
        """
        Sets the encoding used for GET/POST accesses. If the GET or POST
        dictionary has already been created, it is removed and recreated on the
        next access (so that it is decoded correctly).
        """
        self._encoding = val
        if hasattr(self, '_get'):
            del self._get
        if hasattr(self, '_post'):
            del self._post
    def _initialize_handlers(self):
        self._upload_handlers = [uploadhandler.load_handler(handler, self)
                                 for handler in settings.FILE_UPLOAD_HANDLERS]
    @property
    def upload_handlers(self):
        if not self._upload_handlers:
            # If there are no upload handlers defined, initialize them from settings.
            self._initialize_handlers()
        return self._upload_handlers
    @upload_handlers.setter
    def upload_handlers(self, upload_handlers):
        if hasattr(self, '_files'):
            raise AttributeError("You cannot set the upload handlers after the upload has been processed.")
        self._upload_handlers = upload_handlers
    def parse_file_upload(self, META, post_data):
        """Returns a tuple of (POST QueryDict, FILES MultiValueDict)."""
        # Freeze the handler list: changing it mid-parse would corrupt state.
        self.upload_handlers = ImmutableList(
            self.upload_handlers,
            warning="You cannot alter upload handlers after the upload has been processed."
        )
        parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding)
        return parser.parse()
    @property
    def body(self):
        # Read and cache the raw body once; subsequent reads go through a
        # fresh BytesIO over the cached bytes.
        if not hasattr(self, '_body'):
            if self._read_started:
                raise RawPostDataException("You cannot access body after reading from request's data stream")
            try:
                self._body = self.read()
            except IOError as e:
                six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
            self._stream = BytesIO(self._body)
        return self._body
    def _mark_post_parse_error(self):
        self._post = QueryDict('')
        self._files = MultiValueDict()
        self._post_parse_error = True
    def _load_post_and_files(self):
        """Populate self._post and self._files if the content-type is a form type"""
        if self.method != 'POST':
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
            return
        if self._read_started and not hasattr(self, '_body'):
            self._mark_post_parse_error()
            return
        if self.META.get('CONTENT_TYPE', '').startswith('multipart/form-data'):
            if hasattr(self, '_body'):
                # Use already read data
                data = BytesIO(self._body)
            else:
                data = self
            try:
                self._post, self._files = self.parse_file_upload(self.META, data)
            except MultiPartParserError:
                # An error occurred while parsing POST data. Since when
                # formatting the error the request handler might access
                # self.POST, set self._post and self._file to prevent
                # attempts to parse POST data again.
                # Mark that an error occurred. This allows self.__repr__ to
                # be explicit about it instead of simply representing an
                # empty POST
                self._mark_post_parse_error()
                raise
        elif self.META.get('CONTENT_TYPE', '').startswith('application/x-www-form-urlencoded'):
            self._post, self._files = QueryDict(self.body, encoding=self._encoding), MultiValueDict()
        else:
            self._post, self._files = QueryDict('', encoding=self._encoding), MultiValueDict()
    def close(self):
        if hasattr(self, '_files'):
            for f in chain.from_iterable(l[1] for l in self._files.lists()):
                f.close()
    # File-like and iterator interface.
    #
    # Expects self._stream to be set to an appropriate source of bytes by
    # a corresponding request subclass (e.g. WSGIRequest).
    # Also when request data has already been read by request.POST or
    # request.body, self._stream points to a BytesIO instance
    # containing that data.
    def read(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.read(*args, **kwargs)
        except IOError as e:
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
    def readline(self, *args, **kwargs):
        self._read_started = True
        try:
            return self._stream.readline(*args, **kwargs)
        except IOError as e:
            six.reraise(UnreadablePostError, UnreadablePostError(*e.args), sys.exc_info()[2])
    def xreadlines(self):
        while True:
            buf = self.readline()
            if not buf:
                break
            yield buf
    __iter__ = xreadlines
    def readlines(self):
        return list(iter(self))
class QueryDict(MultiValueDict):
    """
    A specialized MultiValueDict which represents a query string.
    A QueryDict can be used to represent GET or POST data. It subclasses
    MultiValueDict since keys in such data can be repeated, for instance
    in the data from a form with a <select multiple> field.
    By default QueryDicts are immutable, though the copy() method
    will always return a mutable copy.
    Both keys and values set on this class are converted from the given encoding
    (DEFAULT_CHARSET by default) to unicode.
    """
    # These are both reset in __init__, but is specified here at the class
    # level so that unpickling will have valid values
    _mutable = True
    _encoding = None
    def __init__(self, query_string=None, mutable=False, encoding=None):
        super(QueryDict, self).__init__()
        if not encoding:
            encoding = settings.DEFAULT_CHARSET
        self.encoding = encoding
        if six.PY3:
            if isinstance(query_string, bytes):
                # query_string normally contains URL-encoded data, a subset of ASCII.
                try:
                    query_string = query_string.decode(encoding)
                except UnicodeDecodeError:
                    # ... but some user agents are misbehaving :-(
                    query_string = query_string.decode('iso-8859-1')
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True,
                                        encoding=encoding):
                self.appendlist(key, value)
        else:
            # Python 2: parse as bytes, then decode each value with an
            # iso-8859-1 fallback for badly-encoded input.
            for key, value in parse_qsl(query_string or '',
                                        keep_blank_values=True):
                try:
                    value = value.decode(encoding)
                except UnicodeDecodeError:
                    value = value.decode('iso-8859-1')
                self.appendlist(force_text(key, encoding, errors='replace'),
                                value)
        # Only now make the dict immutable (if requested); the parsing above
        # needed appendlist() to work.
        self._mutable = mutable
    @property
    def encoding(self):
        if self._encoding is None:
            self._encoding = settings.DEFAULT_CHARSET
        return self._encoding
    @encoding.setter
    def encoding(self, value):
        self._encoding = value
    def _assert_mutable(self):
        # Guard used by every mutating method below.
        if not self._mutable:
            raise AttributeError("This QueryDict instance is immutable")
    def __setitem__(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).__setitem__(key, value)
    def __delitem__(self, key):
        self._assert_mutable()
        super(QueryDict, self).__delitem__(key)
    def __copy__(self):
        # Copies are always mutable, regardless of the source's mutability.
        result = self.__class__('', mutable=True, encoding=self.encoding)
        for key, value in six.iterlists(self):
            result.setlist(key, value)
        return result
    def __deepcopy__(self, memo):
        result = self.__class__('', mutable=True, encoding=self.encoding)
        memo[id(self)] = result
        for key, value in six.iterlists(self):
            result.setlist(copy.deepcopy(key, memo), copy.deepcopy(value, memo))
        return result
    def setlist(self, key, list_):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        list_ = [bytes_to_text(elt, self.encoding) for elt in list_]
        super(QueryDict, self).setlist(key, list_)
    def setlistdefault(self, key, default_list=None):
        self._assert_mutable()
        return super(QueryDict, self).setlistdefault(key, default_list)
    def appendlist(self, key, value):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        value = bytes_to_text(value, self.encoding)
        super(QueryDict, self).appendlist(key, value)
    def pop(self, key, *args):
        self._assert_mutable()
        return super(QueryDict, self).pop(key, *args)
    def popitem(self):
        self._assert_mutable()
        return super(QueryDict, self).popitem()
    def clear(self):
        self._assert_mutable()
        super(QueryDict, self).clear()
    def setdefault(self, key, default=None):
        self._assert_mutable()
        key = bytes_to_text(key, self.encoding)
        default = bytes_to_text(default, self.encoding)
        return super(QueryDict, self).setdefault(key, default)
    def copy(self):
        """Returns a mutable copy of this object."""
        return self.__deepcopy__({})
    def urlencode(self, safe=None):
        """
        Returns an encoded string of all query string arguments.
        :arg safe: Used to specify characters which do not require quoting, for
            example::
                >>> q = QueryDict('', mutable=True)
                >>> q['next'] = '/a&b/'
                >>> q.urlencode()
                'next=%2Fa%26b%2F'
                >>> q.urlencode(safe='/')
                'next=/a%26b/'
        """
        output = []
        if safe:
            safe = force_bytes(safe, self.encoding)
            encode = lambda k, v: '%s=%s' % ((quote(k, safe), quote(v, safe)))
        else:
            encode = lambda k, v: urlencode({k: v})
        for k, list_ in self.lists():
            k = force_bytes(k, self.encoding)
            output.extend(encode(k, force_bytes(v, self.encoding))
                          for v in list_)
        return '&'.join(output)
def build_request_repr(request, path_override=None, GET_override=None,
                       POST_override=None, COOKIES_override=None,
                       META_override=None):
    """
    Builds and returns the request's representation string. The request's
    attributes may be overridden by pre-processed values.
    """
    # Since this is called as part of error handling, we need to be very
    # robust against potentially malformed input. Every pformat() is guarded
    # so a broken attribute degrades to '<could not parse>' instead of
    # raising inside the error path.
    try:
        get = (pformat(GET_override)
               if GET_override is not None
               else pformat(request.GET))
    except Exception:
        get = '<could not parse>'
    if request._post_parse_error:
        post = '<could not parse>'
    else:
        try:
            post = (pformat(POST_override)
                    if POST_override is not None
                    else pformat(request.POST))
        except Exception:
            post = '<could not parse>'
    try:
        cookies = (pformat(COOKIES_override)
                   if COOKIES_override is not None
                   else pformat(request.COOKIES))
    except Exception:
        cookies = '<could not parse>'
    try:
        meta = (pformat(META_override)
                if META_override is not None
                else pformat(request.META))
    except Exception:
        meta = '<could not parse>'
    path = path_override if path_override is not None else request.path
    return force_str('<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
                     (request.__class__.__name__,
                      path,
                      six.text_type(get),
                      six.text_type(post),
                      six.text_type(cookies),
                      six.text_type(meta)))
# It's neither necessary nor appropriate to use
# django.utils.encoding.smart_text for parsing URLs and form inputs. Thus,
# this slightly more restricted function, used by QueryDict.
def bytes_to_text(s, encoding):
    """
    Convert a bytes object to unicode using the given encoding; bytes that
    cannot be decoded are replaced with the Unicode "unknown" codepoint
    (\ufffd). Any non-bytes object is returned unchanged.
    """
    if not isinstance(s, bytes):
        return s
    return six.text_type(s, encoding, 'replace')
def split_domain_port(host):
    """
    Return a (domain, port) tuple from a given host.
    Returned domain is lower-cased. If the host is invalid, the domain will be
    empty.
    """
    host = host.lower()
    if not host_validation_re.match(host):
        return '', ''
    if host.endswith(']'):
        # An IPv6 literal with no port attached.
        return host, ''
    domain, sep, port = host.rpartition(':')
    if sep:
        return domain, port
    # No colon at all: rpartition put the whole host into `port`.
    return port, ''
def validate_host(host, allowed_hosts):
    """
    Validate the given host for this site.
    Check that the host looks valid and matches a host or host pattern in the
    given list of ``allowed_hosts``. Any pattern beginning with a period
    matches a domain and all its subdomains (e.g. ``.example.com`` matches
    ``example.com`` and any subdomain), ``*`` matches anything, and anything
    else must match exactly.
    Note: This function assumes that the given host is lower-cased and has
    already had the port, if any, stripped off.
    Return ``True`` for a valid host, ``False`` otherwise.
    """
    # Strip a trailing dot (FQDN form) before matching.
    if host.endswith('.'):
        host = host[:-1]
    for pattern in allowed_hosts:
        pattern = pattern.lower()
        if pattern == '*' or pattern == host:
            return True
        # A leading-dot pattern matches the bare domain and any subdomain.
        if pattern.startswith('.') and (host.endswith(pattern) or
                                        host == pattern[1:]):
            return True
    return False
| apache-2.0 |
romain-li/edx-platform | lms/djangoapps/course_api/permissions.py | 85 | 1125 | """
Course API Authorization functions
"""
from student.roles import GlobalStaff
def can_view_courses_for_username(requesting_user, target_username):
    """
    Determine whether `requesting_user` has permission to view courses available
    to the user identified by `target_username`.
    Arguments:
        requesting_user (User): The user requesting permission to view another
        target_username (string):
            The name of the user `requesting_user` would like
            to access.
    Return value:
        Boolean:
            `True` if `requesting_user` is authorized to view courses as
            `target_username`. Otherwise, `False`
    Raises:
        TypeError if target_username is empty or None.
    """
    # Check self-access first: AnonymousUser has no username, so comparing
    # against the requester's own name must happen before rejecting an
    # empty target_username.
    if requesting_user.username == target_username:
        return True
    if not target_username:
        raise TypeError("target_username must be specified")
    return GlobalStaff().has_user(requesting_user)
| agpl-3.0 |
alexpap/exareme | exareme-tools/madis/src/lib/vtoutgtable.py | 5 | 4965 | from gtable import gtablefileFull , gjsonfileFull ,gjsonFull
from iterutils import peekable
from sqlitetypes import typestoSqliteTypes
import types
def vtoutpugtformat(out,diter,simplejson=True): #### TODO Work on types patttern
    """
    Reads diter, a stream of (row, header-info) tuples, and writes the rows to
    the file-like stream *out* as a Google-Visualization table: JSON format
    when simplejson is True, the google-like gtable format otherwise.
    Column names and types are derived from the first tuple's header info;
    columns whose SQLite type is unknown are guessed from a sample of values.
    """
    def unfold(it):
        # Strip the header-info element, yielding bare rows.
        for row,h in it:
            yield row
        return
    # peekable lets us inspect rows for name/type detection without
    # consuming them from the stream.
    d=peekable(diter)
    samplevals, sampleheads =d.peek()
    names=[]
    gtypes=[]
    mustguess=False
    for val, headinfo in zip(samplevals, sampleheads):
        names.append(headinfo[0].title())
        coltype=typestoSqliteTypes(headinfo[1])
        if coltype=="INTEGER" or coltype=="REAL" or coltype=="NUMERIC":
            gtypes.append('number')
        elif coltype=="TEXT":
            gtypes.append('string')
        else:
            # Unknown type: mark for value-based guessing below.
            mustguess=True
            gtypes.append("GUESS")
    if mustguess:
        # Inspect up to 30 rows; a single string-looking value makes the
        # whole column 'string', otherwise it stays 'number'.
        samples=d.maxpeek(30)
        samplestats=dict()
        for i in xrange(len(gtypes)):
            if gtypes[i]=="GUESS":
                samplestats[i]={'string':False,"number":False}
        for row in unfold(samples):
            allknown=True
            for uto in samplestats:
                if not samplestats[uto]['string']:
                    allknown=False
                    if row[uto]!="":
                        samplestats[uto][typeguessing(row[uto])]=True
            if allknown:
                break
        for uto in samplestats:
            if samplestats[uto]['string']:# or not samplestats[uto]['number']:
                gtypes[uto]='string'
            else:
                gtypes[uto]='number'
    if simplejson:
        #out.write(gjsonFull(unfold(d),names,gtypes).encode('utf-8'))
        gjsonfileFull(unfold(d),out,names,gtypes)
    else:
        gtablefileFull(unfold(d),out,names,gtypes)
        #out.write(gtableFull(unfold(d),names,gtypes).encode('utf-8'))
def typeguessing(el): ####Oi upoloipoi typoi
    """Classify *el* as 'number' or 'string' for Google table typing.

    Values with a leading zero (other than decimals like '0.5') are kept as
    strings so data such as zero-padded codes is not mangled; otherwise
    anything parseable as int or float counts as a number.
    """
    # import types
    # if type(el) not in types.StringTypes:
    # print "Element is : --%s-- , Type is %s Type of element not string!!!!!!!!!!!!!" %(el,type(el))
    # raise Exception
    if type(el) not in types.StringTypes:
        el=str(el)
    if el.startswith("0") and not el.startswith("0."):
        return 'string'
    try:
        int(el)
        return 'number'
    except ValueError:
        try:
            float(el)
            return 'number'
        except ValueError:
            return 'string'
"""
cols property
---------------
cols is an array of objects describing the ID and type of each column. Each property is an object with the following properties (case-sensitive):
* type [Required] Data type of the data in the column. Supports the following string values (examples include the v: property, described later):
o 'boolean' - JavaScript boolean value ('true' or 'false'). Example value: v:'true'
o 'number' - JavaScript number value. Example values: v:7 , v:3.14, v:-55
o 'string' - JavaScript string value. Example value: v:'hello'
o 'date' - JavaScript Date object (zero-based month), with the time truncated. Example value: v:new Date(2008, 0, 15)
o 'datetime' - JavaScript Date object including the time. Example value: v:new Date(2008, 0, 15, 14, 30, 45)
o 'timeofday' - Array of three numbers and an optional fourth, representing hour (0 indicates midnight), minute, second, and optional millisecond. Example values: v:[8, 15, 0], v: [6, 12, 1, 144]
* id [Optional] String ID of the column. Must be unique in the table. Use basic alphanumeric characters, so the host page does not require fancy escapes to access the column in JavaScript. Be careful not to choose a JavaScript keyword. Example: id:'col_1'
* label [Optional] String value that some visualizations display for this column. Example: label:'Height'
* pattern [Optional] String pattern that was used by a data source to format numeric, date, or time column values. This is for reference only; you probably won't need to read the pattern, and it isn't required to exist. The Google Visualization client does not use this value (it reads the cell's formatted value). If the DataTable has come from a data source in response to a query with a format clause, the pattern you specified in that clause will probably be returned in this value. The recommended pattern standards are the ICU DecimalFormat and SimpleDateFormat.
* p [Optional] An object that is a map of custom values applied to the cell. These values can be of any JavaScript type. If your visualization supports any cell-level properties, it will describe them; otherwise, this property will be ignored. Example: p:{style: 'border: 1px solid green;'}.
""" | mit |
jeroenh/OpenNSA | test/test_topology.py | 1 | 1847 | import StringIO
from twisted.trial import unittest
from opennsa import nsa
from opennsa.topology import gole
from . import topology as testtopology
TEST_PATH_1 = {
'source_stp' : nsa.STP('Aruba', 'A2'),
'dest_stp' : nsa.STP('Curacao', 'C3'),
'paths' : [ [ nsa.Link('Aruba', 'A2', 'A4'), nsa.Link('Bonaire', 'B1', 'B4'), nsa.Link('Curacao', 'C1', 'C3') ],
[ nsa.Link('Aruba', 'A2', 'A1'), nsa.Link('Dominica', 'D4', 'D1'), nsa.Link('Curacao', 'C4', 'C3') ]
]
}
TEST_PATH_2 = {
'source_stp' : nsa.STP('Aruba', 'A2'),
'dest_stp' : nsa.STP('Bonaire', 'B2'),
'paths' : [ [ nsa.Link('Aruba', 'A2', 'A4'), nsa.Link('Bonaire', 'B1', 'B2') ],
[ nsa.Link('Aruba', 'A2', 'A1'), nsa.Link('Dominica', 'D4', 'D1'), nsa.Link('Curacao', 'C4', 'C1'), nsa.Link('Bonaire', 'B4', 'B2') ] ]
}
# Currently we do not have bandwidth, so this us unused
TEST_PATH_3 = {
'source_stp': nsa.STP('Aruba', 'A2'),
'dest_stp' : nsa.STP('Bonaire', 'B3'),
'paths' : [ [ nsa.Link('Aruba', 'A2', 'A1'), nsa.Link('Dominica', 'D4', 'D1'), nsa.Link('Curacao', 'C4', 'C1'), nsa.Link('Bonaire', 'B4', 'B3') ] ],
'bandwidth' : nsa.BandwidthParameters(1000, 1000, 1000)
}
TEST_PATHS = [ TEST_PATH_1, TEST_PATH_2 ]
class GenericTopologyTest:
    # Mixin: concrete test cases must set self.topo (a parsed topology) in
    # their setUp() before this test runs.
    def testParseAndFindPath(self):
        # For every predefined case, findPaths must return exactly the
        # expected link sequences - no extras, none missing.
        for tp in TEST_PATHS:
            paths = self.topo.findPaths(tp['source_stp'], tp['dest_stp'], tp.get('bandwidth'))
            for path in paths:
                self.assertIn(path.network_links, tp['paths'])
            self.assertEquals(len(paths), len(tp['paths']))
class GOLETopologyTest(GenericTopologyTest, unittest.TestCase):
    # Runs the generic path-finding assertions against a topology parsed
    # from the in-memory GOLE test topology description.
    def setUp(self):
        f = StringIO.StringIO(testtopology.TEST_TOPOLOGY)
        self.topo, _ = gole.parseTopology( [f] )
| bsd-3-clause |
rven/odoo | addons/pad/models/pad.py | 1 | 5592 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
import random
import re
import string
import requests
from odoo import api, models, _
from odoo.exceptions import UserError
from ..py_etherpad import EtherpadLiteClient
_logger = logging.getLogger(__name__)
class PadCommon(models.AbstractModel):
    # Abstract mixin adding Etherpad-backed collaborative editing to a model.
    # Fields declared with a `pad_content_field` attribute store a pad URL;
    # the referenced content field mirrors the pad's HTML.
    _name = 'pad.common'
    _description = 'Pad Common'
    def _valid_field_parameter(self, field, name):
        # Allow the custom `pad_content_field` attribute on field definitions.
        return name == 'pad_content_field' or super()._valid_field_parameter(field, name)
    @api.model
    def pad_is_configured(self):
        # A pad server URL on the current company is the only requirement.
        return bool(self.env.company.pad_server)
    @api.model
    def pad_generate_url(self):
        """Create a new pad on the configured server and return a dict with
        its server, path and full URL. Returns only the (empty) server when
        no pad server is configured."""
        company = self.env.company.sudo()
        pad = {
            "server": company.pad_server,
            "key": company.pad_key,
        }
        # make sure pad server in the form of http://hostname
        if not pad["server"]:
            return pad
        if not pad["server"].startswith('http'):
            pad["server"] = 'http://' + pad["server"]
        pad["server"] = pad["server"].rstrip('/')
        # generate a salt
        s = string.ascii_uppercase + string.digits
        salt = ''.join([s[random.SystemRandom().randint(0, len(s) - 1)] for i in range(10)])
        # path
        # etherpad hardcodes pad id length limit to 50
        path = '-%s-%s' % (self._name, salt)
        path = '%s%s' % (self.env.cr.dbname.replace('_', '-')[0:50 - len(path)], path)
        # contruct the url
        url = '%s/p/%s' % (pad["server"], path)
        # if create with content
        if self.env.context.get('field_name') and self.env.context.get('model'):
            myPad = EtherpadLiteClient(pad["key"], pad["server"] + '/api')
            try:
                myPad.createPad(path)
            except IOError:
                raise UserError(_("Pad creation failed, either there is a problem with your pad server URL or with your connection."))
            # get attr on the field model
            model = self.env[self.env.context["model"]]
            field = model._fields[self.env.context['field_name']]
            real_field = field.pad_content_field
            res_id = self.env.context.get("object_id")
            record = model.browse(res_id)
            # get content of the real field
            real_field_value = record[real_field] or self.env.context.get('record', {}).get(real_field, '')
            if real_field_value:
                myPad.setHtmlFallbackText(path, real_field_value)
        return {
            "server": pad["server"],
            "path": path,
            "url": url,
        }
    @api.model
    def pad_get_content(self, url):
        """Fetch a pad's HTML content, first via the authenticated API and,
        if that fails, via the public /export/html endpoint."""
        company = self.env.company.sudo()
        myPad = EtherpadLiteClient(company.pad_key, (company.pad_server or '') + '/api')
        content = ''
        if url:
            split_url = url.split('/p/')
            path = len(split_url) == 2 and split_url[1]
            try:
                content = myPad.getHtml(path).get('html', '')
            except IOError:
                _logger.warning('Http Error: the credentials might be absent for url: "%s". Falling back.' % url)
                try:
                    r = requests.get('%s/export/html' % url)
                    r.raise_for_status()
                except Exception:
                    _logger.warning("No pad found with url '%s'.", url)
                else:
                    mo = re.search('<body>(.*)</body>', r.content.decode(), re.DOTALL)
                    if mo:
                        content = mo.group(1)
        return content
    # TODO
    # reverse engineer protocol to be setHtml without using the api key
    def write(self, vals):
        # Keep pad and content field synchronized in both directions on write.
        self._set_field_to_pad(vals)
        self._set_pad_to_field(vals)
        return super(PadCommon, self).write(vals)
    @api.model
    def create(self, vals):
        # Case of a regular creation: we receive the pad url, so we need to update the
        # corresponding field
        self._set_pad_to_field(vals)
        pad = super(PadCommon, self).create(vals)
        # Case of a programmatical creation (e.g. copy): we receive the field content, so we need
        # to create the corresponding pad
        if self.env.context.get('pad_no_create', False):
            return pad
        for k, field in self._fields.items():
            if hasattr(field, 'pad_content_field') and k not in vals:
                ctx = {
                    'model': self._name,
                    'field_name': k,
                    'object_id': pad.id,
                }
                pad_info = self.with_context(**ctx).pad_generate_url()
                pad[k] = pad_info.get('url')
        return pad
    def _set_field_to_pad(self, vals):
        # Update the pad if the `pad_content_field` is modified
        for k, field in self._fields.items():
            if hasattr(field, 'pad_content_field') and vals.get(field.pad_content_field) and self[k]:
                company = self.env.user.sudo().company_id
                myPad = EtherpadLiteClient(company.pad_key, (company.pad_server or '') + '/api')
                path = self[k].split('/p/')[1]
                myPad.setHtmlFallbackText(path, vals[field.pad_content_field])
    def _set_pad_to_field(self, vals):
        # Update the `pad_content_field` if the pad is modified
        for k, v in list(vals.items()):
            field = self._fields.get(k)
            if hasattr(field, 'pad_content_field'):
                vals[field.pad_content_field] = self.pad_get_content(v)
| agpl-3.0 |
Cnfc19932/scrapy | tests/test_item.py | 129 | 7291 | import unittest
from scrapy.item import Item, Field
import six
class ItemTest(unittest.TestCase):
    """Tests for scrapy.item.Item / Field: dict-style access, attribute
    restrictions, and the ItemMeta metaclass (fields collection, the
    ``fields`` class attribute, and multiple-inheritance resolution)."""
    # Helper: order-insensitive comparison of two iterables.
    def assertSortedEqual(self, first, second, msg=None):
        return self.assertEqual(sorted(first), sorted(second), msg)
    def test_simple(self):
        class TestItem(Item):
            name = Field()
        i = TestItem()
        i['name'] = u'name'
        self.assertEqual(i['name'], u'name')
    def test_init(self):
        class TestItem(Item):
            name = Field()
        i = TestItem()
        self.assertRaises(KeyError, i.__getitem__, 'name')
        i2 = TestItem(name=u'john doe')
        self.assertEqual(i2['name'], u'john doe')
        i3 = TestItem({'name': u'john doe'})
        self.assertEqual(i3['name'], u'john doe')
        i4 = TestItem(i3)
        self.assertEqual(i4['name'], u'john doe')
        # Initialising with an undeclared field must fail.
        self.assertRaises(KeyError, TestItem, {'name': u'john doe',
                                               'other': u'foo'})
    def test_invalid_field(self):
        class TestItem(Item):
            pass
        i = TestItem()
        self.assertRaises(KeyError, i.__setitem__, 'field', 'text')
        self.assertRaises(KeyError, i.__getitem__, 'field')
    def test_repr(self):
        class TestItem(Item):
            name = Field()
            number = Field()
        i = TestItem()
        i['name'] = u'John Doe'
        i['number'] = 123
        itemrepr = repr(i)
        # repr differs between Python 2 (u-prefixed strings) and 3.
        if six.PY2:
            self.assertEqual(itemrepr,
                             "{'name': u'John Doe', 'number': 123}")
        else:
            self.assertEqual(itemrepr,
                             "{'name': 'John Doe', 'number': 123}")
        # The repr must round-trip through eval().
        i2 = eval(itemrepr)
        self.assertEqual(i2['name'], 'John Doe')
        self.assertEqual(i2['number'], 123)
    def test_private_attr(self):
        # Underscore-prefixed attributes bypass the field restriction.
        class TestItem(Item):
            name = Field()
        i = TestItem()
        i._private = 'test'
        self.assertEqual(i._private, 'test')
    def test_raise_getattr(self):
        # Fields are only reachable via item['name'], not attribute access.
        class TestItem(Item):
            name = Field()
        i = TestItem()
        self.assertRaises(AttributeError, getattr, i, 'name')
    def test_raise_setattr(self):
        class TestItem(Item):
            name = Field()
        i = TestItem()
        self.assertRaises(AttributeError, setattr, i, 'name', 'john')
    def test_custom_methods(self):
        class TestItem(Item):
            name = Field()
            def get_name(self):
                return self['name']
            def change_name(self, name):
                self['name'] = name
        i = TestItem()
        self.assertRaises(KeyError, i.get_name)
        i['name'] = u'lala'
        self.assertEqual(i.get_name(), u'lala')
        i.change_name(u'other')
        self.assertEqual(i.get_name(), 'other')
    def test_metaclass(self):
        # Field names that shadow dict methods (keys/values) must still work.
        class TestItem(Item):
            name = Field()
            keys = Field()
            values = Field()
        i = TestItem()
        i['name'] = u'John'
        self.assertEqual(list(i.keys()), ['name'])
        self.assertEqual(list(i.values()), ['John'])
        i['keys'] = u'Keys'
        i['values'] = u'Values'
        self.assertSortedEqual(list(i.keys()), ['keys', 'values', 'name'])
        self.assertSortedEqual(list(i.values()), [u'Keys', u'Values', u'John'])
    def test_metaclass_with_fields_attribute(self):
        # Fields may also be declared via an explicit ``fields`` dict.
        class TestItem(Item):
            fields = {'new': Field(default='X')}
        item = TestItem(new=u'New')
        self.assertSortedEqual(list(item.keys()), ['new'])
        self.assertSortedEqual(list(item.values()), [u'New'])
    def test_metaclass_inheritance(self):
        class BaseItem(Item):
            name = Field()
            keys = Field()
            values = Field()
        class TestItem(BaseItem):
            keys = Field()
        i = TestItem()
        i['keys'] = 3
        self.assertEqual(list(i.keys()), ['keys'])
        self.assertEqual(list(i.values()), [3])
    def test_metaclass_multiple_inheritance_simple(self):
        # With multiple bases, field defaults follow the MRO (first base wins).
        class A(Item):
            fields = {'load': Field(default='A')}
            save = Field(default='A')
        class B(A): pass
        class C(Item):
            fields = {'load': Field(default='C')}
            save = Field(default='C')
        class D(B, C): pass
        item = D(save='X', load='Y')
        self.assertEqual(item['save'], 'X')
        self.assertEqual(item['load'], 'Y')
        self.assertEqual(D.fields, {'load': {'default': 'A'},
                                    'save': {'default': 'A'}})
        # D class inverted
        class E(C, B): pass
        self.assertEqual(E(save='X')['save'], 'X')
        self.assertEqual(E(load='X')['load'], 'X')
        self.assertEqual(E.fields, {'load': {'default': 'C'},
                                    'save': {'default': 'C'}})
    def test_metaclass_multiple_inheritance_diamond(self):
        class A(Item):
            fields = {'update': Field(default='A')}
            save = Field(default='A')
            load = Field(default='A')
        class B(A): pass
        class C(A):
            fields = {'update': Field(default='C')}
            save = Field(default='C')
        class D(B, C):
            fields = {'update': Field(default='D')}
            load = Field(default='D')
        self.assertEqual(D(save='X')['save'], 'X')
        self.assertEqual(D(load='X')['load'], 'X')
        # Own declarations win; otherwise C (before A in the MRO) provides
        # 'save' because B declares nothing of its own.
        self.assertEqual(D.fields, {'save': {'default': 'C'},
            'load': {'default': 'D'}, 'update': {'default': 'D'}})
        # D class inverted
        class E(C, B):
            load = Field(default='E')
        self.assertEqual(E(save='X')['save'], 'X')
        self.assertEqual(E(load='X')['load'], 'X')
        self.assertEqual(E.fields, {'save': {'default': 'C'},
            'load': {'default': 'E'}, 'update': {'default': 'C'}})
    def test_metaclass_multiple_inheritance_without_metaclass(self):
        # Fields declared on a plain (non-Item) base class are ignored.
        class A(Item):
            fields = {'load': Field(default='A')}
            save = Field(default='A')
        class B(A): pass
        class C(object):
            fields = {'load': Field(default='C')}
            not_allowed = Field(default='not_allowed')
            save = Field(default='C')
        class D(B, C): pass
        self.assertRaises(KeyError, D, not_allowed='value')
        self.assertEqual(D(save='X')['save'], 'X')
        self.assertEqual(D.fields, {'save': {'default': 'A'},
                                    'load': {'default': 'A'}})
        # D class inverted
        class E(C, B): pass
        self.assertRaises(KeyError, E, not_allowed='value')
        self.assertEqual(E(save='X')['save'], 'X')
        self.assertEqual(E.fields, {'save': {'default': 'A'},
                                    'load': {'default': 'A'}})
    def test_to_dict(self):
        class TestItem(Item):
            name = Field()
        i = TestItem()
        i['name'] = u'John'
        self.assertEqual(dict(i), {'name': u'John'})
    def test_copy(self):
        # copy() must produce an independent item, not a shared reference.
        class TestItem(Item):
            name = Field()
        item = TestItem({'name':'lower'})
        copied_item = item.copy()
        self.assertNotEqual(id(item), id(copied_item))
        copied_item['name'] = copied_item['name'].upper()
        self.assertNotEqual(item['name'], copied_item['name'])
# Allow running this test module directly: ``python test_item.py``.
if __name__ == "__main__":
    unittest.main()
| bsd-3-clause |
Jon-ICS/upm | examples/python/grovetemp.py | 7 | 1892 | from __future__ import print_function
# Author: Brendan Le Foll <brendan.le.foll@intel.com>
# Contributions: Sarah Knepper <sarah.knepper@intel.com>
# Copyright (c) 2014 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import time
from upm import pyupm_grove as grove
def main():
    """Demo: sample a Grove temperature sensor ten times, once per second."""
    # Temperature sensor wired to analog input AIO 0.
    temp = grove.GroveTemp(0)
    print(temp.name())
    # Report each reading in both Celsius and (derived) Fahrenheit.
    for _ in range(10):
        celsius = temp.value()
        fahrenheit = celsius * 9.0 / 5.0 + 32.0
        print("%d degrees Celsius, or %d degrees Fahrenheit"
              % (celsius, fahrenheit))
        time.sleep(1)
    # Drop our reference so the sensor object is released.
    del temp
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
| mit |
Azure/azure-sdk-for-python | sdk/keyvault/azure-keyvault-keys/azure/keyvault/keys/_generated/v2016_10_01/aio/operations/_key_vault_client_operations.py | 1 | 230715 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
# Generic type returned by the per-operation ``cls`` response hook.
T = TypeVar('T')
# Signature of the optional ``cls`` callback: receives the raw pipeline
# response, the deserialized model, and the response headers dict.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class KeyVaultClientOperationsMixin:
async def create_key(
self,
vault_base_url: str,
key_name: str,
parameters: "_models.KeyCreateParameters",
**kwargs: Any
) -> "_models.KeyBundle":
"""Creates a new key, stores it, then returns key parameters and attributes to the client.
The create key operation can be used to create any key type in Azure Key Vault. If the named
key already exists, Azure Key Vault creates a new version of the key. It requires the
keys/create permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name for the new key. The system will generate the version name for the
new key.
:type key_name: str
:param parameters: The parameters to create a key.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_key.metadata = {'url': '/keys/{key-name}/create'} # type: ignore
async def import_key(
self,
vault_base_url: str,
key_name: str,
parameters: "_models.KeyImportParameters",
**kwargs: Any
) -> "_models.KeyBundle":
"""Imports an externally created key, stores it, and returns key parameters and attributes to the client.
The import key operation may be used to import any key type into an Azure Key Vault. If the
named key already exists, Azure Key Vault creates a new version of the key. This operation
requires the keys/import permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: Name for the imported key.
:type key_name: str
:param parameters: The parameters to import a key.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyImportParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.import_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyImportParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
import_key.metadata = {'url': '/keys/{key-name}'} # type: ignore
async def delete_key(
self,
vault_base_url: str,
key_name: str,
**kwargs: Any
) -> "_models.DeletedKeyBundle":
"""Deletes a key of any type from storage in Azure Key Vault.
The delete key operation cannot be used to remove individual versions of a key. This operation
removes the cryptographic material associated with the key, which means the key is not usable
for Sign/Verify, Wrap/Unwrap or Encrypt/Decrypt operations. This operation requires the
keys/delete permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key to delete.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedKeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.DeletedKeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedKeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedKeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_key.metadata = {'url': '/keys/{key-name}'} # type: ignore
async def update_key(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyUpdateParameters",
**kwargs: Any
) -> "_models.KeyBundle":
"""The update key operation changes specified attributes of a stored key and can be applied to any key type and key version stored in Azure Key Vault.
In order to perform this operation, the key must already exist in the Key Vault. Note: The
cryptographic material of a key itself cannot be changed. This operation requires the
keys/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of key to update.
:type key_name: str
:param key_version: The version of the key to update.
:type key_version: str
:param parameters: The parameters of the key to update.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_key.metadata = {'url': '/keys/{key-name}/{key-version}'} # type: ignore
async def get_key(
self,
vault_base_url: str,
key_name: str,
key_version: str,
**kwargs: Any
) -> "_models.KeyBundle":
"""Gets the public part of a stored key.
The get key operation is applicable to all key types. If the requested key is symmetric, then
no key material is released in the response. This operation requires the keys/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key to get.
:type key_name: str
:param key_version: Adding the version parameter retrieves a specific version of a key.
:type key_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_key.metadata = {'url': '/keys/{key-name}/{key-version}'} # type: ignore
def get_key_versions(
self,
vault_base_url: str,
key_name: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.KeyListResult"]:
"""Retrieves a list of individual key versions with the same key name.
The full key identifier, attributes, and tags are provided in the response. This operation
requires the keys/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.KeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_key_versions.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('KeyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_key_versions.metadata = {'url': '/keys/{key-name}/versions'} # type: ignore
def get_keys(
self,
vault_base_url: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.KeyListResult"]:
"""List keys in the specified vault.
Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the
public part of a stored key. The LIST operation is applicable to all key types, however only
the base key identifier, attributes, and tags are provided in the response. Individual versions
of a key are not listed in the response. This operation requires the keys/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either KeyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.KeyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_keys.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('KeyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_keys.metadata = {'url': '/keys'} # type: ignore
async def backup_key(
self,
vault_base_url: str,
key_name: str,
**kwargs: Any
) -> "_models.BackupKeyResult":
"""Requests that a backup of the specified key be downloaded to the client.
The Key Backup operation exports a key from Azure Key Vault in a protected form. Note that this
operation does NOT return key material in a form that can be used outside the Azure Key Vault
system, the returned key material is either protected to a Azure Key Vault HSM or to Azure Key
Vault itself. The intent of this operation is to allow a client to GENERATE a key in one Azure
Key Vault instance, BACKUP the key, and then RESTORE it into another Azure Key Vault instance.
The BACKUP operation may be used to export, in protected form, any key type from Azure Key
Vault. Individual versions of a key cannot be backed up. BACKUP / RESTORE can be performed
within geographical boundaries only; meaning that a BACKUP from one geographical area cannot be
restored to another geographical area. For example, a backup from the US geographical area
cannot be restored in an EU geographical area. This operation requires the key/backup
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: BackupKeyResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.BackupKeyResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.BackupKeyResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.backup_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('BackupKeyResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
backup_key.metadata = {'url': '/keys/{key-name}/backup'} # type: ignore
async def restore_key(
self,
vault_base_url: str,
parameters: "_models.KeyRestoreParameters",
**kwargs: Any
) -> "_models.KeyBundle":
"""Restores a backed up key to a vault.
Imports a previously backed up key into Azure Key Vault, restoring the key, its key identifier,
attributes and access control policies. The RESTORE operation may be used to import a
previously backed up key. Individual versions of a key cannot be restored. The key is restored
in its entirety with the same key name as it had when it was backed up. If the key name is not
available in the target Key Vault, the RESTORE operation will be rejected. While the key name
is retained during restore, the final key identifier will change if the key is restored to a
different vault. Restore will restore all versions and preserve version identifiers. The
RESTORE operation is subject to security constraints: The target Key Vault must be owned by the
same Microsoft Azure Subscription as the source Key Vault The user must have RESTORE permission
in the target Key Vault. This operation requires the keys/restore permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param parameters: The parameters to restore the key.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyRestoreParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.restore_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyRestoreParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
restore_key.metadata = {'url': '/keys/restore'} # type: ignore
async def encrypt(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyOperationsParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Encrypts an arbitrary sequence of bytes using an encryption key that is stored in a key vault.
The ENCRYPT operation encrypts an arbitrary sequence of bytes using an encryption key that is
stored in Azure Key Vault. Note that the ENCRYPT operation only supports a single block of
data, the size of which is dependent on the target key and the encryption algorithm to be used.
The ENCRYPT operation is only strictly necessary for symmetric keys stored in Azure Key Vault
since protection with an asymmetric key can be performed using public portion of the key. This
operation is supported for asymmetric keys as a convenience for callers that have a
key-reference but do not have access to the public key material. This operation requires the
keys/encrypt permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the encryption operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.encrypt.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyOperationsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
encrypt.metadata = {'url': '/keys/{key-name}/{key-version}/encrypt'} # type: ignore
async def decrypt(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyOperationsParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Decrypts a single block of encrypted data.
The DECRYPT operation decrypts a well-formed block of ciphertext using the target encryption
key and specified algorithm. This operation is the reverse of the ENCRYPT operation; only a
single block of data may be decrypted, the size of this block is dependent on the target key
and the algorithm to be used. The DECRYPT operation applies to asymmetric and symmetric keys
stored in Azure Key Vault since it uses the private portion of the key. This operation requires
the keys/decrypt permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the decryption operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.decrypt.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyOperationsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
decrypt.metadata = {'url': '/keys/{key-name}/{key-version}/decrypt'} # type: ignore
async def sign(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeySignParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Creates a signature from a digest using the specified key.
The SIGN operation is applicable to asymmetric and symmetric keys stored in Azure Key Vault
since this operation uses the private portion of the key. This operation requires the keys/sign
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the signing operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeySignParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.sign.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeySignParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
sign.metadata = {'url': '/keys/{key-name}/{key-version}/sign'} # type: ignore
async def verify(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyVerifyParameters",
**kwargs: Any
) -> "_models.KeyVerifyResult":
"""Verifies a signature using a specified key.
The VERIFY operation is applicable to symmetric keys stored in Azure Key Vault. VERIFY is not
strictly necessary for asymmetric keys stored in Azure Key Vault since signature verification
can be performed using the public portion of the key but this operation is supported as a
convenience for callers that only have a key-reference and not the public portion of the key.
This operation requires the keys/verify permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for verify operations.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyVerifyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyVerifyResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyVerifyResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyVerifyResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.verify.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyVerifyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyVerifyResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
verify.metadata = {'url': '/keys/{key-name}/{key-version}/verify'} # type: ignore
async def wrap_key(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyOperationsParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Wraps a symmetric key using a specified key.
The WRAP operation supports encryption of a symmetric key using a key encryption key that has
previously been stored in an Azure Key Vault. The WRAP operation is only strictly necessary for
symmetric keys stored in Azure Key Vault since protection with an asymmetric key can be
performed using the public portion of the key. This operation is supported for asymmetric keys
as a convenience for callers that have a key-reference but do not have access to the public key
material. This operation requires the keys/wrapKey permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for wrap operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.wrap_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyOperationsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
wrap_key.metadata = {'url': '/keys/{key-name}/{key-version}/wrapkey'} # type: ignore
async def unwrap_key(
self,
vault_base_url: str,
key_name: str,
key_version: str,
parameters: "_models.KeyOperationsParameters",
**kwargs: Any
) -> "_models.KeyOperationResult":
"""Unwraps a symmetric key using the specified key that was initially used for wrapping that key.
The UNWRAP operation supports decryption of a symmetric key using the target key encryption
key. This operation is the reverse of the WRAP operation. The UNWRAP operation applies to
asymmetric and symmetric keys stored in Azure Key Vault since it uses the private portion of
the key. This operation requires the keys/unwrapKey permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:param key_version: The version of the key.
:type key_version: str
:param parameters: The parameters for the key operation.
:type parameters: ~azure.keyvault.v2016_10_01.models.KeyOperationsParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KeyOperationResult, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.KeyOperationResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KeyOperationResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.unwrap_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
'key-version': self._serialize.url("key_version", key_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'KeyOperationsParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('KeyOperationResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
unwrap_key.metadata = {'url': '/keys/{key-name}/{key-version}/unwrapkey'} # type: ignore
    def get_deleted_keys(
        self,
        vault_base_url: str,
        maxresults: Optional[int] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.DeletedKeyListResult"]:
        """Lists the deleted keys in the specified vault.
        Retrieves a list of the keys in the Key Vault as JSON Web Key structures that contain the
        public part of a deleted key. This operation includes deletion-specific information. The Get
        Deleted Keys operation is applicable for vaults enabled for soft-delete. While the operation
        can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled
        vault. This operation requires the keys/list permission.
        :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
        :type vault_base_url: str
        :param maxresults: Maximum number of results to return in a page. If not specified the service
         will return up to 25 results.
        :type maxresults: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DeletedKeyListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.DeletedKeyListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedKeyListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-10-01"
        accept = "application/json"
        # Builds the GET request for one page: the first page expands this
        # operation's URL template and adds the query parameters; continuation
        # pages reuse the service-supplied next_link (re-expanding only the
        # vault base address, since next_link already carries the query string).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.get_deleted_keys.metadata['url']  # type: ignore
                path_format_arguments = {
                    'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if maxresults is not None:
                    # Service caps a page at 25 results; the serializer enforces 1..25.
                    query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                path_format_arguments = {
                    'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserializes one page and returns (continuation token or None, items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('DeletedKeyListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                # Optional caller hook transforms the page's element list.
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        # Fetches the page at next_link (or the first page) through the
        # pipeline and raises on any non-200 status.
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)
            return pipeline_response
        # AsyncItemPaged drives the two closures lazily as the caller iterates.
        return AsyncItemPaged(
            get_next, extract_data
        )
    get_deleted_keys.metadata = {'url': '/deletedkeys'}  # type: ignore
async def get_deleted_key(
self,
vault_base_url: str,
key_name: str,
**kwargs: Any
) -> "_models.DeletedKeyBundle":
"""Gets the public part of a deleted key.
The Get Deleted Key operation is applicable for soft-delete enabled vaults. While the operation
can be invoked on any vault, it will return an error if invoked on a non soft-delete enabled
vault. This operation requires the keys/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param key_name: The name of the key.
:type key_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedKeyBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.DeletedKeyBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedKeyBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_deleted_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'key-name': self._serialize.url("key_name", key_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedKeyBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_deleted_key.metadata = {'url': '/deletedkeys/{key-name}'} # type: ignore
async def purge_deleted_key(
    self,
    vault_base_url: str,
    key_name: str,
    **kwargs: Any
) -> None:
    """Permanently delete the specified key, without possibility of recovery.

    Applicable only to soft-delete enabled vaults; invoking it on a vault
    without soft-delete returns an error. Requires the keys/purge permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param key_name: The name of the key.
    :type key_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    # Expand the URL template from the operation metadata.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'key-name': self._serialize.url("key_name", key_name, 'str'),
    }
    endpoint = self._client.format_url(self.purge_deleted_key.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    # A successful purge returns 204 No Content.
    if http_resp.status_code not in [204]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
        raise HttpResponseError(response=http_resp, model=error)

    if cls:
        return cls(pipeline_response, None, {})
purge_deleted_key.metadata = {'url': '/deletedkeys/{key-name}'}  # type: ignore
async def recover_deleted_key(
    self,
    vault_base_url: str,
    key_name: str,
    **kwargs: Any
) -> "_models.KeyBundle":
    """Recover the deleted key to its latest version.

    Applicable to deleted keys in soft-delete enabled vaults; restores the key
    back under /keys. Recovering a non-deleted key returns an error. This is
    the inverse of the delete operation and requires the keys/recover
    permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param key_name: The name of the deleted key.
    :type key_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: KeyBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.KeyBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.KeyBundle"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    # Expand the URL template from the operation metadata.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'key-name': self._serialize.url("key_name", key_name, 'str'),
    }
    endpoint = self._client.format_url(self.recover_deleted_key.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
        raise HttpResponseError(response=http_resp, model=error)

    result = self._deserialize('KeyBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
recover_deleted_key.metadata = {'url': '/deletedkeys/{key-name}/recover'}  # type: ignore
async def set_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    parameters: "_models.SecretSetParameters",
    **kwargs: Any
) -> "_models.SecretBundle":
    """Set a secret in a specified key vault.

    Adds a secret to the Azure Key Vault; if the named secret already exists a
    new version of it is created. Requires the secrets/set permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :param parameters: The parameters for setting the secret.
    :type parameters: ~azure.keyvault.v2016_10_01.models.SecretSetParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template; the service constrains secret names to
    # alphanumerics and hyphens, enforced client-side via the pattern.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
    }
    endpoint = self._client.format_url(self.set_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body_kwargs = {'content': self._serialize.body(parameters, 'SecretSetParameters')}  # type: Dict[str, Any]
    request = self._client.put(endpoint, query, headers, **body_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
        raise HttpResponseError(response=http_resp, model=error)

    result = self._deserialize('SecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
set_secret.metadata = {'url': '/secrets/{secret-name}'}  # type: ignore
async def delete_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> "_models.DeletedSecretBundle":
    """Delete a secret from a specified key vault.

    Applies to any secret stored in Azure Key Vault; individual versions of a
    secret cannot be deleted separately. Requires the secrets/delete
    permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DeletedSecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.DeletedSecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedSecretBundle"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    # Expand the URL template from the operation metadata.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    endpoint = self._client.format_url(self.delete_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
        raise HttpResponseError(response=http_resp, model=error)

    result = self._deserialize('DeletedSecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
delete_secret.metadata = {'url': '/secrets/{secret-name}'}  # type: ignore
async def update_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    secret_version: str,
    parameters: "_models.SecretUpdateParameters",
    **kwargs: Any
) -> "_models.SecretBundle":
    """Update the attributes associated with a specified secret.

    Changes the specified attributes of an existing stored secret; attributes
    not present in the request are left unchanged. The secret value itself
    cannot be changed. Requires the secrets/set permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :param secret_version: The version of the secret.
    :type secret_version: str
    :param parameters: The parameters for update secret operation.
    :type parameters: ~azure.keyvault.v2016_10_01.models.SecretUpdateParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Expand the URL template from the operation metadata.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
        'secret-version': self._serialize.url("secret_version", secret_version, 'str'),
    }
    endpoint = self._client.format_url(self.update_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body_kwargs = {'content': self._serialize.body(parameters, 'SecretUpdateParameters')}  # type: Dict[str, Any]
    request = self._client.patch(endpoint, query, headers, **body_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
        raise HttpResponseError(response=http_resp, model=error)

    result = self._deserialize('SecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
update_secret.metadata = {'url': '/secrets/{secret-name}/{secret-version}'}  # type: ignore
async def get_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    secret_version: str,
    **kwargs: Any
) -> "_models.SecretBundle":
    """Get a specified secret from a given key vault.

    Applicable to any secret stored in Azure Key Vault. Requires the
    secrets/get permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :param secret_version: The version of the secret.
    :type secret_version: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    # Expand the URL template from the operation metadata.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
        'secret-version': self._serialize.url("secret_version", secret_version, 'str'),
    }
    endpoint = self._client.format_url(self.get_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
        raise HttpResponseError(response=http_resp, model=error)

    result = self._deserialize('SecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
get_secret.metadata = {'url': '/secrets/{secret-name}/{secret-version}'}  # type: ignore
def get_secrets(
    self,
    vault_base_url: str,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.SecretListResult"]:
    """List secrets in a specified key vault.

    Applicable to the entire vault; only the base secret identifier and its
    attributes are provided in the response — individual secret versions are
    not listed. Requires the secrets/list permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param maxresults: Maximum number of results to return in a page. If not specified, the service
     will return up to 25 results.
    :type maxresults: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SecretListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.SecretListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretListResult"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page request or a continuation request.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        path_args = {
            'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        }
        if next_link:
            # Continuation: follow the service-supplied link with no query params.
            endpoint = self._client.format_url(next_link, **path_args)
            return self._client.get(endpoint, {}, headers)
        endpoint = self._client.format_url(self.get_secrets.metadata['url'], **path_args)  # type: ignore
        query = {}  # type: Dict[str, Any]
        if maxresults is not None:
            query['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
        query['api-version'] = self._serialize.query("api_version", api_version, 'str')
        return self._client.get(endpoint, query, headers)

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation token, items).
        page = self._deserialize('SecretListResult', pipeline_response)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, AsyncList(items)

    async def get_next(next_link=None):
        pipeline_response = await self._client._pipeline.run(
            prepare_request(next_link), stream=False, **kwargs
        )
        http_resp = pipeline_response.http_response
        if http_resp.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
            map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
            raise HttpResponseError(response=http_resp, model=error)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
get_secrets.metadata = {'url': '/secrets'}  # type: ignore
def get_secret_versions(
    self,
    vault_base_url: str,
    secret_name: str,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.SecretListResult"]:
    """List all versions of the specified secret.

    The full secret identifier and attributes are provided in the response;
    no secret values are returned. Requires the secrets/list permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :param maxresults: Maximum number of results to return in a page. If not specified, the service
     will return up to 25 results.
    :type maxresults: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either SecretListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.SecretListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretListResult"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page request or a continuation request.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        path_args = {
            'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
            'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
        }
        if next_link:
            # Continuation: follow the service-supplied link with no query params.
            endpoint = self._client.format_url(next_link, **path_args)
            return self._client.get(endpoint, {}, headers)
        endpoint = self._client.format_url(self.get_secret_versions.metadata['url'], **path_args)  # type: ignore
        query = {}  # type: Dict[str, Any]
        if maxresults is not None:
            query['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
        query['api-version'] = self._serialize.query("api_version", api_version, 'str')
        return self._client.get(endpoint, query, headers)

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation token, items).
        page = self._deserialize('SecretListResult', pipeline_response)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, AsyncList(items)

    async def get_next(next_link=None):
        pipeline_response = await self._client._pipeline.run(
            prepare_request(next_link), stream=False, **kwargs
        )
        http_resp = pipeline_response.http_response
        if http_resp.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
            map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
            raise HttpResponseError(response=http_resp, model=error)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
get_secret_versions.metadata = {'url': '/secrets/{secret-name}/versions'}  # type: ignore
def get_deleted_secrets(
    self,
    vault_base_url: str,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.DeletedSecretListResult"]:
    """List deleted secrets for the specified vault.

    Returns the secrets that have been deleted for a vault enabled for
    soft-delete. Requires the secrets/list permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param maxresults: Maximum number of results to return in a page. If not specified the service
     will return up to 25 results.
    :type maxresults: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either DeletedSecretListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.DeletedSecretListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedSecretListResult"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build either the first-page request or a continuation request.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        path_args = {
            'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        }
        if next_link:
            # Continuation: follow the service-supplied link with no query params.
            endpoint = self._client.format_url(next_link, **path_args)
            return self._client.get(endpoint, {}, headers)
        endpoint = self._client.format_url(self.get_deleted_secrets.metadata['url'], **path_args)  # type: ignore
        query = {}  # type: Dict[str, Any]
        if maxresults is not None:
            query['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
        query['api-version'] = self._serialize.query("api_version", api_version, 'str')
        return self._client.get(endpoint, query, headers)

    async def extract_data(pipeline_response):
        # Deserialize one page and hand back (continuation token, items).
        page = self._deserialize('DeletedSecretListResult', pipeline_response)
        items = page.value
        if cls:
            items = cls(items)
        return page.next_link or None, AsyncList(items)

    async def get_next(next_link=None):
        pipeline_response = await self._client._pipeline.run(
            prepare_request(next_link), stream=False, **kwargs
        )
        http_resp = pipeline_response.http_response
        if http_resp.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
            map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
            raise HttpResponseError(response=http_resp, model=error)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
get_deleted_secrets.metadata = {'url': '/deletedsecrets'}  # type: ignore
async def get_deleted_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> "_models.DeletedSecretBundle":
    """Get the specified deleted secret.

    Returns the specified deleted secret along with its attributes. Requires
    the secrets/get permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DeletedSecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.DeletedSecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedSecretBundle"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    # Expand the URL template from the operation metadata.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    endpoint = self._client.format_url(self.get_deleted_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
        raise HttpResponseError(response=http_resp, model=error)

    result = self._deserialize('DeletedSecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
get_deleted_secret.metadata = {'url': '/deletedsecrets/{secret-name}'}  # type: ignore
async def purge_deleted_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> None:
    """Permanently delete the specified secret, without possibility of recovery.

    Can only be performed on a soft-delete enabled vault. Requires the
    secrets/purge permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    # Expand the URL template from the operation metadata.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    endpoint = self._client.format_url(self.purge_deleted_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    # A successful purge returns 204 No Content.
    if http_resp.status_code not in [204]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
        raise HttpResponseError(response=http_resp, model=error)

    if cls:
        return cls(pipeline_response, None, {})
purge_deleted_secret.metadata = {'url': '/deletedsecrets/{secret-name}'}  # type: ignore
async def recover_deleted_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> "_models.SecretBundle":
    """Recover the deleted secret to the latest version.

    Recovers the deleted secret in the specified vault. Can only be performed
    on a soft-delete enabled vault. Requires the secrets/recover permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the deleted secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    # Default status->exception map, overridable by the caller.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        **kwargs.pop('error_map', {}),
    }
    api_version = "2016-10-01"
    accept = "application/json"

    # Expand the URL template from the operation metadata.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    endpoint = self._client.format_url(self.recover_deleted_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_resp = pipeline_response.http_response

    if http_resp.status_code not in [200]:
        map_error(status_code=http_resp.status_code, response=http_resp, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_resp)
        raise HttpResponseError(response=http_resp, model=error)

    result = self._deserialize('SecretBundle', pipeline_response)
    if cls:
        return cls(pipeline_response, result, {})
    return result
recover_deleted_secret.metadata = {'url': '/deletedsecrets/{secret-name}/recover'}  # type: ignore
async def backup_secret(
    self,
    vault_base_url: str,
    secret_name: str,
    **kwargs: Any
) -> "_models.BackupSecretResult":
    """Backs up the specified secret.

    Requests that a backup of the specified secret be downloaded to the client. All versions of
    the secret will be downloaded. This operation requires the secrets/backup permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param secret_name: The name of the secret.
    :type secret_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: BackupSecretResult, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.BackupSecretResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.BackupSecretResult"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    # Expand the URL template with the serialized path arguments.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'secret-name': self._serialize.url("secret_name", secret_name, 'str'),
    }
    endpoint = self._client.format_url(self.backup_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('BackupSecretResult', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
backup_secret.metadata = {'url': '/secrets/{secret-name}/backup'}  # type: ignore
async def restore_secret(
    self,
    vault_base_url: str,
    parameters: "_models.SecretRestoreParameters",
    **kwargs: Any
) -> "_models.SecretBundle":
    """Restores a backed up secret to a vault.

    Restores a backed up secret, and all its versions, to a vault. This operation requires the
    secrets/restore permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param parameters: The parameters to restore the secret.
    :type parameters: ~azure.keyvault.v2016_10_01.models.SecretRestoreParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SecretBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.SecretBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.SecretBundle"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Only the vault base URL is templated for this operation.
    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
    }
    endpoint = self._client.format_url(self.restore_secret.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the request body and issue the POST.
    body = self._serialize.body(parameters, 'SecretRestoreParameters')
    request = self._client.post(endpoint, query, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('SecretBundle', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
restore_secret.metadata = {'url': '/secrets/restore'}  # type: ignore
def get_certificates(
    self,
    vault_base_url: str,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.CertificateListResult"]:
    """List certificates in a specified key vault.

    The GetCertificates operation returns the set of certificates resources in the specified key
    vault. This operation requires the certificates/list permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param maxresults: Maximum number of results to return in a page. If not specified the service
     will return up to 25 results.
    :type maxresults: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CertificateListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.CertificateListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateListResult"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    def build_request(continuation=None):
        # Build either the initial page request or a follow-up request
        # against the continuation link returned by the service.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        path_args = {
            'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        }
        if not continuation:
            endpoint = self._client.format_url(self.get_certificates.metadata['url'], **path_args)  # type: ignore
            query = {}  # type: Dict[str, Any]
            if maxresults is not None:
                query['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
            query['api-version'] = self._serialize.query("api_version", api_version, 'str')
        else:
            # The continuation link already embeds the query string.
            endpoint = self._client.format_url(continuation, **path_args)
            query = {}  # type: Dict[str, Any]
        return self._client.get(endpoint, query, headers)

    async def extract_data(pipeline_response):
        page = self._deserialize('CertificateListResult', pipeline_response)
        elements = page.value
        if response_hook:
            elements = response_hook(elements)
        return page.next_link or None, AsyncList(elements)

    async def get_next(next_link=None):
        request = build_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        http_response = pipeline_response.http_response
        if http_response.status_code != 200:
            error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
            map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
            raise HttpResponseError(response=http_response, model=error)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
get_certificates.metadata = {'url': '/certificates'}  # type: ignore
async def delete_certificate(
    self,
    vault_base_url: str,
    certificate_name: str,
    **kwargs: Any
) -> "_models.DeletedCertificateBundle":
    """Deletes a certificate from a specified key vault.

    Deletes all versions of a certificate object along with its associated policy. Delete
    certificate cannot be used to remove individual versions of a certificate object. This
    operation requires the certificates/delete permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param certificate_name: The name of the certificate.
    :type certificate_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DeletedCertificateBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.DeletedCertificateBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedCertificateBundle"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
    }
    endpoint = self._client.format_url(self.delete_certificate.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('DeletedCertificateBundle', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
delete_certificate.metadata = {'url': '/certificates/{certificate-name}'}  # type: ignore
async def set_certificate_contacts(
    self,
    vault_base_url: str,
    contacts: "_models.Contacts",
    **kwargs: Any
) -> "_models.Contacts":
    """Sets the certificate contacts for the specified key vault.

    Sets the certificate contacts for the specified key vault. This operation requires the
    certificates/managecontacts permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param contacts: The contacts for the key vault certificate.
    :type contacts: ~azure.keyvault.v2016_10_01.models.Contacts
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Contacts, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.Contacts
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.Contacts"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
    }
    endpoint = self._client.format_url(self.set_certificate_contacts.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the contacts payload and PUT it to the vault.
    body = self._serialize.body(contacts, 'Contacts')
    request = self._client.put(endpoint, query, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('Contacts', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
set_certificate_contacts.metadata = {'url': '/certificates/contacts'}  # type: ignore
async def get_certificate_contacts(
    self,
    vault_base_url: str,
    **kwargs: Any
) -> "_models.Contacts":
    """Lists the certificate contacts for a specified key vault.

    The GetCertificateContacts operation returns the set of certificate contact resources in the
    specified key vault. This operation requires the certificates/managecontacts permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Contacts, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.Contacts
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.Contacts"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
    }
    endpoint = self._client.format_url(self.get_certificate_contacts.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('Contacts', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
get_certificate_contacts.metadata = {'url': '/certificates/contacts'}  # type: ignore
async def delete_certificate_contacts(
    self,
    vault_base_url: str,
    **kwargs: Any
) -> "_models.Contacts":
    """Deletes the certificate contacts for a specified key vault.

    Deletes the certificate contacts for a specified key vault certificate. This operation
    requires the certificates/managecontacts permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: Contacts, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.Contacts
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.Contacts"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
    }
    endpoint = self._client.format_url(self.delete_certificate_contacts.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('Contacts', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
delete_certificate_contacts.metadata = {'url': '/certificates/contacts'}  # type: ignore
def get_certificate_issuers(
    self,
    vault_base_url: str,
    maxresults: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.CertificateIssuerListResult"]:
    """List certificate issuers for a specified key vault.

    The GetCertificateIssuers operation returns the set of certificate issuer resources in the
    specified key vault. This operation requires the certificates/manageissuers/getissuers
    permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param maxresults: Maximum number of results to return in a page. If not specified the service
     will return up to 25 results.
    :type maxresults: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either CertificateIssuerListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.CertificateIssuerListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.CertificateIssuerListResult"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    def build_request(continuation=None):
        # First page goes to the templated operation URL; subsequent pages
        # follow the continuation link handed back by the service.
        headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]
        path_args = {
            'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        }
        if not continuation:
            endpoint = self._client.format_url(self.get_certificate_issuers.metadata['url'], **path_args)  # type: ignore
            query = {}  # type: Dict[str, Any]
            if maxresults is not None:
                query['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
            query['api-version'] = self._serialize.query("api_version", api_version, 'str')
        else:
            endpoint = self._client.format_url(continuation, **path_args)
            query = {}  # type: Dict[str, Any]
        return self._client.get(endpoint, query, headers)

    async def extract_data(pipeline_response):
        page = self._deserialize('CertificateIssuerListResult', pipeline_response)
        elements = page.value
        if response_hook:
            elements = response_hook(elements)
        return page.next_link or None, AsyncList(elements)

    async def get_next(next_link=None):
        request = build_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        http_response = pipeline_response.http_response
        if http_response.status_code != 200:
            error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
            map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
            raise HttpResponseError(response=http_response, model=error)
        return pipeline_response

    return AsyncItemPaged(get_next, extract_data)
get_certificate_issuers.metadata = {'url': '/certificates/issuers'}  # type: ignore
async def set_certificate_issuer(
    self,
    vault_base_url: str,
    issuer_name: str,
    parameter: "_models.CertificateIssuerSetParameters",
    **kwargs: Any
) -> "_models.IssuerBundle":
    """Sets the specified certificate issuer.

    The SetCertificateIssuer operation adds or updates the specified certificate issuer. This
    operation requires the certificates/setissuers permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param issuer_name: The name of the issuer.
    :type issuer_name: str
    :param parameter: Certificate issuer set parameter.
    :type parameter: ~azure.keyvault.v2016_10_01.models.CertificateIssuerSetParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: IssuerBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.IssuerBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.IssuerBundle"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'issuer-name': self._serialize.url("issuer_name", issuer_name, 'str'),
    }
    endpoint = self._client.format_url(self.set_certificate_issuer.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the issuer definition and PUT it to the vault.
    body = self._serialize.body(parameter, 'CertificateIssuerSetParameters')
    request = self._client.put(endpoint, query, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('IssuerBundle', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
set_certificate_issuer.metadata = {'url': '/certificates/issuers/{issuer-name}'}  # type: ignore
async def update_certificate_issuer(
    self,
    vault_base_url: str,
    issuer_name: str,
    parameter: "_models.CertificateIssuerUpdateParameters",
    **kwargs: Any
) -> "_models.IssuerBundle":
    """Updates the specified certificate issuer.

    The UpdateCertificateIssuer operation performs an update on the specified certificate issuer
    entity. This operation requires the certificates/setissuers permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param issuer_name: The name of the issuer.
    :type issuer_name: str
    :param parameter: Certificate issuer update parameter.
    :type parameter: ~azure.keyvault.v2016_10_01.models.CertificateIssuerUpdateParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: IssuerBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.IssuerBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.IssuerBundle"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'issuer-name': self._serialize.url("issuer_name", issuer_name, 'str'),
    }
    endpoint = self._client.format_url(self.update_certificate_issuer.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Partial update, so the payload goes out as a PATCH.
    body = self._serialize.body(parameter, 'CertificateIssuerUpdateParameters')
    request = self._client.patch(endpoint, query, headers, content=body)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('IssuerBundle', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
update_certificate_issuer.metadata = {'url': '/certificates/issuers/{issuer-name}'}  # type: ignore
async def get_certificate_issuer(
    self,
    vault_base_url: str,
    issuer_name: str,
    **kwargs: Any
) -> "_models.IssuerBundle":
    """Lists the specified certificate issuer.

    The GetCertificateIssuer operation returns the specified certificate issuer resources in the
    specified key vault. This operation requires the certificates/manageissuers/getissuers
    permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param issuer_name: The name of the issuer.
    :type issuer_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: IssuerBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.IssuerBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.IssuerBundle"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'issuer-name': self._serialize.url("issuer_name", issuer_name, 'str'),
    }
    endpoint = self._client.format_url(self.get_certificate_issuer.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('IssuerBundle', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
get_certificate_issuer.metadata = {'url': '/certificates/issuers/{issuer-name}'}  # type: ignore
async def delete_certificate_issuer(
    self,
    vault_base_url: str,
    issuer_name: str,
    **kwargs: Any
) -> "_models.IssuerBundle":
    """Deletes the specified certificate issuer.

    The DeleteCertificateIssuer operation permanently removes the specified certificate issuer
    from the vault. This operation requires the certificates/manageissuers/deleteissuers
    permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param issuer_name: The name of the issuer.
    :type issuer_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: IssuerBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.IssuerBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    response_hook = kwargs.pop('cls', None)  # type: ClsType["_models.IssuerBundle"]
    errors = {401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError}
    errors.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    path_args = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'issuer-name': self._serialize.url("issuer_name", issuer_name, 'str'),
    }
    endpoint = self._client.format_url(self.delete_certificate_issuer.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(endpoint, query, headers)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    http_response = pipeline_response.http_response

    if http_response.status_code != 200:
        map_error(status_code=http_response.status_code, response=http_response, error_map=errors)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, http_response)
        raise HttpResponseError(response=http_response, model=error)

    result = self._deserialize('IssuerBundle', pipeline_response)
    if response_hook:
        return response_hook(pipeline_response, result, {})
    return result
delete_certificate_issuer.metadata = {'url': '/certificates/issuers/{issuer-name}'}  # type: ignore
async def create_certificate(
self,
vault_base_url: str,
certificate_name: str,
parameters: "_models.CertificateCreateParameters",
**kwargs: Any
) -> "_models.CertificateOperation":
"""Creates a new certificate.
If this is the first version, the certificate resource is created. This operation requires the
certificates/create permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param parameters: The parameters to create a certificate.
:type parameters: ~azure.keyvault.v2016_10_01.models.CertificateCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CertificateCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_certificate.metadata = {'url': '/certificates/{certificate-name}/create'} # type: ignore
async def import_certificate(
self,
vault_base_url: str,
certificate_name: str,
parameters: "_models.CertificateImportParameters",
**kwargs: Any
) -> "_models.CertificateBundle":
"""Imports a certificate into a specified key vault.
Imports an existing valid certificate, containing a private key, into Azure Key Vault. The
certificate to be imported can be in either PFX or PEM format. If the certificate is in PEM
format the PEM file must contain the key as well as x509 certificates. This operation requires
the certificates/import permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param parameters: The parameters to import the certificate.
:type parameters: ~azure.keyvault.v2016_10_01.models.CertificateImportParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.import_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str', pattern=r'^[0-9a-zA-Z-]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CertificateImportParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
import_certificate.metadata = {'url': '/certificates/{certificate-name}/import'} # type: ignore
def get_certificate_versions(
self,
vault_base_url: str,
certificate_name: str,
maxresults: Optional[int] = None,
**kwargs: Any
) -> AsyncIterable["_models.CertificateListResult"]:
"""List the versions of a certificate.
The GetCertificateVersions operation returns the versions of a certificate in the specified key
vault. This operation requires the certificates/list permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param maxresults: Maximum number of results to return in a page. If not specified the service
will return up to 25 results.
:type maxresults: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either CertificateListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.CertificateListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.get_certificate_versions.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if maxresults is not None:
query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('CertificateListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
get_certificate_versions.metadata = {'url': '/certificates/{certificate-name}/versions'} # type: ignore
async def get_certificate_policy(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.CertificatePolicy":
"""Lists the policy for a certificate.
The GetCertificatePolicy operation returns the specified certificate policy resources in the
specified key vault. This operation requires the certificates/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate in a given key vault.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificatePolicy, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificatePolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificatePolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_certificate_policy.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificatePolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate_policy.metadata = {'url': '/certificates/{certificate-name}/policy'} # type: ignore
async def update_certificate_policy(
self,
vault_base_url: str,
certificate_name: str,
certificate_policy: "_models.CertificatePolicy",
**kwargs: Any
) -> "_models.CertificatePolicy":
"""Updates the policy for a certificate.
Set specified members in the certificate policy. Leave others as null. This operation requires
the certificates/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate in the given vault.
:type certificate_name: str
:param certificate_policy: The policy for the certificate.
:type certificate_policy: ~azure.keyvault.v2016_10_01.models.CertificatePolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificatePolicy, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificatePolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificatePolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_certificate_policy.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_policy, 'CertificatePolicy')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificatePolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_certificate_policy.metadata = {'url': '/certificates/{certificate-name}/policy'} # type: ignore
async def update_certificate(
self,
vault_base_url: str,
certificate_name: str,
certificate_version: str,
parameters: "_models.CertificateUpdateParameters",
**kwargs: Any
) -> "_models.CertificateBundle":
"""Updates the specified attributes associated with the given certificate.
The UpdateCertificate operation applies the specified update on the given certificate; the only
elements updated are the certificate's attributes. This operation requires the
certificates/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate in the given key vault.
:type certificate_name: str
:param certificate_version: The version of the certificate.
:type certificate_version: str
:param parameters: The parameters for certificate update.
:type parameters: ~azure.keyvault.v2016_10_01.models.CertificateUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
'certificate-version': self._serialize.url("certificate_version", certificate_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CertificateUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_certificate.metadata = {'url': '/certificates/{certificate-name}/{certificate-version}'} # type: ignore
async def get_certificate(
self,
vault_base_url: str,
certificate_name: str,
certificate_version: str,
**kwargs: Any
) -> "_models.CertificateBundle":
"""Gets information about a certificate.
Gets information about a specific certificate. This operation requires the certificates/get
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate in the given vault.
:type certificate_name: str
:param certificate_version: The version of the certificate.
:type certificate_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
'certificate-version': self._serialize.url("certificate_version", certificate_version, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate.metadata = {'url': '/certificates/{certificate-name}/{certificate-version}'} # type: ignore
async def update_certificate_operation(
self,
vault_base_url: str,
certificate_name: str,
certificate_operation: "_models.CertificateOperationUpdateParameter",
**kwargs: Any
) -> "_models.CertificateOperation":
"""Updates a certificate operation.
Updates a certificate creation operation that is already in progress. This operation requires
the certificates/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param certificate_operation: The certificate operation response.
:type certificate_operation: ~azure.keyvault.v2016_10_01.models.CertificateOperationUpdateParameter
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_certificate_operation.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(certificate_operation, 'CertificateOperationUpdateParameter')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_certificate_operation.metadata = {'url': '/certificates/{certificate-name}/pending'} # type: ignore
async def get_certificate_operation(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.CertificateOperation":
"""Gets the creation operation of a certificate.
Gets the creation operation associated with a specified certificate. This operation requires
the certificates/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_certificate_operation.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_certificate_operation.metadata = {'url': '/certificates/{certificate-name}/pending'} # type: ignore
async def delete_certificate_operation(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.CertificateOperation":
"""Deletes the creation operation for a specific certificate.
Deletes the creation operation for a specified certificate that is in the process of being
created. The certificate is no longer created. This operation requires the certificates/update
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateOperation, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateOperation
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateOperation"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_certificate_operation.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateOperation', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_certificate_operation.metadata = {'url': '/certificates/{certificate-name}/pending'} # type: ignore
async def merge_certificate(
self,
vault_base_url: str,
certificate_name: str,
parameters: "_models.CertificateMergeParameters",
**kwargs: Any
) -> "_models.CertificateBundle":
"""Merges a certificate or a certificate chain with a key pair existing on the server.
The MergeCertificate operation performs the merging of a certificate or certificate chain with
a key pair currently available in the service. This operation requires the certificates/create
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:param parameters: The parameters to merge certificate.
:type parameters: ~azure.keyvault.v2016_10_01.models.CertificateMergeParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.merge_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CertificateMergeParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
merge_certificate.metadata = {'url': '/certificates/{certificate-name}/pending/merge'} # type: ignore
    def get_deleted_certificates(
        self,
        vault_base_url: str,
        maxresults: Optional[int] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.DeletedCertificateListResult"]:
        """Lists the deleted certificates in the specified vault currently available for recovery.

        The GetDeletedCertificates operation retrieves the certificates in the current vault which are
        in a deleted state and ready for recovery or purging. This operation includes deletion-specific
        information. This operation requires the certificates/get/list permission. This operation can
        only be enabled on soft-delete enabled vaults.

        :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
        :type vault_base_url: str
        :param maxresults: Maximum number of results to return in a page. If not specified the service
         will return up to 25 results.
        :type maxresults: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DeletedCertificateListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.DeletedCertificateListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DeletedCertificateListResult"]
        # Statuses with dedicated exception types; callers may extend or override
        # the mapping via the optional 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-10-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            """Build the GET request for one page: the first page is constructed from
            the URL template and query parameters; later pages reuse the
            service-supplied next_link, which already carries the query string."""
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.get_deleted_certificates.metadata['url']  # type: ignore
                path_format_arguments = {
                    'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if maxresults is not None:
                    query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is absolute apart from the vault host placeholder.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                path_format_arguments = {
                    'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            """Deserialize one page and return (continuation token, items)."""
            deserialized = self._deserialize('DeletedCertificateListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            """Fetch a single page, raising on any non-200 status."""
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)
            return pipeline_response

        # AsyncItemPaged drives get_next/extract_data lazily as the caller iterates.
        return AsyncItemPaged(
            get_next, extract_data
        )
    get_deleted_certificates.metadata = {'url': '/deletedcertificates'}  # type: ignore
async def get_deleted_certificate(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.DeletedCertificateBundle":
"""Retrieves information about the specified deleted certificate.
The GetDeletedCertificate operation retrieves the deleted certificate information plus its
attributes, such as retention interval, scheduled permanent deletion and the current deletion
recovery level. This operation requires the certificates/get permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DeletedCertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.DeletedCertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.DeletedCertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_deleted_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('DeletedCertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_deleted_certificate.metadata = {'url': '/deletedcertificates/{certificate-name}'} # type: ignore
async def purge_deleted_certificate(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> None:
"""Permanently deletes the specified deleted certificate.
The PurgeDeletedCertificate operation performs an irreversible deletion of the specified
certificate, without possibility for recovery. The operation is not available if the recovery
level does not specify 'Purgeable'. This operation requires the certificate/purge permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.purge_deleted_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
purge_deleted_certificate.metadata = {'url': '/deletedcertificates/{certificate-name}'} # type: ignore
async def recover_deleted_certificate(
self,
vault_base_url: str,
certificate_name: str,
**kwargs: Any
) -> "_models.CertificateBundle":
"""Recovers the deleted certificate back to its current version under /certificates.
The RecoverDeletedCertificate operation performs the reversal of the Delete operation. The
operation is applicable in vaults enabled for soft-delete, and must be issued during the
retention interval (available in the deleted certificate's attributes). This operation requires
the certificates/recover permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param certificate_name: The name of the deleted certificate.
:type certificate_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CertificateBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.CertificateBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CertificateBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.recover_deleted_certificate.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'certificate-name': self._serialize.url("certificate_name", certificate_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('CertificateBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
recover_deleted_certificate.metadata = {'url': '/deletedcertificates/{certificate-name}/recover'} # type: ignore
    def get_storage_accounts(
        self,
        vault_base_url: str,
        maxresults: Optional[int] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.StorageListResult"]:
        """List storage accounts managed by the specified key vault. This operation requires the
        storage/list permission.

        :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
        :type vault_base_url: str
        :param maxresults: Maximum number of results to return in a page. If not specified the service
         will return up to 25 results.
        :type maxresults: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either StorageListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.StorageListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.StorageListResult"]
        # Statuses with dedicated exception types; callers may extend or override
        # the mapping via the optional 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-10-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            """Build the GET request for one page: the first page is constructed from
            the URL template and query parameters; later pages reuse the
            service-supplied next_link, which already carries the query string."""
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.get_storage_accounts.metadata['url']  # type: ignore
                path_format_arguments = {
                    'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if maxresults is not None:
                    query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is absolute apart from the vault host placeholder.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                path_format_arguments = {
                    'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            """Deserialize one page and return (continuation token, items)."""
            deserialized = self._deserialize('StorageListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            """Fetch a single page, raising on any non-200 status."""
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)
            return pipeline_response

        # AsyncItemPaged drives get_next/extract_data lazily as the caller iterates.
        return AsyncItemPaged(
            get_next, extract_data
        )
    get_storage_accounts.metadata = {'url': '/storage'}  # type: ignore
async def delete_storage_account(
self,
vault_base_url: str,
storage_account_name: str,
**kwargs: Any
) -> "_models.StorageBundle":
"""Deletes a storage account. This operation requires the storage/delete permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.delete_storage_account.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
delete_storage_account.metadata = {'url': '/storage/{storage-account-name}'} # type: ignore
async def get_storage_account(
self,
vault_base_url: str,
storage_account_name: str,
**kwargs: Any
) -> "_models.StorageBundle":
"""Gets information about a specified storage account. This operation requires the storage/get
permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
accept = "application/json"
# Construct URL
url = self.get_storage_account.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_storage_account.metadata = {'url': '/storage/{storage-account-name}'} # type: ignore
async def set_storage_account(
self,
vault_base_url: str,
storage_account_name: str,
parameters: "_models.StorageAccountCreateParameters",
**kwargs: Any
) -> "_models.StorageBundle":
"""Creates or updates a new storage account. This operation requires the storage/set permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param parameters: The parameters to create a storage account.
:type parameters: ~azure.keyvault.v2016_10_01.models.StorageAccountCreateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.set_storage_account.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountCreateParameters')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
set_storage_account.metadata = {'url': '/storage/{storage-account-name}'} # type: ignore
async def update_storage_account(
self,
vault_base_url: str,
storage_account_name: str,
parameters: "_models.StorageAccountUpdateParameters",
**kwargs: Any
) -> "_models.StorageBundle":
"""Updates the specified attributes associated with the given storage account. This operation
requires the storage/set/update permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param parameters: The parameters to update a storage account.
:type parameters: ~azure.keyvault.v2016_10_01.models.StorageAccountUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_storage_account.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_storage_account.metadata = {'url': '/storage/{storage-account-name}'} # type: ignore
async def regenerate_storage_account_key(
self,
vault_base_url: str,
storage_account_name: str,
parameters: "_models.StorageAccountRegenerteKeyParameters",
**kwargs: Any
) -> "_models.StorageBundle":
"""Regenerates the specified key value for the given storage account. This operation requires the
storage/regeneratekey permission.
:param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
:type vault_base_url: str
:param storage_account_name: The name of the storage account.
:type storage_account_name: str
:param parameters: The parameters to regenerate storage account key.
:type parameters: ~azure.keyvault.v2016_10_01.models.StorageAccountRegenerteKeyParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: StorageBundle, or the result of cls(response)
:rtype: ~azure.keyvault.v2016_10_01.models.StorageBundle
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.StorageBundle"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2016-10-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.regenerate_storage_account_key.metadata['url'] # type: ignore
path_format_arguments = {
'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'StorageAccountRegenerteKeyParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('StorageBundle', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
regenerate_storage_account_key.metadata = {'url': '/storage/{storage-account-name}/regeneratekey'} # type: ignore
    def get_sas_definitions(
        self,
        vault_base_url: str,
        storage_account_name: str,
        maxresults: Optional[int] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.SasDefinitionListResult"]:
        """List storage SAS definitions for the given storage account. This operation requires the
        storage/listsas permission.

        :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
        :type vault_base_url: str
        :param storage_account_name: The name of the storage account.
        :type storage_account_name: str
        :param maxresults: Maximum number of results to return in a page. If not specified the service
         will return up to 25 results.
        :type maxresults: int
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either SasDefinitionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.keyvault.v2016_10_01.models.SasDefinitionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.SasDefinitionListResult"]
        # Statuses with dedicated exception types; callers may extend or override
        # the mapping via the optional 'error_map' kwarg.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2016-10-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            """Build the GET request for one page: the first page is constructed from
            the URL template and query parameters; later pages reuse the
            service-supplied next_link, which already carries the query string."""
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.get_sas_definitions.metadata['url']  # type: ignore
                path_format_arguments = {
                    'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                    'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if maxresults is not None:
                    query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', maximum=25, minimum=1)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link is absolute apart from the path placeholders.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                path_format_arguments = {
                    'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
                    'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            """Deserialize one page and return (continuation token, items)."""
            deserialized = self._deserialize('SasDefinitionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            """Fetch a single page, raising on any non-200 status."""
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error)
            return pipeline_response

        # AsyncItemPaged drives get_next/extract_data lazily as the caller iterates.
        return AsyncItemPaged(
            get_next, extract_data
        )
    get_sas_definitions.metadata = {'url': '/storage/{storage-account-name}/sas'}  # type: ignore
async def delete_sas_definition(
    self,
    vault_base_url: str,
    storage_account_name: str,
    sas_definition_name: str,
    **kwargs: Any
) -> "_models.SasDefinitionBundle":
    """Deletes a SAS definition from a specified storage account. This operation requires the
    storage/deletesas permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param storage_account_name: The name of the storage account.
    :type storage_account_name: str
    :param sas_definition_name: The name of the SAS definition.
    :type sas_definition_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SasDefinitionBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.SasDefinitionBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SasDefinitionBundle"]
    # Map well-known HTTP failures to typed azure-core exceptions; callers may
    # extend or override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    # Construct URL from the route template stored on this method.
    url = self.delete_sas_definition.metadata['url']  # type: ignore
    path_format_arguments = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
        'sas-definition-name': self._serialize.url("sas_definition_name", sas_definition_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('SasDefinitionBundle', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
delete_sas_definition.metadata = {'url': '/storage/{storage-account-name}/sas/{sas-definition-name}'}  # type: ignore
async def get_sas_definition(
    self,
    vault_base_url: str,
    storage_account_name: str,
    sas_definition_name: str,
    **kwargs: Any
) -> "_models.SasDefinitionBundle":
    """Gets information about a SAS definition for the specified storage account. This operation
    requires the storage/getsas permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param storage_account_name: The name of the storage account.
    :type storage_account_name: str
    :param sas_definition_name: The name of the SAS definition.
    :type sas_definition_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SasDefinitionBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.SasDefinitionBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SasDefinitionBundle"]
    # Map well-known HTTP failures to typed azure-core exceptions; callers may
    # extend or override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    accept = "application/json"

    # Construct URL from the route template stored on this method.
    url = self.get_sas_definition.metadata['url']  # type: ignore
    path_format_arguments = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
        'sas-definition-name': self._serialize.url("sas_definition_name", sas_definition_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('SasDefinitionBundle', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_sas_definition.metadata = {'url': '/storage/{storage-account-name}/sas/{sas-definition-name}'}  # type: ignore
async def set_sas_definition(
    self,
    vault_base_url: str,
    storage_account_name: str,
    sas_definition_name: str,
    parameters: "_models.SasDefinitionCreateParameters",
    **kwargs: Any
) -> "_models.SasDefinitionBundle":
    """Creates or updates a new SAS definition for the specified storage account. This operation
    requires the storage/setsas permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param storage_account_name: The name of the storage account.
    :type storage_account_name: str
    :param sas_definition_name: The name of the SAS definition.
    :type sas_definition_name: str
    :param parameters: The parameters to create a SAS definition.
    :type parameters: ~azure.keyvault.v2016_10_01.models.SasDefinitionCreateParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SasDefinitionBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.SasDefinitionBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SasDefinitionBundle"]
    # Map well-known HTTP failures to typed azure-core exceptions; callers may
    # extend or override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL from the route template stored on this method.
    url = self.set_sas_definition.metadata['url']  # type: ignore
    path_format_arguments = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
        'sas-definition-name': self._serialize.url("sas_definition_name", sas_definition_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request body; PUT creates or replaces the definition.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'SasDefinitionCreateParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('SasDefinitionBundle', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
set_sas_definition.metadata = {'url': '/storage/{storage-account-name}/sas/{sas-definition-name}'}  # type: ignore
async def update_sas_definition(
    self,
    vault_base_url: str,
    storage_account_name: str,
    sas_definition_name: str,
    parameters: "_models.SasDefinitionUpdateParameters",
    **kwargs: Any
) -> "_models.SasDefinitionBundle":
    """Updates the specified attributes associated with the given SAS definition. This operation
    requires the storage/setsas permission.

    :param vault_base_url: The vault name, for example https://myvault.vault.azure.net.
    :type vault_base_url: str
    :param storage_account_name: The name of the storage account.
    :type storage_account_name: str
    :param sas_definition_name: The name of the SAS definition.
    :type sas_definition_name: str
    :param parameters: The parameters to update a SAS definition.
    :type parameters: ~azure.keyvault.v2016_10_01.models.SasDefinitionUpdateParameters
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: SasDefinitionBundle, or the result of cls(response)
    :rtype: ~azure.keyvault.v2016_10_01.models.SasDefinitionBundle
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.SasDefinitionBundle"]
    # Map well-known HTTP failures to typed azure-core exceptions; callers may
    # extend or override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2016-10-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL from the route template stored on this method.
    url = self.update_sas_definition.metadata['url']  # type: ignore
    path_format_arguments = {
        'vaultBaseUrl': self._serialize.url("vault_base_url", vault_base_url, 'str', skip_quote=True),
        'storage-account-name': self._serialize.url("storage_account_name", storage_account_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
        'sas-definition-name': self._serialize.url("sas_definition_name", sas_definition_name, 'str', pattern=r'^[0-9a-zA-Z]+$'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the request body; PATCH updates attributes in place.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'SasDefinitionUpdateParameters')
    body_content_kwargs['content'] = body_content
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.KeyVaultError, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('SasDefinitionBundle', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
update_sas_definition.metadata = {'url': '/storage/{storage-account-name}/sas/{sas-definition-name}'}  # type: ignore
| mit |
scorphus/scrapy | scrapy/core/downloader/handlers/s3.py | 31 | 2328 | from urlparse import unquote
from scrapy import optional_features
from scrapy.exceptions import NotConfigured
from scrapy.utils.httpobj import urlparse_cached
from .http import HTTPDownloadHandler
try:
from boto.s3.connection import S3Connection
except ImportError:
S3Connection = object
class _v19_S3Connection(S3Connection):
    """A dummy S3Connection wrapper that doesn't do any synchronous download.

    boto < 2.0 signature of _mexe: only the signed headers are needed here,
    since the actual download is done asynchronously by Scrapy.
    """
    def _mexe(self, method, bucket, key, headers, *args, **kwargs):
        return headers
class _v20_S3Connection(S3Connection):
    """A dummy S3Connection wrapper that doesn't do any synchronous download.

    boto >= 2.0 signature of _mexe: sign the request object and return just
    its headers, since the actual download is done asynchronously by Scrapy.
    """
    def _mexe(self, http_request, *args, **kwargs):
        http_request.authorize(connection=self)
        return http_request.headers
try:
import boto.auth
except ImportError:
_S3Connection = _v19_S3Connection
else:
_S3Connection = _v20_S3Connection
class S3DownloadHandler(object):
    """Download handler for s3:// requests.

    boto is used only to *sign* the request headers; the actual transfer is
    delegated to the plain HTTP download handler against the rewritten
    ``<bucket>.s3.amazonaws.com`` URL.
    """

    def __init__(self, settings, aws_access_key_id=None, aws_secret_access_key=None, \
            httpdownloadhandler=HTTPDownloadHandler):
        if 'boto' not in optional_features:
            raise NotConfigured("missing boto library")

        # Fall back to project settings when credentials are not passed in
        # explicitly (e.g. by tests).
        if not aws_access_key_id:
            aws_access_key_id = settings['AWS_ACCESS_KEY_ID']
        if not aws_secret_access_key:
            aws_secret_access_key = settings['AWS_SECRET_ACCESS_KEY']

        try:
            self.conn = _S3Connection(aws_access_key_id, aws_secret_access_key)
        except Exception as ex:
            # Typically bad/missing credentials; surface as a config problem.
            raise NotConfigured(str(ex))

        self._download_http = httpdownloadhandler(settings).download_request

    def download_request(self, request, spider):
        p = urlparse_cached(request)
        scheme = 'https' if request.meta.get('is_secure') else 'http'
        bucket = p.hostname
        path = p.path + '?' + p.query if p.query else p.path
        url = '%s://%s.s3.amazonaws.com%s' % (scheme, bucket, path)
        # make_request() only signs here (the patched _mexe returns headers
        # without downloading anything).
        signed_headers = self.conn.make_request(
                method=request.method,
                bucket=bucket,
                key=unquote(p.path),
                query_args=unquote(p.query),
                headers=request.headers,
                data=request.body)
        httpreq = request.replace(url=url, headers=signed_headers)
        return self._download_http(httpreq, spider)
| bsd-3-clause |
TheTypoMaster/chromium-crosswalk | tools/memory_inspector/memory_inspector/backends/adb_client.py | 41 | 10480 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A pure python library to interface with the Android Debug Bridge daemon.
This a lightweight implementation of the adb socket protocol, the same as the
one used by the adb client binary when communicating with its own server.
It can be used to run commands, push and pull files from/to Android devices,
requiring only that an adb daemon (adb start-server) is running on the host.
"""
import logging
import os
import pipes
import re
import socket
import stat
import struct
ADB_PORT = 5037
TIMEOUT = 5
ADB_NOT_RUNNING_MESSAGE = 'ADB daemon not running. Run \'adb start-server\'.'
"""Regular expression for matching the output of the 'getprop' Android command.
Sample input:
[prop1]: [simple value]
[prop2]: [multiline
value]
"""
GETPROP_RE = re.compile(r'^\[([^\]]*)\]: \[(.*?)\]$', re.MULTILINE | re.DOTALL)
class ADBClientError(Exception):
    """Raised for adb protocol errors and daemon connectivity failures."""
    pass
class ADBHostSession(object):
  """A session handler to communicate with the adb daemon via a TCP socket.

  The design of adb requires that most high-level commands (shell, root, etc)
  must be run in distinct TCP connections (read: the TCP socket must be recycled
  continuously). However, many of these high-level commands typically require
  a variable number of low-level messages to be exchanged.
  This class abstracts the notion of a TCP session with the ADB host, hiding
  the underlying socket and data (de)serialization boilerplate, by means of
  scoped semantics, for instance:
  with ADBHostSession() as session:
    session.SendCmd(...)
    session.ReadMsg(...)
  """

  def __init__(self, transport=None):
    self._sock = None
    self._transport = transport

  def __enter__(self):
    try:
      self._sock = socket.create_connection(('127.0.0.1', ADB_PORT),
                                            timeout=TIMEOUT)
    except socket.error:
      raise ADBClientError(ADB_NOT_RUNNING_MESSAGE)
    if self._transport:
      # Bind the session to a specific device for device-scoped services.
      self.SendCmd('host:transport:' + self._transport)
    return self

  def __exit__(self, exc_type, exc_value, exc_traceback):
    if exc_type is socket.error:
      raise ADBClientError(ADB_NOT_RUNNING_MESSAGE)
    try:
      self._sock.close()
    except Exception as e:
      logging.warn('ADB socket teardown: %s', e)

  def SendCmd(self, cmd):
    """Sends a hex-length-prefixed command and waits for its OKAY/FAIL ack."""
    cmd = '%04x%s' % (len(cmd), cmd)
    self._sock.sendall(cmd.encode('ascii'))
    self.CheckAck()

  def ReadMsg(self):
    """Reads one hex-length-prefixed protocol message.

    BUGFIX: recv() on a stream socket may return fewer bytes than requested,
    which used to silently truncate messages. Now reads exactly the announced
    number of bytes and raises ADBClientError on premature EOF.
    """
    size = int(self._RecvExactly(4), 16)
    return self._RecvExactly(size)

  def SendCmdAndGetReply(self, cmd):
    self.SendCmd(cmd)
    return self.ReadMsg()

  def CheckAck(self):
    # strict=False keeps the historical 'EMPTY ACK' error when the daemon
    # closes the connection before sending a full 4-byte status.
    status = self._RecvExactly(4, strict=False)
    if status == 'OKAY':
      return
    elif status == 'FAIL':
      raise ADBClientError('FAIL ' + self.ReadMsg())
    else:
      raise ADBClientError(status or 'EMPTY ACK')

  def ReadAll(self):
    """Reads until the daemon closes the connection."""
    return ''.join(iter(lambda: self._sock.recv(4096), ''))

  def SendRaw(self, *args):
    """Sends each arg verbatim: strings as-is, ints as little-endian uint32."""
    for arg in args:
      if isinstance(arg, str):
        data = arg
      elif isinstance(arg, int):
        data = struct.pack('I', arg)
      else:
        assert False
      self._sock.sendall(data)

  def RecvRaw(self, size_fmt):
    """size_fmt can be either an integer (buf size) or a string (struct fmt)."""
    size = size_fmt if isinstance(size_fmt, int) else struct.calcsize(size_fmt)
    if isinstance(size_fmt, int):
      # Callers of the buffer-size form (e.g. ADBDevice.Pull) loop over short
      # reads themselves, so a single recv() is intentional here.
      return self._sock.recv(size)
    # Struct form: read exactly calcsize() bytes (recv() may short-read),
    # then unpack. _RecvExactly raises on premature EOF.
    return struct.unpack(size_fmt, self._RecvExactly(size))

  def _RecvExactly(self, size, strict=True):
    """Reads exactly |size| bytes, looping over partial recv() results.

    With strict=True raises ADBClientError if the peer closes early;
    with strict=False returns whatever was received before EOF.
    """
    chunks = []
    remaining = size
    while remaining > 0:
      chunk = self._sock.recv(remaining)
      if not chunk:
        break
      chunks.append(chunk)
      remaining -= len(chunk)
    data = ''.join(chunks)
    if strict and len(data) != size:
      raise ADBClientError('Protocol error: expected %d bytes, got %d' % (
          size, len(data)))
    return data
class ADBDevice(object):
  """Handles the interaction with a specific Android device."""

  def __init__(self, serial):
    assert isinstance(serial, str), type(serial)
    self.serial = serial
    # Snapshot all system properties once so GetProp(cached=True) can avoid
    # a shell round-trip later.
    all_props = self.Shell(['getprop'])
    self._cached_props = dict(re.findall(GETPROP_RE, all_props))

  def GetProp(self, name, cached=False):
    """Returns an Android system property, optionally from the init snapshot."""
    if cached and name in self._cached_props:
      return self._cached_props[name]
    else:
      return self.Shell(['getprop', name]).rstrip('\r\n')

  def GetState(self):
    # e.g. 'device', 'offline', 'unauthorized'.
    with ADBHostSession() as s:
      return s.SendCmdAndGetReply('host-serial:%s:get-state' % self.serial)

  def IsConnected(self):
    return self.GetState() == 'device'

  def Shell(self, cmd):
    """Runs a shell command on the device and returns its combined output.

    cmd may be a plain string or an argv-style list (quoted automatically).
    """
    # If cmd is a list (like in subprocess.call), quote and escape the args.
    if isinstance(cmd, list):
      cmd = ' '.join(pipes.quote(x) for x in cmd)
    with ADBHostSession(transport=self.serial) as s:
      s.SendCmd('shell:' + cmd)
      return s.ReadAll().replace('\r\n', '\n')  # Nobody likes carriage returns.

  def WaitForDevice(self):
    with ADBHostSession() as s:
      s.SendCmd('host-serial:%s:wait-for-any' % self.serial)
      return s.ReadAll() == 'OKAY'

  def RestartShellAsRoot(self):
    """Restarts adbd on the device with root privileges (like 'adb root')."""
    with ADBHostSession(transport=self.serial) as s:
      s.SendCmd('root:')
      return s.ReadAll()

  def RemountSystemPartition(self):
    """Remounts /system read-write (requires root)."""
    with ADBHostSession(transport=self.serial) as s:
      s.SendCmd('remount:')
      resp = s.RecvRaw(64)
      if 'succeeded' not in resp.lower():
        raise ADBClientError('Remount failed: ' + resp)

  def Reboot(self):
    with ADBHostSession(transport=self.serial) as s:
      s.SendCmd('reboot:')

  def ForwardTCPPort(self, local_port, remote_port):
    """Forwards host tcp:local_port to device tcp:remote_port."""
    with ADBHostSession() as s:
      s.SendCmd('host-serial:%s:forward:tcp:%s;tcp:%s' % (
          self.serial, local_port, remote_port))

  def DisableAllForwards(self):
    with ADBHostSession() as s:
      s.SendCmd('host-serial:%s:killforward-all' % self.serial)

  def Stat(self, device_path):
    """Returns (mode, size, mtime) for a device path via the sync service.

    mode is 0 when the path does not exist (see FileExists/Push below).
    """
    device_path = device_path.encode('ascii')
    with ADBHostSession(transport=self.serial) as s:
      s.SendCmd('sync:')
      s.SendRaw('STAT', len(device_path), device_path)
      resp, mode, size, mtime = s.RecvRaw('4sIII')
      assert resp == 'STAT'
    return mode, size, mtime

  def FileExists(self, device_path):
    return self.Stat(device_path)[0] != 0

  def Push(self, host_path, device_path):
    """Copies a regular file from the host to the device."""
    if not os.path.isfile(host_path):
      raise ADBClientError('Can push only regular files')
    device_path = device_path.encode('ascii')
    device_stat = self.Stat(device_path)
    # A zero mode means the target does not exist yet, which is fine.
    if device_stat[0] and not stat.S_ISREG(device_stat[0]):
      raise ADBClientError('Target %s exists but is not a file' % device_path)
    with ADBHostSession(transport=self.serial) as s:
      s.SendCmd('sync:')
      send_cmd = '%s,33206' % device_path  # adb supports only rw-rw-rw-.
      s.SendRaw('SEND', len(send_cmd), send_cmd)
      with open(host_path, 'rb') as fd:
        while True:
          data = fd.read(1490)  # Stay close to the MTU for best performance.
          if not data:
            break
          s.SendRaw('DATA', len(data), data)
      # DONE carries the file mtime to be set on the device.
      local_mtime = int(os.path.getmtime(host_path))
      s.SendRaw('DONE', local_mtime)
      s.CheckAck()
      s.SendRaw('QUIT', 0)

  def Pull(self, device_path, host_path, update_mtime=False):
    """Pulls a file from the device.

    Args:
      device_path: source path of the file to be pulled.
      host_path: destination path on the host.
      update_mtime: preserves the source file mtime if True.
    """
    if os.path.exists(host_path) and not os.path.isfile(host_path):
      raise ADBClientError('Target %s exists but is not a file' % host_path)
    device_path = device_path.encode('ascii')
    device_stat = self.Stat(device_path)
    if device_stat[0] and not stat.S_ISREG(device_stat[0]):
      raise ADBClientError('Source %s exists but is not a file' % device_path)
    with ADBHostSession(transport=self.serial) as s:
      s.SendCmd('sync:')
      s.SendRaw('RECV', len(device_path), device_path)
      with open(host_path, 'wb') as fd:
        while True:
          status, size = s.RecvRaw('4sI')
          if status == 'DONE':
            break
          if status != 'DATA':
            raise ADBClientError('Pull failed: ' + status)
          # Each DATA chunk may arrive in several recv()s; loop until drained.
          while size > 0:
            data = s.RecvRaw(size)
            if not data:
              raise ADBClientError('Pull failed: connection interrupted')
            fd.write(data)
            size -= len(data)
      if update_mtime:
        os.utime(host_path, (device_stat[2], device_stat[2]))
      s.SendRaw('QUIT', 0)

  def __str__(self):
    return 'ADBDevice [%s]' % self.serial

  __repr__ = __str__
def ListDevices():
  """Lists the connected devices. Returns a list of ADBDevice instances."""
  with ADBHostSession() as session:
    listing = session.SendCmdAndGetReply('host:devices')
  devices = []
  # Each line is '<serial>\t<state>'; only the serial is needed.
  for line in listing.splitlines():
    serial = line.split('\t')[0]
    devices.append(ADBDevice(serial=serial))
  return devices
def GetDevice(serial=None):
  """Returns and ADBDevice given its serial.

  The first connected device is returned if serial is None.
  """
  # An empty/None serial matches any device, mirroring the original filter.
  for device in ListDevices():
    if not serial or device.serial == serial:
      return device
  return None
def _EndToEndTest():
  """Some minimal non-automated end-to-end testing.

  Requires a physically connected device; exercises root, remount, stat,
  push and pull, then cross-checks MD5 sums computed on the device.
  """
  import tempfile
  local_test_file = tempfile.mktemp()
  print 'Starting test, please make sure at least one device is connected'
  devices = ListDevices()
  print 'Devices:', devices
  device = GetDevice(devices[0].serial)
  assert device.IsConnected()
  device.RestartShellAsRoot()
  device.WaitForDevice()
  build_fingerprint = device.Shell(['getprop', 'ro.build.fingerprint']).strip()
  print 'Build fingerprint', build_fingerprint
  device.RemountSystemPartition()
  # Verify /system is now mounted read-write.
  assert 'rw' in device.Shell('cat /proc/mounts | grep system')
  mode, size, _ = device.Stat('/system/bin/sh')
  assert mode == 0100755, oct(mode)
  assert size > 1024
  print 'Pulling a large file'
  device.Pull('/system/lib/libwebviewchromium.so', local_test_file)
  print 'Pushing a large file'
  # Deliberately uses a path with a space to exercise quoting.
  device.Push(local_test_file, '/data/local/tmp/file name.so')
  remote_md5 = device.Shell('md5 /system/lib/libwebviewchromium.so')[:32]
  remote_md5_copy = device.Shell(['md5', '/data/local/tmp/file name.so'])[:32]
  size = device.Stat('/data/local/tmp/file name.so')[1]
  assert size == os.path.getsize(local_test_file)
  device.Shell(['rm', '/data/local/tmp/file name.so'])
  print 'Remote MD5 of the original file is', remote_md5
  print 'Remote MD5 of the copied file is', remote_md5_copy
  os.unlink(local_test_file)
  assert remote_md5 == remote_md5_copy
  print '[TEST PASSED]'
if __name__ == '__main__':
  # Manual test entry point; needs a connected, rootable Android device.
  _EndToEndTest()
| bsd-3-clause |
platformio/platformio-core | platformio/debug/helpers.py | 1 | 6497 | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
import time
from fnmatch import fnmatch
from hashlib import sha1
from io import BytesIO
from os.path import isfile
from platformio import util
from platformio.commands import PlatformioCLI
from platformio.commands.run.command import cli as cmd_run
from platformio.commands.run.command import print_processing_header
from platformio.commands.test.helpers import get_test_names
from platformio.commands.test.processor import TestProcessorBase
from platformio.compat import IS_WINDOWS, is_bytes
from platformio.debug.exception import DebugInvalidOptionsError
class GDBMIConsoleStream(BytesIO):  # pylint: disable=too-few-public-methods
    """Stream that forwards writes to stdout as GDB/MI console ("~") records,
    so plain text output does not corrupt the MI protocol stream."""

    # Real stdout captured at class-definition time, before any redirection.
    STDOUT = sys.stdout

    def write(self, text):
        self.STDOUT.write(escape_gdbmi_stream("~", text))
        self.STDOUT.flush()
def is_gdbmi_mode():
    """Return True when a GDB/MI front-end launched the debug session
    (detected by '--interpreter' among the pass-through CLI arguments)."""
    return "--interpreter" in " ".join(PlatformioCLI.leftover_args)
def escape_gdbmi_stream(prefix, stream):
    """Wrap *stream* into a quoted GDB/MI output record with *prefix*.

    Backslashes, double quotes and newlines are escaped; bytes input yields
    bytes output. A trailing newline is kept outside the quoted payload.
    """
    was_bytes = is_bytes(stream)
    if was_bytes:
        stream = stream.decode()
    if not stream:
        return b"" if was_bytes else ""
    trailing_nl = stream.endswith("\n")
    # Runs of backslashes collapse into a single escaped backslash pair.
    escaped = re.sub(r"\\+", "\\\\\\\\", stream)
    escaped = escaped.replace('"', '\\"').replace("\n", "\\n")
    result = '%s"%s"' % (prefix, escaped)
    if trailing_nl:
        result += "\n"
    return result.encode() if was_bytes else result
def get_default_debug_env(config):
    """Pick the environment to debug.

    Preference order: a default env with build_type=debug, then any declared
    env with build_type=debug, then the first default (or first declared) env.
    """
    defaults = config.default_envs()
    declared = config.envs()
    for group in (defaults, declared):
        for candidate in group:
            if config.get("env:" + candidate, "build_type") == "debug":
                return candidate
    return defaults[0] if defaults else declared[0]
def predebug_project(
    ctx, project_dir, project_config, env_name, preload, verbose
):  # pylint: disable=too-many-arguments
    """Build the project (and optionally upload it) before a debug session.

    When the environment declares ``debug_test``, only that test target is
    built via the test processor; otherwise a regular ``__debug`` build is
    invoked through the ``run`` command.
    """
    debug_testname = project_config.get("env:" + env_name, "debug_test")
    if debug_testname:
        # Validate the configured test name against the project's tests.
        test_names = get_test_names(project_config)
        if debug_testname not in test_names:
            raise DebugInvalidOptionsError(
                "Unknown test name `%s`. Valid names are `%s`"
                % (debug_testname, ", ".join(test_names))
            )
        print_processing_header(env_name, project_config, verbose)
        tp = TestProcessorBase(
            ctx,
            debug_testname,
            env_name,
            dict(
                project_config=project_config,
                project_dir=project_dir,
                without_building=False,
                without_uploading=True,
                without_testing=True,
                verbose=False,
            ),
        )
        tp.build_or_upload(["__debug", "__test"] + (["upload"] if preload else []))
    else:
        ctx.invoke(
            cmd_run,
            project_dir=project_dir,
            project_conf=project_config.path,
            environment=[env_name],
            target=["__debug"] + (["upload"] if preload else []),
            verbose=verbose,
        )
    if preload:
        # Presumably gives the just-flashed firmware time to boot before the
        # debugger attaches — empirical delay; confirm before changing.
        time.sleep(5)
def has_debug_symbols(prog_path):
    """Return True when *prog_path* exists and contains every marker of a
    debug build (DWARF section names and -g/-Og flag strings)."""
    if not isfile(prog_path):
        return False
    pending = {
        b".debug_info",
        b".debug_abbrev",
        b" -Og",
        b" -g",
        # b"__PLATFORMIO_BUILD_DEBUG__",
    }
    with open(prog_path, "rb") as fp:
        previous = b""
        while pending:
            chunk = fp.read(1024)
            if not chunk:
                break
            # Search the previous chunk too, so markers spanning a chunk
            # boundary are still found.
            haystack = previous + chunk
            pending = {marker for marker in pending if marker not in haystack}
            previous = chunk
    return not pending
def is_prog_obsolete(prog_path):
    """Return True when *prog_path* changed since the last recorded checksum.

    The SHA1 of the file is compared against a sidecar ``<prog>.sha1`` file,
    which is refreshed whenever the program is considered obsolete.
    """
    checksum_path = prog_path + ".sha1"
    if not isfile(prog_path):
        return True
    digest = sha1()
    with open(prog_path, "rb") as fp:
        for chunk in iter(lambda: fp.read(1024), b""):
            digest.update(chunk)
    current = digest.hexdigest()
    previous = None
    if isfile(checksum_path):
        with open(checksum_path) as fp:
            previous = fp.read()
    if current == previous:
        return False
    # Record the new checksum for the next comparison.
    with open(checksum_path, "w") as fp:
        fp.write(current)
    return True
def reveal_debug_port(env_debug_port, tool_name, tool_settings):
    """Resolve the debug port for the configured tool.

    ``env_debug_port`` may be an explicit port or a glob pattern matched
    against detected serial ports. Returns None for tools that do not
    require a port; raises DebugInvalidOptionsError when a required port
    cannot be found.
    """

    def _get_pattern():
        # Treat the configured value as a glob pattern only when it contains
        # glob metacharacters; otherwise it is an explicit port name.
        if not env_debug_port:
            return None
        if set(["*", "?", "[", "]"]) & set(env_debug_port):
            return env_debug_port
        return None

    def _is_match_pattern(port):
        pattern = _get_pattern()
        if not pattern:
            return True
        return fnmatch(port, pattern)

    def _look_for_serial_port(hwids):
        # Scan USB serial ports; for Black Magic probes pick the GDB server
        # endpoint, otherwise match by the tool's known VID:PID pairs.
        for item in util.get_serialports(filter_hwid=True):
            if not _is_match_pattern(item["port"]):
                continue
            port = item["port"]
            if tool_name.startswith("blackmagic"):
                if IS_WINDOWS and port.startswith("COM") and len(port) > 4:
                    # COM10 and above require the \\.\ device-namespace prefix.
                    port = "\\\\.\\%s" % port
                if "GDB" in item["description"]:
                    return port
            for hwid in hwids:
                hwid_str = ("%s:%s" % (hwid[0], hwid[1])).replace("0x", "")
                if hwid_str in item["hwid"]:
                    return port
        return None

    if env_debug_port and not _get_pattern():
        # Explicit (non-pattern) port: trust the configuration as-is.
        return env_debug_port
    if not tool_settings.get("require_debug_port"):
        return None

    debug_port = _look_for_serial_port(tool_settings.get("hwids", []))
    if not debug_port:
        raise DebugInvalidOptionsError("Please specify `debug_port` for environment")
    return debug_port
| apache-2.0 |
sam-tsai/django-old | django/conf/locale/en/formats.py | 105 | 1357 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# Output formats use Django's date-format specifiers (django.utils.dateformat).
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday

# Input formats use strptime() syntax and are tried in order; the commented
# entries are deliberately disabled alternatives.
DATE_INPUT_FORMATS = (
    '%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
    # '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
    # '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
    # '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
    # '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S', # '14:30:59'
    '%H:%M', # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M', # '2006-10-25 14:30'
    '%Y-%m-%d', # '2006-10-25'
    '%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
    '%m/%d/%Y %H:%M', # '10/25/2006 14:30'
    '%m/%d/%Y', # '10/25/2006'
    '%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
    '%m/%d/%y %H:%M', # '10/25/06 14:30'
    '%m/%d/%y', # '10/25/06'
)

# Number formatting, e.g. 1,234,567.89
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
petecummings/django-blog-zinnia | zinnia/admin/category.py | 4 | 1223 | """CategoryAdmin for Zinnia"""
from django.contrib import admin
from django.utils.html import format_html
from django.core.urlresolvers import NoReverseMatch
from django.utils.translation import ugettext_lazy as _
from zinnia.admin.forms import CategoryAdminForm
class CategoryAdmin(admin.ModelAdmin):
    """
    Admin for Category model.
    """
    form = CategoryAdminForm
    fields = ('title', 'parent', 'description', 'slug')
    list_display = ('title', 'slug', 'get_tree_path', 'description')
    prepopulated_fields = {'slug': ('title', )}  # auto-fill slug from title
    search_fields = ('title', 'description')
    list_filter = ('parent',)

    def __init__(self, model, admin_site):
        # NOTE(review): the admin site is attached to the form class —
        # presumably consumed by CategoryAdminForm; confirm there.
        self.form.admin_site = admin_site
        super(CategoryAdmin, self).__init__(model, admin_site)

    def get_tree_path(self, category):
        """
        Return the category's tree path in HTML.
        """
        try:
            return format_html(
                '<a href="{}" target="blank">/{}/</a>',
                category.get_absolute_url(), category.tree_path)
        except NoReverseMatch:
            # No URL pattern could be resolved: fall back to plain text.
            return '/%s/' % category.tree_path
    get_tree_path.allow_tags = True  # let the changelist render the HTML
    get_tree_path.short_description = _('tree path')
shakamunyi/neutron-vrrp | neutron/plugins/brocade/nos/fake_nosdriver.py | 8 | 3548 | # Copyright 2013 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Authors:
# Varma Bhupatiraju (vbhupati@#brocade.com)
# Shiv Haris (sharis@brocade.com)
"""FAKE DRIVER, for unit tests purposes.
Brocade NOS Driver implements NETCONF over SSHv2 for
Neutron network life-cycle management.
"""
class NOSdriver():
    """NOS NETCONF interface driver for Neutron network.
    Fake: Handles life-cycle management of Neutron network,
    leverages AMPP on NOS
    (for use by unit tests, avoids touching any hardware)
    """
    # Every method below is a deliberate no-op: this class mirrors the
    # public interface of the real NOSdriver so unit tests can substitute
    # it without opening any NETCONF/SSH session.
    def __init__(self):
        pass
    def connect(self, host, username, password):
        """Connect via SSH and initialize the NETCONF session."""
        pass
    def create_network(self, host, username, password, net_id):
        """Creates a new virtual network."""
        pass
    def delete_network(self, host, username, password, net_id):
        """Deletes a virtual network."""
        pass
    def associate_mac_to_network(self, host, username, password,
                                 net_id, mac):
        """Associates a MAC address to virtual network."""
        pass
    def dissociate_mac_from_network(self, host, username, password,
                                    net_id, mac):
        """Dissociates a MAC address from virtual network."""
        pass
    # VLAN interface management stubs.
    def create_vlan_interface(self, mgr, vlan_id):
        """Configures a VLAN interface."""
        pass
    def delete_vlan_interface(self, mgr, vlan_id):
        """Deletes a VLAN interface."""
        pass
    # Port-profile (AMPP) management stubs.
    def get_port_profiles(self, mgr):
        """Retrieves all port profiles."""
        pass
    def get_port_profile(self, mgr, name):
        """Retrieves a port profile."""
        pass
    def create_port_profile(self, mgr, name):
        """Creates a port profile."""
        pass
    def delete_port_profile(self, mgr, name):
        """Deletes a port profile."""
        pass
    def activate_port_profile(self, mgr, name):
        """Activates a port profile."""
        pass
    def deactivate_port_profile(self, mgr, name):
        """Deactivates a port profile."""
        pass
    def associate_mac_to_port_profile(self, mgr, name, mac_address):
        """Associates a MAC address to a port profile."""
        pass
    def dissociate_mac_from_port_profile(self, mgr, name, mac_address):
        """Dissociates a MAC address from a port profile."""
        pass
    def create_vlan_profile_for_port_profile(self, mgr, name):
        """Creates VLAN sub-profile for port profile."""
        pass
    def configure_l2_mode_for_vlan_profile(self, mgr, name):
        """Configures L2 mode for VLAN sub-profile."""
        pass
    def configure_trunk_mode_for_vlan_profile(self, mgr, name):
        """Configures trunk mode for VLAN sub-profile."""
        pass
    def configure_allowed_vlans_for_vlan_profile(self, mgr, name, vlan_id):
        """Configures allowed VLANs for VLAN sub-profile."""
        pass
| apache-2.0 |
yugang/crosswalk-test-suite | wrt/wrt-sharemode-android-tests/inst.apk.py | 2 | 6335 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
# Location of this script; its directory name doubles as the package name
# used to build host-side install paths.
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
# Default install prefix; overridden by the -t option in main().
TEST_PREFIX = os.environ['HOME']
# Parsed command-line options; populated in main().
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
    """Run *cmd* in a shell, echo its output, and return (exit_code, lines)."""
    # Do not need handle timeout in this short script, let tool do it
    print "-->> \"%s\"" % cmd
    output = []
    cmd_return_code = 1
    cmd_proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while True:
        output_line = cmd_proc.stdout.readline().strip("\r\n")
        cmd_return_code = cmd_proc.poll()
        # poll() stays None while the child runs; stop only once the
        # process has exited and its output is fully drained.
        if output_line == '' and cmd_return_code != None:
            break
        sys.stdout.write("%s\n" % output_line)
        sys.stdout.flush()
        output.append(output_line)
    return (cmd_return_code, output)
def overwriteCopy(src, dest, symlinks=False, ignore=None):
    """Recursively copy *src* onto *dest*, overwriting existing entries.

    Unlike shutil.copytree, the destination may already exist.
    *ignore* follows the shutil.copytree callback convention.
    """
    if not os.path.exists(dest):
        os.makedirs(dest)
    shutil.copystat(src, dest)
    sub_list = os.listdir(src)
    if ignore:
        excl = ignore(src, sub_list)
        sub_list = [x for x in sub_list if x not in excl]
    for i_sub in sub_list:
        s_path = os.path.join(src, i_sub)
        d_path = os.path.join(dest, i_sub)
        if symlinks and os.path.islink(s_path):
            # Recreate the symlink rather than copying its target.
            if os.path.lexists(d_path):
                os.remove(d_path)
            os.symlink(os.readlink(s_path), d_path)
            try:
                s_path_s = os.lstat(s_path)
                # NOTE(review): 'stat' is never imported in this script, so
                # this line raises NameError, which the except below swallows;
                # link permissions are effectively never copied — confirm.
                s_path_mode = stat.S_IMODE(s_path_s.st_mode)
                os.lchmod(d_path, s_path_mode)
            except Exception, e:
                pass
        elif os.path.isdir(s_path):
            overwriteCopy(s_path, d_path, symlinks, ignore)
        else:
            shutil.copy2(s_path, d_path)
def doCopy(src_item=None, dest_item=None):
    """Copy a file or directory tree; return True on success, False otherwise."""
    try:
        if os.path.isdir(src_item):
            overwriteCopy(src_item, dest_item, symlinks=True)
        else:
            # Create parent directories for a plain-file copy.
            if not os.path.exists(os.path.dirname(dest_item)):
                os.makedirs(os.path.dirname(dest_item))
            shutil.copy2(src_item, dest_item)
    except Exception, e:
        return False
    return True
def uninstPKGs():
    """Uninstall every bundled .apk from the device via adb.

    Returns False if adb reports "Failure" for any package.
    """
    action_status = True
    for root, dirs, files in os.walk(SCRIPT_DIR):
        for file in files:
            if file.endswith(".apk"):
                # Package name convention: org.xwalk.<apk basename>.
                cmd = "%s -s %s uninstall org.xwalk.%s" % (
                    ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
                (return_code, output) = doCMD(cmd)
                for line in output:
                    if "Failure" in line:
                        action_status = False
                        break
    #if os.path.isdir("%s/opt/%s/" % (TEST_PREFIX, PKG_NAME)):
    #shutil.rmtree("%s/opt/%s/" % (TEST_PREFIX, PKG_NAME))
    return action_status
def instPKGs():
    """Copy the test-suite payload under TEST_PREFIX and stage the runtime.

    Returns False if any copy step or the config lookup fails.
    """
    action_status = True
    #for root, dirs, files in os.walk(SCRIPT_DIR):
    #    for file in files:
    #        if file.endswith(".apk"):
    #            cmd = "%s -s %s install %s" % (ADB_CMD,
    #            PARAMETERS.device, os.path.join(root, file))
    for item in glob.glob("%s/*" % SCRIPT_DIR):
        # Skip the installer artifacts themselves; copy everything else.
        if item.endswith(".apk"):
            continue
        elif item.endswith("inst.py"):
            continue
        else:
            item_name = os.path.basename(item)
            if not doCopy(item, "%s/opt/%s/%s" % (TEST_PREFIX, PKG_NAME, item_name)):
                action_status = False
    os.rename("%s/opt/%s/resources/apk/webappintel.apk" % (TEST_PREFIX, PKG_NAME),"%s/opt/%s/resources/apk/WebApp.apk" % (TEST_PREFIX, PKG_NAME))
    print "Package push to host %s/opt/%s successfully!" % (TEST_PREFIX, PKG_NAME)
    path = "/tmp/Crosswalk_sharemode.conf"
    if os.path.exists(path):
        if not doCopy(path, "%s/opt/%s/Crosswalk_sharemode.conf" % (TEST_PREFIX, PKG_NAME)):
            action_status = False
    # Extract the configured crosswalk package location from the conf file.
    (return_code, output) = doCMD("cat \"%s/opt/%s/Crosswalk_sharemode.conf\" | grep \"Android_Crosswalk_Path\" | cut -d \"=\" -f 2" % (TEST_PREFIX, PKG_NAME))
    for line in output:
        if "Failure" in line:
            action_status = False
            break
    if not output == []:
        ANDROID_CROSSWALK_PATH = output[0]
        CROSSWALK = os.path.basename(ANDROID_CROSSWALK_PATH)
        if not doCopy(ANDROID_CROSSWALK_PATH, "%s/opt/%s/resources/installer/%s" % (TEST_PREFIX, PKG_NAME, CROSSWALK)):
            action_status = False
    return action_status
def main():
    """Parse options and install (-i) or uninstall (-u) the shared runtime."""
    try:
        global TEST_PREFIX
        usage = "usage: inst.py -i"
        opts_parser = OptionParser(usage=usage)
        opts_parser.add_option(
            "-s", dest="device", action="store", help="Specify device")
        opts_parser.add_option(
            "-i", dest="binstpkg", action="store_true", help="Install package")
        opts_parser.add_option(
            "-u", dest="buninstpkg", action="store_true", help="Uninstall package")
        opts_parser.add_option(
            "-t", dest="testprefix", action="store", help="unzip path prefix", default=os.environ["HOME"])
        global PARAMETERS
        (PARAMETERS, args) = opts_parser.parse_args()
    except Exception, e:
        print "Got wrong option: %s, exit ..." % e
        sys.exit(1)
    if not PARAMETERS.device:
        # No device given: fall back to the first one adb reports.
        (return_code, output) = doCMD("adb devices")
        for line in output:
            if str.find(line, "\tdevice") != -1:
                PARAMETERS.device = line.split("\t")[0]
                break
    TEST_PREFIX = PARAMETERS.testprefix
    if not PARAMETERS.device:
        print "No device found"
        sys.exit(1)
    if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
        print "-i and -u are conflict"
        sys.exit(1)
    if PARAMETERS.buninstpkg:
        os.system("%s -s %s uninstall %s" % (ADB_CMD, PARAMETERS.device, "org.xwalk.runtime.lib"))
        #if not uninstPKGs():
        #sys.exit(1)
    else:
        # -i is the default action when -u is not given.
        os.system("%s -s %s install -r %s" % (ADB_CMD, PARAMETERS.device, "resources/installer/XWalkRuntimeLib.apk"))
        #if not instPKGs():
        #sys.exit(1)
# Script entry point: exit code 0 signals success to the test harness.
if __name__ == "__main__":
    main()
    sys.exit(0)
| bsd-3-clause |
javaarchive/idle | systools.py | 2 | 1768 | import pickle as p
class sysfile:
    """Thin wrapper around a file object that snapshots its contents.

    The snapshot is kept in ``self.prop`` with keys: name, opentype,
    data (list of lines) and datastr (full text). ``pack``/``unpack``
    below rely on ``datastr`` to round-trip file contents.
    """
    def __init__(self, name="program.log", ftype="r+"):
        """Open *name* with mode *ftype* and cache its contents in self.prop."""
        self.f = open(name, ftype)
        try:
            lines = self.f.readlines()
            # readlines() leaves the cursor at EOF, so rewind before read();
            # without the seek the original code always stored '' here,
            # which silently broke unpack().
            self.f.seek(0)
            text = self.f.read()
            self.prop = {"name": name, "opentype": ftype,
                         "data": lines, "datastr": text}
        except Exception:
            # Write-only modes (e.g. "w") cannot be read; fall back to a
            # metadata-only snapshot, as before.
            print("error:fileerror")
            self.prop = {"name": name, "opentype": ftype}
        print(self.prop)
        self.ft = ftype

    def init(self, name="program.log", ftype="r+"):
        """Close the current file and open/snapshot another one in place."""
        self.f.close()
        self.f = open(name, ftype)
        lines = self.f.readlines()
        self.f.seek(0)  # same rewind fix as in __init__
        self.prop = {"name": name, "opentype": ftype,
                     "data": lines, "datastr": self.f.read()}
        self.ft = ftype

    def addlines(self, lines=()):
        """Write *lines* to the file unless it was opened read-only."""
        # Original checked self.ftype, an attribute that is never assigned
        # (__init__ stores the mode in self.ft), so this method always
        # raised AttributeError.
        if not self.ft == "r":
            self.f.writelines(lines)

    def add(self, word="", glitch=""):
        """Write *word* to the file; *glitch* is only echoed for debugging."""
        print("the glitch was", glitch)
        self.f.write(word)

    def close(self):
        """Close and drop the underlying file object."""
        self.f.close()
        self.f = None
def pack(files,filen):
    """Pickle snapshots of every file in *files* into the archive *filen*."""
    packf=[]
    for x in files:
        print("packing file:",x)
        # Snapshot the file's contents into s.prop, then close the handle
        # so the sysfile instance is picklable (open files are not).
        s=sysfile(x)
        s.close()
        packf.append(s)
    p.dump(packf,open(filen,"wb"))
def unpack(filen):
    """Recreate the files stored in archive *filen* under their saved names."""
    f=open(filen,"rb+")
    dat=p.load(f)
    for data in dat:
        # Re-open each saved name in write mode and restore its text.
        cf=sysfile(data.prop["name"],"w")
        print("this is: ",data.prop["datastr"])
        cf.add(data.prop["datastr"])#prop["datastr"])
        cf.close()
def test():
    """Smoke test: pack read.txt into r.ppf (expects read.txt to exist)."""
    pack(["read.txt"],"r.ppf")
| mit |
jmchilton/galaxy-central | galaxy/tools/parameters.py | 1 | 19896 | """
Classes encapsulating tool parameters
"""
import logging, string, sys
from galaxy import config, datatypes, util, form_builder
import validation
from elementtree.ElementTree import XML, Element
log = logging.getLogger(__name__)
class ToolParameter( object ):
    """
    Describes a parameter accepted by a tool. This is just a simple stub at the
    moment but in the future should encapsulate more complex parameters (lists
    of valid choices, validation logic, ...)
    """
    def __init__( self, tool, param ):
        self.tool = tool
        self.name = param.get("name")
        self.label = util.xml_text(param, "label")
        self.help = util.xml_text(param, "help")
        self.html = "no html set"
        # Collect <validator> child elements; they are applied in validate().
        self.validators = []
        for elem in param.findall("validator"):
            self.validators.append( validation.Validator.from_element( elem ) )
    def get_label( self ):
        """Return user friendly name for the parameter"""
        if self.label: return self.label
        else: return self.name
    def get_html( self, trans=None, value=None, other_values={} ):
        """
        Returns the html widget corresponding to the paramter.
        Optionally attempt to retain the current value specific by 'value'
        """
        return self.html
    def get_required_enctype( self ):
        """
        If this parameter needs the form to have a specific encoding
        return it, otherwise return None (indicating compatibility with
        any encoding)
        """
        return None
    def filter_value( self, value, trans=None, other_values={} ):
        """
        Parse the value returned by the view into a form usable by the tool OR
        raise a ValueError.
        """
        return value
    def to_string( self, value, app ):
        """Convert a value to a string representation suitable for persisting"""
        return str( value )
    def to_python( self, value, app ):
        """Convert a value created with to_string back to an object representation"""
        return value
    def validate( self, value, history=None ):
        # Each validator raises on failure; passing silently means valid.
        for validator in self.validators:
            validator.validate( value, history )
    @classmethod
    def build( cls, tool, param ):
        """Factory method to create parameter of correct type"""
        # Dispatch on the XML 'type' attribute via the parameter_types
        # registry defined at the bottom of this module.
        param_type = param.get("type")
        if not param_type or param_type not in parameter_types:
            raise ValueError( "Unknown tool parameter type '%s'" % param_type )
        else:
            return parameter_types[param_type]( tool, param )
class TextToolParameter( ToolParameter ):
    """
    Parameter that can take on any text value.
    >>> p = TextToolParameter( None, XML( '<param name="blah" type="text" size="4" value="default" />' ) )
    >>> print p.name
    blah
    >>> print p.get_html()
    <input type="text" name="blah" size="4" value="default">
    >>> print p.get_html( value="meh" )
    <input type="text" name="blah" size="4" value="meh">
    """
    def __init__( self, tool, elem ):
        ToolParameter.__init__( self, tool, elem )
        self.name = elem.get( 'name' )
        self.size = elem.get( 'size' )
        self.value = elem.get( 'value' )
        # area="true" renders a multi-line <textarea> instead of an <input>.
        self.area = str_bool( elem.get( 'area', False ) )
    def get_html( self, trans=None, value=None, other_values={} ):
        # A submitted value takes precedence over the XML default.
        if self.area:
            return form_builder.TextArea( self.name, self.size, value or self.value ).get_html()
        return form_builder.TextField( self.name, self.size, value or self.value ).get_html()
class IntegerToolParameter( TextToolParameter ):
    """
    Parameter that takes an integer value.
    >>> p = IntegerToolParameter( None, XML( '<param name="blah" type="integer" size="4" value="10" />' ) )
    >>> print p.name
    blah
    >>> print p.get_html()
    <input type="text" name="blah" size="4" value="10">
    >>> type( p.filter_value( "10" ) )
    <type 'int'>
    >>> type( p.filter_value( "bleh" ) )
    Traceback (most recent call last):
    ...
    ValueError: An integer is required
    """
    def filter_value( self, value, trans=None, other_values={} ):
        """Convert *value* to an int, raising ValueError with a user-facing message."""
        try:
            return int( value )
        # Narrowed from a bare except: only conversion failures should map to
        # the user-facing error (the bare except also swallowed SystemExit
        # and KeyboardInterrupt). The message text is asserted by the doctest.
        except ( ValueError, TypeError ):
            raise ValueError( "An integer is required" )
    def to_python( self, value, app ):
        """Restore an int from the persisted string representation."""
        return int( value )
class FloatToolParameter( TextToolParameter ):
    """
    Parameter that takes a real number value.
    >>> p = FloatToolParameter( None, XML( '<param name="blah" type="float" size="4" value="3.141592" />' ) )
    >>> print p.name
    blah
    >>> print p.get_html()
    <input type="text" name="blah" size="4" value="3.141592">
    >>> type( p.filter_value( "36.1" ) )
    <type 'float'>
    >>> type( p.filter_value( "bleh" ) )
    Traceback (most recent call last):
    ...
    ValueError: A real number is required
    """
    # Doctest XML fixed: it previously declared type="integer" for this
    # float parameter (the attribute is not read by the constructor, so
    # the doctest output is unchanged).
    def filter_value( self, value, trans=None, other_values={} ):
        """Convert *value* to a float, raising ValueError with a user-facing message."""
        try:
            return float( value )
        # Narrowed from a bare except, mirroring IntegerToolParameter;
        # the message text is asserted by the doctest.
        except ( ValueError, TypeError ):
            raise ValueError( "A real number is required")
    def to_python( self, value, app ):
        """Restore a float from the persisted string representation."""
        return float( value )
class BooleanToolParameter( ToolParameter ):
    """
    Parameter that takes one of two values.
    >>> p = BooleanToolParameter( None, XML( '<param name="blah" type="boolean" checked="yes" truevalue="bulletproof vests" falsevalue="cellophane chests" />' ) )
    >>> print p.name
    blah
    >>> print p.get_html()
    <input type="checkbox" name="blah" value="true" checked><input type="hidden" name="blah" value="true">
    >>> print p.filter_value( ["true","true"] )
    bulletproof vests
    >>> print p.filter_value( ["true"] )
    cellophane chests
    """
    def __init__( self, tool, elem ):
        ToolParameter.__init__( self, tool, elem )
        self.truevalue = elem.get( 'truevalue', 'true' )
        self.falsevalue = elem.get( 'falsevalue', 'false' )
        self.name = elem.get( 'name' )
        self.checked = elem.get( 'checked' )
    def get_html( self, trans=None, value=None, other_values={} ):
        checked = self.checked
        # A submitted value overrides the XML 'checked' default.
        if value: checked = form_builder.CheckboxField.is_checked( value )
        return form_builder.CheckboxField( self.name, checked ).get_html()
    def filter_value( self, value, trans=None, other_values={} ):
        # The checkbox is paired with a hidden input (see the doctest above),
        # so a checked box submits two values; is_checked() encodes that.
        if form_builder.CheckboxField.is_checked( value ):
            return self.truevalue
        else:
            return self.falsevalue
    def to_python( self, value, app ):
        return ( value == 'True' )
class FileToolParameter( ToolParameter ):
    """
    Parameter that takes an uploaded file as a value.
    >>> p = FileToolParameter( None, XML( '<param name="blah" type="file"/>' ) )
    >>> print p.name
    blah
    >>> print p.get_html()
    <input type="file" name="blah">
    """
    def __init__( self, tool, elem ):
        """
        Example: C{<param name="bins" type="file" />}
        """
        ToolParameter.__init__( self, tool, elem )
        self.html = form_builder.FileField( elem.get( 'name') ).get_html()
    def get_required_enctype( self ):
        """
        File upload elements require the multipart/form-data encoding
        """
        return "multipart/form-data"
    def to_string( self, value, app ):
        # Uploaded files cannot be round-tripped through the persisted
        # string form, hence the hard failure.
        raise Exception( "FileToolParameter cannot be persisted" )
    def to_python( self, value, app ):
        raise Exception( "FileToolParameter cannot be persisted" )
class HiddenToolParameter( ToolParameter ):
    """
    Parameter that takes one of two values.
    FIXME: This seems hacky, parameters should only describe things the user
    might change. It is used for 'initializing' the UCSC proxy tool
    >>> p = HiddenToolParameter( None, XML( '<param name="blah" type="hidden" value="wax so rockin"/>' ) )
    >>> print p.name
    blah
    >>> print p.get_html()
    <input type="hidden" name="blah" value="wax so rockin">
    """
    def __init__( self, tool, elem ):
        ToolParameter.__init__( self, tool, elem )
        self.name = elem.get( 'name' )
        self.value = elem.get( 'value' )
        # The HTML is static, so it can be rendered once at construction.
        self.html = form_builder.HiddenField( self.name, self.value ).get_html()
## This is clearly a HACK, parameters should only be used for things the user
## can change, there needs to be a different way to specify this. I'm leaving
## it for now to avoid breaking any tools.
class BaseURLToolParameter( ToolParameter ):
    """
    Returns a parameter the contains its value prepended by the
    current server base url. Used in all redirects.
    """
    def __init__( self, tool, elem ):
        ToolParameter.__init__( self, tool, elem )
        self.name = elem.get( 'name' )
        self.value = elem.get( 'value', '' )
    def get_html( self, trans=None, value=None, other_values={} ):
        # Unlike HiddenToolParameter, the HTML must be built per request
        # because the server base URL is only known from trans.
        return form_builder.HiddenField( self.name, trans.request.base + self.value ).get_html()
class SelectToolParameter( ToolParameter ):
    """
    Parameter that takes on one (or many) or a specific set of values.
    TODO: There should be an alternate display that allows single selects to be
    displayed as radio buttons and multiple selects as a set of checkboxes
    >>> p = SelectToolParameter( None, XML(
    ... '''
    ... <param name="blah" type="select">
    ... <option value="x">I am X</option>
    ... <option value="y" selected="true">I am Y</option>
    ... <option value="z">I am Z</option>
    ... </param>
    ... ''' ) )
    >>> print p.name
    blah
    >>> print p.get_html()
    <select name="blah">
    <option value="x">I am X</option>
    <option value="y" selected>I am Y</option>
    <option value="z">I am Z</option>
    </select>
    >>> print p.get_html( value="z" )
    <select name="blah">
    <option value="x">I am X</option>
    <option value="y">I am Y</option>
    <option value="z" selected>I am Z</option>
    </select>
    >>> print p.filter_value( "y" )
    y
    >>> p = SelectToolParameter( None, XML(
    ... '''
    ... <param name="blah" type="select" multiple="true">
    ... <option value="x">I am X</option>
    ... <option value="y" selected="true">I am Y</option>
    ... <option value="z" selected="true">I am Z</option>
    ... </param>
    ... ''' ) )
    >>> print p.name
    blah
    >>> print p.get_html()
    <select name="blah" multiple>
    <option value="x">I am X</option>
    <option value="y" selected>I am Y</option>
    <option value="z" selected>I am Z</option>
    </select>
    >>> print p.get_html( value=["x","y"])
    <select name="blah" multiple>
    <option value="x" selected>I am X</option>
    <option value="y" selected>I am Y</option>
    <option value="z">I am Z</option>
    </select>
    >>> print p.filter_value( ["y", "z"] )
    y,z
    >>> p = SelectToolParameter( None, XML(
    ... '''
    ... <param name="blah" type="select" multiple="true" display="checkboxes">
    ... <option value="x">I am X</option>
    ... <option value="y" selected="true">I am Y</option>
    ... <option value="z" selected="true">I am Z</option>
    ... </param>
    ... ''' ) )
    >>> print p.name
    blah
    >>> print p.get_html()
    <div><input type="checkbox" name="blah" value="x">I am X</div>
    <div><input type="checkbox" name="blah" value="y" checked>I am Y</div>
    <div><input type="checkbox" name="blah" value="z" checked>I am Z</div>
    >>> print p.get_html( value=["x","y"])
    <div><input type="checkbox" name="blah" value="x" checked>I am X</div>
    <div><input type="checkbox" name="blah" value="y" checked>I am Y</div>
    <div><input type="checkbox" name="blah" value="z">I am Z</div>
    >>> print p.filter_value( ["y", "z"] )
    y,z
    """
    def __init__( self, tool, elem):
        ToolParameter.__init__( self, tool, elem )
        self.multiple = str_bool( elem.get( 'multiple', False ) )
        self.display = elem.get( 'display', None )
        self.separator = elem.get( 'separator', ',' )
        self.legal_values = set()
        self.dynamic_options = elem.get( "dynamic_options", None )
        # Static options are parsed up front; dynamic options are an
        # expression evaluated at render/filter time (see get_html).
        if self.dynamic_options is None:
            self.options = list()
            for index, option in enumerate( elem.findall("option") ):
                value = option.get( "value" )
                self.legal_values.add( value )
                selected = ( option.get( "selected", None ) == "true" )
                self.options.append( ( option.text, value, selected ) )
    def get_html( self, trans=None, value=None, other_values={} ):
        if value is not None:
            if not isinstance( value, list ): value = [ value ]
        field = form_builder.SelectField( self.name, self.multiple, self.display )
        if self.dynamic_options:
            # NOTE: eval() of tool-config text. This is acceptable only
            # because tool XML comes from the server admin, never from a
            # user — do not feed user input into dynamic_options.
            options = eval( self.dynamic_options, self.tool.code_namespace, other_values )
        else:
            options = self.options
        for text, optval, selected in options:
            # A submitted value overrides the options' declared selection.
            if value: selected = ( optval in value )
            field.add_option( text, optval, selected )
        return field.get_html()
    def filter_value( self, value, trans=None, other_values={} ):
        # Recompute the legal set for dynamic options so validation matches
        # exactly what get_html offered.
        if self.dynamic_options:
            legal_values = set( v for _, v, _ in eval( self.dynamic_options, self.tool.code_namespace, other_values ) )
        else:
            legal_values = self.legal_values
        if isinstance( value, list ):
            assert self.multiple, "Multiple values provided but parameter is not expecting multiple values"
            rval = []
            for v in value:
                v = util.restore_text( v )
                assert v in legal_values
                rval.append( v )
            return self.separator.join( rval )
        else:
            value = util.restore_text( value )
            assert value in legal_values
            return value
class DataToolParameter( ToolParameter ):
    """
    Parameter that takes on one (or many) or a specific set of values.
    TODO: There should be an alternate display that allows single selects to be
    displayed as radio buttons and multiple selects as a set of checkboxes
    >>> # Mock up a history (not connected to database)
    >>> from galaxy.model import History, Dataset
    >>> from cookbook.patterns import Bunch
    >>> hist = History()
    >>> hist.add_dataset( Dataset( id=1, extension='text' ) )
    >>> hist.add_dataset( Dataset( id=2, extension='bed' ) )
    >>> hist.add_dataset( Dataset( id=3, extension='fasta' ) )
    >>> hist.add_dataset( Dataset( id=4, extension='png' ) )
    >>> hist.add_dataset( Dataset( id=5, extension='interval' ) )
    >>> p = DataToolParameter( None, XML( '<param name="blah" type="data" format="interval"/>' ) )
    >>> print p.name
    blah
    >>> print p.get_html( trans=Bunch( history=hist ) )
    <select name="blah">
    <option value="2">2: Unnamed dataset</option>
    <option value="5" selected>5: Unnamed dataset</option>
    </select>
    """
    def __init__( self, tool, elem ):
        ToolParameter.__init__( self, tool, elem )
        # 'format' names a datatype; matching datasets are offered below.
        self.format = datatypes.get_datatype_by_extension( elem.get( 'format', 'data' ).lower() )
        self.multiple = str_bool( elem.get( 'multiple', False ) )
        self.optional = str_bool( elem.get( 'optional', False ) )
    def get_html( self, trans=None, value=None, other_values={} ):
        assert trans is not None, "DataToolParameter requires a trans"
        history = trans.history
        assert history is not None, "DataToolParameter requires a history"
        if value is not None:
            if type( value ) != list: value = [ value ]
        field = form_builder.SelectField( self.name, self.multiple )
        some_data = False
        # Offer only top-level datasets of a compatible datatype.
        for data in history.datasets:
            if isinstance( data.datatype, self.format.__class__ ) and not data.parent_id:
                some_data = True
                selected = ( value and ( data in value ) )
                field.add_option( "%d: %s" % ( data.hid, data.name[:30] ), data.id, selected )
        if some_data and value is None:
            # Ensure that the last item is always selected
            a, b, c = field.options[-1]; field.options[-1] = a, b, True
        else:
            # HACK: we should just disable the form or something
            field.add_option( "no data has the proper type", '' )
        if self.optional == True:
            field.add_option( "Selection is Optional", 'None', True )
        return field.get_html()
    def filter_value( self, value, trans, other_values={} ):
        if not value:
            raise ValueError( "A data of the appropriate type is required" )
        if value in [None, "None"]:
            # Optional selection: hand back a placeholder dataset in the
            # FAKE state instead of a real database row.
            temp_data = trans.app.model.Dataset()
            temp_data.state = temp_data.states.FAKE
            return temp_data
        if isinstance( value, list ):
            return [ trans.app.model.Dataset.get( v ) for v in value ]
        else:
            return trans.app.model.Dataset.get( value )
    def to_string( self, value, app ):
        # Datasets persist by database id; to_python reloads them.
        return value.id
    def to_python( self, value, app ):
        return app.model.Dataset.get( int( value ) )
class RawToolParameter( ToolParameter ):
    """
    Completely nondescript parameter, HTML representation is provided as text
    contents.
    >>> p = RawToolParameter( None, XML(
    ... '''
    ... <param name="blah" type="raw">
    ... <![CDATA[<span id="$name">Some random stuff</span>]]>
    ... </param>
    ... ''' ) )
    >>> print p.name
    blah
    >>> print p.get_html().strip()
    <span id="blah">Some random stuff</span>
    """
    def __init__( self, tool, elem ):
        ToolParameter.__init__( self, tool, elem )
        # The element text is a string.Template; placeholders like $name
        # are filled in from this instance's attributes.
        template = string.Template( elem.text )
        self.html = template.substitute( self.__dict__ )
# class HistoryIDParameter( ToolParameter ):
# """
# Parameter that takes a name value, makes history.id available.
#
# FIXME: This is a hack (esp. if hidden params are a hack) but in order to
# have the history accessable at the job level, it is necessary
# I also probably wrote this docstring test thing wrong.
#
# >>> from galaxy.model import History, Dataset
# >>> from cookbook.patterns import Bunch
# >>> hist = History( id=1 )
# >>> p = HistoryIDParameter( None, XML( '<param name="blah" type="history"/>' ) )
# >>> print p.name
# blah
# >>> html_string = '<input type="hidden" name="blah" value="%d">' % hist.id
# >>> assert p.get_html( trans=Bunch( history=hist ) ) == html_string
# """
# def __init__( self, tool, elem ):
# ToolParameter.__init__( self, tool, elem )
# self.name = elem.get('name')
# def get_html( self, trans, value=None, other_values={} ):
# assert trans.history is not None, "HistoryIDParameter requires a history"
# self.html = form_builder.HiddenField( self.name, trans.history.id ).get_html()
# return self.html
# Registry mapping the XML 'type' attribute to its parameter class;
# consulted by ToolParameter.build() above.
parameter_types = dict( text = TextToolParameter,
                        integer = IntegerToolParameter,
                        float = FloatToolParameter,
                        boolean = BooleanToolParameter,
                        select = SelectToolParameter,
                        hidden = HiddenToolParameter,
                        baseurl = BaseURLToolParameter,
                        file = FileToolParameter,
                        data = DataToolParameter,
                        raw = RawToolParameter )
def get_suite():
    """Get unittest suite for this module"""
    # The suite is built from the doctests embedded in the class docstrings
    # above, so those examples double as the module's regression tests.
    import doctest, sys
    return doctest.DocTestSuite( sys.modules[__name__] )
def str_bool(in_str):
    """
    Return True when str(in_str) equals 'true' (case-insensitive), else False.

    bool(str) is True for any non-empty string, so an explicit comparison
    is required; anything other than 'true' yields False.
    """
    return str(in_str).lower() == 'true'
casualuser/ajenti | plugins/webserver_common/api.py | 17 | 8267 | from ajenti.com import *
from ajenti.apis import API
from ajenti.api import CategoryPlugin, event
from ajenti.ui import UI
from ajenti import apis
class Webserver(API):
    # Plain data-transfer objects shared by the concrete webserver plugins.
    class VirtualHost:
        def __init__(self):
            self.name = ''    # host (config file) name
            self.config = ''  # raw configuration text
    class Module:
        def __init__(self):
            self.name = ''            # module name
            self.config = ''          # raw configuration text
            self.has_config = False   # whether an editable config exists
class WebserverPlugin(apis.services.ServiceControlPlugin):
    """
    Shared UI plugin for webserver management (virtual hosts and modules).
    Concrete plugins set the ws_* class attributes and supply a backend.
    """
    abstract = True
    ws_service = 'none'
    ws_title = 'none'
    ws_backend = None
    ws_mods = False
    ws_vhosts = True

    def on_init(self):
        self.service_name = self.ws_service
        # Tab indexes: Modules shifts down when the Hosts tab is hidden.
        self.tab_hosts = 0
        self.tab_mods = 1 if self.ws_vhosts else 0
        self._backend = self.ws_backend(self.app)

    def on_session_start(self):
        self._tab = 0
        self._creating_host = False
        self._editing_host = None
        # Fixed: this was misspelled '_editin_mod', so _editing_mod was
        # never initialized and get_ui_mods() raised AttributeError on the
        # first render of the Modules tab.
        self._editing_mod = None

    def get_config(self):
        return self.app.get_config(self._backend)

    def get_main_ui(self):
        """Build the tabbed main view (Hosts and/or Modules)."""
        ui = self.app.inflate('webserver_common:main')
        tc = UI.TabControl(active=self._tab)
        if self.ws_vhosts:
            tc.add('Hosts', self.get_ui_hosts(ui))
        else:
            ui.remove('addhost')
        if self.ws_mods:
            tc.add('Modules', self.get_ui_mods(ui))
        ui.append('main', tc)
        return ui

    def get_ui_hosts(self, gui):
        """Build the Hosts tab: one row per vhost plus any open dialog."""
        ui = self.app.inflate('webserver_common:hosts')
        tbl = ui.find('list')
        hosts = self._backend.get_hosts()
        for x in sorted(hosts.keys()):
            tbl.append(UI.DTR(
                UI.Image(file='/dl/core/ui/stock/status-%sabled.png'%(
                    'en' if hosts[x].enabled else 'dis')),
                UI.Label(text=x),
                UI.DTD(
                    UI.HContainer(
                        UI.TipIcon(
                            icon='/dl/core/ui/stock/edit.png',
                            id='edithost/'+x,
                            text='Edit'
                        ),
                        UI.TipIcon(
                            icon='/dl/core/ui/stock/'+ ('dis' if hosts[x].enabled else 'en') + 'able.png',
                            id='togglehost/'+x,
                            text='Disable' if hosts[x].enabled else 'Enable'
                        ),
                        UI.TipIcon(
                            icon='/dl/core/ui/stock/delete.png',
                            id='deletehost/'+x,
                            text='Delete',
                            warning='Delete host %s'%x
                        ),
                        spacing=0
                    ),
                    hidden=True
                )
            ))
        if self._creating_host:
            gui.append(
                'main',
                UI.InputBox(
                    text='Host config name',
                    id='dlgCreateHost'
                )
            )
        if self._editing_host is not None:
            gui.append(
                'main',
                UI.InputBox(
                    extra='code',
                    text='Host config',
                    value=self._backend.get_hosts()[self._editing_host].config,
                    id='dlgEditHost'
                )
            )
        return ui

    def get_ui_mods(self, gui):
        """Build the Modules tab: one row per module plus any open dialog."""
        ui = self.app.inflate('webserver_common:mods')
        tbl = ui.find('list')
        mods = self._backend.get_mods()
        for x in sorted(mods.keys()):
            tbl.append(UI.DTR(
                UI.Image(file='/dl/core/ui/stock/status-%sabled.png'%(
                    'en' if mods[x].enabled else 'dis')),
                UI.Label(text=x),
                UI.DTD(
                    UI.HContainer(
                        # Edit icon only for modules that ship a config file.
                        UI.TipIcon(
                            icon='/dl/core/ui/stock/edit.png',
                            id='editmod/'+x,
                            text='Edit'
                        ) if mods[x].has_config else None,
                        UI.TipIcon(
                            icon='/dl/core/ui/stock/'+ ('dis' if mods[x].enabled else 'en') + 'able.png',
                            id='togglemod/'+x,
                            text='Disable' if mods[x].enabled else 'Enable'
                        ),
                        spacing=0
                    ),
                    hidden=True
                )
            ))
        if self._editing_mod is not None:
            gui.append(
                'main',
                UI.InputBox(
                    extra='code',
                    text='Module config:',
                    value=self._backend.get_mods()[self._editing_mod].config,
                    id='dlgEditMod'
                )
            )
        return ui

    @event('button/click')
    def on_click(self, event, params, vars=None):
        """Dispatch row/toolbar button clicks by their id prefix."""
        if params[0] == 'togglehost':
            self._tab = self.tab_hosts
            h = self._backend.get_hosts()[params[1]]
            if h.enabled:
                self._backend.disable_host(params[1])
            else:
                self._backend.enable_host(params[1])
        if params[0] == 'deletehost':
            self._tab = self.tab_hosts
            self._backend.delete_host(params[1])
        if params[0] == 'edithost':
            self._tab = self.tab_hosts
            self._editing_host = params[1]
        if params[0] == 'addhost':
            self._tab = self.tab_hosts
            self._creating_host = True
        if params[0] == 'togglemod':
            self._tab = self.tab_mods
            h = self._backend.get_mods()[params[1]]
            if h.enabled:
                self._backend.disable_mod(params[1])
            else:
                self._backend.enable_mod(params[1])
        if params[0] == 'editmod':
            self._tab = self.tab_mods
            self._editing_mod = params[1]

    @event('dialog/submit')
    def on_submit(self, event, params, vars):
        """Handle dialog results; edit state is reset even on cancel."""
        if params[0] == 'dlgCreateHost':
            if vars.getvalue('action', '') == 'OK':
                h = apis.webserver.VirtualHost()
                h.name = vars.getvalue('value', '')
                h.config = self._backend.host_template % h.name
                self._backend.save_host(h)
            self._creating_host = False
        if params[0] == 'dlgEditHost':
            if vars.getvalue('action', '') == 'OK':
                h = self._backend.get_hosts()[self._editing_host]
                h.config = vars.getvalue('value', '')
                self._backend.save_host(h)
            self._editing_host = None
        if params[0] == 'dlgEditMod':
            if vars.getvalue('action', '') == 'OK':
                h = self._backend.get_mods()[self._editing_mod]
                h.config = vars.getvalue('value', '')
                self._backend.save_mod(h)
            self._editing_mod = None
| lgpl-3.0 |
EmreAtes/spack | var/spack/repos/builtin/packages/gnutls/package.py | 5 | 3543 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Gnutls(AutotoolsPackage):
    """GnuTLS is a secure communications library implementing the SSL, TLS
    and DTLS protocols and technologies around them. It provides a simple C
    language application programming interface (API) to access the secure
    communications protocols as well as APIs to parse and write X.509, PKCS
    #12, OpenPGP and other required structures. It is aimed to be portable
    and efficient with focus on security and interoperability."""
    homepage = "http://www.gnutls.org"
    url = "https://www.gnupg.org/ftp/gcrypt/gnutls/v3.5/gnutls-3.5.13.tar.xz"
    version('3.5.13', '4fd41ad86572933c2379b4cc321a0959')
    version('3.5.10', '336c03a71ba90184ffd0388075dde504')
    version('3.5.9', '0ab25eb6a1509345dd085bc21a387951')
    version('3.3.9', 'ff61b77e39d09f1140ab5a9cf52c58b6')
    variant('zlib', default=True, description='Enable zlib compression support')
    # Note that version 3.3.9 of gnutls doesn't support nettle 3.0.
    depends_on('nettle@:2.9', when='@3.3.9')
    depends_on('nettle', when='@3.5:')
    depends_on('zlib', when='+zlib')
    depends_on('gettext')
    depends_on('pkgconfig', type='build')
    # Build out of tree to keep the source directory pristine.
    build_directory = 'spack-build'
    def url_for_version(self, version):
        """Build the download URL; release tarballs are grouped by minor version."""
        url = "https://www.gnupg.org/ftp/gcrypt/gnutls/v{0}/gnutls-{1}.tar.xz"
        return url.format(version.up_to(2), version)
    def configure_args(self):
        """Assemble ./configure arguments from the spec's version and variants."""
        spec = self.spec
        args = [
            '--enable-static',
        ]
        if spec.satisfies('@3.5:'):
            # use shipped libraries, might be turned into variants
            args.append('--with-included-libtasn1')
            args.append('--with-included-unistring')
            args.append('--without-p11-kit')  # p11-kit@0.23.1: ...
        if '+zlib' in spec:
            args.append('--with-zlib')
        else:
            args.append('--without-zlib')
        # The full test suite is only wired up when 'spack install --test'
        # requested it.
        if self.run_tests:
            args.extend([
                '--enable-tests',
                '--enable-valgrind-tests',
                '--enable-full-test-suite',
            ])
        else:
            args.extend([
                '--disable-tests',
                '--disable-valgrind-tests',
                '--disable-full-test-suite',
            ])
        return args
| lgpl-2.1 |
fhahn/django-guardian | setup.py | 32 | 1607 | import os
import sys
from setuptools import setup, find_packages
from extras import RunFlakesCommand
# Import the package in place so its version and docstring can be read
# without installing it first.
guardian = __import__('guardian')
readme_file = os.path.join(os.path.dirname(__file__), 'README.rst')
try:
    long_description = open(readme_file).read()
except IOError as err:
    # Abort packaging if the README is missing; PyPI would otherwise show
    # an empty long description.
    sys.stderr.write("[ERROR] Cannot find file specified as "
                     "``long_description`` (%s)\n" % readme_file)
    sys.exit(1)
# Legacy setuptools flag: translate sources with 2to3 when installing
# under Python 3.
if sys.version_info >= (3,):
    extra_kwargs = {'use_2to3': True}
else:
    extra_kwargs = {}
setup(
    name = 'django-guardian',
    version = guardian.get_version(),
    url = 'http://github.com/lukaszb/django-guardian',
    author = 'Lukasz Balcerzak',
    author_email = 'lukaszbalcerzak@gmail.com',
    download_url='https://github.com/lukaszb/django-guardian/tags',
    description = guardian.__doc__.strip(),
    long_description = long_description,
    zip_safe = False,
    packages = find_packages(),
    include_package_data = True,
    license = 'BSD',
    install_requires = [
        'Django>=1.2',
    ],
    tests_require = [
        'mock',
    ],
    classifiers = ['Development Status :: 5 - Production/Stable',
                   'Environment :: Web Environment',
                   'Framework :: Django',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python',
                   'Topic :: Security',
                   ],
    test_suite='tests.main',
    cmdclass={'flakes': RunFlakesCommand},
    **extra_kwargs
)
| bsd-2-clause |
carljm/django | tests/logging_tests/tests.py | 7 | 19482 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import logging
import warnings
from contextlib import contextmanager
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.core import mail
from django.core.files.temp import NamedTemporaryFile
from django.core.management import color
from django.db import connection
from django.test import RequestFactory, SimpleTestCase, override_settings
from django.test.utils import LoggingCaptureMixin, patch_logger
from django.utils import six
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.log import (
DEFAULT_LOGGING, AdminEmailHandler, CallbackFilter, RequireDebugFalse,
RequireDebugTrue, ServerFormatter,
)
from .logconfig import MyEmailBackend
# logging config prior to using filter with mail_admins
# (mails every django.request ERROR to ADMINS unconditionally; used by
# tests that exercise the legacy configuration path).
OLD_LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
class LoggingFiltersTest(SimpleTestCase):
    """Checks for the RequireDebugFalse and RequireDebugTrue log filters."""

    def test_require_debug_false_filter(self):
        """
        Test the RequireDebugFalse filter class.
        """
        debug_filter = RequireDebugFalse()
        # The filter's verdict must be the inverse of the DEBUG setting.
        for debug, expected in ((True, False), (False, True)):
            with self.settings(DEBUG=debug):
                self.assertIs(debug_filter.filter("record is not used"), expected)

    def test_require_debug_true_filter(self):
        """
        Test the RequireDebugTrue filter class.
        """
        debug_filter = RequireDebugTrue()
        # The filter's verdict must track the DEBUG setting directly.
        for debug, expected in ((True, True), (False, False)):
            with self.settings(DEBUG=debug):
                self.assertIs(debug_filter.filter("record is not used"), expected)
class SetupDefaultLoggingMixin(object):
    # Installs Django's DEFAULT_LOGGING configuration for the duration of
    # the test class and restores the project configuration afterwards.
    @classmethod
    def setUpClass(cls):
        super(SetupDefaultLoggingMixin, cls).setUpClass()
        # Stash the project's LOGGING setting so tearDownClass can restore it.
        cls._logging = settings.LOGGING
        logging.config.dictConfig(DEFAULT_LOGGING)
    @classmethod
    def tearDownClass(cls):
        super(SetupDefaultLoggingMixin, cls).tearDownClass()
        # Reinstall the configuration that was active before this class ran.
        logging.config.dictConfig(cls._logging)
class DefaultLoggingTests(SetupDefaultLoggingMixin, LoggingCaptureMixin, SimpleTestCase):
    """Output of the base 'django' logger under the default configuration."""

    def _emit_and_expect(self, level, message, expected):
        # Log *message* via the given logger method and compare everything
        # captured so far against *expected*.
        getattr(self.logger, level)(message)
        self.assertEqual(self.logger_output.getvalue(), expected)

    def test_django_logger(self):
        """
        The 'django' base logger only output anything when DEBUG=True.
        """
        self._emit_and_expect('error', "Hey, this is an error.", '')
        with self.settings(DEBUG=True):
            self._emit_and_expect('error', "Hey, this is an error.", 'Hey, this is an error.\n')

    @override_settings(DEBUG=True)
    def test_django_logger_warning(self):
        self._emit_and_expect('warning', 'warning', 'warning\n')

    @override_settings(DEBUG=True)
    def test_django_logger_info(self):
        self._emit_and_expect('info', 'info', 'info\n')

    @override_settings(DEBUG=True)
    def test_django_logger_debug(self):
        # DEBUG-level records are filtered out even when DEBUG=True.
        self._emit_and_expect('debug', 'debug', '')
@override_settings(DEBUG=True, ROOT_URLCONF='logging_tests.urls')
class HandlerLoggingTests(SetupDefaultLoggingMixin, LoggingCaptureMixin, SimpleTestCase):
    """404 reporting through the default 'django' logger."""

    def _get_and_expect(self, path, expected_output):
        # Request *path* and compare the captured log output.
        self.client.get(path)
        self.assertEqual(self.logger_output.getvalue(), expected_output)

    def test_page_found_no_warning(self):
        # A resolvable URL produces no log output at all.
        self._get_and_expect('/innocent/', '')

    def test_page_not_found_warning(self):
        # An unresolvable URL is reported as a 'Not Found' line.
        self._get_and_expect('/does_not_exist/', 'Not Found: /does_not_exist/\n')
@override_settings(
    DEBUG=True,
    USE_I18N=True,
    LANGUAGES=[('en', 'English')],
    MIDDLEWARE=[
        'django.middleware.locale.LocaleMiddleware',
        'django.middleware.common.CommonMiddleware',
    ],
    ROOT_URLCONF='logging_tests.urls_i18n',
)
class I18nLoggingTests(SetupDefaultLoggingMixin, LoggingCaptureMixin, SimpleTestCase):
    """404 reporting with i18n URL patterns (plain and language-prefixed)."""

    def test_i18n_page_found_no_warning(self):
        # Resolvable URLs, with or without a language prefix, log nothing.
        for path in ('/exists/', '/en/exists/'):
            self.client.get(path)
        self.assertEqual(self.logger_output.getvalue(), '')

    def test_i18n_page_not_found_warning(self):
        # Each unresolvable request appends its own 'Not Found' line.
        for path in ('/this_does_not/', '/en/nor_this/'):
            self.client.get(path)
        self.assertEqual(self.logger_output.getvalue(), 'Not Found: /this_does_not/\nNot Found: /en/nor_this/\n')
class WarningLoggerTests(SimpleTestCase):
    """
    Tests that warnings output for RemovedInDjangoXXWarning (XX being the next
    Django version) is enabled and captured to the logging system
    """
    def setUp(self):
        # If tests are invoked with "-Wall" (or any -W flag actually) then
        # warning logging gets disabled (see configure_logging in django/utils/log.py).
        # However, these tests expect warnings to be logged, so manually force warnings
        # to the logs. Use getattr() here because the logging capture state is
        # undocumented and (I assume) brittle.
        self._old_capture_state = bool(getattr(logging, '_warnings_showwarning', False))
        logging.captureWarnings(True)
    def tearDown(self):
        # Reset warnings state.
        logging.captureWarnings(self._old_capture_state)
    @override_settings(DEBUG=True)
    def test_error_filter_still_raises(self):
        with warnings.catch_warnings():
            # Promote the deprecation warning to an error inside this
            # context only; the caller should then see it raised.
            warnings.filterwarnings(
                'error',
                category=RemovedInNextVersionWarning
            )
            with self.assertRaises(RemovedInNextVersionWarning):
                warnings.warn('Foo Deprecated', RemovedInNextVersionWarning)
class CallbackFilterTest(SimpleTestCase):
    """CallbackFilter delegates the pass/drop decision to its callback."""

    def test_sense(self):
        # The filter's verdict mirrors the callback's return value.
        always_drop = CallbackFilter(lambda r: False)
        always_pass = CallbackFilter(lambda r: True)
        self.assertEqual(always_drop.filter("record"), False)
        self.assertEqual(always_pass.filter("record"), True)

    def test_passes_on_record(self):
        # The exact record object must be handed through to the callback.
        seen = []

        def remember(record):
            seen.append(record)
            return True

        CallbackFilter(remember).filter("a record")
        self.assertEqual(seen, ["a record"])
class AdminEmailHandlerTest(SimpleTestCase):
    """Tests for django.utils.log.AdminEmailHandler, which mails ERROR
    records to the addresses in the ADMINS setting."""
    logger = logging.getLogger('django')
    def get_admin_email_handler(self, logger):
        # Ensure that AdminEmailHandler does not get filtered out
        # even with DEBUG=True.
        admin_email_handler = [
            h for h in logger.handlers
            if h.__class__.__name__ == "AdminEmailHandler"
        ][0]
        return admin_email_handler
    def test_fail_silently(self):
        # The mail connection must not raise on delivery errors, otherwise a
        # logging call could crash the request it is reporting on.
        admin_email_handler = self.get_admin_email_handler(self.logger)
        self.assertTrue(admin_email_handler.connection().fail_silently)
    @override_settings(
        ADMINS=[('whatever admin', 'admin@example.com')],
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-'
    )
    def test_accepts_args(self):
        """
        Ensure that user-supplied arguments and the EMAIL_SUBJECT_PREFIX
        setting are used to compose the email subject.
        Refs #16736.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []
            self.logger.error(message, token1, token2)
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR: Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters
    @override_settings(
        ADMINS=[('whatever admin', 'admin@example.com')],
        EMAIL_SUBJECT_PREFIX='-SuperAwesomeSubject-',
        INTERNAL_IPS=['127.0.0.1'],
    )
    def test_accepts_args_and_request(self):
        """
        Ensure that the subject is also handled if being
        passed a request object.
        """
        message = "Custom message that says '%s' and '%s'"
        token1 = 'ping'
        token2 = 'pong'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # Backup then override original filters
        orig_filters = admin_email_handler.filters
        try:
            admin_email_handler.filters = []
            rf = RequestFactory()
            request = rf.get('/')
            self.logger.error(
                message, token1, token2,
                extra={
                    'status_code': 403,
                    'request': request,
                }
            )
            self.assertEqual(len(mail.outbox), 1)
            self.assertEqual(mail.outbox[0].to, ['admin@example.com'])
            self.assertEqual(mail.outbox[0].subject,
                             "-SuperAwesomeSubject-ERROR (internal IP): Custom message that says 'ping' and 'pong'")
        finally:
            # Restore original filters
            admin_email_handler.filters = orig_filters
    @override_settings(
        ADMINS=[('admin', 'admin@example.com')],
        EMAIL_SUBJECT_PREFIX='',
        DEBUG=False,
    )
    def test_subject_accepts_newlines(self):
        """
        Ensure that newlines in email reports' subjects are escaped to avoid
        AdminErrorHandler to fail.
        Refs #17281.
        """
        message = 'Message \r\n with newlines'
        expected_subject = 'ERROR: Message \\r\\n with newlines'
        self.assertEqual(len(mail.outbox), 0)
        self.logger.error(message)
        self.assertEqual(len(mail.outbox), 1)
        self.assertNotIn('\n', mail.outbox[0].subject)
        self.assertNotIn('\r', mail.outbox[0].subject)
        self.assertEqual(mail.outbox[0].subject, expected_subject)
    @override_settings(
        ADMINS=[('admin', 'admin@example.com')],
        DEBUG=False,
    )
    def test_uses_custom_email_backend(self):
        """
        Refs #19325
        """
        message = 'All work and no play makes Jack a dull boy'
        admin_email_handler = self.get_admin_email_handler(self.logger)
        # The fake mail_admins checks that the handler passed a connection
        # built from the custom backend path set below.
        mail_admins_called = {'called': False}
        def my_mail_admins(*args, **kwargs):
            connection = kwargs['connection']
            self.assertIsInstance(connection, MyEmailBackend)
            mail_admins_called['called'] = True
        # Monkeypatches
        orig_mail_admins = mail.mail_admins
        orig_email_backend = admin_email_handler.email_backend
        mail.mail_admins = my_mail_admins
        admin_email_handler.email_backend = (
            'logging_tests.logconfig.MyEmailBackend')
        try:
            self.logger.error(message)
            self.assertTrue(mail_admins_called['called'])
        finally:
            # Revert Monkeypatches
            mail.mail_admins = orig_mail_admins
            admin_email_handler.email_backend = orig_email_backend
    @override_settings(
        ADMINS=[('whatever admin', 'admin@example.com')],
    )
    def test_emit_non_ascii(self):
        """
        #23593 - AdminEmailHandler should allow Unicode characters in the
        request.
        """
        handler = self.get_admin_email_handler(self.logger)
        record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
        rf = RequestFactory()
        # Non-ASCII path exercises the Unicode handling of the report body.
        url_path = '/º'
        record.request = rf.get(url_path)
        handler.emit(record)
        self.assertEqual(len(mail.outbox), 1)
        msg = mail.outbox[0]
        self.assertEqual(msg.to, ['admin@example.com'])
        self.assertEqual(msg.subject, "[Django] ERROR (EXTERNAL IP): message")
        self.assertIn("Report at %s" % url_path, msg.body)
    @override_settings(
        MANAGERS=[('manager', 'manager@example.com')],
        DEBUG=False,
    )
    def test_customize_send_mail_method(self):
        # Subclasses may override send_mail() to redirect reports, here to
        # MANAGERS instead of ADMINS.
        class ManagerEmailHandler(AdminEmailHandler):
            def send_mail(self, subject, message, *args, **kwargs):
                mail.mail_managers(subject, message, *args, connection=self.connection(), **kwargs)
        handler = ManagerEmailHandler()
        record = self.logger.makeRecord('name', logging.ERROR, 'function', 'lno', 'message', None, None)
        self.assertEqual(len(mail.outbox), 0)
        handler.emit(record)
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, ['manager@example.com'])
    @override_settings(ALLOWED_HOSTS='example.com')
    def test_disallowed_host_doesnt_crash(self):
        # A DisallowedHost report must not blow up in either plain-text or
        # HTML mode of the handler.
        admin_email_handler = self.get_admin_email_handler(self.logger)
        old_include_html = admin_email_handler.include_html
        # Text email
        admin_email_handler.include_html = False
        try:
            self.client.get('/', HTTP_HOST='evil.com')
        finally:
            admin_email_handler.include_html = old_include_html
        # HTML email
        admin_email_handler.include_html = True
        try:
            self.client.get('/', HTTP_HOST='evil.com')
        finally:
            admin_email_handler.include_html = old_include_html
class SettingsConfigTest(AdminScriptTestCase):
    """
    Test that accessing settings in a custom logging handler does not trigger
    a circular import error.
    """
    def setUp(self):
        # A LOGGING dict (as source text) that routes through a handler which
        # itself touches settings; written into a throwaway settings module.
        log_config = """{
    'version': 1,
    'handlers': {
        'custom_handler': {
            'level': 'INFO',
            'class': 'logging_tests.logconfig.MyHandler',
        }
    }
}"""
        self.write_settings('settings.py', sdict={'LOGGING': log_config})
    def tearDown(self):
        self.remove_settings('settings.py')
    def test_circular_dependency(self):
        # validate is just an example command to trigger settings configuration
        out, err = self.run_manage(['check'])
        self.assertNoOutput(err)
        self.assertOutput(out, "System check identified no issues (0 silenced).")
def dictConfig(config):
    """Test double for a LOGGING_CONFIG callable; records that it was run."""
    setattr(dictConfig, 'called', True)


# Flag starts cleared; flipped by the first invocation above.
dictConfig.called = False
class SetupConfigureLogging(SimpleTestCase):
    """
    Test that calling django.setup() initializes the logging configuration.
    """
    @override_settings(LOGGING_CONFIG='logging_tests.tests.dictConfig',
                       LOGGING=OLD_LOGGING)
    def test_configure_initializes_logging(self):
        # Import locally so setup() runs while the settings override is active.
        from django import setup
        setup()
        # The stub dictConfig records that setup() invoked the configured
        # LOGGING_CONFIG callable.
        self.assertTrue(dictConfig.called)
@override_settings(DEBUG=True, ROOT_URLCONF='logging_tests.urls')
class SecurityLoggerTest(SimpleTestCase):
    """The django.security.* loggers fire for SuspiciousOperation responses."""

    def _assert_one_security_log(self, logger_name, path):
        # Requesting *path* must produce exactly one 'dubious' error record
        # on the given security (sub)logger.
        with patch_logger(logger_name, 'error') as calls:
            self.client.get(path)
        self.assertEqual(len(calls), 1)
        self.assertEqual(calls[0], 'dubious')

    def test_suspicious_operation_creates_log_message(self):
        self._assert_one_security_log('django.security.SuspiciousOperation', '/suspicious/')

    def test_suspicious_operation_uses_sublogger(self):
        # Subclasses of SuspiciousOperation log to their own child logger.
        self._assert_one_security_log('django.security.DisallowedHost', '/suspicious_spec/')

    @override_settings(
        ADMINS=[('admin', 'admin@example.com')],
        DEBUG=False,
    )
    def test_suspicious_email_admins(self):
        # With DEBUG off, suspicious operations are also mailed to ADMINS.
        self.client.get('/suspicious/')
        self.assertEqual(len(mail.outbox), 1)
        self.assertIn('Report at /suspicious/', mail.outbox[0].body)
class SettingsCustomLoggingTest(AdminScriptTestCase):
    """
    Test that logging defaults are still applied when using a custom
    callable in LOGGING_CONFIG (i.e., logging.config.fileConfig).
    """
    def setUp(self):
        # Minimal fileConfig-style (INI) logging configuration. The section
        # content must start at column 0: configparser treats indented lines
        # as continuations.
        logging_conf = """
[loggers]
keys=root
[handlers]
keys=stream
[formatters]
keys=simple
[logger_root]
handlers=stream
[handler_stream]
class=StreamHandler
formatter=simple
args=(sys.stdout,)
[formatter_simple]
format=%(message)s
"""
        self.temp_file = NamedTemporaryFile()
        self.temp_file.write(logging_conf.encode('utf-8'))
        self.temp_file.flush()
        # Point LOGGING_CONFIG at fileConfig and LOGGING at the temp file path.
        sdict = {'LOGGING_CONFIG': '"logging.config.fileConfig"',
                 'LOGGING': 'r"%s"' % self.temp_file.name}
        self.write_settings('settings.py', sdict=sdict)
    def tearDown(self):
        self.temp_file.close()
        self.remove_settings('settings.py')
    def test_custom_logging(self):
        out, err = self.run_manage(['check'])
        self.assertNoOutput(err)
        self.assertOutput(out, "System check identified no issues (0 silenced).")
class SchemaLoggerTests(SimpleTestCase):
    """Logging performed by the schema editor (django.db.backends.schema)."""
    def test_extra_args(self):
        # collect_sql=True gathers statements rather than executing them
        # against the database (per the flag name; behavior of schema_editor).
        editor = connection.schema_editor(collect_sql=True)
        sql = "SELECT * FROM foo WHERE id in (%s, %s)"
        params = [42, 1337]
        with patch_logger('django.db.backends.schema', 'debug', log_kwargs=True) as logger:
            editor.execute(sql, params)
        # The raw SQL and params must also be exposed via the record's
        # `extra` dict for structured log consumers.
        self.assertEqual(
            logger,
            [(
                'SELECT * FROM foo WHERE id in (%s, %s); (params [42, 1337])',
                {'extra': {
                    'sql': 'SELECT * FROM foo WHERE id in (%s, %s)',
                    'params': [42, 1337],
                }},
            )]
        )
class LogFormattersTests(SimpleTestCase):
    """Formatting behavior of django.utils.log.ServerFormatter."""
    def test_server_formatter_styles(self):
        # '' means no DJANGO_COLORS configuration string.
        color_style = color.make_style('')
        formatter = ServerFormatter()
        formatter.style = color_style
        log_msg = 'log message'
        # Each HTTP status class maps to its own palette role.
        status_code_styles = [
            (200, 'HTTP_SUCCESS'),
            (100, 'HTTP_INFO'),
            (304, 'HTTP_NOT_MODIFIED'),
            (300, 'HTTP_REDIRECT'),
            (404, 'HTTP_NOT_FOUND'),
            (400, 'HTTP_BAD_REQUEST'),
            (500, 'HTTP_SERVER_ERROR'),
        ]
        for status_code, style in status_code_styles:
            record = logging.makeLogRecord({'msg': log_msg, 'status_code': status_code})
            self.assertEqual(formatter.format(record), getattr(color_style, style)(log_msg))
        # Records without a status_code are formatted verbatim.
        record = logging.makeLogRecord({'msg': log_msg})
        self.assertEqual(formatter.format(record), log_msg)
    def test_server_formatter_default_format(self):
        server_time = '2016-09-25 10:20:30'
        log_msg = 'log message'
        logger = logging.getLogger('django.server')
        @contextmanager
        def patch_django_server_logger():
            # Temporarily swap the handler's stream for an in-memory buffer
            # so the formatted output can be inspected.
            old_stream = logger.handlers[0].stream
            new_stream = six.StringIO()
            logger.handlers[0].stream = new_stream
            yield new_stream
            logger.handlers[0].stream = old_stream
        with patch_django_server_logger() as logger_output:
            logger.info(log_msg, extra={'server_time': server_time})
            self.assertEqual('[%s] %s\n' % (server_time, log_msg), logger_output.getvalue())
        with patch_django_server_logger() as logger_output:
            # Without an explicit server_time, the formatter supplies its own
            # timestamp; only check the general shape.
            logger.info(log_msg)
            six.assertRegex(self, logger_output.getvalue(), r'^\[[-:,.\s\d]+\] %s' % log_msg)
| bsd-3-clause |
HailStorm32/Q.bo_stacks | qbo_webi/src/teleoperation/sip2rtmp/rtmplite/siprtmp.py | 4 | 91053 | # Copyright (c) 2007-2009, Mamta Singh. All rights reserved. See LICENSING for details.
# Copyright (c) 2010-2011, Kundan Singh.
'''
Introduction
------------
The goal of this project is to allow multimedia calls from Flash Player to SIP network and vice-versa. This allows either a
web browser or a standalone AIR-based application to call to and receive call from a SIP phone. The SIP-RTMP gateway implements
translation of signaling as well as media to support audio, video and text with the SIP user agent. The client side ActionScript
library allows any third-party to build user interface for the web-based soft-phone. The Gateway can run either as a server hosted
by the provider, or as a local application on the client's host.
For other Flash-SIP projects see:
1. http://www.gtalk2voip.com/sipper/
2. http://www.flaphone.com/ (formerly Flashphone.ru)
3. http://code.google.com/p/red5phone/
Design choices
--------------
Two design alternatives: dedicated server vs. server app. The advantages of a dedicated server that implements SIP-RTMP gateway
is that management is easier, and the application does just one thing. On the other hand implementing the gateway as a RTMP server
application is more extensible, and the same server can be used to implement other applications. I outline the implementations
using both alternatives, and finally pick the second alternative in this implementation.
In the dedicated server case, the FlashServer class of rtmp.py module is extended into a Gateway class. This subclass then
overrides the various methods such as publishhandler and playhandler to map to similar operations using the SIP library such as
register, invite or accept. One advantage of this approach is that the Gateway class can be used as a component in other
applications without having to run a separate Server.
In the server application case, the Gateway class extends the App class of rtmp.py to implement the SIP-RTMP gateway application,
and installs itself as application named 'sip'. The Gateway class overrides the methods such as onConnect, onPublish, etc., to
map to the SIP library methods such as register, invite or accept. One advantage of this approach is that the same Server can
be used to perform other RTMP server functions besides hosting a SIP gateway.
There are several API alternatives from the Flash client point of view as well:
1. The RTMP NetConnection is just used as RPC layer to control the SIP library.
2. Have 1-to-1 mapping between a RTMP NetConnection and a SIP user agent. (choosen one)
3. Have 1-to-1 mapping between a RTMP connection's scope and a SIP multi-party conference.
In the first approach, the application connects to the gateway using NetConnection URL of the form 'rtmp://server/sip'. Once
connected, the application uses various RPC commands and indications to register, invite, accept or bye a SIP session. Each
command has a full set of arguments needed to execute that command. For example, NetConnection.call('invite',..., 'alice','bob')
will make a call from local user 'alice' to remote user 'bob'. One major problem with this approach is that there is no
information hiding or abstraction in the API. Hence, any connected application can alter the state of any user or call in the
library. One could use cookies to store state information, but nevertheless the API is pretty rudimentary.
In the second approach, a single SIP user agent is associated with a NetConnection. The application connects to the URL of the
form 'rtmp://server/sip/alice@example.net' and supplies additional connection arguments such as display name and password.
The gateway associates this connection with the user address-of-record (AOR) 'sip:alice@example.net'. In particular, it sends
SIP REGISTER on behalf of this user, and keeps refreshing the registration as long as the NetConnection is connected. Thus, this
NetConnection represents an abstraction of the SIP user agent for this user. The application uses RPC commands and indications
to invite, accept or bye a SIP session in this user agent. In the simple implementation, a single user agent is capable of a
single SIP session at any instance. The API for multi-line SIP user agent will be more complex. When the application calls
NetConnection.call('invite', ..., 'bob@home.com') the gateway sends a SIP INVITE request to the AOR sip:bob@home.com. When a
call is successful, the application can use the NetStream named 'local' and 'remote' to send and receive audio/video with the
remote user. In this approach a multi-party call is implemented entirely in the application by having two different NetConnection
objects in two different calls, or by making a call to a separate multi-point conference server. Additional commands and
indications are used to represent text messages and presence information. Alternatively, a SharedObject named 'contacts' could
represent the local user's contact list with presence information accessible from the Flash application. Since the SharedObject
is scoped to the NetConnection's URL, it represents that particular user's contact list.
In the third approach, the Flash application connects a NetConnection to a conference URL of the form 'rtmp://server/sip/abc1'.
In this case the conference is identified by name 'abc1'. Each connection to this URL creates a new conference leg from an
RTMP user. Then the application uses NetConnection RPC commands such as 'invite', 'accept' and indications such as 'invited',
'accepted', to inform the gateway to change the membership of the conference, either by inviting a new user or by accepting an
incoming invitation. The gateway can be distributed such that the conference context is maintained in a gateway-farm. The
membership information can be stored using a SharedObject accessible from the Flash application. One major advantage of this
approach is that it maps the URL to a conference context and supports built-in multi-party conferencing. Whenever a new participant
joins the conference, the gateway informs the application about the stream name for that participant. The application opens
a new NetStream for that stream name to play, and receives media from that participant on that stream. There is at most one
published stream in a NetConnection, which represents the local participant's media.
The third approach seems most logical and complete, however requires implementation of a distributed conference state in the
gateway farm, and multi-party conference server logic. We do not want to mix media going to the Flash application, because
not all media (e.g., video) can be mixed and audio mixing incurs additional CPU load on the server. For example, a typical
mixer employs a decode-add-encode cycle. However, existing SIP clients do not usually handle multiple media streams well.
Hence the conference server logic becomes more complex where it mixes some audio going to SIP user agents, and does not mix
audio going to the RTMP clients. Secondly, maintaining a consistent conference membership information among the distributed
gateway farm is a challenge which requires implementing various XCON extensions to server. Thirdly, a centralized conference model
doesn't mend well with a P2P-SIP network. More details about centralized, distributed and P2P-SIP conferencing can be found
in the work of http://kundansingh.com. Because of all these issues I have decided to implement the second approach instead.
The second approach is described in much detail next.
Design description
------------------
This module defines two classes: Gateway and Context. The Gateway class extends the rtmp.App class to implement the SIP-RTMP
gateway application in the RTMP server. The Context class implements the translator context for each user or connection from
the RTMP side. The main routine is similar to that in rtmp.py, in that it launches the server additionally with the "sip" gateway
application service.
Since there is a one-to-one mapping between a RTMP connection and a SIP user, a single Context behaves as a single line SIP
user agent, which can be in at most one SIP registration and at most one SIP call state at any time. I think implementing
multiple lines can be easily done in the Flash application by creating additional connections to the server.
The Gateway class overrides these methods of the App class: onConnect causes a SIP registration, onDisconnect causes a SIP
unregistration, onCommand invokes various commands such as 'invite', 'bye', 'accept', 'reject' from the RTMP side to the
SIP side, onPublish and onClose update the published stream information, onPlay and onStop update the played stream information
and onPublishData handle the media data from RTMP to SIP side. A new context is created in onConnect and destroyed in
onDisconnect. The Client (RTMP) as well as User (SIP) objects store a reference to the context. I use the SIP stack from the
p2p-sip (39 Peers) project at http://39peers.net.
The Context class maintains a mapping between RTMP client and SIP user (single line phone). It also maintains state regarding
the media sesion, incoming and outgoing pending call, and published and played streams. One unique feature of the translator
is that it tries to re-use the same port for the given SIP URL when registering with the SIP server. This way we avoid
registering multiple contacts in the SIP server for the same SIP URL.
As you will see in the later section, a connection from the RTMP client supplies a SIP URL of the registering user. The context
maps this request to a SIP REGISTER request using the local contact address for that SIP URL. This allows the gateway to
receive incoming SIP calls for this SIP URL. When the RTMP client invokes commands such as "invite", they get mapped to the
SIP side using the methods defined on the User class. Similarly, when the User class invokes callback, they get mapped to the
RTMP callbacks such as "invited".
The RTMP client MUST create at most one published NetStream and at most one played NetStream for the given connection.
The published stream supplies the client's audio and video to the context. The context maps this audio and video data to the
appropriate SIP side using the RTP module available in the SIP stack. Similarly the audio and video data from the SIP side
coming in RTP are mapped to the audio and video data given to the played stream to the RTMP client.
Interoperability with SIP/SDP/RTP
---------------------------------
The Flash application must be version 10 or higher so that it can support Speex audio codec. We can only interoperate with
SIP user agents that support Speex/16000 or Speex/8000. The reason is that Flash Player supports only limited set of codecs for
audio captured from Microphone. Flash Player 9 and earlier supported only proprietary NellyMoser codec, which are not understood
or supported beyond Flash platform. Flash Player 10 incorporated Speex audio codec which is an open source and open specification,
and are available in several SIP applications such as X-Lite. The support of Speex audio codec is not as widely available in PSTN
gateways though. Note that we support wideband (16000 Hz) and narrowband (8000 Hz) variant of Speex audio codec. The selection
can be done from Flash application during NetConnection.connect.
This section describes other interoperability issues with a SIP or Flash client. When the client issues an outbound
"invite" request, the mapped SIP INVITE advertises the session using SDP module of the SIP stack. This session contains
media stream offer for both audio and video. The audio stream has only Speex/16000 format whereas the video stream has RTMP
specific proprietary x-flv format (more about this later). An example SDP offer is shown below:
v=0
o=- 1247290948 1247290948 IN IP4 Macintosh-2.local
s=-
c=IN IP4 192.168.1.3
t=0 0
m=audio 22700 RTP/AVP 96
a=rtpmap:96 speex/16000
m=video 26498 RTP/AVP 97
a=rtpmap:97 x-flv/90000
If the response contains a both valid audio and video answer streams, then we assume that the remote side is also our own Flash
application, as it can support the proprietary x-flv video format. If the answer contains port=0 for video stream, that means
the remote party does not support our proprietary video format, then we assume that the remote side is standard SIP user agent.
Similar SDP negotiation happens for incoming call. In particular, if incoming SDP offer does not have speex audio codec, then
we disable the audio stream. Similarly if the incoming SDP offer does not have a x-flv video codec, then we disable the video
stream.
One caveat in the implementation is that the media matching is done when the Flash application accepts the incoming call. Thus,
it is possible that for an incoming call, the Flash application gets alerted even when there is no matching media session.
And when the Flash application tries it accept the incoming call, the gateway performs media matching and rejects the incoming
SIP call, and informs the Flash application that call got disconnected. I need to fix this by doing media matching as soon as
incoming SIP invitation is received.
If the remote party does not support x-flv video but supports speex/16000 audio, then we only send audio data from RTMP to
SIP side. Similarly, only audio data will be mapped from SIP to RTMP side, hence the Flash application will not see remote
party's video. Standard RTP and RTCP formatting is used for sending/receiving data to/from the SIP side. The timestamp
of RTP is derived from the RTMP message's time stamp property. In particular, the RTMP message uses a 'millisecond' unit whereas
RTP header uses 'clock rate' unit. Since we support only 16000 Hz clock rate, each millisecond unit is equivalent to
16 clock rate unit, and each speex frame of typically 20 ms is equivalent to 320 clock rate.
If the remote party supports x-flv, then we disable the speex/16000 audio. Even though the remote side is SIP, we assume that
it is backed by a Flash application with a similar gateway as this. Since x-flv format includes both audio and video, we
do not need another audio only stream in the session. Next I describe the x-flv format.
The x-flv video format is basically a modification of the RTMP media messages, so that it works with RTP. It includes interleaved
audio and video packets. One problem with RTMP media message is that there is no sequence number which makes it hard to detect
and correct packet losses over RTP/UDP transport. Another problem is that video packet size can be huge, which causes problem
with UDP transport -- certain NATs may drop large packets. For these reasons, the RTMP media message is broken down into smaller
chunks such that each chunk can be sent in a single RTP message.
The timestamp of RTP is derived from the RTMP message's time stamp property. The payload type reflects the 'x-flv/90000' media type
as negotiated in SDP. In particular for outgoing call, it will use payload type of 97 and for incoming call, it will use the
payload type that was advertised by the remote party's SDP. If remote party is also using our gateway, then it will be 97.
The sequence number, SSRC and other fields in the RTMP message are taken care by the RTP module of the SIP stack and are
independent of the RTMP side, as long as the sequence number keeps incrementing for each RTP packet sent, and SSRC is
randomly generated for the session and remains constant in the session.
The RTP payload is constructed as follows. First the RTMP message is constructed in its entirety. The Message object in rtmp
module has type, size and time properties. These are added in that order using big endian 32-bit number each as the header,
followed by the data part of the message. Note that the data part of the media message actually has one byte type information
containing codec type (e.g., 0xb2 for speex/16000), but we treat the whole data part including the type together to simplify
the translation. Thus the assembled media message looks as follows:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
type | RTMP message type |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
size | RTMP message body size |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
time | RTMP message time stamp |
+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
body | RTMP message body ... |
time | The size of this body is |
time | in the second field above |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Now the assembled media message is broken down in smaller chunks such that each chunk has at most 1000 bytes. Typically for
audio media message the size is smaller than that already hence it generates only one chunk. On the other hand a large video
media message may generate several chunks. Each chunk is treated as opaque data for the rest of the formatting. Thus, the
receiving side must re-assemble the full message as described above from the received chunks before acting on the message.
Note that if a message is split into chunks, all the chunks must be received before the whole message can be constructed.
Even if a single chunk is missing due to packet loss, the whole message needs to be discarded. The chunks idea is part of
the RTMP specification itself, however is not useful as it is, because of lack of proper sequence numbering to detect packet
losses. Hence this chunk algorithm is different than what RTMP specification uses.
Each chunk is prepended with a chunk header to form the complete RTP payload. Each chunk header starts with four bytes of
magic word 'RTMP' which is actually a big-endian 32-bit number 0x52544d50. This magic word allows detecting corrupted or
incorrect x-flv payload type. There are two sequence numbers: the message sequence number (seq) and chunk number (cseq).
Each assembled message as described before gets a unique auto-incremented message sequence number. If a message is broken
into 5 chunks, say, then the chunks will get chunk numbers as 0, 1, 2, 3, 4 in that order. Thus the first chunk of a message
always has a chunk number of 0. In the chunk header, the next 32-bits contain the big-endian message sequence number. Note that
this sequence number is different than the RTP sequence number, because the RTP sequence number is based on the lower layer's
actual message sent count, whereas this message sequence number is based on RTMP's message count. This is followed by a
big-endian 16-bit chunk number. Next 16-bit field is an optional size of the assembled message and is present if-and-only-if
the chunk number is 0, i.e., this is the first chunk of the message. This field is not present for subsequent chunks of the
message. This field is useful to know the full size of the assembled message, so that a receiver can know when to finish
the chunks and re-assemble the full message. I could have used the body size present in the full message, but that looked
more complicated to me in parsing on the receiver, hence I added this optional field. The complete chunk is shown below.
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
magic | magic word 'RTMP' 0x52544d50 |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
seq | message sequence number (seq) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
rest | chunk number (cseq) | (optional) message size |
+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
body | chunk data ... |
time | lower layer (UDP) provides size information |
time | of the full packet |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
The sender is expected to send all the messages in the correct sequence number order, and all the chunks of the message
again in the correct chunk number order.
The receiver processing is described below. First the payload type is matched to identify it as x-flv packet as per the
SDP negotiation. The other fields such as timestamp can be ignored because they appear in the actual assembled message anyway.
The payload of the RTP packet is parsed using the chunk format described above. The receiver verifies the magic word of 'RTMP'
and if failed it ignores the packet. The message sequence number is extracted as seq. If the seq is 0, then message size is
extracted. Remaining data is assumed to be chunk data. The receiver maintains the last seq received so far, and also all the
chunk data in the last seq received so far. The receiver may maintain more than one seq data, if it wants to handle out-of-
order packets. For each received packet, the receiver checks if all the chunks are received or not? if the total size of
all the chunk data received so far becomes equal to the message size found in the first chunk, then we have received all the
chunks. When all the chunks are received, all the chunk data are concatenated in the order of chunk number, to form the
complete assembled message. This message is then used to construct the rtmp module's Message object by extracting the type,
body size, time stamp and body data as mentioned before. Note that the receiver may detect lost chunks if there is a missing
chunk number and may discard all the chunks in this message seq. The receiver may also detect missing first chunk if the
the new seq number is more than the last seq but the chunk number is not 0. In this case it may discard all future chunks
in this message seq.
Once a message is assembled, it is given to the RTMP side using the played NetStream.
Client API in ActionScript
--------------------------
This section described the client side API needed to use this SIP-RTMP gateway service. The first step for the client is to
create a connection to the gateway. It is assumed that the client is written in ActionScript with appropriate Flex SDK that
supports Flash Player 10 or later features such as Speex audio codec. Note also that since the rtmp.py module currently supports
only AMF0, the client must specify this as the object encoding format. First the client creates a new NetConnection as follows:
var nc:NetConnection = new NetConnection();
nc.objectEncoding = ObjectEncoding.AMF0;
Then to receive various callbacks such as "invited", and to receive various events such as "NetConnection.Connect.Success" it
installs the listeners as follows. This assumes that the callbacks will be invoked on the current (this) object.
nc.client = this;
nc.addEventListener(NetStatusEvent.NET_STATUS, netStatusHandler);
nc.addEventListener(SecurityErrorEvent.SECURITY_ERROR, errorHandler);
nc.addEventListener(IOErrorEvent.IO_ERROR, errorHandler);
Finally to establish the connection, it invokes the 'connect' method on the NetConnection using a RTMP URL pointing to this
gateway service. In particular if the gateway is running on your local host, then use "rtmp://localhost/sip/...". If the
gateway is running on the "server" host, then use "rtmp://server/sip/...". The connection must also be scoped to the given
SIP user's address. For example if the client's SIP user name is "alice@example.com" then the URL will be
"rtmp://server/sip/alice@example.com".
nc.connect('rtmp://server/sip/alice@iptel.org', 'alice', 'mypass', 'Alice Smith');
For your testing purpose, if you are running the SIP server such as sipd.py locally, and your local IP address is
'192.168.1.3' then the URL to connect becomes "rtmp://localhost/sip/alice@192.168.1.3". The connect method takes additional
arguments for authentication and registration: authentication name, authentication password, and display name. Note that
you must supply authentication name, authentication password and display name to perform SIP registration even if there is no
authentication requested by your SIP server. However, you must set authentication password to empty string '' if you do not
want to do SIP registration, and just want to make outbound SIP calls (assuming that your SIP server allows outbound calls without
SIP registration).
nc.connect('rtmp://localhost/sip/alice@192.168.1.3', 'alice', 'mypass', 'Alice Smith');
Internally, a call such as one mentioned before causes the gateway to send SIP registration for local SIP URL of the form
"Alice Smith" <sip:alice@192.168.1.3> and authenticate if needed using username 'alice' and password 'mypass'. The netStatus
event with code 'NetConnection.Connect.Success' is dispatched when connection and registration are successful, and with code
'NetConnection.Connect.Rejected' or 'NetConnection.Connect.Failed' if the connection or registration failed for some reason.
Typically a registration or authentication fail results in rejected message whereas a RTMP connection failure due to incorrect
server name results in failed message. The client will typically treat both message as same. Once the NetConnection is connected
the client is in connected state and can make or receive calls via the gateway.
For a call, the Flash application needs to set up its devices correctly. I recommend the following set up. In particular, you
should set the Microphone codec to use Speex audio codec, and Camera to operate in reasonable dimension and frame rate. Note that
this requires Flash Player 10 if your want to run the code, and associated Flex SDK if you want to compile your code.
var mic:Microphone = Microphone.getMicrophone(-1); // little known fact that -1 gives default microphone.
mic.setUseEchoSuppression(true);
mic.setLoopback(false);
mic.setSilenceLevel(0);
mic.codec = 'Speex';
mic.gain = 80;
var cam:Camera = Camera.getCamera(); // on Mac OS, use Flash Player settings to set default camera
cam.setLoopback(false); // so that local video is not compressed in the view
cam.setMode(320, 240, 12); // tune this based on your needs
cam.setQuality(0, 70); // tune this based on your needs
localVideo.attachCamera(cam);
To place an outbound call, the client invokes the RPC method "invite" on the NetConnection and supplies the remote party's SIP
address. This SIP address must be a fully qualified SIP URL or SIP address, which includes optional display name. Examples are
"Bob Jones" <sip:bob@home.com> and sip:bob@office.net.
nc.call('invite', null, '"Bob Jones" <sip:bob@home.com>');
If you registered using "Alice Smith" <sip:alice@192.168.1.3> from another browser instance, then you can use that URL in
the "invite" method to call that user. Note however that calling a user on a NetConnection who was registered using the same
instance of the NetConnection may result in unexpected behavior, as this means you are using your phone to call your own
number in a single-line SIP user agent. The expected behavior is that you will receive 'busy' response in this case.
nc.call('invite', null, 'sip:alice@192.168.1.3');
An incoming call is indicated using a callback method "invited" on the NetConnection.client property. The remote party's
SIP address and your SIP address are both supplied as arguments, along with a unique invitation identifier. The invitation
identifier is useful for multiple call case, if you received multiple incoming calls and want to respond differently to
them.
public function invited(yourName:String, myName:String):void { ... }
The client should display some kind of alert to the user on incoming call. If the user accepts the call, the client invokes
the "accept" RPC method to accept the incoming invitation using the same invitation identifier that was received in "invited".
nc.call('accept', null);
If the user wants to reject an incoming call, the client invokes the "reject" RPC method and also supplies the original
invitation identifier and an optional reason for rejecting the call. The reason to reject is of the format "code text" where
code is a three digit reject code such as 486 for busy, and 603 for decline. The text is a human readable text phrase
indicating the reason for rejection. The numeric code is optional, if not supplied, then the gateway uses a pre-configured
reject code of 603.
nc.call('reject', null, '486 Busy Here');
nc.call('reject', null); // uses "603 Decline" as default
Once a call is established, either an outbound or inbound, the client will need to create two streams to exchange audio and
video with the remote party. The "local" stream is used to publish the local audio and video, and the "remote" stream is used to
play the remote's audio and video. As mentioned earlier the current implementation allows only two streams in the NetConnection,
one in each direction. If the client opens more than one published stream or more than one played stream, then the gateway will
only use the latest stream and ignore the previous one. Once the Camera and Microphone are attached to the local stream and
the stream is published, the gateway starts getting audio video data from local user and sends them to the remote party. Once
the remote stream is attached to a Video display object and is played, the gateway streams remote party's audio and video
data to this client, and the video gets displayed in the Video object and the audio gets played out by the Flash Player.
var local:NetStream = new NetStream(nc), remote:NetStream = new NetStream(nc);
local.attachAudio(mic);
local.attachCamera(cam);
local.publish('local');
remote.play('remote');
remoteVideo.attachStream(remote);
The client may terminate an active call or a pending outbound call using the "bye" RPC method as follows.
nc.call('bye');
Note that the client must also close the two streams when the call is terminated either by local or remote user.
local.close();
remote.close();
The gateway invokes several callbacks on the client besides the "invited" callback which was discussed earlier.
In particular the "byed" callback indicates that the remote party terminated an active call, "accepted"
callback indicates that the remote party accepted our call invitation,"rejected" callback indicates that the remote party
rejected our call invitation, and "cancelled" callback indicates that the remote party cancelled its call invitation to
us. The "rejected" and "cancelled" callbacks take some arguments. These functions must be defined in the client to handle
the appropriate events.
public function accepted():void { ... }
public function rejected(reason:String):void { ... }
public function cancelled(frm:String, to:String):void { ... }
public function byed():void { ... }
If the user wants to make a SIP call to a phone number, he can use the standard SIP URL typically supported by the phone
providers. For example, if the user has an account for, say, the 'phoneprovider.com' VoIP provider with user name of
'12125551234' and password of '5678', and want to make call to another number 18001234567, the client can do the following.
nc.connect("rtmp://server/sip/12125551234@phoneprovider.com", "12125551234", "5678")
nc.call("invite", null, "sip:18001234567@phoneprovider.com");
If your VoIP provider does not require a SIP registration to make outbound calls, you will need to supply the authentication
credentials in the "invite" call. TODO: this is for future work.
nc.connect("rtmp://server/sip/alice@iptel.org", "alice", "", "Alice Smith");
nc.call("invite", null, "sip:18001234567@phone.iptel.org", "alice", "mypass");
If you want to use the default preconfigured VoIP provider of the gateway service, you can use the "tel:" URL to make a call.
TODO: this is for future work.
nc.call('invite', null, 'tel:12125551234');
If you want to remain anonymous in your outbound call, the recommended way is to use the SIP address of <sip:anonymous@invalid>
If you supply your password as "" then no SIP registration will be done.
nc.connect("rtmp://server/sip/anonymous@invalid", "anonymous", "", "Anonymous User");
To use a secure connection replace sip with sips and rtmp with rtmps. TODO: this is for future work.
In particular, a rtmps URL uses secure TLS connection from Flash Player to the gateway server and sips URL uses secure TLS
hop-by-hop connection from gateway server to your SIP destination. A NetConnection that uses sips will only be able to receive
secure connections from remote party. Thus, the application may need two netconnections to support both secure and regular
SIP signaling. Note that the URL in connect method is "rtmps://.../sips/...".
nc.connect('rtmps://server/sips/...',...);
nc.call("invite", null, "sips:bob@home.com");
Note, however, that security using this method is not end-to-end secure even for media. In particular, the gateway server has
access to your media stream. You should use both rtmps and sips together. If your gateway server is running on local host, you
may not need rtmps though. Note also that signaling security does not guarantee media encryption and privacy. My implementation
will make sure that SRTP is required when using sips.
In an active call, you can send DTMF digits using RFC 2833. The following example sends digit "5" in the RTP session of the
active call using RFC 2833 (touch-tones).
nc.call("sendDTMF", null, "5");
The digits are sent only if the remote end acknowledged support for telephone-event in SDP of session initiation. Only single
digit can be sent using sendDTMF using rfc2833.py module, and does not use redundancy rfc2198.py payload.
Limitations
-----------
1. The URI schemes 'sips' and 'rtmps' are not yet implemented.
2. Audio interoperability requires that the SIP user agent support Speex codec, and that the Flash Player is version 10 or later.
The older version of Flash Player included a proprietary Nellymoser codec which is not interoperable with other SIP phones.
3. Video communication is transported using a proprietary packetization format, and will work only between two Flash clients
connected via a gateway following the packetization protocol defined in this file.
4. Multi-party conferencing is not implemented. If at all, the logic should be implemented in the application or external
third-party conference server in this design approach.
5. NAT/firewall traversal is not implemented. Thus, the gateway should run in public Internet, a third-party solution such as
RTP proxy be used to connect to SIP side, the PSTN gateway should be in public Internet and the Flash client network should
allow outbound RTMP traffic to the gateway. In future I will add support for STUN and TURN in the gateway so that it can be
run behind NAT or on user's local computer, and can be used to connect to SIP clients behind NAT.
An example SIP user agent component is available in the videoPhone directory. To build use Flex builder or mxmlc compiler. A
pre-compiled SWF is included in that project directory's bin-release sub-directory for you to try out the user agent.
Major Updates
-------------
Support for transcoding between Flash Player's speex and SIP side's PCMU and PCMA using external audiospeex module.
If the audiospeex module is found in PYTHONPATH then it is automatically used, and session negotiation includes new
codecs of pcmu/8000 and pcma/8000 along with speex/8000 and speex/16000. Please see the project web site for details on
how to build/compile this audiospeex module.
Support for G.711 PCMU and PCMA, and H.264 from Flash Player 11. The NetConnection.connect API is modified to ignore the
rate parameter if codecs are supplied in "invite" or "accept" calls. The "invite" and "accept" calls can now have a list of
supported codecs with one or more of the following values.
codec Flash Player sends and receives SDP contains
wideband speex wideband. speex/16000
narrowband send speex wideband but receive speex narrowband. speex/8000
pcmu pcmu at 8kHz and 20ms pcmu/8000
pcma             pcma at 8kHz and 20ms                         pcma/8000
ulaw speex pcmu/8000 via transcoding
alaw speex pcma/8000 via transcoding
dtmf sendDTMF, ignore on receive telephone-event/8000
h264 H264Avc (Baseline or Main) h264/90000
flv RTMP message x-flv/90000
The list supplied in "invite" or "accept" is used in decreasing order of preference.
For backward compatibility, if no list of codecs are supplied, it uses the default speex/16000 (or speex/8000) and
x-flv/90000. The media part is moved to a separate MediaContext class which is reused by both multitask and gevent version.
For example, following is the default for backward compatibility
nc.call('invite', null, '"Bob Jones" <sip:bob@home.com>', 'wideband', 'dtmf', 'flv');
To support all codecs but prefer speex wideband, use
nc.call(...., 'wideband', 'narrowband', 'pcmu', 'pcma', 'ulaw', 'alaw', 'dtmf', 'h264', 'flv')
If the application sends 'pcmu', 'pcma' or 'h264' it must be using Flash Player 11 or later that supports these codecs.
If the application sends 'ulaw' or 'alaw' but siprtmp cannot find the audiospeex module, it will continue the call with
speex in SDP. As mentioned above, if the application doesn't supply any codec, the default setting is used. If the application
supplies some codecs, others are not used. For example, if only 'wideband' is supplied and no video, it will not use
x-flv/90000 in SDP.
Additionally, the "accepted" callback can have two additional arguments for preferred audio and video codecs after the
media negotiation is complete. Note that "accepted" is invoked for both incoming and outgoing calls.
public function accepted(audioCodec:String=null, videoCodec:String=null) { ... }
The audio and video codecs have the meaning as described below, and the Flash application should listen for the callback
to change the encoding for microphone and camera as needed before starting to publish. For backward compatibility with
existing Flash applications, if a list of codecs was not supplied in "invite" or "accept" then the additional arguments
are not supplied in "accepted" callback.
The "audioCodec" argument can be "speex", "pcmu", "pcma" or "default". If it is default, the application can use either
speex or nellymoser but not pcmu or pcma. The "videoCodec" argument can be "h264" or "default". If it is default, the
application can use any codec. The capability negotiation tries to honor the preference order of the codecs by comparing the
codecs supported by the Flash application and the remote SIP endpoint. If the remote side is capable of "flv" and the
Flash side specified "flv" then both audioCodec and videoCodec will be set to "default". If the remote side is not capable
of any of advertised video codecs, then the videoCodec will be set to null, to tell Flash application to stop video publish.
If the remote side is not capable of any of the advertised audio codecs but the call was accepted, then the audioCodec will
be set to null, to tell Flash application to stop audio publish. If the Flash application advertised "wideband", "narrowband",
"ulaw", "alaw", "pcmu", "pcma" but the remote side is capable of only "pcmu" then audioCodec will be set to "speex" requiring
transcoding to "pcmu". Hence, the application should always put "ulaw" or "alaw" after "pcmu" or "pcma" if available.
For audio codecs the first advertised codec which is supported by the target is used. Thus the Flash application has more
control of which audio codec will be used in case of multiple choices.
The recommended codec list for Flash Player 11+ is "wideband", "narrowband", "pcmu", "pcma", "ulaw", "alaw", "dtmf", "h264", "flv"
The recommended codec list for Flash Player 10 is "wideband", "narrowband", "ulaw", "alaw", "dtmf", "flv".
'''
from __future__ import with_statement
import os, sys, socket, time, traceback, random, multitask
from struct import pack, unpack
from rtmp import App, Header, Message, FlashServer
from amf import AMF0
try:
from app.voip import User, Session, MediaSession
from std.rfc3550 import RTP, Network as RTPNetwork
from std.rfc2396 import Address
from std.rfc4566 import SDP, attrs as format
from std.rfc2833 import DTMF
from std.rfc3261 import Header as SIPHeader
from std.kutil import setlocaladdr, getlocaladdr
except:
print 'Please include p2p-sip src directory in your PYTHONPATH'
exit(1)
try: import audiospeex, audioop
except: audiospeex = None
# Module-wide debug flag: when True, the gateway prints trace messages to stdout.
_debug = False
class Context(object):
'''Context stores state needed for gateway. The client.context property holds an instance of this class. The methods invoked
by RTMP side are prefixed with rtmp_ and those invoked by SIP side are prefixed sip_. All such methods are actually generators.
'''
def __init__(self, app, client):
self.app, self.client = app, client
self.user = self.session = self.outgoing = self.incoming = None # SIP User and session for this connection
self.publish_stream = self.play_stream = self.media = self._preferred = None # streams on RTMP side, media context and preferred rate.
self._gin = self._gss = None # generators that needs to be closed on unregister
if not hasattr(self.app, '_ports'): self.app._ports = {} # used to persist SIP port wrt registering URI. map: uri=>port
    def rtmp_register(self, login=None, passwd='', display=None, rate="wideband"):
        '''Handle the RTMP-side NetConnection.connect: create a SIP user bound to a
        local UDP port for the connection's target address-of-record (AOR) and, when
        passwd is non-empty, perform SIP registration with the given credentials.
        On success accepts the RTMP connection; on any failure rejects it with a
        reason. This is a multitask-style generator: all blocking calls are yielded.'''
        global agent
        # client.path looks like "sip/alice@example.com": the part after the first '/' is the AOR.
        scheme, ignore, aor = self.client.path.partition('/')
        self._preferred = rate   # preferred audio rate, e.g. "wideband" => speex/16000
        if _debug: print 'rtmp-register scheme=', scheme, 'aor=', aor, 'login=', login, 'passwd=', '*'*(len(passwd) if passwd else 0), 'display=', display
        addr = '"%s" <sip:%s>'%(display, aor) if display else 'sip:%s'%(aor)
        sock = socket.socket(type=socket.SOCK_DGRAM) # signaling socket for SIP
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Re-use the port previously allocated to this AOR (if any) so the SIP
        # contact stays stable across reconnects of the same user.
        port = self.app._ports.get(aor, 0)
        try: sock.bind((agent.int_ip, port)); port = sock.getsockname()[1]
        except:
            if _debug: print ' exception in register', (sys and sys.exc_info() or None)
            yield self.client.rejectConnection(reason='Cannot bind socket port')
            raise StopIteration(None)
        #self.ports[name] = sock.getsockname()[1] # store the port number
        # TODO: storing and keeping the persistent port per user doesn't work well if the app is re-loaded in brief interval.
        try:
            user = self.user = User(sock, nat=False).start() # create SIP user. Ownership of sock is moved to User.
            user.context, user.username, user.password = self, login, passwd
            if user.password:
                # Non-empty password requests a real SIP REGISTER (with auth if challenged).
                if _debug: print ' registering addr=', addr, 'port=', port
                result, reason = yield user.bind(addr, refresh=True)
                if _debug: print ' registration returned', result, reason
                if result == 'failed':
                    yield self.client.rejectConnection(reason=reason)
                    raise StopIteration(None)
                self._gin = self._incominghandler(); multitask.add(self._gin) # incoming SIP messages handler
            else: user.address = Address(addr)  # no registration: just record the local address
            if _debug: print ' register successful', self.user.address
            yield self.client.accept()
        except:
            if _debug: print ' exception in register', (sys and sys.exc_info() or None)
            yield self.client.rejectConnection(reason=sys and str(sys.exc_info()[1]) or 'Server Error')
            raise StopIteration(None)
    def rtmp_unregister(self):
        '''Tear down the SIP side when the RTMP client disconnects: end any active
        call first, then stop and dispose the SIP user and its signaling socket,
        close the incoming/session handler generators, and release the media
        context. Teardown order matters; exceptions are swallowed (best-effort).'''
        try:
            if self.user is not None:
                if _debug: print 'rtmp-unregister', (self.client and self.client.path or None)
                yield self._cleanup() # close the call first, if any
                yield self.user.close()
                yield self.user.stop()
                if self.user.sock:
                    try: self.user.sock.close()
                    except: pass
                    self.user.sock = None
                # break the context<->user reference cycle
                self.user.context = None; self.user = None
                if self._gin is not None: self._gin.close(); self._gin = None
                if self._gss is not None: self._gss.close(); self._gss = None
            if self.media:
                self.media.close(); self.media = None
        except:
            if _debug: print ' exception in unregister', (sys and sys.exc_info() or None)
    def rtmp_invite(self, dest, *args):
        '''Place an outbound SIP call on behalf of the Flash client ("invite" RPC).
        dest is the target SIP address (scheme optional); extra *args are preferred
        codec names. Creates a MediaContext, sends the SIP INVITE, surfaces
        provisional 180/183 responses via the "ringing" callback, and on success
        starts the session handler and invokes "accepted" with the negotiated
        codecs; otherwise invokes "rejected" with a reason. Multitask generator.'''
        global agent
        try:
            if _debug: print 'rtmp-invite %r %r'%(dest, args)
            if self.user: # already a registered user exists
                if not self.session: # not already in a session, so create one
                    try: dest = Address(dest) # first try the default scheme supplied by application
                    except: dest = Address(self.user.address.uri.scheme + ':' + dest) # otherwise scheme is picked from registered URI
                    if _debug: print ' create media context'
                    media = MediaContext(self, None, agent.int_ip, self._preferred, RTPNetwork, *args) # create a media context for the call
                    self.outgoing = self.user.connect(dest, sdp=media.session.mysdp, provisional=True)
                    try:
                        session, reason = yield self.outgoing
                        if _debug: print ' session=', session, 'reason=', reason
                        # 180 Ringing / 183 Session Progress are provisional: keep the
                        # INVITE transaction going and tell the Flash side it is ringing.
                        while reason is not None and reason.partition(" ")[0] in ('180', '183'):
                            yield self.client.call('ringing', reason)
                            self.outgoing = self.user.continueConnect(session, provisional=True)
                            session, reason = yield self.outgoing
                    except:
                        media.close()
                        if self.outgoing is not None: raise
                        else: raise StopIteration(None) # else call was cancelled in another task
                    self.outgoing = None # because the generator returned, and no more pending outgoing call
                    if session: # call connected
                        self.media, self.session, session.media = media, session, media.session
                        self.media.session.setRemote(session.yoursdp)
                        self._gss = self._sessionhandler(); multitask.add(self._gss) # receive more requests from SIP
                        codecs = self.media.accepting();
                        if _debug: print 'sip-accepted %r'%(codecs,)
                        yield self.client.call('accepted', *codecs)
                    else: # connection failed, close media socket
                        media.close()
                        yield self.client.call('rejected', reason)
                else: yield self.client.call('rejected', 'Already in an active or pending call')
            else: yield self.client.call('rejected', 'Registration required before making a call')
        except:
            if _debug: print ' exception in invite', (sys and sys.exc_info() or None)
            if _debug: traceback.print_exc()
            yield self.client.call('rejected', 'Internal server error')
def rtmp_accept(self, *args):
global agent
if _debug: print 'rtmp-accept %r'%(args,)
incoming = self.incoming; self.incoming = reason = media = None # clear self.incoming, and store value in incoming
try:
if self.user is not None and incoming is not None:
self.media = MediaContext(self, incoming[1].request, agent.int_ip, self._preferred, RTPNetwork, *args) # create a media context for the call
if self.media.session.mysdp is None:
reason = '488 Incompatible SDP'
else:
session, reason = yield self.user.accept(incoming, sdp=self.media.session.mysdp)
if session: # call connected
self.session, session.media = session, self.media.session
self._gss = self._sessionhandler(); multitask.add(self._gss) # receive more requests from SIP
codecs = self.media.accepting();
if _debug: print 'sip-accepted %r'%(codecs,)
yield self.client.call('accepted', *codecs)
else:
if not reason: reason = '500 Internal Server Error in Accepting'
else:
if _debug: print ' no incoming call. ignored.'
except:
if _debug: print ' exception in rtmp_accept', (sys and sys.exc_info())
reason = '500 Internat Server Exception'
if reason:
if self.media:
self.media.close(); self.media = None
if self.user: yield self.user.reject(incoming, reason) # TODO: a better way would be to reject in _incominghandler
if self.client: yield self.client.call('byed')
def rtmp_reject(self, reason='Decline'):
try:
if _debug: print 'rtmp-reject'
if self.user is not None and self.incoming is not None:
yield self.user.reject(self.incoming, reason)
self.incoming = None # no more pending incoming call
elif _debug: print ' no incoming call. ignored'
except:
if _debug: print ' exception in reject', (sys and sys.exc_info() or None)
def rtmp_bye(self):
try:
if _debug: print 'rtmp-bye'
if self.session is None and self.outgoing is not None: # pending outgoing invite
if _debug: print ' cancel outbound invite'
self.outgoing.close()
self.outgoing = None
elif self.session:
yield self._cleanup()
except:
if _debug: print ' exception in bye', (sys and sys.exc_info() or None)
def sip_invite(self, dest):
try:
if _debug: print 'sip-invite'
yield self.client.call('invited', str(dest), str(self.user.address))
except:
if _debug: print ' exception in sip_invite', (sys and sys.exc_info() or None)
yield
    def sip_cancel(self, dest):
        '''Multitask task: notify the RTMP client that the incoming SIP call
        from dest was cancelled by the remote party ("cancelled" callback).'''
        try:
            if _debug: print 'sip-cancel'
            yield self.client.call('cancelled', str(dest), str(self.user.address))
        except:
            if _debug: print ' exception in sip_cancel', (sys and sys.exc_info() or None)
        yield
    def sip_bye(self):
        '''Multitask task: notify the RTMP client that the SIP side ended the
        call ("byed" callback).'''
        try:
            if _debug: print 'sip-bye'
            yield self.client.call('byed')
        except:
            if _debug: print ' exception in sip_bye', (sys and sys.exc_info() or None)
        yield
    def sip_hold(self, value):
        '''Multitask task: propagate a SIP hold/unhold event to the RTMP client
        ("holded" callback). value is True when put on hold, False when resumed.'''
        try:
            if _debug: print 'sip-hold', value
            yield self.client.call('holded', value)
        except:
            if _debug: print ' exception in sip_hold', (sys and sys.exc_info() or None)
        yield
    def _incominghandler(self): # Handle incoming SIP messages
        '''Multitask task: long-running loop that receives events from the SIP
        user agent and forwards them to the RTMP side.

        Events handled:
          'connect' -- a new incoming INVITE; remember it in self.incoming and
                       schedule sip_invite to inform the RTMP client.
          'close'   -- the pending incoming call was cancelled; clear state and
                       schedule sip_cancel.
        On any other exception the loop exits and self._gin is cleared.'''
        try:
            user = self.user
            while True:
                cmd, arg = (yield user.recv())
                if _debug: print 'incominghandler', cmd
                if cmd == 'connect': # incoming invitation, inform RTMP side
                    self.incoming = arg
                    multitask.add(self.sip_invite(str(Address(arg[0]))))
                elif cmd == 'close': # incoming call cancelled
                    self.incoming = None
                    multitask.add(self.sip_cancel(str(Address(arg[0]))))
        # StopIteration must propagate: it is multitask's normal generator-exit protocol
        except StopIteration: raise
        except:
            if _debug: print 'incominghandler exiting', (sys and sys.exc_info() or None)
        self._gin = None
    def _sessionhandler(self): # Handle SIP session messages
        '''Multitask task: loop over in-dialog events from the established SIP
        session.

        Events handled:
          'close'  -- remote hung up; schedule sip_bye and leave the loop,
                      then run _cleanup.
          'change' -- new SDP offer; a connection address of 0.0.0.0 signals
                      hold (RFC 2543-style), forwarded via sip_hold.
        GeneratorExit is swallowed so the task can be cancelled cleanly.'''
        try:
            session = self.session
            while True:
                cmd, arg = (yield session.recv())
                if cmd == 'close': multitask.add(self.sip_bye()); break # exit from session handler
                if cmd == 'change': # new SDP received from SIP side
                    # c=0.0.0.0 in the session description is the classic "hold" indication
                    is_hold = bool(arg and arg['c'] and arg['c'].address == '0.0.0.0')
                    multitask.add(self.sip_hold(is_hold))
            yield self._cleanup()
        except GeneratorExit: pass
        except:
            if _debug: print 'exception in sessionhandler', (sys and sys.exc_info() or None)
        self._gss = None
        if _debug: print 'sessionhandler exiting'
    def _cleanup(self): # cleanup a session
        '''Multitask task: tear down call state — close the SIP session (sends
        BYE), close the media context (releases RTP sockets), and cancel the
        session-handler task. Safe to call when some or all are already gone.'''
        if self.session:
            yield self.session.close() # close the session
            self.session = None
        if self.media:
            self.media.close()
            self.media = None
        if self._gss is not None: self._gss.close(); self._gss = None
def received(self, media, fmt, packet): # an RTP packet is received. Hand over to sip_data.
if fmt is not None:
multitask.add(self.sip_data(fmt, packet))
    def sip_data(self, fmt, data): # handle media stream received from SIP
        '''Multitask task: transcode one incoming RTP packet (SIP side) into
        zero or more RTMP messages via MediaContext.rtp2rtmp and send them on
        the client's play stream. data may be raw bytes or an RTP object.'''
        try:
            p = RTP(data) if not isinstance(data, RTP) else data
            if _debug: print ' <-s pt=%r seq=%r ts=%r ssrc=%r marker=%r len=%d'%(p.pt, p.seq, p.ts, p.ssrc, p.marker, len(p.payload))
            if self.media:
                messages = self.media.rtp2rtmp(fmt, p)
                # only deliver when the RTMP play stream exists and transcoding produced output
                if self.play_stream and messages:
                    for message in messages:
                        if _debug: print 'f<- type=%r len=%r codec=0x%02x'%(message.type, message.size, message.data and ord(message.data[0]) or -1)
                        yield self.play_stream.send(message)
        except (ValueError, AttributeError), E:
            if _debug: print ' exception in sip_data', E; traceback.print_exc()
        yield
    def rtmp_data(self, stream, message): # handle media data message received from RTMP
        '''Multitask task: transcode one RTMP media message into zero or more
        RTP payload tuples via MediaContext.rtmp2rtp and transmit each on the
        SIP-side media session.'''
        try:
            if _debug: print 'f-> type=%x len=%d codec=0x%02x'%(message.header.type, message.size, message.data and ord(message.data[0]) or -1)
            if self.media:
                messages = self.media.rtmp2rtp(stream, message)
                if self.session and self.media.session and messages:
                    for payload, ts, marker, fmt in messages:
                        if _debug: print ' ->s fmt=%r %r/%r ts=%r marker=%r len=%d'%(fmt.pt, fmt.name, fmt.rate, ts, marker, len(payload))
                        self.media.session.send(payload=payload, ts=ts, marker=marker, fmt=fmt)
        except:
            if _debug: print ' exception in rtmp_data'; traceback.print_exc()
        yield
    def rtmp_sendDTMF(self, digit):
        '''Multitask task: send a single DTMF digit from the RTMP client to the
        SIP side as an RFC 4733 telephone-event RTP packet (built by
        MediaContext.dtmf2rtp).'''
        try:
            if _debug: print 'rtmp-sendDTMF', digit
            if self.media:
                messages = self.media.dtmf2rtp(digit)
                if self.session and self.media.session and messages is not None:
                    for payload, ts, marker, fmt in messages:
                        self.media.session.send(payload=payload, ts=ts, marker=marker, fmt=fmt)
        except:
            if _debug: print ' exception in rtmp_sendDTMF'; traceback.print_exc()
        yield
def rtmp_hold(self, value):
try:
if _debug: print 'rtmp-hold', value
self.session.hold(value)
except:
if _debug: print ' exception in rtmp_hold'; traceback.print_exc()
traceback.print_exc()
yield
    def requestFIR(self):
        '''Send a SIP INFO request carrying an XML media-control body asking
        the remote encoder for a Full Intra Request (picture fast update), so
        the next video frame is a keyframe. Used by the H.264 receive path when
        it must drop packets until the next intra frame. No-op without an
        active session/user agent.'''
        # TODO: this should be sent if we received INFO for FIR from remote.
        if self.session and self.session.ua:
            ua = self.session.ua
            m = ua.createRequest('INFO')
            m['Content-Type'] = SIPHeader('application/media_control+xml', 'Content-Type')
            m.body = '''<?xml version="1.0" encoding="utf-8" ?>
<media_control>
  <vc_primitive>
    <to_encoder>
      <picture_fast_update></picture_fast_update>
    </to_encoder>
  </vc_primitive>
</media_control>
'''
            ua.sendRequest(m)
class MediaContext(object):
'''MediaContext stores the media related session and context for any transcoding for the gateway.
It is independent of multitask or gevent and reused by gevent version.
'''
def __init__(self, context, request=None, listen_ip=None, rate='wideband', NetworkClass=None, *args):
if not NetworkClass: raise ValueError('must supply the RTP NetworkClass')
self._context, self._rate, self._codecs = context, rate, args
self._flv, self._h264, self._touchtone, self._narrowband, self._wideband, self._pcmu, self._pcma = format(pt=-1, name='x-flv', rate=90000), format(pt=-1, name='h264', rate=90000), format(pt=-1, name='telephone-event', rate=8000), format(pt=-1, name='speex', rate=8000), format(pt=-1, name='speex', rate=16000), format(pt=0, name='pcmu', rate=8000), format(pt=8, name='pcma', rate=8000)
self._audio, self._video = self._getMediaStreams()
self._reset()
streams = [x for x in [self._audio, self._video] if x]
self.session = MediaSession(app=context, streams=streams, request=request, listen_ip=listen_ip, NetworkClass=NetworkClass) # create the actual MediaSession
def close(self):
if self.session:
self.session.close()
self.session = None
self._reset()
self._context = self.session = None
def _reset(self):
# x-flv states
self._flv1_txseq = self._flv2_rxseq = self._flv2_rxlen = 0
self._flv2_rxchunks = []
# H264 transcoder state
self._h1_cfgVer = self._h1_profileIdc = self._h1_profileCompat = self._h1_levelIdc = self._h1_lenSize = self._h1_SPS = self._h1_PPS = self._h1_data = None
self._h2_SPS, self._h2_PPS, self._h2_sentSeq, self._h2_sentMetaData, self._h2_startTs, self._h2_startTm, self._h2_queue, self._h2_firstTime, self._h2_lastFIR = None, None, False, False, 0, 0, [], True, 0
# Audio transcoder state
self._au1_resample = self._au1_speex2lin = self._au1_lin2speex = self._au1_fmt = self._au2_lin2speex = None # transcoder states for audiospeex module.
self._au1_ts = self._au2_ts0 = 0
def rtmp2rtp(self, stream, message): # public method called by Context to transcode RTMP to RTP for media.
if self.session: # order of following processing is important
if self.session.hasType('video') and self.session.hasYourFormat(self._flv): # the remote SIP user supports our video format. send FLV video to remote in RTP.
return self._rtmp2rtpFLV(message) # both audio and video sent via this
elif message.header.type == Message.VIDEO and message.size > 1: # possibly H.264 video packet
if self.session.hasYourFormat(self._h264): # if h264 is available
return self._rtmp2rtpH264(message)
# else just ignore the message for audio-only call to SIP VoIP phone
elif message.header.type == Message.AUDIO and message.size > 1: # audio packet of speex codec.
return self._rtmp2rtpAU(message)
elif _debug: print ' ignoring in rtmp2rtp type=', message.header.type, 'size=', message.size
def rtp2rtmp(self, fmt, p): # public method called by Context to transcode RTP to RTMP for media
if str(fmt.name).lower() == str(self._flv.name).lower(): # this is a video (FLV) packet, just assemble and return to rtmp
return self._rtp2rtmpFLV(p)
elif str(fmt.name).lower() == str(self._touchtone.name).lower(): # this is DTMF
if _debug: print 'ignoring incoming DTMF touchtone'
elif str(fmt.name).lower() == str(self._h264.name).lower(): # this is H264
return self._rtp2rtmpH264(fmt, p)
#if self.session and self.session.hasYourFormat(self._h264): # uncomment for loopback
# self.session.send(payload=p.payload, ts=p.ts, marker=p.marker, fmt=self._h264)
else: # this is a audio (Speex) packet. Build RTMP header and return to rtmp
if self._context.play_stream: # avoid transcoding if play-stream is not created yet.
return self._rtp2rtmpAU(fmt, p)
def dtmf2rtp(self, digit): # public method called by Context to send DTMF to RTP.
if len(digit) != 1:
if _debug: print ' only single digit DTMF is supported in sendDTMF'
elif not self.session or not self.session.hasType('audio'):
if _debug: print ' ignoring sendDTMF: not an active audio call'
else:
payload = repr(DTMF(key=digit, end=True))
if _debug: print ' sending payload %r'%(payload,)
return [(payload, self._au1_ts, False, self._touchtone)]
def _getMediaStreams(self):
global audiospeex
audio, video = SDP.media(media='audio'), SDP.media(media='video')
if not self._codecs: # use the default codecs for backward compatibility
audio.fmt, video.fmt = [format(pt=96, name='speex', rate=8000 if self._rate == 'narrowband' else 16000)], [format(pt=97, name='x-flv', rate=90000)]
if audiospeex:
audio.fmt.extend([format(pt=98, name='speex', rate=16000 if self._rate == 'narrowband' else 8000), format(pt=0, name='pcmu', rate=8000), format(pt=8, name='pcma', rate=8000)])
# add touchtone format to allow sending this format as well.
audio.fmt.extend([format(pt=101, name='telephone-event', rate=8000)])
else:
pcmu = pcma = narrowband = hasvideo = hasaudio = False
for codec in self._codecs:
if codec == 'wideband': audio.fmt.append(format(pt=96, name='speex', rate=16000)); hasaudio = True
elif codec == 'narrowband' and not narrowband: audio.fmt.append(format(pt=98, name='speex', rate=8000)); hasaudio = narrowband = True
elif codec == 'pcmu' and not pcmu: audio.fmt.append(format(pt=0, name='pcmu', rate=8000)); hasaudio = pcmu = True
elif codec == 'pcma' and not pcma: audio.fmt.append(format(pt=8, name='pcma', rate=8000)); hasaudio = pcma = True
elif codec == 'ulaw' and audiospeex and not pcmu: audio.fmt.append(format(pt=0, name='pcmu', rate=8000)); hasaudio = pcmu = True
elif codec == 'alaw' and audiospeex and not pcma: audio.fmt.append(format(pt=8, name='pcma', rate=8000)); hasaudio = pcma = True
elif codec == 'dtmf': audio.fmt.append(format(pt=101, name='telephone-event', rate=8000)); hasaudio = True
elif codec == 'flv': video.fmt.append(format(pt=97, name='x-flv', rate=90000)); hasvideo = True
elif codec and codec.startswith('h264'): video.fmt.append(format(pt=99, name='h264', rate=90000)); hasvideo = True
elif _debug: print 'ignoring %r, may already be added'%(codec,)
if codec and codec.startswith('h264') and 'a' not in video: video['a'] = ['fmtp:99 profile-level-id=420014;packetization-mode=1'] # TODO: handle h264/baseline vs h264/main
if not hasaudio: audio = None
if not hasvideo: video = None
return (audio, video)
def _rtmp2rtpFLV(self, message): # convert given RTMP message to RTP packets and send to SIP side
data = pack('>III', message.type, message.size, message.time) + message.data # assembled message
origlen, packets, cseq = len(data), [], 0
hdr = pack('>Ihh', self._flv1_txseq, cseq, len(data)) # header for first chunk
while len(data) > 0:
packets.append('RTMP'+hdr+data[:1000])
data = data[1000:]
cseq += 1
hdr = pack('>Ih', self._flv1_txseq, cseq)
# if _debug: print ' FLV sending type=%d,len=%d split seq=%d, chunks=%d'%(message.type, origlen, self._flv1_txseq, len(packets))
self._flv1_txseq += 1
return [(packet, message.time*(self._flv.rate/1000), False, self._flv) for packet in packets]
def _rtp2rtmpFLV(self, p): # convert given RTP packet to RTMP message and play to the rtmp side.
magic, payload = p.payload[:4], p.payload[4:]
if magic != 'RTMP':
if _debug: print 'ignoring non-RTMP packet in received video'
return
seq, cseq = unpack('>Ih', payload[:6])
# if _debug: print ' FLV received seq=%d cseq=%d len=%d'%(seq, cseq, len(payload))
if cseq == 0: # first packet in the chunks. Initialize the rx state.
self._flv2_rxseq, self._flv2_rxchunks[:] = seq, []
self._flv2_rxlen, = unpack('>h', payload[6:8])
self._flv2_rxchunks.append(payload[8:])
else:
if seq != self._flv2_rxseq or len(self._flv2_rxchunks) == 0:
if _debug: print 'probably missed a begin packet'
return
if cseq != len(self._flv2_rxchunks):
if _debug: print 'probably out of order packet'
return
self._flv2_rxchunks.append(payload[6:])
got = sum(map(lambda x: len(x), self._flv2_rxchunks), 0)
if got < self._flv2_rxlen: return # not all chunk is received yet
if got > self._flv2_rxlen:
if _debug: print 'unexpected error, got more than expected %d > %d'%(got, self._flv2_rxlen)
return
if self._flv2_rxlen < 12:
if _debug: print 'received data is too small %d'%(self._flv2_rxlen)
return
data, message = ''.join(self._flv2_rxchunks), Message()
self._flv2_rxlen, self._flv2_rxchunks[:] = 0, [] # clear the state now that we have full packet
message.type, msglen, message.time = unpack('>III', data[0:12]); message.data = data[12:]
if msglen != len(message.data):
if _debug: print 'invalid message len %d != %d'%(msglen, len(message.data))
return
return [message]
def _rtmp2rtpH264(self, message):
# if _debug: print 'f-> ', len(message.data), repr(message.data[:20])
messages = []
if message.data[:2] == '\x17\x00': # AVC seq
data = message.data[2:]
cfgVer, profileIdc, profileCompat, levelIdc = unpack('>BBBB', data[3:7])
if cfgVer == 1:
lenSize = (ord(data[7]) & 0x03) + 1
numSPS, data, SPS = (ord(data[8]) & 0x1f), data[9:], []
for i in range(numSPS):
lenSPS, data = unpack('>H', data[:2])[0], data[2:]
SPS.append(data[:lenSPS])
data = data[lenSPS:]
numPPS, data, PPS = ord(data[0]), data[1:], []
for j in range(numPPS):
lenPPS, data = unpack('>H', data[:2])[0], data[2:]
PPS.append(data[:lenPPS])
data = data[lenPPS:]
# if _debug: print 'avcCfg: cfgVer=%r profileIdc=%r profileCompat=%r levelIdc=%r lenSize=%r numSPS=%r numPPS=%r SPS=%r PPS=%r'%(cfgVer, profileIdc, profileCompat, levelIdc, lenSize, numSPS, numPPS, SPS, PPS)
# store the parameter sets
self._h1_cfgVer, self._h1_profileIdc, self._h1_profileCompat, self._h1_levelIdc, self._h1_lenSize, self._h1_SPS, self._h1_PPS, self._h1_data = cfgVer, profileIdc, profileCompat, levelIdc, lenSize, SPS, PPS, message.data
if SPS: # send this to other end.
ts, marker = message.time * self._h264.rate / 1000, True
# if _debug: print ' ->s', len(SPS[0]), repr(SPS[0])
messages.append((SPS[0], ts, marker, self._h264))
if PPS:
# if _debug: print ' ->s', len(PPS[0]), repr(PPS[0])
ts, marker = message.time * self._h264.rate / 1000, True
messages.append((PPS[0], ts, marker, self._h264))
elif message.data[:2] == '\x17\x01' or message.data[:2] == '\x27\x01': # AVC intra or inter
if self._h1_PPS and self._h1_SPS: # indicates that SPS/PPS are sent
try:
nals = []
lenSize, data = self._h1_lenSize, message.data[5:]
while data:
nalSize = data[:lenSize]
if lenSize == 1: nalSize = unpack('>B', nalSize)[0]
elif lenSize == 2: nalSize = unpack('>H', nalSize)[0]
elif lenSize == 4: nalSize = unpack('>I', nalSize)[0]
else: raise ValueError('invalid lenSize %d'%(lenSize,))
nalData, data = data[lenSize:lenSize+nalSize], data[lenSize+nalSize:]
nals.append(nalData)
# if _debug: print ' nals count=', len(nals), 'types=', repr([(ord(x[0]) & 0x1f) for x in nals])
if nals:
remaining = nals[-1]
# message.data = message.data[:5] + pack('>I', len(remaining)) + remaining
maxSize = 1500
nalType, nri = (ord(remaining[0]) & 0x1f), (ord(remaining[0]) & 0x60)
if nalType == 5 or nalType == 1: # others are ignored for now
ts, marker = message.time * self._h264.rate / 1000, True # treat each Message as an access unit
if len(remaining) <= (maxSize-1):
# if _debug: print ' ->s', len(remaining), repr(remaining[:15])
messages.append((remaining, ts, marker, self._h264))
else: # TODO: only if packetization-mode != 0
start = 0x80
remaining = remaining[1:]
while remaining:
data, remaining = remaining[:maxSize-2], remaining[maxSize-2:]
end = 0x00 if remaining else 0x40
payload = pack('>BB', nri | 28, start | end | nalType) + data
start = 0x00
# if _debug: print ' ->s', len(payload), repr(payload[:15])
messages.append((payload, ts, bool(end), self._h264))
except:
print 'exception', sys.exc_info()
traceback.print_exc()
return messages
def _rtp2rtmpH264(self, fmt, p):
# if _debug: print ' <-s', len(p.payload), repr(p.payload[:15])
nalType, nri = ord(p.payload[0]) & 0x1f, ord(p.payload[0]) & 0x60
if nalType == 7: # SPS
self._h2_SPS = p.payload
elif nalType == 8: # PPS
self._h2_PPS = p.payload
else:
self._h2_queue.append(p) # assumes sorted order by seq.
if not p.marker: return
SPS, PPS, sentSeq = self._h2_SPS, self._h2_PPS, self._h2_sentSeq
if self._context.play_stream is None or not PPS or not SPS or PPS and SPS and not sentSeq and nalType != 5:
if _debug: print 'H264 drop until next intra'
self._h2_queue[:] = [] # drop until next intra
if (time.time() - self._h2_lastFIR) > 5.0:
self._h2_lastFIR = time.time()
self._context.requestFIR()
else:
if PPS and SPS and not sentSeq and nalType == 5:
self._h2_sentSeq = sentSeq = True
if not self._h2_startTs:
self._h2_startTs = p.ts
if not self._h2_startTm:
self._h2_startTm = self._context.play_stream.client.relativeTime
self._h2_queue, packets = [], self._h2_queue
messages = []
while packets:
payloads = []
first = ([x for x in packets if ord(x.payload[0]) & 0x1f in (5, 1)] + [packets[-1]])[0]
all = [x for x in packets if x.ts == first.ts]
packets = [x for x in packets if x.ts != first.ts]
nalType, nri = ord(first.payload[0]) & 0x1f, ord(first.payload[0]) & 0x60
tm = int((first.ts - self._h2_startTs) * 1000 / self._h264.rate) + self._h2_startTm
# tm = int(tm/30) # Ekiga specific # TODO: make this ekiga specific
# if not self._h2_sentMetaData:
# self._h2_sentMetaData = True
# data = amf.AMF0()
# data.write('onMetaData')
# m = amf.Object()
# # m.videocodecid, m.avcprofile, m.avclevel, m.videoframerate, m.width, m.height = 7.0, 66, 20, 25, 320, 240
# m.videocodecid, m.width, m.height = 7.0, 320.0, 240.0
# data.write(m)
# payload = data.data.getvalue()
# header = Header(time=tm, size=len(payload), type=Message.DATA, streamId=self._context.play_stream.id)
# m = Message(header, payload)
# # print repr(payload), m
## if _debug: print 'f<- ', len(payload), repr(payload[:20])
# messages.append(m)
if nalType == 5:
# payloads.append('\27\x02\x00\x00\x00')
if _debug: print " SPS", repr(SPS), "PPS", repr(PPS)
data = '\x17\x00\x00\x00\x00\x01' + SPS[1:4] + '\xff\xe1' + pack('>H', len(SPS)) + SPS + '\x01' + pack('>H', len(PPS)) + PPS
payloads.append(data)
if nalType in (5, 1, 28, 24):
if nalType == 28: # fragmented
realType, realNri = ord(first.payload[1]) & 0x1f, ord(first.payload[0]) & 0x60
data = ('\x17' if realType == 5 else '\x27') + '\x01\x00\x00\x00';
#data += pack('>I', 2) + '\x09' + ('\x10' if realType == 5 else '\x30') # needed for sending access unit delimiter
payload = pack('>B', realNri | realType) + ''.join([x.payload[2:] for x in all])
data += pack('>I', len(payload)) + payload
elif nalType == 24: # aggregated
data = ''
for x in all: # assumes all aggregated packet of same type
payload = x.payload
size, payload = unpack('>H', payload[:2])[0], payload[2:]
nalData, payload = payload[:size], payload[size:]
innerType = ord(nalData[0]) & 0x1f
if not data:
data = ('\x17' if innerType == 5 else '\x27') + '\x01\x00\x00\x00'
data += pack('>I', len(nalData)) + nalData
else:
data = ('\x17' if nalType == 5 else '\x27') + '\x01\x00\x00\x00'
# aud = '\x09' + ('\x10' if nalType == 5 else '\x30') # needed for sending access unit delimiter
# data += pack('>I', 2) + aud
# sei = '\x06' + ('\x00\x01\xc0' if nalType == 5 else '') + '\x01\x07\t\x08' + pack('>BB', int((tm % 1000) / 40), 0x80 | (int(tm / 1000) << 1)) + '\x00\x00\x03' + '\x00\x80'
# data += pack('>I', len(sei)) + sei
# if len(all) > 1:
# atype = (nri & 0x60) | (24 & 0x1f)
# payload = chr(atype) + ''.join([(pack('>H', len(x.payload)) + x.payload) for x in all])
# data += pack('>I', len(payload)) + payload
# else:
# print 'sending single nal'
# for x in all:
# data += pack('>I', len(x.payload)) + x.payload
for x in all:
data += pack('>I', len(x.payload)) + x.payload
# payloads.append(data + pack('>I', len(x.payload)) + x.payload)
payloads.append(data)
else:
print 'ignoring nalType=%d'%(nalType,)
if self._context.play_stream:
messages.extend([Message(Header(time=tm, size=len(payload), type=Message.VIDEO, streamId=self._context.play_stream.id), payload) for payload in payloads])
return messages
def accepting(self): # called by Context to return the selected codec after negotiation
global audiospeex
session = self.session
if not self._codecs: # do not set codecs for backward compatibility with older applications
preferred = self._audio and self._audio.fmt[0]
if audiospeex and session.hasType('audio') and not session.hasYourFormat(preferred): # if we have audiospeex transcoding module and remote doesn't have our preferred format, enable transcoding
fmt = ([fy for fy in self._audio.fmt if session.hasYourFormat(fy)] + [None])[0]
if _debug: print ' enable transcoding between %r/%r and %r/%r'%(preferred.name if preferred else None, preferred.rate if preferred else 0, fmt.name if fmt else None, fmt.rate if fmt else 0)
if fmt: self._au1_fmt = fmt # this will enable transcoding in rtmp2rtpAU
return tuple()
else:
if 'flv' in self._codecs and self.session.hasYourFormat(self._flv):
return ('default', 'default') # let the Flash Player choose between speex/nellymoser and h264/sorenson.
audiop = videop = None
for codec in self._codecs: # need to deal with only audio codecs
if not audiop and codec in ('wideband', 'narrowband', 'pcmu', 'pcma', 'ulaw', 'alaw'):
if codec == 'wideband' and session.hasYourFormat(self._wideband) or codec == 'narrowband' and session.hasYourFormat(self._narrowband) or codec == 'pcmu' and session.hasYourFormat(self._pcmu) or codec == 'pcma' and session.hasYourFormat(self._pcma):
audiop = 'speex' if codec in ('wideband', 'narrowband') else codec # no transcoding needed
elif codec == 'ulaw' and session.hasYourFormat(self._pcmu) or codec == 'alaw' and session.hasYourFormat(self._pcma):
if audiospeex: # enable transcoding if needed
preferred = self._narrowband
fmt = ([fy for fy in self._audio.fmt if session.hasYourFormat(self._pcmu if codec == 'ulaw' else self._pcma)] + [None])[0]
if _debug: print ' enable transcoding between %r/%r and %r/%r'%(preferred.name if preferred else None, preferred.rate if preferred else 0, fmt.name if fmt else None, fmt.rate if fmt else 0)
if fmt: self._au1_fmt = fmt
audiop = 'speex'
if not videop and codec == 'h264' and session.hasYourFormat(self._h264):
videop = 'h264'
return (audiop, videop)
def _rtmp2rtpAU(self, message):
global audiospeex
# if _debug: print ' AU received %r'%(message.data[0],)
first, payload, fmt = ord(message.data[0]), message.data[1:], None
codec = {0xb0: 'speex', 0x70: 'pcma', 0x80: 'pcmu'}.get(first & 0xf0, '')
if not codec: return # probably nellymoser or something else but target doesn't support x-flv.
session = self.session
if not self._au1_fmt: # no transcoding needed
if codec == 'speex' and session.hasYourFormat(self._wideband):
fmt = self._wideband
elif codec == 'speex' and session.hasYourFormat(self._narrowband):
fmt, payload = self._narrowband, self._removeWideband(payload) # remove wideband if target supports only narrowband but not wideband
elif codec == 'pcmu' and session.hasYourFormat(self._pcmu):
fmt = self._pcmu
elif codec == 'pcma' and session.hasYourFormat(self._pcma):
fmt = self._pcma
elif _debug: print 'ignoring codec audio type %r'%(first,)
elif audiospeex: # perform transcoding from speex/16000 to self._au1_fmt
fmt = self._au1_fmt
if str(fmt.name).lower() != 'speex' or fmt.rate != 16000: # only if transcoding is needed.
linear, self._au1_speex2lin = audiospeex.speex2lin(payload, sample_rate=16000, state=self._au1_speex2lin)
linear, self._au1_resample = audiospeex.resample(linear, input_rate=16000, output_rate=fmt.rate, state=self._au1_resample)
if str(fmt.name).lower() == 'speex' and fmt.rate != 16000: # transcode speex/16000 to speex/rate
payload, self._au1_lin2speex = audiospeex.lin2speex(linear, sample_rate=fmt.rate, state=self._au1_lin2speex)
elif str(fmt.name).lower() == 'pcmu' and fmt.rate == 8000 or fmt.pt == 0: # transcode speex/16000 to pcmu/8000
payload = audioop.lin2ulaw(linear, 2)
elif str(fmt.name).lower() == 'pcma' and fmt.rate == 8000 or fmt.pt == 8:
payload = audioop.lin2alaw(linear, 2)
else: raise ValueError, 'ignoring unsupported payload type %r %r/%r'%(fmt.pt, fmt.name, fmt.rate)
# TODO: map from RTMP timestamp to RTP
if fmt: self._au1_ts += (fmt.rate * 20 / 1000) # assume 20 ms at 8000 or 16000 Hz
return [(payload, self._au1_ts, False, fmt)] if payload and fmt else None
def _rtp2rtmpAU(self, fmt, p):
global audiospeex
if not self._au1_fmt: # no transcoding needed
speex_data, input_rate = p.payload, fmt.rate or 8000 # TODO: assume pcmu or pcma at 8kHz
if str(fmt.name).lower() == 'speex':
type = '\xb2'
elif str(fmt.name).lower() == 'pcmu' and fmt.rate == 8000 or fmt.pt == 0:
type = '\x82'
elif str(fmt.name).lower() == 'pcma' and fmt.rate == 8000 or fmt.pt == 8:
type = '\x72'
else:
raise ValueError, 'ignoring unsupported payload type %r %r/%r'%(fmt.pt, fmt.name, fmt.rate)
elif str(fmt.name).lower() == 'speex': # no transcoding since Flash supports speex 8000/16000 anyway
type, speex_data, input_rate = '\xb2', p.payload, fmt.rate
else: # perform transcoding from self._au1_fmt to speex/8000
type, input_rate = '\xb2', fmt.rate or 8000
if str(fmt.name).lower() == 'pcmu' and fmt.rate == 8000 or fmt.pt == 0:
linear = audioop.ulaw2lin(p.payload, 2)
elif str(fmt.name).lower() == 'pcma' and fmt.rate == 8000 or fmt.pt == 8:
linear = audioop.ulaw2lin(p.payload, 2)
else:
raise ValueError, 'ignoring unsupported payload type %r %r/%r'%(fmt.pt, fmt.name, fmt.rate)
# TODO: never send speex/16000 to Flash after transcoding
speex_data, self._au2_lin2speex = audiospeex.lin2speex(linear, sample_rate=8000, state=self._au2_lin2speex)
if not self._au2_ts0: self._au2_ts0 = p.ts
payload, tm = type + speex_data, (p.ts - self._au2_ts0) / (input_rate / 1000)
header = Header(time=tm, size=len(payload), type=Message.AUDIO, streamId=self._context.play_stream.id)
m = Message(header, payload)
# if _debug: print ' RTMP pt=%x len=%d hdr=%r'%(m.header.type, m.size, m.header)
return [m]
def _removeWideband(self, payload):
if ord(payload[0]) & 0x80 == 0: # narrowband
mode = (ord(payload[0]) & 0x78) >> 3
bits = (5, 43, 119, 160, 220, 300, 364, 492, 79)[mode] if mode < 9 else 0
size, bits = bits / 8, bits % 8
if bits and (size + 1) <= len(payload):
payload = payload[:size] + chr(((ord(payload[size]) & ((0xff << (8-bits)) & 0xff)) | (0xff >> (bits + 1))) & 0xff)
elif not bits and size <= len(payload):
payload = payload[:size]
return payload
class Gateway(App):
'''The SIP-RTMP gateway implemented as RTMP server application.'''
def __init__(self):
App.__init__(self)
def onConnect(self, client, *args):
App.onConnect(self, client, args)
for c in self.clients: multitask.add(c.connectionClosed())
client.context = Context(self, client)
multitask.add(client.context.rtmp_register(*args))
return None
def onDisconnect(self, client):
App.onDisconnect(self, client)
multitask.add(client.context.rtmp_unregister())
def onCommand(self, client, cmd, *args):
App.onCommand(self, client, cmd, args)
if hasattr(client.context, 'rtmp_%s'%(cmd,)) and callable(eval('client.context.rtmp_%s'%(cmd,))):
multitask.add(eval('client.context.rtmp_%s'%(cmd,))(*args))
elif _debug: print 'invalid command', cmd
def onPublish(self, client, stream):
if _debug: print self.name, 'onPublish', client.path, stream.name
client.context.publish_stream = stream
def onClose(self, client, stream):
if _debug: print self.name, 'onClose', client.path, stream.name
client.context.publish_stream = None
def onPlay(self, client, stream):
if _debug: print self.name, 'onPlay', client.path, stream.name
client.context.play_stream = stream
client.context.media._au2_ts0 = 0
def onStop(self, client, stream):
if _debug: print self.name, 'onStop', client.path, stream.name
client.context.play_stream = None
def onStatus(self, client, info):
if _debug: print self.name, 'onStatus', info
def onResult(self, client, result):
if _debug: print self.name, 'onResult', result
def onPublishData(self, client, stream, message):
multitask.add(client.context.rtmp_data(stream, message))
return False
#---------------------------------- Testing -------------------------------
# The main routine to start, run and stop the service. This part is similar to rtmp.py
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option('-i', '--host', dest='host', default='0.0.0.0', help="listening IP address for RTMP. Default '0.0.0.0'")
parser.add_option('-p', '--port', dest='port', default=1935, type="int", help='listening port number for RTMP. Default 1935')
parser.add_option('-r', '--root', dest='root', default='./', help="document path prefix. Directory must end with /. Default './'")
parser.add_option('-l', '--int-ip', dest='int_ip', default='0.0.0.0', help="listening IP address for SIP and RTP. Default '0.0.0.0'")
parser.add_option('-e', '--ext-ip', dest='ext_ip', default=None, help='IP address to advertise in SIP/SDP. Default is to use "--int-ip" or any local interface')
parser.add_option('-d', '--verbose', dest='verbose', default=False, action='store_true', help='enable debug trace')
parser.add_option('-D', '--verbose-all', dest='verbose_all', default=False, action='store_true', help='enable full debug trace of all modules')
(options, args) = parser.parse_args()
import rtmp, app.voip, std.rfc3550, std.rfc3261
rtmp._debug = options.verbose_all
app.voip._debug = options.verbose or options.verbose_all
#std.rfc3550._debug = options.verbose
std.rfc3261._debug = options.verbose_all
_debug = options.verbose or options.verbose_all
if _debug and not audiospeex:
print 'warning: audiospeex module not found; disabling transcoding to/from speex'
if options.ext_ip: setlocaladdr(options.ext_ip)
elif options.int_ip != '0.0.0.0': setlocaladdr(options.int_ip)
try:
agent = FlashServer()
agent.apps['sip'] = Gateway
agent.root, agent.int_ip, agent.ext_ip = options.root, options.int_ip, options.ext_ip
agent.start(options.host, options.port)
if _debug: print time.asctime(), 'Flash Server Starts - %s:%d' % (options.host, options.port)
while True:
try: multitask.run()
except multitask.Timeout: pass
except KeyboardInterrupt:
pass
if _debug: time.asctime(), 'Flash Server Stops'
| lgpl-2.1 |
LaoZhongGu/kbengine | kbe/src/lib/python/Lib/turtledemo/wikipedia.py | 150 | 1347 | """ turtle-example-suite:
tdemo_wikipedia3.py
This example is
inspired by the Wikipedia article on turtle
graphics. (See example wikipedia1 for URLs)
First we create (ne-1) (i.e. 35 in this
example) copies of our first turtle p.
Then we let them perform their steps in
parallel.
Followed by a complete undo().
"""
from turtle import Screen, Turtle, mainloop
from time import clock, sleep
def mn_eck(p, ne,sz):
    """Draw an ne-sided star-polygon pattern with ne turtles in parallel.

    p is the seed turtle; ne-1 clones are created, each rotated 360/ne
    degrees from the previous, then all ne turtles step together for ne
    rounds, with the pen color fading between red and blue per round.
    sz is the step length in pixels.
    """
    turtlelist = [p]
    #create ne-1 additional turtles
    for i in range(1,ne):
        q = p.clone()
        q.rt(360.0/ne)
        turtlelist.append(q)
        p = q
    for i in range(ne):
        # c in [0, ~0.7]: color interpolation factor for this round
        c = abs(ne/2.0-i)/(ne*.7)
        # let those ne turtles make a step
        # in parallel:
        for t in turtlelist:
            t.rt(360./ne)
            t.pencolor(1-c,0,c)
            t.fd(sz)
def main():
    """Run the demo: draw the 36-turtle pattern, pause, then undo everything.

    Returns a string reporting the combined draw + undo runtime.

    NOTE(review): this uses time.clock(), which was removed in Python 3.8 —
    on modern interpreters both the module-level import and these calls fail;
    time.perf_counter() is the documented replacement. Confirm target version.
    """
    s = Screen()
    s.bgcolor("black")
    p=Turtle()
    p.speed(0)
    p.hideturtle()
    p.pencolor("red")
    p.pensize(3)
    s.tracer(36,0)  # batch screen updates: one refresh per 36 drawing steps
    at = clock()
    mn_eck(p, 36, 19)
    et = clock()
    z1 = et-at
    sleep(1)
    at = clock()
    # undo until every turtle's undo buffer is exhausted
    while any([t.undobufferentries() for t in s.turtles()]):
        for t in s.turtles():
            t.undo()
    et = clock()
    return "runtime: %.3f sec" % (z1+et-at)
if __name__ == '__main__':
    # run the demo, report timing, then keep the window open
    msg = main()
    print(msg)
    mainloop()
| lgpl-3.0 |
puttarajubr/commcare-hq | corehq/ex-submodules/couchforms/tests/test_dbaccessors.py | 1 | 2772 | import datetime
from django.test import TestCase
from couchforms.dbaccessors import get_forms_by_type, clear_forms_in_domain, \
get_number_of_forms_by_type, get_number_of_forms_of_all_types, \
get_form_ids_by_type, get_number_of_forms_all_domains_in_couch
from couchforms.models import XFormInstance, XFormError
class TestDBAccessors(TestCase):
    """Tests for the couchforms dbaccessors helpers.

    Fixture: two XFormInstance docs (one 10 days old, one current) and one
    XFormError doc, all saved under the same domain, created once per class.

    NOTE(review): setUpClass/tearDownClass do not call super(); presumably
    intentional here, but confirm against the Django TestCase contract.
    """
    @classmethod
    def setUpClass(cls):
        # local import: casexml test helpers have heavy import-time deps
        from casexml.apps.case.tests import delete_all_xforms
        delete_all_xforms()
        cls.domain = 'evelyn'
        cls.now = datetime.datetime.utcnow()
        cls.xforms = [
            XFormInstance(_id='xform_1',
                          received_on=cls.now - datetime.timedelta(days=10)),
            XFormInstance(_id='xform_2', received_on=cls.now)
        ]
        cls.xform_errors = [XFormError(_id='xform_error_1')]
        for form in cls.xforms + cls.xform_errors:
            form.domain = cls.domain
            form.save()
    @classmethod
    def tearDownClass(cls):
        # remove every form saved for the test domain
        clear_forms_in_domain(cls.domain)
    def test_get_forms_by_type_xforminstance(self):
        forms = get_forms_by_type(self.domain, 'XFormInstance', limit=10)
        self.assertEqual(len(forms), len(self.xforms))
        self.assertEqual({form._id for form in forms},
                         {form._id for form in self.xforms})
        for form in forms:
            self.assertIsInstance(form, XFormInstance)
    def test_get_forms_by_type_xformerror(self):
        forms = get_forms_by_type(self.domain, 'XFormError', limit=10)
        self.assertEqual(len(forms), len(self.xform_errors))
        self.assertEqual({form._id for form in forms},
                         {form._id for form in self.xform_errors})
        for form in forms:
            self.assertIsInstance(form, XFormError)
    def test_get_number_of_forms_by_type_xforminstance(self):
        self.assertEqual(
            get_number_of_forms_by_type(self.domain, 'XFormInstance'),
            len(self.xforms)
        )
    def test_get_number_of_forms_by_type_xformerror(self):
        self.assertEqual(
            get_number_of_forms_by_type(self.domain, 'XFormError'),
            len(self.xform_errors)
        )
    def test_get_number_of_forms_of_all_types(self):
        self.assertEqual(
            get_number_of_forms_of_all_types(self.domain),
            len(self.xforms) + len(self.xform_errors)
        )
    def test_get_form_ids_by_type(self):
        form_ids = get_form_ids_by_type(self.domain, 'XFormError')
        self.assertEqual(form_ids, [form._id for form in self.xform_errors])
    def test_get_number_of_forms_all_domains_in_couch(self):
        # counts only XFormInstance docs, not errors — across all domains
        self.assertEqual(
            get_number_of_forms_all_domains_in_couch(),
            len(self.xforms)
        )
devs1991/test_edx_docmode | venv/lib/python2.7/site-packages/sklearn/lda.py | 3 | 9301 | """
The :mod:`sklearn.lda` module implements Linear Discriminant Analysis (LDA).
"""
# Authors: Matthieu Perrot
# Mathieu Blondel
import warnings
import numpy as np
from scipy import linalg
from .base import BaseEstimator, ClassifierMixin, TransformerMixin
from .utils.extmath import logsumexp
from .utils.fixes import unique
from .utils import check_arrays
__all__ = ['LDA']
class LDA(BaseEstimator, ClassifierMixin, TransformerMixin):
    """
    Linear Discriminant Analysis (LDA)

    A classifier with a linear decision boundary, generated
    by fitting class conditional densities to the data
    and using Bayes' rule.

    The model fits a Gaussian density to each class, assuming that
    all classes share the same covariance matrix.

    The fitted model can also be used to reduce the dimensionality
    of the input, by projecting it to the most discriminative
    directions.

    Parameters
    ----------
    n_components: int
        Number of components (< n_classes - 1) for dimensionality reduction

    priors : array, optional, shape = [n_classes]
        Priors on classes

    Attributes
    ----------
    `means_` : array-like, shape = [n_classes, n_features]
        Class means

    `xbar_` : float, shape = [n_features]
        Over all mean

    `priors_` : array-like, shape = [n_classes]
        Class priors (sum to 1)

    `covariance_` : array-like, shape = [n_features, n_features]
        Covariance matrix (shared by all classes)

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.lda import LDA
    >>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    >>> y = np.array([1, 1, 1, 2, 2, 2])
    >>> clf = LDA()
    >>> clf.fit(X, y)
    LDA(n_components=None, priors=None)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]

    See also
    --------
    sklearn.qda.QDA: Quadratic discriminant analysis
    """

    def __init__(self, n_components=None, priors=None):
        self.n_components = n_components
        self.priors = np.asarray(priors) if priors is not None else None

        if self.priors is not None:
            if (self.priors < 0).any():
                raise ValueError('priors must be non-negative')
            if self.priors.sum() != 1:
                # Emit a proper warning instead of the old Python-2-only
                # ``print`` statement, so it can be filtered/captured and the
                # module parses under Python 3.
                warnings.warn('the priors do not sum to 1. Renormalizing',
                              UserWarning)
                self.priors = self.priors / self.priors.sum()

    def fit(self, X, y, store_covariance=False, tol=1.0e-4):
        """
        Fit the LDA model according to the given training data and parameters.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples in the number of samples and
            n_features is the number of features.

        y : array, shape = [n_samples]
            Target values (integers)

        store_covariance : boolean
            If True the covariance matrix (shared by all classes) is computed
            and stored in `self.covariance_` attribute.

        tol : float
            Threshold on singular values for deciding numerical rank.
        """
        X, y = check_arrays(X, y, sparse_format='dense')
        self.classes_, y = unique(y, return_inverse=True)
        n_samples, n_features = X.shape
        n_classes = len(self.classes_)
        if n_classes < 2:
            raise ValueError('y has less than 2 classes')
        if self.priors is None:
            self.priors_ = np.bincount(y) / float(n_samples)
        else:
            self.priors_ = self.priors

        # Group means n_classes*n_features matrix
        means = []
        Xc = []
        cov = None
        if store_covariance:
            cov = np.zeros((n_features, n_features))
        # ``xrange`` replaced with ``range`` for Python 3 compatibility.
        for ind in range(n_classes):
            Xg = X[y == ind, :]
            meang = Xg.mean(0)
            means.append(meang)
            # centered group data
            Xgc = Xg - meang
            Xc.append(Xgc)
            if store_covariance:
                cov += np.dot(Xgc.T, Xgc)
        if store_covariance:
            cov /= (n_samples - n_classes)
            self.covariance_ = cov

        self.means_ = np.asarray(means)
        Xc = np.concatenate(Xc, 0)

        # ----------------------------
        # 1) within (univariate) scaling by with classes std-dev
        std = Xc.std(axis=0)
        # avoid division by zero in normalization
        std[std == 0] = 1.
        fac = float(1) / (n_samples - n_classes)
        # ----------------------------
        # 2) Within variance scaling
        X = np.sqrt(fac) * (Xc / std)
        # SVD of centered (within)scaled data; left singular vectors unused.
        _, S, V = linalg.svd(X, full_matrices=0)

        rank = np.sum(S > tol)
        if rank < n_features:
            warnings.warn("Variables are collinear")
        # Scaling of within covariance is: V' 1/S
        scaling = (V[:rank] / std).T / S[:rank]

        # ----------------------------
        # 3) Between variance scaling
        # Overall mean
        xbar = np.dot(self.priors_, self.means_)
        # Scale weighted centers
        X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
                    (means - xbar).T).T, scaling)
        # Centers are living in a space with n_classes-1 dim (maximum)
        # Use svd to find projection in the space spanned by the
        # (n_classes) centers
        _, S, V = linalg.svd(X, full_matrices=0)

        rank = np.sum(S > tol * S[0])
        # compose the scalings
        self.scaling = np.dot(scaling, V.T[:, :rank])
        self.xbar_ = xbar
        # weight vectors / centroids
        self.coef_ = np.dot(self.means_ - self.xbar_, self.scaling)
        self.intercept_ = (-0.5 * np.sum(self.coef_ ** 2, axis=1) +
                           np.log(self.priors_))
        return self

    @property
    def classes(self):
        warnings.warn("LDA.classes is deprecated and will be removed in 0.14. "
                      "Use LDA.classes_ instead.", DeprecationWarning,
                      stacklevel=2)
        return self.classes_

    def _decision_function(self, X):
        # Project onto the discriminant axes, then score against each
        # class centroid (linear scores plus log-prior intercepts).
        X = np.asarray(X)
        # center and scale data
        X = np.dot(X - self.xbar_, self.scaling)
        return np.dot(X, self.coef_.T) + self.intercept_

    def decision_function(self, X):
        """
        This function return the decision function values related to each
        class on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples, n_classes] or [n_samples,]
            Decision function values related to each class, per sample.
            In the two-class case, the shape is [n_samples,], giving the
            log likelihood ratio of the positive class.
        """
        dec_func = self._decision_function(X)
        if len(self.classes_) == 2:
            return dec_func[:, 1] - dec_func[:, 0]
        return dec_func

    def transform(self, X):
        """
        Project the data so as to maximize class separation (large separation
        between projected class means and small variance within each class).

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        X_new : array, shape = [n_samples, n_components]
        """
        X = np.asarray(X)
        # center and scale data
        X = np.dot(X - self.xbar_, self.scaling)
        n_comp = X.shape[1] if self.n_components is None else self.n_components
        return np.dot(X, self.coef_[:n_comp].T)

    def predict(self, X):
        """
        This function does classification on an array of test vectors X.

        The predicted class C for each sample in X is returned.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples]
        """
        d = self._decision_function(X)
        y_pred = self.classes_.take(d.argmax(1))
        return y_pred

    def predict_proba(self, X):
        """
        This function return posterior probabilities of classification
        according to each class on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
        """
        values = self._decision_function(X)
        # compute the likelihood of the underlying gaussian models
        # up to a multiplicative constant.
        likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
        # compute posterior probabilities
        return likelihood / likelihood.sum(axis=1)[:, np.newaxis]

    def predict_log_proba(self, X):
        """
        This function return posterior log-probabilities of classification
        according to each class on an array of test vectors X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]

        Returns
        -------
        C : array, shape = [n_samples, n_classes]
        """
        values = self._decision_function(X)
        loglikelihood = (values - values.max(axis=1)[:, np.newaxis])
        normalization = logsumexp(loglikelihood, axis=1)
        return loglikelihood - normalization[:, np.newaxis]
| agpl-3.0 |
moijes12/oh-mainline | vendor/packages/mechanize/mechanize/_auth.py | 137 | 2576 | """HTTP Authentication and Proxy support.
Copyright 2006 John J. Lee <jjl@pobox.com>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
from _urllib2_fork import HTTPPasswordMgr
# TODO: stop deriving from HTTPPasswordMgr
class HTTPProxyPasswordMgr(HTTPPasswordMgr):
    # has default realm and host/port
    def add_password(self, realm, uri, user, passwd):
        """Store credentials; realm and/or uri may be None (wildcards)."""
        # uri could be a single URI or a sequence
        if uri is None or isinstance(uri, basestring):
            uris = [uri]
        else:
            uris = uri
        passwd_by_domain = self.passwd.setdefault(realm, {})
        for uri in uris:
            # Index under both the default-port and explicit-port spellings
            # so later lookups succeed however the authority is written.
            for default_port in True, False:
                reduced_uri = self.reduce_uri(uri, default_port)
                passwd_by_domain[reduced_uri] = (user, passwd)

    def find_user_password(self, realm, authuri):
        """Look up credentials, trying the exact realm before the default
        (None) realm, and specific URIs before the wildcard (None) URI."""
        attempts = [(realm, authuri), (None, authuri)]
        # bleh, want default realm to take precedence over default
        # URI/authority, hence this outer loop
        for default_uri in False, True:
            for realm, authuri in attempts:
                authinfo_by_domain = self.passwd.get(realm, {})
                for default_port in True, False:
                    reduced_authuri = self.reduce_uri(authuri, default_port)
                    for uri, authinfo in authinfo_by_domain.iteritems():
                        if uri is None and not default_uri:
                            continue
                        if self.is_suburi(uri, reduced_authuri):
                            return authinfo
                user, password = None, None
                # NOTE(review): ``user`` was just assigned None, so this
                # break can never fire -- looks like vestigial code carried
                # over from urllib2; confirm before removing.
                if user is not None:
                    break
        return user, password

    def reduce_uri(self, uri, default_port=True):
        # None is the wildcard entry; keep it un-reduced.
        if uri is None:
            return None
        return HTTPPasswordMgr.reduce_uri(self, uri, default_port)

    def is_suburi(self, base, test):
        if base is None:
            # default to the proxy's host/port
            hostport, path = test
            base = (hostport, "/")
        return HTTPPasswordMgr.is_suburi(self, base, test)
class HTTPSClientCertMgr(HTTPPasswordMgr):
    """Maps URIs to (SSL key file, certificate file) pairs.

    Reuses HTTPPasswordMgr's storage: the key file is kept in the "user"
    slot and the cert file in the "password" slot, under the wildcard
    (None) realm.
    """
    # implementation inheritance: this is not a proper subclass

    def add_key_cert(self, uri, key_file, cert_file):
        self.add_password(None, uri, key_file, cert_file)

    def find_key_cert(self, authuri):
        return HTTPPasswordMgr.find_user_password(self, None, authuri)
| agpl-3.0 |
ashmastaflash/don-bot | app/donlib/halo.py | 1 | 11070 | import cloudpassage
import os
import requests
from formatter import Formatter
from urlparse import urljoin
from utility import Utility as util
from halocelery.apputils import Utility as hc_util
class Halo(object):
    """This contains all Halo interaction logic.

    Attributes:
        session (cloudpassage.HaloSession): Halo session object
    """

    def __init__(self, config, health_string, tasks_obj):
        """Initialization only instantiates the session object."""
        self.session = cloudpassage.HaloSession(config.halo_api_key,
                                                config.halo_api_secret_key,
                                                api_host=config.halo_api_host,
                                                api_port=config.halo_api_port,
                                                integration_string=config.ua)
        self.product_version = config.product_version
        self.monitor_events = config.monitor_events
        self.slack_channel = config.slack_channel
        self.health_string = health_string
        self.tasks = tasks_obj
        self.flower_host = config.flower_host
        self.config = config

    def credentials_work(self):
        """Return True if the Halo API credentials authenticate."""
        good = True
        try:
            self.session.authenticate_client()
        except cloudpassage.CloudPassageAuthentication:
            good = False
        return good

    @classmethod
    def list_tasks_formatted(cls, flower_host):
        """Gets a formatted list of tasks from Flower."""
        report = "Cortex Tasks:\n"
        celery_url = urljoin(flower_host, "api/tasks")
        try:
            response = requests.get(celery_url)
            result = response.json()
        except (ValueError, requests.exceptions.ConnectionError) as e:
            report += "Error: Unable to retrieve task list at this time."
            # We print the output so that it will be retained in the
            # container logs.
            hc_util.log_stderr(e)
            return report
        try:
            for task in result.items():
                prefmt = {"id": task[0], "name": task[1]["name"],
                          "args": str(task[1]["args"]),
                          "kwargs": str(task[1]["kwargs"]),
                          "started": util.u_to_8601(task[1]["started"]),
                          "tstamp": util.u_to_8601(task[1]["timestamp"]),
                          "state": task[1]["state"],
                          "exception": str(task[1]["exception"])}
                report += Formatter.format_item(prefmt, "task")
        except AttributeError as e:  # Empty set will throw AttributeError
            hc_util.log_stderr("Halo.list_tasks_formatted(): AttributeError! %s" % e)  # NOQA
        return report

    def interrogate(self, query_type, target):
        """Entrypoint for report generation.

        This method is where you start for generating reports. When you add
        a new report this is the second place you configure it, right after
        you set it up in Lexicals.get_message_type().

        Returns a finished report as a string, or an AsyncResult for the
        report types that are delegated to Celery tasks.
        """
        report = "I didn't understand your request. Try asking for help!\n"
        if query_type == "server_report":
            report = self.tasks.report_server_formatted.delay(target)
        elif query_type == "group_report":
            report = self.tasks.report_group_formatted.delay(target)
        elif query_type == "ip_report":
            report = self.get_ip_report(target)
        elif query_type == "all_servers":
            report = self.tasks.list_all_servers_formatted.delay()
        elif query_type == "all_groups":
            report = self.tasks.list_all_groups_formatted.delay()
        elif query_type == "group_firewall_report":
            img_tag = os.getenv('FIREWALL_GRAPH_VERSION', 'v0.2')
            image = "docker.io/halotools/firewall-graph:%s" % img_tag
            env_literal = {"TARGET": target}
            env_expand = {"HALO_API_KEY": "HALO_API_KEY",
                          "HALO_API_SECRET_KEY": "HALO_API_SECRET_KEY",
                          "HALO_API_HOSTNAME": "HALO_API_HOSTNAME",
                          "HTTPS_PROXY": "HTTPS_PROXY"}
            report = self.tasks.generic_containerized_task.delay(image,
                                                                 env_literal,
                                                                 env_expand,
                                                                 False)
        elif query_type == "servers_in_group":
            report = self.tasks.servers_in_group_formatted.delay(target)
        elif query_type == "servers_by_cve":
            report = self.tasks.search_server_by_cve(target)
        elif query_type == "ec2_halo_footprint_csv":
            img_tag = os.getenv('EC2_HALO_DELTA_VERSION', 'v0.2')
            image = "docker.io/halotools/ec2-halo-delta:%s" % img_tag
            env_literal = {"OUTPUT_FORMAT": "csv"}
            # Set optional args
            optional_fields = ["AWS_ROLE_NAME", "AWS_ACCOUNT_NUMBERS"]
            for field in optional_fields:
                if os.getenv(field, "") != "":
                    env_literal[field] = os.getenv(field)
            env_expand = {"HALO_API_KEY": "HALO_API_KEY",
                          "HALO_API_SECRET_KEY": "HALO_API_SECRET_KEY",
                          "HALO_API_HOSTNAME": "HALO_API_HOSTNAME",
                          "AWS_ACCESS_KEY_ID": "AWS_ACCESS_KEY_ID",
                          "AWS_SECRET_ACCESS_KEY": "AWS_SECRET_ACCESS_KEY",
                          "HTTPS_PROXY": "HTTPS_PROXY"}
            report = self.tasks.generic_containerized_task.delay(image,
                                                                 env_literal,
                                                                 env_expand,
                                                                 False)
        elif query_type == "tasks":
            report = self.list_tasks_formatted(self.flower_host)
        elif query_type == "selfie":
            report = Halo.take_selfie()
        elif query_type == "help":
            report = Halo.help_text()
        elif query_type == "version":
            report = Halo.version_info(self.product_version) + "\n"
        elif query_type == "config":
            report = self.running_config()
        elif query_type == "health":
            report = self.health_string
        return report

    @classmethod
    def help_text(cls):
        """This is the help output"""
        ret = ("I currently answer these burning questions, " +
               "but only when you address me by name:\n" +
               "\"tell me about server `(server_id|server_name)`\"\n" +
               "\"tell me about ip `ip_address`\"\n" +
               "\"tell me about group `(group_id|group_name)`\"\n" +
               "\"list all servers\"\n" +
               "\"list server groups\"\n" +
               "\"servers with CVE `cve_id`\"\n" +
               "\"servers in group `(group_id|group_name)`\"\n" +
               "\"group firewall `(group_id|group_name)`\"\n" +
               "\"ec2 halo footprint csv\"\n" +
               "\"version\"\n" +
               "\"tasks\"\n" +
               "\"config\"\n")
        return ret

    @classmethod
    def version_info(cls, product_version):
        """Return the product version, prefixed with 'v'."""
        return "v%s" % product_version

    def running_config(self):
        """Return a human-readable summary of the bot's configuration."""
        if os.getenv("NOSLACK"):
            return "Slack integration is disabled. CLI access only."
        # ``conf`` must be defined on every path: it used to be referenced
        # uninitialized (NameError) whenever event monitoring was off.
        conf = ""
        if self.monitor_events == 'yes':
            events = "Monitoring Halo events"
            conf = ("IP-Blocker Configuration\n" +
                    "------------------------\n" +
                    "IPBLOCKER_ENABLED=%s\n" % (self.config.ipblocker_enable) +
                    "IPBLOCKER_IP_ZONE_NAME=%s\n" % (self.config.ip_zone_name) +  # NOQA
                    "IPBLOCKER_TRIGGER_EVENTS=%s\n" % (self.config.ipblocker_trigger_events) +  # NOQA
                    "IPBLOCKER_TRIGGER_ONLY_ON_CRITICAL=%s\n\n" % (self.config.ipblocker_trigger_only_on_critical) +  # NOQA
                    "Quarantine Configuration\n" +
                    "------------------------\n" +
                    "QUARANTINE_ENABLED=%s\n" % (self.config.quarantine_enable) +  # NOQA
                    "QUARANTINE_TRIGGER_GROUP_NAMES=%s\n" % (self.config.quarantine_trigger_group_names) +  # NOQA
                    "QUARANTINE_TRIGGER_EVENTS=%s\n" % (self.config.quarantine_trigger_events) +  # NOQA
                    "QUARANTINE_TRIGGER_ONLY_ON_CRITICAL=%s\n" % (self.config.quarantine_trigger_only_on_critical) +  # NOQA
                    "QUARANTINE_GROUP_NAME=%s\n\n" % (self.config.quarantine_group_name) +  # NOQA
                    "Event Suppression Configuration\n" +
                    "-------------------------------\n" +
                    "SUPPRESS_EVENTS_IN_CHANNEL=%s\n" % (self.config.suppress_events))  # NOQA
        else:
            events = "NOT monitoring Halo events"
        retval = "%s\nHalo channel: #%s\n%s\n" % (events,
                                                  self.slack_channel,
                                                  conf)
        return retval

    def get_ip_report(self, target):
        """This wraps the report_server_by_id by accepting IP as target"""
        servers = cloudpassage.Server(self.session)
        report = "Unknown IP: \n" + target
        try:
            s_id = servers.list_all(connecting_ip_address=target)[0]["id"]
            report = self.tasks.report_server_formatted(s_id)
        except Exception:
            # Best-effort lookup: fall back to the "Unknown IP" message.
            # (Was a bare ``except:``, which also swallowed KeyboardInterrupt
            # and SystemExit.)
            pass
        return report

    def quarantine_server(self, event):
        """Kick off an async task moving the event's server into quarantine."""
        server_id = event["server_id"]
        quarantine_group_name = event["quarantine_group"]
        hc_util.log_stdout("Quarantine %s to group %s" % (server_id,
                                                          quarantine_group_name))  # NOQA
        return self.tasks.quarantine_server.delay(server_id,
                                                  quarantine_group_name)

    def add_ip_to_blocklist(self, ip_address, block_list_name):
        """Block an IP now and schedule its removal one hour out."""
        # We trigger a removal job for one hour out.
        hc_util.log_stdout("Add IP %s to blocklist %s" % (ip_address,
                                                          block_list_name))
        self.tasks.remove_ip_from_list.apply_async(args=[ip_address,
                                                         block_list_name],
                                                   countdown=3600)
        return self.tasks.add_ip_to_list.delay(ip_address, block_list_name)

    @classmethod
    def take_selfie(cls):
        """Return the ASCII-art selfie bundled next to this module."""
        selfie_file_name = "selfie.txt"
        heredir = os.path.abspath(os.path.dirname(__file__))
        selfie_full_path = os.path.join(heredir, selfie_file_name)
        with open(selfie_full_path, 'r') as s_file:
            selfie = "```" + s_file.read() + "```"
        return selfie
| bsd-3-clause |
Lab603/PicEncyclopedias | jni-build/jni-build/jni/include/tensorflow/contrib/distributions/python/kernel_tests/normal_test.py | 4 | 12117 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for initializers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from scipy import stats
import tensorflow as tf
class NormalTest(tf.test.TestCase):
  """Tests tf.contrib.distributions.Normal against scipy.stats.norm.

  Each test builds a Normal distribution (scalar or batched), evaluates a
  statistic in a TF session, and compares against the scipy reference or a
  closed-form expression; shape/static-shape agreement is asserted as well.
  """

  def testNormalLogPDF(self):
    with self.test_session():
      batch_size = 6
      mu = tf.constant([3.0] * batch_size)
      sigma = tf.constant([math.sqrt(10.0)] * batch_size)
      x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
      normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)
      expected_log_pdf = stats.norm(mu.eval(), sigma.eval()).logpdf(x)

      log_pdf = normal.log_pdf(x)
      self.assertAllClose(expected_log_pdf, log_pdf.eval())
      self.assertAllEqual(normal.batch_shape().eval(), log_pdf.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), log_pdf.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), log_pdf.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), log_pdf.eval().shape)

      pdf = normal.pdf(x)
      self.assertAllClose(np.exp(expected_log_pdf), pdf.eval())
      self.assertAllEqual(normal.batch_shape().eval(), pdf.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), pdf.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), pdf.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), pdf.eval().shape)

  def testNormalLogPDFMultidimensional(self):
    with self.test_session():
      batch_size = 6
      mu = tf.constant([[3.0, -3.0]] * batch_size)
      sigma = tf.constant([[math.sqrt(10.0), math.sqrt(15.0)]] * batch_size)
      x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
      normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)
      expected_log_pdf = stats.norm(mu.eval(), sigma.eval()).logpdf(x)

      log_pdf = normal.log_pdf(x)
      log_pdf_values = log_pdf.eval()
      self.assertEqual(log_pdf.get_shape(), (6, 2))
      self.assertAllClose(expected_log_pdf, log_pdf_values)
      self.assertAllEqual(normal.batch_shape().eval(), log_pdf.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), log_pdf.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), log_pdf.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), log_pdf.eval().shape)

      pdf = normal.pdf(x)
      pdf_values = pdf.eval()
      self.assertEqual(pdf.get_shape(), (6, 2))
      self.assertAllClose(np.exp(expected_log_pdf), pdf_values)
      self.assertAllEqual(normal.batch_shape().eval(), pdf.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), pdf_values.shape)
      self.assertAllEqual(normal.get_batch_shape(), pdf.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), pdf_values.shape)

  def testNormalCDF(self):
    with self.test_session():
      batch_size = 6
      mu = tf.constant([3.0] * batch_size)
      sigma = tf.constant([math.sqrt(10.0)] * batch_size)
      x = np.array([-2.5, 2.5, 4.0, 0.0, -1.0, 2.0], dtype=np.float32)
      normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)
      expected_cdf = stats.norm(mu.eval(), sigma.eval()).cdf(x)

      cdf = normal.cdf(x)
      self.assertAllClose(expected_cdf, cdf.eval())
      self.assertAllEqual(normal.batch_shape().eval(), cdf.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), cdf.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), cdf.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), cdf.eval().shape)

  def testNormalEntropyWithScalarInputs(self):
    # Scipy.stats.norm cannot deal with the shapes in the other test.
    with self.test_session():
      mu_v = 2.34
      sigma_v = 4.56
      normal = tf.contrib.distributions.Normal(mu=mu_v, sigma=sigma_v)
      # scipy.stats.norm cannot deal with these shapes.
      expected_entropy = stats.norm(mu_v, sigma_v).entropy()
      entropy = normal.entropy()
      self.assertAllClose(expected_entropy, entropy.eval())
      self.assertAllEqual(normal.batch_shape().eval(), entropy.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), entropy.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), entropy.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), entropy.eval().shape)

  def testNormalEntropy(self):
    with self.test_session():
      mu_v = np.array([1.0, 1.0, 1.0])
      sigma_v = np.array([[1.0, 2.0, 3.0]]).T
      normal = tf.contrib.distributions.Normal(mu=mu_v, sigma=sigma_v)

      # scipy.stats.norm cannot deal with these shapes.
      # Closed form: H = 0.5 * log(2*pi*e*sigma^2), broadcast over the batch.
      sigma_broadcast = mu_v * sigma_v
      expected_entropy = 0.5 * np.log(2*np.pi*np.exp(1)*sigma_broadcast**2)
      entropy = normal.entropy()
      np.testing.assert_allclose(expected_entropy, entropy.eval())
      self.assertAllEqual(normal.batch_shape().eval(), entropy.get_shape())
      self.assertAllEqual(normal.batch_shape().eval(), entropy.eval().shape)
      self.assertAllEqual(normal.get_batch_shape(), entropy.get_shape())
      self.assertAllEqual(normal.get_batch_shape(), entropy.eval().shape)

  def testNormalMeanAndMode(self):
    with self.test_session():
      # Mu will be broadcast to [7, 7, 7].
      mu = [7.]
      sigma = [11., 12., 13.]

      normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)

      self.assertAllEqual((3,), normal.mean().get_shape())
      self.assertAllEqual([7., 7, 7], normal.mean().eval())

      self.assertAllEqual((3,), normal.mode().get_shape())
      self.assertAllEqual([7., 7, 7], normal.mode().eval())

  def testNormalVariance(self):
    with self.test_session():
      # sigma will be broadcast to [7, 7, 7]
      mu = [1., 2., 3.]
      sigma = [7.]

      normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)

      self.assertAllEqual((3,), normal.variance().get_shape())
      self.assertAllEqual([49., 49, 49], normal.variance().eval())

  def testNormalStandardDeviation(self):
    with self.test_session():
      # sigma will be broadcast to [7, 7, 7]
      mu = [1., 2., 3.]
      sigma = [7.]

      normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)

      self.assertAllEqual((3,), normal.std().get_shape())
      self.assertAllEqual([7., 7, 7], normal.std().eval())

  def testNormalSample(self):
    with self.test_session():
      mu = tf.constant(3.0)
      sigma = tf.constant(math.sqrt(3.0))
      mu_v = 3.0
      sigma_v = np.sqrt(3.0)
      n = tf.constant(100000)
      normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)
      samples = normal.sample_n(n)
      sample_values = samples.eval()
      # Note that the standard error for the sample mean is ~ sigma / sqrt(n).
      # The sample variance similarly is dependent on sigma and n.
      # Thus, the tolerances below are very sensitive to number of samples
      # as well as the variances chosen.
      self.assertEqual(sample_values.shape, (100000,))
      self.assertAllClose(sample_values.mean(), mu_v, atol=1e-1)
      self.assertAllClose(sample_values.std(), sigma_v, atol=1e-1)

      expected_samples_shape = (
          tf.TensorShape([n.eval()]).concatenate(
              tf.TensorShape(normal.batch_shape().eval())))
      self.assertAllEqual(expected_samples_shape, samples.get_shape())
      self.assertAllEqual(expected_samples_shape, sample_values.shape)

      expected_samples_shape = (
          tf.TensorShape([n.eval()]).concatenate(
              normal.get_batch_shape()))
      self.assertAllEqual(expected_samples_shape, samples.get_shape())
      self.assertAllEqual(expected_samples_shape, sample_values.shape)

  def testNormalSampleMultiDimensional(self):
    with self.test_session():
      batch_size = 2
      mu = tf.constant([[3.0, -3.0]] * batch_size)
      sigma = tf.constant([[math.sqrt(2.0), math.sqrt(3.0)]] * batch_size)
      mu_v = [3.0, -3.0]
      sigma_v = [np.sqrt(2.0), np.sqrt(3.0)]
      n = tf.constant(100000)
      normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)
      samples = normal.sample_n(n)
      sample_values = samples.eval()
      # Note that the standard error for the sample mean is ~ sigma / sqrt(n).
      # The sample variance similarly is dependent on sigma and n.
      # Thus, the tolerances below are very sensitive to number of samples
      # as well as the variances chosen.
      self.assertEqual(samples.get_shape(), (100000, batch_size, 2))
      self.assertAllClose(sample_values[:, 0, 0].mean(), mu_v[0], atol=1e-1)
      self.assertAllClose(sample_values[:, 0, 0].std(), sigma_v[0], atol=1e-1)
      self.assertAllClose(sample_values[:, 0, 1].mean(), mu_v[1], atol=1e-1)
      self.assertAllClose(sample_values[:, 0, 1].std(), sigma_v[1], atol=1e-1)

      expected_samples_shape = (
          tf.TensorShape([n.eval()]).concatenate(
              tf.TensorShape(normal.batch_shape().eval())))
      self.assertAllEqual(expected_samples_shape, samples.get_shape())
      self.assertAllEqual(expected_samples_shape, sample_values.shape)

      expected_samples_shape = (
          tf.TensorShape([n.eval()]).concatenate(normal.get_batch_shape()))
      self.assertAllEqual(expected_samples_shape, samples.get_shape())
      self.assertAllEqual(expected_samples_shape, sample_values.shape)

  def testNegativeSigmaFails(self):
    with self.test_session():
      normal = tf.contrib.distributions.Normal(
          mu=[1.],
          sigma=[-5.],
          name='G')
      # The validity assertion only fires when an op is actually evaluated.
      with self.assertRaisesOpError('Condition x > 0 did not hold'):
        normal.mean().eval()

  def testNormalShape(self):
    with self.test_session():
      mu = tf.constant([-3.0] * 5)
      sigma = tf.constant(11.0)
      normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)

      self.assertEqual(normal.batch_shape().eval(), [5])
      self.assertEqual(normal.get_batch_shape(), tf.TensorShape([5]))
      self.assertAllEqual(normal.event_shape().eval(), [])
      self.assertEqual(normal.get_event_shape(), tf.TensorShape([]))

  def testNormalShapeWithPlaceholders(self):
    mu = tf.placeholder(dtype=tf.float32)
    sigma = tf.placeholder(dtype=tf.float32)
    normal = tf.contrib.distributions.Normal(mu=mu, sigma=sigma)

    with self.test_session() as sess:
      # get_batch_shape should return an "<unknown>" tensor.
      self.assertEqual(normal.get_batch_shape(), tf.TensorShape(None))
      self.assertEqual(normal.get_event_shape(), ())
      self.assertAllEqual(normal.event_shape().eval(), [])
      self.assertAllEqual(
          sess.run(normal.batch_shape(),
                   feed_dict={mu: 5.0, sigma: [1.0, 2.0]}),
          [2])

  def testNormalNormalKL(self):
    with self.test_session() as sess:
      batch_size = 6
      mu_a = np.array([3.0] * batch_size)
      sigma_a = np.array([1.0, 2.0, 3.0, 1.5, 2.5, 3.5])
      mu_b = np.array([-3.0] * batch_size)
      sigma_b = np.array([0.5, 1.0, 1.5, 2.0, 2.5, 3.0])

      n_a = tf.contrib.distributions.Normal(mu=mu_a, sigma=sigma_a)
      n_b = tf.contrib.distributions.Normal(mu=mu_b, sigma=sigma_b)

      kl = tf.contrib.distributions.kl(n_a, n_b)
      kl_val = sess.run(kl)

      # Closed-form KL between two univariate Gaussians.
      kl_expected = (
          (mu_a - mu_b)**2 / (2 * sigma_b**2)
          + 0.5 * ((sigma_a**2/sigma_b**2) -
                   1 - 2 * np.log(sigma_a / sigma_b)))

      self.assertEqual(kl.get_shape(), (batch_size,))
      self.assertAllClose(kl_val, kl_expected)
# Run the test suite when executed directly.
if __name__ == '__main__':
  tf.test.main()
| mit |
dmeulen/home-assistant | homeassistant/components/automation/template.py | 16 | 1846 | """
Offer template automation rules.
For more details about this automation rule, please refer to the documentation
at https://home-assistant.io/components/automation/#template-trigger
"""
import logging
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.const import CONF_VALUE_TEMPLATE, CONF_PLATFORM
from homeassistant.helpers import condition
from homeassistant.helpers.event import async_track_state_change
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Trigger and condition configs share the same schema: the 'template'
# platform plus a mandatory value_template.
TRIGGER_SCHEMA = IF_ACTION_SCHEMA = vol.Schema({
    vol.Required(CONF_PLATFORM): 'template',
    vol.Required(CONF_VALUE_TEMPLATE): cv.template,
})
def async_trigger(hass, config, action):
    """Set up a template trigger; fires *action* on a false->true edge.

    Returns the unsubscribe callable from async_track_state_change.
    """
    value_template = config.get(CONF_VALUE_TEMPLATE)
    value_template.hass = hass

    # Edge-detection latch: True while we are inside a "template is true"
    # period for which the action has already run.
    fired = False

    @callback
    def check_template(entity_id, from_s, to_s):
        """Re-evaluate the template on a state change and maybe fire."""
        nonlocal fired

        result = condition.async_template(hass, value_template)
        if not result:
            # Template is false again: re-arm for the next rising edge.
            fired = False
            return
        if fired:
            # Still true, already fired for this period.
            return

        fired = True
        hass.async_run_job(action, {
            'trigger': {
                'platform': 'template',
                'entity_id': entity_id,
                'from_state': from_s,
                'to_state': to_s,
            },
        })

    return async_track_state_change(
        hass, value_template.extract_entities(), check_template)
| mit |
HyShai/youtube-dl | youtube_dl/extractor/spiegeltv.py | 33 | 2762 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import float_or_none
class SpiegeltvIE(InfoExtractor):
    """Extractor for film pages on spiegel.tv.

    Resolves a slug-style URL to an RTMP-style m4v URL by walking the
    site's S3-hosted REST API (version -> slug -> media) and querying the
    streaming-server list.
    """
    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/(?:#/)?filme/(?P<id>[\-a-z0-9]+)'
    _TESTS = [{
        'url': 'http://www.spiegel.tv/filme/flug-mh370/',
        'info_dict': {
            'id': 'flug-mh370',
            'ext': 'm4v',
            'title': 'Flug MH370',
            'description': 'Das Rätsel um die Boeing 777 der Malaysia-Airlines',
            'thumbnail': 're:http://.*\.jpg$',
        },
        'params': {
            # rtmp download
            'skip_download': True,
        }
    }, {
        'url': 'http://www.spiegel.tv/#/filme/alleskino-die-wahrheit-ueber-maenner/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Normalize fragment-style URLs ('/#/filme/...') to plain paths.
        if '/#/' in url:
            url = url.replace('/#/', '/')
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(r'<h1.*?>(.*?)</h1>', webpage, 'title')

        apihost = 'http://spiegeltv-ivms2-restapi.s3.amazonaws.com'
        version_json = self._download_json(
            '%s/version.json' % apihost, video_id,
            note='Downloading version information')
        version_name = version_json['version_name']

        slug_json = self._download_json(
            '%s/%s/restapi/slugs/%s.json' % (apihost, version_name, video_id),
            video_id,
            note='Downloading object information')
        oid = slug_json['object_id']

        media_json = self._download_json(
            '%s/%s/restapi/media/%s.json' % (apihost, version_name, oid),
            video_id, note='Downloading media information')
        uuid = media_json['uuid']
        is_wide = media_json['is_wide']

        server_json = self._download_json(
            'http://www.spiegel.tv/streaming_servers/', video_id,
            note='Downloading server information')
        server = server_json[0]['endpoint']

        # Build the thumbnail list in one pass (was a manual append loop).
        thumbnails = [{
            'url': image['url'],
            'width': image['width'],
            'height': image['height'],
        } for image in media_json['images']]

        description = media_json['subtitle']
        duration = float_or_none(media_json.get('duration_in_ms'), scale=1000)
        # Renamed from 'format', which shadowed the builtin of the same name.
        aspect = '16x9' if is_wide else '4x3'

        video_url = server + 'mp4:' + uuid + '_spiegeltv_0500_' + aspect + '.m4v'
        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'ext': 'm4v',
            'description': description,
            'duration': duration,
            'thumbnails': thumbnails
        }
| unlicense |
grimfang/panda3d | samples/carousel/main.py | 25 | 9571 | #!/usr/bin/env python
# Author: Shao Zhang, Phil Saltzman, and Eddie Canaan
# Last Updated: 2015-03-13
#
# This tutorial will demonstrate some uses for intervals in Panda
# to move objects in your panda world.
# Intervals are tools that change a value of something, like position,
# rotation or anything else, linearly, over a set period of time. They can be
# also be combined to work in sequence or in Parallel
#
# In this lesson, we will simulate a carousel in motion using intervals.
# The carousel will spin using an hprInterval while 4 pandas will represent
# the horses on a traditional carousel. The 4 pandas will rotate with the
# carousel and also move up and down on their poles using a LerpFunc interval.
# Finally there will also be lights on the outer edge of the carousel that
# will turn on and off by switching their texture with intervals in Sequence
# and Parallel
from direct.showbase.ShowBase import ShowBase
from panda3d.core import AmbientLight, DirectionalLight, LightAttrib
from panda3d.core import NodePath
from panda3d.core import LVector3
from direct.interval.IntervalGlobal import * # Needed to use Intervals
from direct.gui.DirectGui import *
# Importing math constants and functions
from math import pi, sin
class CarouselDemo(ShowBase):
    """Panda3D sample application: a spinning carousel with bobbing pandas
    and blinking rim lights, driven entirely by intervals (no per-frame
    task code)."""

    def __init__(self):
        # Initialize the ShowBase class from which we inherit, which will
        # create a window and set up everything we need for rendering into it.
        ShowBase.__init__(self)

        # This creates the on screen title that is in every tutorial
        self.title = OnscreenText(text="Panda3D: Tutorial - Carousel",
                                  parent=base.a2dBottomCenter,
                                  fg=(1, 1, 1, 1), shadow=(0, 0, 0, .5),
                                  pos=(0, .1), scale=.1)

        base.disableMouse()  # Allow manual positioning of the camera
        camera.setPosHpr(0, -8, 2.5, 0, -9, 0)  # Set the cameras' position
                                                # and orientation
        self.loadModels()  # Load and position our models
        self.setupLights()  # Add some basic lighting
        self.startCarousel()  # Create the needed intervals and put the
                              # carousel into motion

    def loadModels(self):
        """Load and position the carousel, pandas, rim lights and environment."""
        # Load the carousel base
        self.carousel = loader.loadModel("models/carousel_base")
        self.carousel.reparentTo(render)  # Attach it to render

        # Load the modeled lights that are on the outer rim of the carousel
        # (not Panda lights)
        # There are 2 groups of lights. At any given time, one group will have
        # the "on" texture and the other will have the "off" texture.
        self.lights1 = loader.loadModel("models/carousel_lights")
        self.lights1.reparentTo(self.carousel)

        # Load the 2nd set of lights
        self.lights2 = loader.loadModel("models/carousel_lights")
        # We need to rotate the 2nd so it doesn't overlap with the 1st set.
        self.lights2.setH(36)
        self.lights2.reparentTo(self.carousel)

        # Load the textures for the lights. One texture is for the "on" state,
        # the other is for the "off" state.
        self.lightOffTex = loader.loadTexture("models/carousel_lights_off.jpg")
        self.lightOnTex = loader.loadTexture("models/carousel_lights_on.jpg")

        # Create an list (self.pandas) with filled with 4 dummy nodes attached
        # to the carousel.
        # This uses a python concept called "Array Comprehensions." Check the
        # Python manual for more information on how they work
        self.pandas = [self.carousel.attachNewNode("panda" + str(i))
                       for i in range(4)]
        self.models = [loader.loadModel("models/carousel_panda")
                       for i in range(4)]
        # One LerpFunc interval per panda, filled in by startCarousel().
        self.moves = [0] * 4

        for i in range(4):
            # set the position and orientation of the ith panda node we just created
            # The Z value of the position will be the base height of the pandas.
            # The headings are multiplied by i to put each panda in its own position
            # around the carousel
            self.pandas[i].setPosHpr(0, 0, 1.3, i * 90, 0, 0)

            # Load the actual panda model, and parent it to its dummy node
            self.models[i].reparentTo(self.pandas[i])
            # Set the distance from the center. This distance is based on the way the
            # carousel was modeled in Maya
            self.models[i].setY(.85)

        # Load the environment (Sky sphere and ground plane)
        self.env = loader.loadModel("models/env")
        self.env.reparentTo(render)
        self.env.setScale(7)

    # Panda Lighting
    def setupLights(self):
        """Create one ambient and one directional light for the whole scene."""
        # Create some lights and add them to the scene. By setting the lights on
        # render they affect the entire scene
        # Check out the lighting tutorial for more information on lights
        ambientLight = AmbientLight("ambientLight")
        ambientLight.setColor((.4, .4, .35, 1))
        directionalLight = DirectionalLight("directionalLight")
        directionalLight.setDirection(LVector3(0, 8, -2.5))
        directionalLight.setColor((0.9, 0.8, 0.9, 1))
        render.setLight(render.attachNewNode(directionalLight))
        render.setLight(render.attachNewNode(ambientLight))

        # Explicitly set the environment to not be lit
        self.env.setLightOff()

    def startCarousel(self):
        """Build and start all intervals: spin, panda bobbing, light blinking."""
        # Here's where we actually create the intervals to move the carousel
        # The first type of interval we use is one created directly from a NodePath
        # This interval tells the NodePath to vary its orientation (hpr) from its
        # current value (0,0,0) to (360,0,0) over 20 seconds. Intervals created from
        # NodePaths also exist for position, scale, color, and shear
        self.carouselSpin = self.carousel.hprInterval(20, LVector3(360, 0, 0))
        # Once an interval is created, we need to tell it to actually move.
        # start() will cause an interval to play once. loop() will tell an interval
        # to repeat once it finished. To keep the carousel turning, we use
        # loop()
        self.carouselSpin.loop()

        # The next type of interval we use is called a LerpFunc interval. It is
        # called that because it linearly interpolates (aka Lerp) values passed to
        # a function over a given amount of time.

        # In this specific case, horses on a carousel don't move constantly up,
        # suddenly stop, and then constantly move down again. Instead, they start
        # slowly, get fast in the middle, and slow down at the top. This motion is
        # close to a sine wave. This LerpFunc calls the function oscillatePanda
        # (which we will create below), which changes the height of the panda based
        # on the sin of the value passed in. In this way we achieve non-linear
        # motion by linearly changing the input to a function
        for i in range(4):
            self.moves[i] = LerpFunc(
                self.oscillatePanda,  # function to call
                duration=3,  # 3 second duration
                fromData=0,  # starting value (in radians)
                toData=2 * pi,  # ending value (2pi radians = 360 degrees)
                # Additional information to pass to
                # self.oscillatePanda
                extraArgs=[self.models[i], pi * (i % 2)]
            )
            # again, we want these to play continuously so we start them with
            # loop()
            self.moves[i].loop()

        # Finally, we combine Sequence, Parallel, Func, and Wait intervals,
        # to schedule texture swapping on the lights to simulate the lights turning
        # on and off.
        # Sequence intervals play other intervals in a sequence. In other words,
        # it waits for the current interval to finish before playing the next
        # one.
        # Parallel intervals play a group of intervals at the same time
        # Wait intervals simply do nothing for a given amount of time
        # Func intervals simply make a single function call. This is helpful because
        # it allows us to schedule functions to be called in a larger sequence. They
        # take virtually no time so they don't cause a Sequence to wait.
        self.lightBlink = Sequence(
            # For the first step in our sequence we will set the on texture on one
            # light and set the off texture on the other light at the same time
            Parallel(
                Func(self.lights1.setTexture, self.lightOnTex, 1),
                Func(self.lights2.setTexture, self.lightOffTex, 1)),
            Wait(1),  # Then we will wait 1 second
            # Then we will switch the textures at the same time
            Parallel(
                Func(self.lights1.setTexture, self.lightOffTex, 1),
                Func(self.lights2.setTexture, self.lightOnTex, 1)),
            Wait(1)  # Then we will wait another second
        )
        self.lightBlink.loop()  # Loop this sequence continuously

    def oscillatePanda(self, rad, panda, offset):
        """Set a panda's height from the sine of the interval-driven angle.

        rad: current lerped angle in radians; panda: NodePath to move;
        offset: phase offset so alternating pandas move in opposition.
        """
        # This is the oscillation function mentioned earlier. It takes in a
        # degree value, a NodePath to set the height on, and an offset. The
        # offset is there so that the different pandas can move opposite to
        # each other. The .2 is the amplitude, so the height of the panda will
        # vary from -.2 to .2
        panda.setZ(sin(rad + offset) * .2)
if __name__ == '__main__':
    # Instantiate the demo and enter the Panda3D main loop only when run as
    # a script, so importing this module for reuse does not open a window
    # and block in run().
    demo = CarouselDemo()
    demo.run()
| bsd-3-clause |
bfrgoncalves/Online-PhyloViZ | node_modules/l/node_modules/hook.io/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/generator/scons.py | 231 | 35679 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import gyp
import gyp.common
import gyp.SCons as SCons
import os.path
import pprint
import re
import subprocess
# TODO: remove when we delete the last WriteList() call in this module
WriteList = SCons.WriteList
# Maps GYP's generic build-variable names to their SCons spellings; gyp
# substitutes these values into target definitions when this generator runs.
generator_default_variables = {
    'EXECUTABLE_PREFIX': '',
    'EXECUTABLE_SUFFIX': '',
    'STATIC_LIB_PREFIX': '${LIBPREFIX}',
    'SHARED_LIB_PREFIX': '${SHLIBPREFIX}',
    'STATIC_LIB_SUFFIX': '${LIBSUFFIX}',
    'SHARED_LIB_SUFFIX': '${SHLIBSUFFIX}',
    'INTERMEDIATE_DIR': '${INTERMEDIATE_DIR}',
    'SHARED_INTERMEDIATE_DIR': '${SHARED_INTERMEDIATE_DIR}',
    'OS': 'linux',
    'PRODUCT_DIR': '$TOP_BUILDDIR',
    'SHARED_LIB_DIR': '$LIB_DIR',
    'LIB_DIR': '$LIB_DIR',
    'RULE_INPUT_ROOT': '${SOURCE.filebase}',
    'RULE_INPUT_DIRNAME': '${SOURCE.dir}',
    'RULE_INPUT_EXT': '${SOURCE.suffix}',
    'RULE_INPUT_NAME': '${SOURCE.file}',
    'RULE_INPUT_PATH': '${SOURCE.abspath}',
    'CONFIGURATION_NAME': '${CONFIG_NAME}',
}

# Tell GYP how to process the input for us.
generator_handles_variants = True
generator_wants_absolute_build_file_paths = True
def FixPath(path, prefix):
  """Prepend prefix to path unless path is absolute or a SCons variable.

  Paths beginning with '$' are SCons construction-variable references
  (e.g. '$SRC_DIR/foo') and must not be re-rooted.  Uses startswith()
  instead of indexing so an empty path no longer raises IndexError
  (it is simply treated as relative and prefixed).
  """
  if not os.path.isabs(path) and not path.startswith('$'):
    path = prefix + path
  return path
header = """\
# This file is generated; do not edit.
"""
_alias_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
_outputs = env.Alias(
['_%(target_name)s_action'],
%(inputs)s,
_action
)
env.AlwaysBuild(_outputs)
"""
_run_as_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
"""
_run_as_template_suffix = """
_run_as_target = env.Alias('run_%(target_name)s', target_files, _action)
env.Requires(_run_as_target, [
Alias('%(target_name)s'),
])
env.AlwaysBuild(_run_as_target)
"""
_command_template = """
if GetOption('verbose'):
_action = Action([%(action)s])
else:
_action = Action([%(action)s], %(message)s)
_outputs = env.Command(
%(outputs)s,
%(inputs)s,
_action
)
"""
# This is copied from the default SCons action, updated to handle symlinks.
_copy_action_template = """
import shutil
import SCons.Action
def _copy_files_or_dirs_or_symlinks(dest, src):
SCons.Node.FS.invalidate_node_memos(dest)
if SCons.Util.is_List(src) and os.path.isdir(dest):
for file in src:
shutil.copy2(file, dest)
return 0
elif os.path.islink(src):
linkto = os.readlink(src)
os.symlink(linkto, dest)
return 0
elif os.path.isfile(src):
return shutil.copy2(src, dest)
else:
return shutil.copytree(src, dest, 1)
def _copy_files_or_dirs_or_symlinks_str(dest, src):
return 'Copying %s to %s ...' % (src, dest)
GYPCopy = SCons.Action.ActionFactory(_copy_files_or_dirs_or_symlinks,
_copy_files_or_dirs_or_symlinks_str,
convert=str)
"""
_rule_template = """
%(name)s_additional_inputs = %(inputs)s
%(name)s_outputs = %(outputs)s
def %(name)s_emitter(target, source, env):
return (%(name)s_outputs, source + %(name)s_additional_inputs)
if GetOption('verbose'):
%(name)s_action = Action([%(action)s])
else:
%(name)s_action = Action([%(action)s], %(message)s)
env['BUILDERS']['%(name)s'] = Builder(action=%(name)s_action,
emitter=%(name)s_emitter)
_outputs = []
_processed_input_files = []
for infile in input_files:
if (type(infile) == type('')
and not os.path.isabs(infile)
and not infile[0] == '$'):
infile = %(src_dir)r + infile
if str(infile).endswith('.%(extension)s'):
_generated = env.%(name)s(infile)
env.Precious(_generated)
_outputs.append(_generated)
%(process_outputs_as_sources_line)s
else:
_processed_input_files.append(infile)
prerequisites.extend(_outputs)
input_files = _processed_input_files
"""
_spawn_hack = """
import re
import SCons.Platform.posix
needs_shell = re.compile('["\\'><!^&]')
def gyp_spawn(sh, escape, cmd, args, env):
def strip_scons_quotes(arg):
if arg[0] == '"' and arg[-1] == '"':
return arg[1:-1]
return arg
stripped_args = [strip_scons_quotes(a) for a in args]
if needs_shell.search(' '.join(stripped_args)):
return SCons.Platform.posix.exec_spawnvpe([sh, '-c', ' '.join(args)], env)
else:
return SCons.Platform.posix.exec_spawnvpe(stripped_args, env)
"""
def EscapeShellArgument(s):
  """Return *s* single-quoted so a POSIX shell interprets it literally.

  Embedded single quotes become '\\'' (close quote, escaped quote, reopen
  quote), the standard POSIX escaping idiom.  Taken from
  http://stackoverflow.com/questions/35817/whats-the-best-way-to-escape-ossystem-calls-in-python
  """
  escaped = s.replace("'", "'\\''")
  return "'%s'" % escaped
def InvertNaiveSConsQuoting(s):
  """Undo SCons's naive whitespace quoting.

  SCons tries to "help" by wrapping any argument containing a space or tab
  in double quotes (see quote_spaces() in Subst.py), which is broken for
  all but trivial cases.  Pre-wrapping such arguments here makes SCons's
  added quotes close ours at both ends, cancelling the effect.
  """
  needs_wrapping = any(ws in s for ws in (' ', '\t'))
  if needs_wrapping:
    return '"%s"' % s
  return s
def EscapeSConsVariableExpansion(s):
  """Escape SCons's '$' variable-expansion syntax so *s* stays literal.

  For some reason this requires four dollar signs per original dollar,
  not two, even without the shell involved.  Splitting on '$' and
  re-joining with the escaped form is equivalent to a replace(), including
  for leading, trailing, and adjacent dollar signs.
  """
  return '$$$$'.join(s.split('$'))
def EscapeCppDefine(s):
  """Escapes a CPP define so that it will reach the compiler unaltered.

  Applies the three escaping layers innermost-first: POSIX shell quoting,
  then undoing SCons's naive re-quoting, then '$' expansion escaping.
  """
  return EscapeSConsVariableExpansion(
      InvertNaiveSConsQuoting(EscapeShellArgument(s)))
def GenerateConfig(fp, config, indent='', src_dir=''):
  """
  Generates SCons dictionary items for a gyp configuration.

  This provides the main translation between the (lower-case) gyp settings
  keywords and the (upper-case) SCons construction variables.

  Args:
    fp: open file object to write the generated SConscript text to.
    config: gyp configuration dict to translate.
    indent: leading whitespace for each emitted line.
    src_dir: source-directory prefix applied to relative include_dirs.
  """
  var_mapping = {
      'ASFLAGS' : 'asflags',
      'CCFLAGS' : 'cflags',
      'CFLAGS' : 'cflags_c',
      'CXXFLAGS' : 'cflags_cc',
      'CPPDEFINES' : 'defines',
      'CPPPATH' : 'include_dirs',
      # Add the ldflags value to $LINKFLAGS, but not $SHLINKFLAGS.
      # SCons defines $SHLINKFLAGS to incorporate $LINKFLAGS, so
      # listing both here would cause 'ldflags' to get appended to
      # both, and then have it show up twice on the command line.
      'LINKFLAGS' : 'ldflags',
  }
  postamble='\n%s],\n' % indent
  # Iterate in sorted order so the generated file is deterministic.
  for scons_var in sorted(var_mapping.keys()):
    gyp_var = var_mapping[scons_var]
    value = config.get(gyp_var)
    if value:
      if gyp_var in ('defines',):
        # Defines must survive shell, SCons re-quoting and '$' expansion.
        value = [EscapeCppDefine(v) for v in value]
      if gyp_var in ('include_dirs',):
        if src_dir and not src_dir.endswith('/'):
          src_dir += '/'
        result = []
        for v in value:
          v = FixPath(v, src_dir)
          # Force SCons to evaluate the CPPPATH directories at
          # SConscript-read time, so delayed evaluation of $SRC_DIR
          # doesn't point it to the --generator-output= directory.
          result.append('env.Dir(%r)' % v)
        value = result
      else:
        # NOTE: Python 2 map() returns a list, as WriteList expects.
        value = map(repr, value)
      WriteList(fp,
                value,
                prefix=indent,
                preamble='%s%s = [\n    ' % (indent, scons_var),
                postamble=postamble)
def GenerateSConscript(output_filename, spec, build_file, build_file_data):
  """
  Generates a SConscript file for a specific target.

  This generates a SConscript file suitable for building any or all of
  the target's configurations.

  A SConscript file may be called multiple times to generate targets for
  multiple configurations.  Consequently, it needs to be ready to build
  the target for any requested configuration, and therefore contains
  information about the settings for all configurations (generated into
  the SConscript file at gyp configuration time) as well as logic for
  selecting (at SCons build time) the specific configuration being built.

  The general outline of a generated SConscript file is:

    --  Header

    --  Import 'env'.  This contains a $CONFIG_NAME construction
        variable that specifies what configuration to build
        (e.g. Debug, Release).

    --  Configurations.  This is a dictionary with settings for
        the different configurations (Debug, Release) under which this
        target can be built.  The values in the dictionary are themselves
        dictionaries specifying what construction variables should added
        to the local copy of the imported construction environment
        (Append), should be removed (FilterOut), and should outright
        replace the imported values (Replace).

    --  Clone the imported construction environment and update
        with the proper configuration settings.

    --  Initialize the lists of the targets' input files and prerequisites.

    --  Target-specific actions and rules.  These come after the
        input file and prerequisite initializations because the
        outputs of the actions and rules may affect the input file
        list (process_outputs_as_sources) and get added to the list of
        prerequisites (so that they're guaranteed to be executed before
        building the target).

    --  Call the Builder for the target itself.

    --  Arrange for any copies to be made into installation directories.

    --  Set up the {name} Alias (phony Node) for the target as the
        primary handle for building all of the target's pieces.

    --  Use env.Require() to make sure the prerequisites (explicitly
        specified, but also including the actions and rules) are built
        before the target itself.

    --  Return the {name} Alias to the calling SConstruct file
        so it can be added to the list of default targets.
  """
  # NOTE(review): Python 2 only — uses iteritems()/itervalues() and the
  # 'except KeyError, e' syntax throughout.
  scons_target = SCons.Target(spec)

  gyp_dir = os.path.dirname(output_filename)
  if not gyp_dir:
    gyp_dir = '.'
  gyp_dir = os.path.abspath(gyp_dir)

  output_dir = os.path.dirname(output_filename)
  src_dir = build_file_data['_DEPTH']
  src_dir_rel = gyp.common.RelativePath(src_dir, output_dir)
  subdir = gyp.common.RelativePath(os.path.dirname(build_file), src_dir)
  src_subdir = '$SRC_DIR/' + subdir
  src_subdir_ = src_subdir + '/'

  component_name = os.path.splitext(os.path.basename(build_file))[0]
  target_name = spec['target_name']

  if not os.path.exists(gyp_dir):
    os.makedirs(gyp_dir)
  fp = open(output_filename, 'w')
  fp.write(header)

  fp.write('\nimport os\n')
  fp.write('\nImport("env")\n')

  #
  fp.write('\n')
  fp.write('env = env.Clone(COMPONENT_NAME=%s,\n' % repr(component_name))
  fp.write('                TARGET_NAME=%s)\n' % repr(target_name))

  # Emit the SPAWN replacement once if any configuration asks for the
  # command-line-length workaround.
  for config in spec['configurations'].itervalues():
    if config.get('scons_line_length'):
      fp.write(_spawn_hack)
      break

  #
  indent = ' ' * 12
  fp.write('\n')
  fp.write('configurations = {\n')
  for config_name, config in spec['configurations'].iteritems():
    fp.write('    \'%s\' : {\n' % config_name)

    fp.write('        \'Append\' : dict(\n')
    GenerateConfig(fp, config, indent, src_subdir)
    libraries = spec.get('libraries')
    if libraries:
      WriteList(fp,
                map(repr, libraries),
                prefix=indent,
                preamble='%sLIBS = [\n    ' % indent,
                postamble='\n%s],\n' % indent)
    fp.write('        ),\n')

    fp.write('        \'FilterOut\' : dict(\n' )
    for key, var in config.get('scons_remove', {}).iteritems():
      fp.write('             %s = %s,\n' % (key, repr(var)))
    fp.write('        ),\n')

    fp.write('        \'Replace\' : dict(\n' )
    scons_settings = config.get('scons_variable_settings', {})
    for key in sorted(scons_settings.keys()):
      val = pprint.pformat(scons_settings[key])
      fp.write('             %s = %s,\n' % (key, val))
    # Link C++ targets with the C++ driver instead of the C one.
    if 'c++' in spec.get('link_languages', []):
      fp.write('             %s = %s,\n' % ('LINK', repr('$CXX')))
    if config.get('scons_line_length'):
      fp.write('             SPAWN = gyp_spawn,\n')
    fp.write('        ),\n')

    fp.write('        \'ImportExternal\' : [\n' )
    for var in config.get('scons_import_variables', []):
      fp.write('             %s,\n' % repr(var))
    fp.write('        ],\n')
    fp.write('        \'PropagateExternal\' : [\n' )
    for var in config.get('scons_propagate_variables', []):
      fp.write('             %s,\n' % repr(var))
    fp.write('        ],\n')

    fp.write('    },\n')
  fp.write('}\n')

  fp.write('\n'
           'config = configurations[env[\'CONFIG_NAME\']]\n'
           'env.Append(**config[\'Append\'])\n'
           'env.FilterOut(**config[\'FilterOut\'])\n'
           'env.Replace(**config[\'Replace\'])\n')

  fp.write('\n'
           '# Scons forces -fPIC for SHCCFLAGS on some platforms.\n'
           '# Disable that so we can control it from cflags in gyp.\n'
           '# Note that Scons itself is inconsistent with its -fPIC\n'
           '# setting. SHCCFLAGS forces -fPIC, and SHCFLAGS does not.\n'
           '# This will make SHCCFLAGS consistent with SHCFLAGS.\n'
           'env[\'SHCCFLAGS\'] = [\'$CCFLAGS\']\n')

  fp.write('\n'
           'for _var in config[\'ImportExternal\']:\n'
           '  if _var in ARGUMENTS:\n'
           '    env[_var] = ARGUMENTS[_var]\n'
           '  elif _var in os.environ:\n'
           '    env[_var] = os.environ[_var]\n'
           'for _var in config[\'PropagateExternal\']:\n'
           '  if _var in ARGUMENTS:\n'
           '    env[_var] = ARGUMENTS[_var]\n'
           '  elif _var in os.environ:\n'
           '    env[\'ENV\'][_var] = os.environ[_var]\n')

  fp.write('\n'
           "env['ENV']['LD_LIBRARY_PATH'] = env.subst('$LIB_DIR')\n")

  #
  #fp.write("\nif env.has_key('CPPPATH'):\n")
  #fp.write("  env['CPPPATH'] = map(env.Dir, env['CPPPATH'])\n")

  variants = spec.get('variants', {})
  for setting in sorted(variants.keys()):
    if_fmt = 'if ARGUMENTS.get(%s) not in (None, \'0\'):\n'
    fp.write('\n')
    fp.write(if_fmt % repr(setting.upper()))
    fp.write('  env.AppendUnique(\n')
    GenerateConfig(fp, variants[setting], indent, src_subdir)
    fp.write('  )\n')

  #
  scons_target.write_input_files(fp)

  fp.write('\n')
  fp.write('target_files = []\n')
  prerequisites = spec.get('scons_prerequisites', [])
  fp.write('prerequisites = %s\n' % pprint.pformat(prerequisites))

  actions = spec.get('actions', [])
  for action in actions:
    a = ['cd', src_subdir, '&&'] + action['action']
    message = action.get('message')
    if message:
      message = repr(message)
    inputs = [FixPath(f, src_subdir_) for f in action.get('inputs', [])]
    outputs = [FixPath(f, src_subdir_) for f in action.get('outputs', [])]
    # Actions with outputs become Command nodes; output-less actions
    # become always-built Alias nodes.
    if outputs:
      template = _command_template
    else:
      template = _alias_template
    fp.write(template % {
        'inputs' : pprint.pformat(inputs),
        'outputs' : pprint.pformat(outputs),
        'action' : pprint.pformat(a),
        'message' : message,
        'target_name': target_name,
    })
    if int(action.get('process_outputs_as_sources', 0)):
      fp.write('input_files.extend(_outputs)\n')
    fp.write('prerequisites.extend(_outputs)\n')
    fp.write('target_files.extend(_outputs)\n')

  rules = spec.get('rules', [])
  for rule in rules:
    # Sanitize the rule name into a valid Python identifier for the
    # generated Builder.
    name = re.sub('[^a-zA-Z0-9_]', '_', rule['rule_name'])
    message = rule.get('message')
    if message:
      message = repr(message)
    if int(rule.get('process_outputs_as_sources', 0)):
      poas_line = '_processed_input_files.extend(_generated)'
    else:
      poas_line = '_processed_input_files.append(infile)'
    inputs = [FixPath(f, src_subdir_) for f in rule.get('inputs', [])]
    outputs = [FixPath(f, src_subdir_) for f in rule.get('outputs', [])]
    # Skip a rule with no action and no inputs.
    if 'action' not in rule and not rule.get('rule_sources', []):
      continue
    a = ['cd', src_subdir, '&&'] + rule['action']
    fp.write(_rule_template % {
        'inputs' : pprint.pformat(inputs),
        'outputs' : pprint.pformat(outputs),
        'action' : pprint.pformat(a),
        'extension' : rule['extension'],
        'name' : name,
        'message' : message,
        'process_outputs_as_sources_line' : poas_line,
        'src_dir' : src_subdir_,
    })

  scons_target.write_target(fp, src_subdir)

  copies = spec.get('copies', [])
  if copies:
    fp.write(_copy_action_template)
  for copy in copies:
    destdir = None
    files = None
    try:
      destdir = copy['destination']
    except KeyError, e:
      gyp.common.ExceptionAppend(
        e,
        "Required 'destination' key missing for 'copies' in %s." % build_file)
      raise
    try:
      files = copy['files']
    except KeyError, e:
      gyp.common.ExceptionAppend(
        e, "Required 'files' key missing for 'copies' in %s." % build_file)
      raise
    if not files:
      # TODO: should probably add a (suppressible) warning;
      # a null file list may be unintentional.
      continue
    if not destdir:
      raise Exception(
        "Required 'destination' key is empty for 'copies' in %s." % build_file)

    fmt = ('\n'
           '_outputs = env.Command(%s,\n'
           '    %s,\n'
           '    GYPCopy(\'$TARGET\', \'$SOURCE\'))\n')
    for f in copy['files']:
      # Remove trailing separators so basename() acts like Unix basename and
      # always returns the last element, whether a file or dir. Without this,
      # only the contents, not the directory itself, are copied (and nothing
      # might be copied if dest already exists, since scons thinks nothing needs
      # to be done).
      dest = os.path.join(destdir, os.path.basename(f.rstrip(os.sep)))
      f = FixPath(f, src_subdir_)
      dest = FixPath(dest, src_subdir_)
      fp.write(fmt % (repr(dest), repr(f)))
      fp.write('target_files.extend(_outputs)\n')

  run_as = spec.get('run_as')
  if run_as:
    action = run_as.get('action', [])
    working_directory = run_as.get('working_directory')
    if not working_directory:
      working_directory = gyp_dir
    else:
      if not os.path.isabs(working_directory):
        working_directory = os.path.normpath(os.path.join(gyp_dir,
                                                          working_directory))
    if run_as.get('environment'):
      for (key, val) in run_as.get('environment').iteritems():
        action = ['%s="%s"' % (key, val)] + action
    action = ['cd', '"%s"' % working_directory, '&&'] + action
    fp.write(_run_as_template % {
        'action' : pprint.pformat(action),
        'message' : run_as.get('message', ''),
    })

  fmt = "\ngyp_target = env.Alias('%s', target_files)\n"
  fp.write(fmt % target_name)

  dependencies = spec.get('scons_dependencies', [])
  if dependencies:
    WriteList(fp, dependencies, preamble='dependencies = [\n    ',
                                postamble='\n]\n')
    fp.write('env.Requires(target_files, dependencies)\n')
    fp.write('env.Requires(gyp_target, dependencies)\n')
    fp.write('for prerequisite in prerequisites:\n')
    fp.write('  env.Requires(prerequisite, dependencies)\n')
  fp.write('env.Requires(gyp_target, prerequisites)\n')

  if run_as:
    fp.write(_run_as_template_suffix % {
        'target_name': target_name,
    })

  fp.write('Return("gyp_target")\n')

  fp.close()
#############################################################################
# TEMPLATE BEGIN
_wrapper_template = """\
__doc__ = '''
Wrapper configuration for building this entire "solution,"
including all the specific targets in various *.scons files.
'''
import os
import sys
import SCons.Environment
import SCons.Util
def GetProcessorCount():
'''
Detects the number of CPUs on the system. Adapted form:
http://codeliberates.blogspot.com/2008/05/detecting-cpuscores-in-python.html
'''
# Linux, Unix and Mac OS X:
if hasattr(os, 'sysconf'):
if os.sysconf_names.has_key('SC_NPROCESSORS_ONLN'):
# Linux and Unix or Mac OS X with python >= 2.5:
return os.sysconf('SC_NPROCESSORS_ONLN')
else: # Mac OS X with Python < 2.5:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if os.environ.has_key('NUMBER_OF_PROCESSORS'):
return max(int(os.environ.get('NUMBER_OF_PROCESSORS', '1')), 1)
return 1 # Default
# Support PROGRESS= to show progress in different ways.
p = ARGUMENTS.get('PROGRESS')
if p == 'spinner':
Progress(['/\\r', '|\\r', '\\\\\\r', '-\\r'],
interval=5,
file=open('/dev/tty', 'w'))
elif p == 'name':
Progress('$TARGET\\r', overwrite=True, file=open('/dev/tty', 'w'))
# Set the default -j value based on the number of processors.
SetOption('num_jobs', GetProcessorCount() + 1)
# Have SCons use its cached dependency information.
SetOption('implicit_cache', 1)
# Only re-calculate MD5 checksums if a timestamp has changed.
Decider('MD5-timestamp')
# Since we set the -j value by default, suppress SCons warnings about being
# unable to support parallel build on versions of Python with no threading.
default_warnings = ['no-no-parallel-support']
SetOption('warn', default_warnings + GetOption('warn'))
AddOption('--mode', nargs=1, dest='conf_list', default=[],
action='append', help='Configuration to build.')
AddOption('--verbose', dest='verbose', default=False,
action='store_true', help='Verbose command-line output.')
#
sconscript_file_map = %(sconscript_files)s
class LoadTarget:
'''
Class for deciding if a given target sconscript is to be included
based on a list of included target names, optionally prefixed with '-'
to exclude a target name.
'''
def __init__(self, load):
'''
Initialize a class with a list of names for possible loading.
Arguments:
load: list of elements in the LOAD= specification
'''
self.included = set([c for c in load if not c.startswith('-')])
self.excluded = set([c[1:] for c in load if c.startswith('-')])
if not self.included:
self.included = set(['all'])
def __call__(self, target):
'''
Returns True if the specified target's sconscript file should be
loaded, based on the initialized included and excluded lists.
'''
return (target in self.included or
('all' in self.included and not target in self.excluded))
if 'LOAD' in ARGUMENTS:
load = ARGUMENTS['LOAD'].split(',')
else:
load = []
load_target = LoadTarget(load)
sconscript_files = []
for target, sconscript in sconscript_file_map.iteritems():
if load_target(target):
sconscript_files.append(sconscript)
target_alias_list= []
conf_list = GetOption('conf_list')
if conf_list:
# In case the same --mode= value was specified multiple times.
conf_list = list(set(conf_list))
else:
conf_list = [%(default_configuration)r]
sconsbuild_dir = Dir(%(sconsbuild_dir)s)
def FilterOut(self, **kw):
kw = SCons.Environment.copy_non_reserved_keywords(kw)
for key, val in kw.items():
envval = self.get(key, None)
if envval is None:
# No existing variable in the environment, so nothing to delete.
continue
for vremove in val:
# Use while not if, so we can handle duplicates.
while vremove in envval:
envval.remove(vremove)
self[key] = envval
# TODO(sgk): SCons.Environment.Append() has much more logic to deal
# with various types of values. We should handle all those cases in here
# too. (If variable is a dict, etc.)
non_compilable_suffixes = {
'LINUX' : set([
'.bdic',
'.css',
'.dat',
'.fragment',
'.gperf',
'.h',
'.hh',
'.hpp',
'.html',
'.hxx',
'.idl',
'.in',
'.in0',
'.in1',
'.js',
'.mk',
'.rc',
'.sigs',
'',
]),
'WINDOWS' : set([
'.h',
'.hh',
'.hpp',
'.dat',
'.idl',
'.in',
'.in0',
'.in1',
]),
}
def compilable(env, file):
base, ext = os.path.splitext(str(file))
if ext in non_compilable_suffixes[env['TARGET_PLATFORM']]:
return False
return True
def compilable_files(env, sources):
return [x for x in sources if compilable(env, x)]
def GypProgram(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.Program(target, source, *args, **kw)
if env.get('INCREMENTAL'):
env.Precious(result)
return result
def GypTestProgram(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.Program(target, source, *args, **kw)
if env.get('INCREMENTAL'):
env.Precious(*result)
return result
def GypLibrary(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.Library(target, source, *args, **kw)
return result
def GypLoadableModule(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.LoadableModule(target, source, *args, **kw)
return result
def GypStaticLibrary(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.StaticLibrary(target, source, *args, **kw)
return result
def GypSharedLibrary(env, target, source, *args, **kw):
source = compilable_files(env, source)
result = env.SharedLibrary(target, source, *args, **kw)
if env.get('INCREMENTAL'):
env.Precious(result)
return result
def add_gyp_methods(env):
env.AddMethod(GypProgram)
env.AddMethod(GypTestProgram)
env.AddMethod(GypLibrary)
env.AddMethod(GypLoadableModule)
env.AddMethod(GypStaticLibrary)
env.AddMethod(GypSharedLibrary)
env.AddMethod(FilterOut)
env.AddMethod(compilable)
base_env = Environment(
tools = %(scons_tools)s,
INTERMEDIATE_DIR='$OBJ_DIR/${COMPONENT_NAME}/_${TARGET_NAME}_intermediate',
LIB_DIR='$TOP_BUILDDIR/lib',
OBJ_DIR='$TOP_BUILDDIR/obj',
SCONSBUILD_DIR=sconsbuild_dir.abspath,
SHARED_INTERMEDIATE_DIR='$OBJ_DIR/_global_intermediate',
SRC_DIR=Dir(%(src_dir)r),
TARGET_PLATFORM='LINUX',
TOP_BUILDDIR='$SCONSBUILD_DIR/$CONFIG_NAME',
LIBPATH=['$LIB_DIR'],
)
if not GetOption('verbose'):
base_env.SetDefault(
ARCOMSTR='Creating library $TARGET',
ASCOMSTR='Assembling $TARGET',
CCCOMSTR='Compiling $TARGET',
CONCATSOURCECOMSTR='ConcatSource $TARGET',
CXXCOMSTR='Compiling $TARGET',
LDMODULECOMSTR='Building loadable module $TARGET',
LINKCOMSTR='Linking $TARGET',
MANIFESTCOMSTR='Updating manifest for $TARGET',
MIDLCOMSTR='Compiling IDL $TARGET',
PCHCOMSTR='Precompiling $TARGET',
RANLIBCOMSTR='Indexing $TARGET',
RCCOMSTR='Compiling resource $TARGET',
SHCCCOMSTR='Compiling $TARGET',
SHCXXCOMSTR='Compiling $TARGET',
SHLINKCOMSTR='Linking $TARGET',
SHMANIFESTCOMSTR='Updating manifest for $TARGET',
)
add_gyp_methods(base_env)
for conf in conf_list:
env = base_env.Clone(CONFIG_NAME=conf)
SConsignFile(env.File('$TOP_BUILDDIR/.sconsign').abspath)
for sconscript in sconscript_files:
target_alias = env.SConscript(sconscript, exports=['env'])
if target_alias:
target_alias_list.extend(target_alias)
Default(Alias('all', target_alias_list))
help_fmt = '''
Usage: hammer [SCONS_OPTIONS] [VARIABLES] [TARGET] ...
Local command-line build options:
--mode=CONFIG Configuration to build:
--mode=Debug [default]
--mode=Release
--verbose Print actual executed command lines.
Supported command-line build variables:
LOAD=[module,...] Comma-separated list of components to load in the
dependency graph ('-' prefix excludes)
PROGRESS=type Display a progress indicator:
name: print each evaluated target name
spinner: print a spinner every 5 targets
The following TARGET names can also be used as LOAD= module names:
%%s
'''
if GetOption('help'):
def columnar_text(items, width=78, indent=2, sep=2):
result = []
colwidth = max(map(len, items)) + sep
cols = (width - indent) / colwidth
if cols < 1:
cols = 1
rows = (len(items) + cols - 1) / cols
indent = '%%*s' %% (indent, '')
sep = indent
for row in xrange(0, rows):
result.append(sep)
for i in xrange(row, len(items), rows):
result.append('%%-*s' %% (colwidth, items[i]))
sep = '\\n' + indent
result.append('\\n')
return ''.join(result)
load_list = set(sconscript_file_map.keys())
target_aliases = set(map(str, target_alias_list))
common = load_list and target_aliases
load_only = load_list - common
target_only = target_aliases - common
help_text = [help_fmt %% columnar_text(sorted(list(common)))]
if target_only:
fmt = "The following are additional TARGET names:\\n\\n%%s\\n"
help_text.append(fmt %% columnar_text(sorted(list(target_only))))
if load_only:
fmt = "The following are additional LOAD= module names:\\n\\n%%s\\n"
help_text.append(fmt %% columnar_text(sorted(list(load_only))))
Help(''.join(help_text))
"""
# TEMPLATE END
#############################################################################
def GenerateSConscriptWrapper(build_file, build_file_data, name,
                              output_filename, sconscript_files,
                              default_configuration):
  """
  Generates the "wrapper" SConscript file (analogous to the Visual Studio
  solution) that calls all the individual target SConscript files.

  Also writes a companion 'SConstruct' file next to the wrapper that simply
  loads it.

  Arguments:
    build_file: path of the .gyp file this wrapper corresponds to
    build_file_data: parsed contents of build_file; '_DEPTH' gives the
        source root, and the optional 'scons_settings' dict may override
        'sconsbuild_dir' and 'tools'
    name: display name for the wrapper (interpolated into the template)
    output_filename: path of the wrapper SConscript to write
    sconscript_files: dict mapping target name -> per-target .scons path
    default_configuration: configuration used when no --mode= is given
  """
  output_dir = os.path.dirname(output_filename)
  src_dir = build_file_data['_DEPTH']
  src_dir_rel = gyp.common.RelativePath(src_dir, output_dir)
  if not src_dir_rel:
    src_dir_rel = '.'
  scons_settings = build_file_data.get('scons_settings', {})
  sconsbuild_dir = scons_settings.get('sconsbuild_dir', '#')
  scons_tools = scons_settings.get('tools', ['default'])

  # Render the target -> sconscript map as a Python dict() literal that the
  # template splices in verbatim.
  sconscript_file_lines = ['dict(']
  for target in sorted(sconscript_files.keys()):
    sconscript = sconscript_files[target]
    sconscript_file_lines.append(' %s = %r,' % (target, sconscript))
  sconscript_file_lines.append(')')

  # Use context managers so the files are closed even if a write fails
  # (the original called .close() without any try/finally protection).
  with open(output_filename, 'w') as fp:
    fp.write(header)
    fp.write(_wrapper_template % {
        'default_configuration' : default_configuration,
        'name' : name,
        'scons_tools' : repr(scons_tools),
        'sconsbuild_dir' : repr(sconsbuild_dir),
        'sconscript_files' : '\n'.join(sconscript_file_lines),
        'src_dir' : src_dir_rel,
    })

  # Generate the SConstruct file that invokes the wrapper SConscript.
  # (Renamed local from 'dir' to avoid shadowing the builtin.)
  sconstruct_dir, fname = os.path.split(output_filename)
  SConstruct = os.path.join(sconstruct_dir, 'SConstruct')
  with open(SConstruct, 'w') as fp:
    fp.write(header)
    fp.write('SConscript(%s)\n' % repr(fname))
def TargetFilename(target, build_file=None, output_suffix=''):
  """Return the per-target .scons file name for the specified target.

  If build_file is not supplied, target is treated as a fully qualified
  target string and the build file (plus short target name) are parsed
  out of it.
  """
  if build_file is None:
    # Split "path/to/file.gyp:target#toolset" into its components.
    build_file, target = gyp.common.ParseQualifiedTarget(target)[:2]
  basename = target + output_suffix + '.scons'
  return os.path.join(os.path.dirname(build_file), basename)
def PerformBuild(data, configurations, params):
  """
  Runs scons once per requested configuration.

  Searches PATH for a usable scons executable (accepting either 'scons'
  or 'scons.py'), then invokes it with --mode=<config> for every entry
  in `configurations`.
  """
  options = params['options']

  # Due to the way we test gyp on the chromium trybots
  # we need to look for 'scons.py' as well as the more common 'scons'
  # TODO(sbc): update the trybots to have a more normal install
  # of scons.
  scons = 'scons'
  paths = os.environ['PATH'].split(os.pathsep)
  for scons_name in ['scons', 'scons.py']:
    for path in paths:
      test_scons = os.path.join(path, scons_name)
      print('looking for: %s' % test_scons)
      if os.path.exists(test_scons):
        # Bug fix: report the executable that was actually found
        # (test_scons); the original printed the stale 'scons' value.
        print('found scons: %s' % test_scons)
        scons = test_scons
        break

  for config in configurations:
    arguments = [scons, '-C', options.toplevel_dir, '--mode=%s' % config]
    print('Building [%s]: %s' % (config, arguments))
    subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
  """
  Generates all the output files for the specified targets.

  Writes one per-target .scons file via GenerateSConscript(), then one
  '*_main*.scons' wrapper (plus SConstruct) per .gyp build file via
  GenerateSConscriptWrapper().
  """
  options = params['options']

  # When --generator-output is given, remap paths from the invocation cwd
  # into that output directory; otherwise pass paths through unchanged.
  if options.generator_output:
    def output_path(filename):
      return filename.replace(params['cwd'], options.generator_output)
  else:
    def output_path(filename):
      return filename

  default_configuration = None

  # First pass: emit one .scons file per target.
  for qualified_target in target_list:
    spec = target_dicts[qualified_target]
    if spec['toolset'] != 'target':
      raise Exception(
          'Multiple toolsets not supported in scons build (target %s)' %
          qualified_target)
    scons_target = SCons.Target(spec)
    if scons_target.is_ignored:
      continue
    # TODO: assumes the default_configuration of the first target
    # non-Default target is the correct default for all targets.
    # Need a better model for handle variation between targets.
    if (not default_configuration and
        spec['default_configuration'] != 'Default'):
      default_configuration = spec['default_configuration']
    build_file, target = gyp.common.ParseQualifiedTarget(qualified_target)[:2]
    output_file = TargetFilename(target, build_file, options.suffix)
    if options.generator_output:
      output_file = output_path(output_file)
    # NOTE(review): dict.has_key is Python 2-only syntax.
    if not spec.has_key('libraries'):
      spec['libraries'] = []
    # Add dependent static library targets to the 'libraries' value.
    deps = spec.get('dependencies', [])
    spec['scons_dependencies'] = []
    for d in deps:
      td = target_dicts[d]
      target_name = td['target_name']
      # Every dependency becomes an SCons Alias() this target depends on.
      spec['scons_dependencies'].append("Alias('%s')" % target_name)
      if td['type'] in ('static_library', 'shared_library'):
        libname = td.get('product_name', target_name)
        spec['libraries'].append('lib' + libname)
      if td['type'] == 'loadable_module':
        # NOTE(review): 'prereqs' is computed but never used below.
        prereqs = spec.get('scons_prerequisites', [])
        # TODO: parameterize with <(SHARED_LIBRARY_*) variables?
        td_target = SCons.Target(td)
        td_target.target_prefix = '${SHLIBPREFIX}'
        td_target.target_suffix = '${SHLIBSUFFIX}'
    GenerateSConscript(output_file, spec, build_file, data[build_file])

  if not default_configuration:
    default_configuration = 'Default'

  # Second pass: emit one wrapper per .gyp file, referencing the
  # per-target .scons files generated above (ignored targets excluded).
  for build_file in sorted(data.keys()):
    path, ext = os.path.splitext(build_file)
    if ext != '.gyp':
      continue
    output_dir, basename = os.path.split(path)
    output_filename = path + '_main' + options.suffix + '.scons'

    all_targets = gyp.common.AllTargets(target_list, target_dicts, build_file)
    sconscript_files = {}
    for t in all_targets:
      scons_target = SCons.Target(target_dicts[t])
      if scons_target.is_ignored:
        continue
      bf, target = gyp.common.ParseQualifiedTarget(t)[:2]
      target_filename = TargetFilename(target, bf, options.suffix)
      tpath = gyp.common.RelativePath(target_filename, output_dir)
      sconscript_files[target] = tpath

    output_filename = output_path(output_filename)
    if sconscript_files:
      GenerateSConscriptWrapper(build_file, data[build_file], basename,
                                output_filename, sconscript_files,
                                default_configuration)
| gpl-3.0 |
HPPTECH/hpp_IOSTressTest | Resources/ssh/pexpect-3.2/tests/sleep_for.py | 22 | 1402 | #!/usr/bin/env python
'''
PEXPECT LICENSE
This license is approved by the OSI and FSF as GPL-compatible.
http://opensource.org/licenses/isc-license.txt
Copyright (c) 2012, Noah Spurrier <noah@noah.org>
PERMISSION TO USE, COPY, MODIFY, AND/OR DISTRIBUTE THIS SOFTWARE FOR ANY
PURPOSE WITH OR WITHOUT FEE IS HEREBY GRANTED, PROVIDED THAT THE ABOVE
COPYRIGHT NOTICE AND THIS PERMISSION NOTICE APPEAR IN ALL COPIES.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
from __future__ import print_function
import time
import sys
def main():
    """
    Sleep for the number of seconds (a float) given as the first
    command-line argument, printing READY before and END after.
    """
    args = sys.argv
    if len(args) < 2:
        # No duration supplied: show usage and exit with an error status.
        print("Usage: %s seconds_to_sleep" % (args[0],))
        sys.exit(1)
    seconds = float(args[1])
    print("READY")
    time.sleep(seconds)
    print("END")


if __name__ == '__main__':
    main()
| mit |
toobaz/pandas | ci/print_skipped.py | 1 | 1409 | #!/usr/bin/env python
import os
import sys
import math
import xml.etree.ElementTree as et
def parse_results(filename):
    """
    Collect pretty-printed descriptions of the skipped tests recorded in a
    JUnit-style XML results file.

    Parameters
    ----------
    filename : str
        Path to the XML results file (e.g. produced by pytest --junitxml).

    Returns
    -------
    str
        One numbered entry per skipped test; entries that start a new test
        class are preceded by a dashed rule line.
    """
    tree = et.parse(filename)
    root = tree.getroot()
    skipped = []

    current_class = ""
    i = 1
    for el in root.findall("testcase"):
        cn = el.attrib["classname"]
        for sk in el.findall("skipped"):
            old_class = current_class
            current_class = cn
            name = "{classname}.{name}".format(
                classname=current_class, name=el.attrib["name"]
            )
            msg = sk.attrib["message"]
            out = ""
            if old_class != current_class:
                ndigits = int(math.log(i, 10) + 1)
                # 4 for : + space + # + space
                out += "-" * (len(name + msg) + 4 + ndigits) + "\n"
            out += "#{i} {name}: {msg}".format(i=i, name=name, msg=msg)
            skipped.append(out)
            i += 1
    # Sanity check: the counter and the collected list stay in sync.
    # (The original duplicated this assertion and also placed a vacuous
    # copy before the loop; one post-loop check is sufficient.)
    assert i - 1 == len(skipped)
    return "\n".join(skipped)
def main():
    """Print the skipped-test report for each results file that exists."""
    print("SKIPPED TESTS:")
    for filename in ("test-data-single.xml", "test-data-multiple.xml", "test-data.xml"):
        if not os.path.isfile(filename):
            continue
        print(parse_results(filename))
    return 0


if __name__ == "__main__":
    sys.exit(main())
| bsd-3-clause |
sogis/Quantum-GIS | python/plugins/fTools/tools/doMeanCoords.py | 11 | 8882 | # -*- coding: utf-8 -*-
#-----------------------------------------------------------
#
# fTools
# Copyright (C) 2008-2011 Carson Farmer
# EMAIL: carson.farmer (at) gmail.com
# WEB : http://www.ftools.ca/fTools.html
#
# A collection of data management and analysis tools for vector data
#
#-----------------------------------------------------------
#
# licensed under the terms of GNU GPL 2
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#---------------------------------------------------------------------
from PyQt4.QtCore import QObject, SIGNAL, QVariant, QFile
from PyQt4.QtGui import QDialog, QDialogButtonBox, QMessageBox
import ftools_utils
from qgis.core import QGis, QgsFeature, QgsVectorFileWriter, QgsFields, QgsField, QgsGeometry, QgsPoint, QgsDistanceArea
from math import sqrt
from ui_frmMeanCoords import Ui_Dialog
class Dialog(QDialog, Ui_Dialog):
    """Dialog driving fTools' coordinate-statistics tools.

    ``function`` selects the behavior: 1 computes mean coordinates
    (point output), 2 computes standard distance (buffered polygon
    output around the mean point).
    """

    def __init__(self, iface, function):
        """Wire up signals, remember the QGIS interface and selected tool."""
        QDialog.__init__(self, iface.mainWindow())
        self.iface = iface
        self.function = function
        self.setupUi(self)
        self.updateUi()
        QObject.connect(self.toolOut, SIGNAL("clicked()"), self.outFile)
        QObject.connect(self.inShape, SIGNAL("currentIndexChanged(QString)"), self.update)
        self.buttonOk = self.buttonBox_2.button(QDialogButtonBox.Ok)
        self.progressBar.setValue(0)
        self.populateLayers()

    def populateLayers(self):
        """Fill the input-layer combo with all loaded vector layers."""
        layers = ftools_utils.getLayerNames([QGis.Point, QGis.Line, QGis.Polygon])
        # Block signals while clearing so update() is not triggered
        # with an empty selection.
        self.inShape.blockSignals(True)
        self.inShape.clear()
        self.inShape.blockSignals(False)
        self.inShape.addItems(layers)

    def updateUi(self):
        """Adjust the window title and widgets for the selected tool."""
        if self.function == 1:
            self.setWindowTitle(self.tr("Mean coordinates"))
            # The buffer-size multiplier only applies to standard distance.
            self.sizeValue.setVisible(False)
            self.label_size.setVisible(False)
        elif self.function == 2:
            self.setWindowTitle(self.tr("Standard distance"))
            self.resize(381, 100)

    def update(self, inputLayer):
        """Repopulate the weight/unique-ID field combos for the new layer."""
        self.weightField.clear()
        self.uniqueField.clear()
        self.weightField.addItem(self.tr("(Optional) Weight field"))
        self.uniqueField.addItem(self.tr("(Optional) Unique ID field"))
        self.changedLayer = ftools_utils.getVectorLayerByName(inputLayer)
        changedField = ftools_utils.getFieldList(self.changedLayer)
        for f in changedField:
            # Only numeric fields can be used as weights; any field can
            # serve as the unique-ID grouping field.
            if f.type() == QVariant.Int or f.type() == QVariant.Double:
                self.weightField.addItem(unicode(f.name()))
            self.uniqueField.addItem(unicode(f.name()))

    def accept(self):
        """Validate inputs, run the computation and load/report results."""
        self.buttonOk.setEnabled(False)
        if self.inShape.currentText() == "":
            QMessageBox.information(self, self.tr("Coordinate statistics"), self.tr("No input vector layer specified"))
        elif self.outShape.text() == "":
            QMessageBox.information(self, self.tr("Coordinate statistics"), self.tr("Please specify output shapefile"))
        else:
            inName = self.inShape.currentText()
            outPath = self.outShape.text()
            self.compute(inName, self.weightField.currentText(), self.sizeValue.value(), self.uniqueField.currentText())
            self.progressBar.setValue(100)
            self.outShape.clear()
            if self.addToCanvasCheck.isChecked():
                addCanvasCheck = ftools_utils.addShapeToCanvas(unicode(outPath))
                if not addCanvasCheck:
                    QMessageBox.warning(self, self.tr("Coordinate statistics"), self.tr("Error loading output shapefile:\n%s") % (unicode(outPath)))
                self.populateLayers()
            else:
                QMessageBox.information(self, self.tr("Coordinate statistics"), self.tr("Created output shapefile:\n%s") % (unicode(outPath)))
        self.progressBar.setValue(0)
        self.buttonOk.setEnabled(True)

    def outFile(self):
        """Ask the user for the output shapefile path and encoding."""
        self.outShape.clear()
        (self.shapefileName, self.encoding) = ftools_utils.saveDialog(self)
        if self.shapefileName is None or self.encoding is None:
            return
        self.outShape.setText(self.shapefileName)

    def compute(self, inName, weightField="", times=1, uniqueField=""):
        """Compute mean coordinates or standard distance for ``inName``.

        One output feature is written per value of ``uniqueField`` (or a
        single feature when no unique field is chosen). ``weightField``
        optionally weights each input feature; ``times`` scales the
        standard-distance buffer radius (function 2 only).
        """
        vlayer = ftools_utils.getVectorLayerByName(inName)
        provider = vlayer.dataProvider()
        weightIndex = provider.fieldNameIndex(weightField)
        uniqueIndex = provider.fieldNameIndex(uniqueField)
        feat = QgsFeature()
        sRs = provider.crs()
        check = QFile(self.shapefileName)
        if check.exists():
            # Refuse to continue if the existing shapefile can't be removed.
            if not QgsVectorFileWriter.deleteShapeFile(self.shapefileName):
                return
        if uniqueIndex != -1:
            uniqueValues = ftools_utils.getUniqueValues(provider, int(uniqueIndex))
            single = False
        else:
            # No grouping field: process all features as one group.
            uniqueValues = [1]
            single = True
        if self.function == 2:
            # Standard distance: polygon output with the distance value.
            fieldList = QgsFields()
            fieldList.append(QgsField("STD_DIST", QVariant.Double))
            fieldList.append(QgsField("UID", QVariant.String))
            writer = QgsVectorFileWriter(self.shapefileName, self.encoding, fieldList, QGis.WKBPolygon, sRs)
        else:
            # Mean coordinates: point output with the mean X/Y values.
            fieldList = QgsFields()
            fieldList.append(QgsField("MEAN_X", QVariant.Double))
            fieldList.append(QgsField("MEAN_Y", QVariant.Double))
            fieldList.append(QgsField("UID", QVariant.String))
            writer = QgsVectorFileWriter(self.shapefileName, self.encoding, fieldList, QGis.WKBPoint, sRs)
        outfeat = QgsFeature()
        outfeat.setFields(fieldList)
        points = []
        weights = []
        nFeat = provider.featureCount() * len(uniqueValues)
        nElement = 0
        self.progressBar.setValue(0)
        self.progressBar.setRange(0, nFeat)
        for j in uniqueValues:
            cx = 0.00
            cy = 0.00
            points = []
            weights = []
            fit = provider.getFeatures()
            while fit.nextFeature(feat):
                nElement += 1
                self.progressBar.setValue(nElement)
                if single:
                    check = unicode(j).strip()
                else:
                    check = unicode(feat[uniqueIndex]).strip()
                # Only include features belonging to the current group.
                if check == unicode(j).strip():
                    cx = 0.00
                    cy = 0.00
                    if weightIndex == -1:
                        weight = 1.00
                    else:
                        weight = float(feat[weightIndex])
                    geom = QgsGeometry(feat.geometry())
                    geom = ftools_utils.extractPoints(geom)
                    # Collapse each feature's vertices to their centroid,
                    # then accumulate it with the feature's weight.
                    for i in geom:
                        cx += i.x()
                        cy += i.y()
                    points.append(QgsPoint((cx / len(geom)), (cy / len(geom))))
                    weights.append(weight)
            sumWeight = sum(weights)
            cx = 0.00
            cy = 0.00
            item = 0
            # Weighted mean of the per-feature centroids.
            for item, i in enumerate(points):
                cx += i.x() * weights[item]
                cy += i.y() * weights[item]
            cx = cx / sumWeight
            cy = cy / sumWeight
            meanPoint = QgsPoint(cx, cy)
            if self.function == 2:
                # Standard distance: std-dev of distances to the mean point,
                # written as a circular buffer scaled by ``times``.
                values = []
                md = 0.00
                sd = 0.00
                dist = QgsDistanceArea()
                item = 0
                for i in points:
                    tempDist = dist.measureLine(i, meanPoint)
                    values.append(tempDist)
                    item += 1
                    md += tempDist
                md = md / item
                for i in values:
                    sd += (i - md) * (i - md)
                sd = sqrt(sd / item)
                outfeat.setGeometry(QgsGeometry.fromPoint(meanPoint).buffer(sd * times, 10))
                outfeat.setAttribute(0, sd)
                outfeat.setAttribute(1, j)
            else:
                outfeat.setGeometry(QgsGeometry.fromPoint(meanPoint))
                outfeat.setAttribute(0, cx)
                outfeat.setAttribute(1, cy)
                outfeat.setAttribute(2, j)
            writer.addFeature(outfeat)
            if single:
                break
        # Deleting the writer flushes and closes the shapefile.
        del writer
| gpl-2.0 |
javaos74/neutron | neutron/tests/api/admin/test_quotas.py | 26 | 3842 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest_lib.common.utils import data_utils
from neutron.tests.api import base
from neutron.tests.tempest import test
class QuotasTest(base.BaseAdminNetworkTest):
    """
    Tests the following operations in the Neutron API using the REST client for
    Neutron:

        list quotas for tenants who have non-default quota values
        show quotas for a specified tenant
        update quotas for a specified tenant
        reset quotas to default values for a specified tenant

    v2.0 of the API is assumed.
    It is also assumed that the per-tenant quota extension API is configured
    in /etc/neutron/neutron.conf as follows:

        quota_driver = neutron.db.driver.DbQuotaDriver
    """

    @classmethod
    def resource_setup(cls):
        """Skip the class when the quotas extension is unavailable."""
        super(QuotasTest, cls).resource_setup()
        if not test.is_extension_enabled('quotas', 'network'):
            msg = "quotas extension not enabled."
            raise cls.skipException(msg)
        cls.identity_admin_client = cls.os_adm.identity_client

    def _check_quotas(self, new_quotas):
        """Exercise the full quota lifecycle for a throwaway tenant.

        Creates a tenant, applies ``new_quotas``, verifies the values via
        update/list/show, then resets them and verifies the tenant no
        longer appears among non-default quotas. Cleanups are registered
        so the tenant and quotas are removed even on failure.
        """
        # Add a tenant to conduct the test
        test_tenant = data_utils.rand_name('test_tenant_')
        test_description = data_utils.rand_name('desc_')
        tenant = self.identity_admin_client.create_tenant(
            name=test_tenant,
            description=test_description)
        tenant_id = tenant['id']
        self.addCleanup(self.identity_admin_client.delete_tenant, tenant_id)
        # Change quotas for tenant
        quota_set = self.admin_client.update_quotas(tenant_id,
                                                    **new_quotas)
        self.addCleanup(self.admin_client.reset_quotas, tenant_id)
        for key, value in six.iteritems(new_quotas):
            self.assertEqual(value, quota_set[key])
        # Confirm our tenant is listed among tenants with non default quotas
        non_default_quotas = self.admin_client.list_quotas()
        found = False
        for qs in non_default_quotas['quotas']:
            if qs['tenant_id'] == tenant_id:
                found = True
        self.assertTrue(found)
        # Confirm from API quotas were changed as requested for tenant
        quota_set = self.admin_client.show_quotas(tenant_id)
        quota_set = quota_set['quota']
        for key, value in six.iteritems(new_quotas):
            self.assertEqual(value, quota_set[key])
        # Reset quotas to default and confirm
        self.admin_client.reset_quotas(tenant_id)
        non_default_quotas = self.admin_client.list_quotas()
        for q in non_default_quotas['quotas']:
            self.assertNotEqual(tenant_id, q['tenant_id'])

    @test.attr(type='gate')
    @test.idempotent_id('2390f766-836d-40ef-9aeb-e810d78207fb')
    def test_quotas(self):
        """Verify core resource quotas can be set and reset."""
        new_quotas = {'network': 0, 'security_group': 0}
        self._check_quotas(new_quotas)

    @test.idempotent_id('a7add2b1-691e-44d6-875f-697d9685f091')
    @test.requires_ext(extension='lbaas', service='network')
    @test.attr(type='gate')
    def test_lbaas_quotas(self):
        """Verify LBaaS resource quotas can be set and reset."""
        new_quotas = {'vip': 1, 'pool': 2,
                      'member': 3, 'health_monitor': 4}
        self._check_quotas(new_quotas)
| apache-2.0 |
dirn/readthedocs.org | readthedocs/projects/tasks.py | 2 | 36420 | """Tasks related to projects, including fetching repository code, cleaning
``conf.py`` files, and rebuilding documentation.
"""
import fnmatch
import os
import shutil
import json
import logging
import socket
import requests
import datetime
from celery import task
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from slumber.exceptions import HttpClientError
from builds.models import Build, Version
from core.utils import send_email, run_on_app_servers
from doc_builder.loader import loading as builder_loading
from doc_builder.base import restoring_chdir
from doc_builder.environments import DockerEnvironment
from projects.exceptions import ProjectImportError
from projects.models import ImportedFile, Project
from projects.utils import run, make_api_version, make_api_project
from projects.constants import LOG_TEMPLATE
from projects import symlinks
from privacy.loader import Syncer
from tastyapi import api, apiv2
from search.parse_json import process_all_json_files
from search.utils import process_mkdocs_json
from restapi.utils import index_search_request
from vcs_support import utils as vcs_support_utils
import tastyapi
try:
from readthedocs.projects.signals import before_vcs, after_vcs, before_build, after_build
except:
from projects.signals import before_vcs, after_vcs, before_build, after_build
log = logging.getLogger(__name__)
HTML_ONLY = getattr(settings, 'HTML_ONLY_PROJECTS', ())
@task(default_retry_delay=7 * 60, max_retries=5)
@restoring_chdir
def update_docs(pk, version_pk=None, build_pk=None, record=True, docker=False,
                pdf=True, man=True, epub=True, dash=True,
                search=True, force=False, intersphinx=True, localmedia=True,
                api=None, basic=False, **kwargs):
    """
    The main entry point for updating documentation.

    It handles all of the logic around whether a project is imported or we
    created it. Then it will build the html docs and other requested parts.

    `pk`
        Primary key of the project to update

    `record`
        Whether or not to keep a record of the update in the database. Useful
        for preventing changes visible to the end-user when running commands
        from the shell, for example.

    `docker`
        Build inside a Docker container instead of directly on the host
        (also forced on by settings.DOCKER_ENABLE).

    `api`
        Optional API client override; defaults to the tastyapi clients.
    """
    # Dependency injection to allow for testing
    if api is None:
        api = tastyapi.api
        apiv2 = tastyapi.apiv2
    else:
        apiv2 = api

    start_time = datetime.datetime.utcnow()

    try:
        project_data = api.project(pk).get()
    except HttpClientError:
        # NOTE(review): the failure is only logged; project_data is left
        # unbound, so the next line would raise NameError — confirm intent.
        log.exception(LOG_TEMPLATE.format(project=pk, version='', msg='Failed to get project data on build. Erroring.'))
    project = make_api_project(project_data)
    # Don't build skipped projects
    if project.skip:
        log.info(LOG_TEMPLATE.format(project=project.slug, version='', msg='Skipping'))
        return
    else:
        log.info(LOG_TEMPLATE.format(project=project.slug, version='', msg='Building'))
    version = ensure_version(api, project, version_pk)
    build = create_build(build_pk)
    results = {}

    # Build Servery stuff
    try:
        # Progress through build states, accumulating per-step results
        # (each result is a (status, stdout, stderr)-style tuple).
        record_build(api=api, build=build, record=record, results=results, state='cloning')
        vcs_results = setup_vcs(version, build, api)
        if vcs_results:
            results.update(vcs_results)

        if project.documentation_type == 'auto':
            update_documentation_type(version, apiv2)

        if docker or settings.DOCKER_ENABLE:
            record_build(api=api, build=build, record=record, results=results, state='building')
            # Note: rebinds the boolean ``docker`` argument to the
            # environment object.
            docker = DockerEnvironment(version)
            build_results = docker.build()
            results.update(build_results)
        else:
            record_build(api=api, build=build, record=record, results=results, state='installing')
            setup_results = setup_environment(version)
            results.update(setup_results)

            record_build(api=api, build=build, record=record, results=results, state='building')
            build_results = build_docs(version, force, pdf, man, epub, dash, search, localmedia)
            results.update(build_results)
    except vcs_support_utils.LockTimeout, e:
        # Another build holds the repo lock; record HTTP-423-style status
        # and let Celery retry the whole task.
        results['checkout'] = (423, "", "Version locked, retrying in 5 minutes.")
        log.info(LOG_TEMPLATE.format(project=version.project.slug,
                                     version=version.slug, msg="Unable to lock, will retry"))
        # http://celery.readthedocs.org/en/3.0/userguide/tasks.html#retrying
        # Should completely retry the task for us until max_retries is exceeded
        update_docs.retry(exc=e, throw=False)
    except ProjectImportError, e:
        results['checkout'] = (404, "", 'Failed to import project; skipping build.\n\nError\n-----\n\n%s' % e.message)
        # Close out build in finally with error.
        pass
    except Exception, e:
        log.error(LOG_TEMPLATE.format(project=version.project.slug,
                                      version=version.slug, msg="Top-level Build Failure"), exc_info=True)
        results['checkout'] = (404, "", 'Top-level Build Failure: %s' % e.message)
    finally:
        # Always close out the build record, even on failure.
        record_build(api=api, build=build, record=record, results=results, state='finished', start_time=start_time)
        record_pdf(api=api, record=record, results=results, state='finished', version=version)
        log.info(LOG_TEMPLATE.format(project=version.project.slug, version='', msg='Build finished'))

    build_id = build.get('id')
    # Web Server Tasks
    if build_id:
        # A step result's first element of 0 means success.
        finish_build.delay(
            version_pk=version.pk,
            build_pk=build_id,
            hostname=socket.gethostname(),
            html=results.get('html', [404])[0] == 0,
            localmedia=results.get('localmedia', [404])[0] == 0,
            search=results.get('search', [404])[0] == 0,
            pdf=True,
            epub=results.get('epub', [404])[0] == 0,
        )
def ensure_version(api, project, version_pk):
    """
    Resolve the version to operate on: the one identified by ``version_pk``
    when given, otherwise the project's 'latest' version.
    """
    if not version_pk:
        data = api.version(project.slug).get(slug='latest')['objects'][0]
    else:
        data = api.version(version_pk).get()
    return make_api_version(data)
def update_documentation_type(version, api):
    """
    Automatically determine the doc type for a user.

    Counts Markdown vs. reStructuredText files in the checkout and stores
    the winner ('mkdocs' or 'sphinx') on the project, both through the API
    and on the in-memory project object.
    """
    os.chdir(version.project.checkout_path(version.slug))
    markdown_count = 0
    rst_count = 0
    for filename in run('find .')[1].split('\n'):
        if fnmatch.fnmatch(filename, '*.md') or fnmatch.fnmatch(filename, '*.markdown'):
            markdown_count += 1
        elif fnmatch.fnmatch(filename, '*.rst'):
            rst_count += 1
    # Sphinx wins ties, matching the original behavior.
    doc_type = 'mkdocs' if markdown_count > rst_count else 'sphinx'
    project_data = api.project(version.project.pk).get()
    project_data['documentation_type'] = doc_type
    api.project(version.project.pk).put(project_data)
    version.project.documentation_type = doc_type
def docker_build(version, pdf=True, man=True, epub=True, dash=True,
                 search=True, force=False, intersphinx=True, localmedia=True):
    """
    The code that executes inside of docker:
    set up the build environment, run the documentation build, and return
    the combined per-step results (environment results overwrite build
    results on key collisions).
    """
    env_results = setup_environment(version)
    results = build_docs(version=version, force=force, pdf=pdf, man=man,
                         epub=epub, dash=dash, search=search,
                         localmedia=localmedia)
    results.update(env_results)
    return results
def setup_vcs(version, build, api):
    """
    Update the checkout of the repo to make sure it's the latest.
    This also syncs versions in the DB.

    Records the checked-out commit on ``build`` when one is available;
    logs and re-raises ProjectImportError.
    """
    project_slug = version.project.slug
    version_slug = version.slug
    log.info(LOG_TEMPLATE.format(project=project_slug,
                                 version=version_slug,
                                 msg='Updating docs from VCS'))
    try:
        update_output = update_imported_docs(version.pk, api)
        commit = version.project.vcs_repo(version_slug).commit
        if commit:
            build['commit'] = commit
    except ProjectImportError:
        log.error(LOG_TEMPLATE.format(project=project_slug,
                                      version=version_slug,
                                      msg='Failed to import project; skipping build'),
                  exc_info=True)
        raise
    return update_output
@task()
def update_imported_docs(version_pk, api=None):
    """
    Check out or update the given project's repository.

    Takes the project's non-blocking repo lock, checks out (or updates to)
    the requested version and pushes the repo's tags/branches back to the
    API so the version list stays in sync.

    `version_pk`
        Primary key of the Version to check out.

    `api`
        Optional API client override; defaults to tastyapi.api.

    Returns a dict with the 'checkout' step result.
    """
    if api is None:
        api = tastyapi.api
    version_data = api.version(version_pk).get()
    version = make_api_version(version_data)
    project = version.project
    ret_dict = {}

    # Make Dirs
    if not os.path.exists(project.doc_path):
        os.makedirs(project.doc_path)

    if not project.vcs_repo():
        raise ProjectImportError(("Repo type '{0}' unknown".format(project.repo_type)))

    with project.repo_nonblockinglock(version=version,
                                      max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        before_vcs.send(sender=version)
        # Get the actual code on disk
        if version:
            log.info(
                LOG_TEMPLATE.format(
                    project=project.slug,
                    version=version.slug,
                    msg='Checking out version {slug}: {identifier}'.format(
                        slug=version.slug,
                        identifier=version.identifier
                    )
                )
            )
            version_slug = version.slug
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.checkout(
                version.identifier,
            )
        else:
            # Does this ever get called?
            log.info(LOG_TEMPLATE.format(
                project=project.slug, version=version.slug, msg='Updating to latest revision'))
            version_slug = 'latest'
            version_repo = project.vcs_repo(version_slug)
            ret_dict['checkout'] = version_repo.update()
        after_vcs.send(sender=version)

        # Update tags/version
        version_post_data = {'repo': version_repo.repo_url}

        if version_repo.supports_tags:
            version_post_data['tags'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.tags
            ]

        if version_repo.supports_branches:
            version_post_data['branches'] = [
                {'identifier': v.identifier,
                 'verbose_name': v.verbose_name,
                 } for v in version_repo.branches
            ]

        try:
            apiv2.project(project.pk).sync_versions.post(version_post_data)
        except Exception as e:
            # Report sync failures through the module logger (with the
            # traceback) instead of the bare ``print`` the original used,
            # keeping error reporting consistent with the rest of the file.
            log.error("Sync Versions Exception: %s" % e.message, exc_info=True)
    return ret_dict
def setup_environment(version):
    """
    Build the virtualenv and install the project into it.

    Always build projects with a virtualenv.

    Creates (or refreshes) the version's virtualenv, installs the pinned
    doc-building toolchain, then the project's own requirements file (if
    configured or discoverable) and finally the project itself.

    :param version: the Version being built
    :returns: dict mapping step name ('venv', 'doc_builder',
        'requirements', 'install') to that step's command result
    """
    ret_dict = {}
    project = version.project
    build_dir = os.path.join(project.venv_path(version=version.slug), 'build')
    if os.path.exists(build_dir):
        log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='Removing existing build dir'))
        shutil.rmtree(build_dir)
    if project.use_system_packages:
        site_packages = '--system-site-packages'
    else:
        site_packages = '--no-site-packages'
    # Here the command has been modified to support different
    # interpreters.
    ret_dict['venv'] = run(
        '{cmd} {site_packages} {path}'.format(
            cmd='{interpreter} -m virtualenv'.format(
                interpreter=project.python_interpreter),
            site_packages=site_packages,
            path=project.venv_path(version=version.slug)
        )
    )
    # Other code expects sphinx-build to be installed inside the
    # virtualenv.  Using the -I option makes sure it gets installed
    # even if it is already installed system-wide (and
    # --system-site-packages is used)
    if project.use_system_packages:
        ignore_option = '-I'
    else:
        ignore_option = ''
    # Pinned toolchain every build gets, independent of the project.
    requirements = ' '.join([
        'sphinx==1.3.1',
        'Pygments==2.0.2',
        'virtualenv==1.10.1',
        'setuptools==1.1',
        'docutils==0.11',
        'mkdocs==0.13.3',
        'mock==1.0.1',
        'pillow==2.6.1',
        'readthedocs-sphinx-ext==0.5.3',
        'sphinx-rtd-theme==0.1.8',
        'recommonmark==0.1.1',
    ])
    wheeldir = os.path.join(settings.SITE_ROOT, 'deploy', 'wheels')
    ret_dict['doc_builder'] = run(
        (
            '{cmd} install --use-wheel --find-links={wheeldir} -U '
            '{ignore_option} {requirements}'
        ).format(
            cmd=project.venv_bin(version=version.slug, bin='pip'),
            ignore_option=ignore_option,
            wheeldir=wheeldir,
            requirements=requirements,
        )
    )
    # Handle requirements
    requirements_file_path = project.requirements_file
    checkout_path = project.checkout_path(version.slug)
    if not requirements_file_path:
        # No explicit requirements file configured: look for conventional
        # names in the docs dir first, then the checkout root.
        docs_dir = builder_loading.get(project.documentation_type)(version).docs_dir()
        for path in [docs_dir, '']:
            for req_file in ['pip_requirements.txt', 'requirements.txt']:
                test_path = os.path.join(checkout_path, path, req_file)
                print('Testing %s' % test_path)
                if os.path.exists(test_path):
                    requirements_file_path = test_path
                    # NOTE(review): this only breaks the inner loop, so a
                    # match in a later directory can overwrite this one --
                    # confirm that is intended.
                    break
    if requirements_file_path:
        os.chdir(checkout_path)
        ret_dict['requirements'] = run(
            '{cmd} install --exists-action=w -r {requirements}'.format(
                cmd=project.venv_bin(version=version.slug, bin='pip'),
                requirements=requirements_file_path))
    # Handle setup.py
    os.chdir(project.checkout_path(version.slug))
    if os.path.isfile("setup.py"):
        if getattr(settings, 'USE_PIP_INSTALL', False):
            ret_dict['install'] = run(
                '{cmd} install --ignore-installed .'.format(
                    cmd=project.venv_bin(version=version.slug, bin='pip')))
        else:
            ret_dict['install'] = run(
                '{cmd} setup.py install --force'.format(
                    cmd=project.venv_bin(version=version.slug,
                                         bin='python')))
    else:
        # Sentinel status 999: nothing to install; not treated as failure.
        ret_dict['install'] = (999, "", "No setup.py, skipping install")
    return ret_dict
@task()
def build_docs(version, force, pdf, man, epub, dash, search, localmedia):
    """
    This handles the actual building of the documentation

    Runs the HTML builder first (moving its output on success), then the
    optional search/localmedia/pdf/epub builders depending on the flags
    and the project's documentation type.  Every ``results`` entry is the
    (status, stdout, stderr) triple returned by the builder.

    NOTE(review): ``man`` and ``dash`` are accepted but unused in this
    body -- presumably kept for call-site compatibility; confirm callers.
    """
    project = version.project
    results = {}
    before_build.send(sender=version)
    with project.repo_nonblockinglock(version=version,
                                      max_lock_age=getattr(settings, 'REPO_LOCK_SECONDS', 30)):
        html_builder = builder_loading.get(project.documentation_type)(version)
        if force:
            html_builder.force()
        html_builder.append_conf()
        results['html'] = html_builder.build()
        if results['html'][0] == 0:
            html_builder.move()
        # Gracefully attempt to move files via task on web workers.
        try:
            move_files.delay(
                version_pk=version.pk,
                html=True,
                hostname=socket.gethostname(),
            )
        except socket.error:
            pass
        fake_results = (999, "Project Skipped, Didn't build",
                        "Project Skipped, Didn't build")
        if 'mkdocs' in project.documentation_type:
            if search:
                try:
                    search_builder = builder_loading.get('mkdocs_json')(version)
                    results['search'] = search_builder.build()
                    if results['search'][0] == 0:
                        search_builder.move()
                # Fix: was a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt (worker shutdown signals).
                except Exception:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)
        if 'sphinx' in project.documentation_type:
            # Search builder. Creates JSON from docs and sends it to the
            # server.
            if search:
                try:
                    search_builder = builder_loading.get(
                        'sphinx_search')(version)
                    results['search'] = search_builder.build()
                    if results['search'][0] == 0:
                        # Copy json for safe keeping
                        search_builder.move()
                # Fix: narrowed from a bare ``except:`` (see above).
                except Exception:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="JSON Build Error"), exc_info=True)
            # Local media builder for singlepage HTML download archive
            if localmedia:
                try:
                    localmedia_builder = builder_loading.get(
                        'sphinx_singlehtmllocalmedia')(version)
                    results['localmedia'] = localmedia_builder.build()
                    if results['localmedia'][0] == 0:
                        localmedia_builder.move()
                # Fix: narrowed from a bare ``except:`` (see above).
                except Exception:
                    log.error(LOG_TEMPLATE.format(
                        project=project.slug, version=version.slug, msg="Local Media HTML Build Error"), exc_info=True)
            # Optional build steps
            if version.project.slug not in HTML_ONLY and not project.skip:
                if pdf:
                    pdf_builder = builder_loading.get('sphinx_pdf')(version)
                    results['pdf'] = pdf_builder.build()
                    # Always move pdf results even when there's an error.
                    # if pdf_results[0] == 0:
                    pdf_builder.move()
                else:
                    results['pdf'] = fake_results
                if epub:
                    epub_builder = builder_loading.get('sphinx_epub')(version)
                    results['epub'] = epub_builder.build()
                    if results['epub'][0] == 0:
                        epub_builder.move()
                else:
                    results['epub'] = fake_results
    after_build.send(sender=version)
    return results
def create_build(build_pk):
    """
    Old placeholder for build creation. Now it just gets it from the database.

    :param build_pk: build primary key; falsy values yield an empty dict
    :returns: the build dict from the API, stripped of read-only fields
    """
    if not build_pk:
        return {}
    build = api.build(build_pk).get()
    # Drop fields the API adds that must not be written back.
    for key in ('project', 'version', 'resource_uri', 'absolute_uri'):
        build.pop(key, None)
    return build
def record_build(api, record, build, results, state, start_time=None):
    """
    Record a build by hitting the API.

    Returns nothing

    Aggregates the per-step ``results`` triples (status, stdout, stderr)
    into the ``build`` dict's setup/output/error fields and PUTs the
    build back through the API.

    :param api: API client used for the final PUT
    :param record: when falsy, do nothing at all
    :param build: build dict to fill in and persist (must contain 'id')
    :param results: mapping of step name -> (status, stdout, stderr)
    :param state: state string stored on the build
    :param start_time: optional UTC datetime used to compute the duration
    """
    if not record:
        return None
    build['builder'] = socket.gethostname()
    setup_steps = ['checkout', 'venv', 'doc_builder', 'requirements', 'install']
    output_steps = ['html']
    all_steps = setup_steps + output_steps
    build['state'] = state
    if 'html' in results:
        build['success'] = results['html'][0] == 0
    else:
        build['success'] = False
    # Set global state
    # for step in all_steps:
    #     if results.get(step, False):
    #         if results.get(step)[0] != 0:
    #             results['success'] = False
    # Worst (highest) exit status across all steps wins.
    build['exit_code'] = max([results.get(step, [0])[0] for step in all_steps])
    build['setup'] = build['setup_error'] = ""
    build['output'] = build['error'] = ""
    if start_time:
        build['length'] = (datetime.datetime.utcnow() - start_time).total_seconds()
    for step in setup_steps:
        if step in results:
            build['setup'] += "\n\n%s\n-----\n\n" % step
            # Index 1 is stdout; guard against short/odd result shapes.
            try:
                build['setup'] += results.get(step)[1]
            except (IndexError, TypeError):
                pass
            build['setup_error'] += "\n\n%s\n-----\n\n" % step
            # Index 2 is stderr.
            try:
                build['setup_error'] += results.get(step)[2]
            except (IndexError, TypeError):
                pass
    for step in output_steps:
        if step in results:
            build['output'] += "\n\n%s\n-----\n\n" % step
            try:
                build['output'] += results.get(step)[1]
            except (IndexError, TypeError):
                pass
            build['error'] += "\n\n%s\n-----\n\n" % step
            try:
                build['error'] += results.get(step)[2]
            except (IndexError, TypeError):
                pass
    # Attempt to stop unicode errors on build reporting
    # (Python 2 only: ``basestring``/``str.decode``; coerces every string
    # field to unicode, dropping undecodable bytes.)
    for key, val in build.items():
        if isinstance(val, basestring):
            build[key] = val.decode('utf-8', 'ignore')
    try:
        api.build(build['id']).put(build)
    except Exception:
        log.error("Unable to post a new build", exc_info=True)
def record_pdf(api, record, results, state, version):
    """
    Record the PDF build outcome as its own build entry via the API.

    No-op unless recording is enabled and the project is a sphinx type
    (only sphinx builds produce a 'pdf' result here).
    """
    if not record or 'sphinx' not in version.project.documentation_type:
        return None
    try:
        if 'pdf' in results:
            # results['pdf'] is the (status, stdout, stderr) triple.
            pdf_exit = results['pdf'][0]
            pdf_success = pdf_exit == 0
            pdf_output = results['pdf'][1]
            pdf_error = results['pdf'][2]
        else:
            pdf_exit = 999
            pdf_success = False
            pdf_output = pdf_error = "PDF Failed"
        pdf_output = pdf_output.decode('utf-8', 'ignore')
        pdf_error = pdf_error.decode('utf-8', 'ignore')
        # Override the exit status when the output says a PDF was written
        # -- presumably because the builder's exit code is unreliable.
        if 'Output written on' in pdf_output:
            pdf_success = True
        api.build.post(dict(
            state=state,
            project='/api/v1/project/%s/' % version.project.pk,
            version='/api/v1/version/%s/' % version.pk,
            success=pdf_success,
            type='pdf',
            output=pdf_output,
            error=pdf_error,
            exit_code=pdf_exit,
            builder=socket.gethostname(),
        ))
    except Exception:
        log.error(LOG_TEMPLATE.format(project=version.project.slug,
                                      version=version.slug, msg="Unable to post a new build"), exc_info=True)
###########
# Web tasks
###########
@task(queue='web')
def finish_build(version_pk, build_pk, hostname=None, html=False, localmedia=False, search=False, pdf=False, epub=False):
    """
    Build Finished, do house keeping bits

    Marks the version active/built on HTML success, syncs the produced
    artifacts from *hostname*, refreshes symlinks, then fans out the
    delayed metadata/fileify/search/notification tasks.
    """
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)
    if html:
        version.active = True
        version.built = True
        version.save()
    move_files(
        version_pk=version_pk,
        hostname=hostname,
        html=html,
        localmedia=localmedia,
        search=search,
        pdf=pdf,
        epub=epub,
    )
    symlinks.symlink_cnames(version)
    symlinks.symlink_translations(version)
    symlinks.symlink_subprojects(version)
    if version.project.single_version:
        symlinks.symlink_single_version(version)
    else:
        symlinks.remove_symlink_single_version(version)
    # Delayed tasks
    update_static_metadata.delay(version.project.pk)
    fileify.delay(version.pk, commit=build.commit)
    update_search.delay(version.pk, commit=build.commit)
    # Notify only when the HTML build failed; 'stable' and exit code 423
    # are excluded -- presumably lock-related; confirm the convention.
    if not html and version.slug != 'stable' and build.exit_code != 423:
        send_notifications.delay(version.pk, build_pk=build.pk)
@task(queue='web')
def move_files(version_pk, hostname, html=False, localmedia=False, search=False, pdf=False, epub=False):
    """Copy built artifacts from *hostname* into their serving locations.

    Each boolean flag selects one artifact type.  Sphinx projects can
    produce localmedia/search/pdf/epub artifacts; mkdocs projects only
    the search JSON.
    """
    version = Version.objects.get(pk=version_pk)
    if html:
        from_path = version.project.artifact_path(version=version.slug, type=version.project.documentation_type)
        target = version.project.rtd_build_path(version.slug)
        Syncer.copy(from_path, target, host=hostname)
    if 'sphinx' in version.project.documentation_type:
        if localmedia:
            from_path = version.project.artifact_path(version=version.slug, type='sphinx_localmedia')
            to_path = version.project.get_production_media_path(type='htmlzip', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
        if search:
            from_path = version.project.artifact_path(version=version.slug, type='sphinx_search')
            to_path = version.project.get_production_media_path(type='json', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
        # Always move PDF's because the return code lies.
        if pdf:
            from_path = version.project.artifact_path(version=version.slug, type='sphinx_pdf')
            to_path = version.project.get_production_media_path(type='pdf', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
        if epub:
            from_path = version.project.artifact_path(version=version.slug, type='sphinx_epub')
            to_path = version.project.get_production_media_path(type='epub', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
    if 'mkdocs' in version.project.documentation_type:
        if search:
            from_path = version.project.artifact_path(version=version.slug, type='mkdocs_json')
            to_path = version.project.get_production_media_path(type='json', version_slug=version.slug, include_file=False)
            Syncer.copy(from_path, to_path, host=hostname)
@task(queue='web')
def update_search(version_pk, commit):
    """Collect the version's per-page JSON and push it to the search index.

    Chooses the JSON processor by documentation type; bails out (with an
    error log) for unknown types.
    """
    version = Version.objects.get(pk=version_pk)
    if 'sphinx' in version.project.documentation_type:
        page_list = process_all_json_files(version, build_dir=False)
    elif 'mkdocs' in version.project.documentation_type:
        page_list = process_mkdocs_json(version, build_dir=False)
    else:
        log.error('Unknown documentation type: %s' % version.project.documentation_type)
        return
    log_msg = ' '.join([page['path'] for page in page_list])
    log.info("(Search Index) Sending Data: %s [%s]" % (version.project.slug, log_msg))
    index_search_request(version=version, page_list=page_list, commit=commit, project_scale=0, page_scale=0)
@task(queue='web')
def fileify(version_pk, commit):
    """
    Create ImportedFile objects for all of a version's files.

    This is a prereq for indexing the docs for search.
    It also causes celery-haystack to kick off an index of the file.
    """
    version = Version.objects.get(pk=version_pk)
    project = version.project
    if not commit:
        # NOTE(review): this only logs -- execution falls through and the
        # walk below still runs with commit=None; confirm an early return
        # was not intended here.
        log.info(LOG_TEMPLATE.format(
            project=project.slug, version=version.slug, msg='Imported File not being built because no commit information'))
    path = project.rtd_build_path(version.slug)
    if path:
        log.info(LOG_TEMPLATE.format(
            project=project.slug, version=version.slug, msg='Creating ImportedFiles'))
        for root, dirnames, filenames in os.walk(path):
            for filename in filenames:
                if fnmatch.fnmatch(filename, '*.html'):
                    # Store paths relative to the build root.
                    dirpath = os.path.join(root.replace(path, '').lstrip('/'),
                                           filename.lstrip('/'))
                    obj, created = ImportedFile.objects.get_or_create(
                        project=project,
                        version=version,
                        path=dirpath,
                        name=filename,
                        commit=commit,
                    )
                    if not created:
                        # Re-save existing rows -- presumably so post-save
                        # (indexing) signals fire again; verify.
                        obj.save()
        # Delete ImportedFiles from previous versions
        ImportedFile.objects.filter(project=project, version=version).exclude(commit=commit).delete()
    else:
        log.info(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg='No ImportedFile files'))
@task(queue='web')
def send_notifications(version_pk, build_pk):
    """Fire all webhook and email notifications configured for a build."""
    version = Version.objects.get(pk=version_pk)
    build = Build.objects.get(pk=build_pk)
    project = version.project
    for hook in project.webhook_notifications.all():
        webhook_notification(version, build, hook.url)
    emails = project.emailhook_notifications.all().values_list('email', flat=True)
    for email in emails:
        email_notification(version, build, email)
def email_notification(version, build, email):
    """Send the build-failed notification email for *build* to *email*.

    Renders the 'projects/email/build_failed' templates with links to the
    build page and the unsubscribe (notifications settings) page.
    """
    log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug,
                                  msg='sending email to: %s' % email))
    context = {'version': version,
               'project': version.project,
               'build': build,
               'build_url': 'https://{0}{1}'.format(
                   getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
                   build.get_absolute_url()),
               'unsub_url': 'https://{0}{1}'.format(
                   getattr(settings, 'PRODUCTION_DOMAIN', 'readthedocs.org'),
                   reverse('projects_notifications', args=[version.project.slug])),
               }
    # Prefer the short commit hash in the subject when it is known.
    if build.commit:
        title = _('Failed: {project.name} ({commit})').format(commit=build.commit[:8], **context)
    else:
        title = _('Failed: {project.name} ({version.verbose_name})').format(**context)
    send_email(
        email,
        title,
        template='projects/email/build_failed.txt',
        template_html='projects/email/build_failed.html',
        context=context
    )
def webhook_notification(version, build, hook_url):
    """POST a JSON payload describing *build* to *hook_url*.

    :param version: the Version the build belongs to
    :param build: the Build whose status is being reported
    :param hook_url: webhook endpoint to notify
    """
    # Fix: the original referenced an undefined name ``project``, raising
    # NameError as soon as any webhook fired.
    project = version.project
    data = json.dumps({
        'name': project.name,
        'slug': project.slug,
        'build': {
            'id': build.id,
            'success': build.success,
            'date': build.date.strftime('%Y-%m-%d %H:%M:%S'),
        }
    })
    log.debug(LOG_TEMPLATE.format(project=project.slug, version='', msg='sending notification to: %s' % hook_url))
    requests.post(hook_url, data=data)
@task(queue='web')
def update_static_metadata(project_pk, path=None):
    """Update static metadata JSON file

    Metadata settings include the following project settings:

    version
      The default version for the project, default: `latest`

    language
      The default language for the project, default: `en`

    languages
      List of languages built by linked translation projects.

    :param project_pk: primary key of the Project to write metadata for
    :param path: optional output path; defaults to the project's
        ``static_metadata_path()``
    """
    project = Project.objects.get(pk=project_pk)
    if not path:
        path = project.static_metadata_path()
    log.info(LOG_TEMPLATE.format(
        project=project.slug,
        version='',
        msg='Updating static metadata',
    ))
    translations = [trans.language for trans in project.translations.all()]
    languages = set(translations)
    # Convert to JSON safe types
    metadata = {
        'version': project.default_version,
        'language': project.language,
        'languages': list(languages),
        'single_version': project.single_version,
    }
    try:
        # Fix: use a context manager so the file handle is closed even if
        # json.dump raises (the original leaked it in that case).
        with open(path, 'w+') as fh:
            json.dump(metadata, fh)
        Syncer.copy(path, path, host=socket.gethostname(), file=True)
    except (AttributeError, IOError) as e:
        log.debug(LOG_TEMPLATE.format(
            project=project.slug,
            version='',
            msg='Cannot write to metadata.json: {0}'.format(e)
        ))
#@periodic_task(run_every=crontab(hour="*", minute="*/5", day_of_week="*"))
def update_docs_pull(record=False, pdf=False, man=False, force=False):
    """
    A high-level interface that will update all of the projects.

    This is mainly used from a cronjob or management command.

    :param record: whether builds should be recorded via the API
    :param pdf: also build PDF output
    :param man: also build man pages
    :param force: accepted for interface symmetry; not used in this body
    """
    for version in Version.objects.filter(built=True):
        try:
            update_docs(
                pk=version.project.pk, version_pk=version.pk, record=record, pdf=pdf, man=man)
        # Fix: ``except Exception, e`` is Python-2-only syntax and ``e``
        # was unused; the traceback is captured via exc_info anyway.
        except Exception:
            log.error("update_docs_pull failed", exc_info=True)
##############
# Random Tasks
##############
@task()
def remove_dir(path):
    """
    Remove a directory on the build/celery server.

    This is mainly a wrapper around shutil.rmtree so that app servers
    can kill things on the build server.

    :param path: absolute directory path to delete; shutil.rmtree raises
        if it does not exist.
    """
    log.info("Removing %s" % path)
    shutil.rmtree(path)
@task(queue='web')
def clear_artifacts(version_pk):
    """ Remove artifacts from the web servers. """
    version = Version.objects.get(pk=version_pk)
    project = version.project
    # Production media (pdf/epub/htmlzip) first, then the built HTML tree.
    for media_type in ('pdf', 'epub', 'htmlzip'):
        media_path = project.get_production_media_path(
            type=media_type, version_slug=version.slug)
        run_on_app_servers('rm -rf %s' % media_path)
    run_on_app_servers('rm -rf %s' % project.rtd_build_path(version=version.slug))
# @task()
# def update_config_from_json(version_pk):
# """
# Check out or update the given project's repository.
# """
# Remove circular import
# from projects.forms import ImportProjectForm
# version_data = api.version(version_pk).get()
# version = make_api_version(version_data)
# project = version.project
# log.debug(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg="Checking for json config"))
# try:
# rtd_json = open(os.path.join(
# project.checkout_path(version.slug),
# '.rtd.json'
# ))
# json_obj = json.load(rtd_json)
# for key in json_obj.keys():
# Treat the defined fields on the Import form as
# the canonical list of allowed user editable fields.
# This is in essense just another UI for that form.
# if key not in ImportProjectForm._meta.fields:
# del json_obj[key]
# except IOError:
# log.debug(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg="No rtd.json found."))
# return None
# project_data = api.project(project.pk).get()
# project_data.update(json_obj)
# api.project(project.pk).put(project_data)
# log.debug(LOG_TEMPLATE.format(project=project.slug, version=version.slug, msg="Updated from JSON."))
# def update_state(version):
# """
# Keep state between the repo and the database
# """
# log.debug(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug, msg='Setting config values from .rtd.yml'))
# try:
# update_config_from_json(version.pk)
# except Exception, e:
# Never kill the build, but log the error
# log.error(LOG_TEMPLATE.format(project=version.project.slug, version=version.slug, msg='Failure in config parsing code: %s ' % e.message))
# @task()
# def zenircbot_notification(version_id):
# version = version.objects.get(id=version_id)
# message = "build of %s successful" % version
# redis_obj = redis.redis(**settings.redis)
# irc = getattr(settings, 'irc_channel', '#readthedocs-build')
# try:
# redis_obj.publish('out',
# json.dumps({
# 'version': 1,
# 'type': 'privmsg',
# 'data': {
# 'to': irc,
# 'message': message,
# }
# }))
# except redis.connectionerror:
# return
# @task()
# def clear_artifacts(version_pk):
# """ Remove artifacts from the build server. """
# Stop doing this for now as it causes 403s if people build things back to
# back some times because of a race condition
# version_data = api.version(version_pk).get()
# version = make_api_version(version_data)
# run('rm -rf %s' % version.project.full_epub_path(version.slug))
# run('rm -rf %s' % version.project.full_man_path(version.slug))
# run('rm -rf %s' % version.project.full_build_path(version.slug))
# run('rm -rf %s' % version.project.full_latex_path(version.slug))
# @periodic_task(run_every=crontab(hour="*/12", minute="*", day_of_week="*"))
# def update_mirror_docs():
# """
# A periodic task used to update all projects that we mirror.
# """
# record = False
# current = datetime.datetime.now()
# Only record one build a day, at midnight.
# if current.hour == 0 and current.minute == 0:
# record = True
# data = apiv2.project().get(mirror=True, page_size=500)
# for project_data in data['results']:
# p = make_api_project(project_data)
# update_docs(pk=p.pk, record=record)
| mit |
ujenmr/ansible | test/units/modules/network/eos/test_eos_user.py | 68 | 4171 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.eos import eos_user
from units.modules.utils import set_module_args
from .eos_module import TestEosModule, load_fixture
class TestEosUserModule(TestEosModule):
    """Unit tests for the eos_user module.

    ``get_config``/``load_config`` are patched so no device connection is
    made; the running config comes from the ``eos_user_config.cfg``
    fixture and each test asserts the CLI commands the module generates.
    """

    module = eos_user

    def setUp(self):
        """Start patches for the module's config read/write entry points."""
        super(TestEosUserModule, self).setUp()
        self.mock_get_config = patch('ansible.modules.network.eos.eos_user.get_config')
        self.get_config = self.mock_get_config.start()
        self.mock_load_config = patch('ansible.modules.network.eos.eos_user.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        """Stop the patches started in setUp."""
        super(TestEosUserModule, self).tearDown()
        self.mock_get_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        """Feed the canned running-config and a no-op diff to the module."""
        self.get_config.return_value = load_fixture('eos_user_config.cfg')
        self.load_config.return_value = dict(diff=None, session='session')

    def test_eos_user_create(self):
        """Creating a nopassword user emits the matching username command."""
        set_module_args(dict(name='test', nopassword=True))
        commands = ['username test nopassword']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_delete(self):
        """state=absent removes the user."""
        set_module_args(dict(name='ansible', state='absent'))
        commands = ['no username ansible']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_password(self):
        """configured_password maps to ``username ... secret``."""
        set_module_args(dict(name='ansible', configured_password='test'))
        commands = ['username ansible secret test']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_privilege(self):
        """A valid privilege level is included in the commands."""
        set_module_args(dict(name='ansible', privilege=15, configured_password='test'))
        result = self.execute_module(changed=True)
        self.assertIn('username ansible privilege 15', result['commands'])

    def test_eos_user_privilege_invalid(self):
        """Privilege outside 0-15 fails the module."""
        set_module_args(dict(name='ansible', privilege=25, configured_password='test'))
        self.execute_module(failed=True)

    def test_eos_user_purge(self):
        """purge=True removes users not named in the task."""
        set_module_args(dict(purge=True))
        commands = ['no username ansible']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_role(self):
        """role maps to ``username ... role``."""
        set_module_args(dict(name='ansible', role='test', configured_password='test'))
        result = self.execute_module(changed=True)
        self.assertIn('username ansible role test', result['commands'])

    def test_eos_user_sshkey(self):
        """sshkey maps to ``username ... sshkey``."""
        set_module_args(dict(name='ansible', sshkey='test'))
        commands = ['username ansible sshkey test']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_update_password_changed(self):
        """on_create still sets the password for a brand-new user."""
        set_module_args(dict(name='test', configured_password='test', update_password='on_create'))
        commands = ['username test secret test']
        self.execute_module(changed=True, commands=commands)

    def test_eos_user_update_password_on_create_ok(self):
        """on_create skips the password for an existing user (no change)."""
        set_module_args(dict(name='ansible', configured_password='test', update_password='on_create'))
        self.execute_module()

    def test_eos_user_update_password_always(self):
        """update_password=always re-sets the password for existing users."""
        set_module_args(dict(name='ansible', configured_password='test', update_password='always'))
        commands = ['username ansible secret test']
        self.execute_module(changed=True, commands=commands)
| gpl-3.0 |
codeaurora-unoffical/linux-msm | tools/perf/scripts/python/intel-pt-events.py | 233 | 3961 | # intel-pt-events.py: Print Intel PT Power Events and PTWRITE
# Copyright (c) 2017, Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
import os
import sys
import struct
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
# These perf imports are not used at present
#from perf_trace_context import *
#from Core import *
def trace_begin():
	# Called once by perf before any samples are processed.
	print "Intel PT Power Events and PTWRITE"

def trace_end():
	# Called once by perf after the last sample.
	print "End"

def trace_unhandled(event_name, context, event_fields_dict):
	# Fallback for events without a dedicated handler: dump all fields
	# as sorted "key=value" pairs on one line.
	print ' '.join(['%s=%s'%(k,str(v))for k,v in sorted(event_fields_dict.items())])
def print_ptwrite(raw_buf):
	# Raw payload: little-endian u32 flags then u64 ptwrite value.
	data = struct.unpack_from("<IQ", raw_buf)
	flags = data[0]
	payload = data[1]
	# Bit 0 of flags: the sample IP is exact.
	exact_ip = flags & 1
	# Trailing comma: Python 2 print without newline, so the caller can
	# append the IP/symbol on the same output line.
	print "IP: %u payload: %#x" % (exact_ip, payload),

def print_cbr(raw_buf):
	# Core-to-bus ratio event: four u8 values then two u32 values.
	data = struct.unpack_from("<BBBBII", raw_buf)
	cbr = data[0]
	# data[4] appears to be a frequency in kHz; +500/1000 rounds to the
	# nearest MHz (Python 2 integer division) -- verify field layout.
	f = (data[4] + 500) / 1000
	# Rounded percentage of the maximum ratio (data[2] presumably the
	# max ratio) -- verify.
	p = ((cbr * 1000 / data[2]) + 5) / 10
	print "%3u freq: %4u MHz (%3u%%)" % (cbr, f, p),

def print_mwait(raw_buf):
	# MWAIT payload: low byte = hints, bits 32-33 = extensions.
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	hints = payload & 0xff
	extensions = (payload >> 32) & 0x3
	print "hints: %#x extensions: %#x" % (hints, extensions),

def print_pwre(raw_buf):
	# Power-state entry: bit 7 = hw flag, bits 12-15 = C-state,
	# bits 8-11 = sub C-state.
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	hw = (payload >> 7) & 1
	cstate = (payload >> 12) & 0xf
	subcstate = (payload >> 8) & 0xf
	print "hw: %u cstate: %u sub-cstate: %u" % (hw, cstate, subcstate),

def print_exstop(raw_buf):
	# Execution-stop payload: u32 flags; bit 0 = exact IP.
	data = struct.unpack_from("<I", raw_buf)
	flags = data[0]
	exact_ip = flags & 1
	print "IP: %u" % (exact_ip),

def print_pwrx(raw_buf):
	# Power-state exit: low nibbles of the u64 payload encode deepest and
	# last C-state; bits 8-11 the wake reason.
	data = struct.unpack_from("<IQ", raw_buf)
	payload = data[1]
	deepest_cstate = payload & 0xf
	last_cstate = (payload >> 4) & 0xf
	wake_reason = (payload >> 8) & 0xf
	print "deepest cstate: %u last cstate: %u wake reason: %#x" % (deepest_cstate, last_cstate, wake_reason),
def print_common_start(comm, sample, name):
	# Shared line prefix: comm, pid/tid, cpu, timestamp split into
	# seconds.nanoseconds, and the event name.  Trailing comma keeps the
	# line open for the payload printers.
	ts = sample["time"]
	cpu = sample["cpu"]
	pid = sample["pid"]
	tid = sample["tid"]
	print "%16s %5u/%-5u [%03u] %9u.%09u %7s:" % (comm, pid, tid, cpu, ts / 1000000000, ts %1000000000, name),

def print_common_ip(sample, symbol, dso):
	# Line terminator: instruction pointer plus resolved symbol and DSO.
	ip = sample["ip"]
	print "%16x %s (%s)" % (ip, symbol, dso)
def process_event(param_dict):
	# Main perf hook, called once per sample; dispatches on the event
	# name to the matching payload printer above.
	event_attr = param_dict["attr"]
	sample = param_dict["sample"]
	raw_buf = param_dict["raw_buf"]
	comm = param_dict["comm"]
	name = param_dict["ev_name"]

	# Symbol and dso info are not always resolved
	if (param_dict.has_key("dso")):
		dso = param_dict["dso"]
	else:
		dso = "[unknown]"

	if (param_dict.has_key("symbol")):
		symbol = param_dict["symbol"]
	else:
		symbol = "[unknown]"

	# Every handled event prints: common prefix, payload, then IP/symbol.
	if name == "ptwrite":
		print_common_start(comm, sample, name)
		print_ptwrite(raw_buf)
		print_common_ip(sample, symbol, dso)
	elif name == "cbr":
		print_common_start(comm, sample, name)
		print_cbr(raw_buf)
		print_common_ip(sample, symbol, dso)
	elif name == "mwait":
		print_common_start(comm, sample, name)
		print_mwait(raw_buf)
		print_common_ip(sample, symbol, dso)
	elif name == "pwre":
		print_common_start(comm, sample, name)
		print_pwre(raw_buf)
		print_common_ip(sample, symbol, dso)
	elif name == "exstop":
		print_common_start(comm, sample, name)
		print_exstop(raw_buf)
		print_common_ip(sample, symbol, dso)
	elif name == "pwrx":
		print_common_start(comm, sample, name)
		print_pwrx(raw_buf)
		print_common_ip(sample, symbol, dso)
| gpl-2.0 |
sourcepole/kadas-albireo | python/ext-libs/jinja2/utils.py | 598 | 16165 | # -*- coding: utf-8 -*-
"""
jinja2.utils
~~~~~~~~~~~~
Utility functions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import re
import errno
from collections import deque
from jinja2._compat import text_type, string_types, implements_iterator, \
allocate_lock, url_quote
_word_split_re = re.compile(r'(\s+)')

# Splits a token into leading punctuation, the middle part and trailing
# punctuation so urlize() can linkify only the middle.  The alternatives
# include the HTML-escaped bracket forms: the plain '<'/'>' duplicates
# here were a mangling of '&lt;'/'&gt;' (a duplicate alternative after
# re.escape is useless), restored below.
_punctuation_re = re.compile(
    '^(?P<lead>(?:%s)*)(?P<middle>.*?)(?P<trail>(?:%s)*)$' % (
        '|'.join(map(re.escape, ('(', '<', '&lt;'))),
        '|'.join(map(re.escape, ('.', ',', ')', '>', '\n', '&gt;')))
    )
)
_simple_email_re = re.compile(r'^\S+@[a-zA-Z0-9._-]+\.[a-zA-Z0-9._-]+$')
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
_entity_re = re.compile(r'&([^;]+);')
_letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
_digits = '0123456789'

# special singleton representing missing values for the runtime
missing = type('MissingType', (), {'__repr__': lambda x: 'missing'})()

# internal code
internal_code = set()

concat = u''.join
def contextfunction(f):
    """Mark *f* as a context callable and return it.

    A context callable is passed the active :class:`Context` as its first
    argument when invoked from a template.  That is useful when a function
    needs access to the context or to helpers the context object provides,
    e.g. a function returning the sorted names a template exports::

        @contextfunction
        def get_exported_names(context):
            return sorted(context.exported_vars)
    """
    setattr(f, 'contextfunction', True)
    return f
def evalcontextfunction(f):
    """Mark *f* as an eval-context callable and return it.

    Works like :func:`contextfunction`, except that the first argument
    passed in is an evaluation context object instead of the full
    context.  For more information about the eval context, see
    :ref:`eval-context`.

    .. versionadded:: 2.4
    """
    setattr(f, 'evalcontextfunction', True)
    return f
def environmentfunction(f):
    """Mark *f* as an environment callable and return it.

    Behaves exactly like :func:`contextfunction`, except the decorated
    function receives the active :class:`Environment` (not the context)
    as its first argument.
    """
    setattr(f, 'environmentfunction', True)
    return f
def internalcode(f):
    """Marks the function as internally used"""
    # Registers f's code object in the module-level ``internal_code`` set
    # (consumers of that set are not visible in this file) and returns f
    # unchanged so this works as a decorator.
    internal_code.add(f.__code__)
    return f
def is_undefined(obj):
    """Check if the object passed is undefined.  This does nothing more than
    performing an instance check against :class:`Undefined` but looks nicer.
    This can be used for custom filters or tests that want to react to
    undefined variables.  For example a custom default filter can look like
    this::

        def default(var, default=''):
            if is_undefined(var):
                return default
            return var
    """
    # Imported lazily here rather than at module level -- presumably to
    # avoid a circular import with jinja2.runtime; verify before moving.
    from jinja2.runtime import Undefined
    return isinstance(obj, Undefined)
def consume(iterable):
    """Exhaust *iterable* completely, discarding every item."""
    # A zero-length deque drains the iterator at C speed without
    # retaining anything (standard itertools "consume" recipe).
    deque(iterable, maxlen=0)
def clear_caches():
    """Jinja2 keeps internal caches for environments and lexers.  These
    exist so Jinja2 doesn't have to recreate environments and lexers all
    the time.  Normally you don't have to care about that, but when
    measuring memory consumption you may want to clear them.
    """
    from jinja2.environment import _spontaneous_environments
    from jinja2.lexer import _lexer_cache
    for cache in (_spontaneous_environments, _lexer_cache):
        cache.clear()
def import_string(import_name, silent=False):
    """Import an object based on a string name.

    Useful when import paths should act as endpoints or similar.  The
    path may use dotted notation (``xml.sax.saxutils.escape``) or a colon
    as the object delimiter (``xml.sax.saxutils:escape``).

    If `silent` is True the return value will be `None` if the import
    fails.

    :return: imported object
    """
    try:
        if ':' in import_name:
            module_name, attribute = import_name.split(':', 1)
        elif '.' in import_name:
            parts = import_name.split('.')
            module_name, attribute = '.'.join(parts[:-1]), parts[-1]
        else:
            # A bare module name: import and return the module itself.
            return __import__(import_name)
        module = __import__(module_name, None, None, [attribute])
        return getattr(module, attribute)
    except (ImportError, AttributeError):
        if not silent:
            raise
def open_if_exists(filename, mode='rb'):
    """Return an open file object for *filename* if that file exists,
    otherwise `None`.
    """
    try:
        fd = open(filename, mode)
    except IOError as e:
        # ENOENT (missing file) and EISDIR (directory instead of file)
        # both mean "does not exist" here; anything else is a real error
        if e.errno not in (errno.ENOENT, errno.EISDIR):
            raise
        return None
    return fd
def object_type_repr(obj):
    """Return the name of the object's type.  For some recognized
    singletons (`None` and `Ellipsis`) the name of the object itself is
    returned instead.
    """
    if obj is None:
        return 'None'
    elif obj is Ellipsis:
        return 'Ellipsis'
    cls = obj.__class__
    # builtin types live in '__builtin__' on 2.x and 'builtins' on 3.x
    # and are printed without a module prefix
    if cls.__module__ in ('__builtin__', 'builtins'):
        name = cls.__name__
    else:
        name = '%s.%s' % (cls.__module__, cls.__name__)
    return '%s object' % name
def pformat(obj, verbose=False):
    """Prettyprint an object.  Uses the `pretty` library when it is
    installed and falls back to the builtin :mod:`pprint` otherwise.

    :param obj: the object to format.
    :param verbose: forwarded to ``pretty.pretty``; ignored by the
                    :mod:`pprint` fallback.
    """
    # Keep only the import inside the try block: an ImportError raised
    # *inside* pretty() itself should propagate instead of silently
    # switching to the pprint fallback.
    try:
        from pretty import pretty
    except ImportError:
        from pprint import pformat
        return pformat(obj)
    return pretty(obj, verbose=verbose)
def urlize(text, trim_url_limit=None, nofollow=False):
    """Converts any URLs in text into clickable links. Works on http://,
    https:// and www. links. Links can have trailing punctuation (periods,
    commas, close-parens) and leading punctuation (opening parens) and
    it'll still do the right thing.

    If trim_url_limit is not None, the URLs in link text will be limited
    to trim_url_limit characters.

    If nofollow is True, the URLs in link text will get a rel="nofollow"
    attribute.
    """
    # truncate the *displayed* link text to `limit` chars, appending
    # '...' when something was actually cut off; the href keeps the
    # full URL
    trim_url = lambda x, limit=trim_url_limit: limit is not None \
                         and (x[:limit] + (len(x) >= limit and '...'
                         or '')) or x
    # _word_split_re / _punctuation_re / _simple_email_re / _letters /
    # _digits are module-level helpers defined elsewhere in this file
    words = _word_split_re.split(text_type(escape(text)))
    nofollow_attr = nofollow and ' rel="nofollow"' or ''
    for i, word in enumerate(words):
        match = _punctuation_re.match(word)
        if match:
            # split off leading/trailing punctuation so only the middle
            # part is linkified
            lead, middle, trail = match.groups()
            # scheme-less URLs: either www.* or something that looks
            # like a bare .org/.net/.com domain
            if middle.startswith('www.') or (
                '@' not in middle and
                not middle.startswith('http://') and
                not middle.startswith('https://') and
                len(middle) > 0 and
                middle[0] in _letters + _digits and (
                    middle.endswith('.org') or
                    middle.endswith('.net') or
                    middle.endswith('.com')
                )):
                middle = '<a href="http://%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            if middle.startswith('http://') or \
               middle.startswith('https://'):
                middle = '<a href="%s"%s>%s</a>' % (middle,
                    nofollow_attr, trim_url(middle))
            # plain e-mail addresses become mailto: links
            if '@' in middle and not middle.startswith('www.') and \
               not ':' in middle and _simple_email_re.match(middle):
                middle = '<a href="mailto:%s">%s</a>' % (middle, middle)
            # only rewrite the word if something actually changed
            if lead + middle + trail != word:
                words[i] = lead + middle + trail
    return u''.join(words)
def generate_lorem_ipsum(n=5, html=True, min=20, max=100):
    """Generate *n* paragraphs of lorem ipsum for the template.

    :param n: number of paragraphs to produce.
    :param html: when true, wrap each paragraph in ``<p>`` tags and
                 return an escaped :class:`Markup` string; otherwise
                 return plain text paragraphs separated by blank lines.
    :param min: lower bound of words per paragraph (shadows the
                builtin ``min`` inside this function).
    :param max: upper bound of words per paragraph (shadows the
                builtin ``max``; exclusive, per ``randrange``).
    """
    from jinja2.constants import LOREM_IPSUM_WORDS
    from random import choice, randrange
    words = LOREM_IPSUM_WORDS.split()
    result = []
    for _ in range(n):
        next_capitalized = True
        last_comma = last_fullstop = 0
        word = None
        last = None
        p = []
        # each paragraph contains out of 20 to 100 words.
        for idx, _ in enumerate(range(randrange(min, max))):
            # pick a word different from the previous one
            while True:
                word = choice(words)
                if word != last:
                    last = word
                    break
            if next_capitalized:
                word = word.capitalize()
                next_capitalized = False
            # add commas
            if idx - randrange(3, 8) > last_comma:
                last_comma = idx
                last_fullstop += 2
                word += ','
            # add end of sentences
            if idx - randrange(10, 20) > last_fullstop:
                last_comma = last_fullstop = idx
                word += '.'
                next_capitalized = True
            p.append(word)
        # ensure that the paragraph ends with a dot.
        p = u' '.join(p)
        if p.endswith(','):
            p = p[:-1] + '.'
        elif not p.endswith('.'):
            p += '.'
        result.append(p)
    if not html:
        return u'\n\n'.join(result)
    # Markup/escape come from markupsafe (imported elsewhere in this file)
    return Markup(u'\n'.join(u'<p>%s</p>' % escape(x) for x in result))
def unicode_urlencode(obj, charset='utf-8'):
    """URL escapes a single bytestring or unicode string with the
    given charset if applicable to URL safe quoting under all rules
    that need to be considered under all supported Python versions.

    If non strings are provided they are converted to their unicode
    representation first.
    """
    # string_types / text_type / url_quote are py2/py3 compatibility
    # aliases imported at module level (presumably from jinja2._compat
    # -- verify).  The isinstance order matters: non-strings are first
    # converted to text, then text is encoded to bytes before quoting.
    if not isinstance(obj, string_types):
        obj = text_type(obj)
    if isinstance(obj, text_type):
        obj = obj.encode(charset)
    return text_type(url_quote(obj))
class LRUCache(object):
    """A simple LRU Cache implementation.

    Entries are kept in ``_mapping``; ``_queue`` records usage order
    with the most recently used key at the right end.  Writes are
    serialized with ``_wlock``; reads are mostly unlocked, which is why
    several methods tolerate a racing ``ValueError`` from the queue.
    """

    # this is fast for small capacities (something below 1000) but doesn't
    # scale.  But as long as it's only used as storage for templates this
    # won't do any harm.

    def __init__(self, capacity):
        # capacity: maximum number of entries before the least recently
        # used one is evicted
        self.capacity = capacity
        self._mapping = {}
        self._queue = deque()
        self._postinit()

    def _postinit(self):
        # alias all queue methods for faster lookup
        self._popleft = self._queue.popleft
        self._pop = self._queue.pop
        self._remove = self._queue.remove
        self._wlock = allocate_lock()
        self._append = self._queue.append

    def __getstate__(self):
        # the lock and the bound-method aliases are not picklable; only
        # the plain data goes into the pickle
        return {
            'capacity': self.capacity,
            '_mapping': self._mapping,
            '_queue': self._queue
        }

    def __setstate__(self, d):
        self.__dict__.update(d)
        # re-create the lock and method aliases dropped by __getstate__
        self._postinit()

    def __getnewargs__(self):
        return (self.capacity,)

    def copy(self):
        """Return a shallow copy of the instance."""
        rv = self.__class__(self.capacity)
        rv._mapping.update(self._mapping)
        rv._queue = deque(self._queue)
        return rv

    def get(self, key, default=None):
        """Return an item from the cache dict or `default`"""
        try:
            return self[key]
        except KeyError:
            return default

    def setdefault(self, key, default=None):
        """Set `default` if the key is not in the cache otherwise
        leave unchanged. Return the value of this key.
        """
        self._wlock.acquire()
        try:
            try:
                return self[key]
            except KeyError:
                self[key] = default
                return default
        finally:
            self._wlock.release()

    def clear(self):
        """Clear the cache."""
        self._wlock.acquire()
        try:
            self._mapping.clear()
            self._queue.clear()
        finally:
            self._wlock.release()

    def __contains__(self, key):
        """Check if a key exists in this cache."""
        return key in self._mapping

    def __len__(self):
        """Return the current size of the cache."""
        return len(self._mapping)

    def __repr__(self):
        return '<%s %r>' % (
            self.__class__.__name__,
            self._mapping
        )

    def __getitem__(self, key):
        """Get an item from the cache. Moves the item up so that it has the
        highest priority then.

        Raise a `KeyError` if it does not exist.
        """
        self._wlock.acquire()
        try:
            rv = self._mapping[key]
            # only reorder when the key is not already the freshest one
            if self._queue[-1] != key:
                try:
                    self._remove(key)
                except ValueError:
                    # if something removed the key from the container
                    # when we read, ignore the ValueError that we would
                    # get otherwise.
                    pass
                self._append(key)
            return rv
        finally:
            self._wlock.release()

    def __setitem__(self, key, value):
        """Sets the value for an item. Moves the item up so that it
        has the highest priority then.
        """
        self._wlock.acquire()
        try:
            if key in self._mapping:
                self._remove(key)
            elif len(self._mapping) == self.capacity:
                # cache is full: evict the least recently used entry
                del self._mapping[self._popleft()]
            self._append(key)
            self._mapping[key] = value
        finally:
            self._wlock.release()

    def __delitem__(self, key):
        """Remove an item from the cache dict.

        Raise a `KeyError` if it does not exist.
        """
        self._wlock.acquire()
        try:
            del self._mapping[key]
            try:
                self._remove(key)
            except ValueError:
                # __getitem__ is not locked, it might happen
                pass
        finally:
            self._wlock.release()

    def items(self):
        """Return a list of items."""
        # most recently used first
        result = [(key, self._mapping[key]) for key in list(self._queue)]
        result.reverse()
        return result

    def iteritems(self):
        """Iterate over all items."""
        return iter(self.items())

    def values(self):
        """Return a list of all values."""
        return [x[1] for x in self.items()]

    # NOTE(review): method name is 'itervalue' (singular, sic)
    def itervalue(self):
        """Iterate over all values."""
        return iter(self.values())

    def keys(self):
        """Return a list of all keys ordered by most recent usage."""
        return list(self)

    def iterkeys(self):
        """Iterate over all keys in the cache dict, ordered by
        the most recent usage.
        """
        return reversed(tuple(self._queue))

    __iter__ = iterkeys

    def __reversed__(self):
        """Iterate over the values in the cache dict, oldest items
        coming first.
        """
        return iter(tuple(self._queue))

    __copy__ = copy
# register the LRU cache as mutable mapping if possible
try:
    try:
        # Python 3.3+: the ABCs live in collections.abc; the aliases in
        # plain `collections` were removed in Python 3.10.
        from collections.abc import MutableMapping
    except ImportError:
        # Python 2 fallback
        from collections import MutableMapping
    MutableMapping.register(LRUCache)
except ImportError:
    pass
@implements_iterator
class Cycler(object):
    """A cycle helper for templates."""

    def __init__(self, *items):
        if not items:
            raise RuntimeError('at least one item has to be provided')
        self.items = items
        self.reset()

    def reset(self):
        """Rewind the cycle to the first item."""
        self.pos = 0

    @property
    def current(self):
        """The item the cycle currently points at."""
        return self.items[self.pos]

    def __next__(self):
        """Return the current item and advance, wrapping around."""
        item = self.items[self.pos]
        self.pos = (self.pos + 1) % len(self.items)
        return item
class Joiner(object):
    """A joining helper for templates.

    Returns an empty string the first time it is called and the
    configured separator on every call after that.
    """

    def __init__(self, sep=u', '):
        self.sep = sep
        self.used = False

    def __call__(self):
        if self.used:
            return self.sep
        self.used = True
        return u''
# Imported here because that's where it was in the past
from markupsafe import Markup, escape, soft_unicode
| gpl-2.0 |
Sweet-Peas/mbed | workspace_tools/host_tests/tcpecho_client.py | 52 | 1874 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
import string, random
from time import time
from private_settings import SERVER_ADDRESS
ECHO_PORT = 7    # standard TCP echo service port
LEN_PACKET = 127    # payload bytes per packet
N_PACKETS = 5000    # round trips per test run
# total bits transferred: payload * packets * 8 bits, doubled because
# every packet is both sent and echoed back
TOT_BITS = float(LEN_PACKET * N_PACKETS * 8) * 2
MEGA = float(1024 * 1024)    # divisor to express bits as Mbits
UPDATE_STEP = (N_PACKETS/10)    # progress printed every 10% of the run
class TCP_EchoClient:
    """TCP echo throughput tester (Python 2 syntax).

    Connects to the echo service on *host* and measures round-trip
    throughput by sending N_PACKETS random payloads and reading each
    one back.
    """
    def __init__(self, host):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect((host, ECHO_PORT))
        # one random payload generated up front and reused for every
        # round trip
        self.packet = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(LEN_PACKET))

    def __packet(self):
        # send one packet and read the echo back
        # Comment out the checks when measuring the throughput
        # self.packet = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(LEN_PACKET))
        self.s.send(self.packet)
        # NOTE(review): 'data' is only consumed by the commented-out
        # assert below
        data = self.s.recv(LEN_PACKET)
        # assert self.packet == data, "packet error:\n%s\n%s\n" % (self.packet, data)

    def test(self):
        """Run one measurement pass and print the throughput in Mbits/s."""
        start = time()
        for i in range(N_PACKETS):
            if (i % UPDATE_STEP) == 0: print '%.2f%%' % ((float(i)/float(N_PACKETS)) * 100.)
            self.__packet()
        t = time() - start
        print 'Throughput: (%.2f)Mbits/s' % ((TOT_BITS / t)/MEGA)

    def __del__(self):
        # close the socket when the client is garbage collected
        self.s.close()
# Run the throughput test against the configured server forever; a new
# client (and therefore a new connection) is created for every pass.
while True:
    e = TCP_EchoClient(SERVER_ADDRESS)
    e.test()
| apache-2.0 |
TsinghuaX/edx-platform | cms/djangoapps/contentstore/views/helpers.py | 11 | 1043 | from django.http import HttpResponse
from django.shortcuts import redirect
from mitxmako.shortcuts import render_to_string, render_to_response
__all__ = ['edge', 'event', 'landing']
# points to the temporary course landing page with log in and sign up
def landing(request, org, course, coursename):
    """Render the temporary course landing page.

    The org/course/coursename URL parts are accepted but not used by
    the static landing template.
    """
    return render_to_response('temp-course-landing.html', {})
# points to the temporary edge page
def edge(request):
    """Redirect requests for the temporary edge page to the site root."""
    return redirect('/')
def event(request):
    '''
    A noop to swallow the analytics call so that cms methods don't spook and poor developers looking at
    console logs don't get distracted :-)
    '''
    # 204 No Content acknowledges the tracking call without a body
    return HttpResponse(status=204)
def render_from_lms(template_name, dictionary, context=None, namespace='main'):
    """
    Render a template using the LMS MAKO_TEMPLATES
    """
    # prefix the namespace so the LMS template lookup ("lms.<namespace>")
    # is used instead of the CMS one
    return render_to_string(template_name, dictionary, context, namespace="lms." + namespace)
def _xmodule_recurse(item, action):
for child in item.get_children():
_xmodule_recurse(child, action)
action(item)
| agpl-3.0 |
openstack-infra/shade | shade/tests/unit/test_floating_ip_neutron.py | 1 | 41101 | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_floating_ip_neutron
----------------------------------
Tests Floating IP resource methods for Neutron
"""
import copy
import datetime
import munch
from shade import exc
from shade.tests import fakes
from shade.tests.unit import base
class TestFloatingIP(base.RequestsMockTestCase):
mock_floating_ip_list_rep = {
'floatingips': [
{
'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f',
'tenant_id': '4969c491a3c74ee4af974e6d800c62de',
'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57',
'fixed_ip_address': '10.0.0.4',
'floating_ip_address': '172.24.4.229',
'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac',
'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda7',
'status': 'ACTIVE'
},
{
'router_id': None,
'tenant_id': '4969c491a3c74ee4af974e6d800c62de',
'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57',
'fixed_ip_address': None,
'floating_ip_address': '203.0.113.30',
'port_id': None,
'id': '61cea855-49cb-4846-997d-801b70c71bdd',
'status': 'DOWN'
}
]
}
mock_floating_ip_new_rep = {
'floatingip': {
'fixed_ip_address': '10.0.0.4',
'floating_ip_address': '172.24.4.229',
'floating_network_id': 'my-network-id',
'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8',
'port_id': None,
'router_id': None,
'status': 'ACTIVE',
'tenant_id': '4969c491a3c74ee4af974e6d800c62df'
}
}
mock_floating_ip_port_rep = {
'floatingip': {
'fixed_ip_address': '10.0.0.4',
'floating_ip_address': '172.24.4.229',
'floating_network_id': 'my-network-id',
'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8',
'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac',
'router_id': None,
'status': 'ACTIVE',
'tenant_id': '4969c491a3c74ee4af974e6d800c62df'
}
}
mock_get_network_rep = {
'status': 'ACTIVE',
'subnets': [
'54d6f61d-db07-451c-9ab3-b9609b6b6f0b'
],
'name': 'my-network',
'provider:physical_network': None,
'admin_state_up': True,
'tenant_id': '4fd44f30292945e481c7b8a0c8908869',
'provider:network_type': 'local',
'router:external': True,
'shared': True,
'id': 'my-network-id',
'provider:segmentation_id': None
}
mock_search_ports_rep = [
{
'status': 'ACTIVE',
'binding:host_id': 'devstack',
'name': 'first-port',
'created_at': datetime.datetime.now().isoformat(),
'allowed_address_pairs': [],
'admin_state_up': True,
'network_id': '70c1db1f-b701-45bd-96e0-a313ee3430b3',
'tenant_id': '',
'extra_dhcp_opts': [],
'binding:vif_details': {
'port_filter': True,
'ovs_hybrid_plug': True
},
'binding:vif_type': 'ovs',
'device_owner': 'compute:None',
'mac_address': 'fa:16:3e:58:42:ed',
'binding:profile': {},
'binding:vnic_type': 'normal',
'fixed_ips': [
{
'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062',
'ip_address': u'172.24.4.2'
}
],
'id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac',
'security_groups': [],
'device_id': 'server-id'
}
]
    def assertAreInstances(self, elements, elem_type):
        """Assert that every element of *elements* is an *elem_type* instance."""
        for e in elements:
            self.assertIsInstance(e, elem_type)
    def setUp(self):
        """Build a fake server and a normalized floating IP fixture."""
        super(TestFloatingIP, self).setUp()
        self.fake_server = fakes.make_fake_server(
            'server-id', '', 'ACTIVE',
            addresses={u'test_pnztt_net': [{
                u'OS-EXT-IPS:type': u'fixed',
                u'addr': '192.0.2.129',
                u'version': 4,
                u'OS-EXT-IPS-MAC:mac_addr':
                u'fa:16:3e:ae:7d:42'}]})
        # first (attached) FIP from the canned listing, normalized
        self.floating_ip = self.cloud._normalize_floating_ips(
            self.mock_floating_ip_list_rep['floatingips'])[0]
    def test_float_no_status(self):
        """A FIP payload without a 'status' key normalizes to 'UNKNOWN'."""
        floating_ips = [
            {
                'fixed_ip_address': '10.0.0.4',
                'floating_ip_address': '172.24.4.229',
                'floating_network_id': 'my-network-id',
                'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8',
                'port_id': None,
                'router_id': None,
                'tenant_id': '4969c491a3c74ee4af974e6d800c62df'
            }
        ]
        normalized = self.cloud._normalize_floating_ips(floating_ips)
        self.assertEqual('UNKNOWN', normalized[0]['status'])
    def test_list_floating_ips(self):
        """list_floating_ips returns every FIP from the Neutron listing."""
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/floatingips.json',
                 json=self.mock_floating_ip_list_rep)])
        floating_ips = self.cloud.list_floating_ips()
        self.assertIsInstance(floating_ips, list)
        self.assertAreInstances(floating_ips, dict)
        self.assertEqual(2, len(floating_ips))
        self.assert_calls()
    def test_list_floating_ips_with_filters(self):
        """Filters are forwarded to Neutron as query-string parameters."""
        self.register_uris([
            dict(method='GET',
                 uri=('https://network.example.com/v2.0/floatingips.json?'
                      'Foo=42'),
                 json={'floatingips': []})])
        self.cloud.list_floating_ips(filters={'Foo': 42})
        self.assert_calls()
    def test_search_floating_ips(self):
        """search_floating_ips filters client-side; only the detached FIP
        matches attached=False."""
        self.register_uris([
            dict(method='GET',
                 uri=('https://network.example.com/v2.0/floatingips.json'),
                 json=self.mock_floating_ip_list_rep)])
        floating_ips = self.cloud.search_floating_ips(
            filters={'attached': False})
        self.assertIsInstance(floating_ips, list)
        self.assertAreInstances(floating_ips, dict)
        self.assertEqual(1, len(floating_ips))
        self.assert_calls()
    def test_get_floating_ip(self):
        """get_floating_ip finds a FIP by id and normalizes its fields."""
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/floatingips.json',
                 json=self.mock_floating_ip_list_rep)])
        floating_ip = self.cloud.get_floating_ip(
            id='2f245a7b-796b-4f26-9cf9-9e82d248fda7')
        self.assertIsInstance(floating_ip, dict)
        self.assertEqual('172.24.4.229', floating_ip['floating_ip_address'])
        # normalization exposes tenant_id under both names
        self.assertEqual(
            self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'],
            floating_ip['project_id']
        )
        self.assertEqual(
            self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'],
            floating_ip['tenant_id']
        )
        self.assertIn('location', floating_ip)
        self.assert_calls()
    def test_get_floating_ip_not_found(self):
        """get_floating_ip returns None when no FIP matches the id."""
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/floatingips.json',
                 json=self.mock_floating_ip_list_rep)])
        floating_ip = self.cloud.get_floating_ip(id='non-existent')
        self.assertIsNone(floating_ip)
        self.assert_calls()
    def test_get_floating_ip_by_id(self):
        """get_floating_ip_by_id fetches a single FIP directly by id."""
        fid = self.mock_floating_ip_new_rep['floatingip']['id']
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/floatingips/'
                     '{id}'.format(id=fid),
                 json=self.mock_floating_ip_new_rep)])
        floating_ip = self.cloud.get_floating_ip_by_id(id=fid)
        self.assertIsInstance(floating_ip, dict)
        self.assertEqual('172.24.4.229', floating_ip['floating_ip_address'])
        self.assertEqual(
            self.mock_floating_ip_new_rep['floatingip']['tenant_id'],
            floating_ip['project_id']
        )
        self.assertEqual(
            self.mock_floating_ip_new_rep['floatingip']['tenant_id'],
            floating_ip['tenant_id']
        )
        self.assertIn('location', floating_ip)
        self.assert_calls()
    def test_create_floating_ip(self):
        """create_floating_ip resolves the network name and POSTs the
        floating_network_id."""
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/networks.json',
                 json={'networks': [self.mock_get_network_rep]}),
            dict(method='POST',
                 uri='https://network.example.com/v2.0/floatingips.json',
                 json=self.mock_floating_ip_new_rep,
                 validate=dict(
                     json={'floatingip': {
                         'floating_network_id': 'my-network-id'}}))
        ])
        ip = self.cloud.create_floating_ip(network='my-network')
        self.assertEqual(
            self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'],
            ip['floating_ip_address'])
        self.assert_calls()
    def test_create_floating_ip_port_bad_response(self):
        """Requesting a port but getting back a port-less FIP raises."""
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/networks.json',
                 json={'networks': [self.mock_get_network_rep]}),
            dict(method='POST',
                 uri='https://network.example.com/v2.0/floatingips.json',
                 json=self.mock_floating_ip_new_rep,
                 validate=dict(
                     json={'floatingip': {
                         'floating_network_id': 'my-network-id',
                         'port_id': u'ce705c24-c1ef-408a-bda3-7bbd946164ab'}}))
        ])
        # Fails because we requested a port and the returned FIP has no port
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud.create_floating_ip,
            network='my-network', port='ce705c24-c1ef-408a-bda3-7bbd946164ab')
        self.assert_calls()
    def test_create_floating_ip_port(self):
        """create_floating_ip passes the port id through and succeeds when
        the returned FIP carries the same port."""
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/networks.json',
                 json={'networks': [self.mock_get_network_rep]}),
            dict(method='POST',
                 uri='https://network.example.com/v2.0/floatingips.json',
                 json=self.mock_floating_ip_port_rep,
                 validate=dict(
                     json={'floatingip': {
                         'floating_network_id': 'my-network-id',
                         'port_id': u'ce705c24-c1ef-408a-bda3-7bbd946164ac'}}))
        ])
        ip = self.cloud.create_floating_ip(
            network='my-network', port='ce705c24-c1ef-408a-bda3-7bbd946164ac')
        self.assertEqual(
            self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'],
            ip['floating_ip_address'])
        self.assert_calls()
    def test_neutron_available_floating_ips(self):
        """
        Test without specifying a network name.
        """
        fips_mock_uri = 'https://network.example.com/v2.0/floatingips.json'
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/networks.json',
                 json={'networks': [self.mock_get_network_rep]}),
            dict(method='GET',
                 uri='https://network.example.com/v2.0/subnets.json',
                 json={'subnets': []}),
            # no unattached FIPs exist, so a new one must be created
            dict(method='GET', uri=fips_mock_uri, json={'floatingips': []}),
            dict(method='POST', uri=fips_mock_uri,
                 json=self.mock_floating_ip_new_rep,
                 validate=dict(json={
                     'floatingip': {
                         'floating_network_id': self.mock_get_network_rep['id']
                     }}))
        ])
        # Test if first network is selected if no network is given
        self.cloud._neutron_available_floating_ips()
        self.assert_calls()
    def test_neutron_available_floating_ips_network(self):
        """
        Test with specifying a network name.
        """
        fips_mock_uri = 'https://network.example.com/v2.0/floatingips.json'
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/networks.json',
                 json={'networks': [self.mock_get_network_rep]}),
            dict(method='GET',
                 uri='https://network.example.com/v2.0/subnets.json',
                 json={'subnets': []}),
            # no unattached FIPs exist, so a new one must be created
            dict(method='GET', uri=fips_mock_uri, json={'floatingips': []}),
            dict(method='POST', uri=fips_mock_uri,
                 json=self.mock_floating_ip_new_rep,
                 validate=dict(json={
                     'floatingip': {
                         'floating_network_id': self.mock_get_network_rep['id']
                     }}))
        ])
        # Test that the explicitly named network is used
        self.cloud._neutron_available_floating_ips(
            network=self.mock_get_network_rep['name']
        )
        self.assert_calls()
    def test_neutron_available_floating_ips_invalid_network(self):
        """
        Test with an invalid network name.
        """
        self.register_uris([
            dict(method='GET',
                 uri='https://network.example.com/v2.0/networks.json',
                 json={'networks': [self.mock_get_network_rep]}),
            dict(method='GET',
                 uri='https://network.example.com/v2.0/subnets.json',
                 json={'subnets': []})
        ])
        # an unknown network name raises before any FIP call is made
        self.assertRaises(
            exc.OpenStackCloudException,
            self.cloud._neutron_available_floating_ips,
            network='INVALID')
        self.assert_calls()
    def test_auto_ip_pool_no_reuse(self):
        """add_ips_to_server with reuse=False always creates a fresh FIP
        on the pool network and attaches it to the server's port."""
        # payloads taken from citycloud
        self.register_uris([
            # list networks: one external pool ('ext-net') and one private
            dict(method='GET',
                 uri='https://network.example.com/v2.0/networks.json',
                 json={"networks": [{
                     "status": "ACTIVE",
                     "subnets": [
                         "df3e17fa-a4b2-47ae-9015-bc93eb076ba2",
                         "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec",
                         "fc541f48-fc7f-48c0-a063-18de6ee7bdd7"],
                     "availability_zone_hints": [],
                     "availability_zones": ["nova"],
                     "name": "ext-net",
                     "admin_state_up": True,
                     "tenant_id": "a564613210ee43708b8a7fc6274ebd63",
                     "tags": [],
                     "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d",  # noqa
                     "mtu": 0,
                     "is_default": False,
                     "router:external": True,
                     "ipv4_address_scope": None,
                     "shared": False,
                     "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf",
                     "description": None
                 }, {
                     "status": "ACTIVE",
                     "subnets": ["f0ad1df5-53ee-473f-b86b-3604ea5591e9"],
                     "availability_zone_hints": [],
                     "availability_zones": ["nova"],
                     "name": "private",
                     "admin_state_up": True,
                     "tenant_id": "65222a4d09ea4c68934fa1028c77f394",
                     "created_at": "2016-10-22T13:46:26",
                     "tags": [],
                     "updated_at": "2016-10-22T13:46:26",
                     "ipv6_address_scope": None,
                     "router:external": False,
                     "ipv4_address_scope": None,
                     "shared": False,
                     "mtu": 1450,
                     "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f",
                     "description": ""
                 }]}),
            # find the server's port on the private network
            dict(method='GET',
                 uri='https://network.example.com/v2.0/ports.json'
                     '?device_id=f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7',
                 json={"ports": [{
                     "status": "ACTIVE",
                     "created_at": "2017-02-06T20:59:45",
                     "description": "",
                     "allowed_address_pairs": [],
                     "admin_state_up": True,
                     "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f",
                     "dns_name": None,
                     "extra_dhcp_opts": [],
                     "mac_address": "fa:16:3e:e8:7f:03",
                     "updated_at": "2017-02-06T20:59:49",
                     "name": "",
                     "device_owner": "compute:None",
                     "tenant_id": "65222a4d09ea4c68934fa1028c77f394",
                     "binding:vnic_type": "normal",
                     "fixed_ips": [{
                         "subnet_id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9",
                         "ip_address": "10.4.0.16"}],
                     "id": "a767944e-057a-47d1-a669-824a21b8fb7b",
                     "security_groups": [
                         "9fb5ba44-5c46-4357-8e60-8b55526cab54"],
                     "device_id": "f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7",
                 }]}),
            # reuse=False: a brand-new FIP is created and bound to the port
            dict(method='POST',
                 uri='https://network.example.com/v2.0/floatingips.json',
                 json={"floatingip": {
                     "router_id": "9de9c787-8f89-4a53-8468-a5533d6d7fd1",
                     "status": "DOWN",
                     "description": "",
                     "dns_domain": "",
                     "floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf",  # noqa
                     "fixed_ip_address": "10.4.0.16",
                     "floating_ip_address": "89.40.216.153",
                     "port_id": "a767944e-057a-47d1-a669-824a21b8fb7b",
                     "id": "e69179dc-a904-4c9a-a4c9-891e2ecb984c",
                     "dns_name": "",
                     "tenant_id": "65222a4d09ea4c68934fa1028c77f394"
                 }},
                 validate=dict(json={"floatingip": {
                     "floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf",  # noqa
                     "fixed_ip_address": "10.4.0.16",
                     "port_id": "a767944e-057a-47d1-a669-824a21b8fb7b",
                 }})),
            # server re-fetch shows the floating address attached
            dict(method='GET',
                 uri='{endpoint}/servers/detail'.format(
                     endpoint=fakes.COMPUTE_ENDPOINT),
                 json={"servers": [{
                     "status": "ACTIVE",
                     "updated": "2017-02-06T20:59:49Z",
                     "addresses": {
                         "private": [{
                             "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03",
                             "version": 4,
                             "addr": "10.4.0.16",
                             "OS-EXT-IPS:type": "fixed"
                         }, {
                             "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03",
                             "version": 4,
                             "addr": "89.40.216.153",
                             "OS-EXT-IPS:type": "floating"
                         }]},
                     "key_name": None,
                     "image": {"id": "95e4c449-8abf-486e-97d9-dc3f82417d2d"},
                     "OS-EXT-STS:task_state": None,
                     "OS-EXT-STS:vm_state": "active",
                     "OS-SRV-USG:launched_at": "2017-02-06T20:59:48.000000",
                     "flavor": {"id": "2186bd79-a05e-4953-9dde-ddefb63c88d4"},
                     "id": "f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7",
                     "security_groups": [{"name": "default"}],
                     "OS-SRV-USG:terminated_at": None,
                     "OS-EXT-AZ:availability_zone": "nova",
                     "user_id": "c17534835f8f42bf98fc367e0bf35e09",
                     "name": "testmt",
                     "created": "2017-02-06T20:59:44Z",
                     "tenant_id": "65222a4d09ea4c68934fa1028c77f394",
                     "OS-DCF:diskConfig": "MANUAL",
                     "os-extended-volumes:volumes_attached": [],
                     "accessIPv4": "",
                     "accessIPv6": "",
                     "progress": 0,
                     "OS-EXT-STS:power_state": 1,
                     "config_drive": "",
                     "metadata": {}
                 }]}),
            # second network listing during address normalization
            dict(method='GET',
                 uri='https://network.example.com/v2.0/networks.json',
                 json={"networks": [{
                     "status": "ACTIVE",
                     "subnets": [
                         "df3e17fa-a4b2-47ae-9015-bc93eb076ba2",
                         "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec",
                         "fc541f48-fc7f-48c0-a063-18de6ee7bdd7"],
                     "availability_zone_hints": [],
                     "availability_zones": ["nova"],
                     "name": "ext-net",
                     "admin_state_up": True,
                     "tenant_id": "a564613210ee43708b8a7fc6274ebd63",
                     "tags": [],
                     "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d",  # noqa
                     "mtu": 0,
                     "is_default": False,
                     "router:external": True,
                     "ipv4_address_scope": None,
                     "shared": False,
                     "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf",
                     "description": None
                 }, {
                     "status": "ACTIVE",
                     "subnets": ["f0ad1df5-53ee-473f-b86b-3604ea5591e9"],
                     "availability_zone_hints": [],
                     "availability_zones": ["nova"],
                     "name": "private",
                     "admin_state_up": True,
                     "tenant_id": "65222a4d09ea4c68934fa1028c77f394",
                     "created_at": "2016-10-22T13:46:26",
                     "tags": [],
                     "updated_at": "2016-10-22T13:46:26",
                     "ipv6_address_scope": None,
                     "router:external": False,
                     "ipv4_address_scope": None,
                     "shared": False,
                     "mtu": 1450,
                     "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f",
                     "description": ""
                 }]}),
            dict(method='GET',
                 uri='https://network.example.com/v2.0/subnets.json',
                 json={"subnets": [{
                     "description": "",
                     "enable_dhcp": True,
                     "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f",
                     "tenant_id": "65222a4d09ea4c68934fa1028c77f394",
                     "created_at": "2016-10-22T13:46:26",
                     "dns_nameservers": [
                         "89.36.90.101",
                         "89.36.90.102"],
                     "updated_at": "2016-10-22T13:46:26",
                     "gateway_ip": "10.4.0.1",
                     "ipv6_ra_mode": None,
                     "allocation_pools": [{
                         "start": "10.4.0.2",
                         "end": "10.4.0.200"}],
                     "host_routes": [],
                     "ip_version": 4,
                     "ipv6_address_mode": None,
                     "cidr": "10.4.0.0/24",
                     "id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9",
                     "subnetpool_id": None,
                     "name": "private-subnet-ipv4",
                 }]})])
        self.cloud.add_ips_to_server(
            munch.Munch(
                id='f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7',
                addresses={
                    "private": [{
                        "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03",
                        "version": 4,
                        "addr": "10.4.0.16",
                        "OS-EXT-IPS:type": "fixed"
                    }]}),
            ip_pool='ext-net', reuse=False)
        self.assert_calls()
    def test_available_floating_ip_new(self):
        """available_floating_ip creates a new FIP when none are free."""
        self.register_uris([
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'networks.json']),
                 json={'networks': [self.mock_get_network_rep]}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'subnets.json']),
                 json={'subnets': []}),
            # empty listing: no existing FIP to hand out
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'floatingips.json']),
                 json={'floatingips': []}),
            dict(method='POST',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'floatingips.json']),
                 validate=dict(
                     json={'floatingip': {
                         'floating_network_id': 'my-network-id'}}),
                 json=self.mock_floating_ip_new_rep)
        ])
        ip = self.cloud.available_floating_ip(network='my-network')
        self.assertEqual(
            self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'],
            ip['floating_ip_address'])
        self.assert_calls()
    def test_delete_floating_ip_existing(self):
        """delete_floating_ip retries the DELETE until the FIP is gone
        from the listing."""
        fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7'
        fake_fip = {
            'id': fip_id,
            'floating_ip_address': '172.99.106.167',
            'status': 'ACTIVE',
        }
        self.register_uris([
            # two DELETE/GET rounds still show the FIP, the third listing
            # is finally empty
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
                 json={}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'floatingips.json']),
                 json={'floatingips': [fake_fip]}),
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
                 json={}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'floatingips.json']),
                 json={'floatingips': [fake_fip]}),
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
                 json={}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'floatingips.json']),
                 json={'floatingips': []}),
        ])
        self.assertTrue(
            self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2))
        self.assert_calls()
    def test_delete_floating_ip_existing_down(self):
        """A FIP that reappears with status DOWN counts as deleted."""
        fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7'
        fake_fip = {
            'id': fip_id,
            'floating_ip_address': '172.99.106.167',
            'status': 'ACTIVE',
        }
        down_fip = {
            'id': fip_id,
            'floating_ip_address': '172.99.106.167',
            'status': 'DOWN',
        }
        self.register_uris([
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
                 json={}),
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'floatingips.json']),
                 json={'floatingips': [fake_fip]}),
            dict(method='DELETE',
                 uri=self.get_mock_url(
                     'network', 'public',
                     append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
                 json={}),
            # second listing shows the FIP DOWN -> treated as success
            dict(method='GET',
                 uri=self.get_mock_url(
                     'network', 'public', append=['v2.0', 'floatingips.json']),
                 json={'floatingips': [down_fip]}),
        ])
        self.assertTrue(
            self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2))
        self.assert_calls()
def test_delete_floating_ip_existing_no_delete(self):
    """If the FIP is still ACTIVE in every listing after all retries
    (retry=2 -> three DELETE/GET rounds), delete_floating_ip raises
    OpenStackCloudException."""
    fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7'
    fake_fip = {
        'id': fip_id,
        'floating_ip_address': '172.99.106.167',
        'status': 'ACTIVE',
    }
    # The FIP never disappears from the listings.
    self.register_uris([
        dict(method='DELETE',
             uri=self.get_mock_url(
                 'network', 'public',
                 append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
             json={}),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingips': [fake_fip]}),
        dict(method='DELETE',
             uri=self.get_mock_url(
                 'network', 'public',
                 append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
             json={}),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingips': [fake_fip]}),
        dict(method='DELETE',
             uri=self.get_mock_url(
                 'network', 'public',
                 append=['v2.0', 'floatingips/{0}.json'.format(fip_id)]),
             json={}),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingips': [fake_fip]}),
    ])
    self.assertRaises(
        exc.OpenStackCloudException,
        self.cloud.delete_floating_ip,
        floating_ip_id=fip_id, retry=2)
    self.assert_calls()
def test_delete_floating_ip_not_found(self):
    """A 404 on the DELETE means the FIP never existed; the call
    returns False instead of raising."""
    self.register_uris([
        dict(method='DELETE',
             uri=('https://network.example.com/v2.0/floatingips/'
                  'a-wild-id-appears.json'),
             status_code=404)])
    ret = self.cloud.delete_floating_ip(
        floating_ip_id='a-wild-id-appears')
    self.assertFalse(ret)
    self.assert_calls()
def test_attach_ip_to_server(self):
    """_attach_ip_to_server looks up the server's port by device_id and
    PUTs that port id plus its fixed address onto the floating IP."""
    fip = self.mock_floating_ip_list_rep['floatingips'][0]
    device_id = self.fake_server['id']
    self.register_uris([
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'ports.json'],
                 qs_elements=["device_id={0}".format(device_id)]),
             json={'ports': self.mock_search_ports_rep}),
        dict(method='PUT',
             uri=self.get_mock_url(
                 'network', 'public',
                 append=['v2.0', 'floatingips/{0}.json'.format(
                     fip['id'])]),
             json={'floatingip': fip},
             # The update body must carry the discovered port binding.
             validate=dict(
                 json={'floatingip': {
                     'port_id': self.mock_search_ports_rep[0]['id'],
                     'fixed_ip_address': self.mock_search_ports_rep[0][
                         'fixed_ips'][0]['ip_address']}})),
    ])
    self.cloud._attach_ip_to_server(
        server=self.fake_server,
        floating_ip=self.floating_ip)
    self.assert_calls()
def test_add_ip_refresh_timeout(self):
    """_add_auto_ip with wait=True and a tiny timeout creates a FIP,
    times out waiting for the attachment to be reflected, cleans the
    FIP up again (DELETE) and raises OpenStackCloudTimeout."""
    device_id = self.fake_server['id']
    self.register_uris([
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public',
                 append=['v2.0', 'networks.json']),
             json={'networks': [self.mock_get_network_rep]}),
        dict(method='GET',
             uri='https://network.example.com/v2.0/subnets.json',
             json={'subnets': []}),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'ports.json'],
                 qs_elements=["device_id={0}".format(device_id)]),
             json={'ports': self.mock_search_ports_rep}),
        # FIP is created directly bound to the server's port.
        dict(method='POST',
             uri='https://network.example.com/v2.0/floatingips.json',
             json={'floatingip': self.floating_ip},
             validate=dict(
                 json={'floatingip': {
                     'floating_network_id': 'my-network-id',
                     'fixed_ip_address': self.mock_search_ports_rep[0][
                         'fixed_ips'][0]['ip_address'],
                     'port_id': self.mock_search_ports_rep[0]['id']}})),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingips': [self.floating_ip]}),
        # Timeout cleanup: the freshly created FIP is deleted again.
        dict(method='DELETE',
             uri=self.get_mock_url(
                 'network', 'public',
                 append=['v2.0', 'floatingips/{0}.json'.format(
                     self.floating_ip['id'])]),
             json={}),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingips': []}),
    ])
    self.assertRaises(
        exc.OpenStackCloudTimeout,
        self.cloud._add_auto_ip,
        server=self.fake_server,
        wait=True, timeout=0.01,
        reuse=False)
    self.assert_calls()
def test_detach_ip_from_server(self):
    """Detaching finds the attached FIP and PUTs port_id=None on it."""
    fip = self.mock_floating_ip_new_rep['floatingip']
    # Simulate a FIP currently bound to a server port.
    attached_fip = copy.copy(fip)
    attached_fip['port_id'] = 'server-port-id'
    self.register_uris([
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingips': [attached_fip]}),
        dict(method='PUT',
             uri=self.get_mock_url(
                 'network', 'public',
                 append=['v2.0', 'floatingips/{0}.json'.format(
                     fip['id'])]),
             json={'floatingip': fip},
             validate=dict(
                 json={'floatingip': {'port_id': None}}))
    ])
    self.cloud.detach_ip_from_server(
        server_id='server-id',
        floating_ip_id=fip['id'])
    self.assert_calls()
def test_add_ip_from_pool(self):
    """_add_ip_from_pool creates a FIP on the requested network and
    attaches it to the server's port, returning the server."""
    network = self.mock_get_network_rep
    fip = self.mock_floating_ip_new_rep['floatingip']
    fixed_ip = self.mock_search_ports_rep[0]['fixed_ips'][0]['ip_address']
    port_id = self.mock_search_ports_rep[0]['id']
    self.register_uris([
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'networks.json']),
             json={'networks': [network]}),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'subnets.json']),
             json={'subnets': []}),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingips': [fip]}),
        # A new FIP is requested on the pool network.
        dict(method='POST',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingip': fip},
             validate=dict(
                 json={'floatingip': {
                     'floating_network_id': network['id']}})),
        dict(method="GET",
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'ports.json'],
                 qs_elements=[
                     "device_id={0}".format(self.fake_server['id'])]),
             json={'ports': self.mock_search_ports_rep}),
        # ... then bound to the server's port/fixed address.
        dict(method='PUT',
             uri=self.get_mock_url(
                 'network', 'public',
                 append=['v2.0', 'floatingips/{0}.json'.format(
                     fip['id'])]),
             json={'floatingip': fip},
             validate=dict(
                 json={'floatingip': {
                     'fixed_ip_address': fixed_ip,
                     'port_id': port_id}})),
    ])
    server = self.cloud._add_ip_from_pool(
        server=self.fake_server,
        network=network['id'],
        fixed_address=fixed_ip)
    self.assertEqual(server, self.fake_server)
    self.assert_calls()
def test_cleanup_floating_ips(self):
    """delete_unattached_floating_ips deletes only FIPs with no port;
    the attached one must survive."""
    floating_ips = [{
        "id": "this-is-a-floating-ip-id",
        "fixed_ip_address": None,
        "internal_network": None,
        "floating_ip_address": "203.0.113.29",
        "network": "this-is-a-net-or-pool-id",
        "port_id": None,
        "status": "ACTIVE"
    }, {
        "id": "this-is-an-attached-floating-ip-id",
        "fixed_ip_address": None,
        "internal_network": None,
        "floating_ip_address": "203.0.113.29",
        "network": "this-is-a-net-or-pool-id",
        "attached": True,
        "port_id": "this-is-id-of-port-with-fip",
        "status": "ACTIVE"
    }]
    self.register_uris([
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingips': floating_ips}),
        # Only the unattached FIP (index 0) gets deleted.
        dict(method='DELETE',
             uri=self.get_mock_url(
                 'network', 'public',
                 append=['v2.0', 'floatingips/{0}.json'.format(
                     floating_ips[0]['id'])]),
             json={}),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingips': [floating_ips[1]]}),
    ])
    self.cloud.delete_unattached_floating_ips()
    self.assert_calls()
def test_create_floating_ip_no_port(self):
    """If the FIP returned by the create call has port_id None (i.e. it
    did not get bound to the server's port), _neutron_create_floating_ip
    raises OpenStackCloudException."""
    server_port = {
        "id": "port-id",
        "device_id": "some-server",
        'created_at': datetime.datetime.now().isoformat(),
        'fixed_ips': [
            {
                'subnet_id': 'subnet-id',
                'ip_address': '172.24.4.2'
            }
        ],
    }
    # Created FIP comes back unbound.
    floating_ip = {
        "id": "floating-ip-id",
        "port_id": None
    }
    self.register_uris([
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'networks.json']),
             json={'networks': [self.mock_get_network_rep]}),
        dict(method='GET',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'subnets.json']),
             json={'subnets': []}),
        dict(method="GET",
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'ports.json'],
                 qs_elements=['device_id=some-server']),
             json={'ports': [server_port]}),
        dict(method='POST',
             uri=self.get_mock_url(
                 'network', 'public', append=['v2.0', 'floatingips.json']),
             json={'floatingip': floating_ip})
    ])
    self.assertRaises(
        exc.OpenStackCloudException,
        self.cloud._neutron_create_floating_ip,
        server=dict(id='some-server'))
    self.assert_calls()
| apache-2.0 |
kjw0106/GCM_app_server | venv/lib/python2.7/site-packages/werkzeug/contrib/testtools.py | 365 | 2453 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.testtools
~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements extended wrappers for simplified testing.
`TestResponse`
A response wrapper which adds various cached attributes for
simplified assertions on various content types.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from werkzeug.utils import cached_property, import_string
from werkzeug.wrappers import Response
from warnings import warn
# Emit the deprecation warning once, at import time, so any code that
# still pulls this module in gets flagged.
warn(DeprecationWarning('werkzeug.contrib.testtools is deprecated and '
    'will be removed with Werkzeug 1.0'))
class ContentAccessors(object):
    """
    A mixin class for response objects that provides a couple of useful
    accessors for unittesting.
    """

    def xml(self):
        """Get an etree if possible."""
        if 'xml' not in self.mimetype:
            raise AttributeError(
                'Not a XML response (Content-Type: %s)'
                % self.mimetype)
        # Try the stdlib location first, then the historical standalone
        # ElementTree distributions.
        for module in ['xml.etree.ElementTree', 'ElementTree',
                       'elementtree.ElementTree']:
            etree = import_string(module, silent=True)
            if etree is not None:
                # NOTE(review): this parses ``self.body`` while the lxml
                # and json accessors below read ``self.data`` -- verify
                # which attribute the response class really exposes.
                return etree.XML(self.body)
        raise RuntimeError('You must have ElementTree installed '
                           'to use TestResponse.xml')
    # pre-decorator-style cached_property: computed once, then cached
    xml = cached_property(xml)

    def lxml(self):
        """Get an lxml etree if possible."""
        if ('html' not in self.mimetype and 'xml' not in self.mimetype):
            raise AttributeError('Not an HTML/XML response')
        from lxml import etree
        try:
            from lxml.html import fromstring
        except ImportError:
            # older lxml without lxml.html: fall back to the core parser
            fromstring = etree.HTML
        if self.mimetype == 'text/html':
            return fromstring(self.data)
        return etree.XML(self.data)
    lxml = cached_property(lxml)

    def json(self):
        """Get the result of simplejson.loads if possible."""
        if 'json' not in self.mimetype:
            raise AttributeError('Not a JSON response')
        # Prefer simplejson when installed, otherwise use the stdlib.
        try:
            from simplejson import loads
        except ImportError:
            from json import loads
        return loads(self.data)
    json = cached_property(json)
# Concrete response class combining werkzeug's Response with the
# xml/lxml/json test accessors above.
class TestResponse(Response, ContentAccessors):
    """Pass this to `werkzeug.test.Client` for easier unittesting."""
| mit |
bblais/plasticity | setup.py | 1 | 2798 | # this is from https://github.com/cython/cython/wiki/PackageHierarchy
import sys, os, stat, subprocess
from distutils.core import setup
from Cython.Distutils import build_ext
from distutils.extension import Extension
# we'd better have Cython installed, or it's a no-go
try:
    from Cython.Distutils import build_ext
except ImportError:
    # Catch only ImportError: the original bare ``except:`` also swallowed
    # SystemExit/KeyboardInterrupt, masking unrelated failures.
    print("You don't seem to have Cython installed. Please get a")
    print("copy from www.cython.org and install it")
    sys.exit(1)

import numpy
def get_version(package):
    """Return the version string declared in ``<package>/version.py``.

    The file is scanned for a line starting with ``version=`` and that
    line is executed in an isolated namespace, so the module itself is
    never imported (it may have heavy dependencies).  If several lines
    match, the last one wins (same as the original behavior).

    Raises:
        ValueError: if no ``version=`` line is found (the original code
            raised a confusing KeyError from ``exec('')`` in this case).
    """
    version_line = ''
    with open('%s/version.py' % package) as fid:
        for line in fid:
            if line.startswith('version='):
                version_line = line
    if not version_line:
        raise ValueError(
            "no 'version=' line found in %s/version.py" % package)
    print(version_line)
    namespace = {}
    exec(version_line, namespace)
    return namespace['version']
# scan the directory for extension files, converting
# them to extension names in dotted notation
def scandir(dir, files=None):
    """Recursively collect dotted module names for every ``.pyx`` under *dir*.

    Fixes the classic mutable-default-argument bug: the original
    ``files=[]`` was shared across top-level calls, so invoking scandir
    twice accumulated stale entries from the first run.
    """
    if files is None:
        files = []
    for file in os.listdir(dir):
        path = os.path.join(dir, file)
        if os.path.isfile(path) and path.endswith(".pyx"):
            # 'pkg/sub/mod.pyx' -> 'pkg.sub.mod'
            files.append(path.replace(os.path.sep, ".")[:-4])
        elif os.path.isdir(path):
            files = scandir(path, files)
    return files
def cleanc(dir):
    """Remove the generated ``.c`` sibling of every ``.pyx`` file under
    *dir*, recursing into subdirectories, so Cython regenerates them."""
    for entry in os.listdir(dir):
        full = os.path.join(dir, entry)
        if os.path.isdir(full):
            cleanc(full)
            continue
        if not (os.path.isfile(full) and full.endswith(".pyx")):
            continue
        generated = os.path.splitext(full)[0] + '.c'
        if os.path.isfile(generated):
            os.remove(generated)
            print("~~", generated)
# generate an Extension object from its dotted name
def makeExtension(extName):
    """Build a distutils Extension for the dotted module name *extName*.

    Each extension is compiled together with plasticity/randomkit.c
    (presumably shared RNG support code -- confirm).
    """
    extPath = extName.replace(".", os.path.sep)+".pyx"
    # top-level package folder, added to the include path below
    folder=extName.split(".")[0]
    return Extension(
        extName,
        [extPath,'plasticity/randomkit.c'],
        include_dirs = [numpy.get_include(), ".", "%s/" % folder], # adding the '.' to include_dirs is CRUCIAL!!
        extra_compile_args = ["-O3", "-Wall"],
        extra_link_args = ['-g'],
        )
# get the list of extensions
extNames = scandir("plasticity")
print(extNames)
# drop stale generated .c files so Cython regenerates them on build
cleanc("plasticity")
# and build up the set of Extension objects
print(extNames)
extensions = [makeExtension(name) for name in extNames]
# finally, we can pass all this to distutils
setup(
  name="plasticity",
  version=get_version('plasticity'),
  description="Synaptic Plasticity in Rate-Based Neurons",
  author="Brian Blais",
  packages=['plasticity',
            'plasticity.dialogs',
            'plasticity.dialogs.waxy'],
  scripts=['plasticity/Plasticity.pyw'],
  package_data={'plasticity': ['images/*.*','dialogs/images/*.*',
        'dialogs/images/learning_rules/*.*','hdf5/*.*']},
  ext_modules=extensions,
  cmdclass = {'build_ext': build_ext},
)
| mit |
dslackw/sbo-templates | sbo_templates/__metadata__.py | 1 | 1203 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
# __metadata__.py file is part of sbo-templates.
# Copyright 2015-2021 Dimitris Zlatanidis <d.zlatanidis@gmail.com>
# All rights reserved.
# SBo tool for managing templates.
# https://gitlab.com/dslackw/sbo-templates
# sbo-templates is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Distribution metadata, importable without pulling in the rest of the
# package.
__prog__ = "sbo-templates"
__author__ = "dslackw"
# Bug fix: this was the bare expression ``2015-2021`` (i.e. the integer
# -6); a copyright range must be a string.
__copyright__ = "2015-2021"
__version_info__ = (1, 3, 2)
__version__ = "{0}.{1}.{2}".format(*__version_info__)
__license__ = "GNU General Public License v3 (GPLv3)"
__email__ = "d.zlatanidis@gmail.com"
__website__ = "https://gitlab.com/dslackw/sbo-templates"
lowitty/sendtrap | lib/pysnmp/entity/rfc3413/context.py | 6 | 2373 | # SNMP entity context
from pyasn1.type import univ
from pyasn1.compat.octets import null
from pysnmp import error
from pysnmp import debug
class SnmpContext:
    """Registry mapping SNMP context names to MIB instrumentation.

    The empty (null) context name is always present and maps to the
    engine's default MIB instrumentation controller.
    """

    def __init__(self, snmpEngine, contextEngineId=None):
        # Fetch the engine ID managed object from the framework MIB.
        snmpEngineId,= snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMP-FRAMEWORK-MIB', 'snmpEngineID')
        if contextEngineId is None:
            # Default to local snmpEngineId
            self.contextEngineId = snmpEngineId.syntax
        else:
            self.contextEngineId = snmpEngineId.syntax.clone(contextEngineId)
        debug.logger & debug.flagIns and debug.logger('SnmpContext: contextEngineId \"%r\"' % (self.contextEngineId,))
        self.contextNames = {
            null: snmpEngine.msgAndPduDsp.mibInstrumController # Default name
        }

    def registerContextName(self, contextName, mibInstrum=None):
        """Associate *contextName* with a MIB instrumentation object.

        Raises PySnmpError for duplicate names.  When *mibInstrum* is
        None, the default (null-context) instrumentation is reused.
        """
        contextName = univ.OctetString(contextName).asOctets()
        if contextName in self.contextNames:
            raise error.PySnmpError(
                'Duplicate contextName %s' % contextName
            )
        debug.logger & debug.flagIns and debug.logger('registerContextName: registered contextName %r, mibInstrum %r' % (contextName, mibInstrum))
        if mibInstrum is None:
            self.contextNames[contextName] = self.contextNames[null]
        else:
            self.contextNames[contextName] = mibInstrum

    def unregisterContextName(self, contextName):
        """Drop *contextName* from the registry; unknown names are ignored."""
        contextName = univ.OctetString(contextName).asOctets()
        if contextName in self.contextNames:
            debug.logger & debug.flagIns and debug.logger('unregisterContextName: unregistered contextName %r' % contextName)
            del self.contextNames[contextName]

    def getMibInstrum(self, contextName=null):
        """Return the instrumentation registered for *contextName*.

        Raises PySnmpError if the name is unknown.
        """
        contextName = univ.OctetString(contextName).asOctets()
        if contextName not in self.contextNames:
            debug.logger & debug.flagIns and debug.logger('getMibInstrum: contextName %r not registered' % contextName)
            raise error.PySnmpError(
                'Missing contextName %s' % contextName
            )
        else:
            debug.logger & debug.flagIns and debug.logger('getMibInstrum: contextName %r, mibInstum %r' % (contextName, self.contextNames[contextName]))
            return self.contextNames[contextName]
| mit |
smaiLee/smarthome | tests/test_plugin.py | 2 | 7133 |
import common
import unittest
import lib.plugin
import lib.item
from lib.model.smartplugin import SmartPlugin
import threading
class TestConfig(unittest.TestCase):
    """Exercises plugin loading, multi-instance attribute resolution and
    item/method-trigger wiring against the resources/ fixture files."""

    def props(self, cls):
        # Public (non-underscore) attribute names of a class.
        return [i for i in cls.__dict__.keys() if i[:1] != '_']

    def test_plugins(self):
        """Plugin container lookup plus start/stop lifecycle."""
        plugins = lib.plugin.Plugins(MockSmartHome(), "resources/plugin.conf")
        self.assertIsNone(plugins.get_plugin("wol1"))
        self.assertIsNotNone(plugins._plugins)
        if 0:  # debugging aid, disabled
            for p in plugins._threads:
                print(p.name)
                print(p.plugin)
                print(self.props(p.plugin))
                print(dir(p.plugin))
                import inspect
                print(inspect.getmembers(p.plugin, lambda a: not(inspect.isroutine(a))))
        wolplug = plugins.get_plugin("wol_ww")
        self.assertIsNotNone(wolplug)
        self.assertEqual(wolplug.name, "wol_ww")
        # ident is only assigned once the plugin thread starts
        self.assertIsNone(wolplug.ident)
        plugins.start()
        self.assertEqual(wolplug.ident, wolplug.get_ident())
        self.assertEqual(wolplug.plugin, wolplug.get_implementation())
        self.assertIsNotNone(wolplug.get_ident())
        plugins.stop()
        # print(plugins.get_plugin("wol").get_ident() )

    def test_plugininstance(self):
        """Instance-qualified attributes (``key@instance``) and item
        method-trigger registration."""
        sh = MockSmartHome()
        # load pluginsA
        plugins = lib.plugin.Plugins(sh, "resources/plugin.conf")
        sh._plugins = plugins
        wolplug = plugins.get_plugin("wol")
        self.assertEqual(wolplug.plugin.get_instance_name(), "")
        # NOTE(review): these ``{...}`` literals are *sets*, not dicts --
        # has_iattr apparently only inspects the keys/members.
        config_mock = {'key3', 'value3'}
        self.assertTrue(wolplug.plugin.has_iattr(config_mock, "key3"))
        config_mock = {'key3@*', 'value3'}
        self.assertTrue(wolplug.plugin.has_iattr(config_mock, "key3"))
        config_mock = {'key3@false*', 'value3'}
        self.assertFalse(wolplug.plugin.has_iattr(config_mock, "key3"))
        wolplug = plugins.get_plugin("wol_ww")
        #wolplug.plugin.ALLOW_MULTIINSTANCE= False
        #wolplug.plugin.set_instance_name("")
        self.assertTrue(isinstance(wolplug.plugin, SmartPlugin))
        self.assertTrue(wolplug.plugin.is_multi_instance_capable())
        self.assertEqual(wolplug.plugin.get_instance_name(), "bind")
        config_mock = {'key3@bind', 'value3'}
        self.assertTrue(wolplug.plugin.has_iattr(config_mock, "key3"))
        config_mock = {'key3@*', 'value3'}
        self.assertTrue(wolplug.plugin.has_iattr(config_mock, "key3"))
        config_mock = {'key3@false', 'value3'}
        self.assertFalse(wolplug.plugin.has_iattr(config_mock, "key3"))
        # get_iattr_value: wildcard instance matches ...
        config_mock = {}
        config_mock["key3@*"] = "value3"
        self.assertEqual(wolplug.plugin.get_iattr_value(config_mock, "key3"), "value3")
        # ... the plugin's own instance matches ...
        config_mock = {}
        config_mock["key3@bind"] = "value2"
        self.assertEqual(wolplug.plugin.get_iattr_value(config_mock, "key3"), "value2")
        # ... a foreign instance does not.
        config_mock = {}
        config_mock["key3@bind2"] = "value4"
        self.assertIsNone(wolplug.plugin.get_iattr_value(config_mock, "key3"))
        if 0:
            print(sh._plugins)
            for plug in sh.return_plugins():
                print(plug)
        #load items
        item_conf = None
        item_conf = lib.config.parse("resources/plugin_items.conf", item_conf)
        # print(item_conf.items())
        for attr, value in item_conf.items():
            if isinstance(value, dict):
                child_path = attr
                try:
                    child = lib.item.Item(sh, sh, child_path, value)
                except Exception as e:
                    self.logger.error("Item {}: problem creating: ()".format(child_path, e))
                else:
                    #vars(sh)[attr] = child
                    sh.add_item(child_path, child)
                    sh.children.append(child)
        # for item in sh.return_items():
        #     item._init_prerun()
        # for item in sh.return_items():
        #     item._init_run()
        #
        if 0: self.dump_items(sh)
        # Trigger counts come from the fixture's watch declarations.
        it = sh.return_item("item3.item3b.item3b1.item3b1a")
        self.assertIsNotNone(it)
        self.assertEqual(len(it.get_method_triggers()), 2)
        it = sh.return_item("item3.item3b.item3b1")
        self.assertIsNotNone(it)
        self.assertEqual(len(it.get_method_triggers()), 1)
        it = sh.return_item("item3.item3b")
        self.assertIsNotNone(it)
        self.assertEqual(len(it.get_method_triggers()), 0)
        sh.scheduler.add(wolplug.name, wolplug.plugin.update_item, prio=5, cycle=300, offset=2)
        wolplug.plugin.testprint()
        wolplug.plugin.wake_on_lan("11:22:33:44:55:66")

    def _update_dummy(self):
        # placeholder update callback
        print("update dummy")

    def dump_items(self, sh):
        """Debug helper: print every item and its trigger owners."""
        for item in sh.return_items():
            print(item)
            for meth in item.get_method_triggers():
                print(' ' + meth.__self__.get_info())

    def _test_configsave(self):
        # Disabled test (leading underscore).  NOTE(review): it references
        # ``sh`` which is never defined here and would NameError if the
        # Item() constructor ever succeeds -- fix before re-enabling.
        import configparser
        plugins = lib.plugin.Plugins(MockSmartHome(), "resources/plugin.conf")
        item_conf = None
        item_conf = lib.config.parse("resources/plugin_items.conf", item_conf)
        print(item_conf)
        for attr, value in item_conf.items():
            if isinstance(value, dict):
                child_path = attr
                try:
                    child = lib.item.Item(self, self, child_path, value)
                except Exception as e:
                    print("Item {}: problem creating: ()".format(child_path, e))
                else:
                    vars(self)[attr] = child
                    sh.add_item(child_path, child)
                    sh.children.append(child)
        config = configparser.RawConfigParser()
        #config.read('resources/plugin_items.conf')
        config.read_dict(item_conf)
        print(config)
        with open('example.cfg', 'w') as configfile:
            config.write(configfile)
class MockSmartHome():
    """Minimal SmartHome stand-in: an item registry plus a print-only
    scheduler, enough for the plugin tests above."""

    class MockScheduler():
        def add(self, name, obj, prio=3, cron=None, cycle=None, value=None, offset=None, next=None):
            # Only logs what a real scheduler would register.
            print(name)
            if isinstance(obj.__self__, SmartPlugin):
                # Multi-instance plugins get their instance name appended.
                name = name +'_'+ obj.__self__.get_instance_name()
                print(name)
            print( obj)
            print(obj.__self__.get_instance_name())

    # class-level registries shared by all instances (fine for tests)
    __logs = {}
    __item_dict = {}
    __items = []
    children = []
    _plugins = []
    scheduler = MockScheduler()

    def add_log(self, name, log):
        self.__logs[name] = log

    def now(self):
        import datetime
        return datetime.datetime.now()

    def add_item(self, path, item):
        if path not in self.__items:
            self.__items.append(path)
        self.__item_dict[path] = item

    def return_item(self, string):
        # returns None implicitly for unknown paths
        if string in self.__items:
            return self.__item_dict[string]

    def return_items(self):
        for item in self.__items:
            yield self.__item_dict[item]

    def return_plugins(self):
        for plugin in self._plugins:
            yield plugin
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| gpl-3.0 |
shabab12/edx-platform | common/djangoapps/track/views/tests/test_views.py | 81 | 10225 | # pylint: disable=missing-docstring,maybe-no-member
from mock import patch, sentinel
from django.contrib.auth.models import User
from django.test.client import RequestFactory
from django.test.utils import override_settings
from track import views
from track.middleware import TrackMiddleware
from track.tests import EventTrackingTestCase, FROZEN_TIME
from openedx.core.lib.tests.assertions.events import assert_event_matches
class TestTrackViews(EventTrackingTestCase):
    """Tests for the track app's user/server/task event views: each test
    drives a view with a crafted request and asserts the emitted event."""

    def setUp(self):
        super(TestTrackViews, self).setUp()
        self.request_factory = RequestFactory()
        # Patch the tracker so emitted events can be inspected.
        patcher = patch('track.views.tracker', autospec=True)
        self.mock_tracker = patcher.start()
        self.addCleanup(patcher.stop)
        self.path_with_course = '/courses/foo/bar/baz/xmod/'
        self.url_with_course = 'http://www.edx.org' + self.path_with_course
        self.event = {
            sentinel.key: sentinel.value
        }

    def test_user_track(self):
        """Browser-sourced event with a course page derives course/org ids."""
        request = self.request_factory.get('/event', {
            'page': self.url_with_course,
            'event_type': sentinel.event_type,
            'event': '{}'
        })
        views.user_track(request)
        actual_event = self.get_event()
        expected_event = {
            'context': {
                'course_id': 'foo/bar/baz',
                'org_id': 'foo',
                'event_source': 'browser',
                'page': self.url_with_course,
                'username': 'anonymous'
            },
            'data': {},
            'timestamp': FROZEN_TIME,
            'name': str(sentinel.event_type)
        }
        assert_event_matches(expected_event, actual_event)

    def test_user_track_with_missing_values(self):
        """Absent query params fall back to empty strings / 'unknown'."""
        request = self.request_factory.get('/event')
        views.user_track(request)
        actual_event = self.get_event()
        expected_event = {
            'context': {
                'course_id': '',
                'org_id': '',
                'event_source': 'browser',
                'page': '',
                'username': 'anonymous'
            },
            'data': {},
            'timestamp': FROZEN_TIME,
            'name': 'unknown'
        }
        assert_event_matches(expected_event, actual_event)
        # NOTE(review): this second call looks redundant -- its event is
        # never asserted on.
        views.user_track(request)

    def test_user_track_with_empty_event(self):
        """An empty 'event' payload is tolerated and becomes empty data."""
        request = self.request_factory.get('/event', {
            'page': self.url_with_course,
            'event_type': sentinel.event_type,
            'event': ''
        })
        views.user_track(request)
        actual_event = self.get_event()
        expected_event = {
            'context': {
                'course_id': 'foo/bar/baz',
                'org_id': 'foo',
                'event_source': 'browser',
                'page': self.url_with_course,
                'username': 'anonymous'
            },
            'data': {},
            'timestamp': FROZEN_TIME,
            'name': str(sentinel.event_type)
        }
        assert_event_matches(expected_event, actual_event)

    @override_settings(
        EVENT_TRACKING_PROCESSORS=[{'ENGINE': 'track.shim.LegacyFieldMappingProcessor'}],
    )
    def test_user_track_with_middleware_and_processors(self):
        """Full pipeline: TrackMiddleware context plus the legacy field
        mapping processor produce the flat legacy event shape."""
        self.recreate_tracker()
        middleware = TrackMiddleware()
        payload = '{"foo": "bar"}'
        user_id = 1
        request = self.request_factory.get('/event', {
            'page': self.url_with_course,
            'event_type': sentinel.event_type,
            'event': payload
        })
        request.user = User.objects.create(pk=user_id, username=str(sentinel.username))
        request.META['REMOTE_ADDR'] = '10.0.0.1'
        request.META['HTTP_REFERER'] = str(sentinel.referer)
        request.META['HTTP_ACCEPT_LANGUAGE'] = str(sentinel.accept_language)
        request.META['HTTP_USER_AGENT'] = str(sentinel.user_agent)
        request.META['SERVER_NAME'] = 'testserver2'
        middleware.process_request(request)
        try:
            views.user_track(request)
            expected_event = {
                'accept_language': str(sentinel.accept_language),
                'referer': str(sentinel.referer),
                'username': str(sentinel.username),
                'session': '',
                'ip': '10.0.0.1',
                'event_source': 'browser',
                'event_type': str(sentinel.event_type),
                'name': str(sentinel.event_type),
                'event': payload,
                'agent': str(sentinel.user_agent),
                'page': self.url_with_course,
                'time': FROZEN_TIME,
                'host': 'testserver2',
                'context': {
                    'course_id': 'foo/bar/baz',
                    'org_id': 'foo',
                    'user_id': user_id,
                    'path': u'/event'
                },
            }
        finally:
            # Always unwind the middleware's thread-local context.
            middleware.process_response(request, None)
        actual_event = self.get_event()
        assert_event_matches(expected_event, actual_event)

    def test_server_track(self):
        """server_track without middleware emits an event with empty context."""
        request = self.request_factory.get(self.path_with_course)
        views.server_track(request, str(sentinel.event_type), '{}')
        expected_event = {
            'accept_language': '',
            'referer': '',
            'username': 'anonymous',
            'ip': '127.0.0.1',
            'event_source': 'server',
            'event_type': str(sentinel.event_type),
            'event': '{}',
            'agent': '',
            'page': None,
            'time': FROZEN_TIME,
            'host': 'testserver',
            'context': {},
        }
        self.assert_mock_tracker_call_matches(expected_event)

    def assert_mock_tracker_call_matches(self, expected_event):
        # Helper: exactly one event was sent and it matches.
        self.assertEqual(len(self.mock_tracker.send.mock_calls), 1)
        actual_event = self.mock_tracker.send.mock_calls[0][1][0]
        assert_event_matches(expected_event, actual_event)

    def test_server_track_with_middleware(self):
        """With TrackMiddleware active the context carries course/org/path."""
        middleware = TrackMiddleware()
        request = self.request_factory.get(self.path_with_course)
        middleware.process_request(request)
        # The middleware emits an event, reset the mock to ignore it since we aren't testing that feature.
        self.mock_tracker.reset_mock()
        try:
            views.server_track(request, str(sentinel.event_type), '{}')
            expected_event = {
                'accept_language': '',
                'referer': '',
                'username': 'anonymous',
                'ip': '127.0.0.1',
                'event_source': 'server',
                'event_type': str(sentinel.event_type),
                'event': '{}',
                'agent': '',
                'page': None,
                'time': FROZEN_TIME,
                'host': 'testserver',
                'context': {
                    'user_id': '',
                    'course_id': u'foo/bar/baz',
                    'org_id': 'foo',
                    'path': u'/courses/foo/bar/baz/xmod/'
                },
            }
        finally:
            middleware.process_response(request, None)
        self.assert_mock_tracker_call_matches(expected_event)

    def test_server_track_with_middleware_and_google_analytics_cookie(self):
        """A GA cookie on the request must not change the emitted event."""
        middleware = TrackMiddleware()
        request = self.request_factory.get(self.path_with_course)
        request.COOKIES['_ga'] = 'GA1.2.1033501218.1368477899'
        middleware.process_request(request)
        # The middleware emits an event, reset the mock to ignore it since we aren't testing that feature.
        self.mock_tracker.reset_mock()
        try:
            views.server_track(request, str(sentinel.event_type), '{}')
            expected_event = {
                'accept_language': '',
                'referer': '',
                'username': 'anonymous',
                'ip': '127.0.0.1',
                'event_source': 'server',
                'event_type': str(sentinel.event_type),
                'event': '{}',
                'agent': '',
                'page': None,
                'time': FROZEN_TIME,
                'host': 'testserver',
                'context': {
                    'user_id': '',
                    'course_id': u'foo/bar/baz',
                    'org_id': 'foo',
                    'path': u'/courses/foo/bar/baz/xmod/'
                },
            }
        finally:
            middleware.process_response(request, None)
        self.assert_mock_tracker_call_matches(expected_event)

    def test_server_track_with_no_request(self):
        """server_track tolerates request=None and emits blank fields."""
        request = None
        views.server_track(request, str(sentinel.event_type), '{}')
        expected_event = {
            'accept_language': '',
            'referer': '',
            'username': 'anonymous',
            'ip': '',
            'event_source': 'server',
            'event_type': str(sentinel.event_type),
            'event': '{}',
            'agent': '',
            'page': None,
            'time': FROZEN_TIME,
            'host': '',
            'context': {},
        }
        self.assert_mock_tracker_call_matches(expected_event)

    def test_task_track(self):
        """task_track merges task_info into the event payload and tags the
        event source as 'task'."""
        request_info = {
            'accept_language': '',
            'referer': '',
            'username': 'anonymous',
            'ip': '127.0.0.1',
            'agent': 'agent',
            'host': 'testserver',
        }
        task_info = {
            sentinel.task_key: sentinel.task_value
        }
        expected_event_data = dict(task_info)
        expected_event_data.update(self.event)
        views.task_track(request_info, task_info, str(sentinel.event_type), self.event)
        expected_event = {
            'username': 'anonymous',
            'ip': '127.0.0.1',
            'event_source': 'task',
            'event_type': str(sentinel.event_type),
            'event': expected_event_data,
            'agent': 'agent',
            'page': None,
            'time': FROZEN_TIME,
            'host': 'testserver',
            'context': {
                'course_id': '',
                'org_id': ''
            },
        }
        self.assert_mock_tracker_call_matches(expected_event)
| agpl-3.0 |
ericbaze/continuum_code_2012 | pydata/moin/pythonenv/local/lib/python2.7/re.py | 113 | 12984 | #
# Secret Labs' Regular Expression Engine
#
# re-compatible interface for the sre matching engine
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# This version of the SRE library can be redistributed under CNRI's
# Python 1.6 license. For any other use, please contact Secret Labs
# AB (info@pythonware.com).
#
# Portions of this engine have been developed in cooperation with
# CNRI. Hewlett-Packard provided funding for 1.6 integration and
# other compatibility work.
#
r"""Support for regular expressions (RE).
This module provides regular expression matching operations similar to
those found in Perl. It supports both 8-bit and Unicode strings; both
the pattern and the strings being processed can contain null bytes and
characters outside the US ASCII range.
Regular expressions can contain both special and ordinary characters.
Most ordinary characters, like "A", "a", or "0", are the simplest
regular expressions; they simply match themselves. You can
concatenate ordinary characters, so last matches the string 'last'.
The special characters are:
"." Matches any character except a newline.
"^" Matches the start of the string.
"$" Matches the end of the string or just before the newline at
the end of the string.
"*" Matches 0 or more (greedy) repetitions of the preceding RE.
Greedy means that it will match as many repetitions as possible.
"+" Matches 1 or more (greedy) repetitions of the preceding RE.
"?" Matches 0 or 1 (greedy) of the preceding RE.
*?,+?,?? Non-greedy versions of the previous three special characters.
{m,n} Matches from m to n repetitions of the preceding RE.
{m,n}? Non-greedy version of the above.
"\\" Either escapes special characters or signals a special sequence.
[] Indicates a set of characters.
A "^" as the first character indicates a complementing set.
"|" A|B, creates an RE that will match either A or B.
(...) Matches the RE inside the parentheses.
The contents can be retrieved or matched later in the string.
(?iLmsux) Set the I, L, M, S, U, or X flag for the RE (see below).
(?:...) Non-grouping version of regular parentheses.
(?P<name>...) The substring matched by the group is accessible by name.
(?P=name) Matches the text matched earlier by the group named name.
(?#...) A comment; ignored.
(?=...) Matches if ... matches next, but doesn't consume the string.
(?!...) Matches if ... doesn't match next.
(?<=...) Matches if preceded by ... (must be fixed length).
(?<!...) Matches if not preceded by ... (must be fixed length).
(?(id/name)yes|no) Matches yes pattern if the group with id/name matched,
the (optional) no pattern otherwise.
The special sequences consist of "\\" and a character from the list
below. If the ordinary character is not on the list, then the
resulting RE will match the second character.
\number Matches the contents of the group of the same number.
\A Matches only at the start of the string.
\Z Matches only at the end of the string.
\b Matches the empty string, but only at the start or end of a word.
\B Matches the empty string, but not at the start or end of a word.
\d Matches any decimal digit; equivalent to the set [0-9].
\D Matches any non-digit character; equivalent to the set [^0-9].
\s Matches any whitespace character; equivalent to [ \t\n\r\f\v].
\S Matches any non-whitespace character; equiv. to [^ \t\n\r\f\v].
\w Matches any alphanumeric character; equivalent to [a-zA-Z0-9_].
With LOCALE, it will match the set [0-9_] plus characters defined
as letters for the current locale.
\W Matches the complement of \w.
\\ Matches a literal backslash.
This module exports the following functions:
match Match a regular expression pattern to the beginning of a string.
search Search a string for the presence of a pattern.
sub Substitute occurrences of a pattern found in a string.
subn Same as sub, but also return the number of substitutions made.
split Split a string by the occurrences of a pattern.
findall Find all occurrences of a pattern in a string.
finditer Return an iterator yielding a match object for each match.
compile Compile a pattern into a RegexObject.
purge Clear the regular expression cache.
escape Backslash all non-alphanumerics in a string.
Some of the functions in this module takes flags as optional parameters:
I IGNORECASE Perform case-insensitive matching.
L LOCALE Make \w, \W, \b, \B, dependent on the current locale.
M MULTILINE "^" matches the beginning of lines (after a newline)
as well as the string.
"$" matches the end of lines (before a newline) as well
as the end of the string.
S DOTALL "." matches any character at all, including the newline.
X VERBOSE Ignore whitespace and comments for nicer looking RE's.
U UNICODE Make \w, \W, \b, \B, dependent on the Unicode locale.
This module also defines an exception 'error'.
"""
import sys
import sre_compile
import sre_parse
# public symbols
__all__ = [ "match", "search", "sub", "subn", "split", "findall",
    "compile", "purge", "template", "escape", "I", "L", "M", "S", "X",
    "U", "IGNORECASE", "LOCALE", "MULTILINE", "DOTALL", "VERBOSE",
    "UNICODE", "error" ]

__version__ = "2.2.1"

# flags
# The single-letter names are the traditional short aliases for the
# spelled-out flag constants; both refer to the same integer bit values
# defined by the sre compiler.
I = IGNORECASE = sre_compile.SRE_FLAG_IGNORECASE # ignore case
L = LOCALE = sre_compile.SRE_FLAG_LOCALE # assume current 8-bit locale
U = UNICODE = sre_compile.SRE_FLAG_UNICODE # assume unicode locale
M = MULTILINE = sre_compile.SRE_FLAG_MULTILINE # make anchors look for newline
S = DOTALL = sre_compile.SRE_FLAG_DOTALL # make dot match newline
X = VERBOSE = sre_compile.SRE_FLAG_VERBOSE # ignore whitespace and comments

# sre extensions (experimental, don't rely on these)
T = TEMPLATE = sre_compile.SRE_FLAG_TEMPLATE # disable backtracking
DEBUG = sre_compile.SRE_FLAG_DEBUG # dump pattern after compilation

# sre exception
error = sre_compile.error

# --------------------------------------------------------------------
# public interface
def match(pattern, string, flags=0):
    """Try to apply the pattern at the start of the string, returning
    a match object, or None if no match was found."""
    compiled = _compile(pattern, flags)
    return compiled.match(string)
def search(pattern, string, flags=0):
    """Scan through string looking for a match to the pattern, returning
    a match object, or None if no match was found."""
    compiled = _compile(pattern, flags)
    return compiled.search(string)
def sub(pattern, repl, string, count=0, flags=0):
    """Return the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in string by the
    replacement repl.  repl can be either a string or a callable;
    if a string, backslash escapes in it are processed.  If it is
    a callable, it's passed the match object and must return
    a replacement string to be used."""
    compiled = _compile(pattern, flags)
    return compiled.sub(repl, string, count)
def subn(pattern, repl, string, count=0, flags=0):
    """Return a 2-tuple containing (new_string, number).

    new_string is the string obtained by replacing the leftmost
    non-overlapping occurrences of the pattern in the source
    string by the replacement repl.  number is the number of
    substitutions that were made.  repl can be either a string or a
    callable; if a string, backslash escapes in it are processed.
    If it is a callable, it's passed the match object and must
    return a replacement string to be used."""
    compiled = _compile(pattern, flags)
    return compiled.subn(repl, string, count)
def split(pattern, string, maxsplit=0, flags=0):
    """Split the source string by the occurrences of the pattern,
    returning a list containing the resulting substrings."""
    compiled = _compile(pattern, flags)
    return compiled.split(string, maxsplit)
def findall(pattern, string, flags=0):
    """Return a list of all non-overlapping matches in the string.

    If one or more groups are present in the pattern, return a
    list of groups; this will be a list of tuples if the pattern
    has more than one group.

    Empty matches are included in the result."""
    compiled = _compile(pattern, flags)
    return compiled.findall(string)
# finditer() relies on the pattern.scanner() machinery added in Python 2.2,
# so it is only defined (and exported) on 2.2+ interpreters.
if sys.hexversion >= 0x02020000:
    __all__.append("finditer")
    def finditer(pattern, string, flags=0):
        """Return an iterator over all non-overlapping matches in the
        string.  For each match, the iterator returns a match object.

        Empty matches are included in the result."""
        return _compile(pattern, flags).finditer(string)
def compile(pattern, flags=0):
    "Compile a regular expression pattern, returning a pattern object."
    compiled = _compile(pattern, flags)
    return compiled
def purge():
    "Clear the regular expression cache"
    # Flush both the compiled-pattern and the replacement-template caches.
    for cache in (_cache, _cache_repl):
        cache.clear()
def template(pattern, flags=0):
    "Compile a template pattern, returning a pattern object"
    # Same as compile(), but with the (experimental) TEMPLATE flag forced on.
    return _compile(pattern, T | flags)
# Set-like lookup table of characters that escape() leaves unescaped.
# (The trailing extra '0' in the literal is a harmless duplicate: the dict
# just records membership.)
_alphanum = {}
for c in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890':
    _alphanum[c] = 1
del c
def escape(pattern):
    "Escape all non-alphanumeric characters in pattern."
    alphanum = _alphanum
    chars = list(pattern)
    for idx, ch in enumerate(pattern):
        if ch not in alphanum:
            # NUL cannot be written as "\<NUL>"; spell it out as an octal escape.
            chars[idx] = "\\000" if ch == "\000" else "\\" + ch
    # pattern[:0] is an empty string/bytes of the input's own type, so the
    # join preserves str vs unicode.
    return pattern[:0].join(chars)
# --------------------------------------------------------------------
# internals

_cache = {}        # maps (type(pattern), pattern, flags) -> compiled pattern
_cache_repl = {}   # maps (repl, pattern) -> parsed replacement template

_pattern_type = type(sre_compile.compile("", 0))

_MAXCACHE = 100    # each cache is flushed wholesale once it reaches this size
def _compile(*key):
    # internal: compile pattern
    # The cache key includes the pattern's type so that str and unicode
    # patterns with equal text (which can compile differently) do not
    # collide in the cache.
    cachekey = (type(key[0]),) + key
    p = _cache.get(cachekey)
    if p is not None:
        return p
    pattern, flags = key
    if isinstance(pattern, _pattern_type):
        # Already compiled: pass it through, but reject extra flags since
        # they cannot be applied retroactively.
        if flags:
            raise ValueError('Cannot process flags argument with a compiled pattern')
        return pattern
    if not sre_compile.isstring(pattern):
        raise TypeError, "first argument must be string or compiled pattern"
    try:
        p = sre_compile.compile(pattern, flags)
    except error, v:
        raise error, v # invalid expression
    if len(_cache) >= _MAXCACHE:
        # Simple bounded cache: wholesale flush when full.
        _cache.clear()
    _cache[cachekey] = p
    return p
def _compile_repl(*key):
    # internal: compile replacement pattern
    # key is (repl, pattern); the parsed template is cached per pair since
    # group references in repl are validated against the pattern.
    p = _cache_repl.get(key)
    if p is not None:
        return p
    repl, pattern = key
    try:
        p = sre_parse.parse_template(repl, pattern)
    except error, v:
        raise error, v # invalid expression
    if len(_cache_repl) >= _MAXCACHE:
        # Same bounded-cache policy as _cache: flush everything when full.
        _cache_repl.clear()
    _cache_repl[key] = p
    return p
def _expand(pattern, match, template):
    # internal: match.expand implementation hook
    parsed = sre_parse.parse_template(template, pattern)
    return sre_parse.expand_template(parsed, match)
def _subx(pattern, template):
    # internal: pattern.sub/subn implementation helper
    parsed = _compile_repl(template, pattern)
    groups, literals = parsed[0], parsed[1]
    if not groups and len(literals) == 1:
        # No group references and a single literal chunk: the replacement
        # is a constant string, so return it directly instead of a callable.
        return literals[0]
    def expand_match(match, template=parsed):
        return sre_parse.expand_template(template, match)
    return expand_match
# register myself for pickling
import copy_reg

def _pickle(p):
    # Compiled patterns are pickled as their (pattern, flags) source and
    # recompiled via _compile() on unpickling.
    return _compile, (p.pattern, p.flags)

copy_reg.pickle(_pattern_type, _pickle, _compile)
# --------------------------------------------------------------------
# experimental stuff (see python-dev discussions for details)
class Scanner:
    """Experimental tokenizer (see python-dev discussions).

    Compiles a lexicon of (phrase, action) pairs into one alternation
    pattern and repeatedly matches it against an input string, invoking
    the action associated with whichever phrase matched.
    """
    def __init__(self, lexicon, flags=0):
        from sre_constants import BRANCH, SUBPATTERN
        self.lexicon = lexicon
        # combine phrases into a compound pattern
        p = []
        s = sre_parse.Pattern()
        s.flags = flags
        for phrase, action in lexicon:
            # Wrap each phrase in its own numbered group so that
            # m.lastindex identifies which lexicon entry matched.
            p.append(sre_parse.SubPattern(s, [
                (SUBPATTERN, (len(p)+1, sre_parse.parse(phrase, flags))),
                ]))
        s.groups = len(p)+1
        p = sre_parse.SubPattern(s, [(BRANCH, (None, p))])
        self.scanner = sre_compile.compile(p)
    def scan(self, string):
        """Tokenize string; return (results, remainder-of-string).

        For each match, the matching entry's action is either appended
        directly (non-callable) or called with (self, matched_text) and
        its non-None return value appended.
        """
        result = []
        append = result.append
        match = self.scanner.scanner(string).match
        i = 0
        while 1:
            m = match()
            if not m:
                break
            j = m.end()
            if i == j:
                # Zero-width match: stop to avoid looping forever.
                break
            action = self.lexicon[m.lastindex-1][1]
            if hasattr(action, '__call__'):
                # Expose the match object to the callback via self.match.
                self.match = m
                action = action(self, m.group())
            if action is not None:
                append(action)
            i = j
        return result, string[i:]
| gpl-2.0 |
aayush2911/Fibonaccio | web2py/gluon/contrib/taskbar_widget.py | 43 | 8262 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# # Creates a taskbar icon for web2py
# # Author: Mark Larsen, mostly stolen from Mark Hammond's
# # C:\Python25\Lib\site-packages\win32\Demos\win32gui_taskbar.py
# # 11/7/08
# dual licensed under the web2py license (LGPL) and the Python license.
import os
import sys
import base64
import win32con
import win32api
import win32gui
class TaskBarIcon:
def __init__(self, iconPath=None):
self.iconPath = iconPath
self.status = []
msg_TaskbarRestart = \
win32api.RegisterWindowMessage('TaskbarCreated')
message_map = {
msg_TaskbarRestart: self.OnRestart,
win32con.WM_DESTROY: self.OnDestroy,
win32con.WM_COMMAND: self.OnCommand,
win32con.WM_USER + 20: self.OnTaskbarNotify,
}
# Register the Window class.
wc = win32gui.WNDCLASS()
hinst = wc.hInstance = win32api.GetModuleHandle(None)
wc.lpszClassName = 'web2pyTaskbar'
wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW
wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)
wc.hbrBackground = win32con.COLOR_WINDOW
wc.lpfnWndProc = message_map # could also specify a wndproc.
classAtom = win32gui.RegisterClass(wc)
# Create the Window.
style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU
self.hwnd = win32gui.CreateWindow(
classAtom,
'web2pyTaskbar',
style,
0,
0,
win32con.CW_USEDEFAULT,
win32con.CW_USEDEFAULT,
0,
0,
hinst,
None,
)
win32gui.UpdateWindow(self.hwnd)
self.SetServerStopped()
def __createIcon(self):
# try and use custom icon
if self.iconPath and os.path.isfile(self.iconPath):
hicon = self.__loadFromFile(self.iconPath)
else:
try:
fp = 'tmp.ico'
icFH = file(fp, 'wb')
if self.serverState == self.EnumServerState.STOPPED:
icFH.write(base64.b64decode(self.__getIconStopped()))
elif self.serverState == self.EnumServerState.RUNNING:
icFH.write(base64.b64decode(self.__getIconRunning()))
icFH.close()
hicon = self.__loadFromFile(fp)
os.unlink(fp)
except:
print "Can't load web2py icons - using default"
hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION)
flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE\
| win32gui.NIF_TIP
nid = (
self.hwnd,
0,
flags,
win32con.WM_USER + 20,
hicon,
'web2py Framework',
)
try:
win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, nid)
except:
try:
win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid)
except win32api.error:
# This is common when windows is starting, and this code is hit
# before the taskbar has been created.
print 'Failed to add the taskbar icon - is explorer running?'
# but keep running anyway - when explorer starts, we get the
def OnRestart(
self,
hwnd,
msg,
wparam,
lparam,
):
self._DoCreateIcons()
def OnDestroy(
self,
hwnd,
msg,
wparam,
lparam,
):
nid = (self.hwnd, 0)
win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid)
def OnTaskbarNotify(
self,
hwnd,
msg,
wparam,
lparam,
):
if lparam == win32con.WM_LBUTTONUP:
pass
elif lparam == win32con.WM_LBUTTONDBLCLK:
pass
elif lparam == win32con.WM_RBUTTONUP:
menu = win32gui.CreatePopupMenu()
win32gui.AppendMenu(menu, win32con.MF_STRING, 1023,
'Toggle Display')
win32gui.AppendMenu(menu, win32con.MF_SEPARATOR, 0, '')
if self.serverState == self.EnumServerState.STOPPED:
win32gui.AppendMenu(menu, win32con.MF_STRING, 1024,
'Start Server')
win32gui.AppendMenu(menu, win32con.MF_STRING
| win32con.MF_GRAYED, 1025,
'Restart Server')
win32gui.AppendMenu(menu, win32con.MF_STRING
| win32con.MF_GRAYED, 1026,
'Stop Server')
else:
win32gui.AppendMenu(menu, win32con.MF_STRING
| win32con.MF_GRAYED, 1024,
'Start Server')
win32gui.AppendMenu(menu, win32con.MF_STRING, 1025,
'Restart Server')
win32gui.AppendMenu(menu, win32con.MF_STRING, 1026,
'Stop Server')
win32gui.AppendMenu(menu, win32con.MF_SEPARATOR, 0, '')
win32gui.AppendMenu(menu, win32con.MF_STRING, 1027,
'Quit (pid:%i)' % os.getpid())
pos = win32gui.GetCursorPos()
# See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp
win32gui.SetForegroundWindow(self.hwnd)
win32gui.TrackPopupMenu(
menu,
win32con.TPM_LEFTALIGN,
pos[0],
pos[1],
0,
self.hwnd,
None,
)
win32api.PostMessage(self.hwnd, win32con.WM_NULL, 0, 0)
return 1
def OnCommand(
self,
hwnd,
msg,
wparam,
lparam,
):
id = win32api.LOWORD(wparam)
if id == 1023:
self.status.append(self.EnumStatus.TOGGLE)
elif id == 1024:
self.status.append(self.EnumStatus.START)
elif id == 1025:
self.status.append(self.EnumStatus.RESTART)
elif id == 1026:
self.status.append(self.EnumStatus.STOP)
elif id == 1027:
self.status.append(self.EnumStatus.QUIT)
self.Destroy()
else:
print 'Unknown command -', id
def Destroy(self):
win32gui.DestroyWindow(self.hwnd)
def SetServerRunning(self):
self.serverState = self.EnumServerState.RUNNING
self.__createIcon()
def SetServerStopped(self):
self.serverState = self.EnumServerState.STOPPED
self.__createIcon()
def __getIconRunning(self):
return 'AAABAAEAEBAQAAAAAAAoAQAAFgAAACgAAAAQAAAAIAAAAAEABAAAAAAAgAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAIXMGAABe/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABERAgAAIAAAEAACAAAgAAABEAIiACIgAAABAgAgIAIAEAECACAgAgABEAIiACACAAAAAAAAAAAAICACIiAiIAICAgIAACACAgICAgAAIAICAgICIiAiIAICAgIAACACAgICAgAAIAICAgICIiAiIAAAAAAAAAAAD//wAAhe8AAL3vAADMYwAA9a0AALWtAADMbQAA//8AAKwjAABV7QAAVe0AAFQjAABV7QAAVe0AAFQjAAD//wAA'
def __getIconStopped(self):
return 'AAABAAEAEBAQAAEABAAoAQAAFgAAACgAAAAQAAAAIAAAAAEABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJCdIAIXMGAABe/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAzMzMzMzMzAwERMjMzIzAzEDMyMzMjAzMxAzIiMyAjMzMwMjMjAzIzEzECMyAjMjMxEzAiAyMyMzMzMwAzMzMzIyMyACMiIzIyMjAzAyMyMjIyAjMwIzIyMjAyIiMCIzIyAjIzMyAyMjAyMjMzIwIyAjIyIiMiIDAzMzMzMzMzB//gAAhe0AAJ3rAADMYwAA9a0AALGNAADMLQAA/n8AAKwjAABVrQAAUc0AAFQjAABF5QAAVekAABQhAAB//gAA'
def __loadFromFile(self, iconPath):
hinst = win32api.GetModuleHandle(None)
icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE
hicon = win32gui.LoadImage(
hinst,
iconPath,
win32con.IMAGE_ICON,
0,
0,
icon_flags,
)
return hicon
class EnumStatus:
TOGGLE = 0
START = 1
STOP = 2
RESTART = 3
QUIT = 4
class EnumServerState:
RUNNING = 0
STOPPED = 1
| gpl-2.0 |
snf/servo | python/mach/mach/mixin/logging.py | 131 | 1994 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, unicode_literals
import logging
class LoggingMixin(object):
    """Provides functionality to control logging."""

    def populate_logger(self, name=None):
        """Ensure this class instance has a logger associated with it.

        Users of this mixin that call log() will need to ensure self._logger is
        a logging.Logger instance before they call log(). This function ensures
        self._logger is defined by populating it if it isn't.
        """
        if not hasattr(self, '_logger'):
            logger_name = name
            if logger_name is None:
                logger_name = '.'.join([self.__module__,
                                        self.__class__.__name__])
            self._logger = logging.getLogger(logger_name)

    def log(self, level, action, params, format_str):
        """Log a structured log event.

        A structured log event consists of a logging level (one of the
        logging.* constants), a string ``action`` enumerating the event type,
        a ``params`` dict holding the event's metadata, and ``format_str``,
        which converts the event back to human-readable form via
        format_str.format(**params).
        """
        extra = {'action': action, 'params': params}
        self._logger.log(level, format_str, extra=extra)
| mpl-2.0 |
tidyjiang8/esp-idf-zh | tools/esp_app_trace/pylibelf/__init__.py | 13 | 5711 | from types import *
from constants import *
from ctypes import *
# Handle to the native libelf shared library; all wrappers below call into it.
lelf=CDLL("libelf.so.1")

__all__ = []    # populated by define() for each exported wrapper
all_objs = []   # keeps non-NULL ctypes results alive (see nonNullDec)
class ElfError(Exception):
    """Raised when a libelf call fails; snapshots elf_errno()/elf_errmsg()."""

    def __init__(self, msg):
        code = elf_errno()
        self.msg = msg
        self.errno = code
        self.elfmsg = elf_errmsg(code)

    def __str__(self):
        return "ElfError(%d, %s): %s" % (self.errno, self.elfmsg, self.msg)

__all__.append("ElfError")
def nonNullDec(f):
    """Decorator: raise ElfError when f returns a NULL ctypes pointer."""
    def decorated(*args):
        result = f(*args)
        try:
            # Dereferencing .contents raises ValueError on a NULL pointer.
            result.contents
            all_objs.append(result)
        except ValueError:  # NULL
            raise ElfError(f.__name__ + " returned NULL")
        return result
    return decorated
def nonNegDec(f):
    """Decorator: raise ElfError when f returns a negative value."""
    def decorated(*args):
        result = f(*args)
        if result < 0:
            raise ElfError(f.__name__ + " returned %d" % (result,))
        return result
    return decorated
def badValDec(badVal):
    """Decorator factory: raise ElfError when the call returns badVal."""
    def decorator(f):
        def decorated(*args):
            result = f(*args)
            if result == badVal:
                raise ElfError(f.__name__ + " returned %s" % (str(result),))
            return result
        return decorated
    return decorator
def define(f, argtypes, restype, err_decorator = None):
    # Declare the ctypes signature of libelf function f, optionally wrap it
    # in an error-checking decorator, export its name via __all__, and bind
    # it into this module's globals under the function's own name.
    f.argtypes = argtypes
    f.restype = restype
    name = f.__name__
    __all__.append(name)
    if (err_decorator != None):
        f = err_decorator(f)
    globals()[name] = f
# Bind the libelf API.  Each define() call injects a wrapper into this
# module's globals (so e.g. elf_version is callable right after its define).
define(lelf.elf_version, [ c_int ], c_int )

if (elf_version(EV_CURRENT) == EV_NONE):
    raise Exception("Version mismatch")

off_t = c_size_t # TODO(dbounov): Figure out actual off_t type

# Generic (class-independent) ELF functions.
define(lelf.elf_begin, [ c_int, Elf_Cmd, ElfP ], ElfP)
define(lelf.elf_getident, [ ElfP, POINTER(c_int) ], POINTER(Elf_IdentT), nonNullDec)
define(lelf.elf_end, [ ElfP ], c_int, nonNegDec )
define(lelf.elf_cntl, [ ElfP, c_int ], c_int, nonNegDec)
define(lelf.elf_errmsg, [ c_int ], c_char_p)
define(lelf.elf_errno, [ ], c_int)
define(lelf.elf_fill, [ c_int ], None)
define(lelf.elf_flagdata, [ Elf_DataP, c_int, c_uint ], c_uint)
define(lelf.elf_flagehdr, [ ElfP, c_int, c_uint ], c_uint)
define(lelf.elf_flagelf, [ ElfP, c_int, c_uint ], c_uint)
define(lelf.elf_flagphdr, [ ElfP, c_int, c_uint ], c_uint)
define(lelf.elf_flagscn, [ Elf_ScnP, c_int, c_uint ], c_uint)
define(lelf.elf_flagshdr, [ Elf_ScnP, c_int, c_uint ], c_uint)
define(lelf.elf_getarhdr, [ ElfP ], POINTER(Elf_Arhdr))
#define(lelf.elf_getarsym, [ ], )
define(lelf.elf_getbase, [ ElfP ], off_t, nonNegDec)
define(lelf.elf_getdata, [ Elf_ScnP, Elf_DataP ], Elf_DataP)
define(lelf.elf_getscn, [ ElfP, c_size_t ], Elf_ScnP, nonNullDec )
define(lelf.elf_getshnum, [ ElfP, POINTER(c_size_t) ], c_int, nonNegDec )
define(lelf.elf_getshstrndx, [ ElfP, POINTER(c_size_t) ], c_int, nonNegDec )
define(lelf.elf_hash, [ c_char_p ], c_ulong)
define(lelf.elf_kind, [ ElfP ], c_int )
define(lelf.elf_memory, [ POINTER(c_char), c_size_t ], ElfP, nonNullDec)
define(lelf.elf_ndxscn, [ Elf_ScnP ], c_size_t, badValDec(SHN_UNDEF))
define(lelf.elf_newdata, [ Elf_ScnP ], Elf_DataP, nonNullDec)
define(lelf.elf_newscn, [ ElfP ], Elf_ScnP, nonNullDec)
#define(lelf.elf_next, [ ], )
define(lelf.elf_nextscn, [ ElfP, Elf_ScnP ], Elf_ScnP)
#define(lelf.elf_rand, [ ], )
define(lelf.elf_rawdata, [ Elf_ScnP, Elf_DataP ], Elf_DataP)
#define(lelf.elf_rawfile, [ ], )
define(lelf.elf_strptr, [ ElfP, c_size_t, c_size_t ], c_char_p)
define(lelf.elf_update, [ ElfP, c_int], off_t, nonNegDec)

# 32-bit ELF class accessors.
define(lelf.elf32_checksum, [ ElfP ], c_long)
define(lelf.elf32_fsize, [ c_int, c_size_t, c_uint ], c_size_t, nonNegDec)
define(lelf.elf32_getehdr, [ ElfP ], POINTER(Elf32_Ehdr), nonNullDec)
define(lelf.elf32_getphdr, [ ElfP ], POINTER(Elf32_Phdr), nonNullDec)
define(lelf.elf32_getshdr, [ Elf_ScnP ], POINTER(Elf32_Shdr), nonNullDec)
define(lelf.elf32_newehdr, [ ElfP ], POINTER(Elf32_Ehdr), nonNullDec)
define(lelf.elf32_newphdr, [ ElfP, c_size_t ], POINTER(Elf32_Phdr), nonNullDec)
define(lelf.elf32_xlatetof, [ Elf_DataP, Elf_DataP, c_uint ], Elf_DataP, nonNullDec)
define(lelf.elf32_xlatetom, [ Elf_DataP, Elf_DataP, c_uint ], Elf_DataP, nonNullDec)

# 64-bit ELF class accessors.
define(lelf.elf64_checksum, [ ElfP ], c_long )
define(lelf.elf64_fsize, [ c_int, c_size_t, c_uint ], c_size_t, nonNegDec)
define(lelf.elf64_getehdr,[ ElfP ], POINTER(Elf64_Ehdr), nonNullDec)
define(lelf.elf64_getphdr, [ ElfP ], POINTER(Elf64_Phdr), nonNullDec)
define(lelf.elf64_getshdr, [ Elf_ScnP ], POINTER(Elf64_Shdr), nonNullDec)
define(lelf.elf64_newehdr, [ ElfP ], POINTER(Elf64_Ehdr), nonNullDec)
define(lelf.elf64_newphdr, [ ElfP, c_size_t ], POINTER(Elf64_Phdr), nonNullDec)
define(lelf.elf64_xlatetof, [ Elf_DataP, Elf_DataP, c_uint ], Elf_DataP, nonNullDec)
define(lelf.elf64_xlatetom, [ Elf_DataP, Elf_DataP, c_uint ], Elf_DataP, nonNullDec)

# NOTE(dbounov): Ignoring gelf functions for now
#define(lelf.gelf_checksum, [ ], )
#define(lelf.gelf_fsize, [ ], )
#define(lelf.gelf_getcap, [ ], )
#define(lelf.gelf_getclass, [ ], )
#define(lelf.gelf_getdyn, [ ], )
#define(lelf.gelf_getehdr, [ ], )
#define(lelf.gelf_getmove, [ ], )
#define(lelf.gelf_getphdr, [ ], )
#define(lelf.gelf_getrel, [ ], )
#define(lelf.gelf_getrela, [ ], )
#define(lelf.gelf_getshdr, [ ], )
#define(lelf.gelf_getsym, [ ], )
#define(lelf.gelf_getsyminfo, [ ], )
#define(lelf.gelf_getsymshndx, [ ], )
#define(lelf.gelf_newehdr, [ ], )
#define(lelf.gelf_newphdr, [ ], )
#define(lelf.gelf_update_cap, [ ], )
#define(lelf.gelf_update_dyn, [ ], )
#define(lelf.gelf_update_ehdr, [ ], )
#define(lelf.gelf_update_move, [ ], )
#define(lelf.gelf_update_phdr, [ ], )
#define(lelf.gelf_update_rel, [ ], )
#define(lelf.gelf_update_rela, [ ], )
#define(lelf.gelf_update_shdr, [ ], )
#define(lelf.gelf_update_sym, [ ], )
#define(lelf.gelf_update_symshndx, [ ], )
#define(lelf.gelf_update_syminfo, [ ], )
#define(lelf.gelf_xlatetof, [ ], )
#define(lelf.gelf_xlatetom, [ ], )
#define(lelf.nlist, [ ], )
| apache-2.0 |
mudbungie/NetExplorer | env/lib/python3.4/site-packages/networkx/algorithms/approximation/independent_set.py | 85 | 1997 | # -*- coding: utf-8 -*-
"""
Independent Set
Independent set or stable set is a set of vertices in a graph, no two of
which are adjacent. That is, it is a set I of vertices such that for every
two vertices in I, there is no edge connecting the two. Equivalently, each
edge in the graph has at most one endpoint in I. The size of an independent
set is the number of vertices it contains.
A maximum independent set is a largest independent set for a given graph G
and its size is denoted α(G). The problem of finding such a set is called
the maximum independent set problem and is an NP-hard optimization problem.
As such, it is unlikely that there exists an efficient algorithm for finding
a maximum independent set of a graph.
http://en.wikipedia.org/wiki/Independent_set_(graph_theory)
Independent set algorithm is based on the following paper:
`O(|V|/(log|V|)^2)` apx of maximum clique/independent set.
Boppana, R., & Halldórsson, M. M. (1992).
Approximating maximum independent sets by excluding subgraphs.
BIT Numerical Mathematics, 32(2), 180–196. Springer.
doi:10.1007/BF01994876
"""
# Copyright (C) 2011-2012 by
# Nicholas Mancuso <nick.mancuso@gmail.com>
# All rights reserved.
# BSD license.
from networkx.algorithms.approximation import clique_removal
__all__ = ["maximum_independent_set"]
__author__ = """Nicholas Mancuso (nick.mancuso@gmail.com)"""
def maximum_independent_set(G):
    """Return an approximate maximum independent set.

    Parameters
    ----------
    G : NetworkX graph
        Undirected graph

    Returns
    -------
    iset : Set
        The apx-maximum independent set

    Notes
    -----
    Finds the `O(|V|/(log|V|)^2)` apx of independent set in the worst case.

    References
    ----------
    .. [1] Boppana, R., & Halldórsson, M. M. (1992).
       Approximating maximum independent sets by excluding subgraphs.
       BIT Numerical Mathematics, 32(2), 180–196. Springer.
    """
    # clique_removal returns (independent_set, cliques); only the first
    # component is needed here.
    independent_nodes, _cliques = clique_removal(G)
    return independent_nodes
| mit |
niktre/espressopp | contrib/mpi4py/mpi4py-2.0.0/test/test_status.py | 8 | 3107 | from mpi4py import MPI
import mpiunittest as unittest
class TestStatus(unittest.TestCase):
    """Exercises MPI.Status: default field values, count/element queries,
    cancellation flag, Python properties, and construction/copying."""

    def setUp(self):
        self.STATUS = MPI.Status()

    def tearDown(self):
        self.STATUS = None

    def testDefaultFieldValues(self):
        # A freshly constructed Status carries the MPI wildcard/neutral values.
        self.assertEqual(self.STATUS.Get_source(), MPI.ANY_SOURCE)
        self.assertEqual(self.STATUS.Get_tag(),    MPI.ANY_TAG)
        self.assertEqual(self.STATUS.Get_error(),  MPI.SUCCESS)

    def testGetCount(self):
        count = self.STATUS.Get_count(MPI.BYTE)
        self.assertEqual(count, 0)

    def testGetElements(self):
        elements = self.STATUS.Get_elements(MPI.BYTE)
        self.assertEqual(elements, 0)

    def testSetElements(self):
        try:
            self.STATUS.Set_elements(MPI.BYTE, 7)
            count = self.STATUS.Get_count(MPI.BYTE)
            self.assertEqual(count, 7)
            elements = self.STATUS.Get_elements(MPI.BYTE)
            self.assertEqual(elements, 7)
        except NotImplementedError:
            # MPI_Status_set_elements is an MPI-2 call; only treat the
            # missing implementation as a failure on MPI >= 2.
            if MPI.Get_version() >= (2,0): raise

    def testIsCancelled(self):
        flag = self.STATUS.Is_cancelled()
        self.assertTrue(type(flag) is bool)
        self.assertFalse(flag)

    def testSetCancelled(self):
        try:
            self.STATUS.Set_cancelled(True)
            flag = self.STATUS.Is_cancelled()
            self.assertTrue(flag)
        except NotImplementedError:
            # MPI_Status_set_cancelled is likewise MPI-2 only.
            if MPI.Get_version() >= (2,0): raise

    def testPyProps(self):
        # The source/tag/error Python properties mirror the Get_/Set_ calls.
        self.assertEqual(self.STATUS.Get_source(), self.STATUS.source)
        self.assertEqual(self.STATUS.Get_tag(),    self.STATUS.tag)
        self.assertEqual(self.STATUS.Get_error(),  self.STATUS.error)
        self.STATUS.source = 1
        self.STATUS.tag    = 2
        self.STATUS.error  = MPI.ERR_ARG
        self.assertEqual(self.STATUS.source, 1)
        self.assertEqual(self.STATUS.tag,    2)
        self.assertEqual(self.STATUS.error,  MPI.ERR_ARG)

    def testConstructor(self):
        self.assertRaises(TypeError, MPI.Status, 123)
        self.assertRaises(TypeError, MPI.Status, "abc")

    def testCopyConstructor(self):
        self.STATUS.source = 1
        self.STATUS.tag    = 2
        self.STATUS.error  = MPI.ERR_ARG
        status = MPI.Status(self.STATUS)
        self.assertEqual(status.source, 1)
        self.assertEqual(status.tag,    2)
        self.assertEqual(status.error,  MPI.ERR_ARG)
        # The MPI-2 mutators may be unimplemented; silently skip those
        # aspects of the copy check in that case.
        try:
            self.STATUS.Set_elements(MPI.BYTE, 7)
        except NotImplementedError:
            pass
        try:
            self.STATUS.Set_cancelled(True)
        except NotImplementedError:
            pass
        status = MPI.Status(self.STATUS)
        try:
            count = status.Get_count(MPI.BYTE)
            elems = status.Get_elements(MPI.BYTE)
            self.assertEqual(count, 7)
            self.assertEqual(elems, 7)
        except NotImplementedError:
            pass
        try:
            flag = status.Is_cancelled()
            self.assertTrue(flag)
        except NotImplementedError:
            pass
# Allow running this test module directly (outside a larger test runner).
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
TeslaProject/external_chromium_org | tools/clang/scripts/test_tool.py | 27 | 4732 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test harness for chromium clang tools."""
import difflib
import glob
import json
import os
import os.path
import subprocess
import shutil
import sys
def _GenerateCompileCommands(files, include_paths):
"""Returns a JSON string containing a compilation database for the input."""
include_path_flags = ' '.join('-I %s' % include_path
for include_path in include_paths)
return json.dumps([{'directory': '.',
'command': 'clang++ -fsyntax-only %s -c %s' % (
include_path_flags, f),
'file': f} for f in files], indent=2)
def _NumberOfTestsToString(tests):
"""Returns an English describing the number of tests."""
return "%d test%s" % (tests, 's' if tests != 1 else '')
def main(argv):
  """Copies each *-original.cc to *-actual.cc, runs the clang tool under test
  over the copies (via run_tool.py and a temporary compilation database), and
  diffs the results against the *-expected.cc files, printing gtest-style
  PASS/FAIL output.  Files are staged in / reset from the git index because
  run_tool.py only applies replacements to staged files."""
  if len(argv) < 1:
    print 'Usage: test_tool.py <clang tool>'
    print '  <clang tool> is the clang tool to be tested.'
    sys.exit(1)
  tool_to_test = argv[0]
  tools_clang_scripts_directory = os.path.dirname(os.path.realpath(__file__))
  tools_clang_directory = os.path.dirname(tools_clang_scripts_directory)
  test_directory_for_tool = os.path.join(
      tools_clang_directory, tool_to_test, 'tests')
  compile_database = os.path.join(test_directory_for_tool,
                                  'compile_commands.json')
  source_files = glob.glob(os.path.join(test_directory_for_tool,
                                        '*-original.cc'))
  # Derive the writable copy and golden-file name for every test case.
  actual_files = ['-'.join([source_file.rsplit('-', 1)[0], 'actual.cc'])
                  for source_file in source_files]
  expected_files = ['-'.join([source_file.rsplit('-', 1)[0], 'expected.cc'])
                    for source_file in source_files]
  include_paths = []
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory, '../..')))
  # Many gtest headers expect to have testing/gtest/include in the include
  # search path.
  include_paths.append(
      os.path.realpath(os.path.join(tools_clang_directory,
                                    '../..',
                                    'testing/gtest/include')))
  try:
    # Set up the test environment.
    for source, actual in zip(source_files, actual_files):
      shutil.copyfile(source, actual)
    # Stage the test files in the git index. If they aren't staged, then
    # run_tools.py will skip them when applying replacements.
    args = ['git', 'add']
    args.extend(actual_files)
    subprocess.check_call(args)
    # Generate a temporary compilation database to run the tool over.
    with open(compile_database, 'w') as f:
      f.write(_GenerateCompileCommands(actual_files, include_paths))
    args = ['python',
            os.path.join(tools_clang_scripts_directory, 'run_tool.py'),
            tool_to_test,
            test_directory_for_tool]
    args.extend(actual_files)
    run_tool = subprocess.Popen(args, stdout=subprocess.PIPE)
    stdout, _ = run_tool.communicate()
    if run_tool.returncode != 0:
      print 'run_tool failed:\n%s' % stdout
      sys.exit(1)
    # Compare every rewritten file with its golden counterpart.
    passed = 0
    failed = 0
    for expected, actual in zip(expected_files, actual_files):
      print '[ RUN ] %s' % os.path.relpath(actual)
      expected_output = actual_output = None
      with open(expected, 'r') as f:
        expected_output = f.readlines()
      with open(actual, 'r') as f:
        actual_output = f.readlines()
      if actual_output != expected_output:
        failed += 1
        for line in difflib.unified_diff(expected_output, actual_output,
                                         fromfile=os.path.relpath(expected),
                                         tofile=os.path.relpath(actual)):
          sys.stdout.write(line)
        print '[ FAILED ] %s' % os.path.relpath(actual)
        # Don't clean up the file on failure, so the results can be referenced
        # more easily.
        continue
      print '[ OK ] %s' % os.path.relpath(actual)
      passed += 1
      os.remove(actual)
    if failed == 0:
      os.remove(compile_database)
    print '[==========] %s ran.' % _NumberOfTestsToString(len(source_files))
    if passed > 0:
      print '[ PASSED ] %s.' % _NumberOfTestsToString(passed)
    if failed > 0:
      print '[ FAILED ] %s.' % _NumberOfTestsToString(failed)
  finally:
    # No matter what, unstage the git changes we made earlier to avoid polluting
    # the index.
    args = ['git', 'reset', '--quiet', 'HEAD']
    args.extend(actual_files)
    subprocess.call(args)
# Script entry point: forward command-line args (minus the program name).
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| bsd-3-clause |
chinmaygarde/CoreLib | Test/GoogleTest/scripts/fuse_gtest_files.py | 2577 | 8813 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')

# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')

# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')

# Where to find the source seed files (relative to the gtest root).
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'

# Where to put the generated files (relative to the output directory).
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
  """Verifies that the given file exists; aborts on failure.

  relative_path is the file path relative to the given directory.
  """

  if not os.path.isfile(os.path.join(directory, relative_path)):
    # Bug fix: converted Python-2-only print statements to print() calls so
    # the script also runs under Python 3 (output is unchanged).
    print('ERROR: Cannot find %s in directory %s.' % (relative_path,
                                                      directory))
    print('Please either specify a valid project root directory '
          'or omit it on the command line.')
    sys.exit(1)
def ValidateGTestRootDir(gtest_root):
  """Makes sure gtest_root points to a valid gtest root directory.

  The function aborts the program on failure.
  """

  # Both seed files must be present; VerifyFileExists aborts otherwise.
  for seed in (GTEST_H_SEED, GTEST_ALL_CC_SEED):
    VerifyFileExists(gtest_root, seed)
def VerifyOutputFile(output_dir, relative_path):
  """Verifies that the given output file path is valid.

  relative_path is relative to the output_dir directory.  Prompts the user
  before overwriting an existing file, and creates the parent directory
  chain when missing.  Aborts the process if the user declines.
  """

  # Makes sure the output file either doesn't exist or can be overwritten.
  output_file = os.path.join(output_dir, relative_path)
  if os.path.exists(output_file):
    # TODO(wan@google.com): The following user-interaction doesn't
    # work with automated processes.  We should provide a way for the
    # Makefile to force overwriting the files.
    print('%s already exists in directory %s - overwrite it? (y/N) ' %
          (relative_path, output_dir))
    answer = sys.stdin.readline().strip()
    if answer not in ['y', 'Y']:
      # Bug fix: was a Python-2-only print statement; now also valid Python 3.
      print('ABORTED.')
      sys.exit(1)

  # Makes sure the directory holding the output file exists; creates
  # it and all its ancestors if necessary.
  parent_directory = os.path.dirname(output_file)
  if not os.path.isdir(parent_directory):
    os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
  """Makes sure output_dir points to a valid output directory.

  The function aborts the program on failure.
  """

  # Each target is checked (and its parent directory created) in turn.
  for target in (GTEST_H_OUTPUT, GTEST_ALL_CC_OUTPUT):
    VerifyOutputFile(output_dir, target)
def FuseGTestH(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest.h in output_dir.

  Recursively inlines every '#include "gtest/..."' directive, copying all
  other lines verbatim.  Each header is inlined at most once.
  """

  # Bug fix: open() replaces the Python-2-only file() builtin.
  output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
  # Bug fix: the built-in set replaces sets.Set; the 'sets' module was
  # deprecated in Python 2.6 and removed in Python 3.
  processed_files = set()  # Holds all gtest headers we've processed.

  def ProcessFile(gtest_header_path):
    """Processes the given gtest header file."""

    # We don't process the same header twice.
    if gtest_header_path in processed_files:
      return

    processed_files.add(gtest_header_path)

    # Reads each line in the given gtest header.
    for line in open(os.path.join(gtest_root, gtest_header_path), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        # It's '#include "gtest/..."' - let's process it recursively.
        ProcessFile('include/' + m.group(1))
      else:
        # Otherwise we copy the line unchanged to the output file.
        output_file.write(line)

  ProcessFile(GTEST_H_SEED)
  output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.

  gtest-spi.h is inlined (it is not reachable from gtest.h); every other
  gtest header is replaced by a single '#include "gtest/gtest.h"'; files
  under src/ are inlined recursively.
  """

  # Bug fix: built-in set replaces the deprecated/removed sets.Set.
  processed_files = set()

  def ProcessFile(gtest_source_file):
    """Processes the given gtest source file."""

    # We don't process the same #included file twice.
    if gtest_source_file in processed_files:
      return

    processed_files.add(gtest_source_file)

    # Reads each line in the given gtest source file.
    # Bug fix: open() replaces the Python-2-only file() builtin.
    for line in open(os.path.join(gtest_root, gtest_source_file), 'r'):
      m = INCLUDE_GTEST_FILE_REGEX.match(line)
      if m:
        if 'include/' + m.group(1) == GTEST_SPI_H_SEED:
          # It's '#include "gtest/gtest-spi.h"'.  This file is not
          # #included by "gtest/gtest.h", so we need to process it.
          ProcessFile(GTEST_SPI_H_SEED)
        else:
          # It's '#include "gtest/foo.h"' where foo is not gtest-spi.
          # We treat it as '#include "gtest/gtest.h"', as all other
          # gtest headers are being fused into gtest.h and cannot be
          # #included directly.
          # There is no need to #include "gtest/gtest.h" more than once.
          if not GTEST_H_SEED in processed_files:
            processed_files.add(GTEST_H_SEED)
            output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
      else:
        m = INCLUDE_SRC_FILE_REGEX.match(line)
        if m:
          # It's '#include "src/foo"' - let's process it recursively.
          ProcessFile(m.group(1))
        else:
          output_file.write(line)

  ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
  """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""

  # Bug fix: open() replaces the Python-2-only file() builtin, and the
  # try/finally guarantees the handle is closed even if fusing fails.
  output_file = open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
  try:
    FuseGTestAllCcToFile(gtest_root, output_file)
  finally:
    output_file.close()
def FuseGTest(gtest_root, output_dir):
  """Fuses gtest.h and gtest-all.cc.

  Validates both directories first (each validation helper aborts the
  process on failure), then writes the two fused files into output_dir.
  """
  ValidateGTestRootDir(gtest_root)
  ValidateOutputDir(output_dir)
  FuseGTestH(gtest_root, output_dir)
  FuseGTestAllCc(gtest_root, output_dir)
def main():
  """Command-line entry point: parse [GTEST_ROOT_DIR] OUTPUT_DIR and fuse."""
  argc = len(sys.argv)
  if argc == 2:
    # fuse_gtest_files.py OUTPUT_DIR
    FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
  elif argc == 3:
    # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
    FuseGTest(sys.argv[1], sys.argv[2])
  else:
    # Bug fix: 'print __doc__' was a Python-2-only print statement.
    print(__doc__)
    sys.exit(1)
if __name__ == '__main__':
main()
| mit |
mozilla/pto | pto/apps/autocomplete/views.py | 1 | 1912 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import logging
from django import http
from pto.apps.dates.decorators import json_view
from pto.apps.users.models import UserProfile, User
from pto.apps.users.utils import ldap_lookup
@json_view
def cities(request):
    """Return the distinct, sorted city names (optionally filtered by the
    'term' query-string prefix) as a JSON list for autocompletion."""
    if not request.user.is_authenticated():
        return http.HttpResponseForbidden('Must be logged in')
    term = request.GET.get('term')
    qs = UserProfile.objects.exclude(city='')
    if term:
        qs = qs.filter(city__istartswith=term)
    rows = qs.values('city').distinct().order_by('city')
    return [row['city'] for row in rows]
@json_view
def users(request, known_only=False):
    """Return LDAP user matches for the 'term' query string as a JSON list
    of {id, label, value} dicts for autocompletion.

    When known_only is True, only users that already exist locally (matched
    by email) are returned.
    """
    if not request.user.is_authenticated():
        return http.HttpResponseForbidden('Must be logged in')
    # Bug fix: .get('term') returns None when the parameter is absent,
    # which made .strip() raise AttributeError.  Default to ''.
    query = request.GET.get('term', '').strip()
    if len(query) < 2:
        return []
    results = []
    # I chose a limit of 30 because there are about 20+ 'peter'
    # something in mozilla
    for each in ldap_lookup.search_users(query, 30, autocomplete=True):
        if not each.get('givenName'):
            # logging.warn is a deprecated alias of logging.warning.
            logging.warning("Skipping LDAP entry %s" % each)
            continue
        if known_only:
            if not User.objects.filter(email__iexact=each['mail']).exists():
                continue
        full_name_and_email = '%s %s <%s>' % (each['givenName'],
                                              each['sn'],
                                              each['mail'])
        result = {'id': each['uid'],
                  'label': full_name_and_email,
                  'value': full_name_and_email}
        results.append(result)
    return results
| mpl-2.0 |
cchurch/ansible | lib/ansible/plugins/doc_fragments/shell_common.py | 79 | 1833 | # Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Shared documentation fragment for shell plugins.

    Carries only the DOCUMENTATION string; Ansible's doc tooling merges it
    into each shell plugin's own documentation.
    """

    # Common shell documentation fragment.
    DOCUMENTATION = """
options:
  remote_tmp:
    description:
      - Temporary directory to use on targets when executing tasks.
    default: '~/.ansible/tmp'
    env: [{name: ANSIBLE_REMOTE_TEMP}, {name: ANSIBLE_REMOTE_TMP}]
    ini:
      - section: defaults
        key: remote_tmp
    vars:
      - name: ansible_remote_tmp
  system_tmpdirs:
    description:
       - "List of valid system temporary directories for Ansible to choose when it cannot use
         ``remote_tmp``, normally due to permission issues.  These must be world readable, writable,
         and executable."
    default: [ /var/tmp, /tmp ]
    type: list
    env: [{name: ANSIBLE_SYSTEM_TMPDIRS}]
    ini:
      - section: defaults
        key: system_tmpdirs
    vars:
      - name: ansible_system_tmpdirs
  async_dir:
    description:
      - Directory in which ansible will keep async job information
    default: '~/.ansible_async'
    env: [{name: ANSIBLE_ASYNC_DIR}]
    ini:
      - section: defaults
        key: async_dir
    vars:
      - name: ansible_async_dir
  environment:
    type: dict
    default: {}
    description:
      - dictionary of environment variables and their values to use when executing commands.
  admin_users:
    type: list
    default: ['root', 'toor']
    description:
      - list of users to be expected to have admin privileges. This is used by the controller to
        determine how to share temporary files between the remote user and the become user.
    env:
      - name: ANSIBLE_ADMIN_USERS
    ini:
      - section: defaults
        key: admin_users
    vars:
      - name: ansible_admin_users
"""
| gpl-3.0 |
MiLk/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_group.py | 36 | 8114 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <wayne@riotousliving.com>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_group
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower group.
description:
- Create, update, or destroy Ansible Tower groups. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- The name to use for the group.
required: True
description:
description:
- The description to use for the group.
required: False
default: null
inventory:
description:
- Inventory the group should be made a member of.
required: True
variables:
description:
- Variables to use for the group, use '@' for a file.
required: False
default: null
credential:
description:
- Credential to use for the group.
required: False
default: null
source:
description:
- The source to use for this group.
required: False
        default: null
choices: ["manual", "file", "ec2", "rax", "vmware", "gce", "azure", "azure_rm", "openstack", "satellite6" , "cloudforms", "custom"]
source_regions:
description:
- Regions for cloud provider.
required: False
default: null
source_vars:
description:
- Override variables from source with variables from this field.
required: False
default: null
instance_filters:
description:
- Comma-separated list of filter expressions for matching hosts.
required: False
default: null
group_by:
description:
- Limit groups automatically created from inventory source.
required: False
default: null
source_script:
description:
- Inventory script to be used when group type is "custom".
required: False
default: null
overwrite:
description:
        - Delete child groups and hosts not found in source.
required: False
default: False
overwrite_vars:
description:
- Override vars in child groups and hosts with those from external source.
required: False
default: null
update_on_launch:
description:
- Refresh inventory data from its source each time a job is run.
required: False
default: False
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
tower_host:
description:
- URL to your Tower instance.
required: False
default: null
tower_username:
description:
- Username for your Tower instance.
required: False
default: null
tower_password:
description:
- Password for your Tower instance.
required: False
default: null
tower_verify_ssl:
description:
- Dis/allow insecure connections to Tower. If C(no), SSL certificates will not be validated.
This should only be used on personally controlled sites using self-signed certificates.
required: False
default: True
tower_config_file:
description:
- Path to the Tower config file. See notes.
required: False
default: null
requirements:
- "python >= 2.6"
- "ansible-tower-cli >= 3.0.2"
notes:
- If no I(config_file) is provided we will attempt to use the tower-cli library
defaults to find your Tower host information.
- I(config_file) should contain Tower configuration in the following format
host=hostname
username=username
password=password
'''
EXAMPLES = '''
- name: Add tower group
tower_group:
name: localhost
description: "Local Host Group"
inventory: "Local Inventory"
state: present
tower_config_file: "~/tower_cli.cfg"
'''
try:
import os
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
from ansible.module_utils.ansible_tower import tower_auth_config, tower_check_mode
HAS_TOWER_CLI = True
except ImportError:
HAS_TOWER_CLI = False
def main():
    """Ansible entry point: create/update (state=present) or delete
    (state=absent) a Tower group via the tower-cli library."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            description=dict(),
            inventory=dict(required=True),
            variables=dict(),
            credential=dict(),
            source=dict(choices=["manual", "file", "ec2", "rax", "vmware",
                                 "gce", "azure", "azure_rm", "openstack",
                                 "satellite6", "cloudforms", "custom"], default="manual"),
            source_regions=dict(),
            source_vars=dict(),
            instance_filters=dict(),
            group_by=dict(),
            source_script=dict(),
            overwrite=dict(type='bool', default=False),
            overwrite_vars=dict(),
            update_on_launch=dict(type='bool', default=False),
            tower_host=dict(),
            tower_username=dict(),
            tower_password=dict(no_log=True),
            tower_verify_ssl=dict(type='bool', default=True),
            tower_config_file=dict(type='path'),
            state=dict(choices=['present', 'absent'], default='present'),
        ),
        supports_check_mode=True
    )

    if not HAS_TOWER_CLI:
        module.fail_json(msg='ansible-tower-cli required for this module')

    name = module.params.get('name')
    inventory = module.params.get('inventory')
    credential = module.params.get('credential')
    state = module.params.get('state')

    variables = module.params.get('variables')
    if variables:
        # A leading '@' means "load the variables from this file".
        if variables.startswith('@'):
            filename = os.path.expanduser(variables[1:])
            variables = module.contents_from_file(filename)

    json_output = {'group': name, 'state': state}

    tower_auth = tower_auth_config(module)
    with settings.runtime_values(**tower_auth):
        tower_check_mode(module)
        group = tower_cli.get_resource('group')
        try:
            params = module.params.copy()
            params['create_on_missing'] = True
            params['variables'] = variables

            # Resolve the inventory (and optional credential) names to ids.
            inv_res = tower_cli.get_resource('inventory')
            inv = inv_res.get(name=inventory)
            params['inventory'] = inv['id']

            if credential:
                cred_res = tower_cli.get_resource('credential')
                cred = cred_res.get(name=credential)
                params['credential'] = cred['id']

            if state == 'present':
                result = group.modify(**params)
                json_output['id'] = result['id']
            elif state == 'absent':
                result = group.delete(**params)
        except exc.NotFound as excinfo:
            module.fail_json(msg='Failed to update the group, inventory not found: {0}'.format(excinfo), changed=False)
        # Bug fix: exc.NotFound removed from this tuple -- it was unreachable
        # because the handler above already catches it.
        except (exc.ConnectionError, exc.BadRequest) as excinfo:
            module.fail_json(msg='Failed to update the group: {0}'.format(excinfo), changed=False)

    json_output['changed'] = result['changed']
    module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
fahhem/zerorpc-python | tests/test_middleware_client.py | 102 | 12192 | # -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import gevent
import zerorpc
from testutils import random_ipc_endpoint
class EchoModule(object):
    """Test service exposed over zerorpc: echoes, streams, crashes or stalls.

    The optional gevent event `trigger` is set whenever echo() or crash()
    completes, letting tests synchronize with the server side.
    """
    def __init__(self, trigger=None):
        self.last_msg = None
        self._trigger = trigger

    def echo(self, msg):
        """Return 'echo: <msg>' and record it in last_msg."""
        self.last_msg = "echo: " + msg
        if self._trigger:
            self._trigger.set()
        return self.last_msg

    @zerorpc.stream
    def echoes(self, msg):
        """Stream 'echo: <msg>' three times."""
        self.last_msg = "echo: " + msg
        for i in xrange(0, 3):
            yield self.last_msg

    def crash(self, msg):
        """Always raise RuntimeError('BrokenEchoModule') after recording msg."""
        try:
            self.last_msg = "raise: " + msg
            raise RuntimeError("BrokenEchoModule")
        finally:
            if self._trigger:
                self._trigger.set()

    @zerorpc.stream
    def echoes_crash(self, msg):
        """Streaming variant of crash(): raises before yielding anything."""
        self.crash(msg)

    def timeout(self, msg):
        """Sleep past the 1s client timeout used by the tests below."""
        self.last_msg = "timeout: " + msg
        gevent.sleep(2)
def test_hook_client_before_request():
    """client_before_request must fire once per call with the method name."""
    class ClientBeforeRequestMiddleware(object):
        def __init__(self):
            self.called = False

        def client_before_request(self, event):
            self.called = True
            self.method = event.name

    zero_ctx = zerorpc.Context()
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(context=zero_ctx)
    test_client.connect(endpoint)

    # Sanity check before the middleware is registered.
    assert test_client.echo("test") == "echo: test"

    test_middleware = ClientBeforeRequestMiddleware()
    zero_ctx.register_middleware(test_middleware)
    assert test_middleware.called == False
    assert test_client.echo("test") == "echo: test"
    assert test_middleware.called == True
    assert test_middleware.method == 'echo'

    test_server.stop()
    test_server_task.join()
class ClientAfterRequestMiddleware(object):
    """Records that client_after_request fired and captures the reply name.

    Shared by the 'echo'/'echoes' success-path tests below.
    """
    def __init__(self):
        self.called = False

    def client_after_request(self, req_event, rep_event, exception):
        self.called = True
        assert req_event is not None
        assert req_event.name == "echo" or req_event.name == "echoes"
        self.retcode = rep_event.name
        assert exception is None
def test_hook_client_after_request():
    """client_after_request fires with reply name 'OK' on a plain call."""
    zero_ctx = zerorpc.Context()
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(context=zero_ctx)
    test_client.connect(endpoint)

    # Sanity check before the middleware is registered.
    assert test_client.echo("test") == "echo: test"

    test_middleware = ClientAfterRequestMiddleware()
    zero_ctx.register_middleware(test_middleware)
    assert test_middleware.called == False
    assert test_client.echo("test") == "echo: test"
    assert test_middleware.called == True
    assert test_middleware.retcode == 'OK'

    test_server.stop()
    test_server_task.join()
def test_hook_client_after_request_stream():
    """For streamed calls the hook fires only once the stream is fully
    consumed, with reply name 'STREAM_DONE'."""
    zero_ctx = zerorpc.Context()
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(context=zero_ctx)
    test_client.connect(endpoint)

    # Sanity check before the middleware is registered.
    it = test_client.echoes("test")
    assert next(it) == "echo: test"
    for echo in it:
        assert echo == "echo: test"

    test_middleware = ClientAfterRequestMiddleware()
    zero_ctx.register_middleware(test_middleware)
    assert test_middleware.called == False
    it = test_client.echoes("test")
    assert next(it) == "echo: test"
    # The hook must not have fired while the stream is still open.
    assert test_middleware.called == False
    for echo in it:
        assert echo == "echo: test"
    assert test_middleware.called == True
    assert test_middleware.retcode == 'STREAM_DONE'

    test_server.stop()
    test_server_task.join()
def test_hook_client_after_request_timeout():
    """client_after_request must fire with rep_event=None on client timeout."""
    class ClientAfterRequestMiddleware(object):
        def __init__(self):
            self.called = False

        def client_after_request(self, req_event, rep_event, exception):
            self.called = True
            assert req_event is not None
            assert req_event.name == "timeout"
            assert rep_event is None

    zero_ctx = zerorpc.Context()
    test_middleware = ClientAfterRequestMiddleware()
    zero_ctx.register_middleware(test_middleware)
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(timeout=1, context=zero_ctx)
    test_client.connect(endpoint)

    assert test_middleware.called == False
    try:
        test_client.timeout("test")
    except zerorpc.TimeoutExpired as ex:
        assert test_middleware.called == True
        assert "timeout" in ex.args[0]
    else:
        # Bug fix: without this the test silently passed when no timeout
        # was raised at all.
        raise AssertionError("expected zerorpc.TimeoutExpired")
    test_server.stop()
    test_server_task.join()
class ClientAfterFailedRequestMiddleware(object):
    """Asserts that client_after_request receives the RemoteError raised by
    the 'crash'/'echoes_crash' methods along with the 'ERR' reply event."""
    def __init__(self):
        self.called = False

    def client_after_request(self, req_event, rep_event, exception):
        assert req_event is not None
        assert req_event.name == "crash" or req_event.name == "echoes_crash"
        self.called = True
        assert isinstance(exception, zerorpc.RemoteError)
        assert exception.name == 'RuntimeError'
        assert 'BrokenEchoModule' in exception.msg
        assert rep_event.name == 'ERR'
def test_hook_client_after_request_remote_error():
    """client_after_request must fire when the server raises."""
    zero_ctx = zerorpc.Context()
    test_middleware = ClientAfterFailedRequestMiddleware()
    zero_ctx.register_middleware(test_middleware)
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(timeout=1, context=zero_ctx)
    test_client.connect(endpoint)

    assert test_middleware.called == False
    try:
        test_client.crash("test")
    except zerorpc.RemoteError:
        assert test_middleware.called == True
    else:
        # Bug fix: the test used to pass silently when no error was raised.
        raise AssertionError("expected zerorpc.RemoteError")
    test_server.stop()
    test_server_task.join()
def test_hook_client_after_request_remote_error_stream():
    """Streamed variant: the hook must also fire when a streamed call raises."""
    zero_ctx = zerorpc.Context()
    test_middleware = ClientAfterFailedRequestMiddleware()
    zero_ctx.register_middleware(test_middleware)
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(timeout=1, context=zero_ctx)
    test_client.connect(endpoint)

    assert test_middleware.called == False
    try:
        test_client.echoes_crash("test")
    except zerorpc.RemoteError:
        assert test_middleware.called == True
    else:
        # Bug fix: the test used to pass silently when no error was raised.
        raise AssertionError("expected zerorpc.RemoteError")
    test_server.stop()
    test_server_task.join()
def test_hook_client_handle_remote_error_inspect():
    """client_handle_remote_error can observe errors without altering them."""
    class ClientHandleRemoteErrorMiddleware(object):
        def __init__(self):
            self.called = False

        def client_handle_remote_error(self, event):
            self.called = True

    test_middleware = ClientHandleRemoteErrorMiddleware()
    zero_ctx = zerorpc.Context()
    zero_ctx.register_middleware(test_middleware)
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(context=zero_ctx)
    test_client.connect(endpoint)

    assert test_middleware.called == False
    try:
        test_client.crash("test")
    except zerorpc.RemoteError as ex:
        assert test_middleware.called == True
        assert ex.name == "RuntimeError"
    else:
        # Bug fix: the test used to pass silently when no error was raised.
        raise AssertionError("expected zerorpc.RemoteError")
    test_server.stop()
    test_server_task.join()
# This is a seriously broken idea, but possible nonetheless
class ClientEvalRemoteErrorMiddleware(object):
    """Rebuilds the remote exception locally by eval()ing its class name.

    SECURITY NOTE: eval() on data received from the wire is unsafe; this
    exists only to exercise the client_handle_remote_error hook in tests.
    """
    def __init__(self):
        self.called = False

    def client_handle_remote_error(self, event):
        self.called = True
        # event.args carries (exception name, message, traceback text).
        name, msg, tb = event.args
        etype = eval(name)
        e = etype(tb)
        return e
def test_hook_client_handle_remote_error_eval():
    """client_handle_remote_error may replace the remote error with a
    locally reconstructed exception type."""
    test_middleware = ClientEvalRemoteErrorMiddleware()
    zero_ctx = zerorpc.Context()
    zero_ctx.register_middleware(test_middleware)
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(context=zero_ctx)
    test_client.connect(endpoint)

    assert test_middleware.called == False
    try:
        test_client.crash("test")
    except RuntimeError as ex:
        assert test_middleware.called == True
        assert "BrokenEchoModule" in ex.args[0]
    else:
        # Bug fix: the test used to pass silently when no error was raised.
        raise AssertionError("expected RuntimeError")
    test_server.stop()
    test_server_task.join()
def test_hook_client_handle_remote_error_eval_stream():
    """Streamed variant of the eval-reconstruction test."""
    test_middleware = ClientEvalRemoteErrorMiddleware()
    zero_ctx = zerorpc.Context()
    zero_ctx.register_middleware(test_middleware)
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(context=zero_ctx)
    test_client.connect(endpoint)

    assert test_middleware.called == False
    try:
        test_client.echoes_crash("test")
    except RuntimeError as ex:
        assert test_middleware.called == True
        assert "BrokenEchoModule" in ex.args[0]
    else:
        # Bug fix: the test used to pass silently when no error was raised.
        raise AssertionError("expected RuntimeError")
    test_server.stop()
    test_server_task.join()
def test_hook_client_after_request_custom_error():
    """client_after_request must see the exception produced by
    client_handle_remote_error (here a locally rebuilt RuntimeError)."""
    # This is a seriously broken idea, but possible nonetheless
    class ClientEvalInspectRemoteErrorMiddleware(object):
        def __init__(self):
            self.called = False

        def client_handle_remote_error(self, event):
            name, msg, tb = event.args
            etype = eval(name)
            e = etype(tb)
            return e

        def client_after_request(self, req_event, rep_event, exception):
            assert req_event is not None
            assert req_event.name == "crash"
            self.called = True
            assert isinstance(exception, RuntimeError)

    test_middleware = ClientEvalInspectRemoteErrorMiddleware()
    zero_ctx = zerorpc.Context()
    zero_ctx.register_middleware(test_middleware)
    endpoint = random_ipc_endpoint()
    test_server = zerorpc.Server(EchoModule(), context=zero_ctx)
    test_server.bind(endpoint)
    test_server_task = gevent.spawn(test_server.run)
    test_client = zerorpc.Client(context=zero_ctx)
    test_client.connect(endpoint)

    assert test_middleware.called == False
    try:
        test_client.crash("test")
    except RuntimeError as ex:
        assert test_middleware.called == True
        assert "BrokenEchoModule" in ex.args[0]
    else:
        # Bug fix: the test used to pass silently when no error was raised.
        raise AssertionError("expected RuntimeError")
    test_server.stop()
    test_server_task.join()
| mit |
amenonsen/ansible | lib/ansible/modules/windows/win_defrag.py | 52 | 2718 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: 2017, Dag Wieers (@dagwieers) <dag@wieers.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_defrag
version_added: '2.4'
short_description: Consolidate fragmented files on local volumes
description:
- Locates and consolidates fragmented files on local volumes to improve system performance.
- 'More information regarding C(win_defrag) is available from: U(https://technet.microsoft.com/en-us/library/cc731650(v=ws.11).aspx)'
requirements:
- defrag.exe
options:
include_volumes:
description:
- A list of drive letters or mount point paths of the volumes to be defragmented.
- If this parameter is omitted, all volumes (not excluded) will be fragmented.
type: list
exclude_volumes:
description:
- A list of drive letters or mount point paths to exclude from defragmentation.
type: list
freespace_consolidation:
description:
- Perform free space consolidation on the specified volumes.
type: bool
default: no
priority:
description:
- Run the operation at low or normal priority.
type: str
choices: [ low, normal ]
default: low
parallel:
description:
- Run the operation on each volume in parallel in the background.
type: bool
default: no
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Defragment all local volumes (in parallel)
win_defrag:
parallel: yes
- name: 'Defragment all local volumes, except C: and D:'
win_defrag:
exclude_volumes: [ C, D ]
- name: 'Defragment volume D: with normal priority'
win_defrag:
include_volumes: D
priority: normal
- name: Consolidate free space (useful when reducing volumes)
win_defrag:
freespace_consolidation: yes
'''
RETURN = r'''
cmd:
description: The complete command line used by the module.
returned: always
type: str
sample: defrag.exe /C /V
rc:
description: The return code for the command.
returned: always
type: int
sample: 0
stdout:
description: The standard output from the command.
returned: always
type: str
sample: Success.
stderr:
description: The error output from the command.
returned: always
type: str
sample:
msg:
description: Possible error message on failure.
returned: failed
type: str
sample: Command 'defrag.exe' not found in $env:PATH.
changed:
description: Whether or not any changes were made.
returned: always
type: bool
sample: true
'''
| gpl-3.0 |
ajylee/gpaw-rtxs | gpaw/test/diamond_gllb.py | 1 | 2143 | from ase.structure import bulk
from sys import argv
from ase.dft.kpoints import ibz_points, get_bandpath
from gpaw import *
from ase import *
from gpaw.test import gen
from gpaw import setup_paths
import os
"""This calculation has the following structure.
1) Calculate the ground state of Diamond.
2) Calculate the band structure of diamond in order to obtain accurate KS band gap for Diamond.
3) Calculate ground state again, and calculate the potential discontinuity using accurate band gap.
4) Calculate band structure again, and apply the discontinuity to CBM.
Compare to reference.
"""
xc = 'GLLBSC'
# Generate a GLLBSC PAW setup for carbon and make the current directory
# searchable so GPAW can find it.
gen('C',xcname=xc)
setup_paths.insert(0, '.')
# Step 1: calculate the ground state of diamond.
atoms = bulk('C', 'diamond', a=3.567)
calc = GPAW(h=0.15, kpts=(4,4,4), xc=xc, nbands = 6, eigensolver='cg')
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Cgs.gpw')
# Step 2: calculate an accurate KS band gap from the band structure.
points = ibz_points['fcc']
# CBM (conduction band minimum) lies on the Gamma-X line.
G = points['Gamma']
X = points['X']
#W = points['W']
#K = points['K']
#L = points['L']
#[W, L, G, X, W, K]
# NOTE: this rebinds X from a k-point to the band-path label positions.
kpts, x, X = get_bandpath([G, X], atoms.cell, npoints=12)
calc = GPAW('Cgs.gpw', kpts=kpts, fixdensity=True, usesymm=None, convergence=dict(bands=6))
calc.get_atoms().get_potential_energy()
# Get the accurate KS-band gap (converted to eV below via the 27.2 factor).
homolumo = calc.occupations.get_homo_lumo(calc.wfs)
homo, lumo = homolumo
print "band gap ",(lumo-homo)*27.2
# Step 3: redo the ground state calculation ...
calc = GPAW(h=0.15, kpts=(4,4,4), xc=xc, nbands = 6, eigensolver='cg')
atoms.set_calculator(calc)
atoms.get_potential_energy()
# ... and calculate the discontinuity potential with the accurate band gap.
response = calc.hamiltonian.xc.xcs['RESPONSE']
response.calculate_delta_xc(homolumo=homolumo)
calc.write('CGLLBSC.gpw')
# Step 4: redo the band structure calculation and apply the discontinuity.
atoms, calc = restart('CGLLBSC.gpw', kpts=kpts, fixdensity=True, usesymm=None, convergence=dict(bands=6))
atoms.get_potential_energy()
response = calc.hamiltonian.xc.xcs['RESPONSE']
KS, dxc = response.calculate_delta_xc_perturbation()
# Reference: M. Kuisma et al., Phys. Rev. B 82, 115106 -- QP gap for C:
# 5.41 eV (expt. 5.48 eV).
assert abs(KS+dxc-5.41)<0.10
| gpl-3.0 |
marcosmodesto/django-testapp | django/django/contrib/admin/filters.py | 52 | 16279 | """
This encapsulates the logic for displaying filters in the Django admin.
Filters are specified in models with the "list_filter" option.
Each filter subclass knows how to display a filter for a field that passes a
certain test -- e.g. being a DateField or ForeignKey.
"""
import datetime
from django.db import models
from django.core.exceptions import ImproperlyConfigured, ValidationError
from django.utils.encoding import smart_unicode, force_unicode
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.contrib.admin.util import (get_model_from_relation,
reverse_field_path, get_limit_choices_to_from_path, prepare_lookup_value)
from django.contrib.admin.options import IncorrectLookupParameters
class ListFilter(object):
    """Base class for admin changelist filters (the right-hand sidebar).

    Subclasses must set `title` and implement has_output(), choices(),
    queryset() and expected_parameters().
    """
    title = None # Human-readable title to appear in the right sidebar.
    template = 'admin/filter.html'

    def __init__(self, request, params, model, model_admin):
        # This dictionary will eventually contain the request's query string
        # parameters actually used by this filter.
        self.used_parameters = {}
        if self.title is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'title'." % self.__class__.__name__)

    def has_output(self):
        """
        Returns True if some choices would be output for this filter.
        """
        raise NotImplementedError

    def choices(self, cl):
        """
        Returns choices ready to be output in the template.
        """
        raise NotImplementedError

    def queryset(self, request, queryset):
        """
        Returns the filtered queryset.
        """
        raise NotImplementedError

    def expected_parameters(self):
        """
        Returns the list of parameter names that are expected from the
        request's query string and that will be used by this filter.
        """
        raise NotImplementedError
class SimpleListFilter(ListFilter):
    """Convenience filter driven by one query-string parameter and a
    developer-supplied ``lookups()`` list."""
    # The parameter that should be used in the query string for that filter.
    parameter_name = None
    def __init__(self, request, params, model, model_admin):
        super(SimpleListFilter, self).__init__(
            request, params, model, model_admin)
        if self.parameter_name is None:
            raise ImproperlyConfigured(
                "The list filter '%s' does not specify "
                "a 'parameter_name'." % self.__class__.__name__)
        # A subclass may return None from lookups(); normalize to a list.
        lookups = self.lookups(request, model_admin)
        self.lookup_choices = list(lookups) if lookups is not None else []
        if self.parameter_name in params:
            self.used_parameters[self.parameter_name] = params.pop(
                self.parameter_name)
    def has_output(self):
        return bool(self.lookup_choices)
    def value(self):
        """
        Return the (string) value provided in the request's query string
        for this filter, or None if no value was provided.
        """
        return self.used_parameters.get(self.parameter_name, None)
    def lookups(self, request, model_admin):
        """Must be overridden to return a list of (value, verbose value)."""
        raise NotImplementedError
    def expected_parameters(self):
        return [self.parameter_name]
    def choices(self, cl):
        # Leading 'All' entry clears this filter's parameter.
        yield {
            'selected': self.value() is None,
            'query_string': cl.get_query_string({}, [self.parameter_name]),
            'display': _('All'),
        }
        for lookup, title in self.lookup_choices:
            yield {
                'selected': self.value() == force_unicode(lookup),
                'query_string': cl.get_query_string({
                    self.parameter_name: lookup,
                }, []),
                'display': title,
            }
class FieldListFilter(ListFilter):
    """Base class for filters bound to a concrete model field.

    Subclasses register themselves via :meth:`register` with a predicate;
    :meth:`create` walks the registry and instantiates the first filter
    class whose predicate accepts the field.
    """
    # Registry of (predicate, filter_class) pairs, scanned in order by create().
    _field_list_filters = []
    # Insertion point for filters registered with take_priority=True.
    _take_priority_index = 0
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field = field
        self.field_path = field_path
        # Fall back to the raw field path when the field has no verbose_name
        # (e.g. reverse relations).
        self.title = getattr(field, 'verbose_name', field_path)
        super(FieldListFilter, self).__init__(
            request, params, model, model_admin)
        # Consume this filter's parameters from the query string, coercing
        # raw values (e.g. 'True'/'False' strings for __isnull lookups).
        for p in self.expected_parameters():
            if p in params:
                value = params.pop(p)
                self.used_parameters[p] = prepare_lookup_value(p, value)
    def has_output(self):
        # A field-bound filter always renders at least the 'All' choice.
        return True
    def queryset(self, request, queryset):
        try:
            return queryset.filter(**self.used_parameters)
        except ValidationError, e:
            # Invalid user-supplied lookup values surface as a changelist
            # error rather than a server crash.
            raise IncorrectLookupParameters(e)
    @classmethod
    def register(cls, test, list_filter_class, take_priority=False):
        if take_priority:
            # This is to allow overriding the default filters for certain types
            # of fields with some custom filters. The first found in the list
            # is used in priority.
            cls._field_list_filters.insert(
                cls._take_priority_index, (test, list_filter_class))
            cls._take_priority_index += 1
        else:
            cls._field_list_filters.append((test, list_filter_class))
    @classmethod
    def create(cls, field, request, params, model, model_admin, field_path):
        # Instantiate the first registered filter whose predicate matches;
        # implicitly returns None when nothing matches.
        for test, list_filter_class in cls._field_list_filters:
            if not test(field):
                continue
            return list_filter_class(field, request, params,
                model, model_admin, field_path=field_path)
class RelatedFieldListFilter(FieldListFilter):
    """Sidebar filter for relational fields (FK/M2M and reverse relations)."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        other_model = get_model_from_relation(field)
        # Forward relations filter on the related field's name; reverse
        # relations (no 'rel') fall back to the target model's primary key.
        if hasattr(field, 'rel'):
            rel_name = field.rel.get_related_field().name
        else:
            rel_name = other_model._meta.pk.name
        self.lookup_kwarg = '%s__%s__exact' % (field_path, rel_name)
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(
            self.lookup_kwarg_isnull, None)
        self.lookup_choices = field.get_choices(include_blank=False)
        super(RelatedFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
        if hasattr(field, 'verbose_name'):
            self.lookup_title = field.verbose_name
        else:
            self.lookup_title = other_model._meta.verbose_name
        self.title = self.lookup_title
    def has_output(self):
        # Nullable relations render an extra EMPTY choice, so a single real
        # choice is then enough to make the filter useful.
        if (isinstance(self.field, models.related.RelatedObject)
            and self.field.field.null or hasattr(self.field, 'rel')
            and self.field.null):
            extra = 1
        else:
            extra = 0
        return len(self.lookup_choices) + extra > 1
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]
    def choices(self, cl):
        # Imported here to avoid a circular import at module load time.
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        yield {
            'selected': self.lookup_val is None and not self.lookup_val_isnull,
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        for pk_val, val in self.lookup_choices:
            yield {
                'selected': self.lookup_val == smart_unicode(pk_val),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: pk_val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        # Same nullability condition as has_output(): offer an 'empty' choice.
        if (isinstance(self.field, models.related.RelatedObject)
            and self.field.field.null or hasattr(self.field, 'rel')
            and self.field.null):
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': EMPTY_CHANGELIST_VALUE,
            }
FieldListFilter.register(lambda f: (
    hasattr(f, 'rel') and bool(f.rel) or
    isinstance(f, models.related.RelatedObject)), RelatedFieldListFilter)
class BooleanFieldListFilter(FieldListFilter):
    """Sidebar filter for BooleanField / NullBooleanField columns."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_kwarg2 = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val2 = request.GET.get(self.lookup_kwarg2, None)
        super(BooleanFieldListFilter, self).__init__(field,
            request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg2]
    def choices(self, cl):
        # 'All' clears the filter; '1'/'0' select True/False rows.
        options = ((None, _('All')), ('1', _('Yes')), ('0', _('No')))
        for value, label in options:
            yield {
                'selected': self.lookup_val == value and not self.lookup_val2,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: value,
                }, [self.lookup_kwarg2]),
                'display': label,
            }
        # NullBooleanField additionally offers an 'Unknown' (NULL) choice.
        if isinstance(self.field, models.NullBooleanField):
            yield {
                'selected': self.lookup_val2 == 'True',
                'query_string': cl.get_query_string({
                    self.lookup_kwarg2: 'True',
                }, [self.lookup_kwarg]),
                'display': _('Unknown'),
            }
FieldListFilter.register(lambda f: isinstance(f,
    (models.BooleanField, models.NullBooleanField)), BooleanFieldListFilter)
class ChoicesFieldListFilter(FieldListFilter):
    """Sidebar filter for fields declared with a static ``choices`` list."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = '%s__exact' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg)
        super(ChoicesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg]
    def choices(self, cl):
        # Leading entry resets the filter.
        yield {
            'selected': self.lookup_val is None,
            'query_string': cl.get_query_string({}, [self.lookup_kwarg]),
            'display': _('All')
        }
        # One entry per declared choice (flatchoices flattens optgroups).
        for value, label in self.field.flatchoices:
            yield {
                'selected': smart_unicode(value) == self.lookup_val,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: value}),
                'display': label,
            }
FieldListFilter.register(lambda f: bool(f.choices), ChoicesFieldListFilter)
class DateFieldListFilter(FieldListFilter):
    """Sidebar filter offering fixed date ranges (today, past 7 days, ...)
    for DateField / DateTimeField columns."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.field_generic = '%s__' % field_path
        # Keep every query-string param targeting this field so a selected
        # range can be recognized in choices() by dict equality.
        self.date_params = dict([(k, v) for k, v in params.items()
            if k.startswith(self.field_generic)])
        now = timezone.now()
        # When time zone support is enabled, convert "now" to the user's time
        # zone so Django's definition of "Today" matches what the user expects.
        if now.tzinfo is not None:
            current_tz = timezone.get_current_timezone()
            now = now.astimezone(current_tz)
            if hasattr(current_tz, 'normalize'):
                # available for pytz time zones
                now = current_tz.normalize(now)
        if isinstance(field, models.DateTimeField):
            today = now.replace(hour=0, minute=0, second=0, microsecond=0)
        else: # field is a models.DateField
            today = now.date()
        tomorrow = today + datetime.timedelta(days=1)
        self.lookup_kwarg_since = '%s__gte' % field_path
        self.lookup_kwarg_until = '%s__lt' % field_path
        # Each link is (label, {since: ..., until: ...}); ranges are
        # half-open [since, until) so 'until' is always tomorrow.
        self.links = (
            (_('Any date'), {}),
            (_('Today'), {
                self.lookup_kwarg_since: str(today),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('Past 7 days'), {
                self.lookup_kwarg_since: str(today - datetime.timedelta(days=7)),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('This month'), {
                self.lookup_kwarg_since: str(today.replace(day=1)),
                self.lookup_kwarg_until: str(tomorrow),
            }),
            (_('This year'), {
                self.lookup_kwarg_since: str(today.replace(month=1, day=1)),
                self.lookup_kwarg_until: str(tomorrow),
            }),
        )
        super(DateFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg_since, self.lookup_kwarg_until]
    def choices(self, cl):
        for title, param_dict in self.links:
            yield {
                'selected': self.date_params == param_dict,
                'query_string': cl.get_query_string(
                    param_dict, [self.field_generic]),
                'display': title,
            }
FieldListFilter.register(
    lambda f: isinstance(f, models.DateField), DateFieldListFilter)
# This should be registered last, because it's a last resort. For example,
# if a field is eligible to use the BooleanFieldListFilter, that'd be much
# more appropriate, and the AllValuesFieldListFilter won't get used for it.
class AllValuesFieldListFilter(FieldListFilter):
    """Fallback sidebar filter listing every distinct value of the field."""
    def __init__(self, field, request, params, model, model_admin, field_path):
        self.lookup_kwarg = field_path
        self.lookup_kwarg_isnull = '%s__isnull' % field_path
        self.lookup_val = request.GET.get(self.lookup_kwarg, None)
        self.lookup_val_isnull = request.GET.get(self.lookup_kwarg_isnull,
            None)
        parent_model, reverse_path = reverse_field_path(model, field_path)
        queryset = parent_model._default_manager.all()
        # optional feature: limit choices base on existing relationships
        # queryset = queryset.complex_filter(
        #    {'%s__isnull' % reverse_path: False})
        limit_choices_to = get_limit_choices_to_from_path(model, field_path)
        queryset = queryset.filter(limit_choices_to)
        # Lazily evaluated: the DISTINCT query only runs when choices()
        # iterates over it.
        self.lookup_choices = (queryset
                               .distinct()
                               .order_by(field.name)
                               .values_list(field.name, flat=True))
        super(AllValuesFieldListFilter, self).__init__(
            field, request, params, model, model_admin, field_path)
    def expected_parameters(self):
        return [self.lookup_kwarg, self.lookup_kwarg_isnull]
    def choices(self, cl):
        # Imported here to avoid a circular import at module load time.
        from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
        yield {
            'selected': (self.lookup_val is None
                and self.lookup_val_isnull is None),
            'query_string': cl.get_query_string({},
                [self.lookup_kwarg, self.lookup_kwarg_isnull]),
            'display': _('All'),
        }
        include_none = False
        for val in self.lookup_choices:
            # NULL values are collapsed into one trailing EMPTY choice.
            if val is None:
                include_none = True
                continue
            val = smart_unicode(val)
            yield {
                'selected': self.lookup_val == val,
                'query_string': cl.get_query_string({
                    self.lookup_kwarg: val,
                }, [self.lookup_kwarg_isnull]),
                'display': val,
            }
        if include_none:
            yield {
                'selected': bool(self.lookup_val_isnull),
                'query_string': cl.get_query_string({
                    self.lookup_kwarg_isnull: 'True',
                }, [self.lookup_kwarg]),
                'display': EMPTY_CHANGELIST_VALUE,
            }
FieldListFilter.register(lambda f: True, AllValuesFieldListFilter)
| bsd-3-clause |
retomerz/intellij-community | python/lib/Lib/site-packages/django/templatetags/l10n.py | 247 | 1845 | from django.conf import settings
from django.template import Node
from django.template import TemplateSyntaxError, Library
from django.utils import formats
from django.utils.encoding import force_unicode
register = Library()
def localize(value):
    """
    Template filter forcing localized rendering of ``value``, no matter
    what ``settings.USE_L10N`` is set to.
    """
    localized = formats.localize(value, use_l10n=True)
    return force_unicode(localized)
localize.is_safe = False
def unlocalize(value):
    """
    Template filter forcing non-localized rendering of ``value``, no matter
    what ``settings.USE_L10N`` is set to.
    """
    return force_unicode(value)
unlocalize.is_safe = False
class LocalizeNode(Node):
    """Template node rendering its children with ``use_l10n`` temporarily
    forced on or off."""
    def __init__(self, nodelist, use_l10n):
        self.nodelist = nodelist
        self.use_l10n = use_l10n
    def __repr__(self):
        return "<LocalizeNode>"
    def render(self, context):
        # Flip the context flag, render the enclosed nodes, then restore it.
        saved = context.use_l10n
        context.use_l10n = self.use_l10n
        rendered = self.nodelist.render(context)
        context.use_l10n = saved
        return rendered
@register.tag('localize')
def localize_tag(parser, token):
    """
    Forces or prevents localization of values, regardless of the value of
    `settings.USE_L10N`.

    Sample usage::

        {% localize off %}
            var pi = {{ 3.1415 }};
        {% endlocalize %}
    """
    bits = list(token.split_contents())
    # Bare {% localize %} turns localization on; otherwise exactly one
    # 'on'/'off' argument is accepted.
    if len(bits) == 1:
        use_l10n = True
    elif len(bits) == 2 and bits[1] in ('on', 'off'):
        use_l10n = (bits[1] == 'on')
    else:
        raise TemplateSyntaxError("%r argument should be 'on' or 'off'" % bits[0])
    nodelist = parser.parse(('endlocalize',))
    parser.delete_first_token()
    return LocalizeNode(nodelist, use_l10n)
register.filter(localize)
register.filter(unlocalize)
| apache-2.0 |
mvaled/OpenUpgrade | openerp/addons/base/ir/ir_model.py | 3 | 62576 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.openupgrade import openupgrade_log, openupgrade
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
    # Selection provider for ir.model.fields.ttype: every concrete,
    # non-deprecated column class from openerp.osv.fields, excluding the
    # abstract base `_column` and computed `function` columns. Returns a
    # sorted list of (name, name) pairs.
    # Avoid too many nested `if`s below, as RedHat's Python 2.6
    # break on it. See bug 939653.
    return sorted([(k,k) for k,v in fields.__dict__.iteritems()
                        if type(v) == types.TypeType and \
                            issubclass(v, fields._column) and \
                            v != fields._column and \
                            not v._deprecated and \
                            not issubclass(v, fields.function)])
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
    #pseudo-method used by fields.function in ir.model/ir.model.fields
    # Computes, per record, a comma-separated sorted list of *installed*
    # modules whose XML IDs reference the record (module name is the part
    # of the XML ID before the dot).
    module_pool = self.pool["ir.module.module"]
    installed_module_ids = module_pool.search(cr, uid, [('state','=','installed')])
    installed_module_names = module_pool.read(cr, uid, installed_module_ids, ['name'], context=context)
    installed_modules = set(x['name'] for x in installed_module_names)
    result = {}
    xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
    for k,v in xml_ids.iteritems():
        result[k] = ', '.join(sorted(installed_modules & set(xml_id.split('.')[0] for xml_id in v)))
    return result
class unknown(models.AbstractModel):
    """
    Placeholder abstract model substituted for relational fields whose
    comodel cannot be resolved.
    """
    _name = '_unknown'
class ir_model(osv.osv):
    """Registry of ORM models (OpenUpgrade variant: table drops disabled)."""
    _name = 'ir.model'
    _description = "Models"
    _order = 'model'
    def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
        # fields.function getter: True when the model is a TransientModel.
        # Records whose model is missing from the registry keep None.
        models = self.browse(cr, uid, ids, context=context)
        res = dict.fromkeys(ids)
        for model in models:
            if model.model in self.pool:
                res[model.id] = self.pool[model.model].is_transient()
            else:
                _logger.error('Missing model %s' % (model.model, ))
        return res
    def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
        # fields.function search: only '=' / '!=' are supported; evaluates
        # transient-ness for all models and returns a domain on ids.
        if not domain:
            return []
        __, operator, value = domain[0]
        if operator not in ['=', '!=']:
            raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
        value = bool(value) if operator == '=' else not bool(value)
        all_model_ids = self.search(cr, uid, [], context=context)
        is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
        return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]
    def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
        # fields.function getter: ids of ir.ui.view records for each model.
        models = self.browse(cr, uid, ids)
        res = {}
        for model in models:
            res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
        return res
    def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
        # fields.function getter: ir.model ids of the models listed in the
        # model's _inherits mapping (delegation inheritance parents).
        res = {}
        for model in self.browse(cr, uid, ids, context=context):
            res[model.id] = []
            inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
            if inherited_models:
                res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
        return res
    _columns = {
        'name': fields.char('Model Description', translate=True, required=True),
        'model': fields.char('Model', required=True, select=1),
        'info': fields.text('Information'),
        'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
        'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
            help="The list of models that extends the current model."),
        'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
        'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
        'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
            fnct_search=_search_osv_memory,
            help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
        'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
        'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
    }
    _defaults = {
        'model': 'x_',
        # Records created from the UI (context['manual']) are custom models.
        'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
    }
    def _check_model_name(self, cr, uid, ids, context=None):
        # Custom models must be named x_... with only word chars and dots.
        for model in self.browse(cr, uid, ids, context=context):
            if model.state=='manual':
                if not model.model.startswith('x_'):
                    return False
            if not re.match('^[a-z_A-Z0-9.]+$',model.model):
                return False
        return True
    def _model_name_msg(self, cr, uid, ids, context=None):
        return _('The Object name must start with x_ and not contain any special character !')
    _constraints = [
        (_check_model_name, _model_name_msg, ['model']),
    ]
    _sql_constraints = [
        ('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
    ]
    # overridden to allow searching both on model name (model field)
    # and model description (name field)
    def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
        if args is None:
            args = []
        domain = args + ['|', ('model', operator, name), ('name', operator, name)]
        return self.name_get(cr, name_get_uid or uid,
            super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
            context=context)
    def _drop_table(self, cr, uid, ids, context=None):
        # OpenUpgrade deliberately disables the table/view drop: the
        # `continue` below makes the rest of the loop body unreachable,
        # keeping upstream code for reference. Do not "fix" this.
        for model in self.browse(cr, uid, ids, context):
            # OpenUpgrade: do not run the new table cleanup
            openupgrade.message(
                cr, 'Unknown', False, False,
                "Not dropping the table or view of model %s", model.model)
            continue
            model_pool = self.pool[model.model]
            cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
            result = cr.fetchone()
            if result and result[0] == 'v':
                cr.execute('DROP view %s' % (model_pool._table,))
            elif result and result[0] == 'r':
                cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
        return True
    def unlink(self, cr, user, ids, context=None):
        # Prevent manual deletion of module tables
        if context is None: context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not context.get(MODULE_UNINSTALL_FLAG):
            for model in self.browse(cr, user, ids, context):
                if model.state != 'manual':
                    raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))
        self._drop_table(cr, user, ids, context)
        res = super(ir_model, self).unlink(cr, user, ids, context)
        if not context.get(MODULE_UNINSTALL_FLAG):
            # only reload pool for normal unlink. For module uninstall the
            # reload is done independently in openerp.modules.loading
            cr.commit() # must be committed before reloading registry in new cursor
            api.Environment.reset()
            RegistryManager.new(cr.dbname)
            RegistryManager.signal_registry_change(cr.dbname)
        return res
    def write(self, cr, user, ids, vals, context=None):
        if context:
            context = dict(context)
            context.pop('__last_update', None)
        # Filter out operations 4 link from field id, because openerp-web
        # always write (4,id,False) even for non dirty items
        if 'field_id' in vals:
            vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
        return super(ir_model,self).write(cr, user, ids, vals, context)
    def create(self, cr, user, vals, context=None):
        if context is None:
            context = {}
        if context and context.get('manual'):
            vals['state']='manual'
        res = super(ir_model,self).create(cr, user, vals, context)
        if vals.get('state','base')=='manual':
            # add model in registry
            self.instanciate(cr, user, vals['model'], context)
            self.pool.setup_models(cr, partial=(not self.pool.ready))
            # update database schema
            model = self.pool[vals['model']]
            ctx = dict(context,
                field_name=vals['name'],
                field_state='manual',
                select=vals.get('select_level', '0'),
                update_custom_fields=True)
            model._auto_init(cr, ctx)
            model._auto_end(cr, ctx) # actually create FKs!
            RegistryManager.signal_registry_change(cr.dbname)
        return res
    def instanciate(self, cr, user, model, context=None):
        # Build and register a new in-memory Model class for a custom model.
        if isinstance(model, unicode):
            model = model.encode('utf-8')
        class CustomModel(models.Model):
            _name = model
            _module = False
            _custom = True
        CustomModel._build_model(self.pool, cr)
class ir_model_fields(osv.osv):
_name = 'ir.model.fields'
_description = "Fields"
_rec_name = 'field_description'
_columns = {
'name': fields.char('Name', required=True, select=1),
'complete_name': fields.char('Complete Name', select=1),
'model': fields.char('Object Name', required=True, select=1,
help="The technical name of the model this field belongs to"),
'relation': fields.char('Object Relation',
help="For relationship fields, the technical name of the target model"),
'relation_field': fields.char('Relation Field',
help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
help="The model this field belongs to"),
'field_description': fields.char('Field Label', required=True),
'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
'selection': fields.char('Selection Options', help="List of options for a selection field, "
"specified as a Python expression defining a list of (key, label) pairs. "
"For example: [('blue','Blue'),('yellow','Yellow')]"),
'required': fields.boolean('Required'),
'readonly': fields.boolean('Readonly'),
'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
'size': fields.integer('Size'),
'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
'On Delete', help='On delete property for many2one fields'),
'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
"specified as a Python expression defining a list of triplets. "
"For example: [('color','=','red')]"),
'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
'selectable': fields.boolean('Selectable'),
'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
ondelete='cascade', help="If set, this field will be stored in the sparse "
"structure of the serialization field, instead "
"of having its own database column. This cannot be "
"changed after creation."),
}
_rec_name='field_description'
_defaults = {
'selection': "",
'domain': "[]",
'name': 'x_',
'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
'on_delete': 'set null',
'select_level': '0',
'field_description': '',
'selectable': 1,
}
_order = "name"
def _check_selection(self, cr, uid, selection, context=None):
try:
selection_list = eval(selection)
except Exception:
_logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
raise except_orm(_('Error'),
_("The Selection Options expression is not a valid Pythonic expression."
"Please provide an expression in the [('key','Label'), ...] format."))
check = True
if not (isinstance(selection_list, list) and selection_list):
check = False
else:
for item in selection_list:
if not (isinstance(item, (tuple,list)) and len(item) == 2):
check = False
break
if not check:
raise except_orm(_('Error'),
_("The Selection Options expression is must be in the [('key','Label'), ...] format!"))
return True
    def _size_gt_zero_msg(self, cr, user, ids, context=None):
        # Error message for the size_gt_zero SQL constraint below.
        return _('Size of the field can never be less than 0 !')
    _sql_constraints = [
        ('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
    ]
    def _drop_column(self, cr, uid, ids, context=None):
        # OpenUpgrade deliberately disables the column drop: the `continue`
        # below makes the rest of the loop body unreachable, keeping the
        # upstream code for reference. Do not "fix" this.
        for field in self.browse(cr, uid, ids, context):
            if field.name in MAGIC_COLUMNS:
                continue
            # OpenUpgrade: do not run the new column cleanup
            openupgrade.message(
                cr, 'Unknown', False, False,
                "Not dropping the column of field %s of model %s", field.name, field.model)
            continue
            model = self.pool[field.model]
            cr.execute('select relkind from pg_class where relname=%s', (model._table,))
            result = cr.fetchone()
            cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name ='%s' and column_name='%s'" %(model._table, field.name))
            column_name = cr.fetchone()
            if column_name and (result and result[0] == 'r'):
                cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
            # remove m2m relation table for custom fields
            # we consider the m2m relation is only one way as it's not possible
            # to specify the relation table in the interface for custom fields
            # TODO master: maybe use ir.model.relations for custom fields
            if field.state == 'manual' and field.ttype == 'many2many':
                rel_name = model._fields[field.name].relation
                cr.execute('DROP table "%s"' % (rel_name))
            model._pop_field(field.name)
        return True
    def unlink(self, cr, user, ids, context=None):
        # Prevent manual deletion of module columns
        if context is None: context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        # Only custom ('manual') fields may be deleted outside of a module
        # uninstall (flagged by MODULE_UNINSTALL_FLAG in the context).
        if not context.get(MODULE_UNINSTALL_FLAG) and \
                any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
            raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))
        self._drop_column(cr, user, ids, context)
        res = super(ir_model_fields, self).unlink(cr, user, ids, context)
        if not context.get(MODULE_UNINSTALL_FLAG):
            # The field we just deleted might have be inherited, and registry is
            # inconsistent in this case; therefore we reload the registry.
            cr.commit()
            api.Environment.reset()
            RegistryManager.new(cr.dbname)
            RegistryManager.signal_registry_change(cr.dbname)
        return res
    def create(self, cr, user, vals, context=None):
        # Denormalize the model's technical name from model_id.
        if 'model_id' in vals:
            model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
            vals['model'] = model_data.model
        if context is None:
            context = {}
        # Fields created from the UI (context['manual']) are custom fields.
        if context and context.get('manual',False):
            vals['state'] = 'manual'
        if vals.get('ttype', False) == 'selection':
            if not vals.get('selection',False):
                raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
            self._check_selection(cr, user, vals['selection'], context=context)
        res = super(ir_model_fields,self).create(cr, user, vals, context)
        if vals.get('state','base') == 'manual':
            # Custom fields: enforce the x_ prefix, validate the comodel,
            # then rebuild the model class and update the DB schema.
            if not vals['name'].startswith('x_'):
                raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))
            if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
                raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])
            if vals['model'] in self.pool:
                model = self.pool[vals['model']]
                if vals['model'].startswith('x_') and vals['name'] == 'x_name':
                    model._rec_name = 'x_name'
                self.pool.clear_manual_fields()
                # re-initialize model in registry
                model.__init__(self.pool, cr)
                self.pool.setup_models(cr, partial=(not self.pool.ready))
                # update database schema
                model = self.pool[vals['model']]
                ctx = dict(context,
                    field_name=vals['name'],
                    field_state='manual',
                    select=vals.get('select_level', '0'),
                    update_custom_fields=True)
                model._auto_init(cr, ctx)
                model._auto_end(cr, ctx) # actually create FKs!
                RegistryManager.signal_registry_change(cr.dbname)
        return res
    def write(self, cr, user, ids, vals, context=None):
        """Write on ``ir.model.fields`` records.

        Only "manual" (custom) fields may be altered this way. Supports:
        - renaming at most *one* column per call (ALTER TABLE ... RENAME),
        - patching a whitelist of field properties (see ``model_props``),
        after which the registry is re-set-up and the schema re-synced.
        Forbidden changes (model, ttype, sparse/serialized storage) raise
        ``except_orm``.
        """
        if context is None:
            context = {}
        if context and context.get('manual',False):
            vals['state'] = 'manual'

        #For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
        if 'serialization_field_id' in vals or 'name' in vals:
            for field in self.browse(cr, user, ids, context=context):
                if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
                    raise except_orm(_('Error!'),  _('Changing the storing system for field "%s" is not allowed.')%field.name)
                if field.serialization_field_id and (field.name != vals['name']):
                    raise except_orm(_('Error!'),  _('Renaming sparse field "%s" is not allowed')%field.name)

        # if set, *one* column can be renamed here
        column_rename = None

        # field patches {model: {field_name: {prop_name: prop_value, ...}, ...}, ...}
        patches = defaultdict(lambda: defaultdict(dict))

        # static table of properties
        model_props = [ # (our-name, fields.prop, set_fn)
            ('field_description', 'string', tools.ustr),
            ('required', 'required', bool),
            ('readonly', 'readonly', bool),
            ('domain', 'domain', eval),
            ('size', 'size', int),
            ('on_delete', 'ondelete', str),
            ('translate', 'translate', bool),
            ('select_level', 'index', lambda x: bool(int(x))),
            ('selection', 'selection', eval),
        ]

        if vals and ids:
            checked_selection = False # need only check it once, so defer

            for item in self.browse(cr, user, ids, context=context):
                obj = self.pool.get(item.model)
                field = getattr(obj, '_fields', {}).get(item.name)

                if item.state != 'manual':
                    raise except_orm(_('Error!'),
                        _('Properties of base fields cannot be altered in this manner! '
                          'Please modify them through Python code, '
                          'preferably through a custom addon!'))

                if item.ttype == 'selection' and 'selection' in vals \
                        and not checked_selection:
                    self._check_selection(cr, user, vals['selection'], context=context)
                    checked_selection = True

                final_name = item.name
                if 'name' in vals and vals['name'] != item.name:
                    # We need to rename the column
                    if column_rename:
                        raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
                    if vals['name'] in obj._columns:
                        raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
                    if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
                        raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
                    # guard against SQL injection since the name is
                    # interpolated into an ALTER TABLE statement below
                    if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
                        raise ValueError('Invalid character in column name')
                    column_rename = (obj, (obj._table, item.name, vals['name']))
                    final_name = vals['name']

                if 'model_id' in vals and vals['model_id'] != item.model_id.id:
                    raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))

                if 'ttype' in vals and vals['ttype'] != item.ttype:
                    raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
                                "Please drop it and create it again!"))

                # We don't check the 'state', because it might come from the context
                # (thus be set for multiple fields) and will be ignored anyway.
                if obj is not None and field is not None:
                    # find out which properties (per model) we need to update
                    for field_name, prop_name, func in model_props:
                        if field_name in vals:
                            prop_value = func(vals[field_name])
                            if getattr(field, prop_name) != prop_value:
                                patches[obj][final_name][prop_name] = prop_value

        # These shall never be written (modified)
        for column_name in ('model_id', 'model', 'state'):
            if column_name in vals:
                del vals[column_name]

        res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)

        if column_rename:
            obj, rename = column_rename
            cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % rename)
            # This is VERY risky, but let us have this feature:
            # we want to change the key of field in obj._fields and obj._columns
            field = obj._pop_field(rename[1])
            obj._add_field(rename[2], field)
            self.pool.setup_models(cr, partial=(not self.pool.ready))

        if patches:
            # We have to update _columns of the model(s) and then call their
            # _auto_init to sync the db with the model. Hopefully, since write()
            # was called earlier, they will be in-sync before the _auto_init.
            # Anything we don't update in _columns now will be reset from
            # the model into ir.model.fields (db).
            ctx = dict(context,
                select=vals.get('select_level', '0'),
                update_custom_fields=True,
            )

            for obj, model_patches in patches.iteritems():
                for field_name, field_patches in model_patches.iteritems():
                    # update field properties, and adapt corresponding column
                    field = obj._fields[field_name]
                    attrs = dict(field._attrs, **field_patches)
                    obj._add_field(field_name, field.new(**attrs))

                # update database schema
                self.pool.setup_models(cr, partial=(not self.pool.ready))
                obj._auto_init(cr, ctx)
                obj._auto_end(cr, ctx) # actually create FKs!

        if column_rename or patches:
            RegistryManager.signal_registry_change(cr.dbname)

        return res
class ir_model_constraint(Model):
    """
    This model tracks PostgreSQL foreign keys and constraints used by OpenERP
    models, so they can be dropped when the owning module is uninstalled.
    """
    _name = 'ir.model.constraint'
    _columns = {
        # PostgreSQL constraint / FK name, used verbatim in DROP CONSTRAINT
        'name': fields.char('Constraint', required=True, select=1,
            help="PostgreSQL constraint or foreign key name."),
        'model': fields.many2one('ir.model', string='Model',
            required=True, select=1),
        'module': fields.many2one('ir.module.module', string='Module',
            required=True, select=1),
        'type': fields.char('Constraint Type', required=True, size=1, select=1,
            help="Type of the constraint: `f` for a foreign key, "
                "`u` for other constraints."),
        'date_update': fields.datetime('Update Date'),
        'date_init': fields.datetime('Initialization Date')
    }

    _sql_constraints = [
        ('module_name_uniq', 'unique(name, module)',
            'Constraints with the same name are unique per module.'),
    ]

    def _module_data_uninstall(self, cr, uid, ids, context=None):
        """
        Delete PostgreSQL foreign keys and constraints tracked by this model.

        Only drops a schema element when *every* ir.model.constraint record
        referencing it is part of ``ids`` (i.e. no installed module still
        owns it). Requires superuser or base.group_system membership.
        """
        if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
            raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))

        context = dict(context or {})

        ids_set = set(ids)
        # process in reverse id order: drop newest definitions first
        ids.sort()
        ids.reverse()
        for data in self.browse(cr, uid, ids, context):
            model = data.model.model
            model_obj = self.pool[model]
            name = openerp.tools.ustr(data.name)
            typ = data.type

            # double-check we are really going to delete all the owners of this schema element
            cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
            external_ids = [x[0] for x in cr.fetchall()]
            if set(external_ids)-ids_set:
                # as installed modules have defined this element we must not delete it!
                continue

            if typ == 'f':
                # test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
                cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                              WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
                if cr.fetchone():
                    cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
                    _logger.info('Dropped FK CONSTRAINT %s@%s', name, model)

            if typ == 'u':
                # test if constraint exists
                cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
                              WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
                if cr.fetchone():
                    cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
                    _logger.info('Dropped CONSTRAINT %s@%s', name, model)

        self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
    """
    This model tracks PostgreSQL tables used to implement OpenERP many2many
    relations, so they can be dropped when the owning module is uninstalled.
    """
    _name = 'ir.model.relation'
    _columns = {
        'name': fields.char('Relation Name', required=True, select=1,
            help="PostgreSQL table name implementing a many2many relation."),
        'model': fields.many2one('ir.model', string='Model',
            required=True, select=1),
        'module': fields.many2one('ir.module.module', string='Module',
            required=True, select=1),
        'date_update': fields.datetime('Update Date'),
        'date_init': fields.datetime('Initialization Date')
    }

    def _module_data_uninstall(self, cr, uid, ids, context=None):
        """
        Delete PostgreSQL many2many relations tracked by this model.

        A relation table is only dropped when no record outside ``ids``
        still references it (i.e. no installed module still owns it).
        Requires superuser or base.group_system membership.
        """
        if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
            raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))

        ids_set = set(ids)
        to_drop_table = []
        # process in reverse id order: drop newest definitions first
        ids.sort()
        ids.reverse()
        for data in self.browse(cr, uid, ids, context):
            model = data.model
            name = openerp.tools.ustr(data.name)

            # double-check we are really going to delete all the owners of this schema element
            cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
            external_ids = [x[0] for x in cr.fetchall()]
            if set(external_ids)-ids_set:
                # as installed modules have defined this element we must not delete it!
                continue

            # only drop tables that actually exist in the database
            cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
            if cr.fetchone() and not name in to_drop_table:
                to_drop_table.append(name)

        self.unlink(cr, uid, ids, context)

        # drop m2m relation tables
        for table in to_drop_table:
            cr.execute('DROP TABLE %s CASCADE'% table,)
            _logger.info('Dropped table %s', table)

        cr.commit()
class ir_model_access(osv.osv):
    """Model-level access control lists (CRUD permissions per group)."""
    _name = 'ir.model.access'
    _columns = {
        'name': fields.char('Name', required=True, select=True),
        'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module.'),
        'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
        # NULL group_id means the ACL is the global (default) rule
        'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
        'perm_read': fields.boolean('Read Access'),
        'perm_write': fields.boolean('Write Access'),
        'perm_create': fields.boolean('Create Access'),
        'perm_unlink': fields.boolean('Delete Access'),
    }
    _defaults = {
        'active': True,
    }

    def check_groups(self, cr, uid, group):
        """Return True if user ``uid`` belongs to the group identified by the
        external id ``group`` ("module.xml_id").

        NOTE(review): a ``group`` value without a dot yields a one-element
        ``grouparr`` and an IndexError on ``grouparr[1]`` — callers in this
        file always pass a dotted xml id ("base.group_system").
        """
        grouparr  = group.split('.')
        if not grouparr:
            return False
        cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
        return bool(cr.fetchone())

    def check_group(self, cr, uid, model, mode, group_ids):
        """ Check if a specific group has the access mode to the specified model"""
        assert mode in ['read','write','create','unlink'], 'Invalid access mode'

        if isinstance(model, BaseModel):
            assert model._name == 'ir.model', 'Invalid model object'
            # NOTE(review): uses ``model.name`` here while check() below uses
            # ``model.model`` (the technical name) — confirm this is intended
            model_name = model.name
        else:
            model_name = model

        if isinstance(group_ids, (int, long)):
            group_ids = [group_ids]
        for group_id in group_ids:
            # first try the group-specific ACL ...
            cr.execute("SELECT perm_" + mode + " "
                   "  FROM ir_model_access a "
                   "  JOIN ir_model m ON (m.id = a.model_id) "
                   " WHERE m.model = %s AND a.active IS True "
                   " AND a.group_id = %s", (model_name, group_id)
                   )
            r = cr.fetchone()
            if r is None:
                # ... and fall back on the global (group-less) ACL
                cr.execute("SELECT perm_" + mode + " "
                       "  FROM ir_model_access a "
                       "  JOIN ir_model m ON (m.id = a.model_id) "
                       " WHERE m.model = %s AND a.active IS True "
                       " AND a.group_id IS NULL", (model_name, )
                       )
                r = cr.fetchone()

            access = bool(r and r[0])
            if access:
                return True
        # pass no groups -> no access
        return False

    def group_names_with_access(self, cr, model_name, access_mode):
        """Returns the names of visible groups which have been granted ``access_mode`` on
           the model ``model_name``.
           :rtype: list
        """
        assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
        # access_mode is validated above, so this string concatenation is safe
        cr.execute('''SELECT
                        c.name, g.name
                      FROM
                        ir_model_access a
                        JOIN ir_model m ON (a.model_id=m.id)
                        JOIN res_groups g ON (a.group_id=g.id)
                        LEFT JOIN ir_module_category c ON (c.id=g.category_id)
                      WHERE
                        m.model=%s AND
                        a.active IS True AND
                        a.perm_''' + access_mode, (model_name,))
        return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]

    @tools.ormcache()
    def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
        """Check whether user ``uid`` may perform ``mode`` on ``model``.

        Result is ormcache'd; any ACL change must go through the write/create/
        unlink overrides below so the cache gets cleared. Raises AccessError
        (unless ``raise_exception`` is False) when access is denied.
        """
        if uid==1:
            # User root have all accesses
            # TODO: exclude xml-rpc requests
            return True

        assert mode in ['read','write','create','unlink'], 'Invalid access mode'

        if isinstance(model, BaseModel):
            assert model._name == 'ir.model', 'Invalid model object'
            model_name = model.model
        else:
            model_name = model

        # TransientModel records have no access rights, only an implicit access rule
        if model_name not in self.pool:
            _logger.error('Missing model %s' % (model_name, ))
        elif self.pool[model_name].is_transient():
            return True

        # We check if a specific rule exists
        cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
                   '  FROM ir_model_access a '
                   '  JOIN ir_model m ON (m.id = a.model_id) '
                   '  JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
                   ' WHERE m.model = %s '
                   '   AND gu.uid = %s '
                   '   AND a.active IS True '
                   , (model_name, uid,)
                   )
        r = cr.fetchone()[0]

        if r is None:
            # there is no specific rule. We check the generic rule
            cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
                       '  FROM ir_model_access a '
                       '  JOIN ir_model m ON (m.id = a.model_id) '
                       ' WHERE a.group_id IS NULL '
                       '   AND m.model = %s '
                       '   AND a.active IS True '
                       , (model_name,)
                       )
            r = cr.fetchone()[0]

        if not r and raise_exception:
            # build a helpful denial message listing groups that *do* have access
            groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
            msg_heads = {
                # Messages are declared in extenso so they are properly exported in translation terms
                'read': _("Sorry, you are not allowed to access this document."),
                'write':  _("Sorry, you are not allowed to modify this document."),
                'create': _("Sorry, you are not allowed to create this kind of document."),
                'unlink': _("Sorry, you are not allowed to delete this document."),
            }
            if groups:
                msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
                msg_params = (groups, model_name)
            else:
                msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
                msg_params = (model_name,)
            _logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
            msg = '%s %s' % (msg_heads[mode], msg_tail)
            raise openerp.exceptions.AccessError(msg % msg_params)

        return bool(r)

    # registry of (model, method_name) pairs whose caches must be flushed
    # whenever an ACL changes
    __cache_clearing_methods = []

    def register_cache_clearing_method(self, model, method):
        self.__cache_clearing_methods.append((model, method))

    def unregister_cache_clearing_method(self, model, method):
        try:
            i = self.__cache_clearing_methods.index((model, method))
            del self.__cache_clearing_methods[i]
        except ValueError:
            pass

    def call_cache_clearing_methods(self, cr):
        self.invalidate_cache(cr, SUPERUSER_ID)
        self.check.clear_cache(self)    # clear the cache of check function
        for model, method in self.__cache_clearing_methods:
            if model in self.pool:
                getattr(self.pool[model], method)()

    #
    # Check rights on actions
    #
    def write(self, cr, uid, ids, values, context=None):
        self.call_cache_clearing_methods(cr)
        res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
        return res

    def create(self, cr, uid, values, context=None):
        self.call_cache_clearing_methods(cr)
        res = super(ir_model_access, self).create(cr, uid, values, context=context)
        return res

    def unlink(self, cr, uid, ids, context=None):
        self.call_cache_clearing_methods(cr)
        res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
        return res
class ir_model_data(osv.osv):
    """Holds external identifier keys for records in the database.
       This has two main uses:

           * allows easy data integration with third-party systems,
             making import/export/sync of data possible, as records
             can be uniquely identified across multiple systems
           * allows tracking the origin of data installed by OpenERP
             modules themselves, thus making it possible to later
             update them seamlessly.
    """
    _name = 'ir.model.data'
    _order = 'module,model,name'

    def name_get(self, cr, uid, ids, context=None):
        """Display the target record's name when available, falling back on
        the complete external id; failures on any target model are ignored."""
        bymodel = defaultdict(dict)
        names = {}
        for res in self.browse(cr, uid, ids, context=context):
            bymodel[res.model][res.res_id] = res
            names[res.id] = res.complete_name
            #result[res.model][res.res_id] = res.id
        for model, id_map in bymodel.iteritems():
            try:
                ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
            except Exception:
                # target model may be broken or the record gone; keep fallback
                pass
            else:
                for r in id_map.itervalues():
                    names[r.id] = ng.get(r.res_id, r.complete_name)
        return [(i, names[i]) for i in ids]

    def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
        # function field: "module.name" (or just "name" when module is empty)
        result = {}
        for res in self.browse(cr, uid, ids, context=context):
            result[res.id] = (res.module and (res.module + '.') or '')+res.name
        return result

    _columns = {
        'name': fields.char('External Identifier', required=True, select=1,
                            help="External Key/Identifier that can be used for "
                                 "data integration with third-party systems"),
        'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
        'model': fields.char('Model Name', required=True, select=1),
        'module': fields.char('Module', required=True, select=1),
        'res_id': fields.integer('Record ID', select=1,
                                 help="ID of the target record in the database"),
        'noupdate': fields.boolean('Non Updatable'),
        'date_update': fields.datetime('Update Date'),
        'date_init': fields.datetime('Init Date')
    }
    _defaults = {
        'date_init': fields.datetime.now,
        'date_update': fields.datetime.now,
        'noupdate': False,
        'module': ''
    }
    _sql_constraints = [
        ('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
    ]

    def __init__(self, pool, cr):
        osv.osv.__init__(self, pool, cr)
        # also stored in pool to avoid being discarded along with this osv instance
        if getattr(pool, 'model_data_reference_ids', None) is None:
            self.pool.model_data_reference_ids = {}
        # put loads on the class, in order to share it among all instances
        type(self).loads = self.pool.model_data_reference_ids

    def _auto_init(self, cr, context=None):
        super(ir_model_data, self)._auto_init(cr, context)
        # composite (module, name) index speeds up xml id lookups
        cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
        if not cr.fetchone():
            cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')

    # NEW V8 API

    @tools.ormcache(skiparg=3)
    def xmlid_lookup(self, cr, uid, xmlid):
        """Low level xmlid lookup
        Return (id, res_model, res_id) or raise ValueError if not found
        """
        module, name = xmlid.split('.', 1)
        ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
        if not ids:
            raise ValueError('External ID not found in the system: %s' % (xmlid))
        # the sql constraints ensure us we have only one result
        res = self.read(cr, uid, ids[0], ['model', 'res_id'])
        if not res['res_id']:
            raise ValueError('External ID not found in the system: %s' % (xmlid))
        return ids[0], res['model'], res['res_id']

    def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
        """ Return (res_model, res_id)"""
        try:
            return self.xmlid_lookup(cr, uid, xmlid)[1:3]
        except ValueError:
            if raise_if_not_found:
                raise
            return (False, False)

    def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
        """ Returns res_id """
        return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]

    def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
        """ Return a browse_record
        if not found and raise_if_not_found is True return None
        """
        t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
        res_model, res_id = t

        if res_model and res_id:
            record = self.pool[res_model].browse(cr, uid, res_id, context=context)
            if record.exists():
                return record
            if raise_if_not_found:
                raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
        return None

    # OLD API

    def _get_id(self, cr, uid, module, xml_id):
        """Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
        return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]

    def get_object_reference(self, cr, uid, module, xml_id):
        """Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
        return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]

    def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
        """Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
        to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
        model, res_id = self.get_object_reference(cr, uid, module, xml_id)
        #search on id found in result to check if current user has read access right
        check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
        if check_right:
            return model, res_id
        if raise_on_access_error:
            raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
        return model, False

    def get_object(self, cr, uid, module, xml_id, context=None):
        """ Returns a browsable record for the given module name and xml_id.
            If not found, raise a ValueError or return None, depending
            on the value of `raise_exception`.
        """
        return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)

    def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
        # best-effort resolution used during data loading; records the pair in
        # self.loads so _process_end() will not garbage-collect the record
        if not xml_id:
            return False
        id = False
        try:
            # One step to check the ID is defined and the record actually exists
            record = self.get_object(cr, uid, module, xml_id)
            if record:
                id = record.id
                self.loads[(module,xml_id)] = (model,id)
        except Exception:
            pass
        return id

    def clear_caches(self):
        """ Clears all orm caches on the object's methods
        :returns: itself
        """
        self.xmlid_lookup.clear_cache(self)
        return self

    def unlink(self, cr, uid, ids, context=None):
        """ Regular unlink method, but make sure to clear the caches. """
        self.clear_caches()
        return super(ir_model_data,self).unlink(cr, uid, ids, context=context)

    def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
        """Create or update the record identified by (module, xml_id),
        maintaining the matching ir.model.data entry. Returns the target
        record id (or the existing id when a noupdate record is skipped
        in 'update' mode)."""
        #OpenUpgrade: log entry (used in csv import)
        if xml_id:
            openupgrade_log.log_xml_id(cr, module, xml_id)

        model_obj = self.pool[model]
        if not context:
            context = {}
        # records created during module install should not display the messages of OpenChatter
        context = dict(context, install_mode=True)
        if xml_id and ('.' in xml_id):
            assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
            module, xml_id = xml_id.split('.')
        action_id = False

        if xml_id:
            # look up any existing entry for this external id, joined with the
            # target table to detect dangling references
            cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
                          FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
                          WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
                          (module, xml_id))
            results = cr.fetchall()
            for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
                # In update mode, do not update a record if it's ir.model.data is flagged as noupdate
                if mode == 'update' and noupdate_imd:
                    return res_id2
                if not real_id2:
                    # dangling external id: target record is gone, clean it up
                    self.clear_caches()
                    cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
                    res_id = False
                else:
                    assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
                        " you can't define a `%s` record with this ID." % (xml_id, real_model, model)
                    res_id,action_id = res_id2,imd_id2

        if action_id and res_id:
            # existing record with existing external id: plain update
            model_obj.write(cr, uid, [res_id], values, context=context)
            self.write(cr, uid, [action_id], {
                'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
                },context=context)
        elif res_id:
            # explicit res_id given but no external id entry yet
            model_obj.write(cr, uid, [res_id], values, context=context)
            if xml_id:
                if model_obj._inherits:
                    # also register external ids for the _inherits parents
                    for table in model_obj._inherits:
                        inherit_id = model_obj.browse(cr, uid,
                                res_id,context=context)[model_obj._inherits[table]]
                        self.create(cr, uid, {
                            'name': xml_id + '_' + table.replace('.', '_'),
                            'model': table,
                            'module': module,
                            'res_id': inherit_id.id,
                            'noupdate': noupdate,
                            },context=context)
                self.create(cr, uid, {
                    'name': xml_id,
                    'model': model,
                    'module':module,
                    'res_id':res_id,
                    'noupdate': noupdate,
                    },context=context)
        else:
            # no existing record: create it (only in init mode, or in update
            # mode when an xml id is provided)
            if mode=='init' or (mode=='update' and xml_id):
                res_id = model_obj.create(cr, uid, values, context=context)
                if xml_id:
                    if model_obj._inherits:
                        # also register external ids for the _inherits parents
                        for table in model_obj._inherits:
                            inherit_id = model_obj.browse(cr, uid,
                                    res_id,context=context)[model_obj._inherits[table]]
                            self.create(cr, uid, {
                                'name': xml_id + '_' + table.replace('.', '_'),
                                'model': table,
                                'module': module,
                                'res_id': inherit_id.id,
                                'noupdate': noupdate,
                                },context=context)
                    self.create(cr, uid, {
                        'name': xml_id,
                        'model': model,
                        'module': module,
                        'res_id': res_id,
                        'noupdate': noupdate
                        },context=context)

        if xml_id and res_id:
            # remember what this loading pass touched; _process_end() removes
            # records absent from self.loads
            self.loads[(module, xml_id)] = (model, res_id)
            for table, inherit_field in model_obj._inherits.iteritems():
                inherit_id = model_obj.read(cr, uid, [res_id],
                        [inherit_field])[0][inherit_field]
                self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)

        return res_id

    def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
        """Create or update an ir.values entry for (model, key, key2, name).

        NOTE(review): res_id and key2 are interpolated into the WHERE clause
        with %-formatting rather than bound parameters — values come from
        module data, not end users, but confirm before reusing elsewhere.
        """
        if isinstance(models[0], (list, tuple)):
            model,res_id = models[0]
        else:
            res_id=None
            model = models[0]

        if res_id:
            where = ' and res_id=%s' % (res_id,)
        else:
            where = ' and (res_id is null)'

        if key2:
            where += ' and key2=\'%s\'' % (key2,)
        else:
            where += ' and (key2 is null)'

        cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
        res = cr.fetchone()
        ir_values_obj = openerp.registry(cr.dbname)['ir.values']
        if not res:
            ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
        elif xml_id:
            # update existing entry only when it is backed by an xml id
            cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
            ir_values_obj.invalidate_cache(cr, uid, ['value'])
        return True

    def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
        """Deletes all the records referenced by the ir.model.data entries
        ``ids`` along with their corresponding database backed (including
        dropping tables, columns, FKs, etc, as long as there is no other
        ir.model.data entry holding a reference to them (which indicates that
        they are still owned by another module).

        Attempts to perform the deletion in an appropriate order to maximize
        the chance of gracefully deleting all records.
        This step is performed as part of the full uninstallation of a module.
        """
        ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])

        if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
            raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))

        context = dict(context or {})
        context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion

        ids_set = set(ids)
        wkf_todo = []
        to_unlink = []
        # process in reverse id order: delete newest records first
        ids.sort()
        ids.reverse()
        for data in self.browse(cr, uid, ids, context):
            model = data.model
            res_id = data.res_id

            pair_to_unlink = (model, res_id)
            if pair_to_unlink not in to_unlink:
                to_unlink.append(pair_to_unlink)

            if model == 'workflow.activity':
                # Special treatment for workflow activities: temporarily revert their
                # incoming transition and trigger an update to force all workflow items
                # to move out before deleting them
                cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
                wkf_todo.extend(cr.fetchall())
                cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
                self.invalidate_cache(cr, uid, context=context)

        for model,res_id in wkf_todo:
            try:
                openerp.workflow.trg_write(uid, model, res_id, cr)
            except Exception:
                _logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)

        def unlink_if_refcount(to_unlink):
            # delete each (model, res_id) unless another module (an id outside
            # ids_set) still references it; each deletion runs in a savepoint
            # so one failure does not abort the whole uninstall
            for model, res_id in to_unlink:
                external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
                if set(external_ids)-ids_set:
                    # if other modules have defined this record, we must not delete it
                    continue
                if model == 'ir.model.fields':
                    # Don't remove the LOG_ACCESS_COLUMNS unless _log_access
                    # has been turned off on the model.
                    field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
                    if not field.exists():
                        _logger.info('Deleting orphan external_ids %s', external_ids)
                        self.unlink(cr, uid, external_ids)
                        continue
                    if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
                        continue
                    if field.name == 'id':
                        continue
                _logger.info('Deleting %s@%s', res_id, model)
                try:
                    cr.execute('SAVEPOINT record_unlink_save')
                    self.pool[model].unlink(cr, uid, [res_id], context=context)
                except Exception:
                    _logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
                    cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
                else:
                    cr.execute('RELEASE SAVEPOINT record_unlink_save')

        # Remove non-model records first, then model fields, and finish with models
        unlink_if_refcount((model, res_id) for model, res_id in to_unlink
                              if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
        unlink_if_refcount((model, res_id) for model, res_id in to_unlink
                              if model == 'ir.model.constraint')

        ir_module_module = self.pool['ir.module.module']
        ir_model_constraint = self.pool['ir.model.constraint']
        modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
        constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
        ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)

        unlink_if_refcount((model, res_id) for model, res_id in to_unlink
                              if model == 'ir.model.fields')

        ir_model_relation = self.pool['ir.model.relation']
        relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
        ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)

        unlink_if_refcount((model, res_id) for model, res_id in to_unlink
                              if model == 'ir.model')

        cr.commit()

        self.unlink(cr, uid, ids, context)

    def _process_end(self, cr, uid, modules):
        """ Clear records removed from updated module data.
        This method is called at the end of the module loading process.
        It is meant to removed records that are no longer present in the
        updated data. Such records are recognised as the one with an xml id
        and a module in ir_model_data and noupdate set to false, but not
        present in self.loads.
        """
        if not modules:
            return True
        to_unlink = []
        cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
            WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC""",
                      (tuple(modules), False))
        for (id, name, model, res_id, module) in cr.fetchall():
            if (module,name) not in self.loads:
                to_unlink.append((model,res_id))
        if not config.get('import_partial'):
            for (model, res_id) in to_unlink:
                if model in self.pool:
                    _logger.info('Deleting %s@%s', res_id, model)
                    try:
                        # savepoint so a failed deletion does not poison the
                        # surrounding transaction
                        cr.execute('SAVEPOINT ir_model_data_delete');
                        self.pool[model].unlink(cr, uid, [res_id])
                        cr.execute('RELEASE SAVEPOINT ir_model_data_delete')
                    except Exception:
                        cr.execute('ROLLBACK TO SAVEPOINT ir_model_data_delete');
                        _logger.warning(
                            'Could not delete obsolete record with id: %d of model %s\n'
                            'Please refer to the log message right above',
                            res_id, model)
class wizard_model_menu(osv.osv_memory):
    """Transient wizard that creates a menu entry (with its window action)
    pointing at the model given in ``context['model_id']``."""
    _name = 'wizard.ir.model.menu.create'
    _columns = {
        'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
        'name': fields.char('Menu Name', required=True),
    }

    def menu_create(self, cr, uid, ids, context=None):
        """For each wizard record, create an ir.actions.act_window on the
        target model and attach it to a new child of the chosen parent menu.
        Returns the client action that closes the wizard window."""
        context = context or {}
        ir_model = self.pool.get('ir.model')
        act_window_pool = self.pool.get('ir.actions.act_window')
        menu_pool = self.pool.get('ir.ui.menu')
        for wizard in self.browse(cr, uid, ids, context):
            target = ir_model.browse(cr, uid, context.get('model_id'), context=context)
            action_vals = {
                'name': wizard.name,
                'res_model': target.model,
                'view_type': 'form',
                'view_mode': 'tree,form'
            }
            action_id = act_window_pool.create(cr, uid, action_vals)
            menu_vals = {
                'name': wizard.name,
                'parent_id': wizard.menu_id.id,
                'action': 'ir.actions.act_window,%d' % (action_id,),
                'icon': 'STOCK_INDENT'
            }
            menu_pool.create(cr, uid, menu_vals, context)
        return {'type':'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
HyperBaton/ansible | lib/ansible/modules/network/junos/junos_system.py | 52 | 6186 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: junos_system
version_added: "2.4"
author: "Ganesh Nalawade (@ganeshrn)"
short_description: Manage the system attributes on Juniper JUNOS devices
description:
- This module provides declarative management of node system attributes
on Juniper JUNOS devices. It provides an option to configure host system
parameters or remove those parameters from the device active
configuration.
options:
hostname:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- Configure the IP domain name
on the remote device to the provided value. Value
should be in the dotted name form and will be
appended to the C(hostname) to create a fully-qualified
domain name.
domain_search:
description:
- Provides the list of domain suffixes to
append to the hostname for the purpose of doing name resolution.
This argument accepts a list of names and will be reconciled
with the current active configuration on the running node.
name_servers:
description:
- List of DNS name servers by IP address to use to perform name resolution
lookups. This argument accepts either a list of DNS servers See
examples.
state:
description:
- State of the configuration
values in the device's current active configuration. When set
to I(present), the values should be configured in the device active
configuration and when set to I(absent) the values should not be
in the device active configuration
default: present
choices: ['present', 'absent']
active:
description:
- Specifies whether or not the configuration is active or deactivated
default: True
type: bool
requirements:
- ncclient (>=v0.5.2)
notes:
- This module requires the netconf system service be enabled on
the remote device being managed.
- Tested against vSRX JUNOS version 15.1X49-D15.4, vqfx-10000 JUNOS Version 15.1X53-D60.4.
- Recommended connection is C(netconf). See L(the Junos OS Platform Options,../network/user_guide/platform_junos.html).
- This module also works with C(local) connections for legacy playbooks.
extends_documentation_fragment: junos
"""
# Usage examples rendered by ansible-doc.  Option keys must match the
# module's argument_spec (fixed: 'domain-search' -> 'domain_search').
EXAMPLES = """
- name: configure hostname and domain name
  junos_system:
    hostname: junos01
    domain_name: test.example.com
    domain_search:
    - ansible.com
    - redhat.com
    - juniper.com
- name: remove configuration
  junos_system:
    state: absent
- name: configure name servers
  junos_system:
    name_servers:
    - 8.8.8.8
    - 8.8.4.4
"""
RETURN = """
diff.prepared:
description: Configuration difference before and after applying change.
returned: when configuration is changed and diff option is enabled.
type: str
sample: >
[edit system]
+ host-name test;
+ domain-name ansible.com;
+ domain-search redhat.com;
[edit system name-server]
172.26.1.1 { ... }
+ 8.8.8.8;
"""
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.junos.junos import junos_argument_spec, tostring
from ansible.module_utils.network.junos.junos import load_config, map_params_to_obj, map_obj_to_ele
from ansible.module_utils.network.junos.junos import commit_configuration, discard_changes, locked_config
USE_PERSISTENT_CONNECTION = True
def validate_param_values(module, obj):
    """Run any module-level ``validate_<param>`` hook for each key of *obj*.

    Validators are looked up dynamically in this module's global namespace;
    keys without a matching callable are silently skipped.
    """
    module_globals = globals()
    for param_name in obj:
        validator = module_globals.get('validate_%s' % param_name)
        if not callable(validator):
            continue
        validator(module.params.get(param_name), module)
def main():
    """ main entry point for module execution
    """
    # Module options; junos_argument_spec adds the shared provider/transport
    # options common to all junos_* modules.
    argument_spec = dict(
        hostname=dict(),
        domain_name=dict(),
        domain_search=dict(type='list'),
        name_servers=dict(type='list'),
        state=dict(choices=['present', 'absent'], default='present'),
        active=dict(default=True, type='bool')
    )
    argument_spec.update(junos_argument_spec)
    # At least one of these must be supplied for any accepted state.
    params = ['hostname', 'domain_name', 'domain_search', 'name_servers']
    # NOTE(review): 'active' and 'suspend' are not valid `state` choices
    # (only 'present'/'absent'), so the last two tuples can never trigger —
    # presumably copied from a sibling module; confirm and prune.
    required_if = [('state', 'present', params, True),
                   ('state', 'absent', params, True),
                   ('state', 'active', params, True),
                   ('state', 'suspend', params, True)]
    module = AnsibleModule(argument_spec=argument_spec,
                           required_if=required_if,
                           supports_check_mode=True)
    warnings = list()
    result = {'changed': False}
    # NOTE(review): `warnings` is tested before load_config() below can
    # append to it, so warnings gathered during the load are never reported.
    if warnings:
        result['warnings'] = warnings
    # All generated configuration XML is rooted under <system>.
    top = 'system'
    # Ordered map of module param -> XPath (relative to <system>) used to
    # build the NETCONF payload.
    param_to_xpath_map = collections.OrderedDict()
    param_to_xpath_map.update([
        ('hostname', {'xpath': 'host-name', 'leaf_only': True}),
        ('domain_name', {'xpath': 'domain-name', 'leaf_only': True}),
        ('domain_search', {'xpath': 'domain-search', 'leaf_only': True, 'value_req': True}),
        ('name_servers', {'xpath': 'name-server/name', 'is_key': True})
    ])
    validate_param_values(module, param_to_xpath_map)
    want = map_params_to_obj(module, param_to_xpath_map)
    ele = map_obj_to_ele(module, want, top)
    # Hold the configuration lock while merging, then commit (or discard in
    # check mode) only if the candidate diff is non-empty.
    with locked_config(module):
        diff = load_config(module, tostring(ele), warnings, action='merge')
        commit = not module.check_mode
        if diff:
            if commit:
                commit_configuration(module)
            else:
                discard_changes(module)
            result['changed'] = True
            if module._diff:
                result['diff'] = {'prepared': diff}
    module.exit_json(**result)
if __name__ == "__main__":
main()
| gpl-3.0 |
discoapi/discotech | discotech/discoAPI/keywordManager.py | 1 | 3203 | __package__ = 'discotech.discoAPI'
from discotech import discotechError
class KeywordManager(object):
    """
    Simple object to store and queue keywords to search in social media providers.

    Keywords rotate in a circular queue: dequque() returns the head and moves
    it to the back.
    """
    def __init__(self, keywords=None, convertToSearchPhrases=False):
        """
        @type keywords: list
        @param keywords: the keywords you want to search for
        @type convertToSearchPhrases: bool
        @param convertToSearchPhrases: whether keywords should be converted to matching search phrases, for example 'spider man' => ['spider','man','spiderman','spider_man']
        """
        # BUG FIX: the old signature used a mutable default ([]) and stored a
        # reference to that shared default list when no keywords were given;
        # it also left _keywordCount/_headLocation undefined in that case.
        if keywords:
            self.keywords = self._keyworsToSearchPhrases(keywords) if convertToSearchPhrases else list(keywords)
        else:
            self.keywords = []
        self._keywordCount = len(self.keywords)
        self._headLocation = 0
    def dequque(self):
        """
        dequeue a keyword from the queue, the keyword is then moved to the end of the queue
        @return: the next keyword in queue
        """
        if not self.keywords:
            raise discotechError("you don't have any keywords")
        retValue = self.keywords[self._headLocation]
        # move head to the next slot, wrapping around at the end
        self._headLocation = (self._headLocation + 1) % self._keywordCount
        return retValue
    def dequeue(self):
        """Correctly spelled alias of dequque(), kept for new callers."""
        return self.dequque()
    def _updateFromList(self, keywords):
        # Replace the queue with a copy of the given list and reset the head.
        self.keywords = list(keywords)
        self._keywordCount = len(self.keywords)
        self._headLocation = 0
    def _updateFromDict(self, config):
        # Accepts {'keywords': [...], 'search_phrase': bool} style configs.
        if 'keywords' in config:
            convertToSearchPhrases = config.get('search_phrase') is True
            self.keywords = self._keyworsToSearchPhrases(config['keywords']) if convertToSearchPhrases else list(config['keywords'])
            self._keywordCount = len(self.keywords)
            self._headLocation = 0
        else:
            raise discotechError("no keywords were given")
    def _keyworToSearchPhrases(self, keyword):
        # 'spider man' -> ['spider', 'man', 'spiderman', 'spider_man']
        words = keyword.split(' ')
        # edge case: a single word needs no combination
        if len(words) == 1:
            return words
        # cleanup stage: drop empty fragments caused by repeated spaces
        cleanWords = [word.strip() for word in words if word.strip() != '']
        # combinator stage: join with no separator and with an underscore
        combinators = ['', '_']
        combinedWords = [combinator.join(cleanWords) for combinator in combinators]
        return cleanWords + combinedWords
    def _keyworsToSearchPhrases(self, keywords):
        retList = []
        for keyword in keywords:
            retList += self._keyworToSearchPhrases(keyword)
        return retList
    def loadConfig(self, config):
        """
        load keywords from a configuration
        @type config: list | dict | str
        @param config: a list of keywords, a config dict, or a path/URL of a JSON configuration file
        """
        if type(config) is list:
            return self._updateFromList(config)
        if type(config) is dict:
            return self._updateFromDict(config)
        if type(config) is str:
            # could be an address
            if config.startswith('http://') or config.startswith('https://'):
                configFile = getUrlContents(config)
                confList = json.loads(configFile['response_text'])
                # recursively call yourself with the decoded config
                return self.loadConfig(confList)
            # could be a file name; `with` closes the handle (the old code
            # leaked it and then crashed on an undefined name 'confList'
            # because of a 'confLisr' typo — fixed here)
            with open(config, 'r') as confFile:
                confList = json.loads(confFile.read())
            return self.loadConfig(confList)
| gpl-2.0 |
pramasoul/micropython | tests/extmod/ure1.py | 11 | 2723 | try:
import ure as re
except ImportError:
try:
import re
except ImportError:
print("SKIP")
raise SystemExit
r = re.compile(".+")
m = r.match("abc")
print(m.group(0))
try:
m.group(1)
except IndexError:
print("IndexError")
# conversion of re and match to string
str(r)
str(m)
r = re.compile("(.+)1")
m = r.match("xyz781")
print(m.group(0))
print(m.group(1))
try:
m.group(2)
except IndexError:
print("IndexError")
r = re.compile("[a-cu-z]")
m = r.match("a")
print(m.group(0))
m = r.match("z")
print(m.group(0))
m = r.match("d")
print(m)
m = r.match("A")
print(m)
print("===")
r = re.compile("[^a-cu-z]")
m = r.match("a")
print(m)
m = r.match("z")
print(m)
m = r.match("d")
print(m.group(0))
m = r.match("A")
print(m.group(0))
print("===")
# '-' character within character class block
print(re.match("[-a]+", "-a]d").group(0))
print(re.match("[a-]+", "-a]d").group(0))
print("===")
r = re.compile("o+")
m = r.search("foobar")
print(m.group(0))
try:
m.group(1)
except IndexError:
print("IndexError")
m = re.match(".*", "foo")
print(m.group(0))
m = re.search("w.r", "hello world")
print(m.group(0))
m = re.match("a+?", "ab")
print(m.group(0))
m = re.match("a*?", "ab")
print(m.group(0))
m = re.match("^ab$", "ab")
print(m.group(0))
m = re.match("a|b", "b")
print(m.group(0))
m = re.match("a|b|c", "c")
print(m.group(0))
# Case where anchors fail to match
r = re.compile("^b|b$")
m = r.search("abc")
print(m)
try:
re.compile("*")
except:
print("Caught invalid regex")
# bytes objects
m = re.match(rb"a+?", b"ab")
print(m.group(0))
print("===")
# escaping
m = re.match(r"a\.c", "a.c")
print(m.group(0) if m else "")
m = re.match(r"a\.b", "abc")
print(m is None)
m = re.match(r"a\.b", "a\\bc")
print(m is None)
m = re.match(r"[a\-z]", "abc")
print(m.group(0))
m = re.match(r"[.\]]*", ".].]a")
print(m.group(0))
m = re.match(r"[.\]+]*", ".]+.]a")
print(m.group(0))
m = re.match(r"[a-f0-9x\-yz]*", "abxcd1-23")
print(m.group(0))
m = re.match(r"[a\\b]*", "a\\aa\\bb\\bbab")
print(m.group(0))
m = re.search(r"[a\-z]", "-")
print(m.group(0))
m = re.search(r"[a\-z]", "f")
print(m is None)
m = re.search(r"[a\]z]", "a")
print(m.group(0))
print(re.compile(r"[-a]").split("foo-bar"))
print(re.compile(r"[a-]").split("foo-bar"))
print(re.compile(r"[ax\-]").split("foo-bar"))
print(re.compile(r"[a\-x]").split("foo-bar"))
print(re.compile(r"[\-ax]").split("foo-bar"))
print("===")
# Module functions take str/bytes/re.
for f in (re.match, re.search):
print(f(".", "foo").group(0))
print(f(b".", b"foo").group(0))
print(f(re.compile("."), "foo").group(0))
try:
f(123, "a")
except TypeError:
print("TypeError")
print("===")
| mit |
jmztaylor/android_kernel_htc_a3ul_old | tools/perf/scripts/python/futex-contention.py | 11261 | 1486 | # futex contention
# (c) 2010, Arnaldo Carvalho de Melo <acme@redhat.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Translation of:
#
# http://sourceware.org/systemtap/wiki/WSFutexContention
#
# to perf python scripting.
#
# Measures futex contention
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Util import *
# Long-lived global state shared by the enter/exit tracepoint handlers.
# (The original initialised process_names twice; the duplicate is dropped.)
thread_thislock = {}  # tid -> futex uaddr the thread is currently waiting on
thread_blocktime = {} # tid -> timestamp (ns) at which the thread began to wait
lock_waits = {} # long-lived stats on (tid,lock) blockage elapsed time
process_names = {} # long-lived pid-to-execname mapping
def syscalls__sys_enter_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, uaddr, op, val, utime, uaddr2, val3):
	# Record when a thread starts blocking on a futex.  Only FUTEX_WAIT
	# operations are tracked; wakers and other futex ops are ignored.
	cmd = op & FUTEX_CMD_MASK
	if cmd != FUTEX_WAIT:
		return # we don't care about originators of WAKE events
	process_names[tid] = comm
	thread_thislock[tid] = uaddr
	# timestamp (in ns) at which this thread began to wait
	thread_blocktime[tid] = nsecs(s, ns)
def syscalls__sys_exit_futex(event, ctxt, cpu, s, ns, tid, comm,
	nr, ret):
	# A FUTEX_WAIT returned: account the elapsed blocked time for this
	# thread and drop its per-wait state.  Threads whose enter event was
	# not a tracked FUTEX_WAIT are ignored.
	# (`in` replaces dict.has_key(), which was removed in Python 3.)
	if tid in thread_blocktime:
		elapsed = nsecs(s, ns) - thread_blocktime[tid]
		add_stats(lock_waits, (tid, thread_thislock[tid]), elapsed)
		del thread_blocktime[tid]
		del thread_thislock[tid]
def trace_begin():
	# Parenthesised single-argument print behaves identically under
	# Python 2 and 3, keeping this perf script usable with either.
	print("Press control+C to stop and show the summary")
def trace_end():
	# Dump min/max/avg contention stats for every (thread, lock) pair.
	# Locals renamed so the builtins min/max are no longer shadowed, and
	# print is the 2/3-compatible parenthesised form.
	for (tid, lock) in lock_waits:
		min_ns, max_ns, avg_ns, count = lock_waits[tid, lock]
		print("%s[%d] lock %x contended %d times, %d avg ns" %
			(process_names[tid], tid, lock, count, avg_ns))
| gpl-2.0 |
appleseedhq/gaffer | python/GafferUITest/NoduleLayoutTest.py | 11 | 6511 | ##########################################################################
#
# Copyright (c) 2016, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import Gaffer
import GafferTest
import GafferUI
import GafferUITest
class NoduleLayoutTest( GafferUITest.TestCase ) :
	"""UI tests for GafferUI.NoduleLayout: section assignment, layout
	direction, visibility and custom gadget registration via metadata."""
	def testChangingSection( self ) :
		# Registering "noduleLayout:section" metadata moves the nodule from
		# its current layout section to the named one.
		n = GafferTest.AddNode()
		top = GafferUI.NoduleLayout( n, "top" )
		left = GafferUI.NoduleLayout( n, "left" )
		self.assertTrue( top.nodule( n["op1"] ) is not None )
		self.assertTrue( left.nodule( n["op1"] ) is None )
		Gaffer.Metadata.registerValue( n["op1"], "noduleLayout:section", "left" )
		self.assertTrue( top.nodule( n["op1"] ) is None )
		self.assertTrue( left.nodule( n["op1"] ) is not None )
	def testDefaultDirection( self ) :
		# Top sections lay nodules out horizontally (op1 before op2);
		# left sections lay them out vertically.
		n = GafferTest.AddNode()
		top = GafferUI.NoduleLayout( n, "top" )
		self.assertGreater( top.bound().size().x, top.bound().size().y )
		self.assertGreater(
			top.nodule( n["op2"] ).transformedBound( None ).center().x,
			top.nodule( n["op1"] ).transformedBound( None ).center().x
		)
		Gaffer.Metadata.registerValue( n["op1"], "noduleLayout:section", "left" )
		Gaffer.Metadata.registerValue( n["op2"], "noduleLayout:section", "left" )
		left = GafferUI.NoduleLayout( n, "left" )
		self.assertGreater( left.bound().size().y, left.bound().size().x )
		self.assertGreater(
			left.nodule( n["op1"] ).transformedBound( None ).center().y,
			left.nodule( n["op2"] ).transformedBound( None ).center().y
		)
	def testExplicitDirection( self ) :
		# "noduleLayout:section:top:direction" = "decreasing" reverses the
		# ordering of nodules within the section.
		n = GafferTest.AddNode()
		top = GafferUI.NoduleLayout( n, "top" )
		self.assertGreater( top.bound().size().x, top.bound().size().y )
		self.assertGreater(
			top.nodule( n["op2"] ).transformedBound( None ).center().x,
			top.nodule( n["op1"] ).transformedBound( None ).center().x
		)
		Gaffer.Metadata.registerValue( n, "noduleLayout:section:top:direction", "decreasing" )
		self.assertGreater( top.bound().size().x, top.bound().size().y )
		self.assertLess(
			top.nodule( n["op2"] ).transformedBound( None ).center().x,
			top.nodule( n["op1"] ).transformedBound( None ).center().x
		)
	def testVisible( self ) :
		# "noduleLayout:visible" metadata hides/shows individual nodules.
		n = GafferTest.AddNode()
		top = GafferUI.NoduleLayout( n, "top" )
		self.assertTrue( top.nodule( n["op1"] ) is not None )
		self.assertTrue( top.nodule( n["op2"] ) is not None )
		Gaffer.Metadata.registerValue( n["op1"], "noduleLayout:visible", False )
		self.assertTrue( top.nodule( n["op1"] ) is None )
		self.assertTrue( top.nodule( n["op2"] ) is not None )
		Gaffer.Metadata.registerValue( n["op1"], "noduleLayout:visible", True )
		self.assertTrue( top.nodule( n["op1"] ) is not None )
		self.assertTrue( top.nodule( n["op2"] ) is not None )
	def testCustomGadget( self ) :
		# Define a custom gadget
		class CustomGadget( GafferUI.Gadget ) :
			def __init__( self, node ) :
				GafferUI.Gadget.__init__( self )
				self.addChild( GafferUI.ImageGadget( "minus.png" ) )
				self.node = node
		GafferUI.NoduleLayout.registerCustomGadget( "CustomGadget", CustomGadget )
		# Create a node and make a top and bottom
		# nodule layout for it.
		n = GafferTest.AddNode()
		topLayout = GafferUI.NoduleLayout( n, "top" )
		bottomLayout = GafferUI.NoduleLayout( n, "bottom" )
		topLayoutBound = topLayout.bound()
		bottomLayoutBound = bottomLayout.bound()
		# These shouldn't contain any custom gadgets.
		self.assertEqual( topLayout.customGadget( "test" ), None )
		self.assertEqual( bottomLayout.customGadget( "test" ), None )
		# Register our custom gadget into the top layout
		Gaffer.Metadata.registerValue( n, "noduleLayout:customGadget:test:gadgetType", "CustomGadget" )
		Gaffer.Metadata.registerValue( n, "noduleLayout:customGadget:test:section", "top" )
		# Check that it appears
		gadget = topLayout.customGadget( "test" )
		self.assertTrue( isinstance( gadget, CustomGadget ) )
		self.assertTrue( gadget.node.isSame( n ) )
		self.assertGreater( topLayout.bound().size().x, topLayoutBound.size().x )
		# And is to the right of the nodules
		nodule = topLayout.nodule( n["op2"] )
		self.assertGreater( gadget.transformedBound().center().x, nodule.transformedBound().center().x )
		# Check that nothing has appeared in the bottom layout
		self.assertEqual( bottomLayout.customGadget( "test" ), None )
		self.assertEqual( bottomLayout.bound(), bottomLayout.bound() )
		# Change the index for our gadget, and check that
		# the same one is reused, but now appears to the left
		# of the nodules.
		topLayoutBound = topLayout.bound()
		Gaffer.Metadata.registerValue( n, "noduleLayout:customGadget:test:index", 0 )
		self.assertTrue( topLayout.customGadget( "test" ).isSame( gadget ) )
		self.assertEqual( topLayout.bound(), topLayoutBound )
		self.assertLess( gadget.transformedBound().center().x, nodule.transformedBound().center().x )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
treefrogframework/FrameworkBenchmarks | toolset/utils/docker_helper.py | 7 | 15102 | import os
import socket
import json
import docker
import time
import re
import traceback
from threading import Thread
from colorama import Fore, Style
from toolset.utils.output_helper import log
from toolset.databases import databases
from psutil import virtual_memory
# total memory limit allocated for the test container
mem_limit = int(round(virtual_memory().total * .95))
class DockerHelper:
    """Wraps the docker-py clients for the three TFB machines (server,
    database, client) and provides build/run/stop helpers for benchmark
    containers."""
    def __init__(self, benchmarker=None):
        self.benchmarker = benchmarker
        # Separate daemon connections: load generator, app server, database.
        self.client = docker.DockerClient(
            base_url=self.benchmarker.config.client_docker_host)
        self.server = docker.DockerClient(
            base_url=self.benchmarker.config.server_docker_host)
        self.database = docker.DockerClient(
            base_url=self.benchmarker.config.database_docker_host)
    def __build(self, base_url, path, build_log_file, log_prefix, dockerfile,
                tag, buildargs={}):
        '''
        Builds docker containers using docker-py low-level api
        '''
        # NOTE(review): `buildargs={}` is a mutable default; it is never
        # mutated here so it is harmless, but `None` would be safer.
        self.benchmarker.time_logger.mark_build_start()
        with open(build_log_file, 'w') as build_log:
            try:
                client = docker.APIClient(base_url=base_url)
                output = client.build(
                    path=path,
                    dockerfile=dockerfile,
                    tag=tag,
                    forcerm=True,
                    timeout=3600,
                    pull=True,
                    buildargs=buildargs
                )
                # The low-level API streams JSON fragments; accumulate the
                # "stream" payloads and emit them line by line.
                buffer = ""
                for token in output:
                    if token.startswith('{"stream":'):
                        token = json.loads(token)
                        # NOTE(review): keys()[0] is Python-2-only; under
                        # Python 3 dict.keys() is not subscriptable.
                        token = token[token.keys()[0]].encode('utf-8')
                        buffer += token
                    elif token.startswith('{"errorDetail":'):
                        token = json.loads(token)
                        raise Exception(token['errorDetail']['message'])
                    while "\n" in buffer:
                        index = buffer.index("\n")
                        line = buffer[:index]
                        buffer = buffer[index + 1:]
                        log(line,
                            prefix=log_prefix,
                            file=build_log,
                            color=Fore.WHITE + Style.BRIGHT \
                            if re.match(r'^Step \d+\/\d+', line) else '')
                    # Kill docker builds if they exceed 60 mins. This will only
                    # catch builds that are still printing output.
                    if self.benchmarker.time_logger.time_since_start() > 3600:
                        log("Build time exceeded 60 minutes",
                            prefix=log_prefix,
                            file=build_log,
                            color=Fore.RED)
                        raise Exception
                if buffer:
                    log(buffer,
                        prefix=log_prefix,
                        file=build_log,
                        color=Fore.WHITE + Style.BRIGHT \
                        if re.match(r'^Step \d+\/\d+', buffer) else '')
            except Exception:
                tb = traceback.format_exc()
                log("Docker build failed; terminating",
                    prefix=log_prefix,
                    file=build_log,
                    color=Fore.RED)
                log(tb, prefix=log_prefix, file=build_log)
                self.benchmarker.time_logger.log_build_end(
                    log_prefix=log_prefix, file=build_log)
                raise
        self.benchmarker.time_logger.log_build_end(
            log_prefix=log_prefix, file=build_log)
    def clean(self):
        '''
        Cleans all the docker images from the system
        '''
        # Only techempower-tagged images (other than the toolset image
        # itself) are removed, on both the server and database daemons.
        self.server.images.prune()
        for image in self.server.images.list():
            if len(image.tags) > 0:
                # 'techempower/tfb.test.gemini:0.1' -> 'techempower/tfb.test.gemini'
                image_tag = image.tags[0].split(':')[0]
                if image_tag != 'techempower/tfb' and 'techempower' in image_tag:
                    self.server.images.remove(image.id, force=True)
        self.server.images.prune()
        self.database.images.prune()
        for image in self.database.images.list():
            if len(image.tags) > 0:
                # 'techempower/tfb.test.gemini:0.1' -> 'techempower/tfb.test.gemini'
                image_tag = image.tags[0].split(':')[0]
                if image_tag != 'techempower/tfb' and 'techempower' in image_tag:
                    self.database.images.remove(image.id, force=True)
        self.database.images.prune()
    def build(self, test, build_log_dir=os.devnull):
        '''
        Builds the test docker containers
        '''
        log_prefix = "%s: " % test.name
        # Build the test image
        test_docker_file = '%s.dockerfile' % test.name
        if hasattr(test, 'dockerfile'):
            test_docker_file = test.dockerfile
        test_database = ''
        if hasattr(test, 'database'):
            test_database = test.database
        build_log_file = build_log_dir
        if build_log_dir is not os.devnull:
            build_log_file = os.path.join(
                build_log_dir,
                "%s.log" % test_docker_file.replace(".dockerfile", "").lower())
        try:
            self.__build(
                base_url=self.benchmarker.config.server_docker_host,
                build_log_file=build_log_file,
                log_prefix=log_prefix,
                path=test.directory,
                dockerfile=test_docker_file,
                buildargs=({
                    'BENCHMARK_ENV':
                    self.benchmarker.config.results_environment,
                    'TFB_TEST_NAME': test.name,
                    'TFB_TEST_DATABASE': test_database
                }),
                tag="techempower/tfb.test.%s" % test.name)
        except Exception:
            # Non-zero return signals a failed build to the caller.
            return 1
        return 0
    def run(self, test, run_log_dir):
        '''
        Run the given Docker container(s)
        '''
        log_prefix = "%s: " % test.name
        container = None
        try:
            # Daemon thread tails the container output into its log file.
            def watch_container(docker_container, docker_file):
                with open(
                        os.path.join(
                            run_log_dir, "%s.log" % docker_file.replace(
                                ".dockerfile", "").lower()), 'w') as run_log:
                    for line in docker_container.logs(stream=True):
                        log(line, prefix=log_prefix, file=run_log)
            # Without a user-defined docker network, expose the well-known
            # host names via /etc/hosts entries instead.
            extra_hosts = None
            name = "tfb-server"
            if self.benchmarker.config.network is None:
                extra_hosts = {
                    socket.gethostname():
                    str(self.benchmarker.config.server_host),
                    'tfb-server':
                    str(self.benchmarker.config.server_host),
                    'tfb-database':
                    str(self.benchmarker.config.database_host)
                }
                name = None
            sysctl = {'net.core.somaxconn': 65535}
            ulimit = [{
                'name': 'nofile',
                'hard': 200000,
                'soft': 200000
            }, {
                'name': 'rtprio',
                'hard': 99,
                'soft': 99
            }]
            docker_cmd = ''
            if hasattr(test, 'docker_cmd'):
                docker_cmd = test.docker_cmd
            # Expose ports in debugging mode
            ports = {}
            if self.benchmarker.config.mode == "debug":
                ports = {test.port: test.port}
            container = self.server.containers.run(
                "techempower/tfb.test.%s" % test.name,
                name=name,
                command=docker_cmd,
                network=self.benchmarker.config.network,
                network_mode=self.benchmarker.config.network_mode,
                ports=ports,
                stderr=True,
                detach=True,
                init=True,
                extra_hosts=extra_hosts,
                privileged=True,
                ulimits=ulimit,
                mem_limit=mem_limit,
                sysctls=sysctl,
                remove=True,
                log_config={'type': None})
            watch_thread = Thread(
                target=watch_container,
                args=(
                    container,
                    "%s.dockerfile" % test.name,
                ))
            watch_thread.daemon = True
            watch_thread.start()
        except Exception:
            with open(
                    os.path.join(run_log_dir, "%s.log" % test.name.lower()),
                    'w') as run_log:
                tb = traceback.format_exc()
                log("Running docker container: %s.dockerfile failed" %
                    test.name,
                    prefix=log_prefix,
                    file=run_log)
                log(tb, prefix=log_prefix, file=run_log)
        return container
    @staticmethod
    def __stop_container(container):
        try:
            container.stop(timeout=2)
            time.sleep(2)
        except:
            # container has already been killed
            pass
    @staticmethod
    def __stop_all(docker_client):
        # Stop every techempower container except the toolset's own image.
        for container in docker_client.containers.list():
            if len(container.image.tags) > 0 \
                    and 'techempower' in container.image.tags[0] \
                    and 'tfb:latest' not in container.image.tags[0]:
                DockerHelper.__stop_container(container)
    def stop(self, containers=None):
        '''
        Attempts to stop a container or list of containers.
        If no containers are passed, stops all running containers.
        '''
        is_multi_setup = self.benchmarker.config.server_docker_host != \
                         self.benchmarker.config.database_docker_host
        if containers:
            if not isinstance(containers, list):
                containers = [containers]
            for container in containers:
                DockerHelper.__stop_container(container)
        else:
            DockerHelper.__stop_all(self.server)
            if is_multi_setup:
                DockerHelper.__stop_all(self.database)
                DockerHelper.__stop_all(self.client)
        self.database.containers.prune()
        if is_multi_setup:
            # Then we're on a 3 machine set up
            self.server.containers.prune()
            self.client.containers.prune()
    def build_databases(self):
        '''
        Builds all the databases necessary to run the list of benchmarker tests
        '''
        # Each distinct database type is built exactly once.
        built = []
        for test in self.benchmarker.tests:
            db = test.database.lower()
            if db not in built and db != "none":
                image_name = "techempower/%s:latest" % db
                log_prefix = image_name + ": "
                database_dir = os.path.join(self.benchmarker.config.db_root,
                                            db)
                docker_file = "%s.dockerfile" % db
                self.__build(
                    base_url=self.benchmarker.config.database_docker_host,
                    path=database_dir,
                    dockerfile=docker_file,
                    log_prefix=log_prefix,
                    build_log_file=os.devnull,
                    tag="techempower/%s" % db)
                built.append(db)
    def start_database(self, database):
        '''
        Sets up a container for the given database and port, and starts said docker
        container.
        '''
        image_name = "techempower/%s:latest" % database
        log_prefix = image_name + ": "
        sysctl = {
            'net.core.somaxconn': 65535,
            'kernel.sem': "250 32000 256 512"
        }
        ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]
        container = self.database.containers.run(
            "techempower/%s" % database,
            name="tfb-database",
            network=self.benchmarker.config.network,
            network_mode=self.benchmarker.config.network_mode,
            detach=True,
            ulimits=ulimit,
            sysctls=sysctl,
            remove=True,
            log_config={'type': None})
        # Sleep until the database accepts connections
        slept = 0
        max_sleep = 60
        database_ready = False
        while not database_ready and slept < max_sleep:
            time.sleep(1)
            slept += 1
            database_ready = databases[database].test_connection(self.benchmarker.config)
        if not database_ready:
            log("Database was not ready after startup", prefix=log_prefix)
        return container
    def build_wrk(self):
        '''
        Builds the techempower/tfb.wrk container
        '''
        self.__build(
            base_url=self.benchmarker.config.client_docker_host,
            path=self.benchmarker.config.wrk_root,
            dockerfile="wrk.dockerfile",
            log_prefix="wrk: ",
            build_log_file=os.devnull,
            tag="techempower/tfb.wrk")
    def test_client_connection(self, url):
        '''
        Tests that the app server at the given url responds successfully to a
        request.
        '''
        try:
            self.client.containers.run(
                'techempower/tfb.wrk',
                'curl --fail --max-time 5 %s' % url,
                remove=True,
                log_config={'type': None},
                network=self.benchmarker.config.network,
                network_mode=self.benchmarker.config.network_mode)
        except Exception:
            return False
        return True
    def server_container_exists(self, container_id_or_name):
        '''
        Returns True if the container still exists on the server.
        '''
        try:
            self.server.containers.get(container_id_or_name)
            return True
        except:
            return False
    def benchmark(self, script, variables, raw_file):
        '''
        Runs the given remote_script on the wrk container on the client machine.
        '''
        # Blocks until the wrk container exits, streaming its output into
        # the raw results file.
        def watch_container(container):
            with open(raw_file, 'w') as benchmark_file:
                for line in container.logs(stream=True):
                    log(line, file=benchmark_file)
        sysctl = {'net.core.somaxconn': 65535}
        ulimit = [{'name': 'nofile', 'hard': 65535, 'soft': 65535}]
        watch_container(
            self.client.containers.run(
                "techempower/tfb.wrk",
                "/bin/bash /%s" % script,
                environment=variables,
                network=self.benchmarker.config.network,
                network_mode=self.benchmarker.config.network_mode,
                detach=True,
                stderr=True,
                ulimits=ulimit,
                sysctls=sysctl,
                remove=True,
                log_config={'type': None}))
| bsd-3-clause |
DhashS/scala_comp_robo_sign_detection | src/main/venv/lib/python3.5/site-packages/setuptools/command/py36compat.py | 130 | 4968 | import os
from glob import glob
from distutils.util import convert_path
from distutils.command import sdist
from six.moves import filter
class sdist_add_defaults:
"""
Mix-in providing forward-compatibility for functionality as found in
distutils on Python 3.7.
Do not edit the code in this class except to update functionality
as implemented in distutils. Instead, override in the subclass.
"""
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
self._add_defaults_standards()
self._add_defaults_optional()
self._add_defaults_python()
self._add_defaults_data_files()
self._add_defaults_ext()
self._add_defaults_c_libs()
self._add_defaults_scripts()
@staticmethod
def _cs_path_exists(fspath):
"""
Case-sensitive path existence check
>>> sdist_add_defaults._cs_path_exists(__file__)
True
>>> sdist_add_defaults._cs_path_exists(__file__.upper())
False
"""
if not os.path.exists(fspath):
return False
# make absolute so we always have a directory
abspath = os.path.abspath(fspath)
directory, filename = os.path.split(abspath)
return filename in os.listdir(directory)
def _add_defaults_standards(self):
standards = [self.READMES, self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if self._cs_path_exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if self._cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
def _add_defaults_optional(self):
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
def _add_defaults_python(self):
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
def _add_defaults_data_files(self):
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str):
# plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else:
# a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
def _add_defaults_ext(self):
    """Add C/C++ extension module sources reported by build_ext."""
    if self.distribution.has_ext_modules():
        command = self.get_finalized_command('build_ext')
        self.filelist.extend(command.get_source_files())
def _add_defaults_c_libs(self):
    """Add C library sources reported by build_clib."""
    if self.distribution.has_c_libraries():
        command = self.get_finalized_command('build_clib')
        self.filelist.extend(command.get_source_files())
def _add_defaults_scripts(self):
    """Add script sources reported by build_scripts."""
    if self.distribution.has_scripts():
        command = self.get_finalized_command('build_scripts')
        self.filelist.extend(command.get_source_files())
# Newer setuptools/distutils releases ship the _add_defaults_* helpers on
# sdist itself; in that case this mixin must not override them, so it is
# reduced to an empty class.
if hasattr(sdist.sdist, '_add_defaults_standards'):
    # disable the functionality already available upstream
    class sdist_add_defaults:
        pass
| gpl-3.0 |
makinacorpus/libkml | examples/python/hellowalk.py | 25 | 3057 | #!/usr/bin/env python
# Copyright 2008, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of Google Inc. nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# This program demonstrates use of the KML DOM Python SWIG bindings
# for walking the feature hierarchy of a KML file.
import sys
import kmldom
import kmlengine
# Require exactly one command-line argument: the KML file to walk
# (Python 2 print-statement syntax, as in the rest of this script).
argc = len(sys.argv)
if argc != 2:
    print 'usage: %s input.kml' % sys.argv[0]
    sys.exit(1)
inputkml = sys.argv[1]
def ReadFile(filename):
    """Return the entire contents of *filename* as a string.

    Uses a context manager so the file handle is closed promptly;
    the original opened the file and never closed it, leaking the
    handle until garbage collection.
    """
    with open(filename, 'r') as f:
        return f.read()
def Indent(depth):
    """Print one space per level of depth, without a trailing newline
    (Python 2 trailing-comma print statement)."""
    while depth:
        print ' ',
        depth -= 1
def PrFeatureType(type):
    """Print the feature's kind; only Placemark is distinguished by name."""
    if type == kmldom.Type_Placemark:
        print 'Placemark',
    else:
        print 'some other Feature',
# This visits a feature. The type of feature is printed. If the feature
# is a container such is visited recursively.
def VisitFeature(feature, depth):
    """Print one feature at the given depth, then recurse into containers."""
    Indent(depth)
    PrFeatureType(feature.Type())
    print 'id=',feature.get_id()
    # A <Document> or <Folder> holds child features; walk them one level deeper.
    container = kmldom.AsContainer(feature)
    if container:
        WalkContainer(container, depth+1)
# This visits each feature in the given container (<Document> or <Folder>).
def WalkContainer(container, depth):
    """Visit every child feature of *container* at the given depth."""
    for i in range(container.get_feature_array_size()):
        VisitFeature(container.get_feature_array_at(i), depth)
# Program main: read the file to memory, parse it, get and visit
# the root feature if such exists.
def main():
    """Parse ``inputkml`` and walk its feature hierarchy from the root."""
    feature = kmlengine.GetRootFeature(kmldom.ParseKml(ReadFile(inputkml)))
    if feature:
        VisitFeature(feature, 0)
        # Python deletes the feature and all of its descendant elements in turn.
    else:
        # The file is a KML fragment.
        print 'No root feature in %s' % inputkml

if __name__ == '__main__':
    main()
| bsd-3-clause |
rcbops/python-django-buildpackage | django/contrib/admindocs/views.py | 296 | 15504 | from django import template, templatetags
from django.template import RequestContext
from django.conf import settings
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.shortcuts import render_to_response
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.contrib.sites.models import Site
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
from django.utils.safestring import mark_safe
import inspect, os, re
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class GenericSite(object):
    """Stand-in for a django.contrib.sites Site object when the sites
    framework is not installed; provides the same two attributes."""
    domain = 'example.com'
    name = 'my site'
def get_root_path():
    """Return the URL prefix of the admin site.

    Tries the named 'admin:index' URL first, then the legacy
    ``admin.site.root`` view, and finally falls back to the
    ADMIN_SITE_ROOT_URL setting (default '/admin/').
    """
    try:
        return urlresolvers.reverse('admin:index')
    except urlresolvers.NoReverseMatch:
        # Imported lazily to avoid a circular import at module load time.
        from django.contrib import admin
        try:
            return urlresolvers.reverse(admin.site.root, args=[''])
        except urlresolvers.NoReverseMatch:
            return getattr(settings, "ADMIN_SITE_ROOT_URL", "/admin/")
def doc_index(request):
    """Render the admindocs landing page (requires docutils)."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    return render_to_response('admin_doc/index.html', {
        'root_path': get_root_path(),
    }, context_instance=RequestContext(request))
# Staff-only access, applied without decorator syntax (pre-2.4 style
# kept consistent with the rest of this module).
doc_index = staff_member_required(doc_index)
def bookmarklets(request):
    """Render the bookmarklets help page with an absolute admin URL."""
    admin_root = get_root_path()
    return render_to_response('admin_doc/bookmarklets.html', {
        'root_path': admin_root,
        # Build an absolute URL, honouring the request scheme (https/http).
        'admin_url': mark_safe("%s://%s%s" % (request.is_secure() and 'https' or 'http', request.get_host(), admin_root)),
    }, context_instance=RequestContext(request))
bookmarklets = staff_member_required(bookmarklets)
def template_tag_index(request):
    """List every registered template tag with its rendered docstring."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    load_all_installed_template_libraries()

    tags = []
    # Built-in libraries carry no module name (None); app libraries are
    # keyed by their import path.
    app_libs = template.libraries.items()
    builtin_libs = [(None, lib) for lib in template.builtins]
    for module_name, library in builtin_libs + app_libs:
        for tag_name, tag_func in library.tags.items():
            # Split the docstring into title / body / metadata and render
            # each part from reStructuredText.
            title, body, metadata = utils.parse_docstring(tag_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
            if body:
                body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
            if library in template.builtins:
                tag_library = None
            else:
                # Show only the last path component as the library name.
                tag_library = module_name.split('.')[-1]
            tags.append({
                'name': tag_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    return render_to_response('admin_doc/template_tag_index.html', {
        'root_path': get_root_path(),
        'tags': tags
    }, context_instance=RequestContext(request))
template_tag_index = staff_member_required(template_tag_index)
def template_filter_index(request):
    """List every registered template filter with its rendered docstring.

    Mirrors template_tag_index(), but iterates library.filters.
    """
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    load_all_installed_template_libraries()

    filters = []
    app_libs = template.libraries.items()
    builtin_libs = [(None, lib) for lib in template.builtins]
    for module_name, library in builtin_libs + app_libs:
        for filter_name, filter_func in library.filters.items():
            title, body, metadata = utils.parse_docstring(filter_func.__doc__)
            if title:
                title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
            if body:
                body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
            for key in metadata:
                metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
            if library in template.builtins:
                tag_library = None
            else:
                tag_library = module_name.split('.')[-1]
            filters.append({
                'name': filter_name,
                'title': title,
                'body': body,
                'meta': metadata,
                'library': tag_library,
            })
    return render_to_response('admin_doc/template_filter_index.html', {
        'root_path': get_root_path(),
        'filters': filters
    }, context_instance=RequestContext(request))
template_filter_index = staff_member_required(template_filter_index)
def view_index(request):
    """List every view reachable from the urlconf of each admin'd site."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    # ADMIN_FOR lets one admin instance document several settings modules.
    if settings.ADMIN_FOR:
        settings_modules = [import_module(m) for m in settings.ADMIN_FOR]
    else:
        settings_modules = [settings]

    views = []
    for settings_mod in settings_modules:
        urlconf = import_module(settings_mod.ROOT_URLCONF)
        view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            # Sites framework not installed; use a placeholder site.
            site_obj = GenericSite()
        for (func, regex) in view_functions:
            views.append({
                # Class-based views have no __name__; fall back to the class.
                'name': getattr(func, '__name__', func.__class__.__name__),
                'module': func.__module__,
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'url': simplify_regex(regex),
            })
    return render_to_response('admin_doc/view_index.html', {
        'root_path': get_root_path(),
        'views': views
    }, context_instance=RequestContext(request))
view_index = staff_member_required(view_index)
def view_detail(request, view):
    """Render documentation for a single view, given as 'module.func'."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    mod, func = urlresolvers.get_mod_func(view)
    try:
        view_func = getattr(import_module(mod), func)
    except (ImportError, AttributeError):
        raise Http404
    title, body, metadata = utils.parse_docstring(view_func.__doc__)
    if title:
        title = utils.parse_rst(title, 'view', _('view:') + view)
    if body:
        body = utils.parse_rst(body, 'view', _('view:') + view)
    for key in metadata:
        # NOTE(review): the rST context here is 'model' while the lines
        # above use 'view' — looks inconsistent; confirm before changing.
        metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
    return render_to_response('admin_doc/view_detail.html', {
        'root_path': get_root_path(),
        'name': view,
        'summary': title,
        'body': body,
        'meta': metadata,
    }, context_instance=RequestContext(request))
view_detail = staff_member_required(view_detail)
def model_index(request):
    """List the _meta options of every installed model."""
    if not utils.docutils_is_available:
        return missing_docutils_page(request)
    m_list = [m._meta for m in models.get_models()]
    return render_to_response('admin_doc/model_index.html', {
        'root_path': get_root_path(),
        'models': m_list
    }, context_instance=RequestContext(request))
model_index = staff_member_required(model_index)
def model_detail(request, app_label, model_name):
    """Render field/method documentation for one model.

    ``model_name`` is expected lower-cased; matching is done against
    ``Model._meta.object_name.lower()``.
    """
    if not utils.docutils_is_available:
        return missing_docutils_page(request)

    # Get the model class.
    try:
        app_mod = models.get_app(app_label)
    except ImproperlyConfigured:
        raise Http404(_("App %r not found") % app_label)
    model = None
    for m in models.get_models(app_mod):
        if m._meta.object_name.lower() == model_name:
            model = m
            break
    if model is None:
        raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % {'model_name': model_name, 'app_label': app_label})

    opts = model._meta

    # Gather fields/field descriptions.
    fields = []
    for field in opts.fields:
        # ForeignKey is a special case since the field will actually be a
        # descriptor that returns the other object
        if isinstance(field, models.ForeignKey):
            data_type = related_object_name = field.rel.to.__name__
            app_label = field.rel.to._meta.app_label
            verbose = utils.parse_rst((_("the related `%(app_label)s.%(data_type)s` object") % {'app_label': app_label, 'data_type': data_type}), 'model', _('model:') + data_type)
        else:
            data_type = get_readable_field_data_type(field)
            verbose = field.verbose_name
        fields.append({
            'name': field.name,
            'data_type': data_type,
            'verbose': verbose,
            'help_text': field.help_text,
        })

    # Gather many-to-many fields: each exposes .all and .count accessors.
    for field in opts.many_to_many:
        data_type = related_object_name = field.rel.to.__name__
        app_label = field.rel.to._meta.app_label
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': app_label, 'object_name': data_type}
        fields.append({
            'name': "%s.all" % field.name,
            "data_type": 'List',
            'verbose': utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name' : "%s.count" % field.name,
            'data_type' : 'Integer',
            'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
        })

    # Gather model methods (no-argument instance methods only).
    for func_name, func in model.__dict__.items():
        if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
            # StopIteration is (ab)used as a multi-level 'continue' to
            # skip names matching any exclusion prefix.
            try:
                for exclude in MODEL_METHODS_EXCLUDE:
                    if func_name.startswith(exclude):
                        raise StopIteration
            except StopIteration:
                continue
            verbose = func.__doc__
            if verbose:
                verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.module_name)
            fields.append({
                'name': func_name,
                'data_type': get_return_data_type(func_name),
                'verbose': verbose,
            })

    # Gather related objects
    for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
        verbose = _("related `%(app_label)s.%(object_name)s` objects") % {'app_label': rel.opts.app_label, 'object_name': rel.opts.object_name}
        accessor = rel.get_accessor_name()
        fields.append({
            'name' : "%s.all" % accessor,
            'data_type' : 'List',
            'verbose' : utils.parse_rst(_("all %s") % verbose , 'model', _('model:') + opts.module_name),
        })
        fields.append({
            'name' : "%s.count" % accessor,
            'data_type' : 'Integer',
            'verbose' : utils.parse_rst(_("number of %s") % verbose , 'model', _('model:') + opts.module_name),
        })
    return render_to_response('admin_doc/model_detail.html', {
        'root_path': get_root_path(),
        'name': '%s.%s' % (opts.app_label, opts.object_name),
        'summary': _("Fields on %s objects") % opts.object_name,
        'description': model.__doc__,
        'fields': fields,
    }, context_instance=RequestContext(request))
model_detail = staff_member_required(model_detail)
def template_detail(request, template):
    """Show, for each admin'd site, where *template* would be loaded from.

    One row is produced per (settings module, template dir) pair with the
    candidate path, whether it exists, and a lazy loader for its contents.
    """
    templates = []
    for site_settings_module in settings.ADMIN_FOR:
        settings_mod = import_module(site_settings_module)
        if Site._meta.installed:
            site_obj = Site.objects.get(pk=settings_mod.SITE_ID)
        else:
            site_obj = GenericSite()
        for dir in settings_mod.TEMPLATE_DIRS:
            template_file = os.path.join(dir, template)
            templates.append({
                'file': template_file,
                'exists': os.path.exists(template_file),
                # BUGFIX: bind the current path as a default argument.
                # A plain closure late-binds `template_file`, so every
                # row would show the contents of the *last* candidate.
                'contents': lambda path=template_file: (
                    os.path.exists(path) and open(path).read() or ''),
                'site_id': settings_mod.SITE_ID,
                'site': site_obj,
                'order': list(settings_mod.TEMPLATE_DIRS).index(dir),
            })
    return render_to_response('admin_doc/template_detail.html', {
        'root_path': get_root_path(),
        'name': template,
        'templates': templates,
    }, context_instance=RequestContext(request))
template_detail = staff_member_required(template_detail)
####################
# Helper functions #
####################
def missing_docutils_page(request):
    """Display an error message for people without docutils installed."""
    return render_to_response('admin_doc/missing_docutils.html')
def load_all_installed_template_libraries():
    """Import every template tag library from every installed app so that
    ``template.libraries`` is fully populated for the index views."""
    # Load/register all template tag libraries from installed apps.
    for module_name in template.get_templatetags_modules():
        mod = import_module(module_name)
        # Candidate libraries: each .py file in the templatetags package
        # whose name starts with a letter.
        libraries = [
            os.path.splitext(p)[0]
            for p in os.listdir(os.path.dirname(mod.__file__))
            if p.endswith('.py') and p[0].isalpha()
        ]
        for library_name in libraries:
            try:
                lib = template.get_library(library_name)
            except template.InvalidTemplateLibrary, e:
                # Best effort: silently skip libraries that fail to load.
                pass
def get_return_data_type(func_name):
    """Guess a display data type from a getter-style function name.

    ``get_*_list`` -> 'List', ``get_*_count`` -> 'Integer',
    anything else -> ''.
    """
    if func_name.startswith('get_'):
        if func_name.endswith('_list'):
            return 'List'
        if func_name.endswith('_count'):
            return 'Integer'
    return ''
def get_readable_field_data_type(field):
    """Return the human-readable description of *field*'s type.

    Field descriptions may contain %-format placeholders, which are
    interpolated against the field's own attribute dictionary before
    being returned.
    """
    return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base=''):
    """
    Return a list of views from a list of urlpatterns.

    Each object in the returned list is a two-tuple: (view_func, regex).
    ``base`` accumulates the regex prefix while recursing into included
    urlconfs.
    """
    views = []
    for p in urlpatterns:
        if hasattr(p, '_get_callback'):
            # A URL pattern wrapping a single view.
            try:
                views.append((p._get_callback(), base + p.regex.pattern))
            except ViewDoesNotExist:
                continue
        elif hasattr(p, '_get_url_patterns'):
            # A resolver for an included urlconf; recurse into it.
            try:
                patterns = p.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(patterns, base + p.regex.pattern))
        else:
            raise TypeError(_("%s does not appear to be a urlpattern object") % p)
    return views
# Named groups `(?P<name>...)` keep just their `<name>`; any other group
# becomes the generic placeholder `<var>`.
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')

def simplify_regex(pattern):
    """
    Clean up urlpattern regexes into something somewhat readable by Mere
    Humans: turns something like
    "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
    into "/<sport_slug>/athletes/<athlete_slug>/".
    """
    # Collapse named groups to their <name>.
    pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
    # Collapse remaining (non-named) groups to a generic placeholder.
    pattern = non_named_group_matcher.sub('<var>', pattern)
    # Strip leftover regex metacharacters, in the same order as before.
    for old, new in (('^', ''), ('$', ''), ('?', ''), ('//', '/'), ('\\', '')):
        pattern = pattern.replace(old, new)
    return pattern if pattern.startswith('/') else '/' + pattern
| bsd-3-clause |
atplanet/ansible-modules-extras | notification/mail.py | 44 | 10185 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2012 Dag Wieers <dag@wieers.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
author: "Dag Wieers (@dagwieers)"
module: mail
short_description: Send an email
description:
- This module is useful for sending emails from playbooks.
- One may wonder why automate sending emails? In complex environments
there are from time to time processes that cannot be automated, either
because you lack the authority to make it so, or because not everyone
agrees to a common approach.
- If you cannot automate a specific step, but the step is non-blocking,
sending out an email to the responsible party to make him perform his
part of the bargain is an elegant way to put the responsibility in
someone else's lap.
- Of course sending out a mail can be equally useful as a way to notify
one or more people in a team that a specific action has been
(successfully) taken.
version_added: "0.8"
options:
from:
description:
- The email-address the mail is sent from. May contain address and phrase.
default: root
required: false
to:
description:
- The email-address(es) the mail is being sent to. This is
a comma-separated list, which may contain address and phrase portions.
default: root
required: false
cc:
description:
- The email-address(es) the mail is being copied to. This is
a comma-separated list, which may contain address and phrase portions.
required: false
bcc:
description:
- The email-address(es) the mail is being 'blind' copied to. This is
a comma-separated list, which may contain address and phrase portions.
required: false
subject:
description:
- The subject of the email being sent.
required: true
body:
description:
- The body of the email being sent.
default: $subject
required: false
username:
description:
- If SMTP requires username
default: null
required: false
version_added: "1.9"
password:
description:
- If SMTP requires password
default: null
required: false
version_added: "1.9"
host:
description:
- The mail server
default: 'localhost'
required: false
port:
description:
- The mail server port
default: '25'
required: false
version_added: "1.0"
attach:
description:
- A space-separated list of pathnames of files to attach to the message.
Attached files will have their content-type set to C(application/octet-stream).
default: null
required: false
version_added: "1.0"
headers:
description:
- A vertical-bar-separated list of headers which should be added to the message.
Each individual header is specified as C(header=value) (see example below).
default: null
required: false
version_added: "1.0"
charset:
description:
- The character set of email being sent
default: 'us-ascii'
required: false
subtype:
description:
- The minor mime type, can be either text or html. The major type is always text.
default: 'plain'
required: false
version_added: "2.0"
"""
EXAMPLES = '''
# Example playbook sending mail to root
- local_action: mail subject='System {{ ansible_hostname }} has been successfully provisioned.'
# Sending an e-mail using Gmail SMTP servers
- local_action: mail
host='smtp.gmail.com'
port=587
username=username@gmail.com
password='mysecret'
to="John Smith <john.smith@example.com>"
subject='Ansible-report'
body='System {{ ansible_hostname }} has been successfully provisioned.'
# Send e-mail to a bunch of users, attaching files
- local_action: mail
host='127.0.0.1'
port=2025
subject="Ansible-report"
body="Hello, this is an e-mail. I hope you like it ;-)"
from="jane@example.net (Jane Jolie)"
to="John Doe <j.d@example.org>, Suzie Something <sue@example.com>"
cc="Charlie Root <root@localhost>"
attach="/etc/group /tmp/pavatar2.png"
headers=Reply-To=john@example.com|X-Special="Something or other"
charset=utf8
# Sending an e-mail using the remote machine, not the Ansible controller node
- mail:
host='localhost'
port=25
to="John Smith <john.smith@example.com>"
subject='Ansible-report'
body='System {{ ansible_hostname }} has been successfully provisioned.'
'''
import os
import sys
import smtplib
import ssl
try:
    # Python 3 / modern Python 2.5+ module locations.
    from email import encoders
    import email.utils
    from email.utils import parseaddr, formataddr
    from email.mime.base import MIMEBase
    # BUGFIX: was "from mail.mime.multipart import ..." — the bogus
    # 'mail' package made this branch always raise ImportError, and on
    # Python 3 the legacy fallback below fails too, crashing the module.
    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText
except ImportError:
    # Legacy Python 2 capitalized module names.
    from email import Encoders as encoders
    import email.Utils
    from email.Utils import parseaddr, formataddr
    from email.MIMEBase import MIMEBase
    from email.MIMEMultipart import MIMEMultipart
    from email.MIMEText import MIMEText
def main():
    """Entry point for the ``mail`` Ansible module.

    Parses module parameters, connects to the SMTP server (implicit TLS
    first, then plain SMTP, optionally upgraded via STARTTLS when
    credentials are supplied), assembles a MIME message with optional
    attachments and custom headers, and sends it to all To/Cc/Bcc
    recipients.  Python 2 ``except X, e`` syntax is used throughout.
    """
    module = AnsibleModule(
        argument_spec = dict(
            username = dict(default=None),
            password = dict(default=None, no_log=True),
            host = dict(default='localhost'),
            port = dict(default='25'),
            sender = dict(default='root', aliases=['from']),
            to = dict(default='root', aliases=['recipients']),
            cc = dict(default=None),
            bcc = dict(default=None),
            subject = dict(required=True, aliases=['msg']),
            body = dict(default=None),
            attach = dict(default=None),
            headers = dict(default=None),
            charset = dict(default='us-ascii'),
            subtype = dict(default='plain')
        )
    )

    username = module.params.get('username')
    password = module.params.get('password')
    host = module.params.get('host')
    port = module.params.get('port')
    sender = module.params.get('sender')
    recipients = module.params.get('to')
    copies = module.params.get('cc')
    blindcopies = module.params.get('bcc')
    subject = module.params.get('subject')
    body = module.params.get('body')
    attach_files = module.params.get('attach')
    headers = module.params.get('headers')
    charset = module.params.get('charset')
    subtype = module.params.get('subtype')
    sender_phrase, sender_addr = parseaddr(sender)

    # The body defaults to the subject line.
    if not body:
        body = subject

    # Try an implicit-TLS connection first, then fall back to plain SMTP.
    try:
        try:
            smtp = smtplib.SMTP_SSL(host, port=int(port))
        except (smtplib.SMTPException, ssl.SSLError):
            smtp = smtplib.SMTP(host, port=int(port))
    except Exception, e:
        module.fail_json(rc=1, msg='Failed to send mail to server %s on port %s: %s' % (host, port, e))

    smtp.ehlo()
    if username and password:
        # Upgrade to TLS before authenticating, when the server offers it.
        if smtp.has_extn('STARTTLS'):
            smtp.starttls()
        try:
            smtp.login(username, password)
        except smtplib.SMTPAuthenticationError:
            module.fail_json(msg="Authentication to %s:%s failed, please check your username and/or password" % (host, port))

    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = formataddr((sender_phrase, sender_addr))
    msg.preamble = "Multipart message"

    # Custom headers arrive as 'Header=value' items separated by '|';
    # malformed items are silently ignored.
    if headers is not None:
        for hdr in [x.strip() for x in headers.split('|')]:
            try:
                h_key, h_val = hdr.split('=')
                msg.add_header(h_key, h_val)
            except:
                pass

    if 'X-Mailer' not in msg:
        msg.add_header('X-Mailer', "Ansible")

    to_list = []
    cc_list = []
    addr_list = []

    if recipients is not None:
        for addr in [x.strip() for x in recipients.split(',')]:
            to_list.append( formataddr( parseaddr(addr)) )
            addr_list.append( parseaddr(addr)[1] )    # address only, w/o phrase
    if copies is not None:
        for addr in [x.strip() for x in copies.split(',')]:
            cc_list.append( formataddr( parseaddr(addr)) )
            addr_list.append( parseaddr(addr)[1] )    # address only, w/o phrase
    if blindcopies is not None:
        # Bcc addresses go only on the envelope, never into the headers.
        for addr in [x.strip() for x in blindcopies.split(',')]:
            addr_list.append( parseaddr(addr)[1] )

    if len(to_list) > 0:
        msg['To'] = ", ".join(to_list)
    if len(cc_list) > 0:
        msg['Cc'] = ", ".join(cc_list)

    part = MIMEText(body + "\n\n", _subtype=subtype, _charset=charset)
    msg.attach(part)

    # Attach each listed file as application/octet-stream, base64-encoded.
    if attach_files is not None:
        for file in attach_files.split():
            try:
                fp = open(file, 'rb')
                part = MIMEBase('application', 'octet-stream')
                part.set_payload(fp.read())
                fp.close()
                encoders.encode_base64(part)
                part.add_header('Content-disposition', 'attachment', filename=os.path.basename(file))
                msg.attach(part)
            except Exception, e:
                module.fail_json(rc=1, msg="Failed to send mail: can't attach file %s: %s" % (file, e))

    composed = msg.as_string()

    try:
        # set() de-duplicates addresses appearing in To/Cc and Bcc alike.
        smtp.sendmail(sender_addr, set(addr_list), composed)
    except Exception, e:
        module.fail_json(rc=1, msg='Failed to send mail to %s: %s' % (", ".join(addr_list), e))

    smtp.quit()

    module.exit_json(changed=False)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
cchurch/ansible | lib/ansible/modules/cloud/azure/azure_rm_containerregistry.py | 8 | 14258 | #!/usr/bin/python
#
# Copyright (c) 2017 Yawei Wang, <yaweiw@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_containerregistry
version_added: "2.5"
short_description: Manage an Azure Container Registry
description:
- Create, update and delete an Azure Container Registry.
options:
resource_group:
description:
- Name of a resource group where the Container Registry exists or will be created.
required: true
name:
description:
- Name of the Container Registry.
required: true
state:
description:
- Assert the state of the container registry. Use C(present) to create or update an container registry and C(absent) to delete it.
default: present
choices:
- absent
- present
location:
description:
- Valid azure location. Defaults to location of the resource group.
admin_user_enabled:
description:
- If enabled, you can use the registry name as username and admin user access key as password to docker login to your container registry.
type: bool
default: no
sku:
description:
- Specifies the SKU to use. Currently can be either C(Basic), C(Standard) or C(Premium).
default: Standard
choices:
- Basic
- Standard
- Premium
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yawei Wang (@yaweiw)
'''
EXAMPLES = '''
- name: Create an azure container registry
azure_rm_containerregistry:
name: myRegistry
location: eastus
resource_group: myResourceGroup
admin_user_enabled: true
sku: Premium
tags:
Release: beta1
Environment: Production
- name: Remove an azure container registry
azure_rm_containerregistry:
name: myRegistry
resource_group: myResourceGroup
state: absent
'''
RETURN = '''
id:
description:
- Resource ID.
returned: always
type: str
sample: /subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourceGroups/myResourceGroup/providers/Microsoft.ContainerRegistry/registries/myRegistry
name:
description:
- Registry name.
returned: always
type: str
sample: myregistry
location:
description:
- Resource location.
returned: always
type: str
sample: westus
admin_user_enabled:
description:
- Is admin user enabled.
returned: always
type: bool
sample: true
sku:
description:
- The SKU name of the container registry.
returned: always
type: str
sample: Standard
provisioning_state:
description:
- Provisioning state.
returned: always
type: str
sample: Succeeded
login_server:
description:
- Registry login server.
returned: always
type: str
sample: myregistry.azurecr.io
credentials:
description:
- Passwords defined for the registry.
returned: always
type: complex
contains:
password:
description:
- password value.
returned: when registry exists and C(admin_user_enabled) is set
type: str
sample: pass1value
password2:
description:
- password2 value.
returned: when registry exists and C(admin_user_enabled) is set
type: str
sample: pass2value
tags:
description:
- Tags assigned to the resource. Dictionary of string:string parirs.
returned: always
type: dict
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
from azure.mgmt.containerregistry.models import (
Registry,
RegistryUpdateParameters,
StorageAccountProperties,
Sku,
SkuName,
SkuTier,
ProvisioningState,
PasswordName,
WebhookCreateParameters,
WebhookUpdateParameters,
WebhookAction,
WebhookStatus
)
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
except ImportError as exc:
# This is handled in azure_rm_common
pass
def create_containerregistry_dict(registry, credentials):
    '''
    Helper method to deserialize a ContainerRegistry to a dict.

    :param registry: container registry object returned by the Azure
        REST API, or None
    :param credentials: credentials object returned by the Azure REST
        API, or a falsy value when unavailable
    :return: dict of registry facts; string fields are '' when registry
        is None, and 'credentials' is {} when no credentials were given
    '''
    if registry is None:
        results = dict(
            id='',
            name='',
            location='',
            admin_user_enabled='',
            sku='',
            provisioning_state='',
            login_server='',
            credentials=dict(),
            tags='',
        )
    else:
        results = dict(
            id=registry.id,
            name=registry.name,
            location=registry.location,
            admin_user_enabled=registry.admin_user_enabled,
            sku=registry.sku.name,
            provisioning_state=registry.provisioning_state,
            login_server=registry.login_server,
            credentials=dict(),
            tags=registry.tags,
        )
    if credentials:
        # Admin-user access keys, when the admin user is enabled.
        results['credentials'] = dict(
            password=credentials.passwords[0].value,
            password2=credentials.passwords[1].value,
        )
    return results
class Actions:
    """Enumeration of reconciliation actions used by exec_module()."""
    NoAction = 0
    Create = 1
    Update = 2
class AzureRMContainerRegistry(AzureRMModuleBase):
"""Configuration class for an Azure RM container registry resource"""
def __init__(self):
    """Configuration class for an Azure RM container registry resource.

    Defines the module argument spec and the result skeleton, then
    delegates validation and Azure client setup to AzureRMModuleBase.
    """
    self.module_arg_spec = dict(
        resource_group=dict(
            type='str',
            required=True
        ),
        name=dict(
            type='str',
            required=True
        ),
        state=dict(
            type='str',
            default='present',
            choices=['present', 'absent']
        ),
        location=dict(
            type='str'
        ),
        admin_user_enabled=dict(
            type='bool',
            default=False
        ),
        sku=dict(
            type='str',
            default='Standard',
            choices=['Basic', 'Standard', 'Premium']
        )
    )

    # Populated from module parameters in exec_module().
    self.resource_group = None
    self.name = None
    self.location = None
    self.state = None
    self.sku = None
    self.tags = None

    self.results = dict(changed=False, state=dict())

    super(AzureRMContainerRegistry, self).__init__(
        derived_arg_spec=self.module_arg_spec,
        supports_check_mode=True,
        supports_tags=True)
def exec_module(self, **kwargs):
    """Main module execution method.

    Reconciles the requested state against the existing registry:
    creates or updates it under state=present, deletes it under
    state=absent, and returns the results dict (with 'changed' and the
    registry facts).
    """
    for key in list(self.module_arg_spec.keys()) + ['tags']:
        setattr(self, key, kwargs[key])

    response = None
    to_do = Actions.NoAction

    resource_group = self.get_resource_group(self.resource_group)
    if not self.location:
        # Default the registry location to the resource group's location.
        self.location = resource_group.location

    if self.state == 'present':
        # Check if the container registry instance is already present in
        # the resource group.
        response = self.get_containerregistry()
        if not response:
            to_do = Actions.Create
        else:
            self.log('Results : {0}'.format(response))
            self.results.update(response)
            if response['provisioning_state'] == "Succeeded":
                to_do = Actions.NoAction
                if (self.location is not None) and self.location != response['location']:
                    to_do = Actions.Update
                # BUGFIX: compare the requested SKU against the current
                # SKU; the original compared self.location to the SKU,
                # so SKU changes were never detected.
                elif (self.sku is not None) and self.sku != response['sku']:
                    to_do = Actions.Update
            else:
                # Still provisioning; leave the registry alone.
                to_do = Actions.NoAction

        self.log("Create / Update the container registry instance")
        if self.check_mode:
            return self.results

        self.results.update(self.create_update_containerregistry(to_do))
        self.results['changed'] = to_do != Actions.NoAction
        self.log("Container registry instance created or updated")
    elif self.state == 'absent':
        if self.check_mode:
            return self.results
        self.delete_containerregistry()
        self.log("Container registry instance deleted")

    return self.results
def create_update_containerregistry(self, to_do):
    '''
    Creates or updates a container registry.

    :param to_do: Actions value decided by exec_module
                  (Create, Update or NoAction)
    :return: deserialized container registry instance state dictionary
    '''
    self.log("Creating / Updating the container registry instance {0}".format(self.name))
    try:
        if to_do != Actions.NoAction:
            if to_do == Actions.Create:
                # Validate the requested registry name before creating.
                name_status = self.containerregistry_client.registries.check_name_availability(self.name)
                if name_status.name_available:
                    poller = self.containerregistry_client.registries.create(
                        resource_group_name=self.resource_group,
                        registry_name=self.name,
                        registry=Registry(
                            location=self.location,
                            sku=Sku(
                                name=self.sku
                            ),
                            tags=self.tags,
                            admin_user_enabled=self.admin_user_enabled
                        )
                    )
                else:
                    raise Exception("Invalid registry name. reason: " + name_status.reason + " message: " + name_status.message)
            else:
                # Update path: the registry must already exist.
                registry = self.containerregistry_client.registries.get(self.resource_group, self.name)
                if registry is not None:
                    poller = self.containerregistry_client.registries.update(
                        resource_group_name=self.resource_group,
                        registry_name=self.name,
                        registry_update_parameters=RegistryUpdateParameters(
                            sku=Sku(
                                name=self.sku
                            ),
                            tags=self.tags,
                            admin_user_enabled=self.admin_user_enabled
                        )
                    )
                else:
                    raise Exception("Update registry failed as registry '" + self.name + "' doesn't exist.")
            # Block until the long-running create/update operation completes.
            response = self.get_poller_result(poller)
            if self.admin_user_enabled:
                credentials = self.containerregistry_client.registries.list_credentials(self.resource_group, self.name)
            else:
                self.log('Cannot perform credential operations as admin user is disabled')
                credentials = None
        else:
            response = None
            credentials = None
    except Exception as exc:
        # CloudError is an Exception subclass, so the previous
        # `except (CloudError, Exception)` tuple was redundant; a single
        # Exception handler catches exactly the same set of errors.
        self.log('Error attempting to create / update the container registry instance.')
        self.fail("Error creating / updating the container registry instance: {0}".format(str(exc)))
    return create_containerregistry_dict(response, credentials)
def delete_containerregistry(self):
    '''
    Deletes the specified container registry in the specified subscription and resource group.

    :return: True (the module fails via self.fail on error)
    '''
    self.log("Deleting the container registry instance {0}".format(self.name))
    try:
        # .wait() blocks until the long-running delete operation finishes.
        self.containerregistry_client.registries.delete(self.resource_group, self.name).wait()
    except CloudError as e:
        self.log('Error attempting to delete the container registry instance.')
        self.fail("Error deleting the container registry instance: {0}".format(str(e)))
    return True
def get_containerregistry(self):
    '''
    Gets the properties of the specified container registry.

    :return: deserialized container registry state dictionary, or None
             when the registry does not exist
    '''
    self.log("Checking if the container registry instance {0} is present".format(self.name))
    found = False
    try:
        response = self.containerregistry_client.registries.get(self.resource_group, self.name)
        found = True
        self.log("Response : {0}".format(response))
        self.log("Container registry instance : {0} found".format(response.name))
    except CloudError as e:
        # NOTE(review): assumes e.error.error carries the service error code
        # ('ResourceNotFound') -- confirm against the SDK version in use.
        if e.error.error == 'ResourceNotFound':
            self.log('Did not find the container registry instance: {0}'.format(str(e)))
        else:
            self.fail('Error while trying to get container registry instance: {0}'.format(str(e)))
        response = None
    if found is True and self.admin_user_enabled is True:
        try:
            credentials = self.containerregistry_client.registries.list_credentials(self.resource_group, self.name)
        except CloudError as e:
            # self.fail normally aborts the module; the assignment below is a
            # defensive fallback in case fail returns.
            self.fail('List registry credentials failed: {0}'.format(str(e)))
            credentials = None
    elif found is True and self.admin_user_enabled is False:
        # Admin user disabled: credential listing is not possible.
        credentials = None
    else:
        return None
    return create_containerregistry_dict(response, credentials)
def main():
    """Main execution"""
    # Instantiating the module class runs the whole Ansible module workflow.
    AzureRMContainerRegistry()


if __name__ == '__main__':
    main()
| gpl-3.0 |
prutseltje/ansible | test/units/modules/network/onyx/test_onyx_protocols.py | 50 | 4685 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.onyx import onyx_protocol
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxProtocolModule(TestOnyxModule):
    """Unit tests for the onyx_protocol Ansible module."""

    module = onyx_protocol

    def setUp(self):
        """Patch the module's device accessors so no real device is needed."""
        super(TestOnyxProtocolModule, self).setUp()
        self.mock_get_config = patch.object(
            onyx_protocol.OnyxProtocolModule,
            "_get_protocols")
        self.get_config = self.mock_get_config.start()

        self.mock_get_ip_config = patch.object(
            onyx_protocol.OnyxProtocolModule,
            "_get_ip_routing")
        self.get_ip_config = self.mock_get_ip_config.start()

        self.mock_load_config = patch(
            'ansible.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        """Stop every patcher started in setUp."""
        super(TestOnyxProtocolModule, self).tearDown()
        self.mock_get_config.stop()
        # Bug fix: this patcher was started in setUp but never stopped,
        # leaking the _get_ip_routing mock into subsequent test cases.
        self.mock_get_ip_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        # Simulate a device whose current state comes from the fixture file.
        config_file = 'onyx_protocols_show.cfg'
        self.get_config.return_value = load_fixture(config_file)
        self.load_config.return_value = None
        self.get_ip_config.return_value = "IP routing: enabled"

    def test_mlag_enable(self):
        set_module_args(dict(mlag='enabled'))
        commands = ['protocol mlag']
        self.execute_module(changed=True, commands=commands)

    def test_mlag_disable(self):
        set_module_args(dict(mlag='disabled'))
        self.execute_module(changed=False)

    def test_magp_enable(self):
        set_module_args(dict(magp='enabled'))
        commands = ['protocol magp']
        self.execute_module(changed=True, commands=commands)

    def test_magp_disable(self):
        set_module_args(dict(magp='disabled'))
        self.execute_module(changed=False)

    def test_spanning_tree_enable(self):
        set_module_args(dict(spanning_tree='enabled'))
        self.execute_module(changed=False)

    def test_spanning_tree_disable(self):
        set_module_args(dict(spanning_tree='disabled'))
        commands = ['no spanning-tree']
        self.execute_module(changed=True, commands=commands)

    def test_dcb_pfc_enable(self):
        set_module_args(dict(dcb_pfc='enabled'))
        commands = ['dcb priority-flow-control enable force']
        self.execute_module(changed=True, commands=commands)

    def test_dcb_pfc_disable(self):
        set_module_args(dict(dcb_pfc='disabled'))
        self.execute_module(changed=False)

    def test_igmp_snooping_enable(self):
        set_module_args(dict(igmp_snooping='enabled'))
        commands = ['ip igmp snooping']
        self.execute_module(changed=True, commands=commands)

    def test_igmp_snooping_disable(self):
        set_module_args(dict(igmp_snooping='disabled'))
        self.execute_module(changed=False)

    def test_lacp_enable(self):
        set_module_args(dict(lacp='enabled'))
        commands = ['lacp']
        self.execute_module(changed=True, commands=commands)

    def test_lacp_disable(self):
        set_module_args(dict(lacp='disabled'))
        self.execute_module(changed=False)

    def test_ip_routing_enable(self):
        set_module_args(dict(ip_routing='enabled'))
        self.execute_module(changed=False)

    def test_ip_routing_disable(self):
        set_module_args(dict(ip_routing='disabled'))
        commands = ['no ip routing']
        self.execute_module(changed=True, commands=commands)

    def test_lldp_enable(self):
        set_module_args(dict(lldp='enabled'))
        commands = ['lldp']
        self.execute_module(changed=True, commands=commands)

    def test_lldp_disable(self):
        set_module_args(dict(lldp='disabled'))
        self.execute_module(changed=False)

    def test_bgp_enable(self):
        set_module_args(dict(bgp='enabled'))
        commands = ['protocol bgp']
        self.execute_module(changed=True, commands=commands)

    def test_bgp_disable(self):
        set_module_args(dict(bgp='disabled'))
        self.execute_module(changed=False)

    def test_ospf_enable(self):
        set_module_args(dict(ospf='enabled'))
        commands = ['protocol ospf']
        self.execute_module(changed=True, commands=commands)

    def test_ospf_disable(self):
        set_module_args(dict(ospf='disabled'))
        self.execute_module(changed=False)
| gpl-3.0 |
xiaolvmu/villec2-kernel | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called once by perf before any events are processed.
    print "trace_begin"
    pass
def trace_end():
    # Called once after all events; report any event types we did not handle.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    # Handler for the irq:softirq_entry tracepoint; demonstrates symbol_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for the kmem:kmalloc tracepoint; demonstrates flag_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  `unhandled` is an
    # autodict: first access of a missing key raises TypeError, hence
    # the initialization fallback.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Common one-line prefix shared by all event handlers.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # These fields are fetched lazily from the event context rather than
    # being passed as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
           common_lock_depth(context))
def print_unhandled():
    # Summarize event types that had no dedicated handler, as a small table.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
Acehaidrey/incubator-airflow | airflow/api/common/experimental/trigger_dag.py | 8 | 4451 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Triggering DAG runs APIs."""
import json
from datetime import datetime
from typing import List, Optional, Union
from airflow.exceptions import DagNotFound, DagRunAlreadyExists
from airflow.models import DagBag, DagModel, DagRun
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
def _trigger_dag(
    dag_id: str,
    dag_bag: DagBag,
    run_id: Optional[str] = None,
    conf: Optional[Union[dict, str]] = None,
    execution_date: Optional[datetime] = None,
    replace_microseconds: bool = True,
) -> List[DagRun]:  # pylint: disable=too-many-arguments
    """Triggers DAG run.

    Validates the execution date and run id, then creates one DagRun for
    the DAG and each of its subdags.

    :param dag_id: DAG ID
    :param dag_bag: DAG Bag model
    :param run_id: ID of the dag_run
    :param conf: configuration (dict, or a JSON string that is parsed)
    :param execution_date: date of execution
    :param replace_microseconds: whether microseconds should be zeroed
    :return: list of triggered dags
    :raises DagNotFound: if the DAG is not present in the bag
    :raises ValueError: on a naive execution_date or one before start_date
    :raises DagRunAlreadyExists: if a run with the same run_id exists
    """
    dag = dag_bag.get_dag(dag_id)  # prefetch dag if it is stored serialized

    if dag_id not in dag_bag.dags:
        raise DagNotFound(f"Dag id {dag_id} not found")

    execution_date = execution_date if execution_date else timezone.utcnow()

    if not timezone.is_localized(execution_date):
        # Only timezone-aware dates are accepted.
        raise ValueError("The execution_date should be localized")

    if replace_microseconds:
        execution_date = execution_date.replace(microsecond=0)

    if dag.default_args and 'start_date' in dag.default_args:
        min_dag_start_date = dag.default_args["start_date"]
        if min_dag_start_date and execution_date < min_dag_start_date:
            raise ValueError(
                "The execution_date [{}] should be >= start_date [{}] from DAG's default_args".format(
                    execution_date.isoformat(), min_dag_start_date.isoformat()
                )
            )

    run_id = run_id or DagRun.generate_run_id(DagRunType.MANUAL, execution_date)
    dag_run = DagRun.find(dag_id=dag_id, run_id=run_id)

    if dag_run:
        raise DagRunAlreadyExists(f"Run id {run_id} already exists for dag id {dag_id}")

    run_conf = None
    if conf:
        # Accept either a dict or a JSON-encoded string.
        run_conf = conf if isinstance(conf, dict) else json.loads(conf)

    triggers = []
    # Trigger the DAG itself plus every subdag with the same run id/date.
    dags_to_trigger = [dag] + dag.subdags
    for _dag in dags_to_trigger:
        trigger = _dag.create_dagrun(
            run_id=run_id,
            execution_date=execution_date,
            state=State.RUNNING,
            conf=run_conf,
            external_trigger=True,
            dag_hash=dag_bag.dags_hash.get(dag_id),
        )
        triggers.append(trigger)
    return triggers
def trigger_dag(
    dag_id: str,
    run_id: Optional[str] = None,
    conf: Optional[Union[dict, str]] = None,
    execution_date: Optional[datetime] = None,
    replace_microseconds: bool = True,
) -> Optional[DagRun]:
    """Triggers execution of DAG specified by dag_id

    Public wrapper around :func:`_trigger_dag` that resolves the DAG's
    file location from the metadata database first.

    :param dag_id: DAG ID
    :param run_id: ID of the dag_run
    :param conf: configuration
    :param execution_date: date of execution
    :param replace_microseconds: whether microseconds should be zeroed
    :return: first dag run triggered - even if more than one Dag Runs were triggered or None
    :raises DagNotFound: if no DagModel row exists for ``dag_id``
    """
    dag_model = DagModel.get_current(dag_id)
    if dag_model is None:
        raise DagNotFound(f"Dag id {dag_id} not found in DagModel")

    dagbag = DagBag(dag_folder=dag_model.fileloc, read_dags_from_db=True)
    triggers = _trigger_dag(
        dag_id=dag_id,
        dag_bag=dagbag,
        run_id=run_id,
        conf=conf,
        execution_date=execution_date,
        replace_microseconds=replace_microseconds,
    )

    return triggers[0] if triggers else None
| apache-2.0 |
webrecorder/warcio | test/test_capture_http_proxy.py | 1 | 7458 | from warcio.capture_http import capture_http
import threading
from wsgiref.simple_server import make_server, WSGIServer
import time
import requests
from warcio.archiveiterator import ArchiveIterator
from pytest import raises
# ==================================================================
class TestCaptureHttpProxy():
    """Tests for warcio's capture_http when requests traverse an HTTP(S) proxy.

    A local wsgiprox server echoes 'Proxied: <path>' so the tests can verify
    that WARC records carry the proxied target URI and WARC-Proxy-Host.
    """

    def setup(cls):
        # NOTE(review): pytest calls setup() once per test with the instance;
        # the parameter is named 'cls' but is really 'self' -- confirm.
        def app(env, start_response):
            result = ('Proxied: ' + env['PATH_INFO']).encode('utf-8')
            headers = [('Content-Length', str(len(result)))]
            start_response('200 OK', headers=headers)
            return iter([result])

        from wsgiprox.wsgiprox import WSGIProxMiddleware
        wsgiprox = WSGIProxMiddleware(app, '/')

        class NoLogServer(WSGIServer):
            # Suppress noisy tracebacks from aborted proxy connections.
            def handle_error(self, request, client_address):
                pass

        # Port 0: let the OS pick a free port, then read it back.
        server = make_server('localhost', 0, wsgiprox, server_class=NoLogServer)
        addr, cls.port = server.socket.getsockname()

        cls.proxies = {'https': 'localhost:' + str(cls.port),
                       'http': 'localhost:' + str(cls.port)
                      }

        def run():
            try:
                server.serve_forever()
            except Exception as e:
                print(e)

        thread = threading.Thread(target=run)
        thread.daemon = True
        thread.start()
        # Give the server thread a moment to start listening.
        time.sleep(0.1)

    def test_capture_http_proxy(self):
        # One plain-http request should yield one response + one request record.
        with capture_http() as warc_writer:
            res = requests.get("http://example.com/test", proxies=self.proxies, verify=False)

        ai = ArchiveIterator(warc_writer.get_stream())
        response = next(ai)
        assert response.rec_type == 'response'
        assert response.rec_headers['WARC-Target-URI'] == "http://example.com/test"
        assert response.content_stream().read().decode('utf-8') == 'Proxied: /http://example.com/test'
        assert response.rec_headers['WARC-Proxy-Host'] == 'http://localhost:{0}'.format(self.port)

        request = next(ai)
        assert request.rec_type == 'request'
        assert request.rec_headers['WARC-Target-URI'] == "http://example.com/test"
        assert request.rec_headers['WARC-Proxy-Host'] == 'http://localhost:{0}'.format(self.port)

        with raises(StopIteration):
            assert next(ai)

    def test_capture_https_proxy(self):
        # Requests made outside the capture_http context must not be recorded.
        with capture_http() as warc_writer:
            res = requests.get("https://example.com/test", proxies=self.proxies, verify=False)
            res = requests.get("https://example.com/foo", proxies=self.proxies, verify=False)

        # not recording this request
        res = requests.get("https://example.com/skip", proxies=self.proxies, verify=False)

        with capture_http(warc_writer):
            res = requests.get("https://example.com/bar", proxies=self.proxies, verify=False)

        ai = ArchiveIterator(warc_writer.get_stream())
        response = next(ai)
        assert response.rec_type == 'response'
        assert response.rec_headers['WARC-Target-URI'] == "https://example.com/test"
        assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
        assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/test'

        request = next(ai)
        assert request.rec_type == 'request'
        assert request.rec_headers['WARC-Target-URI'] == "https://example.com/test"
        assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)

        response = next(ai)
        assert response.rec_type == 'response'
        assert response.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
        assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
        assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/foo'

        request = next(ai)
        assert request.rec_type == 'request'
        assert request.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
        assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)

        response = next(ai)
        assert response.rec_type == 'response'
        assert response.rec_headers['WARC-Target-URI'] == "https://example.com/bar"
        assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
        assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/bar'

        request = next(ai)
        assert request.rec_type == 'request'

        with raises(StopIteration):
            assert next(ai)

    def test_capture_https_proxy_same_session(self):
        # With a shared session the connection stays open, so the 'skip'
        # request is still captured (see inline comment below).
        sesh = requests.session()
        with capture_http() as warc_writer:
            res = sesh.get("https://example.com/test", proxies=self.proxies, verify=False)
            res = sesh.get("https://example.com/foo", proxies=self.proxies, verify=False)

        # *will* be captured, as part of same session... (fix this?)
        res = sesh.get("https://example.com/skip", proxies=self.proxies, verify=False)

        with capture_http(warc_writer):
            res = sesh.get("https://example.com/bar", proxies=self.proxies, verify=False)

        ai = ArchiveIterator(warc_writer.get_stream())
        response = next(ai)
        assert response.rec_type == 'response'
        assert response.rec_headers['WARC-Target-URI'] == "https://example.com/test"
        assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
        assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/test'

        request = next(ai)
        assert request.rec_type == 'request'
        assert request.rec_headers['WARC-Target-URI'] == "https://example.com/test"
        assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)

        response = next(ai)
        assert response.rec_type == 'response'
        assert response.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
        assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
        assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/foo'

        request = next(ai)
        assert request.rec_type == 'request'
        assert request.rec_headers['WARC-Target-URI'] == "https://example.com/foo"
        assert request.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)

        response = next(ai)
        assert response.rec_type == 'response'
        assert response.rec_headers['WARC-Target-URI'] == "https://example.com/skip"
        assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
        assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/skip'

        request = next(ai)
        assert request.rec_type == 'request'

        response = next(ai)
        assert response.rec_type == 'response'
        assert response.rec_headers['WARC-Target-URI'] == "https://example.com/bar"
        assert response.rec_headers['WARC-Proxy-Host'] == 'https://localhost:{0}'.format(self.port)
        assert response.content_stream().read().decode('utf-8') == 'Proxied: /https://example.com/bar'

        request = next(ai)
        assert request.rec_type == 'request'

        with raises(StopIteration):
            assert next(ai)
| apache-2.0 |
mwhooker/fleet | Documentation/examples/api.py | 27 | 2250 | #
# This file provides an example of a very simple client library written in Python.
# The client builds an interface for interacting with the fleet API, then retrieves
# a list of Units currently loaded into fleet.
#
# Warning: the code below is a significally simplified version of a typical client
# library. It is an incomplete implementation that is provided to demonstrate
# some aspects of building a client library. It is not production-ready code.
#
# This example assumes that fleet is configured to listen on localhost:8080
#
# Requirements:
# httplib2 - https://github.com/jcgregorio/httplib2
# uritemplate - https://github.com/uri-templates/uritemplate-py
#
import httplib2
import json
import uritemplate
import urllib
import urlparse
import pprint
# Step 1: Fetch Discovery document.
ROOT_URL = "http://localhost:8080/"
DISCOVERY_URI = ROOT_URL + "fleet/v1/discovery"
h = httplib2.Http()
resp, content = h.request(DISCOVERY_URI)
discovery = json.loads(content)

# Step 2.a: Construct base URI
BASE_URI = ROOT_URL + discovery['servicePath']

# Empty namespace object; attributes are attached dynamically by build().
class Collection(object): pass
def createNewMethod(name, method):
    # Step 2.b Compose request
    # Return a closure that performs the HTTP call described by the
    # discovery 'method' dict (path template, parameters, httpMethod).
    def newMethod(**kwargs):
        body = kwargs.pop('body', None)
        url = urlparse.urljoin(BASE_URI, uritemplate.expand(method['path'], kwargs))
        for pname, pconfig in method.get('parameters', {}).iteritems():
            # Path parameters were consumed by the URI template; drop them
            # so they are not duplicated in the query string.
            if pconfig['location'] == 'path' and pname in kwargs:
                del kwargs[pname]
        if kwargs:
            url = url + '?' + urllib.urlencode(kwargs)
        return h.request(url, method=method['httpMethod'], body=body,
                headers={'content-type': 'application/json'})
    return newMethod
# Step 3.a: Build client surface
def build(discovery, collection):
    # Recursively attach sub-resources and callable methods described by
    # the discovery document onto 'collection'.
    for name, resource in discovery.get('resources', {}).iteritems():
        setattr(collection, name, build(resource, Collection()))
    for name, method in discovery.get('methods', {}).iteritems():
        setattr(collection, name, createNewMethod(name, method))
    return collection
service = build(discovery, Collection())

# Step 3.b: Use the client
response = service.Machines.List()

# output metadata (status, content-length, etc...)
pprint.pprint(response[0])

# output body
pprint.pprint(json.loads(response[1]))
| apache-2.0 |
fedora-infra/anitya | anitya/tests/lib/backends/test_freshmeat.py | 1 | 4187 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions
# of the GNU General Public License v.2, or (at your option) any later
# version. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY expressed or implied, including the
# implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU General Public License for more details. You
# should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission
# of Red Hat, Inc.
#
"""
anitya tests for the custom backend.
"""
import unittest
import anitya.lib.backends.freshmeat as backend
from anitya.db import models
from anitya.lib.exceptions import AnityaPluginException
from anitya.tests.base import DatabaseTestCase, create_distro
BACKEND = "Freshmeat"
class FreshmeatBackendtests(DatabaseTestCase):
    """Freshmeat backend tests."""
    # (docstring fixed: it previously said "Drupal backend tests", a
    # copy-paste from another backend's test module)

    def setUp(self):
        """Set up the environment, run before every test."""
        super(FreshmeatBackendtests, self).setUp()
        create_distro(self.session)
        self.create_project()

    def create_project(self):
        """Create some basic projects to work with."""
        project = models.Project(
            name="atmail",
            homepage="http://freecode.com/projects/atmail",
            backend=BACKEND,
        )
        self.session.add(project)
        self.session.commit()

        project = models.Project(
            name="foo", homepage="http://freecode.com/projects/foo", backend=BACKEND
        )
        self.session.add(project)
        self.session.commit()

        project = models.Project(
            name="awstats",
            homepage="http://freecode.com/projects/awstats",
            backend=BACKEND,
        )
        self.session.add(project)
        self.session.commit()

    def test_get_version(self):
        """Test the get_version function of the freshmeat backend."""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp = "7"
        obs = backend.FreshmeatBackend.get_version(project)
        self.assertEqual(obs, exp)

        pid = 2
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.FreshmeatBackend.get_version, project
        )

        pid = 3
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.FreshmeatBackend.get_version, project
        )

    def test_get_version_url(self):
        """Assert that correct url is returned."""
        project = models.Project(
            name="test", homepage="http://example.org", backend=BACKEND
        )
        exp = "http://freshmeat.net/projects/test"

        obs = backend.FreshmeatBackend.get_version_url(project)

        self.assertEqual(obs, exp)

    def test_get_versions(self):
        """Test the get_versions function of the freshmeat backend."""
        pid = 1
        project = models.Project.get(self.session, pid)
        exp = ["6.3.5", "6.5.0", "6.6.0", "6.30.3", "7"]
        obs = backend.FreshmeatBackend.get_ordered_versions(project)
        self.assertEqual(obs, exp)

        pid = 2
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.FreshmeatBackend.get_versions, project
        )

        pid = 3
        project = models.Project.get(self.session, pid)
        self.assertRaises(
            AnityaPluginException, backend.FreshmeatBackend.get_versions, project
        )
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    SUITE = unittest.TestLoader().loadTestsFromTestCase(FreshmeatBackendtests)
    unittest.TextTestRunner(verbosity=2).run(SUITE)
| gpl-2.0 |
endorphinl/horizon | openstack_dashboard/dashboards/project/vpn/workflows.py | 36 | 21869 | # Copyright 2013, Mirantis Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard import api
class AddVPNServiceAction(workflows.Action):
    """Workflow action collecting the inputs for a new VPN service."""

    name = forms.CharField(max_length=80, label=_("Name"), required=False)
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    router_id = forms.ChoiceField(label=_("Router"))
    subnet_id = forms.ChoiceField(label=_("Subnet"))
    admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
                                                (False, _('DOWN'))],
                                       label=_("Admin State"),
                                       help_text=_("The state to start in."),
                                       required=False)

    def __init__(self, request, *args, **kwargs):
        super(AddVPNServiceAction, self).__init__(request, *args, **kwargs)

    def populate_subnet_id_choices(self, request, context):
        """Offer every subnet of every network owned by the current tenant."""
        subnet_id_choices = [('', _("Select a Subnet"))]
        try:
            tenant_id = request.user.tenant_id
            networks = api.neutron.network_list_for_tenant(request, tenant_id)
        except Exception:
            # On API failure show an error and fall back to an empty list.
            exceptions.handle(request,
                              _('Unable to retrieve networks list.'))
            networks = []
        for n in networks:
            for s in n['subnets']:
                subnet_id_choices.append((s.id, s.cidr))
        self.fields['subnet_id'].choices = subnet_id_choices
        return subnet_id_choices

    def populate_router_id_choices(self, request, context):
        """Offer every router owned by the current tenant."""
        router_id_choices = [('', _("Select a Router"))]
        try:
            tenant_id = request.user.tenant_id
            routers = api.neutron.router_list(request, tenant_id=tenant_id)
        except Exception:
            exceptions.handle(request,
                              _('Unable to retrieve routers list.'))
            routers = []
        for r in routers:
            router_id_choices.append((r.id, r.name))
        self.fields['router_id'].choices = router_id_choices
        return router_id_choices

    class Meta(object):
        name = _("Add New VPN Service")
        permissions = ('openstack.services.network',)
        help_text = _("Create VPN Service for current project.\n\n"
                      "Specify a name, description, router, and subnet "
                      "for the VPN Service. "
                      "Admin State is Up (checked) by default."
                      )
class AddVPNServiceStep(workflows.Step):
    """Single workflow step wrapping AddVPNServiceAction."""

    action_class = AddVPNServiceAction
    contributes = ("name", "description", "subnet_id",
                   "router_id", "admin_state_up")

    def contribute(self, data, context):
        context = super(AddVPNServiceStep, self).contribute(data, context)
        if data:
            # NOTE(review): implicitly returns None when data is falsy --
            # appears deliberate, but confirm against the workflow framework.
            return context
class AddVPNService(workflows.Workflow):
    """Workflow that creates a VPN service via the Neutron VPNaaS API."""

    slug = "addvpnservice"
    name = _("Add VPN Service")
    finalize_button_name = _("Add")
    success_message = _('Added VPN Service "%s".')
    failure_message = _('Unable to add VPN Service "%s".')
    success_url = "horizon:project:vpn:index"
    default_steps = (AddVPNServiceStep,)

    def format_status_message(self, message):
        # Interpolate the service name into the success/failure templates.
        return message % self.context.get('name')

    def handle(self, request, context):
        # Returning False makes the workflow framework show failure_message.
        try:
            api.vpn.vpnservice_create(request, **context)
            return True
        except Exception:
            return False
class AddIKEPolicyAction(workflows.Action):
name = forms.CharField(max_length=80, label=_("Name"), required=False)
description = forms.CharField(
initial="", required=False,
max_length=80, label=_("Description"))
auth_algorithm = forms.ChoiceField(label=_("Authorization algorithm"),
required=False)
encryption_algorithm = forms.ChoiceField(label=_("Encryption algorithm"),
required=False)
ike_version = forms.ChoiceField(label=_("IKE version"), required=False)
lifetime_units = forms.ChoiceField(label=_("Lifetime units for IKE keys"),
required=False)
lifetime_value = forms.IntegerField(
min_value=60, label=_("Lifetime value for IKE keys"),
initial=3600,
help_text=_("Equal to or greater than 60"),
required=False)
pfs = forms.ChoiceField(label=_("Perfect Forward Secrecy"), required=False)
phase1_negotiation_mode = forms.ChoiceField(
label=_("IKE Phase1 negotiation mode"), required=False)
def __init__(self, request, *args, **kwargs):
super(AddIKEPolicyAction, self).__init__(request, *args, **kwargs)
auth_algorithm_choices = [("sha1", "sha1")]
self.fields['auth_algorithm'].choices = auth_algorithm_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['auth_algorithm'].widget.attrs['readonly'] = True
encryption_algorithm_choices = [("3des", "3des"),
("aes-128", "aes-128"),
("aes-192", "aes-192"),
("aes-256", "aes-256")]
self.fields[
'encryption_algorithm'].choices = encryption_algorithm_choices
self.fields['encryption_algorithm'].initial = "aes-128"
ike_version_choices = [("v1", "v1"),
("v2", "v2")]
self.fields['ike_version'].choices = ike_version_choices
lifetime_units_choices = [("seconds", "seconds")]
self.fields['lifetime_units'].choices = lifetime_units_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['lifetime_units'].widget.attrs['readonly'] = True
pfs_choices = [("group2", "group2"),
("group5", "group5"),
("group14", "group14")]
self.fields['pfs'].choices = pfs_choices
self.fields['pfs'].initial = "group5"
phase1_neg_mode_choices = [("main", "main")]
self.fields[
'phase1_negotiation_mode'].choices = phase1_neg_mode_choices
# Currently this field has only one choice, so mark it as readonly.
self.fields['phase1_negotiation_mode'].widget.attrs['readonly'] = True
    class Meta(object):
        # Metadata read by the Horizon workflow machinery: the step title
        # shown to the user, the policy rule required to see the step, and
        # the help text rendered in the side panel.
        name = _("Add New IKE Policy")
        permissions = ('openstack.services.network',)
        help_text = _("Create IKE Policy for current project.\n\n"
                      "Assign a name and description for the IKE Policy. "
                      )
class AddIKEPolicyStep(workflows.Step):
    """Workflow step wrapping AddIKEPolicyAction.

    Folds the flat ``lifetime_units``/``lifetime_value`` form inputs into
    the single nested ``lifetime`` dict placed in the workflow context.
    """
    action_class = AddIKEPolicyAction
    contributes = ("name", "description", "auth_algorithm",
                   "encryption_algorithm", "ike_version",
                   "lifetime_units", "lifetime_value",
                   "pfs", "phase1_negotiation_mode")

    def contribute(self, data, context):
        context = super(AddIKEPolicyStep, self).contribute(data, context)
        lifetime = {'units': data['lifetime_units'],
                    'value': data['lifetime_value']}
        # Replace the two flat keys with the nested structure.
        for flat_key in ('lifetime_units', 'lifetime_value'):
            del context[flat_key]
        context['lifetime'] = lifetime
        return context if data else None
class AddIKEPolicy(workflows.Workflow):
    """Workflow that creates a new IKE policy via the VPN API."""
    slug = "addikepolicy"
    name = _("Add IKE Policy")
    finalize_button_name = _("Add")
    success_message = _('Added IKE Policy "%s".')
    failure_message = _('Unable to add IKE Policy "%s".')
    success_url = "horizon:project:vpn:index"
    default_steps = (AddIKEPolicyStep,)

    def format_status_message(self, message):
        # Interpolate the policy name into the success/failure template.
        return message % self.context.get('name')

    def handle(self, request, context):
        try:
            api.vpn.ikepolicy_create(request, **context)
        except Exception:
            # Returning False makes the workflow report failure_message.
            return False
        return True
class AddIPSecPolicyAction(workflows.Action):
    """Form action collecting the parameters for a new IPSec policy."""
    name = forms.CharField(max_length=80, label=_("Name"), required=False)
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    auth_algorithm = forms.ChoiceField(label=_("Authorization algorithm"),
                                       required=False)
    encapsulation_mode = forms.ChoiceField(label=_("Encapsulation mode"),
                                           required=False)
    encryption_algorithm = forms.ChoiceField(label=_("Encryption algorithm"),
                                             required=False)
    lifetime_units = forms.ChoiceField(label=_("Lifetime units"),
                                       required=False)
    # BUG FIX: the label previously read "Lifetime value for IKE keys "
    # (copy-pasted from the IKE policy form, with a trailing space); this
    # form configures an IPSec policy, not IKE.
    lifetime_value = forms.IntegerField(
        min_value=60, label=_("Lifetime value"),
        initial=3600,
        help_text=_("Equal to or greater than 60"),
        required=False)
    pfs = forms.ChoiceField(label=_("Perfect Forward Secrecy"), required=False)
    transform_protocol = forms.ChoiceField(label=_("Transform Protocol"),
                                           required=False)

    def __init__(self, request, *args, **kwargs):
        """Populate the choice lists; single-choice fields become readonly."""
        super(AddIPSecPolicyAction, self).__init__(request, *args, **kwargs)
        self.fields['auth_algorithm'].choices = [("sha1", "sha1")]
        # Currently this field has only one choice, so mark it as readonly.
        self.fields['auth_algorithm'].widget.attrs['readonly'] = True
        self.fields['encapsulation_mode'].choices = [
            ("tunnel", "tunnel"),
            ("transport", "transport")]
        self.fields['encryption_algorithm'].choices = [
            ("3des", "3des"),
            ("aes-128", "aes-128"),
            ("aes-192", "aes-192"),
            ("aes-256", "aes-256")]
        self.fields['encryption_algorithm'].initial = "aes-128"
        self.fields['lifetime_units'].choices = [("seconds", "seconds")]
        # Currently this field has only one choice, so mark it as readonly.
        self.fields['lifetime_units'].widget.attrs['readonly'] = True
        self.fields['pfs'].choices = [("group2", "group2"),
                                      ("group5", "group5"),
                                      ("group14", "group14")]
        self.fields['pfs'].initial = "group5"
        self.fields['transform_protocol'].choices = [("esp", "esp"),
                                                     ("ah", "ah"),
                                                     ("ah-esp", "ah-esp")]

    class Meta(object):
        # Step title, access policy and side-panel help text for Horizon.
        name = _("Add New IPSec Policy")
        permissions = ('openstack.services.network',)
        help_text = _("Create IPSec Policy for current project.\n\n"
                      "Assign a name and description for the IPSec Policy. "
                      )
class AddIPSecPolicyStep(workflows.Step):
    """Workflow step wrapping AddIPSecPolicyAction.

    Folds the flat ``lifetime_units``/``lifetime_value`` form inputs into
    the single nested ``lifetime`` dict placed in the workflow context.
    """
    action_class = AddIPSecPolicyAction
    contributes = ("name", "description", "auth_algorithm",
                   "encapsulation_mode", "encryption_algorithm",
                   "lifetime_units", "lifetime_value",
                   "pfs", "transform_protocol")

    def contribute(self, data, context):
        context = super(AddIPSecPolicyStep, self).contribute(data, context)
        lifetime = {'units': data['lifetime_units'],
                    'value': data['lifetime_value']}
        # Replace the two flat keys with the nested structure.
        for flat_key in ('lifetime_units', 'lifetime_value'):
            del context[flat_key]
        context['lifetime'] = lifetime
        return context if data else None
class AddIPSecPolicy(workflows.Workflow):
    """Workflow that creates a new IPSec policy via the VPN API."""
    slug = "addipsecpolicy"
    name = _("Add IPSec Policy")
    finalize_button_name = _("Add")
    success_message = _('Added IPSec Policy "%s".')
    failure_message = _('Unable to add IPSec Policy "%s".')
    success_url = "horizon:project:vpn:index"
    default_steps = (AddIPSecPolicyStep,)

    def format_status_message(self, message):
        # Interpolate the policy name into the success/failure template.
        return message % self.context.get('name')

    def handle(self, request, context):
        try:
            api.vpn.ipsecpolicy_create(request, **context)
        except Exception:
            # Returning False makes the workflow report failure_message.
            return False
        return True
class AddIPSecSiteConnectionAction(workflows.Action):
    """Collect the required parameters for a new IPSec site connection."""
    name = forms.CharField(max_length=80, label=_("Name"), required=False)
    description = forms.CharField(
        initial="", required=False,
        max_length=80, label=_("Description"))
    vpnservice_id = forms.ChoiceField(
        label=_("VPN Service associated with this connection"))
    ikepolicy_id = forms.ChoiceField(
        label=_("IKE Policy associated with this connection"))
    ipsecpolicy_id = forms.ChoiceField(
        label=_("IPSec Policy associated with this connection"))
    peer_address = forms.IPField(
        label=_("Peer gateway public IPv4/IPv6 Address or FQDN"),
        help_text=_("Peer gateway public IPv4/IPv6 address or FQDN for "
                    "the VPN Connection"),
        version=forms.IPv4 | forms.IPv6,
        mask=False)
    peer_id = forms.IPField(
        label=_("Peer router identity for authentication (Peer ID)"),
        help_text=_("Peer router identity for authentication. "
                    "Can be IPv4/IPv6 address, e-mail, key ID, or FQDN"),
        version=forms.IPv4 | forms.IPv6,
        mask=False)
    peer_cidrs = forms.MultiIPField(
        label=_("Remote peer subnet(s)"),
        help_text=_("Remote peer subnet(s) address(es) "
                    "with mask(s) in CIDR format "
                    "separated with commas if needed "
                    "(e.g. 20.1.0.0/24, 21.1.0.0/24)"),
        version=forms.IPv4 | forms.IPv6,
        mask=True)
    psk = forms.CharField(max_length=80,
                          label=_("Pre-Shared Key (PSK) string"))

    def _make_choices(self, request, field_name, empty_label, lister,
                      error_msg):
        """Populate *field_name* with (id, name) choices for the tenant.

        The three populate_*_choices hooks below previously duplicated
        this logic verbatim; it is factored out here.  On API failure the
        error is surfaced via exceptions.handle() and only the placeholder
        choice remains, so the form still renders.
        """
        choices = [('', empty_label)]
        try:
            tenant_id = self.request.user.tenant_id
            objects = lister(request, tenant_id=tenant_id)
        except Exception:
            exceptions.handle(request, error_msg)
            objects = []
        for obj in objects:
            choices.append((obj.id, obj.name))
        self.fields[field_name].choices = choices
        return choices

    def populate_ikepolicy_id_choices(self, request, context):
        return self._make_choices(
            request, 'ikepolicy_id', _("Select IKE Policy"),
            api.vpn.ikepolicy_list,
            _('Unable to retrieve IKE Policies list.'))

    def populate_ipsecpolicy_id_choices(self, request, context):
        return self._make_choices(
            request, 'ipsecpolicy_id', _("Select IPSec Policy"),
            api.vpn.ipsecpolicy_list,
            _('Unable to retrieve IPSec Policies list.'))

    def populate_vpnservice_id_choices(self, request, context):
        return self._make_choices(
            request, 'vpnservice_id', _("Select VPN Service"),
            api.vpn.vpnservice_list,
            _('Unable to retrieve VPN Services list.'))

    class Meta(object):
        # Step title, access policy and side-panel help text for Horizon.
        name = _("Add New IPSec Site Connection")
        permissions = ('openstack.services.network',)
        help_text = _("Create IPSec Site Connection for current project.\n\n"
                      "Assign a name and description for the "
                      "IPSec Site Connection. "
                      "All fields in this tab are required."
                      )
class AddIPSecSiteConnectionStep(workflows.Step):
    # Required-parameters step of the AddIPSecSiteConnection workflow; the
    # keys listed below are contributed to the shared workflow context.
    action_class = AddIPSecSiteConnectionAction
    contributes = ("name", "description",
                   "vpnservice_id", "ikepolicy_id", "ipsecpolicy_id",
                   "peer_address", "peer_id", "peer_cidrs", "psk")
class AddIPSecSiteConnectionOptionalAction(workflows.Action):
    """Optional tuning parameters (MTU, DPD, initiator, admin state)."""
    mtu = forms.IntegerField(
        min_value=68,
        label=_("Maximum Transmission Unit size for the connection"),
        initial=1500,
        required=False,
        help_text=_("Equal to or greater than 68 if the local subnet is IPv4. "
                    "Equal to or greater than 1280 if the local subnet "
                    "is IPv6."))
    dpd_action = forms.ChoiceField(label=_("Dead peer detection actions"),
                                   required=False)
    dpd_interval = forms.IntegerField(
        min_value=1, label=_("Dead peer detection interval"),
        initial=30,
        required=False,
        help_text=_("Valid integer lesser than DPD timeout"))
    dpd_timeout = forms.IntegerField(
        min_value=1, label=_("Dead peer detection timeout"),
        initial=120,
        required=False,
        help_text=_("Valid integer greater than the DPD interval"))
    initiator = forms.ChoiceField(label=_("Initiator state"), required=False)
    admin_state_up = forms.ChoiceField(choices=[(True, _('UP')),
                                                (False, _('DOWN'))],
                                       label=_("Admin State"),
                                       required=False,
                                       help_text=_("The state to start in."))

    def __init__(self, request, *args, **kwargs):
        """Fill in the choice list for the initiator field."""
        super(AddIPSecSiteConnectionOptionalAction, self).__init__(
            request, *args, **kwargs)
        initiator_choices = [("bi-directional", "bi-directional"),
                             ("response-only", "response-only")]
        self.fields['initiator'].choices = initiator_choices

    def populate_dpd_action_choices(self, request, context):
        # Dead-peer-detection actions offered to the user.
        dpd_action_choices = [("hold", "hold"),
                              ("clear", "clear"),
                              ("disabled", "disabled"),
                              ("restart", "restart"),
                              ("restart-by-peer", "restart-by-peer")]
        self.fields['dpd_action'].choices = dpd_action_choices
        return dpd_action_choices

    def clean(self):
        """Cross-validate the DPD interval/timeout pair.

        BUG FIX: both fields are declared required=False, so either value
        may be None (omitted or individually invalid).  The previous
        ``if not interval < timeout`` check raised TypeError on Python 3
        in that case; only compare when both values are present.
        """
        cleaned_data = super(AddIPSecSiteConnectionOptionalAction,
                             self).clean()
        interval = cleaned_data.get('dpd_interval')
        timeout = cleaned_data.get('dpd_timeout')
        if (interval is not None and timeout is not None
                and interval >= timeout):
            msg = _("DPD Timeout must be greater than DPD Interval")
            self._errors['dpd_timeout'] = self.error_class([msg])
        return cleaned_data

    class Meta(object):
        # Step title, access policy and side-panel help text for Horizon.
        name = _("Optional Parameters")
        permissions = ('openstack.services.network',)
        help_text = _("Fields in this tab are optional. "
                      "You can configure the detail of "
                      "IPSec site connection created."
                      )
class AddIPSecSiteConnectionOptionalStep(workflows.Step):
    """Optional-parameters step; normalizes DPD and peer CIDR inputs."""
    action_class = AddIPSecSiteConnectionOptionalAction
    contributes = ("dpd_action", "dpd_interval", "dpd_timeout",
                   "initiator", "mtu", "admin_state_up")

    def contribute(self, data, context):
        context = super(
            AddIPSecSiteConnectionOptionalStep, self).contribute(data, context)
        # Collapse the three flat DPD inputs into the nested dict held in
        # the workflow context.
        dpd = {'action': data['dpd_action'],
               'interval': data['dpd_interval'],
               'timeout': data['dpd_timeout']}
        for flat_key in ('dpd_action', 'dpd_interval', 'dpd_timeout'):
            del context[flat_key]
        context['dpd'] = dpd
        # Turn the comma-separated CIDR string into a list of CIDRs.
        raw_cidrs = context['peer_cidrs']
        context['peer_cidrs'] = raw_cidrs.replace(" ", "").split(",")
        return context if data else None
class AddIPSecSiteConnection(workflows.Workflow):
    """Two-step workflow creating an IPSec site connection."""
    slug = "addipsecsiteconnection"
    name = _("Add IPSec Site Connection")
    finalize_button_name = _("Add")
    success_message = _('Added IPSec Site Connection "%s".')
    failure_message = _('Unable to add IPSec Site Connection "%s".')
    success_url = "horizon:project:vpn:index"
    default_steps = (AddIPSecSiteConnectionStep,
                     AddIPSecSiteConnectionOptionalStep)

    def format_status_message(self, message):
        # Interpolate the connection name into the message template.
        return message % self.context.get('name')

    def handle(self, request, context):
        try:
            api.vpn.ipsecsiteconnection_create(request, **context)
        except Exception:
            # Returning False makes the workflow report failure_message.
            return False
        return True
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.