repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
gkotton/neutron | neutron/plugins/mlnx/common/comm_utils.py | 10 | 2323 | # Copyright 2013 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from oslo.config import cfg
from neutron.openstack.common import log as logging
from neutron.plugins.mlnx.common import config # noqa
LOG = logging.getLogger(__name__)
class RetryDecorator(object):
    """Retry decorator that reruns a method 'retries' times on exception.

    If the wrapped callable raises `exceptionToCheck`, the call is retried
    up to 'retries' times, sleeping 'interval' seconds before the first
    retry and multiplying the sleep by 'backoff_rate' after each failed
    attempt.  One final attempt is made after the retry loop so that the
    last exception propagates to the caller.

    :param exceptionToCheck: the exception (or tuple of exceptions) to catch
    :param interval: initial delay between retries in seconds; defaults to
        cfg.CONF.ESWITCH.request_timeout / 1000
    :param retries: number of retried attempts before the final one;
        defaults to cfg.CONF.ESWITCH.retries
    :param backoff_rate: multiplier applied to the sleep interval after
        each failed attempt; defaults to cfg.CONF.ESWITCH.backoff_rate
    :raises: exceptionToCheck
    """

    def __init__(self, exceptionToCheck,
                 interval=None, retries=None, backoff_rate=None):
        # NOTE: the ESWITCH config values are read here, at instantiation
        # time, rather than in the signature defaults.  A default argument
        # such as `interval=cfg.CONF.ESWITCH.request_timeout / 1000` is
        # evaluated once at import time, before the config files have been
        # parsed, so configured (non-default) values were silently ignored.
        if interval is None:
            interval = cfg.CONF.ESWITCH.request_timeout / 1000
        if retries is None:
            retries = cfg.CONF.ESWITCH.retries
        if backoff_rate is None:
            backoff_rate = cfg.CONF.ESWITCH.backoff_rate
        self.exc = exceptionToCheck
        self.interval = interval
        self.retries = retries
        self.backoff_rate = backoff_rate

    def __call__(self, original_func):
        def decorated(*args, **kwargs):
            sleep_interval = self.interval
            num_of_iter = self.retries
            while num_of_iter > 0:
                try:
                    return original_func(*args, **kwargs)
                except self.exc:
                    LOG.debug("Request timeout - call again after "
                              "%s seconds", sleep_interval)
                    time.sleep(sleep_interval)
                    num_of_iter -= 1
                    sleep_interval *= self.backoff_rate
            # Final attempt outside the loop: any exception propagates.
            return original_func(*args, **kwargs)
        return decorated
| apache-2.0 |
akvo/akvo-rsr | akvo/rsr/migrations/0042_auto_20153011_1450.py | 1 | 1498 | # -*- coding: utf-8 -*-
from django.db import models, migrations
from ..models.project import Project
def change_primary_organisations(apps, schema_editor):
    """Backfill ``Project.primary_organisation`` for all existing projects.

    Preference order: the reporting organisation (IATI role 101), then a
    partner that can publish projects, then the first accountable partner
    (IATI role 2), and finally any partner at all.  Projects with no
    partners keep ``primary_organisation = None``.
    """
    Project = apps.get_model("rsr", "Project")
    for project in Project.objects.all():
        # Each candidate queryset is evaluated once via .first(); the
        # original truthiness check followed by `[0]` issued the same
        # database query twice for every matching branch.
        reporting = project.partnerships.filter(
            iati_organisation_role=101).first()
        if reporting is not None:
            # Pick the reporting org first
            primary_organisation = reporting.organisation
        else:
            # Otherwise, pick the partner that can publish the project
            primary_organisation = project.partners.filter(
                can_create_projects=True).first()
            if primary_organisation is None:
                # Otherwise, grab the first accountable partner we find
                accountable = project.partnerships.filter(
                    iati_organisation_role=2).first()
                if accountable is not None:
                    primary_organisation = accountable.organisation
                else:
                    # Panic mode: grab the first partner we find (or None)
                    primary_organisation = project.partners.all().first()
        project.primary_organisation = primary_organisation
        project.save(update_fields=['primary_organisation'])
class Migration(migrations.Migration):
    # Data migration: populates Project.primary_organisation for every
    # existing project (see change_primary_organisations above).
    # NOTE(review): RunPython is given no reverse_code, so this migration
    # cannot be unapplied.

    dependencies = [
        ('rsr', '0041_auto_20151116_1250'),
    ]

    operations = [
        migrations.RunPython(
            change_primary_organisations,
        ),
    ]
| agpl-3.0 |
TechBK/horizon-dev | openstack_dashboard/dashboards/project/data_processing/job_binaries/forms.py | 23 | 8922 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import uuid
from django.forms import util
from django.forms import widgets
from django import template
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import sahara as saharaclient
LOG = logging.getLogger(__name__)
class LabeledInput(widgets.Input):
    """Text input widget rendered with a fixed "swift://" label in front."""

    def render(self, name, values, attrs=None):
        final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
        # Build the label span and the input tag separately, then join.
        label_span = "<span id='%s'>%s</span>" % ("id_%s_label" % name,
                                                  "swift://")
        input_tag = '<input%s />' % util.flatatt(final_attrs)
        return mark_safe(label_span + input_tag)
class JobBinaryCreateForm(forms.SelfHandlingForm):
    """Form for creating a Sahara job binary.

    A binary can either live in Sahara's internal database (uploaded file
    or newly written script) or be referenced from Swift (URL plus
    credentials).  The widget `attrs` below drive Horizon's client-side
    field switching between those modes.
    """
    # Sentinel choice values meaning "create a new script" / "upload a new
    # file" instead of referencing an existing internal binary id.
    NEW_SCRIPT = "newscript"
    UPLOAD_BIN = "uploadfile"

    job_binary_name = forms.CharField(label=_("Name"))

    job_binary_type = forms.ChoiceField(
        label=_("Storage type"),
        widget=forms.Select(
            attrs={
                'class': 'switchable',
                'data-slug': 'jb_type'
            }))

    job_binary_url = forms.CharField(
        label=_("URL"),
        required=False,
        widget=LabeledInput(
            attrs={
                'class': 'switched',
                'data-switch-on': 'jb_type',
                'data-jb_type-swift': _('URL')
            }))

    job_binary_internal = forms.ChoiceField(
        label=_("Internal binary"),
        required=False,
        widget=forms.Select(
            attrs={
                'class': 'switched switchable',
                'data-slug': 'jb_internal',
                'data-switch-on': 'jb_type',
                'data-jb_type-internal-db': _('Internal Binary')
            }))

    job_binary_file = forms.FileField(
        label=_("Upload File"),
        required=False,
        widget=forms.ClearableFileInput(
            attrs={
                'class': 'switched',
                'data-switch-on': 'jb_internal',
                'data-jb_internal-uploadfile': _("Upload File")
            }))

    job_binary_script_name = forms.CharField(
        label=_("Script name"),
        required=False,
        widget=forms.TextInput(
            attrs={
                'class': 'switched',
                'data-switch-on': 'jb_internal',
                'data-jb_internal-newscript': _("Script name")
            }))

    job_binary_script = forms.CharField(
        label=_("Script text"),
        required=False,
        widget=forms.Textarea(
            attrs={
                'rows': 4,
                'class': 'switched',
                'data-switch-on': 'jb_internal',
                'data-jb_internal-newscript': _("Script text")
            }))

    job_binary_username = forms.CharField(
        label=_("Username"),
        required=False,
        widget=forms.TextInput(
            attrs={
                'class': 'switched',
                'data-switch-on': 'jb_type',
                'data-jb_type-swift': _('Username')
            }))

    job_binary_password = forms.CharField(
        label=_("Password"),
        required=False,
        widget=forms.PasswordInput(
            attrs={
                'autocomplete': 'off',
                'class': 'switched',
                'data-switch-on': 'jb_type',
                'data-jb_type-swift': _('Password')
            }))

    job_binary_description = forms.CharField(label=_("Description"),
                                             required=False,
                                             widget=forms.Textarea())

    def __init__(self, request, *args, **kwargs):
        """Populate the storage-type and internal-binary choice lists."""
        super(JobBinaryCreateForm, self).__init__(request, *args, **kwargs)

        self.help_text_template = ("project/data_processing.job_binaries/"
                                   "_create_job_binary_help.html")

        self.fields["job_binary_type"].choices =\
            [("internal-db", "Internal database"),
             ("swift", "Swift")]

        self.fields["job_binary_internal"].choices =\
            self.populate_job_binary_internal_choices(request)

    def populate_job_binary_internal_choices(self, request):
        """Return (id, name) choices for the existing internal binaries.

        The two sentinel entries (upload file / create script) are always
        prepended.  If the Sahara API call fails the error is surfaced via
        exceptions.handle and only the sentinel entries are offered.
        """
        try:
            job_binaries = saharaclient.job_binary_internal_list(request)
        except Exception:
            exceptions.handle(request,
                              _("Failed to get list of internal binaries."))
            job_binaries = []

        choices = [(job_binary.id, job_binary.name)
                   for job_binary in job_binaries]
        choices.insert(0, (self.NEW_SCRIPT, '*Create a script'))
        choices.insert(0, (self.UPLOAD_BIN, '*Upload a new file'))

        return choices

    def handle(self, request, context):
        """Create the job binary from the cleaned form data.

        Returns the created binary object, or False on failure (with the
        error reported through exceptions.handle).
        """
        try:
            extra = {}
            # Default URL form "<type>://<url>"; overridden below for the
            # internal-db case where the binary must be stored first.
            bin_url = "%s://%s" % (context["job_binary_type"],
                                   context["job_binary_url"])
            if(context["job_binary_type"] == "internal-db"):
                bin_url = self.handle_internal(request, context)
            elif(context["job_binary_type"] == "swift"):
                extra = self.handle_swift(request, context)

            bin_object = saharaclient.job_binary_create(
                request,
                context["job_binary_name"],
                bin_url,
                context["job_binary_description"],
                extra)
            messages.success(request, "Successfully created job binary")
            return bin_object
        except Exception:
            exceptions.handle(request,
                              _("Unable to create job binary"))
            return False

    def get_help_text(self, extra_context=None):
        """Render the contextual help panel for this form."""
        text = ""
        extra_context = extra_context or {}
        if self.help_text_template:
            tmpl = template.loader.get_template(self.help_text_template)
            context = template.RequestContext(self.request, extra_context)
            text += tmpl.render(context)
        else:
            text += defaultfilters.linebreaks(force_text(self.help_text))
        return defaultfilters.safe(text)

    class Meta(object):
        name = _("Create Job Binary")
        help_text_template = ("project/data_processing.job_binaries/"
                              "_create_job_binary_help.html")

    def handle_internal(self, request, context):
        """Store the binary in Sahara's internal DB; return its URL.

        Handles both the uploaded-file and new-script sentinel choices.
        Returns None when the upload/creation fails (error already shown).
        """
        result = ""
        bin_id = context["job_binary_internal"]

        if(bin_id == self.UPLOAD_BIN):
            try:
                result = saharaclient.job_binary_internal_create(
                    request,
                    self.get_unique_binary_name(
                        request, request.FILES["job_binary_file"].name),
                    request.FILES["job_binary_file"].read())
                bin_id = result.id
            except Exception:
                exceptions.handle(request,
                                  _("Unable to upload job binary"))
                return None
        elif(bin_id == self.NEW_SCRIPT):
            try:
                result = saharaclient.job_binary_internal_create(
                    request,
                    self.get_unique_binary_name(
                        request, context["job_binary_script_name"]),
                    context["job_binary_script"])
                bin_id = result.id
            except Exception:
                exceptions.handle(request,
                                  _("Unable to create job binary"))
                return None

        return "internal-db://%s" % bin_id

    def handle_swift(self, request, context):
        """Build the credentials dict stored alongside a Swift binary."""
        username = context["job_binary_username"]
        password = context["job_binary_password"]

        extra = {
            "user": username,
            "password": password
        }
        return extra

    def get_unique_binary_name(self, request, base_name):
        """Return base_name, suffixed with a UUID when it already exists."""
        try:
            internals = saharaclient.job_binary_internal_list(request)
        except Exception:
            internals = []
            exceptions.handle(request,
                              _("Failed to fetch internal binary list"))
        names = [internal.name for internal in internals]
        if base_name in names:
            return "%s_%s" % (base_name, uuid.uuid1())
        return base_name
| apache-2.0 |
hannes/linux | Documentation/target/tcm_mod_builder.py | 497 | 22865 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
    """Print an error message and abort the script with exit status 1."""
    # print(msg) with a single argument behaves identically under
    # Python 2 (parenthesized expression) and Python 3.
    print(msg)
    sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
    """Create the fabric module's output directory.

    Returns 1 if the directory already exists, None after creating it.
    Aborts via tcm_mod_err() if the directory cannot be created.
    """
    if os.path.isdir(fabric_mod_dir_var):
        return 1

    print("Creating fabric_mod_dir: " + fabric_mod_dir_var)
    # os.mkdir() returns None and signals failure by raising OSError, so
    # the previous `ret = os.mkdir(...); if ret:` error check could never
    # fire.  Catch the exception so the intended error path actually runs.
    try:
        os.mkdir(fabric_mod_dir_var)
    except OSError:
        tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
    return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for an FC fabric module.

    Emits the tpg/lport struct definitions and sets the module-level
    globals fabric_mod_port="lport" / fabric_mod_init_port="nport" that
    the other generators rely on.
    """
    global fabric_mod_port
    global fabric_mod_init_port

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* FC lport target portal group tag for TCM */\n"
    buf += " u16 lport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
    buf += " struct " + fabric_mod_name + "_lport *lport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_lport {\n"
    buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
    buf += " u64 lport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
    buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
    buf += " struct se_wwn lport_wwn;\n"
    buf += "};\n"

    # open() raises IOError/OSError on failure and write() does not signal
    # errors via its return value (under Python 3 it returns the truthy
    # character count), so the old `if not p:` / `if ret:` checks were
    # dead code (and wrongly fatal on py3).  A context manager also
    # guarantees the file is closed.
    with open(f, 'w') as p:
        p.write(buf)

    fabric_mod_port = "lport"
    fabric_mod_init_port = "nport"
    return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for a SAS fabric module.

    Emits the tpg/tport struct definitions and sets the module-level
    globals fabric_mod_port="tport" / fabric_mod_init_port="iport" that
    the other generators rely on.
    """
    global fabric_mod_port
    global fabric_mod_init_port

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* SAS port target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
    buf += " u64 tport_wwpn;\n"
    buf += " /* ASCII formatted WWPN for SAS Target port */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    # See tcm_mod_build_FC_include(): the old `if not p:` / `if ret:`
    # checks were dead code; use a context manager instead.
    with open(f, 'w') as p:
        p.write(buf)

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_base.h for an iSCSI fabric module.

    Emits the tpg/tport struct definitions (no binary WWPN for iSCSI —
    only the ASCII IQN name) and sets the module-level globals
    fabric_mod_port="tport" / fabric_mod_init_port="iport".
    """
    global fabric_mod_port
    global fabric_mod_init_port

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
    print("Writing file: " + f)

    buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
    buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
    buf += "\n"
    buf += "struct " + fabric_mod_name + "_tpg {\n"
    buf += " /* iSCSI target portal group tag for TCM */\n"
    buf += " u16 tport_tpgt;\n"
    buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
    buf += " struct " + fabric_mod_name + "_tport *tport;\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
    buf += " struct se_portal_group se_tpg;\n"
    buf += "};\n\n"
    buf += "struct " + fabric_mod_name + "_tport {\n"
    buf += " /* ASCII formatted TargetName for IQN */\n"
    buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
    buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
    buf += " struct se_wwn tport_wwn;\n"
    buf += "};\n"

    # See tcm_mod_build_FC_include(): the old `if not p:` / `if ret:`
    # checks were dead code; use a context manager instead.
    with open(f, 'w') as p:
        p.write(buf)

    fabric_mod_port = "tport"
    fabric_mod_init_port = "iport"
    return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
    """Dispatch to the protocol-specific _base.h generator.

    Exits the script with status 1 for an unrecognized proto_ident.
    """
    if proto_ident == "FC":
        tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "SAS":
        tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
    elif proto_ident == "iSCSI":
        tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
    else:
        # print() form is valid in both Python 2 and Python 3.
        print("Unsupported proto_ident: " + proto_ident)
        sys.exit(1)
    return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <fabric_mod_name>_configfs.c for the new fabric module.

    The emitted C file contains the configfs make/drop callbacks for the
    WWN and TPG objects, the target_core_fabric_ops table wiring in the
    stub callbacks from <fabric_mod_name>_fabric.c, and the module
    init/exit hooks.  Struct and field names depend on the globals
    fabric_mod_port / fabric_mod_init_port set by the protocol-specific
    *_include() generator, which must run first.
    """
    buf = ""
    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
    print "Writing file: " + f

    p = open(f, 'w');
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    # Header includes for the generated C source.
    buf = "#include <linux/module.h>\n"
    buf += "#include <linux/moduleparam.h>\n"
    buf += "#include <linux/version.h>\n"
    buf += "#include <generated/utsrelease.h>\n"
    buf += "#include <linux/utsname.h>\n"
    buf += "#include <linux/init.h>\n"
    buf += "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/configfs.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi_proto.h>\n\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
    buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops;\n\n"
    # make_tpg() callback: parses the configfs "tpgt_N" directory name and
    # registers a new target portal group.
    buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
    buf += " struct se_wwn *wwn,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
    buf += " unsigned long tpgt;\n"
    buf += " int ret;\n\n"
    buf += " if (strstr(name, \"tpgt_\") != name)\n"
    buf += " return ERR_PTR(-EINVAL);\n"
    buf += " if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
    buf += " return ERR_PTR(-EINVAL);\n\n"
    buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
    buf += " if (!tpg) {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
    buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
    # The SCSI protocol identifier passed to core_tpg_register() depends
    # on the fabric type being generated.
    if proto_ident == "FC":
        buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
    elif proto_ident == "SAS":
        buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
    elif proto_ident == "iSCSI":
        buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
    buf += " if (ret < 0) {\n"
    buf += " kfree(tpg);\n"
    buf += " return NULL;\n"
    buf += " }\n"
    buf += " return &tpg->se_tpg;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
    buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
    buf += " core_tpg_deregister(se_tpg);\n"
    buf += " kfree(tpg);\n"
    buf += "}\n\n"
    # make/drop callbacks for the WWN (lport/tport) configfs object.
    buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
    buf += " struct target_fabric_configfs *tf,\n"
    buf += " struct config_group *group,\n"
    buf += " const char *name)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
    # Only FC and SAS carry a binary WWPN; iSCSI uses the ASCII IQN only.
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " u64 wwpn = 0;\n\n"
    buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
    buf += " return ERR_PTR(-EINVAL); */\n\n"
    buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
    buf += " if (!" + fabric_mod_port + ") {\n"
    buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
    buf += " return ERR_PTR(-ENOMEM);\n"
    buf += " }\n"
    if proto_ident == "FC" or proto_ident == "SAS":
        buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
    buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
    buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
    buf += "}\n\n"
    buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
    buf += "{\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
    buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
    buf += " kfree(" + fabric_mod_port + ");\n"
    buf += "}\n\n"
    # Fabric ops table wiring the generated callbacks into TCM core.
    buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
    buf += " .module = THIS_MODULE,\n"
    buf += " .name = \"" + fabric_mod_name + "\",\n"
    buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
    buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
    buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
    buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
    buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
    buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
    buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
    buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
    buf += " .sess_get_initiator_sid = NULL,\n"
    buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
    buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
    buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
    buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
    buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
    buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
    buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
    buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
    buf += " /*\n"
    buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
    buf += " */\n"
    buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
    buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
    buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
    buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
    buf += "};\n\n"
    # Module init/exit hooks registering the fabric template.
    buf += "static int __init " + fabric_mod_name + "_init(void)\n"
    buf += "{\n"
    buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
    buf += "};\n\n"
    buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
    buf += "{\n"
    buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
    buf += "};\n\n"
    buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
    buf += "MODULE_LICENSE(\"GPL\");\n"
    buf += "module_init(" + fabric_mod_name + "_init);\n"
    buf += "module_exit(" + fabric_mod_name + "_exit);\n"

    # NOTE(review): file.write() returns None under Python 2, so this
    # error check never fires there; open() raises rather than returning
    # a falsy object, so `if not p:` is likewise dead code.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()
    return
def tcm_mod_scan_fabric_ops(tcm_dir):
    """Collect function-pointer member lines from target_core_fabric.h.

    Scans the kernel header line by line and appends every line that
    contains a function-pointer declaration (matched by '(*') to the
    module-level `fabric_ops` list, which tcm_mod_dump_fabric_ops() later
    uses to decide which stub callbacks to generate.
    """
    fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"

    print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
    process_fo = 0;

    p = open(fabric_ops_api, 'r')

    line = p.readline()
    while line:
        # Skip the struct's opening line itself before collecting members.
        if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
            line = p.readline()
            continue

        if process_fo == 0:
            process_fo = 1;
            line = p.readline()
            # Search for function pointer
            if not re.search('\(\*', line):
                continue

            fabric_ops.append(line.rstrip())
            continue

        line = p.readline()
        # Search for function pointer
        if not re.search('\(\*', line):
            continue

        fabric_ops.append(line.rstrip())

    p.close()
    return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
    """Generate <mod>_fabric.c (stub callbacks) and <mod>_fabric.h.

    For every function-pointer line collected into the global `fabric_ops`
    list by tcm_mod_scan_fabric_ops(), a minimal stub implementation is
    appended to `buf` (the .c file) and its prototype to `bufi` (the .h
    file).  The port field names come from the global fabric_mod_port.
    """
    buf = ""
    bufi = ""

    f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
    print "Writing file: " + f

    p = open(f, 'w')
    if not p:
        tcm_mod_err("Unable to open file: " + f)

    fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
    print "Writing file: " + fi

    pi = open(fi, 'w')
    if not pi:
        tcm_mod_err("Unable to open file: " + fi)

    # Fixed preamble: includes plus the always-generated check_true /
    # check_false helpers used by the demo-mode ops.
    buf = "#include <linux/slab.h>\n"
    buf += "#include <linux/kthread.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/list.h>\n"
    buf += "#include <linux/types.h>\n"
    buf += "#include <linux/string.h>\n"
    buf += "#include <linux/ctype.h>\n"
    buf += "#include <asm/unaligned.h>\n"
    buf += "#include <scsi/scsi_common.h>\n"
    buf += "#include <scsi/scsi_proto.h>\n"
    buf += "#include <target/target_core_base.h>\n"
    buf += "#include <target/target_core_fabric.h>\n"
    buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
    buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"

    buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 1;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"

    buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
    buf += "{\n"
    buf += " return 0;\n"
    buf += "}\n\n"
    bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"

    # Emit a stub for each recognized callback found in the header scan.
    total_fabric_ops = len(fabric_ops)
    i = 0
    while i < total_fabric_ops:
        fo = fabric_ops[i]
        i += 1
        # print "fabric_ops: " + fo

        if re.search('get_fabric_name', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
            buf += "{\n"
            buf += " return \"" + fabric_mod_name + "\";\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
            continue

        if re.search('get_wwn', fo):
            buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
            buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
            buf += "}\n\n"
            bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"

        if re.search('get_tag', fo):
            buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
            buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
            buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
            buf += "}\n\n"
            bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"

        if re.search('tpg_get_inst_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
            buf += "{\n"
            buf += " return 1;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"

        if re.search('\*release_cmd\)\(', fo):
            buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"

        if re.search('sess_get_index\)\(', fo):
            buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"

        if re.search('write_pending\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"

        if re.search('write_pending_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"

        if re.search('set_default_node_attributes\)\(', fo):
            buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"

        if re.search('get_cmd_state\)\(', fo):
            buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"

        if re.search('queue_data_in\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"

        if re.search('queue_status\)\(', fo):
            buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return 0;\n"
            buf += "}\n\n"
            bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"

        if re.search('queue_tm_rsp\)\(', fo):
            buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"

        if re.search('aborted_task\)\(', fo):
            buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
            buf += "{\n"
            buf += " return;\n"
            buf += "}\n\n"
            bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"

    # NOTE(review): write() returns None under Python 2, so these error
    # checks never fire there; `if not p:` / `if not pi:` above are
    # likewise dead since open() raises on failure.
    ret = p.write(buf)
    if ret:
        tcm_mod_err("Unable to write f: " + f)

    p.close()

    ret = pi.write(bufi)
    if ret:
        tcm_mod_err("Unable to write fi: " + fi)

    pi.close()
    return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
    """Write the new fabric module's kbuild Makefile."""
    buf = ""
    f = fabric_mod_dir_var + "/Makefile"
    print("Writing file: " + f)

    buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
    buf += " " + fabric_mod_name + "_configfs.o\n"
    buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"

    # open() raises on failure and write() does not report errors via its
    # return value, so the previous `if not p:` / `if ret:` checks were
    # unreachable; a context manager also guarantees the file is closed.
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
    """Write the new fabric module's Kconfig entry."""
    buf = ""
    f = fabric_mod_dir_var + "/Kconfig"
    print("Writing file: " + f)

    buf = "config " + fabric_mod_name.upper() + "\n"
    buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
    buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
    buf += " default n\n"
    buf += " ---help---\n"
    buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"

    # See tcm_mod_build_kbuild(): the old `if not p:` / `if ret:` error
    # checks were dead code; use a context manager instead.
    with open(f, 'w') as p:
        p.write(buf)
    return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new module's obj-$(CONFIG_...) line to drivers/target/Makefile."""
    entry = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
    makefile_path = tcm_dir + "/drivers/target/Makefile"
    with open(makefile_path, 'a') as makefile:
        makefile.write(entry)
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a source line for the new module to drivers/target/Kconfig."""
    entry = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
    kconfig_path = tcm_dir + "/drivers/target/Kconfig"
    with open(kconfig_path, 'a') as kconfig:
        kconfig.write(entry)
    return
def main(modname, proto_ident):
    """Drive the full generation of a new TCM fabric module skeleton.

    Creates drivers/target/<modname>/ (relative to two directories above
    the current working directory), generates the base header, fabric
    stubs, configfs glue, Makefile and Kconfig, then interactively offers
    to wire the module into the top-level target Makefile/Kconfig.
    Exits with status 1 for an unknown proto_ident or an existing module
    directory.
    """
    # proto_ident = "FC"
    # proto_ident = "SAS"
    # proto_ident = "iSCSI"

    # Assumes the script is run from its location inside the kernel tree
    # (Documentation/target/), two levels below the tree root.
    tcm_dir = os.getcwd();
    tcm_dir += "/../../"
    print "tcm_dir: " + tcm_dir

    fabric_mod_name = modname
    fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
    print "Set fabric_mod_name: " + fabric_mod_name
    print "Set fabric_mod_dir: " + fabric_mod_dir
    print "Using proto_ident: " + proto_ident

    if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
        print "Unsupported proto_ident: " + proto_ident
        sys.exit(1)

    ret = tcm_mod_create_module_subdir(fabric_mod_dir)
    if ret:
        print "tcm_mod_create_module_subdir() failed because module already exists!"
        sys.exit(1)

    tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_scan_fabric_ops(tcm_dir)
    tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
    tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)

    # NOTE: raw_input() is Python 2 only; `input` also shadows the builtin.
    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)

    input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
    if input == "yes" or input == "y":
        tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)

    return
# Command-line handling: both -m/--modulename and -p/--protoident are
# required; parsing happens at import time (optparse is the historical
# pre-argparse API).
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                  action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                  action='store', nargs=1, type='string')

(opts, args) = parser.parse_args()

# Enforce that both mandatory options were supplied.
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)

if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
jzoldak/edx-platform | common/lib/xmodule/xmodule/assetstore/tests/test_asset_xml.py | 113 | 3631 | """
Test for asset XML generation / parsing.
"""
from path import Path as path
from lxml import etree
from contracts import ContractNotRespected
import unittest
from opaque_keys.edx.locator import CourseLocator
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.tests.test_assetstore import AssetStoreTestData
class TestAssetXml(unittest.TestCase):
"""
Tests for storing/querying course asset metadata.
"""
def setUp(self):
super(TestAssetXml, self).setUp()
xsd_filename = "assets.xsd"
self.course_id = CourseLocator('org1', 'course1', 'run1')
self.course_assets = []
for asset in AssetStoreTestData.all_asset_data:
asset_dict = dict(zip(AssetStoreTestData.asset_fields[1:], asset[1:]))
asset_md = AssetMetadata(self.course_id.make_asset_key('asset', asset[0]), **asset_dict)
self.course_assets.append(asset_md)
# Read in the XML schema definition and make a validator.
xsd_path = path(__file__).realpath().parent / xsd_filename
with open(xsd_path, 'r') as f:
schema_root = etree.XML(f.read())
schema = etree.XMLSchema(schema_root)
self.xmlparser = etree.XMLParser(schema=schema)
def test_export_single_asset_to_from_xml(self):
"""
Export a single AssetMetadata to XML and verify the structure and fields.
"""
asset_md = self.course_assets[0]
root = etree.Element("assets")
asset = etree.SubElement(root, "asset")
asset_md.to_xml(asset)
# If this line does *not* raise, the XML is valid.
etree.fromstring(etree.tostring(root), self.xmlparser)
new_asset_key = self.course_id.make_asset_key('tmp', 'tmp')
new_asset_md = AssetMetadata(new_asset_key)
new_asset_md.from_xml(asset)
# Compare asset_md to new_asset_md.
for attr in AssetMetadata.XML_ATTRS:
if attr in AssetMetadata.XML_ONLY_ATTRS:
continue
orig_value = getattr(asset_md, attr)
new_value = getattr(new_asset_md, attr)
self.assertEqual(orig_value, new_value)
def test_export_with_None_value(self):
"""
Export and import a single AssetMetadata to XML with a None created_by field, without causing an exception.
"""
asset_md = AssetMetadata(
self.course_id.make_asset_key('asset', 'none_value'),
created_by=None,
)
asset = etree.Element("asset")
asset_md.to_xml(asset)
asset_md.from_xml(asset)
def test_export_all_assets_to_xml(self):
"""
Export all AssetMetadatas to XML and verify the structure and fields.
"""
root = etree.Element("assets")
AssetMetadata.add_all_assets_as_xml(root, self.course_assets)
# If this line does *not* raise, the XML is valid.
etree.fromstring(etree.tostring(root), self.xmlparser)
def test_wrong_node_type_all(self):
"""
Ensure full asset sections with the wrong tag are detected.
"""
root = etree.Element("glassets")
with self.assertRaises(ContractNotRespected):
AssetMetadata.add_all_assets_as_xml(root, self.course_assets)
def test_wrong_node_type_single(self):
"""
Ensure single asset blocks with the wrong tag are detected.
"""
asset_md = self.course_assets[0]
root = etree.Element("assets")
asset = etree.SubElement(root, "smashset")
with self.assertRaises(ContractNotRespected):
asset_md.to_xml(asset)
| agpl-3.0 |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/skimage/io/tests/test_imread.py | 4 | 2468 | import os
from tempfile import NamedTemporaryFile
import numpy as np
from skimage import data_dir, io
from skimage.io import imread, imsave, use_plugin, reset_plugins
from skimage._shared import testing
from skimage._shared.testing import (TestCase, assert_array_equal,
assert_array_almost_equal)
try:
import imread as _imread
except ImportError:
imread_available = False
else:
imread_available = True
def setup():
if imread_available:
np.random.seed(0)
use_plugin('imread')
def teardown():
reset_plugins()
@testing.skipif(not imread_available, reason="imageread not installed")
def test_imread_flatten():
# a color image is flattened
img = imread(os.path.join(data_dir, 'color.png'), flatten=True)
assert img.ndim == 2
assert img.dtype == np.float64
img = imread(os.path.join(data_dir, 'camera.png'), flatten=True)
# check that flattening does not occur for an image that is grey already.
assert np.sctype2char(img.dtype) in np.typecodes['AllInteger']
@testing.skipif(not imread_available, reason="imageread not installed")
def test_imread_palette():
img = imread(os.path.join(data_dir, 'palette_color.png'))
assert img.ndim == 3
@testing.skipif(not imread_available, reason="imageread not installed")
def test_imread_truncated_jpg():
with testing.raises(RuntimeError):
io.imread(os.path.join(data_dir, 'truncated.jpg'))
@testing.skipif(not imread_available, reason="imageread not installed")
def test_bilevel():
expected = np.zeros((10, 10), bool)
expected[::2] = 1
img = imread(os.path.join(data_dir, 'checker_bilevel.png'))
assert_array_equal(img.astype(bool), expected)
class TestSave(TestCase):
def roundtrip(self, x, scaling=1):
f = NamedTemporaryFile(suffix='.png')
fname = f.name
f.close()
imsave(fname, x)
y = imread(fname)
assert_array_almost_equal((x * scaling).astype(np.int32), y)
@testing.skipif(not imread_available, reason="imageread not installed")
def test_imsave_roundtrip(self):
dtype = np.uint8
for shape in [(10, 10), (10, 10, 3), (10, 10, 4)]:
x = np.ones(shape, dtype=dtype) * np.random.rand(*shape)
if np.issubdtype(dtype, np.floating):
yield self.roundtrip, x, 255
else:
x = (x * 255).astype(dtype)
yield self.roundtrip, x
| gpl-3.0 |
mavit/ansible | lib/ansible/modules/windows/win_timezone.py | 52 | 1808 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Phil Schwartz <schwartzmx@gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_timezone
version_added: '2.1'
short_description: Sets Windows machine timezone
description:
- Sets machine time to the specified timezone.
options:
timezone:
description:
- Timezone to set to.
- 'Example: Central Standard Time'
required: yes
notes:
- The module will check if the provided timezone is supported on the machine.
- A list of possible timezones is available from C(tzutil.exe /l) and from
U(https://msdn.microsoft.com/en-us/library/ms912391.aspx)
- If running on Server 2008 the hotfix
U(https://support.microsoft.com/en-us/help/2556308/tzutil-command-line-tool-is-added-to-windows-vista-and-to-windows-server-2008)
needs to be installed to be able to run this module.
author:
- Phil Schwartz (@schwartzmx)
'''
EXAMPLES = r'''
- name: Set timezone to 'Romance Standard Time' (GMT+01:00)
win_timezone:
timezone: Romance Standard Time
- name: Set timezone to 'GMT Standard Time' (GMT)
win_timezone:
timezone: GMT Standard Time
- name: Set timezone to 'Central Standard Time' (GMT-06:00)
win_timezone:
timezone: Central Standard Time
'''
RETURN = r'''
previous_timezone:
description: The previous timezone if it was changed, otherwise the existing timezone
returned: success
type: string
sample: Central Standard Time
timezone:
description: The current timezone (possibly changed)
returned: success
type: string
sample: Central Standard Time
'''
| gpl-3.0 |
kimjaejoong/nova | nova/tests/unit/api/openstack/compute/contrib/test_admin_password.py | 33 | 7070 | # Copyright 2011 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import webob
from nova.api.openstack.compute.plugins.v3 import admin_password \
as admin_password_v21
from nova.api.openstack.compute import servers
from nova.compute import api as compute_api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
def fake_get(self, context, id, expected_attrs=None, want_objects=False):
return {'uuid': id}
def fake_set_admin_password(self, context, instance, password=None):
pass
class AdminPasswordTestV21(test.NoDBTestCase):
validiation_error = exception.ValidationError
def setUp(self):
super(AdminPasswordTestV21, self).setUp()
self.stubs.Set(compute_api.API, 'set_admin_password',
fake_set_admin_password)
self.stubs.Set(compute_api.API, 'get', fake_get)
self.fake_req = fakes.HTTPRequest.blank('')
def _get_action(self):
return admin_password_v21.AdminPasswordController().change_password
def _check_status(self, expected_status, res, controller_method):
self.assertEqual(expected_status, controller_method.wsgi_code)
def test_change_password(self):
body = {'changePassword': {'adminPass': 'test'}}
res = self._get_action()(self.fake_req, '1', body=body)
self._check_status(202, res, self._get_action())
def test_change_password_empty_string(self):
body = {'changePassword': {'adminPass': ''}}
res = self._get_action()(self.fake_req, '1', body=body)
self._check_status(202, res, self._get_action())
@mock.patch('nova.compute.api.API.set_admin_password',
side_effect=NotImplementedError())
def test_change_password_with_non_implement(self, mock_set_admin_password):
body = {'changePassword': {'adminPass': 'test'}}
self.assertRaises(webob.exc.HTTPNotImplemented,
self._get_action(),
self.fake_req, '1', body=body)
@mock.patch('nova.compute.api.API.get',
side_effect=exception.InstanceNotFound(instance_id='1'))
def test_change_password_with_non_existed_instance(self, mock_get):
body = {'changePassword': {'adminPass': 'test'}}
self.assertRaises(webob.exc.HTTPNotFound,
self._get_action(),
self.fake_req, '1', body=body)
def test_change_password_with_non_string_password(self):
body = {'changePassword': {'adminPass': 1234}}
self.assertRaises(self.validiation_error,
self._get_action(),
self.fake_req, '1', body=body)
@mock.patch('nova.compute.api.API.set_admin_password',
side_effect=exception.InstancePasswordSetFailed(instance="1",
reason=''))
def test_change_password_failed(self, mock_set_admin_password):
body = {'changePassword': {'adminPass': 'test'}}
self.assertRaises(webob.exc.HTTPConflict,
self._get_action(),
self.fake_req, '1', body=body)
def test_change_password_without_admin_password(self):
body = {'changPassword': {}}
self.assertRaises(self.validiation_error,
self._get_action(),
self.fake_req, '1', body=body)
def test_change_password_none(self):
body = {'changePassword': {'adminPass': None}}
self.assertRaises(self.validiation_error,
self._get_action(),
self.fake_req, '1', body=body)
def test_change_password_adminpass_none(self):
body = {'changePassword': None}
self.assertRaises(self.validiation_error,
self._get_action(),
self.fake_req, '1', body=body)
def test_change_password_bad_request(self):
body = {'changePassword': {'pass': '12345'}}
self.assertRaises(self.validiation_error,
self._get_action(),
self.fake_req, '1', body=body)
def test_server_change_password_pass_disabled(self):
# run with enable_instance_password disabled to verify adminPass
# is missing from response. See lp bug 921814
self.flags(enable_instance_password=False)
body = {'changePassword': {'adminPass': '1234pass'}}
res = self._get_action()(self.fake_req, '1', body=body)
self._check_status(202, res, self._get_action())
@mock.patch('nova.compute.api.API.set_admin_password',
side_effect=exception.InstanceInvalidState(
instance_uuid='fake', attr='vm_state', state='stopped',
method='set_admin_password'))
def test_change_password_invalid_state(self, mock_set_admin_password):
body = {'changePassword': {'adminPass': 'test'}}
self.assertRaises(webob.exc.HTTPConflict,
self._get_action(),
self.fake_req, 'fake', body=body)
class AdminPasswordTestV2(AdminPasswordTestV21):
validiation_error = webob.exc.HTTPBadRequest
def _get_action(self):
class FakeExtManager(object):
def is_loaded(self, ext):
return False
return servers.Controller(ext_mgr=FakeExtManager()).\
_action_change_password
def _check_status(self, expected_status, res, controller_method):
self.assertEqual(expected_status, res.status_int)
class AdminPasswordPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(AdminPasswordPolicyEnforcementV21, self).setUp()
self.controller = admin_password_v21.AdminPasswordController()
self.req = fakes.HTTPRequest.blank('')
def test_change_password_policy_failed(self):
rule_name = "os_compute_api:os-admin-password"
rule = {rule_name: "project:non_fake"}
self.policy.set_rules(rule)
body = {'changePassword': {'adminPass': '1234pass'}}
exc = self.assertRaises(
exception.PolicyNotAuthorized, self.controller.change_password,
self.req, fakes.FAKE_UUID, body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
| apache-2.0 |
ruibarreira/linuxtrail | usr/lib/python3.4/idlelib/RemoteDebugger.py | 137 | 12029 | """Support for remote Python debugging.
Some ASCII art to describe the structure:
IN PYTHON SUBPROCESS # IN IDLE PROCESS
#
# oid='gui_adapter'
+----------+ # +------------+ +-----+
| GUIProxy |--remote#call-->| GUIAdapter |--calls-->| GUI |
+-----+--calls-->+----------+ # +------------+ +-----+
| Idb | # /
+-----+<-calls--+------------+ # +----------+<--calls-/
| IdbAdapter |<--remote#call--| IdbProxy |
+------------+ # +----------+
oid='idb_adapter' #
The purpose of the Proxy and Adapter classes is to translate certain
arguments and return values that cannot be transported through the RPC
barrier, in particular frame and traceback objects.
"""
import types
from idlelib import rpc
from idlelib import Debugger
debugging = 0
idb_adap_oid = "idb_adapter"
gui_adap_oid = "gui_adapter"
#=======================================
#
# In the PYTHON subprocess:
frametable = {}
dicttable = {}
codetable = {}
tracebacktable = {}
def wrap_frame(frame):
fid = id(frame)
frametable[fid] = frame
return fid
def wrap_info(info):
"replace info[2], a traceback instance, by its ID"
if info is None:
return None
else:
traceback = info[2]
assert isinstance(traceback, types.TracebackType)
traceback_id = id(traceback)
tracebacktable[traceback_id] = traceback
modified_info = (info[0], info[1], traceback_id)
return modified_info
class GUIProxy:
def __init__(self, conn, gui_adap_oid):
self.conn = conn
self.oid = gui_adap_oid
def interaction(self, message, frame, info=None):
# calls rpc.SocketIO.remotecall() via run.MyHandler instance
# pass frame and traceback object IDs instead of the objects themselves
self.conn.remotecall(self.oid, "interaction",
(message, wrap_frame(frame), wrap_info(info)),
{})
class IdbAdapter:
def __init__(self, idb):
self.idb = idb
#----------called by an IdbProxy----------
def set_step(self):
self.idb.set_step()
def set_quit(self):
self.idb.set_quit()
def set_continue(self):
self.idb.set_continue()
def set_next(self, fid):
frame = frametable[fid]
self.idb.set_next(frame)
def set_return(self, fid):
frame = frametable[fid]
self.idb.set_return(frame)
def get_stack(self, fid, tbid):
frame = frametable[fid]
if tbid is None:
tb = None
else:
tb = tracebacktable[tbid]
stack, i = self.idb.get_stack(frame, tb)
stack = [(wrap_frame(frame), k) for frame, k in stack]
return stack, i
def run(self, cmd):
import __main__
self.idb.run(cmd, __main__.__dict__)
def set_break(self, filename, lineno):
msg = self.idb.set_break(filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.idb.clear_break(filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.idb.clear_all_file_breaks(filename)
return msg
#----------called by a FrameProxy----------
def frame_attr(self, fid, name):
frame = frametable[fid]
return getattr(frame, name)
def frame_globals(self, fid):
frame = frametable[fid]
dict = frame.f_globals
did = id(dict)
dicttable[did] = dict
return did
def frame_locals(self, fid):
frame = frametable[fid]
dict = frame.f_locals
did = id(dict)
dicttable[did] = dict
return did
def frame_code(self, fid):
frame = frametable[fid]
code = frame.f_code
cid = id(code)
codetable[cid] = code
return cid
#----------called by a CodeProxy----------
def code_name(self, cid):
code = codetable[cid]
return code.co_name
def code_filename(self, cid):
code = codetable[cid]
return code.co_filename
#----------called by a DictProxy----------
def dict_keys(self, did):
raise NotImplemented("dict_keys not public or pickleable")
## dict = dicttable[did]
## return dict.keys()
### Needed until dict_keys is type is finished and pickealable.
### Will probably need to extend rpc.py:SocketIO._proxify at that time.
def dict_keys_list(self, did):
dict = dicttable[did]
return list(dict.keys())
def dict_item(self, did, key):
dict = dicttable[did]
value = dict[key]
value = repr(value) ### can't pickle module 'builtins'
return value
#----------end class IdbAdapter----------
def start_debugger(rpchandler, gui_adap_oid):
"""Start the debugger and its RPC link in the Python subprocess
Start the subprocess side of the split debugger and set up that side of the
RPC link by instantiating the GUIProxy, Idb debugger, and IdbAdapter
objects and linking them together. Register the IdbAdapter with the
RPCServer to handle RPC requests from the split debugger GUI via the
IdbProxy.
"""
gui_proxy = GUIProxy(rpchandler, gui_adap_oid)
idb = Debugger.Idb(gui_proxy)
idb_adap = IdbAdapter(idb)
rpchandler.register(idb_adap_oid, idb_adap)
return idb_adap_oid
#=======================================
#
# In the IDLE process:
class FrameProxy:
def __init__(self, conn, fid):
self._conn = conn
self._fid = fid
self._oid = "idb_adapter"
self._dictcache = {}
def __getattr__(self, name):
if name[:1] == "_":
raise AttributeError(name)
if name == "f_code":
return self._get_f_code()
if name == "f_globals":
return self._get_f_globals()
if name == "f_locals":
return self._get_f_locals()
return self._conn.remotecall(self._oid, "frame_attr",
(self._fid, name), {})
def _get_f_code(self):
cid = self._conn.remotecall(self._oid, "frame_code", (self._fid,), {})
return CodeProxy(self._conn, self._oid, cid)
def _get_f_globals(self):
did = self._conn.remotecall(self._oid, "frame_globals",
(self._fid,), {})
return self._get_dict_proxy(did)
def _get_f_locals(self):
did = self._conn.remotecall(self._oid, "frame_locals",
(self._fid,), {})
return self._get_dict_proxy(did)
def _get_dict_proxy(self, did):
if did in self._dictcache:
return self._dictcache[did]
dp = DictProxy(self._conn, self._oid, did)
self._dictcache[did] = dp
return dp
class CodeProxy:
def __init__(self, conn, oid, cid):
self._conn = conn
self._oid = oid
self._cid = cid
def __getattr__(self, name):
if name == "co_name":
return self._conn.remotecall(self._oid, "code_name",
(self._cid,), {})
if name == "co_filename":
return self._conn.remotecall(self._oid, "code_filename",
(self._cid,), {})
class DictProxy:
def __init__(self, conn, oid, did):
self._conn = conn
self._oid = oid
self._did = did
## def keys(self):
## return self._conn.remotecall(self._oid, "dict_keys", (self._did,), {})
# 'temporary' until dict_keys is a pickleable built-in type
def keys(self):
return self._conn.remotecall(self._oid,
"dict_keys_list", (self._did,), {})
def __getitem__(self, key):
return self._conn.remotecall(self._oid, "dict_item",
(self._did, key), {})
def __getattr__(self, name):
##print("*** Failed DictProxy.__getattr__:", name)
raise AttributeError(name)
class GUIAdapter:
def __init__(self, conn, gui):
self.conn = conn
self.gui = gui
def interaction(self, message, fid, modified_info):
##print("*** Interaction: (%s, %s, %s)" % (message, fid, modified_info))
frame = FrameProxy(self.conn, fid)
self.gui.interaction(message, frame, modified_info)
class IdbProxy:
def __init__(self, conn, shell, oid):
self.oid = oid
self.conn = conn
self.shell = shell
def call(self, methodname, *args, **kwargs):
##print("*** IdbProxy.call %s %s %s" % (methodname, args, kwargs))
value = self.conn.remotecall(self.oid, methodname, args, kwargs)
##print("*** IdbProxy.call %s returns %r" % (methodname, value))
return value
def run(self, cmd, locals):
# Ignores locals on purpose!
seq = self.conn.asyncqueue(self.oid, "run", (cmd,), {})
self.shell.interp.active_seq = seq
def get_stack(self, frame, tbid):
# passing frame and traceback IDs, not the objects themselves
stack, i = self.call("get_stack", frame._fid, tbid)
stack = [(FrameProxy(self.conn, fid), k) for fid, k in stack]
return stack, i
def set_continue(self):
self.call("set_continue")
def set_step(self):
self.call("set_step")
def set_next(self, frame):
self.call("set_next", frame._fid)
def set_return(self, frame):
self.call("set_return", frame._fid)
def set_quit(self):
self.call("set_quit")
def set_break(self, filename, lineno):
msg = self.call("set_break", filename, lineno)
return msg
def clear_break(self, filename, lineno):
msg = self.call("clear_break", filename, lineno)
return msg
def clear_all_file_breaks(self, filename):
msg = self.call("clear_all_file_breaks", filename)
return msg
def start_remote_debugger(rpcclt, pyshell):
"""Start the subprocess debugger, initialize the debugger GUI and RPC link
Request the RPCServer start the Python subprocess debugger and link. Set
up the Idle side of the split debugger by instantiating the IdbProxy,
debugger GUI, and debugger GUIAdapter objects and linking them together.
Register the GUIAdapter with the RPCClient to handle debugger GUI
interaction requests coming from the subprocess debugger via the GUIProxy.
The IdbAdapter will pass execution and environment requests coming from the
Idle debugger GUI to the subprocess debugger via the IdbProxy.
"""
global idb_adap_oid
idb_adap_oid = rpcclt.remotecall("exec", "start_the_debugger",\
(gui_adap_oid,), {})
idb_proxy = IdbProxy(rpcclt, pyshell, idb_adap_oid)
gui = Debugger.Debugger(pyshell, idb_proxy)
gui_adap = GUIAdapter(rpcclt, gui)
rpcclt.register(gui_adap_oid, gui_adap)
return gui
def close_remote_debugger(rpcclt):
"""Shut down subprocess debugger and Idle side of debugger RPC link
Request that the RPCServer shut down the subprocess debugger and link.
Unregister the GUIAdapter, which will cause a GC on the Idle process
debugger and RPC link objects. (The second reference to the debugger GUI
is deleted in PyShell.close_remote_debugger().)
"""
close_subprocess_debugger(rpcclt)
rpcclt.unregister(gui_adap_oid)
def close_subprocess_debugger(rpcclt):
rpcclt.remotecall("exec", "stop_the_debugger", (idb_adap_oid,), {})
def restart_subprocess_debugger(rpcclt):
idb_adap_oid_ret = rpcclt.remotecall("exec", "start_the_debugger",\
(gui_adap_oid,), {})
assert idb_adap_oid_ret == idb_adap_oid, 'Idb restarted with different oid'
| gpl-3.0 |
hedaoyuan/Paddle | python/paddle/trainer_config_helpers/data_sources.py | 14 | 7727 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Data Sources are helpers to define paddle training data or testing data.
"""
from paddle.trainer.config_parser import *
from .utils import deprecated
try:
import cPickle as pickle
except ImportError:
import pickle
__all__ = ['define_py_data_sources2']
def define_py_data_source(file_list,
cls,
module,
obj,
args=None,
async=False,
data_cls=PyData):
"""
Define a python data source.
For example, the simplest usage in trainer_config.py as follow:
.. code-block:: python
define_py_data_source("train.list", TrainData, "data_provider", "process")
Or. if you want to pass arguments from trainer_config to data_provider.py, then
.. code-block:: python
define_py_data_source("train.list", TrainData, "data_provider", "process",
args={"dictionary": dict_name})
:param data_cls:
:param file_list: file list name, which contains all data file paths
:type file_list: basestring
:param cls: Train or Test Class.
:type cls: TrainData or TestData
:param module: python module name.
:type module: basestring
:param obj: python object name. May be a function name if using
PyDataProviderWrapper.
:type obj: basestring
:param args: The best practice is using dict to pass arguments into
DataProvider, and use :code:`@init_hook_wrapper` to
receive arguments.
:type args: string or picklable object
:param async: Load Data asynchronously or not.
:type async: bool
:return: None
:rtype: None
"""
if isinstance(file_list, list):
file_list_name = 'train.list'
if cls == TestData:
file_list_name = 'test.list'
with open(file_list_name, 'w') as f:
f.writelines(file_list)
file_list = file_list_name
if not isinstance(args, basestring) and args is not None:
args = pickle.dumps(args, 0)
cls(
data_cls(
files=file_list,
load_data_module=module,
load_data_object=obj,
load_data_args=args,
async_load_data=async))
def define_py_data_sources(train_list,
test_list,
module,
obj,
args=None,
train_async=False,
data_cls=PyData):
"""
The annotation is almost the same as define_py_data_sources2, except that
it can specific train_async and data_cls.
:param data_cls:
:param train_list: Train list name.
:type train_list: basestring
:param test_list: Test list name.
:type test_list: basestring
:param module: python module name. If train and test is different, then
pass a tuple or list to this argument.
:type module: basestring or tuple or list
:param obj: python object name. May be a function name if using
PyDataProviderWrapper. If train and test is different, then pass
a tuple or list to this argument.
:type obj: basestring or tuple or list
:param args: The best practice is using dict() to pass arguments into
DataProvider, and use :code:`@init_hook_wrapper` to receive
arguments. If train and test is different, then pass a tuple
or list to this argument.
:type args: string or picklable object or list or tuple.
:param train_async: Is training data load asynchronously or not.
:type train_async: bool
:return: None
:rtype: None
"""
def __is_splitable__(o):
return (isinstance(o, list) or
isinstance(o, tuple)) and hasattr(o, '__len__') and len(o) == 2
assert train_list is not None or test_list is not None
assert module is not None and obj is not None
test_module = module
train_module = module
if __is_splitable__(module):
train_module, test_module = module
test_obj = obj
train_obj = obj
if __is_splitable__(obj):
train_obj, test_obj = obj
if args is None:
args = ""
train_args = args
test_args = args
if __is_splitable__(args):
train_args, test_args = args
if train_list is not None:
define_py_data_source(train_list, TrainData, train_module, train_obj,
train_args, train_async, data_cls)
if test_list is not None:
define_py_data_source(test_list, TestData, test_module, test_obj,
test_args, False, data_cls)
def define_py_data_sources2(train_list, test_list, module, obj, args=None):
"""
Define python Train/Test data sources in one method. If train/test use
the same Data Provider configuration, module/obj/args contain one argument,
otherwise contain a list or tuple of arguments. For example\:
.. code-block:: python
define_py_data_sources2(train_list="train.list",
test_list="test.list",
module="data_provider"
# if train/test use different configurations,
# obj=["process_train", "process_test"]
obj="process",
args={"dictionary": dict_name})
The related data provider can refer to :ref:`api_pydataprovider2_sequential_model` .
:param train_list: Train list name.
:type train_list: basestring
:param test_list: Test list name.
:type test_list: basestring
:param module: python module name. If train and test is different, then
pass a tuple or list to this argument.
:type module: basestring or tuple or list
:param obj: python object name. May be a function name if using
PyDataProviderWrapper. If train and test is different, then pass
a tuple or list to this argument.
:type obj: basestring or tuple or list
:param args: The best practice is using dict() to pass arguments into
DataProvider, and use :code:`@init_hook_wrapper` to receive
arguments. If train and test is different, then pass a tuple
or list to this argument.
:type args: string or picklable object or list or tuple.
:return: None
:rtype: None
"""
def py_data2(files, load_data_module, load_data_object, load_data_args,
**kwargs):
data = create_data_config_proto()
data.type = 'py2'
data.files = files
data.load_data_module = load_data_module
data.load_data_object = load_data_object
data.load_data_args = load_data_args
data.async_load_data = False
return data
define_py_data_sources(
train_list=train_list,
test_list=test_list,
module=module,
obj=obj,
args=args,
data_cls=py_data2)
| apache-2.0 |
Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2021_02_01/aio/operations/_operations.py | 1 | 4679 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
    self,
    **kwargs
) -> AsyncIterable["_models.OperationListResult"]:
    """Lists all of the available Network Rest API operations.

    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either OperationListResult or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.OperationListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.OperationListResult"]
    # Map auth/not-found/conflict status codes to specific exception types;
    # callers may extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2021-02-01"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build the HTTP request for one page; first page uses the static
        # metadata URL, later pages use the service-provided next_link.
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        if not next_link:
            # Construct URL
            url = self.list.metadata['url']  # type: ignore
            # Construct parameters
            query_parameters = {}  # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link is already a complete URL, so no query parameters.
            url = next_link
            query_parameters = {}  # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request

    async def extract_data(pipeline_response):
        # Deserialize one page and return (continuation token, items).
        deserialized = self._deserialize('OperationListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        # Fetch one page through the client pipeline; anything other than
        # HTTP 200 is turned into a typed exception via error_map.
        request = prepare_request(next_link)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/providers/Microsoft.Network/operations'}  # type: ignore
| mit |
Tiryoh/mbed | workspace_tools/host_tests/host_tests_plugins/module_copy_mbed.py | 74 | 2858 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from shutil import copy
from host_test_plugins import HostTestPluginBase
from time import sleep
class HostTestPluginCopyMethod_Mbed(HostTestPluginBase):
    """Copy-method plugin that flashes "mbed enabled" devices by copying the
    binary image onto the device's mounted disk with shutil.copy()."""

    def generic_mbed_copy(self, image_path, destination_disk):
        """ Generic mbed copy method for "mbed enabled" devices.
        It uses standard python shutil function to copy
        image_file (target specific binary) to device's disk.

        :param image_path: path to the binary image to flash
        :param destination_disk: mount point of the target device
        :return: True on success, False if the copy raised an exception
        """
        result = True
        # Ensure the destination has a trailing separator so shutil.copy
        # treats it as a directory on both Windows and POSIX.
        if not destination_disk.endswith('/') and not destination_disk.endswith('\\'):
            destination_disk += '/'
        try:
            copy(image_path, destination_disk)
        # Fix: the original 'except Exception, e:' is Python-2-only syntax;
        # 'as' works on Python 2.6+ and Python 3.
        except Exception as e:
            self.print_plugin_error("shutil.copy('%s', '%s')"% (image_path, destination_disk))
            self.print_plugin_error("Error: %s"% str(e))
            result = False
        return result

    # Plugin interface
    name = 'HostTestPluginCopyMethod_Mbed'
    type = 'CopyMethod'
    stable = True
    capabilities = ['shutil', 'default']
    required_parameters = ['image_path', 'destination_disk', 'program_cycle_s']

    def setup(self, *args, **kwargs):
        """ Configure plugin, this function should be called before plugin execute() method is used.
        """
        # No configuration is needed for the shutil-based copy.
        return True

    def execute(self, capability, *args, **kwargs):
        """ Executes capability by name.

        Each capability may directly just call some command line
        program or execute building pythonic function

        :param capability: name of the capability to run ('shutil')
        :return: True on success, False otherwise
        """
        result = False
        if self.check_parameters(capability, *args, **kwargs) is True:
            # Capability 'default' is a dummy capability
            if capability == 'shutil':
                image_path = kwargs['image_path']
                destination_disk = kwargs['destination_disk']
                program_cycle_s = kwargs['program_cycle_s']
                # Wait for mount point to be ready
                self.check_mount_point_ready(destination_disk)  # Blocking
                result = self.generic_mbed_copy(image_path, destination_disk)

                # Allow mbed to cycle
                sleep(program_cycle_s)
        return result
def load_plugin():
    """Entry point used by the plugin registry: return a fresh instance of
    the copy-method plugin defined in this module."""
    return HostTestPluginCopyMethod_Mbed()
| apache-2.0 |
jmehnle/ansible | lib/ansible/parsing/yaml/constructor.py | 16 | 6062 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from yaml.constructor import SafeConstructor, ConstructorError
from yaml.nodes import MappingNode
from ansible.module_utils._text import to_bytes
from ansible.parsing.vault import VaultLib
from ansible.parsing.yaml.objects import AnsibleMapping, AnsibleSequence, AnsibleUnicode, AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import wrap_var
# Reuse the CLI's shared Display object when running under the ansible
# command line; fall back to creating our own for library/embedded use.
try:
    from __main__ import display
except ImportError:
    from ansible.utils.display import Display
    display = Display()
class AnsibleConstructor(SafeConstructor):
    """YAML constructor that produces Ansible's position-aware objects
    (AnsibleMapping, AnsibleSequence, AnsibleUnicode) and handles the
    ``!vault`` / ``!unsafe`` custom tags."""

    def __init__(self, file_name=None, b_vault_password=None):
        # file_name: original source file, used for position reporting.
        # b_vault_password: bytes password used to build the default vault.
        self._b_vault_password = b_vault_password
        self._ansible_file_name = file_name
        super(AnsibleConstructor, self).__init__()
        self._vaults = {}
        self._vaults['default'] = VaultLib(b_password=self._b_vault_password)

    def construct_yaml_map(self, node):
        # PyYAML generator protocol: yield the empty object first so it can
        # take part in anchored/recursive structures, then fill it in.
        data = AnsibleMapping()
        yield data
        value = self.construct_mapping(node)
        data.update(value)
        data.ansible_pos = self._node_position_info(node)

    def construct_yaml_map(self, node):
        data = AnsibleMapping()
        yield data
        value = self.construct_mapping(node)
        data.update(value)
        data.ansible_pos = self._node_position_info(node)

    def construct_mapping(self, node, deep=False):
        # Most of this is from yaml.constructor.SafeConstructor. We replicate
        # it here so that we can warn users when they have duplicate dict keys
        # (pyyaml silently allows overwriting keys)
        if not isinstance(node, MappingNode):
            raise ConstructorError(None, None,
                                   "expected a mapping node, but found %s" % node.id,
                                   node.start_mark)
        self.flatten_mapping(node)
        mapping = AnsibleMapping()

        # Add our extra information to the returned value
        mapping.ansible_pos = self._node_position_info(node)

        for key_node, value_node in node.value:
            key = self.construct_object(key_node, deep=deep)
            # YAML keys must be hashable to be usable as dict keys.
            try:
                hash(key)
            except TypeError as exc:
                raise ConstructorError("while constructing a mapping", node.start_mark,
                                       "found unacceptable key (%s)" % exc, key_node.start_mark)

            if key in mapping:
                display.warning(u'While constructing a mapping from {1}, line {2}, column {3}, found a duplicate dict key ({0}).'
                                u' Using last defined value only.'.format(key, *mapping.ansible_pos))

            value = self.construct_object(value_node, deep=deep)
            mapping[key] = value

        return mapping

    def construct_yaml_str(self, node, unsafe=False):
        # Override the default string handling function
        # to always return unicode objects
        value = self.construct_scalar(node)
        ret = AnsibleUnicode(value)
        ret.ansible_pos = self._node_position_info(node)

        if unsafe:
            # Wrap so the templating engine will not evaluate this string.
            ret = wrap_var(ret)

        return ret

    def construct_vault_encrypted_unicode(self, node):
        """Build an AnsibleVaultEncryptedUnicode from a ``!vault`` scalar."""
        value = self.construct_scalar(node)
        ciphertext_data = to_bytes(value)

        if self._b_vault_password is None:
            raise ConstructorError(None, None, "found vault but no vault password provided", node.start_mark)

        # could pass in a key id here to choose the vault to associate with
        vault = self._vaults['default']
        ret = AnsibleVaultEncryptedUnicode(ciphertext_data)
        ret.vault = vault
        return ret

    def construct_yaml_seq(self, node):
        # Same two-step generator pattern as construct_yaml_map above.
        data = AnsibleSequence()
        yield data
        data.extend(self.construct_sequence(node))
        data.ansible_pos = self._node_position_info(node)

    def construct_yaml_unsafe(self, node):
        return self.construct_yaml_str(node, unsafe=True)

    def _node_position_info(self, node):
        """Return (datasource, line, column) for *node*, 1-indexed."""
        # the line number where the previous token has ended (plus empty lines)
        # Add one so that the first line is line 1 rather than line 0
        column = node.start_mark.column + 1
        line = node.start_mark.line + 1

        # in some cases, we may have pre-read the data and then
        # passed it to the load() call for YAML, in which case we
        # want to override the default datasource (which would be
        # '<string>') to the actual filename we read in
        datasource = self._ansible_file_name or node.start_mark.name

        return (datasource, line, column)
# Register the overrides above for the standard YAML tags (and Ansible's
# custom tags) so every load through AnsibleConstructor yields the
# position-aware Ansible types instead of plain dict/list/str.
AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:map',
    AnsibleConstructor.construct_yaml_map)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:python/dict',
    AnsibleConstructor.construct_yaml_map)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:str',
    AnsibleConstructor.construct_yaml_str)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:python/unicode',
    AnsibleConstructor.construct_yaml_str)

AnsibleConstructor.add_constructor(
    u'tag:yaml.org,2002:seq',
    AnsibleConstructor.construct_yaml_seq)

AnsibleConstructor.add_constructor(
    u'!unsafe',
    AnsibleConstructor.construct_yaml_unsafe)

AnsibleConstructor.add_constructor(
    u'!vault',
    AnsibleConstructor.construct_vault_encrypted_unicode)

# Legacy alias for the vault tag.
AnsibleConstructor.add_constructor(u'!vault-encrypted', AnsibleConstructor.construct_vault_encrypted_unicode)
| gpl-3.0 |
cloudbau/nova | nova/openstack/common/context.py | 27 | 2733 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
from nova.openstack.common import uuidutils
def generate_request_id():
    """Return a fresh request identifier of the form ``req-<uuid>``."""
    return 'req-{0}'.format(uuidutils.generate_uuid())
class RequestContext(object):
    """Helper class to represent useful information about a request context.

    Stores information about the security context under which the user
    accesses the system, as well as additional request information.
    """

    def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
                 read_only=False, show_deleted=False, request_id=None):
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.is_admin = is_admin
        self.read_only = read_only
        self.show_deleted = show_deleted
        # Generate an id lazily only when the caller did not supply one.
        self.request_id = request_id if request_id else generate_request_id()

    def to_dict(self):
        """Serialize the context to a plain dict (e.g. for RPC transport)."""
        return {
            'user': self.user,
            'tenant': self.tenant,
            'is_admin': self.is_admin,
            'read_only': self.read_only,
            'show_deleted': self.show_deleted,
            'auth_token': self.auth_token,
            'request_id': self.request_id,
        }
def get_admin_context(show_deleted=False):
    """Return a RequestContext with admin rights and no user/tenant bound."""
    return RequestContext(None,
                          tenant=None,
                          is_admin=True,
                          show_deleted=show_deleted)
def get_context_from_function_and_args(function, args, kwargs):
    """Find an arg of type RequestContext and return it.

    This is useful in a couple of decorators where we don't
    know much about the function we're wrapping.
    """
    # Scan keyword values first, then positionals, mirroring the original
    # chain order; return the first context found or None.
    return next(
        (candidate
         for candidate in itertools.chain(kwargs.values(), args)
         if isinstance(candidate, RequestContext)),
        None)
| apache-2.0 |
xapi-project/xapi-storage-datapath-plugins | datapath/test.py | 5 | 2930 | # Run these tests with 'nosetests':
# install the 'python-nose' package (Fedora/CentOS or Ubuntu)
# run 'nosetests' in the root of the repository
import iscsi
import image
import device
import unittest
import os
import socket
import struct
import errno
raw_path = "/tmp/test-raw-disk"
class Tests(unittest.TestCase):
    """End-to-end tests for the device/image datapath helpers, using a
    sparse 1 MiB raw file as the backing image."""
    # unittest.TestCase has more methods than Pylint permits
    # pylint: disable=R0904

    def setUp(self):
        # Remove any image left over from a previous run; ENOENT is fine.
        try:
            os.unlink(raw_path)
        except OSError as exc:
            if exc.errno == errno.ENOENT:
                pass
            else:
                raise
        # Create a sparse 1 MiB file by seeking to the last byte and writing it.
        with open(raw_path, "w") as f:
            f.seek(1024 * 1024 - 1)
            f.write("\000")
        device.clear()

    def test_raw(self):
        # Create and destroy a device backed by the raw image.
        d = device.create("", image.Raw(raw_path))
        d.destroy("")

    def test_raw_block(self):
        # A created device must expose a block device node.
        d = device.create("", image.Raw(raw_path))
        block = d.block_device()
        assert block is not None
        d.destroy("")

    def test_raw_block_tapdisk(self):
        # Attaching a tapdisk to a raw-backed device must succeed.
        d = device.create("", image.Raw(raw_path))
        block = d.block_device()
        assert block is not None
        d.add_tapdisk("")
        d.destroy("")

    def test_mirror(self):
        # Start mirroring to a fake NBD server on one end of a socketpair
        # and feed back a minimal NBD negotiation (magic + 1 MiB size).
        d = device.create("", image.Raw(raw_path))
        block = d.block_device()
        assert block is not None
        d.add_tapdisk("")
        a, b = socket.socketpair()
        d.tapdisk.start_mirror("", a)
        b.sendall('NBDMAGIC\x00\x00\x42\x02\x81\x86\x12\x53' +
                  struct.pack('>Q', 1024 * 1024) + '\0' * 128)
        d.destroy("")

    def test_nbd(self):
        # When tapdisk serves NBD, the handshake must begin with NBDMAGIC.
        d = device.create("", image.Raw(raw_path))
        block = d.block_device()
        assert block is not None
        d.add_tapdisk("")
        a, b = socket.socketpair()
        d.tapdisk.receive_nbd("", a)
        results = b.recv(256)
        self.assertEqual("NBDMAGIC", results[0:8])
        d.destroy("")
class SessionTests(unittest.TestCase):
    """Parsing of 'iscsiadm -m session'-style output lines."""
    # unittest.TestCase has more methods than Pylint permits
    # pylint: disable=R0904

    def test_parse(self):
        # One session line must split into protocol, index, address and IQN.
        x = iscsi.Session(
            "tcp: [9] 10.0.0.1:3260,1 " +
            "iqn.2004-04.com.qnap:ts-859uplus:iscsi.foo01.000000 " +
            "(non-flash)")
        assert x.proto == "tcp"
        assert x.index == 9
        assert x.address.ip == "10.0.0.1"
        assert x.address.port == 3260
        assert x.iqn == "iqn.2004-04.com.qnap:ts-859uplus:iscsi.foo01.000000"
class DiscoverTests(unittest.TestCase):
    """Parsing of 'iscsiadm -m discovery'-style target lines."""
    # unittest.TestCase has more methods than Pylint permits
    # pylint: disable=R0904

    def test_parse(self):
        # A discovery line must split into address (ip, port) and IQN.
        x = iscsi.Target(
            "10.0.0.1:3260,1 " +
            "iqn.2004-04.com.qnap:ts-859uplus:iscsi.foo01.000000")
        assert x.address.ip == "10.0.0.1"
        assert x.address.port == 3260
        assert x.iqn == "iqn.2004-04.com.qnap:ts-859uplus:iscsi.foo01.000000"
| lgpl-2.1 |
futurepr0n/Books-solutions | Python-For-Everyone-Horstmann/Chapter7-Files-and-Exceptions/P7.10.py | 1 | 1769 | # Get the data for baby names in prior years from the Social Security Administration.
# Paste the table data in files named babynames2010.txt , etc. Modify the babynames.py
# program so that it prompts the user for a file name. Can you spot a trend in the
# frequencies?
##
# This program displays the most common baby names. Half of boys and girls in
# the United States were given these names in 2011.
#
from sys import argv
# The percentage limit to be extracted.
LIMIT = 50.0
def main():
    """Read a baby-name statistics file and print names until at least half
    of both boys and girls (LIMIT percent each) are covered.

    The input file is taken from the first command-line argument, or the
    user is prompted for a name when no argument was given.
    """
    # Bug fix: the original tested 'argv != ""', which compares the argv
    # LIST against a string and is therefore always true — the interactive
    # prompt was unreachable and argv[1] crashed when no argument was given.
    if len(argv) > 1:
        inputFile = open(argv[1], "r")
    else:
        filename = str(input("File to open: "))
        inputFile = open(filename, "r")

    # Running percentage totals for each gender.
    boyTotal = 0.0
    girlTotal = 0.0
    while boyTotal < LIMIT or girlTotal < LIMIT:
        # Extract the data from the next line and split it.
        line = inputFile.readline()
        dataFields = line.split()

        # Extract the individual field values.
        rank = int(dataFields[0])
        boyName = dataFields[1]
        boyPercent = float(dataFields[2].rstrip("%"))
        girlName = dataFields[3]
        girlPercent = float(dataFields[4].rstrip("%"))

        # Process the data.
        print("%3d " % rank, end="")
        boyTotal = processName(boyName, boyPercent, boyTotal)
        girlTotal = processName(girlName, girlPercent, girlTotal)
        print()

    inputFile.close()
## Prints the name (left-justified in a 15-character field) while the
#  running total is still below LIMIT, and advances the total.
#  @param name the boy or girl name
#  @param percent the percentage for this name
#  @param total the total percentage processed
#  @return the adjusted total
#
def processName(name, percent, total):
    # Guard clause: once the limit is reached, nothing is printed and the
    # total stops growing.
    if total >= LIMIT:
        return total
    print("%-15s " % name, end="")
    return total + percent
# Start the program.
main()
| mit |
elkingtonmcb/django | tests/defer/tests.py | 338 | 11262 | from __future__ import unicode_literals
from django.db.models.query_utils import DeferredAttribute, InvalidQuery
from django.test import TestCase
from .models import (
BigChild, Child, ChildProxy, Primary, RefreshPrimaryProxy, Secondary,
)
class AssertionMixin(object):
    """TestCase mixin providing an assertion on the number of deferred
    fields carried by a model instance."""

    def assert_delayed(self, obj, num):
        """
        Instances with deferred fields look the same as normal instances when
        we examine attribute values. Therefore, this method returns the number
        of deferred fields on returned instances.
        """
        # A field is deferred when its class-level attribute descriptor is a
        # DeferredAttribute rather than the normal field descriptor.
        deferred = sum(
            1 for field in obj._meta.fields
            if isinstance(obj.__class__.__dict__.get(field.attname),
                          DeferredAttribute)
        )
        self.assertEqual(deferred, num)
class DeferTests(AssertionMixin, TestCase):
    """Core defer()/only() behaviour on a simple Primary -> Secondary pair."""

    @classmethod
    def setUpTestData(cls):
        cls.s1 = Secondary.objects.create(first="x1", second="y1")
        cls.p1 = Primary.objects.create(name="p1", value="xx", related=cls.s1)

    def test_defer(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name")[0], 1)
        self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
        self.assert_delayed(qs.defer("related__first")[0], 0)
        self.assert_delayed(qs.defer("name").defer("value")[0], 2)

    def test_only(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.only("name")[0], 2)
        self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)
        self.assert_delayed(qs.only("name").only("value")[0], 2)
        self.assert_delayed(qs.only("related__first")[0], 2)
        # Using 'pk' with only() should result in 3 deferred fields, namely all
        # of them except the model's primary key see #15494
        self.assert_delayed(qs.only("pk")[0], 3)
        # You can use 'pk' with reverse foreign key lookups.
        self.assert_delayed(self.s1.primary_set.all().only('pk')[0], 3)

    def test_defer_only_chaining(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.only("name", "value").defer("name")[0], 2)
        self.assert_delayed(qs.defer("name").only("value", "name")[0], 2)
        self.assert_delayed(qs.defer("name").only("value")[0], 2)
        self.assert_delayed(qs.only("name").defer("value")[0], 2)

    def test_defer_on_an_already_deferred_field(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name")[0], 1)
        self.assert_delayed(qs.defer("name").defer("name")[0], 1)

    def test_defer_none_to_clear_deferred_set(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name", "value")[0], 2)
        # defer(None) resets any deferral accumulated so far.
        self.assert_delayed(qs.defer(None)[0], 0)
        self.assert_delayed(qs.only("name").defer(None)[0], 0)

    def test_only_none_raises_error(self):
        msg = 'Cannot pass None as an argument to only().'
        with self.assertRaisesMessage(TypeError, msg):
            Primary.objects.only(None)

    def test_defer_extra(self):
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name").extra(select={"a": 1})[0], 1)
        self.assert_delayed(qs.extra(select={"a": 1}).defer("name")[0], 1)

    def test_defer_values_does_not_defer(self):
        # User values() won't defer anything (you get the full list of
        # dictionaries back), but it still works.
        self.assertEqual(Primary.objects.defer("name").values()[0], {
            "id": self.p1.id,
            "name": "p1",
            "value": "xx",
            "related_id": self.s1.id,
        })

    def test_only_values_does_not_defer(self):
        self.assertEqual(Primary.objects.only("name").values()[0], {
            "id": self.p1.id,
            "name": "p1",
            "value": "xx",
            "related_id": self.s1.id,
        })

    def test_get(self):
        # Using defer() and only() with get() is also valid.
        qs = Primary.objects.all()
        self.assert_delayed(qs.defer("name").get(pk=self.p1.pk), 1)
        self.assert_delayed(qs.only("name").get(pk=self.p1.pk), 2)

    def test_defer_with_select_related(self):
        obj = Primary.objects.select_related().defer("related__first", "related__second")[0]
        self.assert_delayed(obj.related, 2)
        self.assert_delayed(obj, 0)

    def test_only_with_select_related(self):
        obj = Primary.objects.select_related().only("related__first")[0]
        self.assert_delayed(obj, 2)
        self.assert_delayed(obj.related, 1)
        self.assertEqual(obj.related_id, self.s1.pk)
        self.assertEqual(obj.name, "p1")

    def test_defer_select_related_raises_invalid_query(self):
        # When we defer a field and also select_related it, the query is
        # invalid and raises an exception.
        with self.assertRaises(InvalidQuery):
            Primary.objects.defer("related").select_related("related")[0]

    def test_only_select_related_raises_invalid_query(self):
        with self.assertRaises(InvalidQuery):
            Primary.objects.only("name").select_related("related")[0]

    def test_defer_foreign_keys_are_deferred_and_not_traversed(self):
        # With a depth-based select_related, all deferred ForeignKeys are
        # deferred instead of traversed.
        with self.assertNumQueries(3):
            obj = Primary.objects.defer("related").select_related()[0]
            self.assert_delayed(obj, 1)
            self.assertEqual(obj.related.id, self.s1.pk)

    def test_saving_object_with_deferred_field(self):
        # Saving models with deferred fields is possible (but inefficient,
        # since every field has to be retrieved first).
        Primary.objects.create(name="p2", value="xy", related=self.s1)
        obj = Primary.objects.defer("value").get(name="p2")
        obj.name = "a new name"
        obj.save()
        self.assertQuerysetEqual(
            Primary.objects.all(), [
                "p1", "a new name",
            ],
            lambda p: p.name,
            ordered=False,
        )

    def test_defer_baseclass_when_subclass_has_no_added_fields(self):
        # Regression for #10572 - A subclass with no extra fields can defer
        # fields from the base class
        Child.objects.create(name="c1", value="foo", related=self.s1)
        # You can defer a field on a baseclass when the subclass has no fields
        obj = Child.objects.defer("value").get(name="c1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "c1")
        self.assertEqual(obj.value, "foo")

    def test_only_baseclass_when_subclass_has_no_added_fields(self):
        # You can retrieve a single column on a base class with no fields
        Child.objects.create(name="c1", value="foo", related=self.s1)
        obj = Child.objects.only("name").get(name="c1")
        # on an inherited model, its PK is also fetched, hence '3' deferred fields.
        self.assert_delayed(obj, 3)
        self.assertEqual(obj.name, "c1")
        self.assertEqual(obj.value, "foo")
class BigChildDeferTests(AssertionMixin, TestCase):
    """defer()/only() on a multi-table-inheritance child that ADDS a field
    ('other') to its base class."""

    @classmethod
    def setUpTestData(cls):
        cls.s1 = Secondary.objects.create(first="x1", second="y1")
        BigChild.objects.create(name="b1", value="foo", related=cls.s1, other="bar")

    def test_defer_baseclass_when_subclass_has_added_field(self):
        # You can defer a field on a baseclass
        obj = BigChild.objects.defer("value").get(name="b1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")

    def test_defer_subclass(self):
        # You can defer a field on a subclass
        obj = BigChild.objects.defer("other").get(name="b1")
        self.assert_delayed(obj, 1)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")

    def test_only_baseclass_when_subclass_has_added_field(self):
        # You can retrieve a single field on a baseclass
        obj = BigChild.objects.only("name").get(name="b1")
        # when inherited model, its PK is also fetched, hence '4' deferred fields.
        self.assert_delayed(obj, 4)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")

    def test_only_sublcass(self):
        # You can retrieve a single field on a subclass
        obj = BigChild.objects.only("other").get(name="b1")
        self.assert_delayed(obj, 4)
        self.assertEqual(obj.name, "b1")
        self.assertEqual(obj.value, "foo")
        self.assertEqual(obj.other, "bar")
class TestDefer2(AssertionMixin, TestCase):
    """Miscellaneous defer() regressions: proxies, PK chaining, equality
    and refresh_from_db() interaction."""

    def test_defer_proxy(self):
        """
        Ensure select_related together with only on a proxy model behaves
        as expected. See #17876.
        """
        related = Secondary.objects.create(first='x1', second='x2')
        ChildProxy.objects.create(name='p1', value='xx', related=related)
        children = ChildProxy.objects.all().select_related().only('id', 'name')
        self.assertEqual(len(children), 1)
        child = children[0]
        self.assert_delayed(child, 2)
        self.assertEqual(child.name, 'p1')
        self.assertEqual(child.value, 'xx')

    def test_defer_inheritance_pk_chaining(self):
        """
        When an inherited model is fetched from the DB, its PK is also fetched.
        When getting the PK of the parent model it is useful to use the already
        fetched parent model PK if it happens to be available. Tests that this
        is done.
        """
        s1 = Secondary.objects.create(first="x1", second="y1")
        bc = BigChild.objects.create(name="b1", value="foo", related=s1,
                                     other="bar")
        bc_deferred = BigChild.objects.only('name').get(pk=bc.pk)
        # Accessing the parent PK must not trigger an extra query.
        with self.assertNumQueries(0):
            bc_deferred.id
        self.assertEqual(bc_deferred.pk, bc_deferred.id)

    def test_eq(self):
        # A deferred instance compares equal to its fully-loaded counterpart.
        s1 = Secondary.objects.create(first="x1", second="y1")
        s1_defer = Secondary.objects.only('pk').get(pk=s1.pk)
        self.assertEqual(s1, s1_defer)
        self.assertEqual(s1_defer, s1)

    def test_refresh_not_loading_deferred_fields(self):
        s = Secondary.objects.create()
        rf = Primary.objects.create(name='foo', value='bar', related=s)
        rf2 = Primary.objects.only('related', 'value').get()
        rf.name = 'new foo'
        rf.value = 'new bar'
        rf.save()
        # refresh_from_db() reloads only the loaded fields (one query)...
        with self.assertNumQueries(1):
            rf2.refresh_from_db()
            self.assertEqual(rf2.value, 'new bar')
        # ...and the still-deferred field is fetched lazily on access.
        with self.assertNumQueries(1):
            self.assertEqual(rf2.name, 'new foo')

    def test_custom_refresh_on_deferred_loading(self):
        s = Secondary.objects.create()
        rf = RefreshPrimaryProxy.objects.create(name='foo', value='bar', related=s)
        rf2 = RefreshPrimaryProxy.objects.only('related').get()
        rf.name = 'new foo'
        rf.value = 'new bar'
        rf.save()
        with self.assertNumQueries(1):
            # Customized refresh_from_db() reloads all deferred fields on
            # access of any of them.
            self.assertEqual(rf2.name, 'new foo')
            self.assertEqual(rf2.value, 'new bar')
| bsd-3-clause |
jirikuncar/invenio-records | invenio_records/bundles.py | 4 | 1121 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Records bundles."""
from __future__ import unicode_literals
from invenio_ext.assets import Bundle, RequireJSFilter
# JavaScript bundle: the records UI entry point, compiled with RequireJS.
js = Bundle(
    "js/records/init.js",
    filters=RequireJSFilter(),
    output="records.js",
    weight=20
)

# Stylesheet bundle for the record detail page, minified with cleancss.
css = Bundle(
    "css/records/record.css",
    output="record.css",
    weight=20,
    filters="cleancss"
)
| gpl-2.0 |
Aorjoa/aiyara-ceph-dash | .tox/py27/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ssl_match_hostname/_implementation.py | 2360 | 3778 | """The match_hostname() function from Python 3.3.3, essential when using SSL."""
# Note: This file is under the PSF license as the code comes from the python
# stdlib. http://docs.python.org/3/license.html
import re
__version__ = '3.4.0.2'
class CertificateError(ValueError):
    """Raised when a certificate's names do not match the expected hostname."""
    pass
def _dnsname_match(dn, hostname, max_wildcards=1):
"""Matching according to RFC 6125, section 6.4.3
http://tools.ietf.org/html/rfc6125#section-6.4.3
"""
pats = []
if not dn:
return False
# Ported from python3-syntax:
# leftmost, *remainder = dn.split(r'.')
parts = dn.split(r'.')
leftmost = parts[0]
remainder = parts[1:]
wildcards = leftmost.count('*')
if wildcards > max_wildcards:
# Issue #17980: avoid denials of service by refusing more
# than one wildcard per fragment. A survey of established
# policy among SSL implementations showed it to be a
# reasonable choice.
raise CertificateError(
"too many wildcards in certificate DNS name: " + repr(dn))
# speed up common case w/o wildcards
if not wildcards:
return dn.lower() == hostname.lower()
# RFC 6125, section 6.4.3, subitem 1.
# The client SHOULD NOT attempt to match a presented identifier in which
# the wildcard character comprises a label other than the left-most label.
if leftmost == '*':
# When '*' is a fragment by itself, it matches a non-empty dotless
# fragment.
pats.append('[^.]+')
elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
# RFC 6125, section 6.4.3, subitem 3.
# The client SHOULD NOT attempt to match a presented identifier
# where the wildcard character is embedded within an A-label or
# U-label of an internationalized domain name.
pats.append(re.escape(leftmost))
else:
# Otherwise, '*' matches any dotless string, e.g. www*
pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
# add the remaining fragments, ignore any wildcards
for frag in remainder:
pats.append(re.escape(frag))
pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
return pat.match(hostname)
def match_hostname(cert, hostname):
    """Verify that *cert* (in decoded format as returned by
    SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
    rules are followed, but IP addresses are not accepted for *hostname*.

    CertificateError is raised on failure. On success, the function
    returns nothing.
    """
    if not cert:
        raise ValueError("empty or no certificate")
    # Collect every candidate name that was tried, for the error message.
    dnsnames = []
    san = cert.get('subjectAltName', ())
    for key, value in san:
        if key == 'DNS':
            if _dnsname_match(value, hostname):
                return
            dnsnames.append(value)
    if not dnsnames:
        # The subject is only checked when there is no dNSName entry
        # in subjectAltName
        for sub in cert.get('subject', ()):
            for key, value in sub:
                # XXX according to RFC 2818, the most specific Common Name
                # must be used.
                if key == 'commonName':
                    if _dnsname_match(value, hostname):
                        return
                    dnsnames.append(value)
    # No candidate matched: raise with a message tailored to how many
    # names were actually present in the certificate.
    if len(dnsnames) > 1:
        raise CertificateError("hostname %r "
                               "doesn't match either of %s"
                               % (hostname, ', '.join(map(repr, dnsnames))))
    elif len(dnsnames) == 1:
        raise CertificateError("hostname %r "
                               "doesn't match %r"
                               % (hostname, dnsnames[0]))
    else:
        raise CertificateError("no appropriate commonName or "
                               "subjectAltName fields were found")
gautamMalu/rootfs_xen_arndale | usr/lib/python3.4/lib2to3/fixes/fix_execfile.py | 175 | 1990 | # Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Fixer for execfile.
This converts usages of the execfile function into calls to the built-in
exec() function.
"""
from .. import fixer_base
from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
ArgList, String, syms)
class FixExecfile(fixer_base.BaseFix):
    """Rewrites execfile(f[, globals[, locals]]) calls into
    exec(compile(open(f).read(), f, 'exec')[, globals[, locals]])."""

    BM_compatible = True

    # Two alternatives: an execfile call with optional globals/locals
    # arguments, and one with only a filename.
    PATTERN = """
    power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
    |
    power< 'execfile' trailer< '(' filename=any ')' > >
    """

    def transform(self, node, results):
        """Build and return the replacement exec(...) subtree for *node*."""
        assert results
        filename = results["filename"]
        globals = results.get("globals")
        locals = results.get("locals")

        # Copy over the prefix from the right parentheses end of the execfile
        # call.
        execfile_paren = node.children[-1].children[-1].clone()
        # Construct open().read().
        open_args = ArgList([filename.clone()], rparen=execfile_paren)
        open_call = Node(syms.power, [Name("open"), open_args])
        read = [Node(syms.trailer, [Dot(), Name('read')]),
                Node(syms.trailer, [LParen(), RParen()])]
        open_expr = [open_call] + read
        # Wrap the open call in a compile call. This is so the filename will be
        # preserved in the execed code.
        filename_arg = filename.clone()
        filename_arg.prefix = " "
        exec_str = String("'exec'", " ")
        compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
        compile_call = Call(Name("compile"), compile_args, "")
        # Finally, replace the execfile call with an exec call.
        args = [compile_call]
        if globals is not None:
            args.extend([Comma(), globals.clone()])
        if locals is not None:
            args.extend([Comma(), locals.clone()])
        return Call(Name("exec"), args, prefix=node.prefix)
| gpl-2.0 |
dantebarba/docker-media-server | plex/Sub-Zero.bundle/Contents/Libraries/Shared/html5lib/html5parser.py | 45 | 118951 | from __future__ import absolute_import, division, unicode_literals
from six import with_metaclass, viewkeys
import types
from collections import OrderedDict
from . import _inputstream
from . import _tokenizer
from . import treebuilders
from .treebuilders.base import Marker
from . import _utils
from .constants import (
spaceCharacters, asciiUpper2Lower,
specialElements, headingElements, cdataElements, rcdataElements,
tokenTypes, tagTokenTypes,
namespaces,
htmlIntegrationPointElements, mathmlTextIntegrationPointElements,
adjustForeignAttributes as adjustForeignAttributesMap,
adjustMathMLAttributes, adjustSVGAttributes,
E,
_ReparseException
)
def parse(doc, treebuilder="etree", namespaceHTMLElements=True, **kwargs):
    """Convenience wrapper: parse a full HTML document into a tree.

    :arg doc: the document to parse, as a string or file-like object

    :arg treebuilder: name of the treebuilder backend to use (e.g. "etree")

    :arg namespaceHTMLElements: whether or not to namespace HTML elements

    :returns: parsed tree

    Example:

    >>> from html5lib.html5parser import parse
    >>> parse('<html><body><p>This is a doc</p></body></html>')
    <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>

    """
    builder = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parse(doc, **kwargs)
def parseFragment(doc, container="div", treebuilder="etree", namespaceHTMLElements=True, **kwargs):
    """Convenience wrapper: parse an HTML fragment into a tree fragment.

    :arg doc: the fragment to parse, as a string or file-like object

    :arg container: name of the element providing the parsing context

    :arg treebuilder: name of the treebuilder backend to use (e.g. "etree")

    :arg namespaceHTMLElements: whether or not to namespace HTML elements

    :returns: parsed tree

    Example:

    >>> from html5lib.html5parser import parseFragment
    >>> parseFragment('<b>this is a fragment</b>')
    <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>

    """
    builder = treebuilders.getTreeBuilder(treebuilder)
    parser = HTMLParser(builder, namespaceHTMLElements=namespaceHTMLElements)
    return parser.parseFragment(doc, container=container, **kwargs)
def method_decorator_metaclass(function):
    """Build a metaclass whose classes have *function* applied to every
    plain-function attribute (i.e. every method defined in the class body)."""
    class Decorated(type):
        def __new__(meta, classname, bases, classDict):
            # Build a fresh namespace rather than mutating the one we were
            # handed; functions are wrapped, everything else passes through.
            namespace = {}
            for name, value in classDict.items():
                if isinstance(value, types.FunctionType):
                    value = function(value)
                namespace[name] = value
            return type.__new__(meta, classname, bases, namespace)

    return Decorated
class HTMLParser(object):
    """HTML parser

    Generates a tree structure from a stream of (possibly malformed) HTML.

    """

    def __init__(self, tree=None, strict=False, namespaceHTMLElements=True, debug=False):
        """
        :arg tree: a treebuilder class controlling the type of tree that will be
            returned. Built in treebuilders can be accessed through
            html5lib.treebuilders.getTreeBuilder(treeType)

        :arg strict: raise an exception when a parse error is encountered

        :arg namespaceHTMLElements: whether or not to namespace HTML elements

        :arg debug: whether or not to enable debug mode which logs things

        Example:

        >>> from html5lib.html5parser import HTMLParser
        >>> parser = HTMLParser()                     # generates parser with etree builder
        >>> parser = HTMLParser('lxml', strict=True)  # generates parser with lxml builder which is strict

        """
        # Raise an exception on the first error encountered
        self.strict = strict

        if tree is None:
            tree = treebuilders.getTreeBuilder("etree")
        self.tree = tree(namespaceHTMLElements)
        self.errors = []

        # One phase instance per insertion-mode name; the phase classes are
        # produced (and memoized per debug flag) by getPhases().
        self.phases = dict([(name, cls(self, self.tree)) for name, cls in
                            getPhases(debug).items()])

    def _parse(self, stream, innerHTML=False, container="div", scripting=False, **kwargs):
        # Shared driver behind parse() and parseFragment(); innerHTML=True
        # selects the fragment-parsing algorithm with `container` as context.
        self.innerHTMLMode = innerHTML
        self.container = container
        self.scripting = scripting
        self.tokenizer = _tokenizer.HTMLTokenizer(stream, parser=self, **kwargs)
        self.reset()

        try:
            self.mainLoop()
        except _ReparseException:
            # The input stream detected a different encoding mid-parse;
            # restart from scratch with the new decoding.
            self.reset()
            self.mainLoop()

    def reset(self):
        """Reset all per-parse state so the parser can be (re)run."""
        self.tree.reset()
        self.firstStartTag = False
        self.errors = []
        self.log = []  # only used with debug mode
        # "quirks" / "limited quirks" / "no quirks"
        self.compatMode = "no quirks"

        if self.innerHTMLMode:
            self.innerHTML = self.container.lower()

            # Choose the tokenizer start state required by the fragment
            # parsing algorithm for the context element.
            # NOTE(review): the constant names look inverted relative to the
            # states chosen (cdataElements -> rcdataState) — presumably the
            # names in html5lib.constants are historical; confirm there.
            if self.innerHTML in cdataElements:
                self.tokenizer.state = self.tokenizer.rcdataState
            elif self.innerHTML in rcdataElements:
                self.tokenizer.state = self.tokenizer.rawtextState
            elif self.innerHTML == 'plaintext':
                self.tokenizer.state = self.tokenizer.plaintextState
            else:
                # state already is data state
                # self.tokenizer.state = self.tokenizer.dataState
                pass
            self.phase = self.phases["beforeHtml"]
            self.phase.insertHtmlElement()
            self.resetInsertionMode()
        else:
            self.innerHTML = False  # pylint:disable=redefined-variable-type
            self.phase = self.phases["initial"]

        self.lastPhase = None
        self.beforeRCDataPhase = None
        self.framesetOK = True

    @property
    def documentEncoding(self):
        """Name of the character encoding that was used to decode the input stream, or
        :obj:`None` if that is not determined yet

        """
        if not hasattr(self, 'tokenizer'):
            return None
        return self.tokenizer.stream.charEncoding[0].name

    def isHTMLIntegrationPoint(self, element):
        # <annotation-xml> in MathML is an HTML integration point only when
        # its encoding attribute (case-insensitively) names an HTML type;
        # everything else is looked up in the static table.
        if (element.name == "annotation-xml" and
                element.namespace == namespaces["mathml"]):
            return ("encoding" in element.attributes and
                    element.attributes["encoding"].translate(
                        asciiUpper2Lower) in
                    ("text/html", "application/xhtml+xml"))
        else:
            return (element.namespace, element.name) in htmlIntegrationPointElements

    def isMathMLTextIntegrationPoint(self, element):
        # Pure table lookup on (namespace, name).
        return (element.namespace, element.name) in mathmlTextIntegrationPointElements

    def mainLoop(self):
        # Hoist token-type constants out of the loop.
        CharactersToken = tokenTypes["Characters"]
        SpaceCharactersToken = tokenTypes["SpaceCharacters"]
        StartTagToken = tokenTypes["StartTag"]
        EndTagToken = tokenTypes["EndTag"]
        CommentToken = tokenTypes["Comment"]
        DoctypeToken = tokenTypes["Doctype"]
        ParseErrorToken = tokenTypes["ParseError"]

        for token in self.normalizedTokens():
            prev_token = None
            new_token = token
            # A phase handler may return the token to have it reprocessed
            # (possibly by a different phase); loop until one returns None.
            while new_token is not None:
                prev_token = new_token
                currentNode = self.tree.openElements[-1] if self.tree.openElements else None
                currentNodeNamespace = currentNode.namespace if currentNode else None
                currentNodeName = currentNode.name if currentNode else None

                type = new_token["type"]

                if type == ParseErrorToken:
                    self.parseError(new_token["data"], new_token.get("datavars", {}))
                    new_token = None
                else:
                    # Decide between the current insertion mode and the
                    # "in foreign content" mode: HTML-namespace nodes and
                    # the integration-point exceptions stay in the current
                    # phase; everything else is foreign content.
                    if (len(self.tree.openElements) == 0 or
                        currentNodeNamespace == self.tree.defaultNamespace or
                        (self.isMathMLTextIntegrationPoint(currentNode) and
                         ((type == StartTagToken and
                           token["name"] not in frozenset(["mglyph", "malignmark"])) or
                          type in (CharactersToken, SpaceCharactersToken))) or
                        (currentNodeNamespace == namespaces["mathml"] and
                         currentNodeName == "annotation-xml" and
                         type == StartTagToken and
                         token["name"] == "svg") or
                        (self.isHTMLIntegrationPoint(currentNode) and
                         type in (StartTagToken, CharactersToken, SpaceCharactersToken))):
                        phase = self.phase
                    else:
                        phase = self.phases["inForeignContent"]

                    # Dispatch on token type to the chosen phase.
                    if type == CharactersToken:
                        new_token = phase.processCharacters(new_token)
                    elif type == SpaceCharactersToken:
                        new_token = phase.processSpaceCharacters(new_token)
                    elif type == StartTagToken:
                        new_token = phase.processStartTag(new_token)
                    elif type == EndTagToken:
                        new_token = phase.processEndTag(new_token)
                    elif type == CommentToken:
                        new_token = phase.processComment(new_token)
                    elif type == DoctypeToken:
                        new_token = phase.processDoctype(new_token)

            # A self-closing start tag ("<foo/>") on a non-void element is a
            # parse error if no handler acknowledged the solidus.
            if (type == StartTagToken and prev_token["selfClosing"] and
                    not prev_token["selfClosingAcknowledged"]):
                self.parseError("non-void-element-with-trailing-solidus",
                                {"name": prev_token["name"]})

        # When the loop finishes it's EOF
        reprocess = True
        phases = []
        while reprocess:
            phases.append(self.phase)
            reprocess = self.phase.processEOF()
            if reprocess:
                # Each phase may hand EOF off at most once; revisiting a
                # phase would mean an infinite loop.
                assert self.phase not in phases

    def normalizedTokens(self):
        # Generator wrapping the tokenizer with HTML5-specific normalization.
        for token in self.tokenizer:
            yield self.normalizeToken(token)

    def parse(self, stream, *args, **kwargs):
        """Parse a HTML document into a well-formed tree

        :arg stream: a file-like object or string containing the HTML to be parsed

            The optional encoding parameter must be a string that indicates
            the encoding.  If specified, that encoding will be used,
            regardless of any BOM or later declaration (such as in a meta
            element).

        :arg scripting: treat noscript elements as if JavaScript was turned on

        :returns: parsed tree

        Example:

        >>> from html5lib.html5parser import HTMLParser
        >>> parser = HTMLParser()
        >>> parser.parse('<html><body><p>This is a doc</p></body></html>')
        <Element u'{http://www.w3.org/1999/xhtml}html' at 0x7feac4909db0>

        """
        self._parse(stream, False, None, *args, **kwargs)
        return self.tree.getDocument()

    def parseFragment(self, stream, *args, **kwargs):
        """Parse a HTML fragment into a well-formed tree fragment

        :arg container: name of the element we're setting the innerHTML
            property if set to None, default to 'div'

        :arg stream: a file-like object or string containing the HTML to be parsed

            The optional encoding parameter must be a string that indicates
            the encoding.  If specified, that encoding will be used,
            regardless of any BOM or later declaration (such as in a meta
            element)

        :arg scripting: treat noscript elements as if JavaScript was turned on

        :returns: parsed tree

        Example:

        >>> from html5lib.html5parser import HTMLParser
        >>> parser = HTMLParser()
        >>> parser.parseFragment('<b>this is a fragment</b>')
        <Element u'DOCUMENT_FRAGMENT' at 0x7feac484b090>

        """
        self._parse(stream, True, *args, **kwargs)
        return self.tree.getFragment()

    def parseError(self, errorcode="XXX-undefined-error", datavars=None):
        # XXX The idea is to make errorcode mandatory.
        # Errors are accumulated as (position, code, vars) tuples; in strict
        # mode the first one raises immediately.
        if datavars is None:
            datavars = {}
        self.errors.append((self.tokenizer.stream.position(), errorcode, datavars))
        if self.strict:
            raise ParseError(E[errorcode] % datavars)

    def normalizeToken(self, token):
        # HTML5 specific normalizations to the token stream
        if token["type"] == tokenTypes["StartTag"]:
            raw = token["data"]
            token["data"] = OrderedDict(raw)
            if len(raw) > len(token["data"]):
                # we had some duplicated attribute, fix so first wins
                # (building the dict makes the last duplicate win, so update
                # with the reversed pairs to restore the first occurrence)
                token["data"].update(raw[::-1])

        return token

    def adjustMathMLAttributes(self, token):
        # Fix casing of MathML attributes (e.g. definitionurl).
        adjust_attributes(token, adjustMathMLAttributes)

    def adjustSVGAttributes(self, token):
        # Fix casing of camelCased SVG attributes.
        adjust_attributes(token, adjustSVGAttributes)

    def adjustForeignAttributes(self, token):
        # Namespace xlink:/xml:/xmlns: attributes on foreign elements.
        adjust_attributes(token, adjustForeignAttributesMap)

    def reparseTokenNormal(self, token):
        # pylint:disable=unused-argument
        # NOTE(review): this references self.parser, which HTMLParser never
        # defines, and calls the phase object — looks like dead or broken
        # code; confirm whether anything calls this before relying on it.
        self.parser.phase()

    def resetInsertionMode(self):
        # The name of this method is mostly historical. (It's also used in the
        # specification.)
        # Walk the open-elements stack from the top and pick the insertion
        # mode appropriate to the first recognized element.
        last = False
        newModes = {
            "select": "inSelect",
            "td": "inCell",
            "th": "inCell",
            "tr": "inRow",
            "tbody": "inTableBody",
            "thead": "inTableBody",
            "tfoot": "inTableBody",
            "caption": "inCaption",
            "colgroup": "inColumnGroup",
            "table": "inTable",
            "head": "inBody",
            "body": "inBody",
            "frameset": "inFrameset",
            "html": "beforeHead"
        }
        for node in self.tree.openElements[::-1]:
            nodeName = node.name
            new_phase = None
            if node == self.tree.openElements[0]:
                # Bottom of the stack: substitute the fragment's context
                # element name (only reachable in innerHTML mode).
                assert self.innerHTML
                last = True
                nodeName = self.innerHTML
            # Check for conditions that should only happen in the innerHTML
            # case
            if nodeName in ("select", "colgroup", "head", "html"):
                assert self.innerHTML

            if not last and node.namespace != self.tree.defaultNamespace:
                continue

            if nodeName in newModes:
                new_phase = self.phases[newModes[nodeName]]
                break
            elif last:
                new_phase = self.phases["inBody"]
                break

        self.phase = new_phase

    def parseRCDataRawtext(self, token, contentType):
        # Generic RCDATA/RAWTEXT Parsing algorithm
        assert contentType in ("RAWTEXT", "RCDATA")

        self.tree.insertElement(token)

        if contentType == "RAWTEXT":
            self.tokenizer.state = self.tokenizer.rawtextState
        else:
            self.tokenizer.state = self.tokenizer.rcdataState

        # Remember where to return once the text phase sees the end tag.
        self.originalPhase = self.phase

        self.phase = self.phases["text"]
@_utils.memoize
def getPhases(debug):
def log(function):
"""Logger that records which phase processes each token"""
type_names = dict((value, key) for key, value in
tokenTypes.items())
def wrapped(self, *args, **kwargs):
if function.__name__.startswith("process") and len(args) > 0:
token = args[0]
try:
info = {"type": type_names[token['type']]}
except:
raise
if token['type'] in tagTokenTypes:
info["name"] = token['name']
self.parser.log.append((self.parser.tokenizer.state.__name__,
self.parser.phase.__class__.__name__,
self.__class__.__name__,
function.__name__,
info))
return function(self, *args, **kwargs)
else:
return function(self, *args, **kwargs)
return wrapped
def getMetaclass(use_metaclass, metaclass_func):
if use_metaclass:
return method_decorator_metaclass(metaclass_func)
else:
return type
# pylint:disable=unused-argument
class Phase(with_metaclass(getMetaclass(debug, log))):
"""Base class for helper object that implements each phase of processing
"""
def __init__(self, parser, tree):
self.parser = parser
self.tree = tree
def processEOF(self):
raise NotImplementedError
def processComment(self, token):
# For most phases the following is correct. Where it's not it will be
# overridden.
self.tree.insertComment(token, self.tree.openElements[-1])
def processDoctype(self, token):
self.parser.parseError("unexpected-doctype")
def processCharacters(self, token):
self.tree.insertText(token["data"])
def processSpaceCharacters(self, token):
self.tree.insertText(token["data"])
def processStartTag(self, token):
return self.startTagHandler[token["name"]](token)
def startTagHtml(self, token):
if not self.parser.firstStartTag and token["name"] == "html":
self.parser.parseError("non-html-root")
# XXX Need a check here to see if the first start tag token emitted is
# this token... If it's not, invoke self.parser.parseError().
for attr, value in token["data"].items():
if attr not in self.tree.openElements[0].attributes:
self.tree.openElements[0].attributes[attr] = value
self.parser.firstStartTag = False
def processEndTag(self, token):
return self.endTagHandler[token["name"]](token)
class InitialPhase(Phase):
def processSpaceCharacters(self, token):
pass
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
correct = token["correct"]
if (name != "html" or publicId is not None or
systemId is not None and systemId != "about:legacy-compat"):
self.parser.parseError("unknown-doctype")
if publicId is None:
publicId = ""
self.tree.insertDoctype(token)
if publicId != "":
publicId = publicId.translate(asciiUpper2Lower)
if (not correct or token["name"] != "html" or
publicId.startswith(
("+//silmaril//dtd html pro v0r11 19970101//",
"-//advasoft ltd//dtd html 3.0 aswedit + extensions//",
"-//as//dtd html 3.0 aswedit + extensions//",
"-//ietf//dtd html 2.0 level 1//",
"-//ietf//dtd html 2.0 level 2//",
"-//ietf//dtd html 2.0 strict level 1//",
"-//ietf//dtd html 2.0 strict level 2//",
"-//ietf//dtd html 2.0 strict//",
"-//ietf//dtd html 2.0//",
"-//ietf//dtd html 2.1e//",
"-//ietf//dtd html 3.0//",
"-//ietf//dtd html 3.2 final//",
"-//ietf//dtd html 3.2//",
"-//ietf//dtd html 3//",
"-//ietf//dtd html level 0//",
"-//ietf//dtd html level 1//",
"-//ietf//dtd html level 2//",
"-//ietf//dtd html level 3//",
"-//ietf//dtd html strict level 0//",
"-//ietf//dtd html strict level 1//",
"-//ietf//dtd html strict level 2//",
"-//ietf//dtd html strict level 3//",
"-//ietf//dtd html strict//",
"-//ietf//dtd html//",
"-//metrius//dtd metrius presentational//",
"-//microsoft//dtd internet explorer 2.0 html strict//",
"-//microsoft//dtd internet explorer 2.0 html//",
"-//microsoft//dtd internet explorer 2.0 tables//",
"-//microsoft//dtd internet explorer 3.0 html strict//",
"-//microsoft//dtd internet explorer 3.0 html//",
"-//microsoft//dtd internet explorer 3.0 tables//",
"-//netscape comm. corp.//dtd html//",
"-//netscape comm. corp.//dtd strict html//",
"-//o'reilly and associates//dtd html 2.0//",
"-//o'reilly and associates//dtd html extended 1.0//",
"-//o'reilly and associates//dtd html extended relaxed 1.0//",
"-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//",
"-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//",
"-//spyglass//dtd html 2.0 extended//",
"-//sq//dtd html 2.0 hotmetal + extensions//",
"-//sun microsystems corp.//dtd hotjava html//",
"-//sun microsystems corp.//dtd hotjava strict html//",
"-//w3c//dtd html 3 1995-03-24//",
"-//w3c//dtd html 3.2 draft//",
"-//w3c//dtd html 3.2 final//",
"-//w3c//dtd html 3.2//",
"-//w3c//dtd html 3.2s draft//",
"-//w3c//dtd html 4.0 frameset//",
"-//w3c//dtd html 4.0 transitional//",
"-//w3c//dtd html experimental 19960712//",
"-//w3c//dtd html experimental 970421//",
"-//w3c//dtd w3 html//",
"-//w3o//dtd w3 html 3.0//",
"-//webtechs//dtd mozilla html 2.0//",
"-//webtechs//dtd mozilla html//")) or
publicId in ("-//w3o//dtd w3 html strict 3.0//en//",
"-/w3c/dtd html 4.0 transitional/en",
"html") or
publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is None or
systemId and systemId.lower() == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd"):
self.parser.compatMode = "quirks"
elif (publicId.startswith(
("-//w3c//dtd xhtml 1.0 frameset//",
"-//w3c//dtd xhtml 1.0 transitional//")) or
publicId.startswith(
("-//w3c//dtd html 4.01 frameset//",
"-//w3c//dtd html 4.01 transitional//")) and
systemId is not None):
self.parser.compatMode = "limited quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def anythingElse(self):
self.parser.compatMode = "quirks"
self.parser.phase = self.parser.phases["beforeHtml"]
def processCharacters(self, token):
self.parser.parseError("expected-doctype-but-got-chars")
self.anythingElse()
return token
def processStartTag(self, token):
self.parser.parseError("expected-doctype-but-got-start-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEndTag(self, token):
self.parser.parseError("expected-doctype-but-got-end-tag",
{"name": token["name"]})
self.anythingElse()
return token
def processEOF(self):
self.parser.parseError("expected-doctype-but-got-eof")
self.anythingElse()
return True
class BeforeHtmlPhase(Phase):
# helper methods
def insertHtmlElement(self):
self.tree.insertRoot(impliedTagToken("html", "StartTag"))
self.parser.phase = self.parser.phases["beforeHead"]
# other
def processEOF(self):
self.insertHtmlElement()
return True
def processComment(self, token):
self.tree.insertComment(token, self.tree.document)
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.insertHtmlElement()
return token
def processStartTag(self, token):
if token["name"] == "html":
self.parser.firstStartTag = True
self.insertHtmlElement()
return token
def processEndTag(self, token):
if token["name"] not in ("head", "body", "html", "br"):
self.parser.parseError("unexpected-end-tag-before-html",
{"name": token["name"]})
else:
self.insertHtmlElement()
return token
class BeforeHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("head", "body", "html", "br"), self.endTagImplyHead)
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.startTagHead(impliedTagToken("head", "StartTag"))
return True
def processSpaceCharacters(self, token):
pass
def processCharacters(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.tree.insertElement(token)
self.tree.headPointer = self.tree.openElements[-1]
self.parser.phase = self.parser.phases["inHead"]
def startTagOther(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagImplyHead(self, token):
self.startTagHead(impliedTagToken("head", "StartTag"))
return token
def endTagOther(self, token):
self.parser.parseError("end-tag-after-implied-root",
{"name": token["name"]})
class InHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("title", self.startTagTitle),
(("noframes", "style"), self.startTagNoFramesStyle),
("noscript", self.startTagNoscript),
("script", self.startTagScript),
(("base", "basefont", "bgsound", "command", "link"),
self.startTagBaseLinkCommand),
("meta", self.startTagMeta),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("head", self.endTagHead),
(("br", "html", "body"), self.endTagHtmlBodyBr)
])
self.endTagHandler.default = self.endTagOther
# the real thing
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagHead(self, token):
self.parser.parseError("two-heads-are-not-better-than-one")
def startTagBaseLinkCommand(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagMeta(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
attributes = token["data"]
if self.parser.tokenizer.stream.charEncoding[1] == "tentative":
if "charset" in attributes:
self.parser.tokenizer.stream.changeEncoding(attributes["charset"])
elif ("content" in attributes and
"http-equiv" in attributes and
attributes["http-equiv"].lower() == "content-type"):
# Encoding it as UTF-8 here is a hack, as really we should pass
# the abstract Unicode string, and just use the
# ContentAttrParser on that, but using UTF-8 allows all chars
# to be encoded and as a ASCII-superset works.
data = _inputstream.EncodingBytes(attributes["content"].encode("utf-8"))
parser = _inputstream.ContentAttrParser(data)
codec = parser.parse()
self.parser.tokenizer.stream.changeEncoding(codec)
def startTagTitle(self, token):
self.parser.parseRCDataRawtext(token, "RCDATA")
def startTagNoFramesStyle(self, token):
# Need to decide whether to implement the scripting-disabled case
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagNoscript(self, token):
if self.parser.scripting:
self.parser.parseRCDataRawtext(token, "RAWTEXT")
else:
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inHeadNoscript"]
def startTagScript(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.scriptDataState
self.parser.originalPhase = self.parser.phase
self.parser.phase = self.parser.phases["text"]
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHead(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "head", "Expected head got %s" % node.name
self.parser.phase = self.parser.phases["afterHead"]
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.endTagHead(impliedTagToken("head"))
class InHeadNoscriptPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("basefont", "bgsound", "link", "meta", "noframes", "style"), self.startTagBaseLinkCommand),
(("head", "noscript"), self.startTagHeadNoscript),
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("noscript", self.endTagNoscript),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.parser.parseError("eof-in-head-noscript")
self.anythingElse()
return True
def processComment(self, token):
return self.parser.phases["inHead"].processComment(token)
def processCharacters(self, token):
self.parser.parseError("char-in-head-noscript")
self.anythingElse()
return token
def processSpaceCharacters(self, token):
return self.parser.phases["inHead"].processSpaceCharacters(token)
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBaseLinkCommand(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagHeadNoscript(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
self.anythingElse()
return token
def endTagNoscript(self, token):
node = self.parser.tree.openElements.pop()
assert node.name == "noscript", "Expected noscript got %s" % node.name
self.parser.phase = self.parser.phases["inHead"]
def endTagBr(self, token):
self.parser.parseError("unexpected-inhead-noscript-tag", {"name": token["name"]})
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
# Caller must raise parse error first!
self.endTagNoscript(impliedTagToken("noscript"))
class AfterHeadPhase(Phase):
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("base", "basefont", "bgsound", "link", "meta", "noframes", "script",
"style", "title"),
self.startTagFromHead),
("head", self.startTagHead)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([(("body", "html", "br"),
self.endTagHtmlBodyBr)])
self.endTagHandler.default = self.endTagOther
def processEOF(self):
self.anythingElse()
return True
def processCharacters(self, token):
self.anythingElse()
return token
def startTagHtml(self, token):
return self.parser.phases["inBody"].processStartTag(token)
def startTagBody(self, token):
self.parser.framesetOK = False
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inBody"]
def startTagFrameset(self, token):
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagFromHead(self, token):
self.parser.parseError("unexpected-start-tag-out-of-my-head",
{"name": token["name"]})
self.tree.openElements.append(self.tree.headPointer)
self.parser.phases["inHead"].processStartTag(token)
for node in self.tree.openElements[::-1]:
if node.name == "head":
self.tree.openElements.remove(node)
break
def startTagHead(self, token):
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
def startTagOther(self, token):
self.anythingElse()
return token
def endTagHtmlBodyBr(self, token):
self.anythingElse()
return token
def endTagOther(self, token):
self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
def anythingElse(self):
self.tree.insertElement(impliedTagToken("body", "StartTag"))
self.parser.phase = self.parser.phases["inBody"]
self.parser.framesetOK = True
class InBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#parsing-main-inbody
# the really-really-really-very crazy mode
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
# Set this to the default handler
self.processSpaceCharacters = self.processSpaceCharactersNonPre
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
(("base", "basefont", "bgsound", "command", "link", "meta",
"script", "style", "title"),
self.startTagProcessInHead),
("body", self.startTagBody),
("frameset", self.startTagFrameset),
(("address", "article", "aside", "blockquote", "center", "details",
"dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "main", "menu", "nav", "ol", "p",
"section", "summary", "ul"),
self.startTagCloseP),
(headingElements, self.startTagHeading),
(("pre", "listing"), self.startTagPreListing),
("form", self.startTagForm),
(("li", "dd", "dt"), self.startTagListItem),
("plaintext", self.startTagPlaintext),
("a", self.startTagA),
(("b", "big", "code", "em", "font", "i", "s", "small", "strike",
"strong", "tt", "u"), self.startTagFormatting),
("nobr", self.startTagNobr),
("button", self.startTagButton),
(("applet", "marquee", "object"), self.startTagAppletMarqueeObject),
("xmp", self.startTagXmp),
("table", self.startTagTable),
(("area", "br", "embed", "img", "keygen", "wbr"),
self.startTagVoidFormatting),
(("param", "source", "track"), self.startTagParamSource),
("input", self.startTagInput),
("hr", self.startTagHr),
("image", self.startTagImage),
("isindex", self.startTagIsIndex),
("textarea", self.startTagTextarea),
("iframe", self.startTagIFrame),
("noscript", self.startTagNoscript),
(("noembed", "noframes"), self.startTagRawtext),
("select", self.startTagSelect),
(("rp", "rt"), self.startTagRpRt),
(("option", "optgroup"), self.startTagOpt),
(("math"), self.startTagMath),
(("svg"), self.startTagSvg),
(("caption", "col", "colgroup", "frame", "head",
"tbody", "td", "tfoot", "th", "thead",
"tr"), self.startTagMisplaced)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
("body", self.endTagBody),
("html", self.endTagHtml),
(("address", "article", "aside", "blockquote", "button", "center",
"details", "dialog", "dir", "div", "dl", "fieldset", "figcaption", "figure",
"footer", "header", "hgroup", "listing", "main", "menu", "nav", "ol", "pre",
"section", "summary", "ul"), self.endTagBlock),
("form", self.endTagForm),
("p", self.endTagP),
(("dd", "dt", "li"), self.endTagListItem),
(headingElements, self.endTagHeading),
(("a", "b", "big", "code", "em", "font", "i", "nobr", "s", "small",
"strike", "strong", "tt", "u"), self.endTagFormatting),
(("applet", "marquee", "object"), self.endTagAppletMarqueeObject),
("br", self.endTagBr),
])
self.endTagHandler.default = self.endTagOther
def isMatchingFormattingElement(self, node1, node2):
return (node1.name == node2.name and
node1.namespace == node2.namespace and
node1.attributes == node2.attributes)
# helper
def addFormattingElement(self, token):
self.tree.insertElement(token)
element = self.tree.openElements[-1]
matchingElements = []
for node in self.tree.activeFormattingElements[::-1]:
if node is Marker:
break
elif self.isMatchingFormattingElement(node, element):
matchingElements.append(node)
assert len(matchingElements) <= 3
if len(matchingElements) == 3:
self.tree.activeFormattingElements.remove(matchingElements[-1])
self.tree.activeFormattingElements.append(element)
# the real deal
def processEOF(self):
allowed_elements = frozenset(("dd", "dt", "li", "p", "tbody", "td",
"tfoot", "th", "thead", "tr", "body",
"html"))
for node in self.tree.openElements[::-1]:
if node.name not in allowed_elements:
self.parser.parseError("expected-closing-tag-but-got-eof")
break
# Stop parsing
def processSpaceCharactersDropNewline(self, token):
# Sometimes (start of <pre>, <listing>, and <textarea> blocks) we
# want to drop leading newlines
data = token["data"]
self.processSpaceCharacters = self.processSpaceCharactersNonPre
if (data.startswith("\n") and
self.tree.openElements[-1].name in ("pre", "listing", "textarea") and
not self.tree.openElements[-1].hasContent()):
data = data[1:]
if data:
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(data)
def processCharacters(self, token):
if token["data"] == "\u0000":
# The tokenizer should always emit null on its own
return
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
# This must be bad for performance
if (self.parser.framesetOK and
any([char not in spaceCharacters
for char in token["data"]])):
self.parser.framesetOK = False
def processSpaceCharactersNonPre(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertText(token["data"])
def startTagProcessInHead(self, token):
return self.parser.phases["inHead"].processStartTag(token)
def startTagBody(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "body"})
if (len(self.tree.openElements) == 1 or
self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
else:
self.parser.framesetOK = False
for attr, value in token["data"].items():
if attr not in self.tree.openElements[1].attributes:
self.tree.openElements[1].attributes[attr] = value
def startTagFrameset(self, token):
self.parser.parseError("unexpected-start-tag", {"name": "frameset"})
if (len(self.tree.openElements) == 1 or self.tree.openElements[1].name != "body"):
assert self.parser.innerHTML
elif not self.parser.framesetOK:
pass
else:
if self.tree.openElements[1].parent:
self.tree.openElements[1].parent.removeChild(self.tree.openElements[1])
while self.tree.openElements[-1].name != "html":
self.tree.openElements.pop()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inFrameset"]
def startTagCloseP(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
def startTagPreListing(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
def startTagForm(self, token):
if self.tree.formPointer:
self.parser.parseError("unexpected-start-tag", {"name": "form"})
else:
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.formPointer = self.tree.openElements[-1]
        def startTagListItem(self, token):
            """<li>/<dt>/<dd>: implicitly close an open item of the same
            family (walking down the stack), then close any open <p>."""
            self.parser.framesetOK = False
            # Which open element names force an implied end tag for this token.
            stopNamesMap = {"li": ["li"],
                            "dt": ["dt", "dd"],
                            "dd": ["dt", "dd"]}
            stopNames = stopNamesMap[token["name"]]
            for node in reversed(self.tree.openElements):
                if node.name in stopNames:
                    self.parser.phase.processEndTag(
                        impliedTagToken(node.name, "EndTag"))
                    break
                # Stop at special-category elements (except these three).
                if (node.nameTuple in specialElements and
                        node.name not in ("address", "div", "p")):
                    break
            if self.tree.elementInScope("p", variant="button"):
                self.parser.phase.processEndTag(
                    impliedTagToken("p", "EndTag"))
            self.tree.insertElement(token)
def startTagPlaintext(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.plaintextState
def startTagHeading(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
if self.tree.openElements[-1].name in headingElements:
self.parser.parseError("unexpected-start-tag", {"name": token["name"]})
self.tree.openElements.pop()
self.tree.insertElement(token)
        def startTagA(self, token):
            """<a>: if an <a> is still in the active formatting list, run the
            adoption agency to close it before opening the new one."""
            afeAElement = self.tree.elementInActiveFormattingElements("a")
            if afeAElement:
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "a", "endName": "a"})
                self.endTagFormatting(impliedTagToken("a"))
                # The adoption agency normally removes the element from both
                # lists; make sure it really is gone before continuing.
                if afeAElement in self.tree.openElements:
                    self.tree.openElements.remove(afeAElement)
                if afeAElement in self.tree.activeFormattingElements:
                    self.tree.activeFormattingElements.remove(afeAElement)
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagFormatting(self, token):
            """Generic formatting element (b, i, em, strong, ...): insert it
            and record it in the active formatting elements list."""
            self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagNobr(self, token):
            """<nobr>: a nested <nobr> is implicitly closed first."""
            self.tree.reconstructActiveFormattingElements()
            if self.tree.elementInScope("nobr"):
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "nobr", "endName": "nobr"})
                self.processEndTag(impliedTagToken("nobr"))
                # XXX Need tests that trigger the following
                self.tree.reconstructActiveFormattingElements()
            self.addFormattingElement(token)
        def startTagButton(self, token):
            """<button>: a nested <button> closes the outer one, and the new
            start tag is returned for reprocessing."""
            if self.tree.elementInScope("button"):
                self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                       {"startName": "button", "endName": "button"})
                self.processEndTag(impliedTagToken("button"))
                # Reprocess this start tag now that the old button is closed.
                return token
            else:
                self.tree.reconstructActiveFormattingElements()
                self.tree.insertElement(token)
                self.parser.framesetOK = False
def startTagAppletMarqueeObject(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.activeFormattingElements.append(Marker)
self.parser.framesetOK = False
def startTagXmp(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.reconstructActiveFormattingElements()
self.parser.framesetOK = False
self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagTable(self, token):
if self.parser.compatMode != "quirks":
if self.tree.elementInScope("p", variant="button"):
self.processEndTag(impliedTagToken("p"))
self.tree.insertElement(token)
self.parser.framesetOK = False
self.parser.phase = self.parser.phases["inTable"]
def startTagVoidFormatting(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
        def startTagInput(self, token):
            """<input>: handled as a void element, except that a hidden input
            must not clear framesetOK -- so save and restore it."""
            framesetOK = self.parser.framesetOK
            self.startTagVoidFormatting(token)
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                # input type=hidden doesn't change framesetOK
                self.parser.framesetOK = framesetOK
def startTagParamSource(self, token):
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
def startTagHr(self, token):
if self.tree.elementInScope("p", variant="button"):
self.endTagP(impliedTagToken("p"))
self.tree.insertElement(token)
self.tree.openElements.pop()
token["selfClosingAcknowledged"] = True
self.parser.framesetOK = False
def startTagImage(self, token):
# No really...
self.parser.parseError("unexpected-start-tag-treated-as",
{"originalName": "image", "newName": "img"})
self.processStartTag(impliedTagToken("img", "StartTag",
attributes=token["data"],
selfClosing=token["selfClosing"]))
        def startTagIsIndex(self, token):
            """Expand the deprecated <isindex> element into an equivalent
            form > hr + label(+input) + hr structure by synthesizing tokens.

            Ignored entirely if a form is already open (formPointer set).
            The "action" attribute moves to the form; "prompt" becomes label
            text; everything else lands on the input.
            """
            self.parser.parseError("deprecated-tag", {"name": "isindex"})
            if self.tree.formPointer:
                return
            form_attrs = {}
            if "action" in token["data"]:
                form_attrs["action"] = token["data"]["action"]
            self.processStartTag(impliedTagToken("form", "StartTag",
                                                 attributes=form_attrs))
            self.processStartTag(impliedTagToken("hr", "StartTag"))
            self.processStartTag(impliedTagToken("label", "StartTag"))
            # XXX Localization ...
            if "prompt" in token["data"]:
                prompt = token["data"]["prompt"]
            else:
                prompt = "This is a searchable index. Enter search keywords: "
            self.processCharacters(
                {"type": tokenTypes["Characters"], "data": prompt})
            attributes = token["data"].copy()
            if "action" in attributes:
                del attributes["action"]
            if "prompt" in attributes:
                del attributes["prompt"]
            attributes["name"] = "isindex"
            self.processStartTag(impliedTagToken("input", "StartTag",
                                                 attributes=attributes,
                                                 selfClosing=token["selfClosing"]))
            self.processEndTag(impliedTagToken("label"))
            self.processStartTag(impliedTagToken("hr", "StartTag"))
            self.processEndTag(impliedTagToken("form"))
def startTagTextarea(self, token):
self.tree.insertElement(token)
self.parser.tokenizer.state = self.parser.tokenizer.rcdataState
self.processSpaceCharacters = self.processSpaceCharactersDropNewline
self.parser.framesetOK = False
        def startTagIFrame(self, token):
            """<iframe>: content is RAWTEXT; frameset is no longer allowed."""
            self.parser.framesetOK = False
            self.startTagRawtext(token)
def startTagNoscript(self, token):
if self.parser.scripting:
self.startTagRawtext(token)
else:
self.startTagOther(token)
        def startTagRawtext(self, token):
            """iframe, noembed noframes, noscript(if scripting enabled)

            Switch the tokenizer to RAWTEXT mode for this element's content.
            """
            self.parser.parseRCDataRawtext(token, "RAWTEXT")
def startTagOpt(self, token):
if self.tree.openElements[-1].name == "option":
self.parser.phase.processEndTag(impliedTagToken("option"))
self.tree.reconstructActiveFormattingElements()
self.parser.tree.insertElement(token)
        def startTagSelect(self, token):
            """<select>: switch to inSelectInTable when currently inside any
            table-related phase, otherwise to inSelect."""
            self.tree.reconstructActiveFormattingElements()
            self.tree.insertElement(token)
            self.parser.framesetOK = False
            if self.parser.phase in (self.parser.phases["inTable"],
                                     self.parser.phases["inCaption"],
                                     self.parser.phases["inColumnGroup"],
                                     self.parser.phases["inTableBody"],
                                     self.parser.phases["inRow"],
                                     self.parser.phases["inCell"]):
                self.parser.phase = self.parser.phases["inSelectInTable"]
            else:
                self.parser.phase = self.parser.phases["inSelect"]
def startTagRpRt(self, token):
if self.tree.elementInScope("ruby"):
self.tree.generateImpliedEndTags()
if self.tree.openElements[-1].name != "ruby":
self.parser.parseError()
self.tree.insertElement(token)
        def startTagMath(self, token):
            """Enter MathML foreign content: adjust attribute casing and
            foreign attribute namespaces, then insert with the MathML
            namespace on the token."""
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustMathMLAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["mathml"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
        def startTagSvg(self, token):
            """Enter SVG foreign content: adjust attribute casing and foreign
            attribute namespaces, then insert with the SVG namespace."""
            self.tree.reconstructActiveFormattingElements()
            self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = namespaces["svg"]
            self.tree.insertElement(token)
            # Need to get the parse error right for the case where the token
            # has a namespace not equal to the xmlns attribute
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True
def startTagMisplaced(self, token):
""" Elements that should be children of other elements that have a
different insertion mode; here they are ignored
"caption", "col", "colgroup", "frame", "frameset", "head",
"option", "optgroup", "tbody", "td", "tfoot", "th", "thead",
"tr", "noscript"
"""
self.parser.parseError("unexpected-start-tag-ignored", {"name": token["name"]})
def startTagOther(self, token):
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(token)
        def endTagP(self, token):
            """</p>: pop to the open <p>; if none is in button scope, a <p>
            is synthesized first so the error is handled uniformly."""
            if not self.tree.elementInScope("p", variant="button"):
                self.startTagCloseP(impliedTagToken("p", "StartTag"))
                self.parser.parseError("unexpected-end-tag", {"name": "p"})
                # Recurse once: the synthesized <p> is now in scope.
                self.endTagP(impliedTagToken("p", "EndTag"))
            else:
                self.tree.generateImpliedEndTags("p")
                if self.tree.openElements[-1].name != "p":
                    self.parser.parseError("unexpected-end-tag", {"name": "p"})
                node = self.tree.openElements.pop()
                while node.name != "p":
                    node = self.tree.openElements.pop()
        def endTagBody(self, token):
            """</body>: report stray open elements, then move to afterBody.

            Note the body element is NOT popped; later content may still be
            reparented into it.
            """
            if not self.tree.elementInScope("body"):
                self.parser.parseError()
                return
            elif self.tree.openElements[-1].name != "body":
                # Anything still open that may not legally remain open at
                # </body> produces a parse error (first offender only).
                for node in self.tree.openElements[2:]:
                    if node.name not in frozenset(("dd", "dt", "li", "optgroup",
                                                   "option", "p", "rp", "rt",
                                                   "tbody", "td", "tfoot",
                                                   "th", "thead", "tr", "body",
                                                   "html")):
                        # Not sure this is the correct name for the parse error
                        self.parser.parseError(
                            "expected-one-end-tag-but-got-another",
                            {"gotName": "body", "expectedName": node.name})
                        break
            self.parser.phase = self.parser.phases["afterBody"]
def endTagHtml(self, token):
# We repeat the test for the body end tag token being ignored here
if self.tree.elementInScope("body"):
self.endTagBody(impliedTagToken("body"))
return token
        def endTagBlock(self, token):
            """Generic block end tags (div, ul, pre, ...): pop up to and
            including the matching element when it is in scope."""
            # Put us back in the right whitespace handling mode
            if token["name"] == "pre":
                self.processSpaceCharacters = self.processSpaceCharactersNonPre
            inScope = self.tree.elementInScope(token["name"])
            if inScope:
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if inScope:
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagForm(self, token):
            """</form>: clear formPointer and remove the form element from
            the open-elements stack in place (nothing above it is popped)."""
            node = self.tree.formPointer
            self.tree.formPointer = None
            if node is None or not self.tree.elementInScope(node):
                self.parser.parseError("unexpected-end-tag",
                                       {"name": "form"})
            else:
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1] != node:
                    self.parser.parseError("end-tag-too-early-ignored",
                                           {"name": "form"})
                # Remove rather than pop: deeper elements stay open.
                self.tree.openElements.remove(node)
        def endTagListItem(self, token):
            """</li>, </dt>, </dd>: pop to the matching item if it is in the
            appropriate scope (list-item scope for li)."""
            if token["name"] == "li":
                variant = "list"
            else:
                variant = None
            if not self.tree.elementInScope(token["name"], variant=variant):
                self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
            else:
                # The element itself is excluded from implied end tags.
                self.tree.generateImpliedEndTags(exclude=token["name"])
                if self.tree.openElements[-1].name != token["name"]:
                    self.parser.parseError(
                        "end-tag-too-early",
                        {"name": token["name"]})
                node = self.tree.openElements.pop()
                while node.name != token["name"]:
                    node = self.tree.openElements.pop()
        def endTagHeading(self, token):
            """</h1>..</h6>: any open heading in scope satisfies the end tag,
            even if its level differs from the token's."""
            for item in headingElements:
                if self.tree.elementInScope(item):
                    self.tree.generateImpliedEndTags()
                    break
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            # Pop up to and including the nearest open heading of any level.
            for item in headingElements:
                if self.tree.elementInScope(item):
                    item = self.tree.openElements.pop()
                    while item.name not in headingElements:
                        item = self.tree.openElements.pop()
                    break
        def endTagFormatting(self, token):
            """The much-feared adoption agency algorithm.

            Repairs mis-nested formatting elements (e.g. <b><p></b></p>) by
            cloning formatting elements around the "furthest block" and
            re-parenting content.  Mutates both the stack of open elements
            and the list of active formatting elements.
            """
            # http://svn.whatwg.org/webapps/complete.html#adoptionAgency revision 7867
            # XXX Better parseError messages appreciated.
            # Step 1
            outerLoopCounter = 0
            # Step 2
            while outerLoopCounter < 8:
                # Step 3
                outerLoopCounter += 1
                # Step 4:
                # Let the formatting element be the last element in
                # the list of active formatting elements that:
                # - is between the end of the list and the last scope
                # marker in the list, if any, or the start of the list
                # otherwise, and
                # - has the same tag name as the token.
                formattingElement = self.tree.elementInActiveFormattingElements(
                    token["name"])
                if (not formattingElement or
                    (formattingElement in self.tree.openElements and
                     not self.tree.elementInScope(formattingElement.name))):
                    # If there is no such node, then abort these steps
                    # and instead act as described in the "any other
                    # end tag" entry below.
                    self.endTagOther(token)
                    return
                # Otherwise, if there is such a node, but that node is
                # not in the stack of open elements, then this is a
                # parse error; remove the element from the list, and
                # abort these steps.
                elif formattingElement not in self.tree.openElements:
                    self.parser.parseError("adoption-agency-1.2", {"name": token["name"]})
                    self.tree.activeFormattingElements.remove(formattingElement)
                    return
                # Otherwise, if there is such a node, and that node is
                # also in the stack of open elements, but the element
                # is not in scope, then this is a parse error; ignore
                # the token, and abort these steps.
                elif not self.tree.elementInScope(formattingElement.name):
                    self.parser.parseError("adoption-agency-4.4", {"name": token["name"]})
                    return
                # Otherwise, there is a formatting element and that
                # element is in the stack and is in scope. If the
                # element is not the current node, this is a parse
                # error. In any case, proceed with the algorithm as
                # written in the following steps.
                else:
                    if formattingElement != self.tree.openElements[-1]:
                        self.parser.parseError("adoption-agency-1.3", {"name": token["name"]})
                # Step 5:
                # Let the furthest block be the topmost node in the
                # stack of open elements that is lower in the stack
                # than the formatting element, and is an element in
                # the special category. There might not be one.
                afeIndex = self.tree.openElements.index(formattingElement)
                furthestBlock = None
                for element in self.tree.openElements[afeIndex:]:
                    if element.nameTuple in specialElements:
                        furthestBlock = element
                        break
                # Step 6:
                # If there is no furthest block, then the UA must
                # first pop all the nodes from the bottom of the stack
                # of open elements, from the current node up to and
                # including the formatting element, then remove the
                # formatting element from the list of active
                # formatting elements, and finally abort these steps.
                if furthestBlock is None:
                    element = self.tree.openElements.pop()
                    while element != formattingElement:
                        element = self.tree.openElements.pop()
                    self.tree.activeFormattingElements.remove(element)
                    return
                # Step 7
                commonAncestor = self.tree.openElements[afeIndex - 1]
                # Step 8:
                # The bookmark is supposed to help us identify where to reinsert
                # nodes in step 15. We have to ensure that we reinsert nodes after
                # the node before the active formatting element. Note the bookmark
                # can move in step 9.7
                bookmark = self.tree.activeFormattingElements.index(formattingElement)
                # Step 9
                lastNode = node = furthestBlock
                innerLoopCounter = 0
                index = self.tree.openElements.index(node)
                while innerLoopCounter < 3:
                    innerLoopCounter += 1
                    # Node is element before node in open elements
                    index -= 1
                    node = self.tree.openElements[index]
                    if node not in self.tree.activeFormattingElements:
                        self.tree.openElements.remove(node)
                        continue
                    # Step 9.6
                    if node == formattingElement:
                        break
                    # Step 9.7
                    if lastNode == furthestBlock:
                        bookmark = self.tree.activeFormattingElements.index(node) + 1
                    # Step 9.8
                    clone = node.cloneNode()
                    # Replace node with clone
                    self.tree.activeFormattingElements[
                        self.tree.activeFormattingElements.index(node)] = clone
                    self.tree.openElements[
                        self.tree.openElements.index(node)] = clone
                    node = clone
                    # Step 9.9
                    # Remove lastNode from its parents, if any
                    if lastNode.parent:
                        lastNode.parent.removeChild(lastNode)
                    node.appendChild(lastNode)
                    # Step 9.10
                    lastNode = node
                # Step 10
                # Foster parent lastNode if commonAncestor is a
                # table, tbody, tfoot, thead, or tr we need to foster
                # parent the lastNode
                if lastNode.parent:
                    lastNode.parent.removeChild(lastNode)
                if commonAncestor.name in frozenset(("table", "tbody", "tfoot", "thead", "tr")):
                    parent, insertBefore = self.tree.getTableMisnestedNodePosition()
                    parent.insertBefore(lastNode, insertBefore)
                else:
                    commonAncestor.appendChild(lastNode)
                # Step 11
                clone = formattingElement.cloneNode()
                # Step 12
                furthestBlock.reparentChildren(clone)
                # Step 13
                furthestBlock.appendChild(clone)
                # Step 14
                self.tree.activeFormattingElements.remove(formattingElement)
                self.tree.activeFormattingElements.insert(bookmark, clone)
                # Step 15
                self.tree.openElements.remove(formattingElement)
                self.tree.openElements.insert(
                    self.tree.openElements.index(furthestBlock) + 1, clone)
        def endTagAppletMarqueeObject(self, token):
            """applet/marquee/object end tag: pop to the element and clear the
            active formatting elements list back to the last Marker."""
            if self.tree.elementInScope(token["name"]):
                self.tree.generateImpliedEndTags()
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("end-tag-too-early", {"name": token["name"]})
            if self.tree.elementInScope(token["name"]):
                element = self.tree.openElements.pop()
                while element.name != token["name"]:
                    element = self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
def endTagBr(self, token):
self.parser.parseError("unexpected-end-tag-treated-as",
{"originalName": "br", "newName": "br element"})
self.tree.reconstructActiveFormattingElements()
self.tree.insertElement(impliedTagToken("br", "StartTag"))
self.tree.openElements.pop()
        def endTagOther(self, token):
            """Any other end tag: walk the stack top-down for a matching
            element; stop (ignoring the tag) at any special-category node."""
            for node in self.tree.openElements[::-1]:
                if node.name == token["name"]:
                    self.tree.generateImpliedEndTags(exclude=token["name"])
                    if self.tree.openElements[-1].name != token["name"]:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                    while self.tree.openElements.pop() != node:
                        pass
                    break
                else:
                    if node.nameTuple in specialElements:
                        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
                        break
    class TextPhase(Phase):
        """Phase for RCDATA/RAWTEXT element content (e.g. <script>).

        Characters are inserted verbatim; only the matching end tag or EOF
        leaves this phase, restoring parser.originalPhase.
        """
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            # No start tags are legal here; the tokenizer shouldn't emit any.
            self.startTagHandler = _utils.MethodDispatcher([])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("script", self.endTagScript)])
            self.endTagHandler.default = self.endTagOther
        def processCharacters(self, token):
            # Raw text: no formatting-element reconstruction.
            self.tree.insertText(token["data"])
        def processEOF(self):
            # EOF inside raw text: report, close the element, and let the
            # original phase reprocess the EOF (hence return True).
            self.parser.parseError("expected-named-closing-tag-but-got-eof",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
            return True
        def startTagOther(self, token):
            assert False, "Tried to process start tag %s in RCDATA/RAWTEXT mode" % token['name']
        def endTagScript(self, token):
            node = self.tree.openElements.pop()
            assert node.name == "script"
            self.parser.phase = self.parser.originalPhase
            # The rest of this method is all stuff that only happens if
            # document.write works
        def endTagOther(self, token):
            self.tree.openElements.pop()
            self.parser.phase = self.parser.originalPhase
    class InTablePhase(Phase):
        """The "in table" insertion mode.

        Content that does not belong directly inside <table> is handled with
        "table voodoo": it is reprocessed by the inBody phase with
        tree.insertFromTable set, so the treebuilder foster-parents it.
        """
        # http://www.whatwg.org/specs/web-apps/current-work/#in-table
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("caption", self.startTagCaption),
                ("colgroup", self.startTagColgroup),
                ("col", self.startTagCol),
                (("tbody", "tfoot", "thead"), self.startTagRowGroup),
                (("td", "th", "tr"), self.startTagImplyTbody),
                ("table", self.startTagTable),
                (("style", "script"), self.startTagStyleScript),
                ("input", self.startTagInput),
                ("form", self.startTagForm)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("table", self.endTagTable),
                (("body", "caption", "col", "colgroup", "html", "tbody", "td",
                  "tfoot", "th", "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        # helper methods
        def clearStackToTableContext(self):
            # "clear the stack back to a table context"
            while self.tree.openElements[-1].name not in ("table", "html"):
                # self.parser.parseError("unexpected-implied-end-tag-in-table",
                #  {"name":  self.tree.openElements[-1].name})
                self.tree.openElements.pop()
            # When the current node is <html> it's an innerHTML case
        # processing methods
        def processEOF(self):
            if self.tree.openElements[-1].name != "html":
                self.parser.parseError("eof-in-table")
            else:
                assert self.parser.innerHTML
            # Stop parsing
        def processSpaceCharacters(self, token):
            # Character data is buffered by the inTableText pseudo-phase so a
            # whole run can be inspected for non-whitespace before insertion.
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processSpaceCharacters(token)
        def processCharacters(self, token):
            originalPhase = self.parser.phase
            self.parser.phase = self.parser.phases["inTableText"]
            self.parser.phase.originalPhase = originalPhase
            self.parser.phase.processCharacters(token)
        def insertText(self, token):
            # If we get here there must be at least one non-whitespace character
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processCharacters(token)
            self.tree.insertFromTable = False
        def startTagCaption(self, token):
            self.clearStackToTableContext()
            # Marker bounds the caption's formatting-element scope.
            self.tree.activeFormattingElements.append(Marker)
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inCaption"]
        def startTagColgroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inColumnGroup"]
        def startTagCol(self, token):
            # <col> implies <colgroup>; reprocess the token there.
            self.startTagColgroup(impliedTagToken("colgroup", "StartTag"))
            return token
        def startTagRowGroup(self, token):
            self.clearStackToTableContext()
            self.tree.insertElement(token)
            self.parser.phase = self.parser.phases["inTableBody"]
        def startTagImplyTbody(self, token):
            # td/th/tr imply <tbody>; reprocess the token there.
            self.startTagRowGroup(impliedTagToken("tbody", "StartTag"))
            return token
        def startTagTable(self, token):
            self.parser.parseError("unexpected-start-tag-implies-end-tag",
                                   {"startName": "table", "endName": "table"})
            self.parser.phase.processEndTag(impliedTagToken("table"))
            if not self.parser.innerHTML:
                return token
        def startTagStyleScript(self, token):
            return self.parser.phases["inHead"].processStartTag(token)
        def startTagInput(self, token):
            if ("type" in token["data"] and
                    token["data"]["type"].translate(asciiUpper2Lower) == "hidden"):
                # Hidden inputs may sit directly in the table (void insert).
                self.parser.parseError("unexpected-hidden-input-in-table")
                self.tree.insertElement(token)
                # XXX associate with form
                self.tree.openElements.pop()
            else:
                self.startTagOther(token)
        def startTagForm(self, token):
            self.parser.parseError("unexpected-form-in-table")
            if self.tree.formPointer is None:
                self.tree.insertElement(token)
                self.tree.formPointer = self.tree.openElements[-1]
                self.tree.openElements.pop()
        def startTagOther(self, token):
            self.parser.parseError("unexpected-start-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processStartTag(token)
            self.tree.insertFromTable = False
        def endTagTable(self, token):
            if self.tree.elementInScope("table", variant="table"):
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "table":
                    self.parser.parseError("end-tag-too-early-named",
                                           {"gotName": "table",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "table":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.parser.resetInsertionMode()
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            self.parser.parseError("unexpected-end-tag-implies-table-voodoo", {"name": token["name"]})
            # Do the table magic!
            self.tree.insertFromTable = True
            self.parser.phases["inBody"].processEndTag(token)
            self.tree.insertFromTable = False
    class InTableTextPhase(Phase):
        """Buffers character tokens seen in table context.

        On the next non-character token the buffered run is flushed: if it
        contains any non-whitespace it is foster-parented via the inTable
        phase's insertText, otherwise inserted in place.  Control then
        returns to originalPhase with the pending token.
        """
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            # Phase to return to once the character run ends.
            self.originalPhase = None
            # Pending character tokens for the current run.
            self.characterTokens = []
        def flushCharacters(self):
            data = "".join([item["data"] for item in self.characterTokens])
            if any([item not in spaceCharacters for item in data]):
                # Run contains non-whitespace: foster-parent the whole run.
                token = {"type": tokenTypes["Characters"], "data": data}
                self.parser.phases["inTable"].insertText(token)
            elif data:
                self.tree.insertText(data)
            self.characterTokens = []
        def processComment(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token
        def processEOF(self):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return True
        def processCharacters(self, token):
            if token["data"] == "\u0000":
                return
            self.characterTokens.append(token)
        def processSpaceCharacters(self, token):
            # pretty sure we should never reach here
            self.characterTokens.append(token)
            # assert False
        def processStartTag(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token
        def processEndTag(self, token):
            self.flushCharacters()
            self.parser.phase = self.originalPhase
            return token
    class InCaptionPhase(Phase):
        """The "in caption" insertion mode: mostly delegates to inBody, but
        table-structure tags implicitly close the caption first."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-caption
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.startTagTableElement)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("caption", self.endTagCaption),
                ("table", self.endTagTable),
                (("body", "col", "colgroup", "html", "tbody", "td", "tfoot", "th",
                  "thead", "tr"), self.endTagIgnore)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagCaption(self):
            # True when no caption is in table scope (innerHTML case).
            return not self.tree.elementInScope("caption", variant="table")
        def processEOF(self):
            self.parser.phases["inBody"].processEOF()
        def processCharacters(self, token):
            return self.parser.phases["inBody"].processCharacters(token)
        def startTagTableElement(self, token):
            self.parser.parseError()
            # XXX Have to duplicate logic here to find out if the tag is ignored
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token
        def startTagOther(self, token):
            return self.parser.phases["inBody"].processStartTag(token)
        def endTagCaption(self, token):
            if not self.ignoreEndTagCaption():
                # AT this code is quite similar to endTagTable in "InTable"
                self.tree.generateImpliedEndTags()
                if self.tree.openElements[-1].name != "caption":
                    self.parser.parseError("expected-one-end-tag-but-got-another",
                                           {"gotName": "caption",
                                            "expectedName": self.tree.openElements[-1].name})
                while self.tree.openElements[-1].name != "caption":
                    self.tree.openElements.pop()
                self.tree.openElements.pop()
                self.tree.clearActiveFormattingElements()
                self.parser.phase = self.parser.phases["inTable"]
            else:
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
        def endTagTable(self, token):
            self.parser.parseError()
            ignoreEndTag = self.ignoreEndTagCaption()
            self.parser.phase.processEndTag(impliedTagToken("caption"))
            if not ignoreEndTag:
                return token
        def endTagIgnore(self, token):
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        def endTagOther(self, token):
            return self.parser.phases["inBody"].processEndTag(token)
    class InColumnGroupPhase(Phase):
        """The "in column group" insertion mode: only <col> belongs here;
        anything else implicitly closes the colgroup and is reprocessed."""
        # http://www.whatwg.org/specs/web-apps/current-work/#in-column
        def __init__(self, parser, tree):
            Phase.__init__(self, parser, tree)
            self.startTagHandler = _utils.MethodDispatcher([
                ("html", self.startTagHtml),
                ("col", self.startTagCol)
            ])
            self.startTagHandler.default = self.startTagOther
            self.endTagHandler = _utils.MethodDispatcher([
                ("colgroup", self.endTagColgroup),
                ("col", self.endTagCol)
            ])
            self.endTagHandler.default = self.endTagOther
        def ignoreEndTagColgroup(self):
            # Current node is <html> only in the fragment (innerHTML) case.
            return self.tree.openElements[-1].name == "html"
        def processEOF(self):
            if self.tree.openElements[-1].name == "html":
                assert self.parser.innerHTML
                return
            else:
                ignoreEndTag = self.ignoreEndTagColgroup()
                self.endTagColgroup(impliedTagToken("colgroup"))
                if not ignoreEndTag:
                    return True
        def processCharacters(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def startTagCol(self, token):
            # <col> is a void element: insert and pop immediately.
            self.tree.insertElement(token)
            self.tree.openElements.pop()
            token["selfClosingAcknowledged"] = True
        def startTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
        def endTagColgroup(self, token):
            if self.ignoreEndTagColgroup():
                # innerHTML case
                assert self.parser.innerHTML
                self.parser.parseError()
            else:
                self.tree.openElements.pop()
                self.parser.phase = self.parser.phases["inTable"]
        def endTagCol(self, token):
            self.parser.parseError("no-end-tag", {"name": "col"})
        def endTagOther(self, token):
            ignoreEndTag = self.ignoreEndTagColgroup()
            self.endTagColgroup(impliedTagToken("colgroup"))
            if not ignoreEndTag:
                return token
class InTableBodyPhase(Phase):
# http://www.whatwg.org/specs/web-apps/current-work/#in-table0
def __init__(self, parser, tree):
Phase.__init__(self, parser, tree)
self.startTagHandler = _utils.MethodDispatcher([
("html", self.startTagHtml),
("tr", self.startTagTr),
(("td", "th"), self.startTagTableCell),
(("caption", "col", "colgroup", "tbody", "tfoot", "thead"),
self.startTagTableOther)
])
self.startTagHandler.default = self.startTagOther
self.endTagHandler = _utils.MethodDispatcher([
(("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
("table", self.endTagTable),
(("body", "caption", "col", "colgroup", "html", "td", "th",
"tr"), self.endTagIgnore)
])
self.endTagHandler.default = self.endTagOther
# helper methods
def clearStackToTableBodyContext(self):
while self.tree.openElements[-1].name not in ("tbody", "tfoot",
"thead", "html"):
# self.parser.parseError("unexpected-implied-end-tag-in-table",
# {"name": self.tree.openElements[-1].name})
self.tree.openElements.pop()
if self.tree.openElements[-1].name == "html":
assert self.parser.innerHTML
# the rest
def processEOF(self):
self.parser.phases["inTable"].processEOF()
def processSpaceCharacters(self, token):
return self.parser.phases["inTable"].processSpaceCharacters(token)
def processCharacters(self, token):
return self.parser.phases["inTable"].processCharacters(token)
def startTagTr(self, token):
self.clearStackToTableBodyContext()
self.tree.insertElement(token)
self.parser.phase = self.parser.phases["inRow"]
def startTagTableCell(self, token):
self.parser.parseError("unexpected-cell-in-table-body",
{"name": token["name"]})
self.startTagTr(impliedTagToken("tr", "StartTag"))
return token
def startTagTableOther(self, token):
# XXX AT Any ideas on how to share this with endTagTable?
if (self.tree.elementInScope("tbody", variant="table") or
self.tree.elementInScope("thead", variant="table") or
self.tree.elementInScope("tfoot", variant="table")):
self.clearStackToTableBodyContext()
self.endTagTableRowGroup(
impliedTagToken(self.tree.openElements[-1].name))
return token
else:
# innerHTML case
assert self.parser.innerHTML
self.parser.parseError()
def startTagOther(self, token):
return self.parser.phases["inTable"].processStartTag(token)
def endTagTableRowGroup(self, token):
if self.tree.elementInScope(token["name"], variant="table"):
self.clearStackToTableBodyContext()
self.tree.openElements.pop()
self.parser.phase = self.parser.phases["inTable"]
else:
self.parser.parseError("unexpected-end-tag-in-table-body",
{"name": token["name"]})
def endTagTable(self, token):
    # </table>: close the open table section (if any), then reprocess so
    # the "in table" mode can close the table itself.
    if (self.tree.elementInScope("tbody", variant="table") or
            self.tree.elementInScope("thead", variant="table") or
            self.tree.elementInScope("tfoot", variant="table")):
        self.clearStackToTableBodyContext()
        self.endTagTableRowGroup(
            impliedTagToken(self.tree.openElements[-1].name))
        return token
    else:
        # innerHTML case
        assert self.parser.innerHTML
        self.parser.parseError()
def endTagIgnore(self, token):
    # These end tags are parse errors and are simply dropped.
    self.parser.parseError("unexpected-end-tag-in-table-body",
                           {"name": token["name"]})
def endTagOther(self, token):
    # Fall back to the "in table" rules.
    return self.parser.phases["inTable"].processEndTag(token)
class InRowPhase(Phase):
    """Tree-construction rules for the "in row" insertion mode."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-row
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        # Dispatch tables mapping tag names to handlers; unmatched names
        # fall through to the *Other handlers (which defer to "in table").
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("td", "th"), self.startTagTableCell),
            (("caption", "col", "colgroup", "tbody", "tfoot", "thead",
              "tr"), self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = _utils.MethodDispatcher([
            ("tr", self.endTagTr),
            ("table", self.endTagTable),
            (("tbody", "tfoot", "thead"), self.endTagTableRowGroup),
            (("body", "caption", "col", "colgroup", "html", "td", "th"),
             self.endTagIgnore)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper methods (XXX unify this with other table helper methods)
    def clearStackToTableRowContext(self):
        # Pop elements until a <tr> (or, in the fragment case, <html>)
        # is the current node.
        while self.tree.openElements[-1].name not in ("tr", "html"):
            self.parser.parseError("unexpected-implied-end-tag-in-table-row",
                                   {"name": self.tree.openElements[-1].name})
            self.tree.openElements.pop()

    def ignoreEndTagTr(self):
        # True when no <tr> is in table scope, i.e. </tr> must be ignored.
        return not self.tree.elementInScope("tr", variant="table")

    # the rest
    def processEOF(self):
        self.parser.phases["inTable"].processEOF()

    def processSpaceCharacters(self, token):
        return self.parser.phases["inTable"].processSpaceCharacters(token)

    def processCharacters(self, token):
        return self.parser.phases["inTable"].processCharacters(token)

    def startTagTableCell(self, token):
        # Open the cell and push a Marker so active formatting elements do
        # not leak across cell boundaries.
        self.clearStackToTableRowContext()
        self.tree.insertElement(token)
        self.parser.phase = self.parser.phases["inCell"]
        self.tree.activeFormattingElements.append(Marker)

    def startTagTableOther(self, token):
        ignoreEndTag = self.ignoreEndTagTr()
        self.endTagTr(impliedTagToken("tr"))
        # XXX how are we sure it's always ignored in the innerHTML case?
        if not ignoreEndTag:
            return token

    def startTagOther(self, token):
        return self.parser.phases["inTable"].processStartTag(token)

    def endTagTr(self, token):
        if not self.ignoreEndTagTr():
            self.clearStackToTableRowContext()
            self.tree.openElements.pop()
            self.parser.phase = self.parser.phases["inTableBody"]
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagTable(self, token):
        ignoreEndTag = self.ignoreEndTagTr()
        self.endTagTr(impliedTagToken("tr"))
        # Reprocess the current tag if the tr end tag was not ignored
        # XXX how are we sure it's always ignored in the innerHTML case?
        if not ignoreEndTag:
            return token

    def endTagTableRowGroup(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagTr(impliedTagToken("tr"))
            return token
        else:
            self.parser.parseError()

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag-in-table-row",
                               {"name": token["name"]})

    def endTagOther(self, token):
        return self.parser.phases["inTable"].processEndTag(token)
class InCellPhase(Phase):
    """Tree-construction rules for the "in cell" insertion mode."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-cell
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            (("caption", "col", "colgroup", "tbody", "td", "tfoot", "th",
              "thead", "tr"), self.startTagTableOther)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = _utils.MethodDispatcher([
            (("td", "th"), self.endTagTableCell),
            (("body", "caption", "col", "colgroup", "html"), self.endTagIgnore),
            (("table", "tbody", "tfoot", "thead", "tr"), self.endTagImply)
        ])
        self.endTagHandler.default = self.endTagOther

    # helper
    def closeCell(self):
        # Emit an implied </td> or </th> for whichever cell is in scope.
        if self.tree.elementInScope("td", variant="table"):
            self.endTagTableCell(impliedTagToken("td"))
        elif self.tree.elementInScope("th", variant="table"):
            self.endTagTableCell(impliedTagToken("th"))

    # the rest
    def processEOF(self):
        self.parser.phases["inBody"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inBody"].processCharacters(token)

    def startTagTableOther(self, token):
        if (self.tree.elementInScope("td", variant="table") or
                self.tree.elementInScope("th", variant="table")):
            # Close the open cell and reprocess the token.
            self.closeCell()
            return token
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def startTagOther(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def endTagTableCell(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            self.tree.generateImpliedEndTags(token["name"])
            if self.tree.openElements[-1].name != token["name"]:
                self.parser.parseError("unexpected-cell-end-tag",
                                       {"name": token["name"]})
                # Pop until the cell element itself has been removed.
                while True:
                    node = self.tree.openElements.pop()
                    if node.name == token["name"]:
                        break
            else:
                self.tree.openElements.pop()
            self.tree.clearActiveFormattingElements()
            self.parser.phase = self.parser.phases["inRow"]
        else:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def endTagIgnore(self, token):
        self.parser.parseError("unexpected-end-tag", {"name": token["name"]})

    def endTagImply(self, token):
        if self.tree.elementInScope(token["name"], variant="table"):
            # Close the cell and reprocess the structural end tag.
            self.closeCell()
            return token
        else:
            # sometimes innerHTML case
            self.parser.parseError()

    def endTagOther(self, token):
        return self.parser.phases["inBody"].processEndTag(token)
class InSelectPhase(Phase):
    """Tree-construction rules for the "in select" insertion mode."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("option", self.startTagOption),
            ("optgroup", self.startTagOptgroup),
            ("select", self.startTagSelect),
            (("input", "keygen", "textarea"), self.startTagInput),
            ("script", self.startTagScript)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = _utils.MethodDispatcher([
            ("option", self.endTagOption),
            ("optgroup", self.endTagOptgroup),
            ("select", self.endTagSelect)
        ])
        self.endTagHandler.default = self.endTagOther

    # http://www.whatwg.org/specs/web-apps/current-work/#in-select
    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-select")
        else:
            assert self.parser.innerHTML

    def processCharacters(self, token):
        # U+0000 characters are dropped inside select content.
        if token["data"] == "\u0000":
            return
        self.tree.insertText(token["data"])

    def startTagOption(self, token):
        # We need to imply </option> if <option> is the current node.
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        self.tree.insertElement(token)

    def startTagOptgroup(self, token):
        # <optgroup> implicitly closes an open <option> and <optgroup>.
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        if self.tree.openElements[-1].name == "optgroup":
            self.tree.openElements.pop()
        self.tree.insertElement(token)

    def startTagSelect(self, token):
        # A nested <select> start tag acts like </select>.
        self.parser.parseError("unexpected-select-in-select")
        self.endTagSelect(impliedTagToken("select"))

    def startTagInput(self, token):
        # <input>/<keygen>/<textarea> force the select closed and are
        # then reprocessed outside it.
        self.parser.parseError("unexpected-input-in-select")
        if self.tree.elementInScope("select", variant="select"):
            self.endTagSelect(impliedTagToken("select"))
            return token
        else:
            assert self.parser.innerHTML

    def startTagScript(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-select",
                               {"name": token["name"]})

    def endTagOption(self, token):
        if self.tree.openElements[-1].name == "option":
            self.tree.openElements.pop()
        else:
            self.parser.parseError("unexpected-end-tag-in-select",
                                   {"name": "option"})

    def endTagOptgroup(self, token):
        # </optgroup> implicitly closes <option>
        if (self.tree.openElements[-1].name == "option" and
                self.tree.openElements[-2].name == "optgroup"):
            self.tree.openElements.pop()
        # It also closes </optgroup>
        if self.tree.openElements[-1].name == "optgroup":
            self.tree.openElements.pop()
        # But nothing else
        else:
            self.parser.parseError("unexpected-end-tag-in-select",
                                   {"name": "optgroup"})

    def endTagSelect(self, token):
        if self.tree.elementInScope("select", variant="select"):
            # Pop everything up to and including the <select> element.
            node = self.tree.openElements.pop()
            while node.name != "select":
                node = self.tree.openElements.pop()
            self.parser.resetInsertionMode()
        else:
            # innerHTML case
            assert self.parser.innerHTML
            self.parser.parseError()

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-select",
                               {"name": token["name"]})
class InSelectInTablePhase(Phase):
    """Rules for a <select> opened inside table content.

    Table-structure tags force the open <select> closed and are then
    reprocessed; everything else defers to the "in select" mode.
    """
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.startTagTable)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = _utils.MethodDispatcher([
            (("caption", "table", "tbody", "tfoot", "thead", "tr", "td", "th"),
             self.endTagTable)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        self.parser.phases["inSelect"].processEOF()

    def processCharacters(self, token):
        return self.parser.phases["inSelect"].processCharacters(token)

    def startTagTable(self, token):
        # Close the select and reprocess the table-structure start tag.
        self.parser.parseError("unexpected-table-element-start-tag-in-select-in-table", {"name": token["name"]})
        self.endTagOther(impliedTagToken("select"))
        return token

    def startTagOther(self, token):
        return self.parser.phases["inSelect"].processStartTag(token)

    def endTagTable(self, token):
        # Only close/reprocess when the element is actually in table scope.
        self.parser.parseError("unexpected-table-element-end-tag-in-select-in-table", {"name": token["name"]})
        if self.tree.elementInScope(token["name"], variant="table"):
            self.endTagOther(impliedTagToken("select"))
            return token

    def endTagOther(self, token):
        return self.parser.phases["inSelect"].processEndTag(token)
class InForeignContentPhase(Phase):
    """Rules for content inside foreign (SVG / MathML) elements."""
    # HTML elements that "break out" of foreign content back into HTML.
    breakoutElements = frozenset(["b", "big", "blockquote", "body", "br",
                                  "center", "code", "dd", "div", "dl", "dt",
                                  "em", "embed", "h1", "h2", "h3",
                                  "h4", "h5", "h6", "head", "hr", "i", "img",
                                  "li", "listing", "menu", "meta", "nobr",
                                  "ol", "p", "pre", "ruby", "s", "small",
                                  "span", "strong", "strike", "sub", "sup",
                                  "table", "tt", "u", "ul", "var"])

    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)

    def adjustSVGTagNames(self, token):
        # SVG element names are case-sensitive; restore the canonical
        # mixed-case form for names the tokenizer has lowercased.
        replacements = {"altglyph": "altGlyph",
                        "altglyphdef": "altGlyphDef",
                        "altglyphitem": "altGlyphItem",
                        "animatecolor": "animateColor",
                        "animatemotion": "animateMotion",
                        "animatetransform": "animateTransform",
                        "clippath": "clipPath",
                        "feblend": "feBlend",
                        "fecolormatrix": "feColorMatrix",
                        "fecomponenttransfer": "feComponentTransfer",
                        "fecomposite": "feComposite",
                        "feconvolvematrix": "feConvolveMatrix",
                        "fediffuselighting": "feDiffuseLighting",
                        "fedisplacementmap": "feDisplacementMap",
                        "fedistantlight": "feDistantLight",
                        "feflood": "feFlood",
                        "fefunca": "feFuncA",
                        "fefuncb": "feFuncB",
                        "fefuncg": "feFuncG",
                        "fefuncr": "feFuncR",
                        "fegaussianblur": "feGaussianBlur",
                        "feimage": "feImage",
                        "femerge": "feMerge",
                        "femergenode": "feMergeNode",
                        "femorphology": "feMorphology",
                        "feoffset": "feOffset",
                        "fepointlight": "fePointLight",
                        "fespecularlighting": "feSpecularLighting",
                        "fespotlight": "feSpotLight",
                        "fetile": "feTile",
                        "feturbulence": "feTurbulence",
                        "foreignobject": "foreignObject",
                        "glyphref": "glyphRef",
                        "lineargradient": "linearGradient",
                        "radialgradient": "radialGradient",
                        "textpath": "textPath"}
        if token["name"] in replacements:
            token["name"] = replacements[token["name"]]

    def processCharacters(self, token):
        # NULs become U+FFFD; any non-space data disables framesets.
        if token["data"] == "\u0000":
            token["data"] = "\uFFFD"
        elif (self.parser.framesetOK and
              any(char not in spaceCharacters for char in token["data"])):
            self.parser.framesetOK = False
        Phase.processCharacters(self, token)

    def processStartTag(self, token):
        currentNode = self.tree.openElements[-1]
        if (token["name"] in self.breakoutElements or
            (token["name"] == "font" and
             set(token["data"].keys()) & set(["color", "face", "size"]))):
            # Breakout element: pop foreign elements until an HTML element
            # or an integration point is current, then reprocess the token.
            self.parser.parseError("unexpected-html-element-in-foreign-content",
                                   {"name": token["name"]})
            while (self.tree.openElements[-1].namespace !=
                   self.tree.defaultNamespace and
                   not self.parser.isHTMLIntegrationPoint(self.tree.openElements[-1]) and
                   not self.parser.isMathMLTextIntegrationPoint(self.tree.openElements[-1])):
                self.tree.openElements.pop()
            return token
        else:
            # Adjust names/attributes per the current foreign namespace,
            # then insert the element in that namespace.
            if currentNode.namespace == namespaces["mathml"]:
                self.parser.adjustMathMLAttributes(token)
            elif currentNode.namespace == namespaces["svg"]:
                self.adjustSVGTagNames(token)
                self.parser.adjustSVGAttributes(token)
            self.parser.adjustForeignAttributes(token)
            token["namespace"] = currentNode.namespace
            self.tree.insertElement(token)
            if token["selfClosing"]:
                self.tree.openElements.pop()
                token["selfClosingAcknowledged"] = True

    def processEndTag(self, token):
        nodeIndex = len(self.tree.openElements) - 1
        node = self.tree.openElements[-1]
        if node.name.translate(asciiUpper2Lower) != token["name"]:
            self.parser.parseError("unexpected-end-tag", {"name": token["name"]})
        # Walk the open-elements stack looking for a case-insensitive match.
        while True:
            if node.name.translate(asciiUpper2Lower) == token["name"]:
                # XXX this isn't in the spec but it seems necessary
                if self.parser.phase == self.parser.phases["inTableText"]:
                    self.parser.phase.flushCharacters()
                    self.parser.phase = self.parser.phase.originalPhase
                while self.tree.openElements.pop() != node:
                    assert self.tree.openElements
                new_token = None
                break
            nodeIndex -= 1
            node = self.tree.openElements[nodeIndex]
            if node.namespace != self.tree.defaultNamespace:
                continue
            else:
                # Reached an HTML element: hand the end tag to the current
                # (HTML) phase instead.
                new_token = self.parser.phase.processEndTag(token)
                break
        return new_token
class AfterBodyPhase(Phase):
    """Tree-construction rules for the "after body" insertion mode."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = _utils.MethodDispatcher([("html", self.endTagHtml)])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processComment(self, token):
        # This is needed because data is to be appended to the <html> element
        # here and not to whatever is currently open.
        self.tree.insertComment(token, self.tree.openElements[0])

    def processCharacters(self, token):
        # Stray characters reopen the body: switch back and reprocess.
        self.parser.parseError("unexpected-char-after-body")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def endTagHtml(self, name):
        # NOTE: the parameter here is the token; it is historically
        # (mis)named "name" in this handler.
        if self.parser.innerHTML:
            self.parser.parseError("unexpected-end-tag-after-body-innerhtml")
        else:
            self.parser.phase = self.parser.phases["afterAfterBody"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-body",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class InFramesetPhase(Phase):
    """Tree-construction rules for the "in frameset" insertion mode."""
    # http://www.whatwg.org/specs/web-apps/current-work/#in-frameset
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("frameset", self.startTagFrameset),
            ("frame", self.startTagFrame),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = _utils.MethodDispatcher([
            ("frameset", self.endTagFrameset)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        if self.tree.openElements[-1].name != "html":
            self.parser.parseError("eof-in-frameset")
        else:
            assert self.parser.innerHTML

    def processCharacters(self, token):
        # Character data is not allowed inside a frameset.
        self.parser.parseError("unexpected-char-in-frameset")

    def startTagFrameset(self, token):
        self.tree.insertElement(token)

    def startTagFrame(self, token):
        # <frame> is a void element: insert and immediately pop.
        self.tree.insertElement(token)
        self.tree.openElements.pop()

    def startTagNoframes(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-in-frameset",
                               {"name": token["name"]})

    def endTagFrameset(self, token):
        if self.tree.openElements[-1].name == "html":
            # innerHTML case
            self.parser.parseError("unexpected-frameset-in-frameset-innerhtml")
        else:
            self.tree.openElements.pop()
        if (not self.parser.innerHTML and
                self.tree.openElements[-1].name != "frameset"):
            # If we're not in innerHTML mode and the current node is not a
            # "frameset" element (anymore) then switch.
            self.parser.phase = self.parser.phases["afterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-in-frameset",
                               {"name": token["name"]})
class AfterFramesetPhase(Phase):
    """Tree-construction rules for the "after frameset" insertion mode."""
    # http://www.whatwg.org/specs/web-apps/current-work/#after3
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoframes)
        ])
        self.startTagHandler.default = self.startTagOther
        self.endTagHandler = _utils.MethodDispatcher([
            ("html", self.endTagHtml)
        ])
        self.endTagHandler.default = self.endTagOther

    def processEOF(self):
        # Stop parsing
        pass

    def processCharacters(self, token):
        self.parser.parseError("unexpected-char-after-frameset")

    def startTagNoframes(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("unexpected-start-tag-after-frameset",
                               {"name": token["name"]})

    def endTagHtml(self, token):
        self.parser.phase = self.parser.phases["afterAfterFrameset"]

    def endTagOther(self, token):
        self.parser.parseError("unexpected-end-tag-after-frameset",
                               {"name": token["name"]})
class AfterAfterBodyPhase(Phase):
    """Tree-construction rules for the "after after body" insertion mode."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments here are appended to the Document itself.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        # Unexpected data reopens the body: switch back and reprocess.
        self.parser.parseError("expected-eof-but-got-char")
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
        self.parser.phase = self.parser.phases["inBody"]
        return token
class AfterAfterFramesetPhase(Phase):
    """Tree-construction rules for the "after after frameset" mode."""
    def __init__(self, parser, tree):
        Phase.__init__(self, parser, tree)
        self.startTagHandler = _utils.MethodDispatcher([
            ("html", self.startTagHtml),
            ("noframes", self.startTagNoFrames)
        ])
        self.startTagHandler.default = self.startTagOther

    def processEOF(self):
        pass

    def processComment(self, token):
        # Comments here are appended to the Document itself.
        self.tree.insertComment(token, self.tree.document)

    def processSpaceCharacters(self, token):
        return self.parser.phases["inBody"].processSpaceCharacters(token)

    def processCharacters(self, token):
        self.parser.parseError("expected-eof-but-got-char")

    def startTagHtml(self, token):
        return self.parser.phases["inBody"].processStartTag(token)

    def startTagNoFrames(self, token):
        return self.parser.phases["inHead"].processStartTag(token)

    def startTagOther(self, token):
        self.parser.parseError("expected-eof-but-got-start-tag",
                               {"name": token["name"]})

    def processEndTag(self, token):
        self.parser.parseError("expected-eof-but-got-end-tag",
                               {"name": token["name"]})
# pylint:enable=unused-argument
return {
"initial": InitialPhase,
"beforeHtml": BeforeHtmlPhase,
"beforeHead": BeforeHeadPhase,
"inHead": InHeadPhase,
"inHeadNoscript": InHeadNoscriptPhase,
"afterHead": AfterHeadPhase,
"inBody": InBodyPhase,
"text": TextPhase,
"inTable": InTablePhase,
"inTableText": InTableTextPhase,
"inCaption": InCaptionPhase,
"inColumnGroup": InColumnGroupPhase,
"inTableBody": InTableBodyPhase,
"inRow": InRowPhase,
"inCell": InCellPhase,
"inSelect": InSelectPhase,
"inSelectInTable": InSelectInTablePhase,
"inForeignContent": InForeignContentPhase,
"afterBody": AfterBodyPhase,
"inFrameset": InFramesetPhase,
"afterFrameset": AfterFramesetPhase,
"afterAfterBody": AfterAfterBodyPhase,
"afterAfterFrameset": AfterAfterFramesetPhase,
# XXX after after frameset
}
def adjust_attributes(token, replacements):
    """Rename the token's attributes according to *replacements*, in place.

    The attribute mapping is only rebuilt when at least one key actually
    needs adjusting; keys absent from *replacements* are kept unchanged.
    """
    if viewkeys(token['data']) & viewkeys(replacements):
        token['data'] = OrderedDict(
            (replacements.get(k, k), v) for k, v in token['data'].items())
def impliedTagToken(name, type="EndTag", attributes=None,
                    selfClosing=False):
    """Build a synthetic token for a tag the parser must act as if it saw."""
    return {"type": tokenTypes[type],
            "name": name,
            "data": {} if attributes is None else attributes,
            "selfClosing": selfClosing}
class ParseError(Exception):
    """Error in parsed document"""
# Copyright (c) 2003, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory (subject to receipt of
# any required approvals from the U.S. Dept. of Energy). All rights
# reserved.
#
# Copyright (c) 2001 Zope Corporation and Contributors. All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
ident = "$Id$"
import types, weakref, sys, warnings
from Namespaces import SCHEMA, XMLNS, SOAP, APACHE
from Utility import DOM, DOMException, Collection, SplitQName, basejoin
from StringIO import StringIO
# If we have no threading, this should be a no-op
try:
from threading import RLock
except ImportError:
class RLock:
    """No-op stand-in used when the threading module is unavailable.

    acquire/release must accept ``self`` so they can be invoked on an
    instance; the originals omitted it and raised TypeError when called.
    """
    def acquire(self):
        pass

    def release(self):
        pass
#
# Collections in XMLSchema class
#
TYPES = 'types'
ATTRIBUTE_GROUPS = 'attr_groups'
ATTRIBUTES = 'attr_decl'
ELEMENTS = 'elements'
MODEL_GROUPS = 'model_groups'
BUILT_IN_NAMESPACES = [SOAP.ENC,] + SCHEMA.XSD_LIST + [APACHE.AXIS_NS]
def GetSchema(component):
    """Convenience function for finding the parent XMLSchema instance.

    Follows the chain of ``_parent`` weakrefs upward until an XMLSchema
    is reached and returns it.
    """
    node = component
    while not isinstance(node, XMLSchema):
        node = node._parent()
    return node
class SchemaReader:
    """A SchemaReader creates XMLSchema objects from urls and xml data.
    """
    namespaceToSchema = {}

    def __init__(self, domReader=None, base_url=None):
        """domReader -- class must implement DOMAdapterInterface
           base_url -- base url string
        """
        self.__base_url = base_url
        self.__readerClass = domReader
        if not self.__readerClass:
            self.__readerClass = DOMAdapter
        self._includes = {}
        self._imports = {}

    def __setImports(self, schema):
        """Add dictionary of imports to schema instance.
           schema -- XMLSchema instance
        """
        for ns, val in schema.imports.items():
            # ``in`` replaces dict.has_key (removed in Python 3).
            if ns in self._imports:
                schema.addImportSchema(self._imports[ns])

    def __setIncludes(self, schema):
        """Add dictionary of includes to schema instance.
           schema -- XMLSchema instance
        """
        for schemaLocation, val in schema.includes.items():
            if schemaLocation in self._includes:
                # BUG FIX: this previously indexed self._imports with a key
                # only guaranteed to exist in self._includes (KeyError).
                schema.addIncludeSchema(schemaLocation,
                                        self._includes[schemaLocation])

    def addSchemaByLocation(self, location, schema):
        """provide reader with schema document for a location.
        """
        self._includes[location] = schema

    def addSchemaByNamespace(self, schema):
        """provide reader with schema document for a targetNamespace.
        """
        self._imports[schema.targetNamespace] = schema

    def loadFromNode(self, parent, element):
        """element -- DOM node or document
           parent -- WSDLAdapter instance
        """
        reader = self.__readerClass(element)
        schema = XMLSchema(parent)
        # HACK to keep a reference
        schema.wsdl = parent
        schema.setBaseUrl(self.__base_url)
        schema.load(reader)
        return schema

    def loadFromStream(self, file, url=None):
        """Return an XMLSchema instance loaded from a file object.
           file -- file object
           url -- base location for resolving imports/includes.
        """
        reader = self.__readerClass()
        reader.loadDocument(file)
        schema = XMLSchema()
        if url is not None:
            schema.setBaseUrl(url)
        schema.load(reader)
        self.__setIncludes(schema)
        self.__setImports(schema)
        return schema

    def loadFromString(self, data):
        """Return an XMLSchema instance loaded from an XML string.
           data -- XML string
        """
        return self.loadFromStream(StringIO(data))

    def loadFromURL(self, url, schema=None):
        """Return an XMLSchema instance loaded from the given url.
           url -- URL to dereference
           schema -- Optional XMLSchema instance.
        """
        reader = self.__readerClass()
        if self.__base_url:
            url = basejoin(self.__base_url, url)
        reader.loadFromURL(url)
        schema = schema or XMLSchema()
        schema.setBaseUrl(url)
        schema.load(reader)
        self.__setIncludes(schema)
        self.__setImports(schema)
        return schema

    def loadFromFile(self, filename):
        """Return an XMLSchema instance loaded from the given file.
           filename -- name of file to open
        """
        if self.__base_url:
            filename = basejoin(self.__base_url, filename)
        file = open(filename, 'rb')
        try:
            schema = self.loadFromStream(file, filename)
        finally:
            file.close()
        return schema
class SchemaError(Exception):
    """Raised when schema loading or resolution fails."""
class NoSchemaLocationWarning(Exception):
    """Signals a schema reference without a usable schemaLocation.

    Note: despite the name, this is an Exception subclass, not a Warning.
    """
###########################
# DOM Utility Adapters
##########################
class DOMAdapterInterface:
    """Abstract interface that DOM adapters must implement.

    Every method raises NotImplementedError; concrete adapters such as
    DOMAdapter override all of them.  The original used the Python-2-only
    ``raise E, msg`` statement form; the call form used here is equivalent
    and valid in both Python 2 and 3.
    """

    def hasattr(self, attr, ns=None):
        """return true if node has attribute
           attr -- attribute to check for
           ns -- namespace of attribute, by default None
        """
        raise NotImplementedError('adapter method not implemented')

    def getContentList(self, *contents):
        """returns an ordered list of child nodes
           *contents -- list of node names to return
        """
        raise NotImplementedError('adapter method not implemented')

    def setAttributeDictionary(self, attributes):
        """set attribute dictionary
        """
        raise NotImplementedError('adapter method not implemented')

    def getAttributeDictionary(self):
        """returns a dict of node's attributes
        """
        raise NotImplementedError('adapter method not implemented')

    def getNamespace(self, prefix):
        """returns namespace referenced by prefix.
        """
        raise NotImplementedError('adapter method not implemented')

    def getTagName(self):
        """returns tagName of node
        """
        raise NotImplementedError('adapter method not implemented')

    def getParentNode(self):
        """returns parent element in DOMAdapter or None
        """
        raise NotImplementedError('adapter method not implemented')

    def loadDocument(self, file):
        """load a Document from a file object
           file --
        """
        raise NotImplementedError('adapter method not implemented')

    def loadFromURL(self, url):
        """load a Document from an url
           url -- URL to dereference
        """
        raise NotImplementedError('adapter method not implemented')
class DOMAdapter(DOMAdapterInterface):
    """Adapter for ZSI.Utility.DOM
    """
    def __init__(self, node=None):
        """Reset all instance variables.
           element -- DOM document, node, or None
        """
        if hasattr(node, 'documentElement'):
            self.__node = node.documentElement
        else:
            self.__node = node
        self.__attributes = None

    def getNode(self):
        return self.__node

    def hasattr(self, attr, ns=None):
        """attr -- attribute
           ns -- optional namespace, None means unprefixed attribute.
        """
        if not self.__attributes:
            self.setAttributeDictionary()
        # ``in`` replaces dict.has_key (removed in Python 3).
        if ns:
            return attr in self.__attributes.get(ns, {})
        return attr in self.__attributes

    def getContentList(self, *contents):
        # Collect child elements whose local tag name is in *contents*,
        # each wrapped in a new adapter instance.
        nodes = []
        ELEMENT_NODE = self.__node.ELEMENT_NODE
        for child in DOM.getElements(self.__node, None):
            if child.nodeType == ELEMENT_NODE and\
               SplitQName(child.tagName)[1] in contents:
                nodes.append(child)
        # List comprehension keeps py2 behavior (map returned a list there).
        return [self.__class__(node) for node in nodes]

    def setAttributeDictionary(self):
        self.__attributes = {}
        for v in self.__node._attrs.values():
            self.__attributes[v.nodeName] = v.nodeValue

    def getAttributeDictionary(self):
        if not self.__attributes:
            self.setAttributeDictionary()
        return self.__attributes

    def getTagName(self):
        return self.__node.tagName

    def getParentNode(self):
        if self.__node.parentNode.nodeType == self.__node.ELEMENT_NODE:
            return DOMAdapter(self.__node.parentNode)
        return None

    def getNamespace(self, prefix):
        """prefix -- dereference namespace prefix in node's context.
        Ascends parent nodes until found.
        """
        namespace = None
        if prefix == 'xmlns':
            namespace = DOM.findDefaultNS(prefix, self.__node)
        else:
            try:
                namespace = DOM.findNamespaceURI(prefix, self.__node)
            except DOMException:
                # Python-3-compatible except clause (was ``except E, e``);
                # the bound exception was unused.
                if prefix != 'xml':
                    raise SchemaError('%s namespace not declared for %s'
                                      % (prefix, self.__node._get_tagName()))
                namespace = XMLNS.XML
        return namespace

    def loadDocument(self, file):
        self.__node = DOM.loadDocument(file)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement

    def loadFromURL(self, url):
        self.__node = DOM.loadFromURL(url)
        if hasattr(self.__node, 'documentElement'):
            self.__node = self.__node.documentElement
class XMLBase:
    """Base class providing an indented, lock-guarded __str__ dump.

    The class variables below implement shared string indentation state.
    """
    tag = None
    __indent = 0
    __rlock = RLock()

    def __str__(self):
        XMLBase.__rlock.acquire()
        XMLBase.__indent += 1
        pieces = ["<" + str(self.__class__) + '>\n']
        for key, val in self.__dict__.items():
            pieces.append("%s* %s = %s\n" % (XMLBase.__indent * ' ', key, val))
        XMLBase.__indent -= 1
        XMLBase.__rlock.release()
        return ''.join(pieces)
"""Marker Interface: can determine something about an instances properties by using
the provided convenience functions.
"""
class DefinitionMarker:
"""marker for definitions
"""
pass
class DeclarationMarker:
"""marker for declarations
"""
pass
class AttributeMarker:
"""marker for attributes
"""
pass
class AttributeGroupMarker:
"""marker for attribute groups
"""
pass
class WildCardMarker:
"""marker for wildcards
"""
pass
class ElementMarker:
"""marker for wildcards
"""
pass
class ReferenceMarker:
"""marker for references
"""
pass
class ModelGroupMarker:
"""marker for model groups
"""
pass
class AllMarker(ModelGroupMarker):
"""marker for all model group
"""
pass
class ChoiceMarker(ModelGroupMarker):
"""marker for choice model group
"""
pass
class SequenceMarker(ModelGroupMarker):
"""marker for sequence model group
"""
pass
class ExtensionMarker:
"""marker for extensions
"""
pass
class RestrictionMarker:
"""marker for restrictions
"""
facets = ['enumeration', 'length', 'maxExclusive', 'maxInclusive',\
'maxLength', 'minExclusive', 'minInclusive', 'minLength',\
'pattern', 'fractionDigits', 'totalDigits', 'whiteSpace']
class SimpleMarker:
"""marker for simple type information
"""
pass
class ListMarker:
"""marker for simple type list
"""
pass
class UnionMarker:
"""marker for simple type Union
"""
pass
class ComplexMarker:
"""marker for complex type information
"""
pass
class LocalMarker:
"""marker for complex type information
"""
pass
class MarkerInterface:
    """Mixin of boolean predicates keyed off the marker base classes."""
    def isDefinition(self):
        return isinstance(self, DefinitionMarker)

    def isDeclaration(self):
        return isinstance(self, DeclarationMarker)

    def isAttribute(self):
        return isinstance(self, AttributeMarker)

    def isAttributeGroup(self):
        return isinstance(self, AttributeGroupMarker)

    def isElement(self):
        return isinstance(self, ElementMarker)

    def isReference(self):
        return isinstance(self, ReferenceMarker)

    def isWildCard(self):
        return isinstance(self, WildCardMarker)

    def isModelGroup(self):
        return isinstance(self, ModelGroupMarker)

    def isAll(self):
        return isinstance(self, AllMarker)

    def isChoice(self):
        return isinstance(self, ChoiceMarker)

    def isSequence(self):
        return isinstance(self, SequenceMarker)

    def isExtension(self):
        return isinstance(self, ExtensionMarker)

    def isRestriction(self):
        return isinstance(self, RestrictionMarker)

    def isSimple(self):
        return isinstance(self, SimpleMarker)

    def isComplex(self):
        return isinstance(self, ComplexMarker)

    def isLocal(self):
        return isinstance(self, LocalMarker)

    def isList(self):
        return isinstance(self, ListMarker)

    def isUnion(self):
        return isinstance(self, UnionMarker)
##########################################################
# Schema Components
#########################################################
class XMLSchemaComponent(XMLBase, MarkerInterface):
    """Base class for all schema components.

    Handles DOM attribute parsing, namespace-prefix resolution, and
    dereferencing of QName-valued attributes against the collections of
    the enclosing Schema instance.

    class variables:
        required -- list of required attributes
        attributes -- dict of default attribute values, including None.
            Value can be a function for runtime dependencies.
        contents -- dict of namespace keyed content lists.
            'xsd' content of xsd namespace.
        xmlns_key -- key for declared xmlns namespace.
        xmlns -- xmlns is special prefix for namespace dictionary
        xml -- special xml prefix for xml namespace.
    """
    required = []
    attributes = {}
    contents = {}
    xmlns_key = ''
    xmlns = 'xmlns'
    xml = 'xml'

    def __init__(self, parent=None):
        """parent -- parent instance

        instance variables:
            attributes -- dictionary of node's attributes
                (None until setAttributes() is called)
        """
        self.attributes = None
        # The parent is held through a weak reference so component trees
        # don't create reference cycles; deref with self._parent().
        self._parent = parent
        if self._parent:
            self._parent = weakref.ref(parent)

        # Guard against subclasses overriding the class variables with
        # values of the wrong type (required must stay a list, attributes
        # and contents must stay dicts).
        if not self.__class__ == XMLSchemaComponent\
           and not (type(self.__class__.required) == type(XMLSchemaComponent.required)\
           and type(self.__class__.attributes) == type(XMLSchemaComponent.attributes)\
           and type(self.__class__.contents) == type(XMLSchemaComponent.contents)):
            raise RuntimeError, 'Bad type for a class variable in %s' %self.__class__

    def getItemTrace(self):
        """Returns a node trace up to the <schema> item.

        The trace is a string of nested start tags, e.g.
        '<schema targetNamespace="..."><element name="x">', built by
        walking the weakref parent chain upward.
        """
        item, path, name, ref = self, [], 'name', 'ref'
        while not isinstance(item,XMLSchema) and not isinstance(item,WSDLToolsAdapter):
            attr = item.getAttribute(name)
            if not attr:
                attr = item.getAttribute(ref)
                if not attr:
                    path.append('<%s>' %(item.tag))
                else:
                    path.append('<%s ref="%s">' %(item.tag, attr))
            else:
                path.append('<%s name="%s">' %(item.tag,attr))

            # deref the weakref to step to the parent component
            item = item._parent()
        try:
            tns = item.getTargetNamespace()
        except:
            # NOTE(review): bare except -- any failure while fetching the
            # targetNamespace is treated as "no targetNamespace"; confirm
            # this blanket swallow is intentional.
            tns = ''
        path.append('<%s targetNamespace="%s">' %(item.tag, tns))
        path.reverse()
        return ''.join(path)

    def getTargetNamespace(self):
        """return targetNamespace

        Walks up the parent chain until a component carrying a
        targetNamespace attribute is found; '' if none is declared.
        """
        parent = self
        targetNamespace = 'targetNamespace'
        tns = self.attributes.get(targetNamespace)
        while not tns and parent and parent._parent is not None:
            parent = parent._parent()
            tns = parent.attributes.get(targetNamespace)
        return tns or ''

    def getAttributeDeclaration(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
        collection -- check attr_decl collection in parent Schema instance
        """
        return self.getQNameAttribute(ATTRIBUTES, attribute)

    def getAttributeGroup(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
        collection -- check attr_groups collection in parent Schema instance
        """
        return self.getQNameAttribute(ATTRIBUTE_GROUPS, attribute)

    def getTypeDefinition(self, attribute):
        """attribute -- attribute with a QName value (eg. type).
        collection -- check types collection in parent Schema instance
        """
        return self.getQNameAttribute(TYPES, attribute)

    def getElementDeclaration(self, attribute):
        """attribute -- attribute with a QName value (eg. element).
        collection -- check elements collection in parent Schema instance.
        """
        return self.getQNameAttribute(ELEMENTS, attribute)

    def getModelGroup(self, attribute):
        """attribute -- attribute with a QName value (eg. ref).
        collection -- check model_group collection in parent Schema instance.
        """
        return self.getQNameAttribute(MODEL_GROUPS, attribute)

    def getQNameAttribute(self, collection, attribute):
        """returns object instance representing QName --> (namespace,name),
        or if does not exist return None.

        attribute -- an information item attribute, with a QName value.
        collection -- collection in parent Schema instance to search.
        """
        tdc = self.getAttributeQName(attribute)
        if not tdc:
            return

        obj = self.getSchemaItem(collection, tdc.getTargetNamespace(), tdc.getName())
        if obj:
            return obj

#        raise SchemaError, 'No schema item "%s" in collection %s' %(tdc, collection)
        return

    def getSchemaItem(self, collection, namespace, name):
        """returns object instance representing namespace, name,
        or if does not exist return None if built-in, else
        raise SchemaError.

        namespace -- namespace item defined in.
        name -- name of item.
        collection -- collection in parent Schema instance to search.
        """
        parent = GetSchema(self)
        # Fast path: the item lives in this schema's own targetNamespace.
        if parent.targetNamespace == namespace:
            try:
                obj = getattr(parent, collection)[name]
            except KeyError, ex:
                raise KeyError, 'targetNamespace(%s) collection(%s) has no item(%s)'\
                    %(namespace, collection, name)

            return obj

        if not parent.imports.has_key(namespace):
            if namespace in BUILT_IN_NAMESPACES:
                # built-in just return
                # WARNING: expecting import if "redefine" or add to built-in namespace.
                return

            raise SchemaError, 'schema "%s" does not import namespace "%s"' %(
                parent.targetNamespace, namespace)

        # Lazy Eval: the imports entry may still be a placeholder that
        # resolves itself via getSchema(); cache the real XMLSchema back
        # into the collection once it is available.
        schema = parent.imports[namespace]
        if not isinstance(schema, XMLSchema):
            schema = schema.getSchema()
            if schema is not None:
                parent.imports[namespace] = schema

        if schema is None:
            if namespace in BUILT_IN_NAMESPACES:
                # built-in just return
                return

            raise SchemaError, 'no schema instance for imported namespace (%s).'\
                %(namespace)

        if not isinstance(schema, XMLSchema):
            raise TypeError, 'expecting XMLSchema instance not "%r"' %schema

        try:
            obj = getattr(schema, collection)[name]
        except KeyError, ex:
            raise KeyError, 'targetNamespace(%s) collection(%s) has no item(%s)'\
                %(namespace, collection, name)

        return obj

    def getXMLNS(self, prefix=None):
        """deference prefix or by default xmlns, returns namespace.

        Searches this component's declared xmlns dictionary, then walks
        up the parent chain; raises SchemaError for an unknown non-None
        prefix once the WSDL adapter root is reached.
        """
        if prefix == XMLSchemaComponent.xml:
            return XMLNS.XML
        parent = self
        ns = self.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                XMLSchemaComponent.xmlns_key)
        while not ns:
            parent = parent._parent()
            ns = parent.attributes[XMLSchemaComponent.xmlns].get(prefix or\
                    XMLSchemaComponent.xmlns_key)
            if not ns and isinstance(parent, WSDLToolsAdapter):
                if prefix is None:
                    return ''
                raise SchemaError, 'unknown prefix %s' %prefix
        return ns

    def getAttribute(self, attribute):
        """return requested attribute value or None

        attribute -- either a plain name, or a (namespace, name) pair for
            attributes declared in a namespace.
        """
        if type(attribute) in (list, tuple):
            if len(attribute) != 2:
                raise LookupError, 'To access attributes must use name or (namespace,name)'

            ns_dict = self.attributes.get(attribute[0])
            if ns_dict is None:
                return None

            return ns_dict.get(attribute[1])

        return self.attributes.get(attribute)

    def getAttributeQName(self, attribute):
        """return requested attribute value as (namespace,name) or None
        """
        qname = self.getAttribute(attribute)
        # setAttributes() may already have converted the value.
        if isinstance(qname, TypeDescriptionComponent) is True:
            return qname
        if qname is None:
            return None

        prefix,ncname = SplitQName(qname)
        namespace = self.getXMLNS(prefix)
        return TypeDescriptionComponent((namespace,ncname))

    def getAttributeName(self):
        """return attribute name or None
        """
        return self.getAttribute('name')

    def setAttributes(self, node):
        """Sets up attribute dictionary, checks for required attributes and
        sets default attribute values. attr is for default attribute values
        determined at runtime.

        structure of attributes dictionary
            ['xmlns'][xmlns_key] -- xmlns namespace
            ['xmlns'][prefix] -- declared namespace prefix
            [namespace][prefix] -- attributes declared in a namespace
            [attribute] -- attributes w/o prefix, default namespaces do
                not directly apply to attributes, ie Name can't collide
                with QName.
        """
        self.attributes = {XMLSchemaComponent.xmlns:{}}
        for k,v in node.getAttributeDictionary().items():
            prefix,value = SplitQName(k)
            if value == XMLSchemaComponent.xmlns:
                # xmlns declaration: record under the declared prefix, or
                # under xmlns_key for the default namespace.
                self.attributes[value][prefix or XMLSchemaComponent.xmlns_key] = v
            elif prefix:
                # prefixed attribute: file it under its namespace URI.
                ns = node.getNamespace(prefix)
                if not ns:
                    raise SchemaError, 'no namespace for attribute prefix %s'\
                        %prefix
                if not self.attributes.has_key(ns):
                    self.attributes[ns] = {}
                elif self.attributes[ns].has_key(value):
                    raise SchemaError, 'attribute %s declared multiple times in %s'\
                        %(value, ns)
                self.attributes[ns][value] = v
            elif not self.attributes.has_key(value):
                self.attributes[value] = v
            else:
                raise SchemaError, 'attribute %s declared multiple times' %value

        # The WSDL adapter carries extension attributes the schema
        # validator would reject, so skip the legality check for it.
        if not isinstance(self, WSDLToolsAdapter):
            self.__checkAttributes()
        self.__setAttributeDefaults()

        #set QNames: convert the raw string values of QName-typed
        #attributes into TypeDescriptionComponent (namespace, name) pairs.
        for k in ['type', 'element', 'base', 'ref', 'substitutionGroup', 'itemType']:
            if self.attributes.has_key(k):
                prefix, value = SplitQName(self.attributes.get(k))
                self.attributes[k] = \
                    TypeDescriptionComponent((self.getXMLNS(prefix), value))

        #Union, memberTypes is a whitespace separated list of QNames
        for k in ['memberTypes']:
            if self.attributes.has_key(k):
                qnames = self.attributes[k]
                self.attributes[k] = []
                for qname in qnames.split():
                    prefix, value = SplitQName(qname)
                    self.attributes['memberTypes'].append(\
                        TypeDescriptionComponent(\
                            (self.getXMLNS(prefix), value)))

    def getContents(self, node):
        """retrieve xsd contents

        Returns the child nodes of *node* whose tags appear in this
        class's contents['xsd'] list.
        """
        return node.getContentList(*self.__class__.contents['xsd'])

    def __setAttributeDefaults(self):
        """Looks for default values for unset attributes. If
        class variable representing attribute is None, then
        it must be defined as an instance variable.
        """
        for k,v in self.__class__.attributes.items():
            if v is not None and self.attributes.has_key(k) is False:
                if isinstance(v, types.FunctionType):
                    # runtime-computed default (eg. form defaults taken
                    # from the enclosing schema)
                    self.attributes[k] = v(self)
                else:
                    self.attributes[k] = v

    def __checkAttributes(self):
        """Checks that required attributes have been defined,
        attributes w/default cannot be required. Checks
        all defined attributes are legal, attribute
        references are not subject to this test.
        """
        for a in self.__class__.required:
            if not self.attributes.has_key(a):
                raise SchemaError,\
                    'class instance %s, missing required attribute %s'\
                    %(self.__class__, a)

        for a,v in self.attributes.items():
            # attribute #other, ie. not in empty namespace
            if type(v) is dict:
                continue

            # predefined prefixes xmlns, xml
            if a in (XMLSchemaComponent.xmlns, XMLNS.XML):
                continue

            if (a not in self.__class__.attributes.keys()) and not\
                (self.isAttribute() and self.isReference()):
                raise SchemaError, '%s, unknown attribute(%s,%s)' \
                    %(self.getItemTrace(), a, self.attributes[a])
class WSDLToolsAdapter(XMLSchemaComponent):
    """Adapter exposing a WSDL <definitions> document node through the
    XMLSchemaComponent attribute machinery, so schema components can
    treat the WSDL document as their root parent.
    """
    attributes = {'name': None, 'targetNamespace': None}
    tag = 'definitions'

    def __init__(self, wsdl):
        adapter = DOMAdapter(wsdl.document)
        XMLSchemaComponent.__init__(self, parent=wsdl)
        self.setAttributes(adapter)

    def getImportSchemas(self):
        """Return the WSDLTools.WSDL types Collection of the parent."""
        return self._parent().types
class Notation(XMLSchemaComponent):
    """<notation> -- global notation declaration.

    parent:
        schema
    attributes:
        id -- ID
        name -- NCName, Required
        public -- token, Required
        system -- anyURI
    contents:
        annotation?
    """
    required = ['name', 'public']
    attributes = {'id':None, 'name':None, 'public':None, 'system':None}
    # BUG FIX: this was {'xsd': ('annotation')} -- parentheses without a
    # trailing comma do not create a tuple, so getContents() unpacked the
    # *string* 'annotation' into single-character arguments
    # ('a','n','n','o',...).  Use a list, as every sibling class does.
    contents = {'xsd': ['annotation']}
    tag = 'notation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def fromDom(self, node):
        """Populate attributes and the optional annotation child from node.

        Raises SchemaError on any child element other than <annotation>.
        """
        self.setAttributes(node)
        contents = self.getContents(node)

        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(i)
            else:
                raise SchemaError('Unknown component (%s)' %(i.getTagName()))
class Annotation(XMLSchemaComponent):
    """<annotation> -- may appear inside nearly every schema construct
    (all, any, attribute, complexType, element, schema, ...).

    attributes:
        id -- ID
    contents:
        (documentation | appinfo)*
    """
    attributes = {'id': None}
    contents = {'xsd': ('documentation', 'appinfo')}
    tag = 'annotation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        collected = []
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            # documentation and appinfo children are deliberately skipped,
            # so nothing is ever collected and content ends up empty.
            if tag in ('documentation', 'appinfo'):
                continue
            raise SchemaError('Unknown component (%s)' %(child.getTagName()))
        self.content = tuple(collected)
class Documentation(XMLSchemaComponent):
    """<documentation> -- human-readable annotation content.

    parent:
        annotation
    attributes:
        source -- anyURI
        xml:lang -- language
    contents:
        mixed, any
    """
    attributes = {'source': None, 'xml:lang': None}
    contents = {'xsd': ('mixed', 'any')}
    tag = 'documentation'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        collected = []
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            # mixed and any children are deliberately skipped, so the
            # collected content is always empty.
            if tag in ('mixed', 'any'):
                continue
            raise SchemaError('Unknown component (%s)' %(child.getTagName()))
        self.content = tuple(collected)
class Appinfo(XMLSchemaComponent):
    """<appinfo> -- machine-readable annotation content.

    parent:
        annotation
    attributes:
        source -- anyURI
    contents:
        mixed, any
    """
    # NOTE(review): 'anyURI' looks like the *type* of source mistakenly
    # listed as an attribute name; kept as-is to preserve behavior --
    # confirm against the XML Schema spec before removing.
    attributes = {'source':None, 'anyURI':None}
    contents = {'xsd': ('mixed', 'any')}
    tag = 'appinfo'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        collected = []
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            # mixed and any children are deliberately skipped, so the
            # collected content is always empty.
            if tag in ('mixed', 'any'):
                continue
            raise SchemaError('Unknown component (%s)' %(child.getTagName()))
        self.content = tuple(collected)
class XMLSchemaFake:
    """Temporary stand-in for a real schema, for the benefit of WSDL:
    holds only the DOM element and its targetNamespace attribute.
    """
    def __init__(self, element):
        self.element = element
        self.targetNamespace = DOM.getAttr(element, 'targetNamespace')
class XMLSchema(XMLSchemaComponent):
    """A schema is a collection of schema components derived from one
    or more schema documents, that is, one or more <schema> element
    information items. It represents the abstract notion of a schema
    rather than a single schema document (or other representation).

    <schema>
    parent:
        ROOT
    attributes:
        id -- ID
        version -- token
        xml:lang -- language
        targetNamespace -- anyURI
        attributeFormDefault -- 'qualified' | 'unqualified', 'unqualified'
        elementFormDefault -- 'qualified' | 'unqualified', 'unqualified'
        blockDefault -- '#all' | list of
            ('substitution | 'extension' | 'restriction')
        finalDefault -- '#all' | list of
            ('extension' | 'restriction' | 'list' | 'union')

    contents:
        ((include | import | redefine | annotation)*,
         (attribute, attributeGroup, complexType, element, group,
          notation, simpleType)*, annotation*)*

    attributes -- schema attributes
    imports -- import statements
    includes -- include statements
    redefines --
    types -- global simpleType, complexType definitions
    elements -- global element declarations
    attr_decl -- global attribute declarations
    attr_groups -- attribute Groups
    model_groups -- model Groups
    notations -- global notations
    """
    attributes = {'id':None,
        'version':None,
        'xml:lang':None,
        'targetNamespace':None,
        'attributeFormDefault':'unqualified',
        'elementFormDefault':'unqualified',
        'blockDefault':None,
        'finalDefault':None}
    contents = {'xsd':('include', 'import', 'redefine', 'annotation',
                       'attribute', 'attributeGroup', 'complexType',
                       'element', 'group', 'notation', 'simpleType',
                       'annotation')}
    empty_namespace = ''
    tag = 'schema'

    def __init__(self, parent=None):
        """parent --
        instance variables:
            targetNamespace -- schema's declared targetNamespace, or empty string.
            _imported_schemas -- namespace keyed dict of schema dependencies, if
                a schema is provided instance will not resolve import statement.
            _included_schemas -- schemaLocation keyed dict of component schemas,
                if schema is provided instance will not resolve include statement.
            _base_url -- needed for relative URLs support, only works with URLs
                relative to initial document.
            includes -- collection of include statements
            imports -- collection of import statements
            elements -- collection of global element declarations
            types -- collection of global type definitions
            attr_decl -- collection of global attribute declarations
            attr_groups -- collection of global attribute group definitions
            model_groups -- collection of model group definitions
            notations -- collection of notations
        """
        self.__node = None
        self.targetNamespace = None
        XMLSchemaComponent.__init__(self, parent)
        # Collection key functions: by name, by declared namespace, and
        # by schemaLocation respectively.
        f = lambda k: k.attributes['name']
        ns = lambda k: k.attributes['namespace']
        sl = lambda k: k.attributes['schemaLocation']
        self.includes = Collection(self, key=sl)
        self.imports = Collection(self, key=ns)
        self.elements = Collection(self, key=f)
        self.types = Collection(self, key=f)
        self.attr_decl = Collection(self, key=f)
        self.attr_groups = Collection(self, key=f)
        self.model_groups = Collection(self, key=f)
        self.notations = Collection(self, key=f)

        self._imported_schemas = {}
        self._included_schemas = {}
        self._base_url = None

    def getNode(self):
        """
        Interacting with the underlying DOM tree.
        """
        return self.__node

    def addImportSchema(self, schema):
        """for resolving import statements in Schema instance
        schema -- schema instance
        _imported_schemas
        """
        if not isinstance(schema, XMLSchema):
            raise TypeError, 'expecting a Schema instance'
        # imports must target a *different* namespace than this schema's.
        if schema.targetNamespace != self.targetNamespace:
            self._imported_schemas[schema.targetNamespace] = schema
        else:
            raise SchemaError, 'import schema bad targetNamespace'

    def addIncludeSchema(self, schemaLocation, schema):
        """for resolving include statements in Schema instance
        schemaLocation -- schema location
        schema -- schema instance
        _included_schemas
        """
        if not isinstance(schema, XMLSchema):
            raise TypeError, 'expecting a Schema instance'
        # includes must share this schema's targetNamespace (or have
        # none at all -- a "chameleon" include).
        if not schema.targetNamespace or\
             schema.targetNamespace == self.targetNamespace:
            self._included_schemas[schemaLocation] = schema
        else:
            raise SchemaError, 'include schema bad targetNamespace'

    def setImportSchemas(self, schema_dict):
        """set the import schema dictionary, which is used to
        reference depedent schemas.
        """
        self._imported_schemas = schema_dict

    def getImportSchemas(self):
        """get the import schema dictionary, which is used to
        reference depedent schemas.
        """
        return self._imported_schemas

    def getSchemaNamespacesToImport(self):
        """returns tuple of namespaces the schema instance has declared
        itself to be depedent upon.
        """
        # NOTE(review): this returns self.includes, which is keyed by
        # schemaLocation, not by namespace -- looks like
        # self.imports.keys() may have been intended; confirm before
        # changing, callers may rely on the current behavior.
        return tuple(self.includes.keys())

    def setIncludeSchemas(self, schema_dict):
        """set the include schema dictionary, which is keyed with
        schemaLocation (uri).
        This is a means of providing
        schemas to the current schema for content inclusion.
        """
        self._included_schemas = schema_dict

    def getIncludeSchemas(self):
        """get the include schema dictionary, which is keyed with
        schemaLocation (uri).
        """
        return self._included_schemas

    def getBaseUrl(self):
        """get base url, used for normalizing all relative uri's
        """
        return self._base_url

    def setBaseUrl(self, url):
        """set base url, used for normalizing all relative uri's
        """
        self._base_url = url

    def getElementFormDefault(self):
        """return elementFormDefault attribute
        """
        return self.attributes.get('elementFormDefault')

    def isElementFormDefaultQualified(self):
        # convenience predicate over getElementFormDefault()
        return self.attributes.get('elementFormDefault') == 'qualified'

    def getAttributeFormDefault(self):
        """return attributeFormDefault attribute
        """
        return self.attributes.get('attributeFormDefault')

    def getBlockDefault(self):
        """return blockDefault attribute
        """
        return self.attributes.get('blockDefault')

    def getFinalDefault(self):
        """return finalDefault attribute
        """
        return self.attributes.get('finalDefault')

    def load(self, node, location=None):
        """Populate this schema from a <schema> DOM node.

        node -- DOM adapter for the <schema> element.
        location -- unused here; accepted for caller convenience.

        Resolves <include> by splicing the included document's DOM
        children and collections into this schema, and <import> by
        loading (or lazily deferring) the referenced schema.
        """
        self.__node = node

        pnode = node.getParentNode()
        if pnode:
            pname = SplitQName(pnode.getTagName())[1]
            if pname == 'types':
                # schema nested in a WSDL <types> section: merge xmlns
                # declarations of the parent node into this schema's.
                attributes = {}
                self.setAttributes(pnode)
                attributes.update(self.attributes)
                self.setAttributes(node)
                for k,v in attributes['xmlns'].items():
                    if not self.attributes['xmlns'].has_key(k):
                        self.attributes['xmlns'][k] = v
            else:
                self.setAttributes(node)
        else:
            self.setAttributes(node)

        self.targetNamespace = self.getTargetNamespace()
        for childNode in self.getContents(node):
            component = SplitQName(childNode.getTagName())[1]

            if component == 'include':
                tp = self.__class__.Include(self)
                tp.fromDom(childNode)

                sl = tp.attributes['schemaLocation']
                schema = tp.getSchema()

                if not self.getIncludeSchemas().has_key(sl):
                    self.addIncludeSchema(sl, schema)

                self.includes[sl] = tp

                # Splice the included document's DOM children in place of
                # the <include> element itself.
                pn = childNode.getParentNode().getNode()
                pn.removeChild(childNode.getNode())
                for child in schema.getNode().getNode().childNodes:
                    pn.appendChild(child.cloneNode(1))

                # Adopt the included schema's components, re-parenting
                # each to this schema; first definition wins on clashes.
                for collection in ['imports','elements','types',
                                   'attr_decl','attr_groups','model_groups',
                                   'notations']:
                    for k,v in getattr(schema,collection).items():
                        if not getattr(self,collection).has_key(k):
                            v._parent = weakref.ref(self)
                            getattr(self,collection)[k] = v
                        else:
                            warnings.warn("Not keeping schema component.")

            elif component == 'import':
                # Shared cache of already-loaded schemas, keyed by
                # namespace, so the same import is only fetched once.
                slocd = SchemaReader.namespaceToSchema
                tp = self.__class__.Import(self)
                tp.fromDom(childNode)
                import_ns = tp.getAttribute('namespace') or\
                    self.__class__.empty_namespace

                schema = slocd.get(import_ns)
                if schema is None:
                    schema = XMLSchema()
                    slocd[import_ns] = schema
                    try:
                        tp.loadSchema(schema)
                    except NoSchemaLocationWarning, ex:
                        # Dependency declaration, hopefully implementation
                        # is aware of this namespace (eg. SOAP,WSDL,?)
                        print "IMPORT: ", import_ns
                        print ex
                        del slocd[import_ns]
                        continue
                    except SchemaError, ex:
                        #warnings.warn(\
                        #    '<import namespace="%s" schemaLocation=?>, %s'\
                        #    %(import_ns, 'failed to load schema instance')
                        #)
                        print ex
                        del slocd[import_ns]

                        class _LazyEvalImport(str):
                            '''Lazy evaluation of import, replace entry in self.imports.'''
                            #attributes = dict(namespace=import_ns)
                            # The instance *is* the namespace string: the
                            # first positional argument plays the role of
                            # self; 'self' and 'slocd' are closed over
                            # from the enclosing load() call.
                            def getSchema(namespace):
                                schema = slocd.get(namespace)
                                if schema is None:
                                    parent = self._parent()
                                    wstypes = parent
                                    if isinstance(parent, WSDLToolsAdapter):
                                        wstypes = parent.getImportSchemas()
                                    schema = wstypes.get(namespace)
                                if isinstance(schema, XMLSchema):
                                    self.imports[namespace] = schema
                                    return schema
                                return None

                        self.imports[import_ns] = _LazyEvalImport(import_ns)
                        continue
                else:
                    tp._schema = schema

                if self.getImportSchemas().has_key(import_ns):
                    warnings.warn(\
                        'Detected multiple imports of the namespace "%s" '\
                        %import_ns)

                self.addImportSchema(schema)
                # spec says can have multiple imports of same namespace
                # but purpose of import is just dependency declaration.
                self.imports[import_ns] = tp

            elif component == 'redefine':
                warnings.warn('redefine is ignored')
            elif component == 'annotation':
                warnings.warn('annotation is ignored')
            elif component == 'attribute':
                tp = AttributeDeclaration(self)
                tp.fromDom(childNode)
                self.attr_decl[tp.getAttribute('name')] = tp
            elif component == 'attributeGroup':
                tp = AttributeGroupDefinition(self)
                tp.fromDom(childNode)
                self.attr_groups[tp.getAttribute('name')] = tp
            elif component == 'element':
                tp = ElementDeclaration(self)
                tp.fromDom(childNode)
                self.elements[tp.getAttribute('name')] = tp
            elif component == 'group':
                tp = ModelGroupDefinition(self)
                tp.fromDom(childNode)
                self.model_groups[tp.getAttribute('name')] = tp
            elif component == 'notation':
                tp = Notation(self)
                tp.fromDom(childNode)
                self.notations[tp.getAttribute('name')] = tp
            elif component == 'complexType':
                tp = ComplexType(self)
                tp.fromDom(childNode)
                self.types[tp.getAttribute('name')] = tp
            elif component == 'simpleType':
                tp = SimpleType(self)
                tp.fromDom(childNode)
                self.types[tp.getAttribute('name')] = tp
            else:
                # NOTE(review): an unrecognized child silently *stops*
                # processing of all remaining children instead of raising
                # -- confirm this is intentional.
                break

    class Import(XMLSchemaComponent):
        """<import>
        parent:
            schema
        attributes:
            id -- ID
            namespace -- anyURI
            schemaLocation -- anyURI
        contents:
            annotation?
        """
        attributes = {'id':None,
            'namespace':None,
            'schemaLocation':None}
        contents = {'xsd':['annotation']}
        tag = 'import'

        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
            self._schema = None

        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)

            # an import must reference a namespace other than its own
            if self.attributes['namespace'] == self.getTargetNamespace():
                raise SchemaError, 'namespace of schema and import match'

            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

        def getSchema(self):
            """if schema is not defined, first look for a Schema class instance
            in parent Schema. Else if not defined resolve schemaLocation
            and create a new Schema class instance, and keep a hard reference.
            """
            if not self._schema:
                ns = self.attributes['namespace']
                # look in the parent schema, then the grandparent (eg. the
                # WSDL adapter), before resolving schemaLocation ourselves
                schema = self._parent().getImportSchemas().get(ns)
                if not schema and self._parent()._parent:
                    schema = self._parent()._parent().getImportSchemas().get(ns)

                if not schema:
                    url = self.attributes.get('schemaLocation')
                    if not url:
                        raise SchemaError, 'namespace(%s) is unknown' %ns
                    base_url = self._parent().getBaseUrl()
                    reader = SchemaReader(base_url=base_url)
                    reader._imports = self._parent().getImportSchemas()
                    reader._includes = self._parent().getIncludeSchemas()
                    self._schema = reader.loadFromURL(url)
            return self._schema or schema

        def loadSchema(self, schema):
            """Load the schemaLocation document into *schema*.

            Raises NoSchemaLocationWarning when the import carries no
            schemaLocation attribute.
            """
            base_url = self._parent().getBaseUrl()
            reader = SchemaReader(base_url=base_url)
            reader._imports = self._parent().getImportSchemas()
            reader._includes = self._parent().getIncludeSchemas()
            self._schema = schema

            if not self.attributes.has_key('schemaLocation'):
                raise NoSchemaLocationWarning('no schemaLocation attribute in import')

            reader.loadFromURL(self.attributes.get('schemaLocation'), schema)

    class Include(XMLSchemaComponent):
        """<include schemaLocation>
        parent:
            schema
        attributes:
            id -- ID
            schemaLocation -- anyURI, required
        contents:
            annotation?
        """
        required = ['schemaLocation']
        attributes = {'id':None,
            'schemaLocation':None}
        contents = {'xsd':['annotation']}
        tag = 'include'

        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
            self._schema = None

        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)

            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

        def getSchema(self):
            """if schema is not defined, first look for a Schema class instance
            in parent Schema. Else if not defined resolve schemaLocation
            and create a new Schema class instance.
            """
            if not self._schema:
                schema = self._parent()
                self._schema = schema.getIncludeSchemas().get(\
                                   self.attributes['schemaLocation']
                                   )
                if not self._schema:
                    url = self.attributes['schemaLocation']
                    reader = SchemaReader(base_url=schema.getBaseUrl())
                    reader._imports = schema.getImportSchemas()
                    reader._includes = schema.getIncludeSchemas()

                    # create schema before loading so chameleon include
                    # will evalute targetNamespace correctly.
                    self._schema = XMLSchema(schema)
                    reader.loadFromURL(url, self._schema)

            return self._schema
class AttributeDeclaration(XMLSchemaComponent,
                           AttributeMarker,
                           DeclarationMarker):
    """<attribute name> -- global attribute declaration.

    parent:
        schema
    attributes:
        id -- ID
        name -- NCName, required
        type -- QName
        default -- string
        fixed -- string
    contents:
        annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id': None,
                  'name': None,
                  'type': None,
                  'default': None,
                  'fixed': None}
    contents = {'xsd': ['annotation', 'simpleType']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        """Populate from a DOM node.  No list or union support.
        """
        self.setAttributes(node)
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            if tag == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(child)
            elif tag == 'simpleType':
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(child)
            else:
                raise SchemaError('Unknown component (%s)' %(child.getTagName()))
class LocalAttributeDeclaration(AttributeDeclaration,
                                AttributeMarker,
                                LocalMarker,
                                DeclarationMarker):
    """<attribute name> -- attribute declaration local to a type.

    parent:
        complexType, restriction, extension, attributeGroup
    attributes:
        id -- ID
        name -- NCName, required
        type -- QName
        form -- ('qualified' | 'unqualified'), schema.attributeFormDefault
        use -- ('optional' | 'prohibited' | 'required'), optional
        default -- string
        fixed -- string
    contents:
        annotation?, simpleType?
    """
    required = ['name']
    attributes = {'id': None,
                  'name': None,
                  'type': None,
                  # runtime default: inherited from the enclosing schema
                  'form': lambda self: GetSchema(self).getAttributeFormDefault(),
                  'use': 'optional',
                  'default': None,
                  'fixed': None}
    contents = {'xsd': ['annotation', 'simpleType']}

    def __init__(self, parent):
        AttributeDeclaration.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        self.setAttributes(node)
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            if tag == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(child)
            elif tag == 'simpleType':
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(child)
            else:
                raise SchemaError('Unknown component (%s)' %(child.getTagName()))
class AttributeWildCard(XMLSchemaComponent,
                        AttributeMarker,
                        DeclarationMarker,
                        WildCardMarker):
    """<anyAttribute> -- attribute wildcard.

    parents:
        complexType, restriction, extension, attributeGroup
    attributes:
        id -- ID
        namespace -- '##any' | '##other' |
            (anyURI* | '##targetNamespace' | '##local'), ##any
        processContents -- 'lax' | 'skip' | 'strict', strict
    contents:
        annotation?
    """
    attributes = {'id': None,
                  'namespace': '##any',
                  'processContents': 'strict'}
    contents = {'xsd': ['annotation']}
    tag = 'anyAttribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def fromDom(self, node):
        self.setAttributes(node)
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            if tag == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(child)
            else:
                raise SchemaError('Unknown component (%s)' %(child.getTagName()))
class AttributeReference(XMLSchemaComponent,
                         AttributeMarker,
                         ReferenceMarker):
    """<attribute ref> -- reference to a global attribute declaration.

    parents:
        complexType, restriction, extension, attributeGroup
    attributes:
        id -- ID
        ref -- QName, required
        use -- ('optional' | 'prohibited' | 'required'), optional
        default -- string
        fixed -- string
    contents:
        annotation?
    """
    required = ['ref']
    attributes = {'id': None,
                  'ref': None,
                  'use': 'optional',
                  'default': None,
                  'fixed': None}
    contents = {'xsd': ['annotation']}
    tag = 'attribute'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def getAttributeDeclaration(self, attribute='ref'):
        """Resolve the referenced declaration; defaults to the 'ref' QName."""
        return XMLSchemaComponent.getAttributeDeclaration(self, attribute)

    def fromDom(self, node):
        self.setAttributes(node)
        for child in self.getContents(node):
            tag = SplitQName(child.getTagName())[1]
            if tag == 'annotation' and not self.annotation:
                self.annotation = Annotation(self)
                self.annotation.fromDom(child)
            else:
                raise SchemaError('Unknown component (%s)' %(child.getTagName()))
class AttributeGroupDefinition(XMLSchemaComponent,
                               AttributeGroupMarker,
                               DefinitionMarker):
    """<attributeGroup name> -- named attribute group definition.

    parents:
        schema, redefine
    attributes:
        id -- ID
        name -- NCName, required
    contents:
        annotation?, (attribute | attributeGroup)*, anyAttribute?
    """
    required = ['name']
    attributes = {'id': None,
                  'name': None}
    contents = {'xsd': ['annotation', 'attribute', 'attributeGroup', 'anyAttribute']}
    tag = 'attributeGroup'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.attr_content = None

    def getAttributeContent(self):
        return self.attr_content

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        collected = []
        last = len(contents) - 1

        for indx, child in enumerate(contents):
            component = SplitQName(child.getTagName())[1]
            if component == 'annotation' and not indx:
                # annotation only honored in the first position
                self.annotation = Annotation(self)
                self.annotation.fromDom(child)
            elif component == 'attribute':
                # child node's hasattr() distinguishes declaration vs reference
                if child.hasattr('name'):
                    collected.append(LocalAttributeDeclaration(self))
                elif child.hasattr('ref'):
                    collected.append(AttributeReference(self))
                else:
                    raise SchemaError('Unknown attribute type')
                collected[-1].fromDom(child)
            elif component == 'attributeGroup':
                collected.append(AttributeGroupReference(self))
                collected[-1].fromDom(child)
            elif component == 'anyAttribute':
                # the wildcard must be the final child
                if indx != last:
                    raise SchemaError('anyAttribute is out of order in %s' % self.getItemTrace())
                collected.append(AttributeWildCard(self))
                collected[-1].fromDom(child)
            else:
                raise SchemaError('Unknown component (%s)' %(child.getTagName()))

        self.attr_content = tuple(collected)
class AttributeGroupReference(XMLSchemaComponent,\
AttributeGroupMarker,\
ReferenceMarker):
"""<attributeGroup ref>
parents:
complexType, restriction, extension, attributeGroup
attributes:
id -- ID
ref -- QName, required
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None}
contents = {'xsd':['annotation']}
tag = 'attributeGroup'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getAttributeGroup(self, attribute='ref'):
"""attribute -- attribute with a QName value (eg. type).
collection -- check types collection in parent Schema instance
"""
return XMLSchemaComponent.getAttributeGroup(self, attribute)
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
######################################################
# Elements
#####################################################
class IdentityConstrants(XMLSchemaComponent):
    """Allow one to uniquely identify nodes in a document and ensure the
       integrity of references between them.

       (The class name retains the original misspelling of "Constraints";
       it is part of the public API and cannot be renamed.)

       attributes -- dictionary of attributes
       selector -- XPath to selected nodes
       fields -- list of XPath to key field
    """
    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.selector = None
        self.fields = None
        self.annotation = None

    def fromDom(self, node):
        # Parse annotation?, selector, field+ children.  The legal child
        # list comes from the subclass's 'contents' class attribute
        # (Unique/Key/KeyRef define it; this base class does not).
        self.setAttributes(node)
        contents = self.getContents(node)
        fields = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'selector':
                    self.selector = self.Selector(self)
                    self.selector.fromDom(i)
                    continue
                elif component == 'field':
                    fields.append(self.Field(self))
                    fields[-1].fromDom(i)
                    continue
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.fields = tuple(fields)

    class Constraint(XMLSchemaComponent):
        # Shared base for Selector/Field: attributes plus optional annotation.
        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None

        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)
            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component in self.__class__.contents['xsd']:
                    if component == 'annotation' and not self.annotation:
                        self.annotation = Annotation(self)
                        self.annotation.fromDom(i)
                    else:
                        raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())

    class Selector(Constraint):
        """<selector xpath>
           parent:
               unique, key, keyref
           attributes:
               id -- ID
               xpath -- XPath subset, required
           contents:
               annotation?
        """
        required = ['xpath']
        attributes = {'id':None,
            'xpath':None}
        contents = {'xsd':['annotation']}
        tag = 'selector'

    class Field(Constraint):
        """<field xpath>
           parent:
               unique, key, keyref
           attributes:
               id -- ID
               xpath -- XPath subset, required
           contents:
               annotation?
        """
        required = ['xpath']
        attributes = {'id':None,
            'xpath':None}
        contents = {'xsd':['annotation']}
        tag = 'field'
class Unique(IdentityConstrants):
    """<unique name> Enforce fields are unique w/i a specified scope.

       parent:
           element
       attributes:
           id -- ID
           name -- NCName, required
       contents:
           annotation?, selector, field+
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    # consumed by IdentityConstrants.fromDom via self.__class__.contents
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'unique'
class Key(IdentityConstrants):
    """<key name> Enforce fields are unique w/i a specified scope, and all
       field values are present w/i document.  Fields cannot
       be nillable.

       parent:
           element
       attributes:
           id -- ID
           name -- NCName, required
       contents:
           annotation?, selector, field+
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    # consumed by IdentityConstrants.fromDom via self.__class__.contents
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'key'
class KeyRef(IdentityConstrants):
    """<keyref name refer> Ensure a match between two sets of values in an
       instance.

       parent:
           element
       attributes:
           id -- ID
           name -- NCName, required
           refer -- QName, required
       contents:
           annotation?, selector, field+
    """
    required = ['name', 'refer']
    attributes = {'id':None,
        'name':None,
        # 'refer' names the key/unique constraint this keyref matches against
        'refer':None}
    # consumed by IdentityConstrants.fromDom via self.__class__.contents
    contents = {'xsd':['annotation', 'selector', 'field']}
    tag = 'keyref'
class ElementDeclaration(XMLSchemaComponent,\
                         ElementMarker,\
                         DeclarationMarker):
    """<element name>  -- a global element declaration.
       parents:
           schema
       attributes:
           id -- ID
           name -- NCName, required
           type -- QName
           default -- string
           fixed -- string
           nillable -- boolean, false
           abstract -- boolean, false
           substitutionGroup -- QName
           block -- ('#all' | ('substitution' | 'extension' | 'restriction')*),
               schema.blockDefault
           final -- ('#all' | ('extension' | 'restriction')*),
               schema.finalDefault
       contents:
           annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'type':None,
        'default':None,
        'fixed':None,
        'nillable':0,
        'abstract':0,
        'substitutionGroup':None,
        # block/final fall back to the enclosing schema's defaults (lazy)
        'block':lambda self: self._parent().getBlockDefault(),
        'final':lambda self: self._parent().getFinalDefault()}
    contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\
        'keyref', 'unique']}
    tag = 'element'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # anonymous simpleType/complexType child, if any
        self.content = None
        # assignment goes through the 'constraints' property below
        self.constraints = ()

    def isQualified(self):
        """Global elements are always qualified.
        """
        return True

    def getAttribute(self, attribute):
        """return attribute.
        If attribute is type and it's None, and no simple or complex content,
        return the default type "xsd:anyType"
        """
        value = XMLSchemaComponent.getAttribute(self, attribute)
        if attribute != 'type' or value is not None:
            return value
        if self.content is not None:
            return None
        # No explicit type and no anonymous content: default to xsd:anyType.
        # Walk up the component tree for an in-scope XSD namespace
        # declaration to build the QName with.
        parent = self
        while 1:
            nsdict = parent.attributes[XMLSchemaComponent.xmlns]
            for k,v in nsdict.items():
                if v not in SCHEMA.XSD_LIST: continue
                return TypeDescriptionComponent((v, 'anyType'))
            if isinstance(parent, WSDLToolsAdapter)\
               or not hasattr(parent, '_parent'):
                break
            parent = parent._parent()
        raise SchemaError, 'failed to locate the XSD namespace'

    def getElementDeclaration(self, attribute):
        # Not meaningful on a declaration itself.
        raise Warning, 'invalid operation for <%s>' %self.tag

    def getTypeDefinition(self, attribute=None):
        """If attribute is None, "type" is assumed, return the corresponding
        representation of the global type definition (TypeDefinition),
        or the local definition if don't find "type".  To maintain backwards
        compat, if attribute is provided call base class method.
        """
        if attribute:
            return XMLSchemaComponent.getTypeDefinition(self, attribute)
        gt = XMLSchemaComponent.getTypeDefinition(self, 'type')
        if gt:
            return gt
        return self.content

    def getConstraints(self):
        return self._constraints
    def setConstraints(self, constraints):
        # always stored as an immutable tuple
        self._constraints = tuple(constraints)
    constraints = property(getConstraints, setConstraints, None, "tuple of key, keyref, unique constraints")

    def fromDom(self, node):
        # Parse attributes, optional annotation, optional anonymous type,
        # and any identity constraints (key | keyref | unique).
        self.setAttributes(node)
        contents = self.getContents(node)
        constraints = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                elif component == 'simpleType' and not self.content:
                    self.content = AnonymousSimpleType(self)
                    self.content.fromDom(i)
                elif component == 'complexType' and not self.content:
                    self.content = LocalComplexType(self)
                    self.content.fromDom(i)
                elif component == 'key':
                    constraints.append(Key(self))
                    constraints[-1].fromDom(i)
                elif component == 'keyref':
                    constraints.append(KeyRef(self))
                    constraints[-1].fromDom(i)
                elif component == 'unique':
                    constraints.append(Unique(self))
                    constraints[-1].fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.constraints = constraints
class LocalElementDeclaration(ElementDeclaration,\
                              LocalMarker):
    """<element>  -- an element declared inside a model group.
       parents:
           all, choice, sequence
       attributes:
           id -- ID
           name -- NCName, required
           form -- ('qualified' | 'unqualified'), schema.elementFormDefault
           type -- QName
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1
           default -- string
           fixed -- string
           nillable -- boolean, false
           block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault
       contents:
           annotation?, (simpleType,complexType)?, (key | keyref | unique)*
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        # form falls back to the schema's elementFormDefault (lazy)
        'form':lambda self: GetSchema(self).getElementFormDefault(),
        'type':None,
        'minOccurs':'1',
        'maxOccurs':'1',
        'default':None,
        'fixed':None,
        'nillable':0,
        'abstract':0,
        'block':lambda self: GetSchema(self).getBlockDefault()}
    contents = {'xsd':['annotation', 'simpleType', 'complexType', 'key',\
        'keyref', 'unique']}

    def isQualified(self):
        """
        Local elements can be qualified or unqualifed according
        to the attribute form, or the elementFormDefault.  By default
        local elements are unqualified.
        """
        form = self.getAttribute('form')
        if form == 'qualified':
            return True
        if form == 'unqualified':
            return False
        # anything other than the two legal values is a schema error
        raise SchemaError, 'Bad form (%s) for element: %s' %(form, self.getItemTrace())
class ElementReference(XMLSchemaComponent,\
ElementMarker,\
ReferenceMarker):
"""<element ref>
parents:
all, choice, sequence
attributes:
id -- ID
ref -- QName, required
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation']}
tag = 'element'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getElementDeclaration(self, attribute=None):
"""If attribute is None, "ref" is assumed, return the corresponding
representation of the global element declaration (ElementDeclaration),
To maintain backwards compat, if attribute is provided call base class method.
"""
if attribute:
return XMLSchemaComponent.getElementDeclaration(self, attribute)
return XMLSchemaComponent.getElementDeclaration(self, 'ref')
def fromDom(self, node):
self.annotation = None
self.setAttributes(node)
for i in self.getContents(node):
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ElementWildCard(LocalElementDeclaration, WildCardMarker):
    """<any>  -- element wildcard inside a model group.
       parents:
           choice, sequence
       attributes:
           id -- ID
           minOccurs -- Whole Number, 1
           maxOccurs -- (Whole Number | 'unbounded'), 1
           namespace -- '##any' | '##other' |
               (anyURI* | '##targetNamespace' | '##local'), ##any
           processContents -- 'lax' | 'skip' | 'strict', strict
       contents:
           annotation?
    """
    required = []
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1',
        'namespace':'##any',
        'processContents':'strict'}
    contents = {'xsd':['annotation']}
    tag = 'any'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None

    def isQualified(self):
        """
        Global elements are always qualified, but if processContents
        are not strict could have dynamically generated local elements.
        """
        return GetSchema(self).isElementFormDefaultQualified()

    def getAttribute(self, attribute):
        """return attribute.
        """
        # overrides ElementDeclaration.getAttribute: no xsd:anyType fallback
        return XMLSchemaComponent.getAttribute(self, attribute)

    def getTypeDefinition(self, attribute):
        # a wildcard has no type definition
        raise Warning, 'invalid operation for <%s>' % self.tag

    def fromDom(self, node):
        # Only an optional annotation child is legal.
        self.annotation = None
        self.setAttributes(node)
        for i in self.getContents(node):
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
######################################################
# Model Groups
#####################################################
class Sequence(XMLSchemaComponent,\
SequenceMarker):
"""<sequence>
parents:
complexType, extension, restriction, group, choice, sequence
attributes:
id -- ID
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?, (element | group | choice | sequence | any)*
"""
attributes = {'id':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\
'any']}
tag = 'sequence'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
content = []
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
continue
elif component == 'element':
if i.hasattr('ref'):
content.append(ElementReference(self))
else:
content.append(LocalElementDeclaration(self))
elif component == 'group':
content.append(ModelGroupReference(self))
elif component == 'choice':
content.append(Choice(self))
elif component == 'sequence':
content.append(Sequence(self))
elif component == 'any':
content.append(ElementWildCard(self))
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
content[-1].fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.content = tuple(content)
class All(XMLSchemaComponent,\
          AllMarker):
    """<all>  -- model group whose element children may appear in any order.
       parents:
           complexType, extension, restriction, group
       attributes:
           id -- ID
           minOccurs -- '0' | '1', 1
           maxOccurs -- '1', 1
       contents:
           annotation?, element*
    """
    attributes = {'id':None,
        'minOccurs':'1',
        'maxOccurs':'1'}
    contents = {'xsd':['annotation', 'element']}
    tag = 'all'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        # Parse an optional annotation followed by element children only.
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'element':
                    # 'ref' means a reference to a global element,
                    # otherwise a local declaration
                    if i.hasattr('ref'):
                        content.append(ElementReference(self))
                    else:
                        content.append(LocalElementDeclaration(self))
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                content[-1].fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
        self.content = tuple(content)
class Choice(XMLSchemaComponent,\
ChoiceMarker):
"""<choice>
parents:
complexType, extension, restriction, group, choice, sequence
attributes:
id -- ID
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?, (element | group | choice | sequence | any)*
"""
attributes = {'id':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation', 'element', 'group', 'choice', 'sequence',\
'any']}
tag = 'choice'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
content = []
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
continue
elif component == 'element':
if i.hasattr('ref'):
content.append(ElementReference(self))
else:
content.append(LocalElementDeclaration(self))
elif component == 'group':
content.append(ModelGroupReference(self))
elif component == 'choice':
content.append(Choice(self))
elif component == 'sequence':
content.append(Sequence(self))
elif component == 'any':
content.append(ElementWildCard(self))
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
content[-1].fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
self.content = tuple(content)
class ModelGroupDefinition(XMLSchemaComponent,\
                           ModelGroupMarker,\
                           DefinitionMarker):
    """<group name>  -- a named, reusable model group.
       parents:
           redefine, schema
       attributes:
           id -- ID
           name -- NCName, required
       contents:
           annotation?, (all | choice | sequence)?
    """
    required = ['name']
    attributes = {'id':None,
        'name':None}
    contents = {'xsd':['annotation', 'all', 'choice', 'sequence']}
    tag = 'group'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        self.content = None

    def fromDom(self, node):
        # Parse an optional annotation plus at most one model group
        # (all | choice | sequence); a second model group child raises
        # because the '... and not self.content' guards fail.
        self.setAttributes(node)
        contents = self.getContents(node)
        for i in contents:
            component = SplitQName(i.getTagName())[1]
            if component in self.__class__.contents['xsd']:
                if component == 'annotation' and not self.annotation:
                    self.annotation = Annotation(self)
                    self.annotation.fromDom(i)
                    continue
                elif component == 'all' and not self.content:
                    self.content = All(self)
                elif component == 'choice' and not self.content:
                    self.content = Choice(self)
                elif component == 'sequence' and not self.content:
                    self.content = Sequence(self)
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                self.content.fromDom(i)
            else:
                raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ModelGroupReference(XMLSchemaComponent,\
ModelGroupMarker,\
ReferenceMarker):
"""<group ref>
parents:
choice, complexType, extension, restriction, sequence
attributes:
id -- ID
ref -- NCName, required
minOccurs -- Whole Number, 1
maxOccurs -- (Whole Number | 'unbounded'), 1
contents:
annotation?
"""
required = ['ref']
attributes = {'id':None,
'ref':None,
'minOccurs':'1',
'maxOccurs':'1'}
contents = {'xsd':['annotation']}
tag = 'group'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
def getModelGroupReference(self):
return self.getModelGroup('ref')
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
for i in contents:
component = SplitQName(i.getTagName())[1]
if component in self.__class__.contents['xsd']:
if component == 'annotation' and not self.annotation:
self.annotation = Annotation(self)
self.annotation.fromDom(i)
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
else:
raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
class ComplexType(XMLSchemaComponent,\
                  DefinitionMarker,\
                  ComplexMarker):
    """<complexType name>
       parents:
           redefine, schema
       attributes:
           id -- ID
           name -- NCName, required
           mixed -- boolean, false
           abstract -- boolean, false
           block -- ('#all' | ('extension' | 'restriction')*), schema.blockDefault
           final -- ('#all' | ('extension' | 'restriction')*), schema.finalDefault
       contents:
           annotation?, (simpleContent | complexContent |
           ((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?))
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        'mixed':0,
        'abstract':0,
        # block/final fall back to the enclosing schema's defaults (lazy)
        'block':lambda self: self._parent().getBlockDefault(),
        'final':lambda self: self._parent().getFinalDefault()}
    contents = {'xsd':['annotation', 'simpleContent', 'complexContent',\
        'group', 'all', 'choice', 'sequence', 'attribute', 'attributeGroup',\
        'anyAttribute', 'any']}
    tag = 'complexType'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # model group or (simple|complex)Content derivation
        self.content = None
        # (attribute | attributeGroup | anyAttribute) children
        self.attr_content = None

    def isMixed(self):
        # 'mixed' may be the literal default 0/False or an XML string value
        # ('true'/'false'/'1'/'0'); normalize to a bool or raise.
        m = self.getAttribute('mixed')
        if m == 0 or m == False:
            return False
        if isinstance(m, basestring) is True:
            if m in ('false', '0'):
                return False
            if m in ('true', '1'):
                return True
        raise SchemaError, 'invalid value for attribute mixed(%s): %s'\
            %(m, self.getItemTrace())

    def getAttributeContent(self):
        # Return the parsed attribute children (list, or None before fromDom).
        return self.attr_content

    def getElementDeclaration(self, attribute):
        # not meaningful on a type definition
        raise Warning, 'invalid operation for <%s>' %self.tag

    def getTypeDefinition(self, attribute):
        # not meaningful on a type definition
        raise Warning, 'invalid operation for <%s>' %self.tag

    def fromDom(self, node):
        # Parse: annotation?, then either a simpleContent/complexContent
        # derivation or an optional model group followed by attribute content.
        self.setAttributes(node)
        contents = self.getContents(node)
        indx = 0
        num = len(contents)
        if not num:
            return
        component = SplitQName(contents[indx].getTagName())[1]
        if component == 'annotation':
            self.annotation = Annotation(self)
            self.annotation.fromDom(contents[indx])
            indx += 1
        if indx < num:
            component = SplitQName(contents[indx].getTagName())[1]
            self.content = None
            if component == 'simpleContent':
                # nested class looked up via self.__class__ so subclasses
                # may substitute their own SimpleContent/ComplexContent
                self.content = self.__class__.SimpleContent(self)
                self.content.fromDom(contents[indx])
            elif component == 'complexContent':
                self.content = self.__class__.ComplexContent(self)
                self.content.fromDom(contents[indx])
            else:
                if component == 'all':
                    self.content = All(self)
                elif component == 'choice':
                    self.content = Choice(self)
                elif component == 'sequence':
                    self.content = Sequence(self)
                elif component == 'group':
                    self.content = ModelGroupReference(self)
                if self.content:
                    self.content.fromDom(contents[indx])
                    indx += 1
                self.attr_content = []
                while indx < num:
                    component = SplitQName(contents[indx].getTagName())[1]
                    if component == 'attribute':
                        if contents[indx].hasattr('ref'):
                            self.attr_content.append(AttributeReference(self))
                        else:
                            self.attr_content.append(LocalAttributeDeclaration(self))
                    elif component == 'attributeGroup':
                        self.attr_content.append(AttributeGroupReference(self))
                    elif component == 'anyAttribute':
                        self.attr_content.append(AttributeWildCard(self))
                    else:
                        raise SchemaError, 'Unknown component (%s): %s' \
                            %(contents[indx].getTagName(),self.getItemTrace())
                    self.attr_content[-1].fromDom(contents[indx])
                    indx += 1
    class _DerivedType(XMLSchemaComponent):
        # Common base for simpleContent/complexContent: an optional
        # annotation followed by exactly one (restriction | extension)
        # derivation, instantiated from the subclass's nested classes.
        def __init__(self, parent):
            XMLSchemaComponent.__init__(self, parent)
            self.annotation = None
            # XXX remove attribute derivation, inconsistent
            self.derivation = None
            # content aliases derivation once fromDom completes
            self.content = None

        def fromDom(self, node):
            self.setAttributes(node)
            contents = self.getContents(node)
            for i in contents:
                component = SplitQName(i.getTagName())[1]
                if component in self.__class__.contents['xsd']:
                    if component == 'annotation' and not self.annotation:
                        self.annotation = Annotation(self)
                        self.annotation.fromDom(i)
                        continue
                    elif component == 'restriction' and not self.derivation:
                        # subclass supplies the concrete Restriction class
                        self.derivation = self.__class__.Restriction(self)
                    elif component == 'extension' and not self.derivation:
                        self.derivation = self.__class__.Extension(self)
                    else:
                        raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                else:
                    raise SchemaError, 'Unknown component (%s)' %(i.getTagName())
                self.derivation.fromDom(i)
            self.content = self.derivation
    class ComplexContent(_DerivedType,\
                         ComplexMarker):
        """<complexContent>
           parents:
               complexType
           attributes:
               id -- ID
               mixed -- boolean, false
           contents:
               annotation?, (restriction | extension)
        """
        attributes = {'id':None,
            'mixed':0}
        contents = {'xsd':['annotation', 'restriction', 'extension']}
        tag = 'complexContent'

        def isMixed(self):
            # 'mixed' may be the literal default 0/False or an XML string
            # value ('true'/'false'/'1'/'0'); normalize to a bool or raise.
            m = self.getAttribute('mixed')
            if m == 0 or m == False:
                return False
            if isinstance(m, basestring) is True:
                if m in ('false', '0'):
                    return False
                if m in ('true', '1'):
                    return True
            raise SchemaError, 'invalid value for attribute mixed(%s): %s'\
                %(m, self.getItemTrace())
class _DerivationBase(XMLSchemaComponent):
"""<extension>,<restriction>
parents:
complexContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (group | all | choice | sequence)?,
(attribute | attributeGroup)*, anyAttribute?
"""
required = ['base']
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'group', 'all', 'choice',\
'sequence', 'attribute', 'attributeGroup', 'anyAttribute']}
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
#XXX ugly
if not num:
return
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
if component == 'all':
self.content = All(self)
self.content.fromDom(contents[indx])
indx += 1
elif component == 'choice':
self.content = Choice(self)
self.content.fromDom(contents[indx])
indx += 1
elif component == 'sequence':
self.content = Sequence(self)
self.content.fromDom(contents[indx])
indx += 1
elif component == 'group':
self.content = ModelGroupReference(self)
self.content.fromDom(contents[indx])
indx += 1
else:
self.content = None
self.attr_content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
self.attr_content.append(AttributeReference(self))
else:
self.attr_content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
if contents[indx].hasattr('ref'):
self.attr_content.append(AttributeGroupReference(self))
else:
self.attr_content.append(AttributeGroupDefinition(self))
elif component == 'anyAttribute':
self.attr_content.append(AttributeWildCard(self))
else:
raise SchemaError, 'Unknown component (%s)' %(contents[indx].getTagName())
self.attr_content[-1].fromDom(contents[indx])
indx += 1
        class Extension(_DerivationBase,
                        ExtensionMarker):
            """<extension base>
               parents:
                   complexContent
               attributes:
                   id -- ID
                   base -- QName, required
               contents:
                   annotation?, (group | all | choice | sequence)?,
                       (attribute | attributeGroup)*, anyAttribute?
            """
            # parsing behavior is inherited from _DerivationBase
            tag = 'extension'
        class Restriction(_DerivationBase,\
                          RestrictionMarker):
            """<restriction base>
               parents:
                   complexContent
               attributes:
                   id -- ID
                   base -- QName, required
               contents:
                   annotation?, (group | all | choice | sequence)?,
                       (attribute | attributeGroup)*, anyAttribute?
            """
            # parsing behavior is inherited from _DerivationBase
            tag = 'restriction'
    class SimpleContent(_DerivedType,\
                        SimpleMarker):
        """<simpleContent>
           parents:
               complexType
           attributes:
               id -- ID
           contents:
               annotation?, (restriction | extension)
        """
        attributes = {'id':None}
        contents = {'xsd':['annotation', 'restriction', 'extension']}
        tag = 'simpleContent'
class Extension(XMLSchemaComponent,\
ExtensionMarker):
"""<extension base>
parents:
simpleContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, (attribute | attributeGroup)*, anyAttribute?
"""
required = ['base']
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'attribute', 'attributeGroup',
'anyAttribute']}
tag = 'extension'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def fromDom(self, node):
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
if num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
content.append(AttributeReference(self))
else:
content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
content.append(AttributeGroupReference(self))
elif component == 'anyAttribute':
content.append(AttributeWildCard(self))
else:
raise SchemaError, 'Unknown component (%s)'\
%(contents[indx].getTagName())
content[-1].fromDom(contents[indx])
indx += 1
self.attr_content = tuple(content)
class Restriction(XMLSchemaComponent,\
RestrictionMarker):
"""<restriction base>
parents:
simpleContent
attributes:
id -- ID
base -- QName, required
contents:
annotation?, simpleType?, (enumeration | length |
maxExclusive | maxInclusive | maxLength | minExclusive |
minInclusive | minLength | pattern | fractionDigits |
totalDigits | whiteSpace)*, (attribute | attributeGroup)*,
anyAttribute?
"""
required = ['base']
attributes = {'id':None,
'base':None }
contents = {'xsd':['annotation', 'simpleType', 'attribute',\
'attributeGroup', 'anyAttribute'] + RestrictionMarker.facets}
tag = 'restriction'
def __init__(self, parent):
XMLSchemaComponent.__init__(self, parent)
self.annotation = None
self.content = None
self.attr_content = None
def getAttributeContent(self):
return self.attr_content
def fromDom(self, node):
self.content = []
self.setAttributes(node)
contents = self.getContents(node)
indx = 0
num = len(contents)
component = SplitQName(contents[indx].getTagName())[1]
if component == 'annotation':
self.annotation = Annotation(self)
self.annotation.fromDom(contents[indx])
indx += 1
component = SplitQName(contents[indx].getTagName())[1]
content = []
while indx < num:
component = SplitQName(contents[indx].getTagName())[1]
if component == 'attribute':
if contents[indx].hasattr('ref'):
content.append(AttributeReference(self))
else:
content.append(LocalAttributeDeclaration(self))
elif component == 'attributeGroup':
content.append(AttributeGroupReference(self))
elif component == 'anyAttribute':
content.append(AttributeWildCard(self))
elif component == 'simpleType':
self.content.append(AnonymousSimpleType(self))
self.content[-1].fromDom(contents[indx])
else:
raise SchemaError, 'Unknown component (%s)'\
%(contents[indx].getTagName())
content[-1].fromDom(contents[indx])
indx += 1
self.attr_content = tuple(content)
class LocalComplexType(ComplexType,\
                       LocalMarker):
    """<complexType>  -- anonymous complexType nested inside an element;
       parsing behavior is inherited from ComplexType.
       parents:
           element
       attributes:
           id -- ID
           mixed -- boolean, false
       contents:
           annotation?, (simpleContent | complexContent |
           ((group | all | choice | sequence)?, (attribute | attributeGroup)*, anyAttribute?))
    """
    # anonymous type: no 'name' attribute is required
    required = []
    attributes = {'id':None,
        'mixed':0}
    tag = 'complexType'
class SimpleType(XMLSchemaComponent,\
                 DefinitionMarker,\
                 SimpleMarker):
    """<simpleType name>
       parents:
           redefine, schema
       attributes:
           id -- ID
           name -- NCName, required
           final -- ('#all' | ('extension' | 'restriction' | 'list' | 'union')*),
               schema.finalDefault
       contents:
           annotation?, (restriction | list | union)
    """
    required = ['name']
    attributes = {'id':None,
        'name':None,
        # final falls back to the enclosing schema's default (lazy)
        'final':lambda self: self._parent().getFinalDefault()}
    contents = {'xsd':['annotation', 'restriction', 'list', 'union']}
    tag = 'simpleType'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None
        # (restriction | list | union) derivation child
        self.content = None

    def getElementDeclaration(self, attribute):
        # not meaningful on a type definition
        raise Warning, 'invalid operation for <%s>' %self.tag

    def getTypeDefinition(self, attribute):
        # not meaningful on a type definition
        raise Warning, 'invalid operation for <%s>' %self.tag

    def fromDom(self, node):
        self.setAttributes(node)
        contents = self.getContents(node)
        # Scan past any leading annotations; after the loop, 'child' and
        # 'component' refer to the first non-annotation child.
        for child in contents:
            component = SplitQName(child.getTagName())[1]
            if component == 'annotation':
                self.annotation = Annotation(self)
                self.annotation.fromDom(child)
                continue
            break
        else:
            # no non-annotation children: nothing further to parse
            return
        if component == 'restriction':
            # nested classes looked up via self.__class__ so subclasses
            # (e.g. AnonymousSimpleType) may substitute their own
            self.content = self.__class__.Restriction(self)
        elif component == 'list':
            self.content = self.__class__.List(self)
        elif component == 'union':
            self.content = self.__class__.Union(self)
        else:
            raise SchemaError, 'Unknown component (%s)' %(component)
        self.content.fromDom(child)
class Restriction(XMLSchemaComponent, RestrictionMarker):
    """<restriction base>
    parents:
        simpleType
    attributes:
        id -- ID
        base -- QName, required or simpleType child
    contents:
        annotation?, simpleType?, (enumeration | length |
        maxExclusive | maxInclusive | maxLength | minExclusive |
        minInclusive | minLength | pattern | fractionDigits |
        totalDigits | whiteSpace)*
    """
    attributes = {'id': None,
                  'base': None}
    contents = {'xsd': ['annotation', 'simpleType'] + RestrictionMarker.facets}
    tag = 'restriction'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None   # optional <annotation> child
        self.content = None      # tuple of anonymous simpleType children
        self.facets = None       # raw facet DOM nodes, filled in by fromDom

    def getAttributeBase(self):
        """Return the raw 'base' attribute value (QName string or None)."""
        return XMLSchemaComponent.getAttribute(self, 'base')

    def getTypeDefinition(self, attribute='base'):
        """Resolve the type referred to by the 'base' attribute."""
        return XMLSchemaComponent.getTypeDefinition(self, attribute)

    def getSimpleTypeContent(self):
        """Return the first simple child of this restriction, or None."""
        for el in self.content:
            if el.isSimple():
                return el
        return None

    def fromDom(self, node):
        """Populate attributes, facets, and simpleType children from DOM."""
        self.facets = []
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
                continue
            elif (component == 'simpleType') and (not indx or indx == 1):
                content.append(AnonymousSimpleType(self))
                content[-1].fromDom(contents[indx])
            elif component in RestrictionMarker.facets:
                self.facets.append(contents[indx])
            else:
                # BUG FIX: the original referenced an undefined name 'i' here,
                # turning the intended SchemaError into a NameError.
                raise SchemaError('Unknown component (%s)' % contents[indx].getTagName())
        self.content = tuple(content)
class Union(XMLSchemaComponent, UnionMarker):
    """<union>
    parents:
        simpleType
    attributes:
        id -- ID
        memberTypes -- list of QNames, required or simpleType child.
    contents:
        annotation?, simpleType*
    """
    attributes = {'id': None,
                  'memberTypes': None}
    contents = {'xsd': ['annotation', 'simpleType']}
    tag = 'union'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None   # optional <annotation> child
        self.content = None      # tuple of member anonymous simpleTypes

    def fromDom(self, node):
        """Populate the union's annotation and member simpleTypes from DOM."""
        self.setAttributes(node)
        contents = self.getContents(node)
        content = []
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif (component == 'simpleType'):
                content.append(AnonymousSimpleType(self))
                content[-1].fromDom(contents[indx])
            else:
                # BUG FIX: the original referenced an undefined name 'i' here,
                # turning the intended SchemaError into a NameError.
                raise SchemaError('Unknown component (%s)' % contents[indx].getTagName())
        self.content = tuple(content)
class List(XMLSchemaComponent, ListMarker):
    """<list>
    parents:
        simpleType
    attributes:
        id -- ID
        itemType -- QName, required or simpleType child.
    contents:
        annotation?, simpleType?
    """
    attributes = {'id': None,
                  'itemType': None}
    contents = {'xsd': ['annotation', 'simpleType']}
    tag = 'list'

    def __init__(self, parent):
        XMLSchemaComponent.__init__(self, parent)
        self.annotation = None   # optional <annotation> child
        self.content = None      # anonymous simpleType child, if present

    def getItemType(self):
        """Return the raw 'itemType' attribute value (QName string or None)."""
        return self.attributes.get('itemType')

    def getTypeDefinition(self, attribute='itemType'):
        """
        return the type refered to by itemType attribute or
        the simpleType content. If returns None, then the
        type refered to by itemType is primitive.
        """
        tp = XMLSchemaComponent.getTypeDefinition(self, attribute)
        return tp or self.content

    def fromDom(self, node):
        """Populate the list's annotation and item simpleType from DOM."""
        self.annotation = None
        self.content = None
        self.setAttributes(node)
        contents = self.getContents(node)
        for indx in range(len(contents)):
            component = SplitQName(contents[indx].getTagName())[1]
            if (component == 'annotation') and (not indx):
                self.annotation = Annotation(self)
                self.annotation.fromDom(contents[indx])
            elif (component == 'simpleType'):
                self.content = AnonymousSimpleType(self)
                self.content.fromDom(contents[indx])
                break
            else:
                # BUG FIX: the original referenced an undefined name 'i' here,
                # turning the intended SchemaError into a NameError.
                raise SchemaError('Unknown component (%s)' % contents[indx].getTagName())
class AnonymousSimpleType(SimpleType, SimpleMarker, LocalMarker):
    """<simpleType> (anonymous, locally declared)
    parents:
        attribute, element, list, restriction, union
    attributes:
        id -- ID
    contents:
        annotation?, (restriction | list | union)
    """
    # Anonymous simpleTypes carry no 'name', so nothing is required.
    required = []
    attributes = {'id':None}
    tag = 'simpleType'
class Redefine:
    """<redefine>
    parents:
    attributes:
    contents:

    NOTE(review): placeholder only -- <redefine> support is not implemented.
    """
    tag = 'redefine'
###########################
###########################
# Pick the base class for TypeDescriptionComponent: the builtin tuple on
# Python >= 2.2 (new-style classes can subclass builtins), otherwise the
# pure-Python UserTuple fallback for older interpreters.
if sys.version_info[:2] >= (2, 2):
    tupleClass = tuple
else:
    import UserTuple
    tupleClass = UserTuple.UserTuple
class TypeDescriptionComponent(tupleClass):
    """Tuple of length 2, consisting of
    a namespace and unprefixed name.
    """
    def __init__(self, args):
        """args -- (namespace, name)
        Remove the name's prefix, irrelevant.

        NOTE(review): on Python >= 2.2 the base class is the immutable
        builtin tuple, whose contents are fixed by __new__ before this
        runs; rebinding 'args' here does NOT strip the prefix from the
        stored value, and tuple.__init__ ignores its argument. The
        stripping only takes effect on the UserTuple fallback path.
        A real fix would move the logic into __new__ -- TODO confirm.
        """
        if len(args) != 2:
            raise TypeError, 'expecting tuple (namespace, name), got %s' %args
        elif args[1].find(':') >= 0:
            args = (args[0], SplitQName(args[1])[1])
        tuple.__init__(self, args)
        return
    def getTargetNamespace(self):
        """Return the namespace component (element 0)."""
        return self[0]
    def getName(self):
        """Return the name component (element 1)."""
        return self[1]
| gpl-3.0 |
pepsipepsi/nodebox_opengl_python3 | nodebox/ext/psyco/src/test/bpnn.py | 4 | 5155 | #!/usr/bin/python
# Back-Propagation Neural Networks
#
# Written in Python. See http://www.python.org/
#
# Neil Schemenauer <nascheme@enme.ucalgary.ca>
import math
import random
import operator
import string
import psyco
#psyco.full()
from psyco.classes import *
#psyco.log()
#psyco.profile()
#__metaclass__ = type
random.seed(0)  # fixed seed so weight initialisation (and results) are reproducible
def time(fn, *args):
    """Call fn(*args) and return (result, elapsed_seconds).

    On an exception, the traceback is printed and result is '<exception>'.
    NOTE(review): this function shadows the stdlib 'time' module name at
    module scope, which is why the module is imported locally here. It
    uses time.clock(), removed in Python 3.8 -- acceptable for this
    Python 2 benchmark script.
    """
    import time, traceback
    begin = time.clock()
    try:
        result = fn(*args)
    except:
        # deliberate catch-all: the benchmark should survive a failing demo run
        end = time.clock()
        traceback.print_exc()
        result = '<exception>'
    else:
        end = time.clock()
    return result, end-begin
# calculate a random number where: a <= rand < b
def rand(a, b):
    """Return a uniformly distributed float r with a <= r < b."""
    span = b - a
    return a + span * random.random()
# Make a matrix (we could use NumPy to speed this up)
def makeMatrix(I, J, fill=0.0):
    """Return an I-by-J matrix (list of independent row lists) of *fill*."""
    return [[fill] * J for _ in range(I)]
class NN:
    """Fully-connected feed-forward network with one hidden layer,
    trained by back-propagation with momentum. (Python 2 code.)"""
    def __init__(self, ni, nh, no):
        # number of input, hidden, and output nodes
        self.ni = ni + 1 # +1 for bias node
        self.nh = nh
        self.no = no
        # activations for nodes
        self.ai = [1.0]*self.ni
        self.ah = [1.0]*self.nh
        self.ao = [1.0]*self.no
        # create weights
        self.wi = makeMatrix(self.ni, self.nh)
        self.wo = makeMatrix(self.nh, self.no)
        # set them to random vaules
        for i in range(self.ni):
            for j in range(self.nh):
                self.wi[i][j] = rand(-2.0, 2.0)
        for j in range(self.nh):
            for k in range(self.no):
                self.wo[j][k] = rand(-2.0, 2.0)
        # last change in weights for momentum
        self.ci = makeMatrix(self.ni, self.nh)
        self.co = makeMatrix(self.nh, self.no)
    def update(self, inputs):
        """Forward pass: propagate *inputs* and return a copy of the outputs."""
        if len(inputs) != self.ni-1:
            raise ValueError, 'wrong number of inputs'
        # input activations (the extra bias node keeps its 1.0 activation)
        for i in range(self.ni-1):
            #self.ai[i] = 1.0/(1.0+math.exp(-inputs[i]))
            self.ai[i] = inputs[i]
        # hidden activations: sigmoid of the weighted input sum
        # (NB: 'sum' intentionally shadows the builtin here)
        for j in range(self.nh):
            sum = 0.0
            for i in range(self.ni):
                sum = sum + self.ai[i] * self.wi[i][j]
            self.ah[j] = 1.0/(1.0+math.exp(-sum))
        # output activations: sigmoid of the weighted hidden sum
        for k in range(self.no):
            sum = 0.0
            for j in range(self.nh):
                sum = sum + self.ah[j] * self.wo[j][k]
            self.ao[k] = 1.0/(1.0+math.exp(-sum))
        return self.ao[:]
    def backPropagate(self, targets, N, M):
        """Backward pass toward *targets*; returns 0.5 * sum of squared errors.

        N is the learning rate, M the momentum factor. Assumes update()
        was called first so the activations are current.
        """
        if len(targets) != self.no:
            raise ValueError, 'wrong number of target values'
        # calculate error terms for output (sigmoid derivative is a*(1-a))
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            ao = self.ao[k]
            output_deltas[k] = ao*(1-ao)*(targets[k]-ao)
        # calculate error terms for hidden
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            sum = 0.0
            for k in range(self.no):
                sum = sum + output_deltas[k]*self.wo[j][k]
            hidden_deltas[j] = self.ah[j]*(1-self.ah[j])*sum
        # update output weights (gradient step plus momentum term)
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k]*self.ah[j]
                self.wo[j][k] = self.wo[j][k] + N*change + M*self.co[j][k]
                self.co[j][k] = change
                #print N*change, M*self.co[j][k]
        # update input weights
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j]*self.ai[i]
                self.wi[i][j] = self.wi[i][j] + N*change + M*self.ci[i][j]
                self.ci[i][j] = change
        # calculate error
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5*(targets[k]-self.ao[k])**2
        return error
    def test(self, patterns):
        """Print each pattern's input alongside the network's prediction."""
        for p in patterns:
            print p[0], '->', self.update(p[0])
    def weights(self):
        """Dump the current input and output weight matrices."""
        print 'Input weights:'
        for i in range(self.ni):
            print self.wi[i]
        print
        print 'Output weights:'
        for j in range(self.nh):
            print self.wo[j]
    def train(self, patterns, iterations=2000, N=0.5, M=0.1):
        """Train on (input, target) *patterns*; prints error every 100 epochs."""
        # N: learning rate
        # M: momentum factor
        for i in xrange(iterations):
            error = 0.0
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if i % 100 == 0:
                print 'error %-14f' % error
def demo():
    """Train a small network on XOR and print its predictions."""
    # Teach network XOR function
    pat = [
        [[0,0], [0]],
        [[0,1], [1]],
        [[1,0], [1]],
        [[1,1], [0]]
    ]
    # create a network with two input, three hidden, and one output node
    # (the original comment wrongly said "two hidden, and two output")
    n = NN(2, 3, 1)
    # train it with some patterns
    n.train(pat, 2000)
    # test it
    n.test(pat)
if __name__ == '__main__':
    # Benchmark: run the XOR demo five times and report each wall-clock time,
    # then dump psyco's generated-code buffer statistics.
    v, t1 = time(demo)
    v, t2 = time(demo)
    v, t3 = time(demo)
    v, t4 = time(demo)
    v, t5 = time(demo)
    print t1, t2, t3, t4, t5
    psyco.dumpcodebuf()
| bsd-3-clause |
rhaschke/catkin_tools | tests/system/verbs/catkin_build/test_modify_ws.py | 2 | 1193 | from __future__ import print_function
import os
# Directory containing this test module.
TEST_DIR = os.path.dirname(__file__)
# Shared fixtures live two levels up from the per-verb test directories.
RESOURCES_DIR = os.path.join(os.path.dirname(__file__), '..', '..', 'resources')
# Canonical argument lists for invoking the catkin verbs under test.
BUILD = ['build', '--no-notify', '--no-status']
CLEAN = ['clean', '--all', '--yes']  # , '--no-notify', '--no-color', '--no-status']
def test_add_package():
    """Exercise build behavior after new packages are added to the workspace.

    TODO: implement for the various dependency relationships.
    """
def test_remove_package():
    """Exercise build behavior after packages are removed from the workspace.

    TODO: implement for the various dependency relationships.
    """
def test_rename_package():
    """Exercise build behavior after a workspace package is renamed.

    TODO: implement for the various dependency relationships.
    """
def test_ignore_package():
    """Exercise build behavior after a CATKIN_IGNORE file is added to a package.

    TODO: implement for the various dependency relationships.
    """
def test_deblacklist():
    """Exercise build behavior when an unbuilt package leaves the blacklist.

    TODO: implement for the various dependency relationships.
    """
| apache-2.0 |
fernandezcuesta/ansible | test/units/plugins/strategy/test_strategy_base.py | 69 | 21292 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.mock.loader import DictDataLoader
import uuid
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.module_utils.six.moves import queue as Queue
from ansible.playbook.block import Block
from ansible.playbook.handler import Handler
from ansible.plugins.strategy import StrategyBase
class TestStrategyBase(unittest.TestCase):
    """Unit tests for ansible.plugins.strategy.StrategyBase (heavily mocked)."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
def test_strategy_base_init(self):
    """StrategyBase can be constructed and cleaned up against a mocked TQM."""
    queue_items = []
    # Local fakes emulating a multiprocessing final queue backed by a list.
    def _queue_empty(*args, **kwargs):
        return len(queue_items) == 0
    def _queue_get(*args, **kwargs):
        if len(queue_items) == 0:
            raise Queue.Empty
        else:
            return queue_items.pop()
    def _queue_put(item, *args, **kwargs):
        queue_items.append(item)
    mock_queue = MagicMock()
    mock_queue.empty.side_effect = _queue_empty
    mock_queue.get.side_effect = _queue_get
    mock_queue.put.side_effect = _queue_put
    mock_tqm = MagicMock(TaskQueueManager)
    mock_tqm._final_q = mock_queue
    mock_tqm._options = MagicMock()
    mock_tqm._notified_handlers = {}
    mock_tqm._listening_handlers = {}
    strategy_base = StrategyBase(tqm=mock_tqm)
    strategy_base.cleanup()
def test_strategy_base_run(self):
    """run() maps TQM failed/unreachable host state to the proper RUN_* code."""
    queue_items = []
    def _queue_empty(*args, **kwargs):
        return len(queue_items) == 0
    def _queue_get(*args, **kwargs):
        if len(queue_items) == 0:
            raise Queue.Empty
        else:
            return queue_items.pop()
    def _queue_put(item, *args, **kwargs):
        queue_items.append(item)
    mock_queue = MagicMock()
    mock_queue.empty.side_effect = _queue_empty
    mock_queue.get.side_effect = _queue_get
    mock_queue.put.side_effect = _queue_put
    mock_tqm = MagicMock(TaskQueueManager)
    mock_tqm._final_q = mock_queue
    mock_tqm._stats = MagicMock()
    mock_tqm._notified_handlers = {}
    mock_tqm._listening_handlers = {}
    mock_tqm.send_callback.return_value = None
    # copy the real RUN_* constants onto the mock so comparisons are meaningful
    for attr in ('RUN_OK', 'RUN_ERROR', 'RUN_FAILED_HOSTS', 'RUN_UNREACHABLE_HOSTS'):
        setattr(mock_tqm, attr, getattr(TaskQueueManager, attr))
    mock_iterator = MagicMock()
    mock_iterator._play = MagicMock()
    mock_iterator._play.handlers = []
    mock_play_context = MagicMock()
    mock_tqm._failed_hosts = dict()
    mock_tqm._unreachable_hosts = dict()
    mock_tqm._options = MagicMock()
    mock_tqm._notified_handlers = {}
    mock_tqm._listening_handlers = {}
    strategy_base = StrategyBase(tqm=mock_tqm)
    mock_host = MagicMock()
    mock_host.name = 'host1'
    # clean run -> RUN_OK; explicit error result passes straight through
    self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context), mock_tqm.RUN_OK)
    self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=TaskQueueManager.RUN_ERROR), mock_tqm.RUN_ERROR)
    # a failed host dominates, then an unreachable host dominates that
    mock_tqm._failed_hosts = dict(host1=True)
    mock_iterator.get_failed_hosts.return_value = [mock_host]
    self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_FAILED_HOSTS)
    mock_tqm._unreachable_hosts = dict(host1=True)
    mock_iterator.get_failed_hosts.return_value = []
    self.assertEqual(strategy_base.run(iterator=mock_iterator, play_context=mock_play_context, result=False), mock_tqm.RUN_UNREACHABLE_HOSTS)
    strategy_base.cleanup()
def test_strategy_base_get_hosts(self):
    """get_hosts_remaining/get_failed_hosts honour TQM failed/unreachable sets."""
    queue_items = []
    def _queue_empty(*args, **kwargs):
        return len(queue_items) == 0
    def _queue_get(*args, **kwargs):
        if len(queue_items) == 0:
            raise Queue.Empty
        else:
            return queue_items.pop()
    def _queue_put(item, *args, **kwargs):
        queue_items.append(item)
    mock_queue = MagicMock()
    mock_queue.empty.side_effect = _queue_empty
    mock_queue.get.side_effect = _queue_get
    mock_queue.put.side_effect = _queue_put
    # five fake hosts named host01..host05
    mock_hosts = []
    for i in range(0, 5):
        mock_host = MagicMock()
        mock_host.name = "host%02d" % (i + 1)
        mock_host.has_hostkey = True
        mock_hosts.append(mock_host)
    mock_inventory = MagicMock()
    mock_inventory.get_hosts.return_value = mock_hosts
    mock_tqm = MagicMock()
    mock_tqm._final_q = mock_queue
    mock_tqm._notified_handlers = {}
    mock_tqm._listening_handlers = {}
    mock_tqm.get_inventory.return_value = mock_inventory
    mock_play = MagicMock()
    mock_play.hosts = ["host%02d" % (i + 1) for i in range(0, 5)]
    strategy_base = StrategyBase(tqm=mock_tqm)
    mock_tqm._failed_hosts = []
    mock_tqm._unreachable_hosts = []
    self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts)
    # a failed host drops out of 'remaining' and shows up in 'failed'
    mock_tqm._failed_hosts = ["host01"]
    self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[1:])
    self.assertEqual(strategy_base.get_failed_hosts(play=mock_play), [mock_hosts[0]])
    # unreachable hosts are excluded from 'remaining' as well
    mock_tqm._unreachable_hosts = ["host02"]
    self.assertEqual(strategy_base.get_hosts_remaining(play=mock_play), mock_hosts[2:])
    strategy_base.cleanup()
@patch.object(WorkerProcess, 'run')
def test_strategy_base_queue_task(self, mock_worker):
    """_queue_task round-robins across 3 workers and counts pending results."""
    def fake_run(self):
        return
    mock_worker.run.side_effect = fake_run
    fake_loader = DictDataLoader()
    mock_var_manager = MagicMock()
    mock_host = MagicMock()
    mock_host.get_vars.return_value = dict()
    mock_host.has_hostkey = True
    mock_inventory = MagicMock()
    mock_inventory.get.return_value = mock_host
    mock_options = MagicMock()
    mock_options.module_path = None
    # real TaskQueueManager (not a mock) so worker bookkeeping is exercised
    tqm = TaskQueueManager(
        inventory=mock_inventory,
        variable_manager=mock_var_manager,
        loader=fake_loader,
        options=mock_options,
        passwords=None,
    )
    tqm._initialize_processes(3)
    tqm.hostvars = dict()
    try:
        strategy_base = StrategyBase(tqm=tqm)
        strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
        self.assertEqual(strategy_base._cur_worker, 1)
        self.assertEqual(strategy_base._pending_results, 1)
        strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
        self.assertEqual(strategy_base._cur_worker, 2)
        self.assertEqual(strategy_base._pending_results, 2)
        strategy_base._queue_task(host=mock_host, task=MagicMock(), task_vars=dict(), play_context=MagicMock())
        # third dispatch wraps back to worker 0 (only 3 workers configured)
        self.assertEqual(strategy_base._cur_worker, 0)
        self.assertEqual(strategy_base._pending_results, 3)
    finally:
        tqm.cleanup()
def test_strategy_base_process_pending_results(self):
    """_wait_on_pending_results handles ok/failed/unreachable/skipped task
    results plus add_host / add_group / handler-notification side effects."""
    mock_tqm = MagicMock()
    mock_tqm._terminated = False
    mock_tqm._failed_hosts = dict()
    mock_tqm._unreachable_hosts = dict()
    mock_tqm.send_callback.return_value = None
    mock_tqm._notified_handlers = {}
    mock_tqm._listening_handlers = {}
    queue_items = []
    # Local fakes emulating the TQM final queue backed by a plain list.
    def _queue_empty(*args, **kwargs):
        return len(queue_items) == 0
    def _queue_get(*args, **kwargs):
        if len(queue_items) == 0:
            raise Queue.Empty
        else:
            return queue_items.pop()
    def _queue_put(item, *args, **kwargs):
        queue_items.append(item)
    mock_queue = MagicMock()
    mock_queue.empty.side_effect = _queue_empty
    mock_queue.get.side_effect = _queue_get
    mock_queue.put.side_effect = _queue_put
    mock_tqm._final_q = mock_queue
    mock_tqm._stats = MagicMock()
    mock_tqm._stats.increment.return_value = None
    # Fake play, host, task and handler wiring.
    mock_play = MagicMock()
    mock_host = MagicMock()
    mock_host.name = 'test01'
    mock_host.vars = dict()
    mock_host.get_vars.return_value = dict()
    mock_host.has_hostkey = True
    mock_task = MagicMock()
    mock_task._role = None
    mock_task._parent = None
    mock_task.ignore_errors = False
    mock_task._uuid = uuid.uuid4()
    mock_task.loop = None
    mock_task.copy.return_value = mock_task
    mock_handler_task = MagicMock(Handler)
    mock_handler_task.name = 'test handler'
    mock_handler_task.action = 'foo'
    mock_handler_task._parent = None
    mock_handler_task.get_name.return_value = "test handler"
    mock_handler_task.has_triggered.return_value = False
    mock_handler_task._uuid = 'xxxxxxxxxxxxx'
    mock_handler_task.copy.return_value = mock_handler_task
    mock_iterator = MagicMock()
    mock_iterator._play = mock_play
    mock_iterator.mark_host_failed.return_value = None
    mock_iterator.get_next_task_for_host.return_value = (None, None)
    mock_iterator.get_original_task.return_value = mock_task
    mock_handler_block = MagicMock()
    mock_handler_block.block = [mock_handler_task]
    mock_handler_block.rescue = []
    mock_handler_block.always = []
    mock_play.handlers = [mock_handler_block]
    mock_tqm._notified_handlers = {mock_handler_task._uuid: []}
    mock_tqm._listening_handlers = {}
    mock_group = MagicMock()
    mock_group.add_host.return_value = None
    # Inventory lookups only succeed for the known host/groups.
    def _get_host(host_name):
        if host_name == 'test01':
            return mock_host
        return None
    def _get_group(group_name):
        if group_name in ('all', 'foo'):
            return mock_group
        return None
    mock_inventory = MagicMock()
    mock_inventory._hosts_cache = dict()
    mock_inventory.hosts.return_value = mock_host
    mock_inventory.get_host.side_effect = _get_host
    mock_inventory.get_group.side_effect = _get_group
    mock_inventory.clear_pattern_cache.return_value = None
    mock_inventory.get_host_vars.return_value = {}
    mock_inventory.hosts.get.return_value = mock_host
    mock_var_mgr = MagicMock()
    mock_var_mgr.set_host_variable.return_value = None
    mock_var_mgr.set_host_facts.return_value = None
    mock_var_mgr.get_vars.return_value = dict()
    strategy_base = StrategyBase(tqm=mock_tqm)
    strategy_base._inventory = mock_inventory
    strategy_base._variable_manager = mock_var_mgr
    strategy_base._blocked_hosts = dict()
    def _has_dead_workers():
        return False
    strategy_base._tqm.has_dead_workers.side_effect = _has_dead_workers
    # Empty queue: no results come back.
    results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
    self.assertEqual(len(results), 0)
    # 'changed' result: host is unblocked, pending counter decremented.
    task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True))
    queue_items.append(task_result)
    strategy_base._blocked_hosts['test01'] = True
    strategy_base._pending_results = 1
    results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0], task_result)
    self.assertEqual(strategy_base._pending_results, 0)
    self.assertNotIn('test01', strategy_base._blocked_hosts)
    # Failed result (iterator reports the host as failed).
    task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"failed":true}')
    queue_items.append(task_result)
    strategy_base._blocked_hosts['test01'] = True
    strategy_base._pending_results = 1
    mock_iterator.is_failed.return_value = True
    results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0], task_result)
    self.assertEqual(strategy_base._pending_results, 0)
    self.assertNotIn('test01', strategy_base._blocked_hosts)
    # self.assertIn('test01', mock_tqm._failed_hosts)
    # del mock_tqm._failed_hosts['test01']
    mock_iterator.is_failed.return_value = False
    # Unreachable result: host lands in the TQM unreachable map.
    task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"unreachable": true}')
    queue_items.append(task_result)
    strategy_base._blocked_hosts['test01'] = True
    strategy_base._pending_results = 1
    results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0], task_result)
    self.assertEqual(strategy_base._pending_results, 0)
    self.assertNotIn('test01', strategy_base._blocked_hosts)
    self.assertIn('test01', mock_tqm._unreachable_hosts)
    del mock_tqm._unreachable_hosts['test01']
    # Skipped result.
    task_result = TaskResult(host=mock_host.name, task=mock_task._uuid, return_data='{"skipped": true}')
    queue_items.append(task_result)
    strategy_base._blocked_hosts['test01'] = True
    strategy_base._pending_results = 1
    results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
    self.assertEqual(len(results), 1)
    self.assertEqual(results[0], task_result)
    self.assertEqual(strategy_base._pending_results, 0)
    self.assertNotIn('test01', strategy_base._blocked_hosts)
    # add_host side effect.
    queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_host=dict(host_name='newhost01', new_groups=['foo']))))
    strategy_base._blocked_hosts['test01'] = True
    strategy_base._pending_results = 1
    results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
    self.assertEqual(len(results), 1)
    self.assertEqual(strategy_base._pending_results, 0)
    self.assertNotIn('test01', strategy_base._blocked_hosts)
    # add_group side effect.
    queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(add_group=dict(group_name='foo'))))
    strategy_base._blocked_hosts['test01'] = True
    strategy_base._pending_results = 1
    results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
    self.assertEqual(len(results), 1)
    self.assertEqual(strategy_base._pending_results, 0)
    self.assertNotIn('test01', strategy_base._blocked_hosts)
    # Handler notification: host is recorded against the handler's uuid.
    queue_items.append(TaskResult(host=mock_host.name, task=mock_task._uuid, return_data=dict(changed=True, _ansible_notify=['test handler'])))
    strategy_base._blocked_hosts['test01'] = True
    strategy_base._pending_results = 1
    results = strategy_base._wait_on_pending_results(iterator=mock_iterator)
    self.assertEqual(len(results), 1)
    self.assertEqual(strategy_base._pending_results, 0)
    self.assertNotIn('test01', strategy_base._blocked_hosts)
    self.assertIn(mock_handler_task._uuid, strategy_base._notified_handlers)
    self.assertIn(mock_host, strategy_base._notified_handlers[mock_handler_task._uuid])
    # queue_items.append(('set_host_var', mock_host, mock_task, None, 'foo', 'bar'))
    # results = strategy_base._process_pending_results(iterator=mock_iterator)
    # self.assertEqual(len(results), 0)
    # self.assertEqual(strategy_base._pending_results, 1)
    # queue_items.append(('set_host_facts', mock_host, mock_task, None, 'foo', dict()))
    # results = strategy_base._process_pending_results(iterator=mock_iterator)
    # self.assertEqual(len(results), 0)
    # self.assertEqual(strategy_base._pending_results, 1)
    # queue_items.append(('bad'))
    # self.assertRaises(AnsibleError, strategy_base._process_pending_results, iterator=mock_iterator)
    strategy_base.cleanup()
def test_strategy_base_load_included_file(self):
    """_load_included_file parses a task list; an empty file yields []."""
    fake_loader = DictDataLoader({
        "test.yml": """
        - debug: msg='foo'
        """,
        "bad.yml": """
        """,
    })
    queue_items = []
    def _queue_empty(*args, **kwargs):
        return len(queue_items) == 0
    def _queue_get(*args, **kwargs):
        if len(queue_items) == 0:
            raise Queue.Empty
        else:
            return queue_items.pop()
    def _queue_put(item, *args, **kwargs):
        queue_items.append(item)
    mock_queue = MagicMock()
    mock_queue.empty.side_effect = _queue_empty
    mock_queue.get.side_effect = _queue_get
    mock_queue.put.side_effect = _queue_put
    mock_tqm = MagicMock()
    mock_tqm._final_q = mock_queue
    mock_tqm._notified_handlers = {}
    mock_tqm._listening_handlers = {}
    strategy_base = StrategyBase(tqm=mock_tqm)
    strategy_base._loader = fake_loader
    strategy_base.cleanup()
    mock_play = MagicMock()
    mock_block = MagicMock()
    mock_block._play = mock_play
    mock_block.vars = dict()
    mock_task = MagicMock()
    mock_task._block = mock_block
    mock_task._role = None
    mock_task._parent = None
    mock_iterator = MagicMock()
    mock_iterator.mark_host_failed.return_value = None
    mock_inc_file = MagicMock()
    mock_inc_file._task = mock_task
    # valid include parses without error (result intentionally unchecked)
    mock_inc_file._filename = "test.yml"
    res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
    # empty include yields an empty task list
    mock_inc_file._filename = "bad.yml"
    res = strategy_base._load_included_file(included_file=mock_inc_file, iterator=mock_iterator)
    self.assertEqual(res, [])
@patch.object(WorkerProcess, 'run')
def test_strategy_base_run_handlers(self, mock_worker):
    """run_handlers drives a notified handler through a real TaskQueueManager."""
    def fake_run(*args):
        return
    mock_worker.side_effect = fake_run
    mock_play_context = MagicMock()
    mock_handler_task = MagicMock(Handler)
    mock_handler_task.action = 'foo'
    mock_handler_task.get_name.return_value = "test handler"
    mock_handler_task.has_triggered.return_value = False
    mock_handler_task.listen = None
    mock_handler_task._role = None
    mock_handler_task._parent = None
    mock_handler_task._uuid = 'xxxxxxxxxxxxxxxx'
    mock_handler = MagicMock()
    mock_handler.block = [mock_handler_task]
    mock_handler.flag_for_host.return_value = False
    mock_play = MagicMock()
    mock_play.handlers = [mock_handler]
    mock_host = MagicMock(Host)
    mock_host.name = "test01"
    mock_host.has_hostkey = True
    mock_inventory = MagicMock()
    mock_inventory.get_hosts.return_value = [mock_host]
    mock_inventory.get.return_value = mock_host
    mock_var_mgr = MagicMock()
    mock_var_mgr.get_vars.return_value = dict()
    mock_iterator = MagicMock()
    mock_iterator._play = mock_play
    mock_iterator.get_original_task.return_value = mock_handler_task
    fake_loader = DictDataLoader()
    mock_options = MagicMock()
    mock_options.module_path = None
    # real TQM so handler notification bookkeeping is exercised end-to-end
    tqm = TaskQueueManager(
        inventory=mock_inventory,
        variable_manager=mock_var_mgr,
        loader=fake_loader,
        options=mock_options,
        passwords=None,
    )
    tqm._initialize_processes(3)
    tqm._initialize_notified_handlers(mock_play)
    tqm.hostvars = dict()
    try:
        strategy_base = StrategyBase(tqm=tqm)
        strategy_base._inventory = mock_inventory
        strategy_base._notified_handlers = {mock_handler_task._uuid: [mock_host]}
        # pre-seed a result so run_handlers has something to drain
        task_result = TaskResult(Host('host01'), Handler(), dict(changed=False))
        tqm._final_q.put(task_result)
        result = strategy_base.run_handlers(iterator=mock_iterator, play_context=mock_play_context)
    finally:
        strategy_base.cleanup()
        tqm.cleanup()
| gpl-3.0 |
samchrisinger/osf.io | website/notifications/events/utils.py | 66 | 5782 | from itertools import product
from website.notifications.emails import compile_subscriptions
from website.notifications import utils, constants
def get_file_subs_from_folder(addon, user, kind, path, name):
    """Return the paths of every file under the given folder of *addon*."""
    folder_node = {'kind': kind, 'path': path, 'name': name}
    tree = addon._get_file_tree(filenode=folder_node, user=user,
                                version='latest-published')
    return list_of_files(tree)
def list_of_files(file_object):
    """Flatten a WaterButler file-tree dict into a list of file paths."""
    if file_object['kind'] == 'file':
        return [file_object['path']]
    paths = []
    for child in file_object['children']:
        paths.extend(list_of_files(child))
    return paths
def compile_user_lists(files, user, source_node, node):
    """Merge the move/warn/remove categorization over several files.

    :param files: List of WaterButler paths
    :param user: User who initiated the action/event
    :param source_node: Node instance the files come from
    :param node: Node instance the files go to
    :return: merged (move, warn, remove) dicts keyed by notification type
    """
    # start every notification bucket empty
    move = {key: [] for key in constants.NOTIFICATION_TYPES}
    warn = {key: [] for key in constants.NOTIFICATION_TYPES}
    remove = {key: [] for key in constants.NOTIFICATION_TYPES}
    # with no files, fall back to the node-level subscription
    if not files:
        move, warn, remove = categorize_users(
            user, 'file_updated', source_node, 'file_updated', node
        )
    # accumulate the per-file categorizations via set union
    for file_path in files:
        event = file_path.strip('/') + '_file_updated'
        t_move, t_warn, t_remove = categorize_users(
            user, event, source_node, event, node
        )
        for notification in constants.NOTIFICATION_TYPES:
            move[notification] = list(set(move[notification]) | set(t_move[notification]))
            warn[notification] = list(set(warn[notification]) | set(t_warn[notification]))
            remove[notification] = list(set(remove[notification]) | set(t_remove[notification]))
    return move, warn, remove
def categorize_users(user, source_event, source_node, event, node):
    """Categorize users from a file subscription into three categories.
    Puts users in one of three bins:
    - Moved: User has permissions on both nodes, subscribed to both
    - Warned: User has permissions on both, not subscribed to destination
    - Removed: Does not have permission on destination node
    :param user: User instance who started the event
    :param source_event: <guid>_event_name
    :param source_node: node from where the event happened
    :param event: new guid event name
    :param node: node where event ends up
    :return: Moved, to be warned, and removed users.
    """
    # users with no subscription path to the destination at all
    remove = utils.users_to_remove(source_event, source_node, node)
    source_node_subs = compile_subscriptions(source_node, utils.find_subscription_type(source_event))
    new_subs = compile_subscriptions(node, utils.find_subscription_type(source_event), event)
    # Moves users into the warn bucket or the move bucket
    move = subscriptions_users_union(source_node_subs, new_subs)
    warn = subscriptions_users_difference(source_node_subs, new_subs)
    # Removes users without permissions
    warn, remove = subscriptions_node_permissions(node, warn, remove)
    # Remove duplicates
    warn = subscriptions_users_remove_duplicates(warn, new_subs, remove_same=False)
    move = subscriptions_users_remove_duplicates(move, new_subs, remove_same=False)
    # Remove duplicates between move and warn; and move and remove
    move = subscriptions_users_remove_duplicates(move, warn, remove_same=True)
    move = subscriptions_users_remove_duplicates(move, remove, remove_same=True)
    for notifications in constants.NOTIFICATION_TYPES:
        # Remove the user who started this whole thing.
        user_id = user._id
        if user_id in warn[notifications]:
            warn[notifications].remove(user_id)
        if user_id in move[notifications]:
            move[notifications].remove(user_id)
        if user_id in remove[notifications]:
            remove[notifications].remove(user_id)
    return move, warn, remove
def subscriptions_node_permissions(node, warn_subscription, remove_subscription):
    """Move users lacking permission on *node* from warn to remove buckets."""
    for notification in constants.NOTIFICATION_TYPES:
        has_perm, no_perm = utils.separate_users(node, warn_subscription[notification])
        warn_subscription[notification] = has_perm
        # extend in place first (preserves the original list object for any
        # aliases), then rebind to the de-duplicated version
        remove_subscription[notification].extend(no_perm)
        remove_subscription[notification] = list(set(remove_subscription[notification]))
    return warn_subscription, remove_subscription
def subscriptions_users_union(emails_1, emails_2):
    """Per notification type, return the union of the two user lists."""
    merged = {}
    for notification in constants.NOTIFICATION_TYPES.keys():
        combined = set(emails_1[notification]) | set(emails_2[notification])
        merged[notification] = list(combined)
    return merged
def subscriptions_users_difference(emails_1, emails_2):
    """Per notification type, return users in emails_1 but not in emails_2."""
    diff = {}
    for notification in constants.NOTIFICATION_TYPES.keys():
        only_first = set(emails_1[notification]) - set(emails_2[notification])
        diff[notification] = list(only_first)
    return diff
def subscriptions_users_remove_duplicates(emails_1, emails_2, remove_same=False):
    """Remove users present in *emails_2* from the buckets of *emails_1*.

    Works across notification types: for every ordered pair of types, the
    users of ``emails_2[notification_2]`` are removed from
    ``emails_1[notification_1]``.  A shallow copy of *emails_1* is taken, so
    the caller's dict is not mutated (its lists are replaced, not edited).

    :param emails_1: dict mapping notification type -> list of user ids
    :param emails_2: dict of the same shape, whose users are to be removed
    :param remove_same: if True, also remove duplicates when the two
        notification types are the same
    :return: new dict with the filtered lists
    """
    emails_list = dict(emails_1)
    product_list = product(constants.NOTIFICATION_TYPES, repeat=2)
    for notification_1, notification_2 in product_list:
        # NB operator precedence: this parses as
        #   (notification_2 == notification_1 and not remove_same)
        #   or notification_2 == 'none'
        # i.e. same-type pairs are skipped unless remove_same is set, and
        # the 'none' bucket never causes removals.
        if notification_2 == notification_1 and not remove_same or notification_2 == 'none':
            continue
        emails_list[notification_1] = list(
            set(emails_list[notification_1]).difference(set(emails_2[notification_2]))
        )
    return emails_list
| apache-2.0 |
flavour/ifrc_qa | static/scripts/tools/xls2xml.py | 16 | 10070 | # -*- coding: utf-8 -*-
#
# Debug/Helper script for XLS stylesheet development
#
# >>> python xls2xml <XLS File>
# ... converts the XLS file into XML
#
# >>> python xls2xml <XLS File> <XSLT Stylesheet>
# ... converts the XLS file into XML and transforms it using the stylesheet
#
import datetime
import sys
from lxml import etree
from xml.sax.saxutils import escape, unescape
TABLE = "table"
ROW = "row"
COL = "col"
FIELD = "field"
TAG = "tag"
HASHTAG = "hashtag"
# -----------------------------------------------------------------------------
def xml_encode(s):
    """Escape XML special characters in *s*.

    ``escape`` handles &, < and > by itself; the extra entity map adds
    single and double quotes so the result is also safe inside attribute
    values.  Falsy input (None, "") is returned unchanged.

    NOTE: the entity map had been garbled (the XML entities were themselves
    unescaped to raw quote characters, which even broke the syntax) -
    restored to the proper &apos;/&quot; entities, mirroring xml_decode().
    """
    if s:
        s = escape(s, {"'": "&apos;", '"': "&quot;"})
    return s
# -----------------------------------------------------------------------------
def xml_decode(s):
    """Unescape XML entities in *s* (inverse of xml_encode).

    ``unescape`` handles &amp;, &lt; and &gt; by itself; the extra entity
    map restores single and double quotes.  Falsy input (None, "") is
    returned unchanged.

    NOTE: the entity map had been garbled (entities collapsed to raw quote
    characters) - restored so that xml_decode(xml_encode(x)) == x.
    """
    if s:
        s = unescape(s, {"&apos;": "'", "&quot;": '"'})
    return s
# -----------------------------------------------------------------------------
def parse(source):
    """Parse an XML document from *source* (path, URL or file-like object)
    and return the lxml ElementTree.

    NOTE(review): no_network=False actually *allows* network access while
    parsing (e.g. for external DTDs) - double negative in the lxml option
    name; confirm this is intended for a local debug helper.
    """
    parser = etree.XMLParser(no_network=False)
    result = etree.parse(source, parser)
    return result
# -----------------------------------------------------------------------------
def s3_unicode(s, encoding="utf-8"):
    """Convert an arbitrary object to unicode (Python 2 only: relies on
    the py2-only builtins ``unicode`` and ``basestring``).

    - unicode objects are returned as-is
    - byte strings are decoded with *encoding*
    - other objects go through __unicode__/str()
    - Exception instances whose message cannot be converted are handled
      by converting their args recursively and joining with spaces

    :param s: the object to convert
    :param encoding: charset used to decode byte strings
    """
    if type(s) is unicode:
        return s
    try:
        if not isinstance(s, basestring):
            if hasattr(s, "__unicode__"):
                s = unicode(s)
            else:
                try:
                    s = unicode(str(s), encoding, "strict")
                except UnicodeEncodeError:
                    # str(s) itself failed to encode; for non-exceptions
                    # this is fatal, for exceptions fall back to their args
                    if not isinstance(s, Exception):
                        raise
                    s = " ".join([s3_unicode(arg, encoding) for arg in s])
        else:
            s = s.decode(encoding)
    except UnicodeDecodeError:
        # Same fallback as above, but for the decode path
        if not isinstance(s, Exception):
            raise
        else:
            s = " ".join([s3_unicode(arg, encoding) for arg in s])
    return s
# -------------------------------------------------------------------------
def encode_iso_datetime(dt):
    """Format *dt* as an ISO 8601 string with microseconds dropped."""
    truncated = dt.replace(microsecond=0)
    return truncated.isoformat()
# -------------------------------------------------------------------------
def xls2tree(source,
             resourcename=None,
             extra_data=None,
             hashtags=None,
             sheet=None,
             rows=None,
             cols=None,
             fields=None,
             header_row=True):
    """
    Convert an XLS work sheet into an element tree
    <table><row><col field="...">value</col>...</row>...</table>

    @param source: the XLS source (file-like object or open xlrd Book)
    @param resourcename: value for the "name" attribute of the root element
    @param extra_data: dict of additional column values added to every row
    @param hashtags: dict of hashtags per column name
    @param sheet: the work sheet: an xlrd Sheet instance, a sheet index,
                  a sheet name, or None (= "SahanaData" if present,
                  otherwise the first sheet)
    @param rows: row range as (start, length) tuple, or just the length
    @param cols: column range as (start, length) tuple, or just the length
    @param fields: dict {column index: field name}, overrides the header row
    @param header_row: whether the first row contains the column headers

    Fixes applied:
      - "except IndexError, xlrd.XLRDError:" (py2 bind-to-target form which
        would *assign* the caught IndexError to the xlrd.XLRDError attribute)
        corrected to a proper exception tuple
      - boolean cells used str(value).lower() where "value" is a variable of
        the enclosing scope (wrong cell, or NameError) - corrected to v
    """
    import xlrd
    # Shortcuts
    SubElement = etree.SubElement
    DEFAULT_SHEET_NAME = "SahanaData"
    # Root element
    root = etree.Element(TABLE)
    if resourcename is not None:
        root.set("name", resourcename)
    if isinstance(sheet, xlrd.sheet.Sheet):
        # Open work sheet passed as argument => use this
        s = sheet
    else:
        if hasattr(source, "read"):
            # Source is a stream
            if hasattr(source, "seek"):
                source.seek(0)
            wb = xlrd.open_workbook(file_contents=source.read(),
                                    # requires xlrd 0.7.x or higher
                                    on_demand=True)
        elif isinstance(source, xlrd.book.Book):
            # Source is an open work book
            wb = source
        else:
            # Unsupported source type
            raise RuntimeError("xls2tree: invalid source %s" % type(source))
        # Find the sheet
        try:
            if isinstance(sheet, (int, long)):
                s = wb.sheet_by_index(sheet)
            elif isinstance(sheet, basestring):
                s = wb.sheet_by_name(sheet)
            elif sheet is None:
                if DEFAULT_SHEET_NAME in wb.sheet_names():
                    s = wb.sheet_by_name(DEFAULT_SHEET_NAME)
                else:
                    s = wb.sheet_by_index(0)
            else:
                raise SyntaxError("xls2tree: invalid sheet %s" % sheet)
        except (IndexError, xlrd.XLRDError):
            # Sheet index out of range or named sheet missing => empty tree
            s = None
    def cell_range(cells, max_cells):
        """
        Helper method to calculate a cell range
        @param cells: the specified range
        @param max_cells: maximum number of cells
        """
        if not cells:
            cells = (0, max_cells)
        elif not isinstance(cells, (tuple, list)):
            cells = (0, cells)
        elif len(cells) == 1:
            cells = (cells[0], max_cells)
        else:
            cells = (cells[0], cells[0] + cells[1])
        return cells
    if s:
        # Calculate cell range
        rows = cell_range(rows, s.nrows)
        cols = cell_range(cols, s.ncols)
        # Column headers
        if fields:
            headers = fields
        elif not header_row:
            headers = dict((i, "%s" % i) for i in range(cols[1] - cols[0]))
        else:
            # Use header row in the work sheet
            headers = {}
        # Lambda to decode XLS dates into an ISO datetime-string
        # NOTE(review): wb is only bound when the source was a stream/Book;
        # date cells in a directly-passed Sheet would raise NameError here
        # because the workbook's datemode is unavailable - confirm callers.
        decode_date = lambda v: datetime.datetime(*xlrd.xldate_as_tuple(v, wb.datemode))
        def decode(t, v):
            """
            Helper method to decode the cell value by type
            @param t: the cell type
            @param v: the cell value
            @return: text representation of the cell value
            """
            text = ""
            if v:
                if t is None:
                    text = s3_unicode(v).strip()
                elif t == xlrd.XL_CELL_TEXT:
                    text = v.strip()
                elif t == xlrd.XL_CELL_NUMBER:
                    text = str(long(v)) if long(v) == v else str(v)
                elif t == xlrd.XL_CELL_DATE:
                    text = encode_iso_datetime(decode_date(v))
                elif t == xlrd.XL_CELL_BOOLEAN:
                    # Fixed: was str(value).lower() - "value" belongs to the
                    # enclosing scope, not this function
                    text = str(v).lower()
            return text
        def add_col(row, name, t, v, hashtags=None):
            """
            Helper method to add a column to an output row
            @param row: the output row (etree.Element)
            @param name: the column name
            @param t: the cell type
            @param v: the cell value
            """
            col = SubElement(row, COL)
            col.set(FIELD, name)
            if hashtags:
                hashtag = hashtags.get(name)
                if hashtag and hashtag[1:]:
                    col.set(HASHTAG, hashtag)
            col.text = decode(t, v)
        # Copy so that auto-detected hashtags don't leak into caller's dict
        hashtags = dict(hashtags) if hashtags else {}
        # Process the rows
        record_idx = 0
        extra_fields = set(extra_data) if extra_data else None
        check_headers = extra_fields is not None
        for ridx in range(*rows):
            # Read types and values
            types = s.row_types(ridx, *cols)
            values = s.row_values(ridx, *cols)
            # Skip empty rows
            if not any(v != "" for v in values):
                continue
            if header_row and record_idx == 0:
                # Read column headers
                if not fields:
                    for cidx, value in enumerate(values):
                        header = decode(types[cidx], value)
                        headers[cidx] = header
                        if check_headers:
                            # Header shadows an extra_data key => drop it
                            extra_fields.discard(header)
                    check_headers = False
            else:
                if not fields and \
                   (header_row and record_idx == 1 or record_idx == 0):
                    # Autodetect hashtags: a row of all-text cells that each
                    # start with '#' is a hashtag row, not a data row
                    items = {}
                    for cidx, name in headers.items():
                        try:
                            t = types[cidx]
                            v = values[cidx]
                        except IndexError:
                            continue
                        if t not in (xlrd.XL_CELL_TEXT, xlrd.XL_CELL_EMPTY):
                            items = None
                            break
                        elif v:
                            items[name] = v
                    if items and all(v[0] == '#' for v in items.values()):
                        hashtags.update(items)
                        continue
                # Add output row
                orow = SubElement(root, ROW)
                for cidx, name in headers.items():
                    if check_headers:
                        extra_fields.discard(name)
                    try:
                        t = types[cidx]
                        v = values[cidx]
                    except IndexError:
                        pass
                    else:
                        add_col(orow, name, t, v, hashtags=hashtags)
                check_headers = False
                # Add extra data
                if extra_fields:
                    for key in extra_fields:
                        add_col(orow, key, None, extra_data[key], hashtags=hashtags)
            record_idx += 1
    return etree.ElementTree(root)
# -----------------------------------------------------------------------------
def transform(tree, stylesheet_path, **args):
    """Apply the XSLT stylesheet at *stylesheet_path* to *tree*.

    Keyword arguments are passed through to the stylesheet as string
    parameters (each value is wrapped in single quotes, as lxml expects
    XPath string literals).
    """
    stylesheet = etree.parse(stylesheet_path)
    access = etree.XSLTAccessControl(read_file=True, read_network=True)
    transformer = etree.XSLT(stylesheet, access_control=access)
    if args:
        params = dict((key, "'%s'" % args[key]) for key in args)
        return transformer(tree, **params)
    return transformer(tree)
# -----------------------------------------------------------------------------
def main(argv):
    """Entry point (Python 2 - note the print statements).

    argv[0]: path of the XLS file to convert (required)
    argv[1]: path of an XSLT stylesheet to apply (optional)
    Prints the resulting XML tree to stdout.
    """
    try:
        xlspath = argv[0]
    except:
        print "Usage: python xls2xml <XLS File> [<XSLT Stylesheet>]"
        return
    try:
        xslpath = argv[1]
    except:
        xslpath = None
    # Debug helper: the file handle is deliberately not closed explicitly
    xlsfile = open(xlspath)
    tree = xls2tree(xlsfile)
    if xslpath is not None:
        tree = transform(tree, xslpath)
    print etree.tostring(tree, pretty_print=True)
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
# END =========================================================================
| mit |
IS-ENES-Data/esgf-pid | esgfpid/rabbit/asynchronous/thread_confirmer.py | 1 | 9200 | import logging
import copy
from esgfpid.utils import loginfo, logdebug, logtrace, logerror, logwarn, log_every_x_times
from .exceptions import UnknownServerResponse
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
'''
=========
Confirmer
=========
The confirmer is responsible for handling the confirms sent by RabbitMQ.
This is a quite simple module, it only reacts and does not trigger any
other actions itself.
It has a stack of unconfirmed messages (which is filled by the feeder,
it puts every message it has successfully published into that stack).
For each confirm, it must check what kind of confirm it is (ack/nack, single/multiple),
and act accordingly. Confirmed messages and their delivery numbers must be deleted
from the stack. Unconfirmed messages remain. Nacked messages are stored in an
extra stack.
The unconfirmed messages can be retrieved from the confirmer to be republished.
API:
* on_delivery_confirmation() is called by RabbitMQ.
* reset_unconfirmed_messages_and_delivery_tags() called by builder, during reconnection
* get_unconfirmed_messages_as_list_copy() called by builder, during reconnection
* put_to_unconfirmed_delivery_tags() called by feeder, to fill the stack
* put_to_unconfirmed_messages_dict() called by feeder, to fill the stack
'''
class Confirmer(object):
    """Tracks published messages until RabbitMQ confirms (acks/nacks) them.

    See the module docstring above for the overall role of this class.
    Invariant: every delivery tag in __unconfirmed_delivery_tags has a
    matching entry (keyed by str(tag)) in __unconfirmed_messages_dict -
    both are filled together by the feeder via the two put_* methods.
    """
    def __init__(self):
        # Logging:
        self.__first_confirm_receival = True
        self.__logcounter = 1
        self.__LOGFREQUENCY = 10
        # Stacks of unconfirmed/nacked messages:
        self.__unconfirmed_delivery_tags = [] # only accessed internally
        self.__unconfirmed_messages_dict = {} # dict, because I need to retrieve them by delivery tag (on ack/nack)
        self.__nacked_messages = [] # only accessed internally, and from outside after thread is dead
    '''
    Callback, called by RabbitMQ.
    '''
    def on_delivery_confirmation(self, method_frame):
        deliv_tag, confirmation_type, multiple = self.__get_confirm_info(method_frame)
        log_every_x_times(LOGGER, self.__logcounter, self.__LOGFREQUENCY, 'Received a confirm (%s)', confirmation_type)
        self.__logcounter += 1
        if confirmation_type == 'ack':
            logtrace(LOGGER, 'Received "ACK" from messaging service.')
            self.__react_on_ack(deliv_tag, multiple)
        elif confirmation_type == 'nack':
            logtrace(LOGGER, 'Received "NACK" from messaging service.')
            self.__react_on_nack(deliv_tag, multiple)
        else:
            msg = 'Received asynchronous response of unknown type from messaging service.'
            logwarn(LOGGER, msg)
            raise UnknownServerResponse(msg+':'+str(method_frame))
            # This should never happen, unless if I parse the server's response wrongly.
    def __react_on_ack(self, deliv_tag, multiple):
        if self.__first_confirm_receival:
            self.__first_confirm_receival = False
            loginfo(LOGGER, 'Received first message confirmation from RabbitMQ.')
        if multiple:
            # "multiple" acks confirm this tag and every lower one
            logtrace(LOGGER, 'Received "ACK" for multiple messages from messaging service.')
            self.__react_on_multiple_delivery_ack(deliv_tag)
        else:
            logtrace(LOGGER, 'Received "ACK" for single message from messaging service.')
            self.__react_on_single_delivery_ack(deliv_tag)
    def __react_on_nack(self, deliv_tag, multiple):
        if multiple:
            logwarn(LOGGER, 'Received "NACK" for delivery tag: %i and below.', deliv_tag)
            self.__nack_delivery_tag_and_message_several(deliv_tag)
        else:
            logwarn(LOGGER, 'Received "NACK" for delivery tag: %i.', deliv_tag)
            self.__nack_delivery_tag_and_message_single(deliv_tag)
    def __nack_delivery_tag_and_message_single(self, deliv_tag):
        # Move the message from the unconfirmed stack to the nacked stack.
        # NOTE(review): unlike the ack path, a tag that is no longer in the
        # stacks is not caught here (pop/remove would raise KeyError /
        # ValueError) - presumably nacks always refer to known tags; confirm.
        msg = self.__unconfirmed_messages_dict.pop(str(deliv_tag))
        self.__nacked_messages.append(msg)
        self.__unconfirmed_delivery_tags.remove(deliv_tag)
    def __nack_delivery_tag_and_message_several(self, deliv_tag):
        # Iterate over a shallow copy, as the helper mutates the list
        for candidate_deliv_tag in copy.copy(self.__unconfirmed_delivery_tags):
            if candidate_deliv_tag <= deliv_tag:
                self.__nack_delivery_tag_and_message_single(candidate_deliv_tag)
    def __get_confirm_info(self, method_frame):
        # Extract (delivery_tag, "ack"/"nack", multiple) from the pika frame
        try:
            deliv_tag = method_frame.method.delivery_tag # integer
            confirmation_type = method_frame.method.NAME.split('.')[1].lower() # "ack" or "nack" or ...
            multiple = method_frame.method.multiple # Boolean
            return deliv_tag, confirmation_type, multiple
        except AttributeError as e:
            raise UnknownServerResponse(str(method_frame)+' - '+repr(e))
        except IndexError as e:
            raise UnknownServerResponse(str(method_frame)+' - '+repr(e))
    def __react_on_single_delivery_ack(self, deliv_tag):
        self.__remove_delivery_tag_and_message_single(deliv_tag)
        logdebug(LOGGER, 'Received ack for delivery tag %i. Waiting for %i confirms.', deliv_tag, len(self.__unconfirmed_delivery_tags))
        logtrace(LOGGER, 'Received ack for delivery tag %i.', deliv_tag)
        logtrace(LOGGER, 'Now left in queue to be confirmed: %i messages.', len(self.__unconfirmed_delivery_tags))
    def __react_on_multiple_delivery_ack(self, deliv_tag):
        self.__remove_delivery_tag_and_message_several(deliv_tag)
        logdebug(LOGGER, 'Received ack for delivery tag %i and all below. Waiting for %i confirms.', deliv_tag, len(self.__unconfirmed_delivery_tags))
        logtrace(LOGGER, 'Received ack for delivery tag %i and all below.', deliv_tag)
        logtrace(LOGGER, 'Now left in queue to be confirmed: %i messages.', len(self.__unconfirmed_delivery_tags))
    def __remove_delivery_tag_and_message_single(self, deliv_tag):
        try:
            # If remove() fails (unknown tag), the dict pop is skipped too -
            # both containers are always appended to together by the feeder.
            self.__unconfirmed_delivery_tags.remove(deliv_tag)
            ms = self.__unconfirmed_messages_dict.pop(str(deliv_tag))
            logtrace(LOGGER, 'Received ack for message %s.', ms)
        except ValueError as e:
            logdebug(LOGGER, 'Could not remove %i from unconfirmed.', deliv_tag)
    def __remove_delivery_tag_and_message_several(self, deliv_tag):
        # Iterate over a shallow copy, as the helper mutates the list
        for candidate_deliv_tag in copy.copy(self.__unconfirmed_delivery_tags):
            if candidate_deliv_tag <= deliv_tag:
                self.__remove_delivery_tag_and_message_single(candidate_deliv_tag)
    ''' Called by unit test.'''
    def get_num_unconfirmed(self):
        return len(self.__unconfirmed_messages_dict)
    ''' Called by unit test.'''
    def get_copy_of_unconfirmed_tags(self):
        return self.__unconfirmed_delivery_tags[:]
    '''
    Called by the main thread, for rescuing, after joining.
    And by unit test.
    '''
    def get_copy_of_nacked(self):
        return self.__nacked_messages[:]
    '''
    Called by feeder, to let the confirmer know which had been sent.
    '''
    def put_to_unconfirmed_delivery_tags(self, delivery_tag):
        logtrace(LOGGER, 'Adding delivery tag %i to unconfirmed.', delivery_tag)
        self.__unconfirmed_delivery_tags.append(delivery_tag)
    '''
    Called by feeder, to let the confirmer know which had been sent.
    '''
    def put_to_unconfirmed_messages_dict(self, delivery_tag, msg):
        logtrace(LOGGER, 'Adding message with delivery tag %i to unconfirmed: %s', delivery_tag, msg)
        self.__unconfirmed_messages_dict[str(delivery_tag)] = msg
    '''
    This resets which messages had not be confirmed yet.
    IMPORTANT:
    Before doing this, retrieve the unconfirmed messages
    and republish them! Otherwise, they will be lost!
    (We do not do this here, as we have no reference to
    the builder module, to avoid circular references).
    Called by builder, during reconnection.
    After a reconnection, no more confirms can be received,
    so the messages that are unconfirmed now cannot be
    confirmed any more.
    Any new confirms that arrive after a reconnection will
    carry new delivery tags (as they start over at one),
    so we need to reset the old ones, otherwise we'll take
    the wrong messages as confirmed.
    From RabbitMQ docs:
    "The server-assigned and channel-specific delivery tag. The delivery tag is valid only within the channel from which the message was received. [...] The server MUST NOT use a zero value for delivery tags."
    See: https://www.rabbitmq.com/amqp-0-9-1-reference.html
    '''
    def reset_unconfirmed_messages_and_delivery_tags(self):
        self.__unconfirmed_delivery_tags = []
        self.__unconfirmed_messages_dict = {}
    '''
    Called by builder, during reconnection,
    to rescue these unconfirmed messages, in order to
    republish them, as soon as a new connection is
    available.
    Also called by the main thread, for rescuing messages
    after joining.
    As dict objects are not thread-safe, better call
    this only after joining.
    '''
    def get_unconfirmed_messages_as_list_copy(self):
        newlist = []
        for deliv_tag,message in self.__unconfirmed_messages_dict.items():
            newlist.append(message)
        return newlist
| apache-2.0 |
15Dkatz/pants | tests/python/pants_test/goal/test_products.py | 4 | 4421 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from contextlib import contextmanager
from pants.goal.products import MultipleRootedProducts, ProductError, Products
from pants.util.contextutil import temporary_dir
from pants.util.dirutil import safe_open
from pants_test.base_test import BaseTest
class ProductsTest(BaseTest):
    """Unit tests for pants' Products registry (require/get vs.
    require_data/get_data namespaces, and emptiness checks)."""
    def setUp(self):
        super(ProductsTest, self).setUp()
        self.products = Products()
    def test_require(self):
        self.products.require('foo')
        self.assertTrue(self.products.isrequired('foo'))
        self.assertFalse(self.products.isrequired('bar'))
        # require should not cross-contaminate require_data
        self.assertFalse(self.products.is_required_data('foo'))
        self.assertFalse(self.products.is_required_data('bar'))
    def test_get(self):
        # get() must be memoized: same mapping object on repeated calls
        foo_product_mapping1 = self.products.get('foo')
        foo_product_mapping2 = self.products.get('foo')
        self.assertIsInstance(foo_product_mapping1, Products.ProductMapping)
        self.assertIs(foo_product_mapping1, foo_product_mapping2)
    def test_get_does_not_require(self):
        self.assertFalse(self.products.isrequired('foo'))
        self.products.get('foo')
        self.assertFalse(self.products.isrequired('foo'))
        self.products.require('foo')
        self.assertTrue(self.products.isrequired('foo'))
    def test_require_data(self):
        self.products.require_data('foo')
        self.assertTrue(self.products.is_required_data('foo'))
        self.assertFalse(self.products.is_required_data('bar'))
        # require_data should not cross-contaminate require
        self.assertFalse(self.products.isrequired('foo'))
        self.assertFalse(self.products.isrequired('bar'))
    def test_get_data(self):
        # get_data() without a factory returns None; with a factory it is memoized
        self.assertIsNone(self.products.get_data('foo'))
        data1 = self.products.get_data('foo', dict)
        data2 = self.products.get_data('foo', dict)
        self.assertIsInstance(data1, dict)
        self.assertIs(data1, data2)
    def test_get_data_does_not_require_data(self):
        self.assertFalse(self.products.is_required_data('foo'))
        self.products.get_data('foo')
        self.assertFalse(self.products.is_required_data('foo'))
        self.products.require_data('foo')
        self.assertTrue(self.products.is_required_data('foo'))
    def test_register_data(self):
        # Double registration under the same key must raise
        data = {}
        self.assertIs(data, self.products.register_data('foo', data))
        with self.assertRaises(ProductError):
            self.products.register_data('foo', data)
    def test_empty_products(self):
        foo_product_mapping = self.products.get('foo')
        self.assertFalse(foo_product_mapping)
    @contextmanager
    def add_products(self, context_products, product_type, target, *products):
        # Creates one file per product name in a temp dir and registers them
        product_mapping = context_products.get(product_type)
        with temporary_dir() as outdir:
            def create_product(product):
                with safe_open(os.path.join(outdir, product), mode='w') as fp:
                    fp.write(product)
                return product
            product_mapping.add(target, outdir, map(create_product, products))
            # NOTE(review): yields the temporary_dir *factory*, not outdir;
            # all callers ignore the value, but `yield outdir` was likely
            # intended - confirm before relying on the yielded value.
            yield temporary_dir
    def test_non_empty_products(self):
        target = self.make_target('c')
        with self.add_products(self.products, 'foo', target, 'a.class'):
            foo_product_mapping = self.products.get('foo')
            self.assertTrue(foo_product_mapping)
    def test_empty_data(self):
        foo_product_mapping = self.products.get_data('foo')
        self.assertFalse(foo_product_mapping)
    @contextmanager
    def add_data(self, context_products, data_type, target, *products):
        # Same as add_products, but for the data-product namespace
        make_products = lambda: defaultdict(MultipleRootedProducts)
        data_by_target = context_products.get_data(data_type, make_products)
        with temporary_dir() as outdir:
            def create_product(product):
                abspath = os.path.join(outdir, product)
                with safe_open(abspath, mode='w') as fp:
                    fp.write(product)
                return abspath
            data_by_target[target].add_abs_paths(outdir, map(create_product, products))
            # NOTE(review): same as in add_products - yields the factory,
            # not outdir; callers ignore the value.
            yield temporary_dir
    def test_non_empty_data(self):
        target = self.make_target('c')
        with self.add_data(self.products, 'foo', target, 'a.class'):
            foo_product_mapping = self.products.get_data('foo')
            self.assertTrue(foo_product_mapping)
| apache-2.0 |
EliotBerriot/django | tests/model_fields/test_uuid.py | 119 | 6590 | import json
import uuid
from django.core import exceptions, serializers
from django.db import IntegrityError, models
from django.test import (
SimpleTestCase, TestCase, TransactionTestCase, skipUnlessDBFeature,
)
from .models import (
NullableUUIDModel, PrimaryKeyUUIDModel, RelatedToUUIDModel, UUIDGrandchild,
UUIDModel,
)
class TestSaveLoad(TestCase):
    """Round-trip save/load behavior of UUIDField values (Django tests)."""
    def test_uuid_instance(self):
        instance = UUIDModel.objects.create(field=uuid.uuid4())
        loaded = UUIDModel.objects.get()
        self.assertEqual(loaded.field, instance.field)
    def test_str_instance_no_hyphens(self):
        UUIDModel.objects.create(field='550e8400e29b41d4a716446655440000')
        loaded = UUIDModel.objects.get()
        self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000'))
    def test_str_instance_hyphens(self):
        UUIDModel.objects.create(field='550e8400-e29b-41d4-a716-446655440000')
        loaded = UUIDModel.objects.get()
        self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000'))
    def test_str_instance_bad_hyphens(self):
        # Hyphens in non-standard positions are tolerated on input
        UUIDModel.objects.create(field='550e84-00-e29b-41d4-a716-4-466-55440000')
        loaded = UUIDModel.objects.get()
        self.assertEqual(loaded.field, uuid.UUID('550e8400e29b41d4a716446655440000'))
    def test_null_handling(self):
        NullableUUIDModel.objects.create(field=None)
        loaded = NullableUUIDModel.objects.get()
        self.assertEqual(loaded.field, None)
    def test_pk_validated(self):
        # Non-UUID-convertible lookup values must raise TypeError
        with self.assertRaisesMessage(TypeError, 'is not a valid UUID'):
            PrimaryKeyUUIDModel.objects.get(pk={})
        with self.assertRaisesMessage(TypeError, 'is not a valid UUID'):
            PrimaryKeyUUIDModel.objects.get(pk=[])
    def test_wrong_value(self):
        # Malformed hex strings raise ValueError on lookup and on create
        self.assertRaisesMessage(
            ValueError, 'badly formed hexadecimal UUID string',
            UUIDModel.objects.get, field='not-a-uuid')
        self.assertRaisesMessage(
            ValueError, 'badly formed hexadecimal UUID string',
            UUIDModel.objects.create, field='not-a-uuid')
class TestMigrations(SimpleTestCase):
    """UUIDField deconstruction for migrations."""
    def test_deconstruct(self):
        # A default UUIDField should deconstruct with no extra kwargs
        field = models.UUIDField()
        name, path, args, kwargs = field.deconstruct()
        self.assertEqual(kwargs, {})
class TestQuerying(TestCase):
    """Lookup behavior (exact, isnull) on a nullable UUIDField."""
    def setUp(self):
        # Three fixtures: a random UUID, a fixed UUID (given as string), and NULL
        self.objs = [
            NullableUUIDModel.objects.create(field=uuid.uuid4()),
            NullableUUIDModel.objects.create(field='550e8400e29b41d4a716446655440000'),
            NullableUUIDModel.objects.create(field=None),
        ]
    def test_exact(self):
        self.assertSequenceEqual(
            NullableUUIDModel.objects.filter(field__exact='550e8400e29b41d4a716446655440000'),
            [self.objs[1]]
        )
    def test_isnull(self):
        self.assertSequenceEqual(
            NullableUUIDModel.objects.filter(field__isnull=True),
            [self.objs[2]]
        )
class TestSerialization(SimpleTestCase):
    """JSON serialization/deserialization of UUIDField values."""
    # Expected serialized form: UUIDs serialize in hyphenated string format
    test_data = (
        '[{"fields": {"field": "550e8400-e29b-41d4-a716-446655440000"}, '
        '"model": "model_fields.uuidmodel", "pk": null}]'
    )
    def test_dumping(self):
        instance = UUIDModel(field=uuid.UUID('550e8400e29b41d4a716446655440000'))
        data = serializers.serialize('json', [instance])
        self.assertEqual(json.loads(data), json.loads(self.test_data))
    def test_loading(self):
        instance = list(serializers.deserialize('json', self.test_data))[0].object
        self.assertEqual(instance.field, uuid.UUID('550e8400-e29b-41d4-a716-446655440000'))
class TestValidation(SimpleTestCase):
    """Form/model validation of UUIDField input."""
    def test_invalid_uuid(self):
        field = models.UUIDField()
        with self.assertRaises(exceptions.ValidationError) as cm:
            field.clean('550e8400', None)
        self.assertEqual(cm.exception.code, 'invalid')
        self.assertEqual(cm.exception.message % cm.exception.params, "'550e8400' is not a valid UUID.")
    def test_uuid_instance_ok(self):
        field = models.UUIDField()
        field.clean(uuid.uuid4(), None)  # no error
class TestAsPrimaryKey(TestCase):
    """UUIDField used as primary key: auto-generation, bulk_create, FKs."""
    def test_creation(self):
        PrimaryKeyUUIDModel.objects.create()
        loaded = PrimaryKeyUUIDModel.objects.get()
        self.assertIsInstance(loaded.pk, uuid.UUID)
    def test_uuid_pk_on_save(self):
        saved = PrimaryKeyUUIDModel.objects.create(id=None)
        loaded = PrimaryKeyUUIDModel.objects.get()
        # NOTE(review): the second argument of assertIsNotNone is the failure
        # *message*, not a comparison value - passing None here is harmless
        # but misleading.
        self.assertIsNotNone(loaded.id, None)
        self.assertEqual(loaded.id, saved.id)
    def test_uuid_pk_on_bulk_create(self):
        u1 = PrimaryKeyUUIDModel()
        u2 = PrimaryKeyUUIDModel(id=None)
        PrimaryKeyUUIDModel.objects.bulk_create([u1, u2])
        # Check that the two objects were correctly created.
        u1_found = PrimaryKeyUUIDModel.objects.filter(id=u1.id).exists()
        u2_found = PrimaryKeyUUIDModel.objects.exclude(id=u1.id).exists()
        self.assertTrue(u1_found)
        self.assertTrue(u2_found)
        self.assertEqual(PrimaryKeyUUIDModel.objects.count(), 2)
    def test_underlying_field(self):
        pk_model = PrimaryKeyUUIDModel.objects.create()
        RelatedToUUIDModel.objects.create(uuid_fk=pk_model)
        related = RelatedToUUIDModel.objects.get()
        self.assertEqual(related.uuid_fk.pk, related.uuid_fk_id)
    def test_update_with_related_model_instance(self):
        # regression for #24611
        u1 = PrimaryKeyUUIDModel.objects.create()
        u2 = PrimaryKeyUUIDModel.objects.create()
        r = RelatedToUUIDModel.objects.create(uuid_fk=u1)
        RelatedToUUIDModel.objects.update(uuid_fk=u2)
        r.refresh_from_db()
        self.assertEqual(r.uuid_fk, u2)
    def test_update_with_related_model_id(self):
        u1 = PrimaryKeyUUIDModel.objects.create()
        u2 = PrimaryKeyUUIDModel.objects.create()
        r = RelatedToUUIDModel.objects.create(uuid_fk=u1)
        RelatedToUUIDModel.objects.update(uuid_fk=u2.pk)
        r.refresh_from_db()
        self.assertEqual(r.uuid_fk, u2)
    def test_two_level_foreign_keys(self):
        # exercises ForeignKey.get_db_prep_value()
        UUIDGrandchild().save()
class TestAsPrimaryKeyTransactionTests(TransactionTestCase):
    # Need a TransactionTestCase to avoid deferring FK constraint checking.
    available_apps = ['model_fields']
    @skipUnlessDBFeature('supports_foreign_keys')
    def test_unsaved_fk(self):
        # Referencing an unsaved object via FK must violate the constraint
        u1 = PrimaryKeyUUIDModel()
        with self.assertRaises(IntegrityError):
            RelatedToUUIDModel.objects.create(uuid_fk=u1)
| bsd-3-clause |
thaumos/ansible | lib/ansible/modules/network/fortimanager/fmgr_ha.py | 38 | 13484 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
"metadata_version": "1.1",
"status": ["preview"],
"supported_by": "community"
}
DOCUMENTATION = '''
---
module: fmgr_ha
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Manages the High-Availability State of FortiManager Clusters and Nodes.
description: Change HA state or settings of FortiManager nodes (Standalone/Master/Slave).
options:
fmgr_ha_mode:
description:
- Sets the role of the FortiManager host for HA.
required: false
choices: ["standalone", "master", "slave"]
fmgr_ha_peer_ipv4:
description:
- Sets the IPv4 address of a HA peer.
required: false
fmgr_ha_peer_ipv6:
description:
- Sets the IPv6 address of a HA peer.
required: false
fmgr_ha_peer_sn:
description:
- Sets the HA Peer Serial Number.
required: false
fmgr_ha_peer_status:
description:
- Sets the peer status to enable or disable.
required: false
choices: ["enable", "disable"]
fmgr_ha_cluster_pw:
description:
- Sets the password for the HA cluster. Only required once. System remembers between HA mode switches.
required: false
fmgr_ha_cluster_id:
description:
- Sets the ID number of the HA cluster. Defaults to 1.
required: false
default: 1
fmgr_ha_hb_threshold:
description:
- Sets heartbeat lost threshold (1-255).
required: false
default: 3
fmgr_ha_hb_interval:
description:
- Sets the heartbeat interval (1-255).
required: false
default: 5
fmgr_ha_file_quota:
description:
- Sets the File quota in MB (2048-20480).
required: false
default: 4096
'''
EXAMPLES = '''
- name: SET FORTIMANAGER HA NODE TO MASTER
fmgr_ha:
fmgr_ha_mode: "master"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
- name: SET FORTIMANAGER HA NODE TO SLAVE
fmgr_ha:
fmgr_ha_mode: "slave"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
- name: SET FORTIMANAGER HA NODE TO STANDALONE
fmgr_ha:
fmgr_ha_mode: "standalone"
- name: ADD FORTIMANAGER HA PEER
fmgr_ha:
fmgr_ha_peer_ipv4: "192.168.1.254"
fmgr_ha_peer_sn: "FMG-VM1234567890"
fmgr_ha_peer_status: "enable"
- name: CREATE CLUSTER ON MASTER
fmgr_ha:
fmgr_ha_mode: "master"
fmgr_ha_cluster_pw: "fortinet"
fmgr_ha_cluster_id: "1"
fmgr_ha_hb_threshold: "10"
fmgr_ha_hb_interval: "15"
fmgr_ha_file_quota: "2048"
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import FMGRMethods
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def fmgr_set_ha_mode(fmgr, paramgram):
    """
    Set the HA mode (standalone/master/slave) on the FortiManager.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict

    :return: The response from the FortiManager
    :rtype: dict
    """
    # INIT A BASIC OBJECTS
    response = DEFAULT_RESULT_OBJ
    url = ""
    datagram = {}
    # NOTE(review): both branches call paramgram["fmgr_ha_mode"].lower() -
    # an AttributeError is possible if the mode is None while a cluster
    # password is set (the module's required_if rules should prevent this;
    # confirm).
    if paramgram["fmgr_ha_cluster_pw"] is not None and str(paramgram["fmgr_ha_mode"].lower()) != "standalone":
        # master/slave: the cluster password is part of the payload
        datagram = {
            "mode": paramgram["fmgr_ha_mode"],
            "file-quota": paramgram["fmgr_ha_file_quota"],
            "hb-interval": paramgram["fmgr_ha_hb_interval"],
            "hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"],
            "password": paramgram["fmgr_ha_cluster_pw"],
            "clusterid": paramgram["fmgr_ha_cluster_id"]
        }
    elif str(paramgram["fmgr_ha_mode"].lower()) == "standalone":
        # standalone: same payload without the password
        datagram = {
            "mode": paramgram["fmgr_ha_mode"],
            "file-quota": paramgram["fmgr_ha_file_quota"],
            "hb-interval": paramgram["fmgr_ha_hb_interval"],
            "hb-lost-threshold": paramgram["fmgr_ha_hb_threshold"],
            "clusterid": paramgram["fmgr_ha_cluster_id"]
        }
    url = '/cli/global/system/ha'
    response = fmgr.process_request(url, datagram, FMGRMethods.SET)
    return response
def fmgr_get_ha_peer_list(fmgr):
    """
    Retrieve the list of configured HA peers from the FortiManager.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object

    :return: The response from the FortiManager
    :rtype: dict
    """
    # A GET on this endpoint needs no request body.
    # (Cleanup: removed an unused local "paramgram" and a dead initial
    # assignment of "response"; the docstring no longer documents a
    # paramgram parameter that the function never had.)
    datagram = {}
    url = '/cli/global/system/ha/peer/'
    response = fmgr.process_request(url, datagram, FMGRMethods.GET)
    return response
def fmgr_set_ha_peer(fmgr, paramgram):
    """
    Create or update an HA peer entry on the FortiManager.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict

    :return: The response from the FortiManager
    :rtype: dict
    """
    # NOTE(review): "peer_id" is not part of the module's argument_spec -
    # it is presumably added to paramgram by the caller (main) before this
    # function is invoked; a KeyError is raised otherwise. Confirm.
    datagram = {
        "ip": paramgram["fmgr_ha_peer_ipv4"],
        "ip6": paramgram["fmgr_ha_peer_ipv6"],
        "serial-number": paramgram["fmgr_ha_peer_sn"],
        "status": paramgram["fmgr_ha_peer_status"],
        "id": paramgram["peer_id"]
    }
    url = '/cli/global/system/ha/peer/'
    response = fmgr.process_request(url, datagram, FMGRMethods.SET)
    return response
def main():
    """Ansible module entry point: manage FortiManager HA mode and HA peers.

    Builds the argument spec, connects to the FortiManager over the httpapi
    socket, then (1) applies the requested HA mode when one was supplied and
    (2) adds/updates an HA peer entry when any peer option was supplied.
    Exits via module.exit_json / module.fail_json.
    """
    argument_spec = dict(
        fmgr_ha_mode=dict(required=False, type="str", choices=["standalone", "master", "slave"]),
        fmgr_ha_cluster_pw=dict(required=False, type="str", no_log=True),
        fmgr_ha_peer_status=dict(required=False, type="str", choices=["enable", "disable"]),
        fmgr_ha_peer_sn=dict(required=False, type="str"),
        fmgr_ha_peer_ipv4=dict(required=False, type="str"),
        fmgr_ha_peer_ipv6=dict(required=False, type="str"),
        fmgr_ha_hb_threshold=dict(required=False, type="int", default=3),
        fmgr_ha_hb_interval=dict(required=False, type="int", default=5),
        fmgr_ha_file_quota=dict(required=False, type="int", default=4096),
        fmgr_ha_cluster_id=dict(required=False, type="int", default=1)
    )
    # Peer options only make sense together; master/slave modes require a
    # cluster password and id.
    required_if = [
        ['fmgr_ha_peer_ipv4', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']],
        ['fmgr_ha_peer_ipv6', 'present', ['fmgr_ha_peer_sn', 'fmgr_ha_peer_status']],
        ['fmgr_ha_mode', 'master', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']],
        ['fmgr_ha_mode', 'slave', ['fmgr_ha_cluster_pw', 'fmgr_ha_cluster_id']],
    ]
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, required_if=required_if)
    paramgram = {
        "fmgr_ha_mode": module.params["fmgr_ha_mode"],
        "fmgr_ha_cluster_pw": module.params["fmgr_ha_cluster_pw"],
        "fmgr_ha_peer_status": module.params["fmgr_ha_peer_status"],
        "fmgr_ha_peer_sn": module.params["fmgr_ha_peer_sn"],
        "fmgr_ha_peer_ipv4": module.params["fmgr_ha_peer_ipv4"],
        "fmgr_ha_peer_ipv6": module.params["fmgr_ha_peer_ipv6"],
        "fmgr_ha_hb_threshold": module.params["fmgr_ha_hb_threshold"],
        "fmgr_ha_hb_interval": module.params["fmgr_ha_hb_interval"],
        "fmgr_ha_file_quota": module.params["fmgr_ha_file_quota"],
        "fmgr_ha_cluster_id": module.params["fmgr_ha_cluster_id"],
    }
    module.paramgram = paramgram
    fmgr = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        fmgr = FortiManagerHandler(connection, module)
        fmgr.tools = FMGRCommon()
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    # INIT FLAGS AND COUNTERS
    get_ha_peers = 0
    results = DEFAULT_RESULT_OBJ
    try:
        # Any peer-related option triggers the peer-processing branch below.
        if any(v is not None for v in (paramgram["fmgr_ha_peer_sn"], paramgram["fmgr_ha_peer_ipv4"],
                                       paramgram["fmgr_ha_peer_ipv6"], paramgram["fmgr_ha_peer_status"])):
            get_ha_peers = 1
    except Exception as err:
        raise FMGBaseException(err)
    try:
        # IF HA MODE IS NOT NULL, SWITCH THAT
        if paramgram["fmgr_ha_mode"] is not None:
            if (str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and paramgram["fmgr_ha_cluster_pw"] is not None)\
                    or str.lower(paramgram["fmgr_ha_mode"]) == "standalone":
                results = fmgr_set_ha_mode(fmgr, paramgram)
                fmgr.govern_response(module=module, results=results, stop_on_success=False,
                                     ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
            elif str.lower(paramgram["fmgr_ha_mode"]) != "standalone" and\
                    paramgram["fmgr_ha_mode"] is not None and\
                    paramgram["fmgr_ha_cluster_pw"] is None:
                module.exit_json(msg="If setting HA Mode of MASTER or SLAVE, you must specify a cluster password")
    except Exception as err:
        raise FMGBaseException(err)
    # IF GET_HA_PEERS IS ENABLED, LETS PROCESS THE PEERS
    try:
        if get_ha_peers == 1:
            # GET THE CURRENT LIST OF PEERS FROM THE NODE
            peers = fmgr_get_ha_peer_list(fmgr)
            # GET LENGTH OF RETURNED PEERS LIST AND ADD ONE FOR THE NEXT ID
            paramgram["next_peer_id"] = len(peers[1]) + 1
            # SET THE ACTUAL NUMBER OF PEERS
            num_of_peers = len(peers[1])
            # SET THE PEER ID FOR DISABLE METHOD
            # NOTE(review): this is len(peers) - 1 (the response envelope),
            # not len(peers[1]) (the peer list) -- confirm against a live
            # FortiManager response shape.
            paramgram["peer_id"] = len(peers) - 1
            # SET THE PEER LOOPCOUNT TO 1 TO START THE LOOP
            peer_loopcount = 1
            # LOOP THROUGH PEERS TO FIND THE SERIAL NUMBER MATCH TO GET THE RIGHT PEER ID
            # IDEA BEING WE DON'T WANT TO SUBMIT A BAD peer_id THAT DOESN'T JIVE WITH CURRENT DB ON FMG
            # SO LETS SEARCH FOR IT, AND IF WE FIND IT, WE WILL CHANGE THE PEER ID VARIABLES TO MATCH
            # IF NOT FOUND, LIFE GOES ON AND WE ASSUME THAT WE'RE ADDING A PEER
            # AT WHICH POINT THE next_peer_id VARIABLE WILL HAVE THE RIGHT PRIMARY KEY
            if paramgram["fmgr_ha_peer_sn"] is not None:
                while peer_loopcount <= num_of_peers:
                    # GET THE SERIAL NUMBER FOR CURRENT PEER IN LOOP TO COMPARE TO SN IN PLAYBOOK
                    try:
                        sn_compare = peers[1][peer_loopcount - 1]["serial-number"]
                        # IF THE SN IN THE PEERS MATCHES THE PLAYBOOK SN, SET THE IDS
                        if sn_compare == paramgram["fmgr_ha_peer_sn"]:
                            paramgram["peer_id"] = peer_loopcount
                            paramgram["next_peer_id"] = paramgram["peer_id"]
                    except Exception as err:
                        raise FMGBaseException(err)
                    # ADVANCE THE LOOP AND REPEAT UNTIL DONE
                    peer_loopcount += 1
            # IF THE PEER STATUS ISN'T IN THE PLAYBOOK, ASSUME ITS ENABLE
            if paramgram["fmgr_ha_peer_status"] is None:
                paramgram["fmgr_ha_peer_status"] = "enable"
            # IF THE PEER STATUS IS ENABLE, USE THE next_peer_id IN THE API CALL FOR THE ID
            # NOTE(review): both branches below call fmgr_set_ha_peer, which
            # reads paramgram["peer_id"], not "next_peer_id" -- verify the
            # enable path really uses the intended id.
            if paramgram["fmgr_ha_peer_status"] == "enable":
                results = fmgr_set_ha_peer(fmgr, paramgram)
                fmgr.govern_response(module=module, results=results, stop_on_success=True,
                                     ansible_facts=fmgr.construct_ansible_facts(results,
                                                                                module.params, paramgram))
            # IF THE PEER STATUS IS DISABLE, WE HAVE TO HANDLE THAT A BIT DIFFERENTLY
            # JUST USING TWO DIFFERENT peer_id 's HERE
            if paramgram["fmgr_ha_peer_status"] == "disable":
                results = fmgr_set_ha_peer(fmgr, paramgram)
                fmgr.govern_response(module=module, results=results, stop_on_success=True,
                                     ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
    except Exception as err:
        raise FMGBaseException(err)
    return module.exit_json(**results[1])
# Module entry point when executed directly by Ansible.
if __name__ == "__main__":
    main()
| gpl-3.0 |
NTAWolf/pyExamPrepper | interfaces/base_interface.py | 1 | 3185 | # encoding: utf-8
class QuizInterfaceBase(object):
    """This is the base user interface for an ExamPrepper quiz.

    Extend and implement it in subclasses, catering to different views.
    All methods except set_media_folder are abstract and raise
    NotImplementedError. (Renamed the self parameter from ``s`` to the
    conventional ``self``; positional-only, so subclasses and callers are
    unaffected.)
    """

    def set_media_folder(self, path):
        """Remember where question media files live on disk."""
        self.media_folder = path

    def select_categories(self, categories):
        """Allow the user to pick categories.
        Returns a list of selected categories.
        """
        raise NotImplementedError('select_categories is an abstract method - implement it yourself!')

    def select_ordering(self, order_options):
        """order_options is a list of methods, each defining a type of ordering.
        Each method has an instructive docstring, which can be used to explain it
        to a user.
        Allow user to pick an ordering of the questions and categories.
        Returns one of the methods in order_options
        """
        raise NotImplementedError('select_ordering is an abstract method - implement it yourself!')

    def select_repetition_lag(self):
        """Allow user to select how many questions should pass before
        a previously failed question is asked again.
        Returns an integer or a two-tuple of integers.
        In the future, this might change to a range, allowing for some randomness.
        Put the decision in self.repetition_lag, and set it to a negative value
        to just put the failed question at the end of the queue.
        """
        raise NotImplementedError('select_repetition_lag is an abstract method - implement it yourself!')

    def show_current_info(self, quiz_conductor):
        """Display whatever info you think the user would like to see.
        quiz_conductor is the object in charge of the quiz, and
        can give a lot of different information about it.
        Called before display_question.
        """
        raise NotImplementedError('show_current_info is an abstract method - implement it yourself!')

    def show_question(self, qa):
        """Present the given question to the user.
        """
        raise NotImplementedError('show_question is an abstract method - implement it yourself!')

    def show_answer(self, qa):
        """Show the reference answer to the user.
        """
        raise NotImplementedError('show_answer is an abstract method - implement it yourself!')

    def get_response(self):
        """Returns the user's response to the current question
        """
        raise NotImplementedError('get_response is an abstract method - implement it yourself!')

    def get_evaluation(self):
        """Returns the user's evaluation of their own current response.
        True for a correct response, False for incorrect.
        """
        raise NotImplementedError('get_evaluation is an abstract method - implement it yourself!')

    def end_of_quiz(self, end_options):
        """Tell the user that the quiz is over
        end_options is a list of strings describing what options
        the user may take now for restarting the quiz.
        Returns a list of indices of the end_options list.
        """
        raise NotImplementedError('end_of_quiz is an abstract method - implement it yourself!')
# End of class QuizInterfaceBase | gpl-2.0 |
deepmind/dm_control | dm_control/locomotion/walkers/rodent.py | 1 | 12164 | # Copyright 2020 The dm_control Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""A Rodent walker."""
import os
import re
from dm_control import composer
from dm_control import mjcf
from dm_control.composer.observation import observable
from dm_control.locomotion.walkers import base
from dm_control.locomotion.walkers import legacy_base
from dm_control.mujoco import wrapper as mj_wrapper
import numpy as np
# Path to the rodent MJCF model, relative to this file.
_XML_PATH = os.path.join(os.path.dirname(__file__),
                         'assets/rodent.xml')

# Canonical ordering of the rat's joints used when consuming motion-capture
# (mocap) data; Rat.mocap_joints resolves these names against the model.
_RAT_MOCAP_JOINTS = [
    'vertebra_1_extend', 'vertebra_2_bend', 'vertebra_3_twist',
    'vertebra_4_extend', 'vertebra_5_bend', 'vertebra_6_twist',
    'hip_L_supinate', 'hip_L_abduct', 'hip_L_extend', 'knee_L', 'ankle_L',
    'toe_L', 'hip_R_supinate', 'hip_R_abduct', 'hip_R_extend', 'knee_R',
    'ankle_R', 'toe_R', 'vertebra_C1_extend', 'vertebra_C1_bend',
    'vertebra_C2_extend', 'vertebra_C2_bend', 'vertebra_C3_extend',
    'vertebra_C3_bend', 'vertebra_C4_extend', 'vertebra_C4_bend',
    'vertebra_C5_extend', 'vertebra_C5_bend', 'vertebra_C6_extend',
    'vertebra_C6_bend', 'vertebra_C7_extend', 'vertebra_C9_bend',
    'vertebra_C11_extend', 'vertebra_C13_bend', 'vertebra_C15_extend',
    'vertebra_C17_bend', 'vertebra_C19_extend', 'vertebra_C21_bend',
    'vertebra_C23_extend', 'vertebra_C25_bend', 'vertebra_C27_extend',
    'vertebra_C29_bend', 'vertebra_cervical_5_extend',
    'vertebra_cervical_4_bend', 'vertebra_cervical_3_twist',
    'vertebra_cervical_2_extend', 'vertebra_cervical_1_bend',
    'vertebra_axis_twist', 'vertebra_atlant_extend', 'atlas', 'mandible',
    'scapula_L_supinate', 'scapula_L_abduct', 'scapula_L_extend', 'shoulder_L',
    'shoulder_sup_L', 'elbow_L', 'wrist_L', 'finger_L', 'scapula_R_supinate',
    'scapula_R_abduct', 'scapula_R_extend', 'shoulder_R', 'shoulder_sup_R',
    'elbow_R', 'wrist_R', 'finger_R'
]

# Root position and orientation (unit quaternion) of the upright reset pose.
_UPRIGHT_POS = (0.0, 0.0, 0.0)
_UPRIGHT_QUAT = (1., 0., 0., 0.)
# Scale used to squash torque-sensor readings via tanh in the observables.
_TORQUE_THRESHOLD = 60
class Rat(legacy_base.Walker):
    """A position-controlled rat with control range scaled to [-1, 1]."""

    def _build(self,
               params=None,
               name='walker',
               torque_actuators=False,
               foot_mods=False,
               initializer=None):
        """Build the rat walker.

        Args:
            params: dict of walker parameters; `standing_height` and
                `_xml_path` read `_STAND_HEIGHT` / `_XML_PATH` keys from it.
            name: model name to assign to the MJCF root (falsy to keep the
                name from the XML file).
            torque_actuators: if True, convert position actuators to direct
                torque actuators.
            foot_mods: if True, widen the ankle and toe joint ranges.
            initializer: forwarded to the base walker.
        """
        self.params = params
        self._mjcf_root = mjcf.from_path(_XML_PATH)
        if name:
            self._mjcf_root.model = name
        self.body_sites = []
        super()._build(initializer=initializer)
        # modify actuators: drop the position-servo bias terms and gain so
        # the control signal maps directly to force (scaled by forcerange).
        if torque_actuators:
            for actuator in self._mjcf_root.find_all('actuator'):
                actuator.gainprm = [actuator.forcerange[1]]
                del actuator.biastype
                del actuator.biasprm
        # modify ankle and toe limits
        if foot_mods:
            self._mjcf_root.find('default', 'ankle').joint.range = [-0.1, 2.]
            self._mjcf_root.find('default', 'toe').joint.range = [-0.7, 0.87]

    @property
    def upright_pose(self):
        """Reset pose to upright position."""
        return base.WalkerPose(xpos=_UPRIGHT_POS, xquat=_UPRIGHT_QUAT)

    @property
    def mjcf_model(self):
        """Return the model root."""
        return self._mjcf_root

    @composer.cached_property
    def actuators(self):
        """Return all actuators."""
        return tuple(self._mjcf_root.find_all('actuator'))

    @composer.cached_property
    def root_body(self):
        """Return the body."""
        return self._mjcf_root.find('body', 'torso')

    @composer.cached_property
    def pelvis_body(self):
        """Return the body."""
        return self._mjcf_root.find('body', 'pelvis')

    @composer.cached_property
    def head(self):
        """Return the head."""
        return self._mjcf_root.find('body', 'skull')

    @composer.cached_property
    def left_arm_root(self):
        """Return the left arm."""
        return self._mjcf_root.find('body', 'scapula_L')

    @composer.cached_property
    def right_arm_root(self):
        """Return the right arm."""
        return self._mjcf_root.find('body', 'scapula_R')

    @composer.cached_property
    def ground_contact_geoms(self):
        """Return ground contact geoms."""
        # Feet, hands and the first caudal vertebra (tail base) geoms.
        return tuple(
            self._mjcf_root.find('body', 'foot_L').find_all('geom') +
            self._mjcf_root.find('body', 'foot_R').find_all('geom') +
            self._mjcf_root.find('body', 'hand_L').find_all('geom') +
            self._mjcf_root.find('body', 'hand_R').find_all('geom') +
            self._mjcf_root.find('body', 'vertebra_C1').find_all('geom')
        )

    @composer.cached_property
    def standing_height(self):
        """Return standing height."""
        # assumes params was provided and contains '_STAND_HEIGHT'.
        return self.params['_STAND_HEIGHT']

    @composer.cached_property
    def end_effectors(self):
        """Return end effectors."""
        return (self._mjcf_root.find('body', 'lower_arm_R'),
                self._mjcf_root.find('body', 'lower_arm_L'),
                self._mjcf_root.find('body', 'foot_R'),
                self._mjcf_root.find('body', 'foot_L'))

    @composer.cached_property
    def observable_joints(self):
        """Return observable joints."""
        return tuple(actuator.joint
                     for actuator in self.actuators  # This lint is mistaken; pylint: disable=not-an-iterable
                     if actuator.joint is not None)

    @composer.cached_property
    def observable_tendons(self):
        # All tendons defined in the model.
        return self._mjcf_root.find_all('tendon')

    @composer.cached_property
    def mocap_joints(self):
        # Joint elements in the canonical mocap order (_RAT_MOCAP_JOINTS).
        return tuple(
            self._mjcf_root.find('joint', name) for name in _RAT_MOCAP_JOINTS)

    @composer.cached_property
    def mocap_joint_order(self):
        return tuple([jnt.name for jnt in self.mocap_joints])  # This lint is mistaken; pylint: disable=not-an-iterable

    @composer.cached_property
    def bodies(self):
        """Return all bodies."""
        return tuple(self._mjcf_root.find_all('body'))

    @composer.cached_property
    def mocap_tracking_bodies(self):
        """Return bodies for mocap comparison."""
        # Exclude vertebra, hand and toe bodies from tracking comparisons.
        return tuple(body for body in self._mjcf_root.find_all('body')
                     if not re.match(r'(vertebra|hand|toe)', body.name))

    @composer.cached_property
    def primary_joints(self):
        """Return primary (non-vertebra) joints."""
        return tuple(jnt for jnt in self._mjcf_root.find_all('joint')
                     if 'vertebra' not in jnt.name)

    @composer.cached_property
    def vertebra_joints(self):
        """Return vertebra joints."""
        return tuple(jnt for jnt in self._mjcf_root.find_all('joint')
                     if 'vertebra' in jnt.name)

    @composer.cached_property
    def primary_joint_order(self):
        """Indices of the primary joints within the mocap joint ordering."""
        joint_names = self.mocap_joint_order
        primary_names = tuple([jnt.name for jnt in self.primary_joints])  # pylint: disable=not-an-iterable
        primary_order = []
        for nm in primary_names:
            primary_order.append(joint_names.index(nm))
        return primary_order

    @composer.cached_property
    def vertebra_joint_order(self):
        """Indices of the vertebra joints within the mocap joint ordering."""
        joint_names = self.mocap_joint_order
        vertebra_names = tuple([jnt.name for jnt in self.vertebra_joints])  # pylint: disable=not-an-iterable
        vertebra_order = []
        for nm in vertebra_names:
            vertebra_order.append(joint_names.index(nm))
        return vertebra_order

    @composer.cached_property
    def egocentric_camera(self):
        """Return the egocentric camera."""
        return self._mjcf_root.find('camera', 'egocentric')

    @property
    def _xml_path(self):
        """Return the path to the model .xml file."""
        # NOTE(review): reads from params, unlike _build which uses the
        # module-level _XML_PATH constant -- confirm both stay in sync.
        return self.params['_XML_PATH']

    @composer.cached_property
    def joint_actuators(self):
        """Return all joint actuators."""
        return tuple([act for act in self._mjcf_root.find_all('actuator')
                      if act.joint])

    @composer.cached_property
    def joint_actuators_range(self):
        """Joint ranges, one per joint actuator, from each joint's dclass."""
        act_joint_range = []
        for act in self.joint_actuators:  # This lint is mistaken; pylint: disable=not-an-iterable
            associated_joint = self._mjcf_root.find('joint', act.name)
            act_range = associated_joint.dclass.joint.range
            act_joint_range.append(act_range)
        return act_joint_range

    def pose_to_actuation(self, pose):
        """Map a mocap-ordered joint pose to normalized actuator commands.

        Tendon actuators (which come first in the actuator vector) receive
        zeros; joint actuators receive the pose rescaled to [-1, 1].
        """
        # holds for joint actuators, find desired torque = 0
        # u_ref = [2 q_ref - (r_low + r_up) ]/(r_up - r_low)
        r_lower = np.array([ajr[0] for ajr in self.joint_actuators_range])  # This lint is mistaken; pylint: disable=not-an-iterable
        r_upper = np.array([ajr[1] for ajr in self.joint_actuators_range])  # This lint is mistaken; pylint: disable=not-an-iterable
        num_tendon_actuators = len(self.actuators) - len(self.joint_actuators)
        tendon_actions = np.zeros(num_tendon_actuators)
        return np.hstack([tendon_actions, (2*pose[self.joint_actuator_order]-
                                           (r_lower+r_upper))/(r_upper-r_lower)])

    @composer.cached_property
    def joint_actuator_order(self):
        """Indices of the joint actuators within the mocap joint ordering."""
        joint_names = self.mocap_joint_order
        joint_actuator_names = tuple([act.name for act in self.joint_actuators])  # This lint is mistaken; pylint: disable=not-an-iterable
        actuator_order = []
        for nm in joint_actuator_names:
            actuator_order.append(joint_names.index(nm))
        return actuator_order

    def _build_observables(self):
        return RodentObservables(self)
class RodentObservables(legacy_base.WalkerObservables):
    """Observables for the Rat."""

    @composer.observable
    def head_height(self):
        """Observe the head height."""
        # z-component of the skull body's Cartesian position.
        return observable.MJCFFeature('xpos', self._entity.head)[2]

    @composer.observable
    def sensors_torque(self):
        """Observe the torque sensors."""
        # Squash raw torques into (-1, 1) with tanh, scaled by
        # _TORQUE_THRESHOLD.
        return observable.MJCFFeature(
            'sensordata',
            self._entity.mjcf_model.sensor.torque,
            corruptor=lambda v, random_state: np.tanh(2 * v / _TORQUE_THRESHOLD)
        )

    @composer.observable
    def tendons_pos(self):
        """Observe tendon lengths."""
        return observable.MJCFFeature('length', self._entity.observable_tendons)

    @composer.observable
    def tendons_vel(self):
        """Observe tendon velocities."""
        return observable.MJCFFeature('velocity', self._entity.observable_tendons)

    @composer.observable
    def actuator_activation(self):
        """Observe the actuator activation."""
        model = self._entity.mjcf_model
        return observable.MJCFFeature('act', model.find_all('actuator'))

    @composer.observable
    def appendages_pos(self):
        """Equivalent to `end_effectors_pos` with head's position appended."""
        def relative_pos_in_egocentric_frame(physics):
            # Positions of the four end effectors plus the head, expressed in
            # the torso's (egocentric) frame, flattened into one vector.
            end_effectors_with_head = (
                self._entity.end_effectors + (self._entity.head,))
            end_effector = physics.bind(end_effectors_with_head).xpos
            torso = physics.bind(self._entity.root_body).xpos
            xmat = \
                np.reshape(physics.bind(self._entity.root_body).xmat, (3, 3))
            return np.reshape(np.dot(end_effector - torso, xmat), -1)
        return observable.Generic(relative_pos_in_egocentric_frame)

    @property
    def proprioception(self):
        """Return proprioceptive information."""
        return [
            self.joints_pos, self.joints_vel,
            self.tendons_pos, self.tendons_vel,
            self.actuator_activation,
            self.body_height, self.end_effectors_pos, self.appendages_pos,
            self.world_zaxis
        ] + self._collect_from_attachments('proprioception')

    @composer.observable
    def egocentric_camera(self):
        """Observable of the egocentric camera."""
        # Lazily build shared scene options the first time this is accessed.
        if not hasattr(self, '_scene_options'):
            # Don't render this walker's geoms.
            self._scene_options = mj_wrapper.MjvOption()
            collision_geom_group = 2
            self._scene_options.geomgroup[collision_geom_group] = 0
            cosmetic_geom_group = 1
            self._scene_options.geomgroup[cosmetic_geom_group] = 0
        return observable.MJCFCamera(self._entity.egocentric_camera,
                                     width=64, height=64,
                                     scene_option=self._scene_options
                                     )
| apache-2.0 |
antoviaque/edx-platform | common/lib/xmodule/xmodule/poll_module.py | 146 | 7498 | """Poll module is ungraded xmodule used by students to
complete a set of polls.
On the client side we show:
If the student has not yet answered - the question with its set of choices.
If the student has answered - the question with statistics for each answer.
"""
import cgi
import json
import logging
from copy import deepcopy
from collections import OrderedDict
from lxml import etree
from pkg_resources import resource_string
from xmodule.x_module import XModule
from xmodule.stringify import stringify_children
from xmodule.mako_module import MakoModuleDescriptor
from xmodule.xml_module import XmlDescriptor
from xblock.fields import Scope, String, Dict, Boolean, List
log = logging.getLogger(__name__)
class PollFields(object):
    """XBlock field definitions shared by PollModule and PollDescriptor."""
    # Name of poll to use in links to this poll
    display_name = String(help="Display name for this module", scope=Scope.settings)
    # Per-student flag: has this student already voted?
    voted = Boolean(help="Whether this student has voted on the poll", scope=Scope.user_state, default=False)
    # The answer id this student chose ('' until they vote).
    poll_answer = String(help="Student answer", scope=Scope.user_state, default='')
    # Aggregated vote counts across all students, keyed by answer id.
    poll_answers = Dict(help="Poll answers from all students", scope=Scope.user_state_summary)
    # List of answers, in the form {'id': 'some id', 'text': 'the answer text'}
    answers = List(help="Poll answers from xml", scope=Scope.content, default=[])
    question = String(help="Poll question", scope=Scope.content, default='')
class PollModule(PollFields, XModule):
    """Poll Module.

    Student-facing side of the poll: serves the question/choices, records a
    single vote per student, and reports aggregate statistics.
    """
    # Client-side assets bundled with this module.
    js = {
        'coffee': [resource_string(__name__, 'js/src/javascript_loader.coffee')],
        'js': [
            resource_string(__name__, 'js/src/poll/poll.js'),
            resource_string(__name__, 'js/src/poll/poll_main.js')
        ]
    }
    css = {'scss': [resource_string(__name__, 'css/poll/display.scss')]}
    js_module_name = "Poll"

    def handle_ajax(self, dispatch, data):
        """Ajax handler.

        Args:
            dispatch: string request slug
            data: dict request data parameters

        Returns:
            json string
        """
        # A dispatch matching an answer id counts as a vote (only once).
        if dispatch in self.poll_answers and not self.voted:
            # FIXME: fix this, when xblock will support mutable types.
            # Now we use this hack.
            temp_poll_answers = self.poll_answers
            temp_poll_answers[dispatch] += 1
            self.poll_answers = temp_poll_answers
            self.voted = True
            self.poll_answer = dispatch
            return json.dumps({'poll_answers': self.poll_answers,
                               'total': sum(self.poll_answers.values()),
                               'callback': {'objectName': 'Conditional'}
                               })
        elif dispatch == 'get_state':
            return json.dumps({'poll_answer': self.poll_answer,
                               'poll_answers': self.poll_answers,
                               'total': sum(self.poll_answers.values())
                               })
        elif dispatch == 'reset_poll' and self.voted and \
                self.descriptor.xml_attributes.get('reset', 'True').lower() != 'false':
            # Undo this student's vote and decrement the aggregate count.
            self.voted = False
            # FIXME: fix this, when xblock will support mutable types.
            # Now we use this hack.
            temp_poll_answers = self.poll_answers
            temp_poll_answers[self.poll_answer] -= 1
            self.poll_answers = temp_poll_answers
            self.poll_answer = ''
            return json.dumps({'status': 'success'})
        else:  # return error message
            return json.dumps({'error': 'Unknown Command!'})

    def get_html(self):
        """Renders parameters to template."""
        params = {
            'element_id': self.location.html_id(),
            'element_class': self.location.category,
            'ajax_url': self.system.ajax_url,
            'configuration_json': self.dump_poll(),
        }
        self.content = self.system.render_template('poll.html', params)
        return self.content

    def dump_poll(self):
        """Dump poll information.

        Returns:
            string - Serialize json.
        """
        # FIXME: hack for resolving caching `default={}` during definition
        # poll_answers field
        if self.poll_answers is None:
            self.poll_answers = {}
        answers_to_json = OrderedDict()
        # FIXME: fix this, when xblock support mutable types.
        # Now we use this hack.
        temp_poll_answers = self.poll_answers
        # Fill self.poll_answers, prepare data for template context.
        for answer in self.answers:
            # Set default count for answer = 0.
            if answer['id'] not in temp_poll_answers:
                temp_poll_answers[answer['id']] = 0
            answers_to_json[answer['id']] = cgi.escape(answer['text'])
        self.poll_answers = temp_poll_answers
        return json.dumps({
            'answers': answers_to_json,
            'question': cgi.escape(self.question),
            # to show answered poll after reload:
            'poll_answer': self.poll_answer,
            'poll_answers': self.poll_answers if self.voted else {},
            'total': sum(self.poll_answers.values()) if self.voted else 0,
            'reset': str(self.descriptor.xml_attributes.get('reset', 'true')).lower()
        })
class PollDescriptor(PollFields, MakoModuleDescriptor, XmlDescriptor):
    """Studio-facing side of the poll: XML (de)serialization."""
    # XML tag names used for serialization.
    _tag_name = 'poll_question'
    _child_tag_name = 'answer'
    module_class = PollModule

    @classmethod
    def definition_from_xml(cls, xml_object, system):
        """Pull out the data into dictionary.

        Args:
            xml_object: xml from file.
            system: `system` object.

        Returns:
            (definition, children) - tuple
            definition - dict:
                {
                    'answers': <List of answers>,
                    'question': <Question string>
                }
        """
        # Check for presense of required tags in xml.
        if len(xml_object.xpath(cls._child_tag_name)) == 0:
            raise ValueError("Poll_question definition must include \
                at least one 'answer' tag")
        xml_object_copy = deepcopy(xml_object)
        answers = []
        # Collect <answer> children (keeping only those with an id), removing
        # them from the copy so the remainder stringifies to the question.
        for element_answer in xml_object_copy.findall(cls._child_tag_name):
            answer_id = element_answer.get('id', None)
            if answer_id:
                answers.append({
                    'id': answer_id,
                    'text': stringify_children(element_answer)
                })
            xml_object_copy.remove(element_answer)
        definition = {
            'answers': answers,
            'question': stringify_children(xml_object_copy)
        }
        children = []
        return (definition, children)

    def definition_to_xml(self, resource_fs):
        """Return an xml element representing this definition."""
        poll_str = u'<{tag_name}>{text}</{tag_name}>'.format(
            tag_name=self._tag_name, text=self.question)
        xml_object = etree.fromstring(poll_str)
        xml_object.set('display_name', self.display_name)

        def add_child(xml_obj, answer):
            # Append one <answer id="..."> child to the enclosing xml_object.
            child_str = u'<{tag_name} id="{id}">{text}</{tag_name}>'.format(
                tag_name=self._child_tag_name, id=answer['id'],
                text=answer['text'])
            child_node = etree.fromstring(child_str)
            xml_object.append(child_node)

        for answer in self.answers:
            add_child(xml_object, answer)
        return xml_object
| agpl-3.0 |
nrgaway/qubes-core-admin | qubes/tests/tarwriter.py | 5 | 5801 | #
# The Qubes OS Project, http://www.qubes-os.org
#
# Copyright (C) 2016 Marek Marczykowski-Górecki
# <marmarek@invisiblethingslab.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <https://www.gnu.org/licenses/>.
#
import os
import subprocess
import tempfile
import shutil
import qubes.tarwriter
import qubes.tests
class TC_00_TarWriter(qubes.tests.QubesTestCase):
    """Tests for qubes.tarwriter: sparse-file-aware tar archive creation."""

    def setUp(self):
        super(TC_00_TarWriter, self).setUp()
        # Fresh scratch paths for each test; cleaned up in tearDown.
        self.input_path = tempfile.mktemp()
        self.output_path = tempfile.mktemp()
        self.extract_dir = tempfile.mkdtemp()

    def tearDown(self):
        if os.path.exists(self.input_path):
            os.unlink(self.input_path)
        if os.path.exists(self.output_path):
            os.unlink(self.output_path)
        if os.path.exists(self.extract_dir):
            shutil.rmtree(self.extract_dir)
        return super(TC_00_TarWriter, self).tearDown()

    def assertTarExtractable(self, expected_name=None):
        """Extract self.output_path with GNU tar and verify the result.

        Checks tar's member listing, compares the extracted file with the
        original input, and asserts that sparseness (block count) survived
        the round-trip.
        """
        if expected_name is None:
            expected_name = self.input_path
        with self.assertNotRaises(subprocess.CalledProcessError):
            tar_output = subprocess.check_output(
                ['tar', 'xvf', self.output_path],
                cwd=self.extract_dir,
                stderr=subprocess.STDOUT)
        expected_output = expected_name + '\n'
        if expected_name[0] == '/':
            # GNU tar strips the leading '/' and warns about it.
            expected_output = (
                'tar: Removing leading `/\' from member names\n' +
                expected_output)
        self.assertEqual(tar_output.decode(), expected_output)
        extracted_path = os.path.join(self.extract_dir,
                                      expected_name.lstrip('/'))
        with self.assertNotRaises(subprocess.CalledProcessError):
            subprocess.check_call(
                ['diff', '-q', self.input_path, extracted_path])
        # make sure the file is still sparse
        orig_stat = os.stat(self.input_path)
        extracted_stat = os.stat(extracted_path)
        self.assertEqual(orig_stat.st_blocks, extracted_stat.st_blocks)
        self.assertEqual(orig_stat.st_size, extracted_stat.st_size)

    def write_sparse_chunks(self, num_chunks):
        """Write num_chunks 4k data chunks separated by 4k holes."""
        with open(self.input_path, 'w') as f:
            for i in range(num_chunks):
                f.seek(8192 * i)
                f.write('a' * 4096)

    def test_000_simple(self):
        """A single non-sparse 4k file round-trips."""
        self.write_sparse_chunks(1)
        with open(self.input_path, 'w') as f:
            f.write('a' * 4096)
        qubes.tarwriter.main([self.input_path, self.output_path])
        self.assertTarExtractable()

    def test_001_simple_sparse2(self):
        """Two sparse chunks round-trip."""
        self.write_sparse_chunks(2)
        qubes.tarwriter.main([self.input_path, self.output_path])
        self.assertTarExtractable()

    def test_002_simple_sparse3(self):
        # tar header contains info about 4 chunks, check for off-by-one errors
        self.write_sparse_chunks(3)
        qubes.tarwriter.main([self.input_path, self.output_path])
        self.assertTarExtractable()

    def test_003_simple_sparse4(self):
        # tar header contains info about 4 chunks, check for off-by-one errors
        self.write_sparse_chunks(4)
        qubes.tarwriter.main([self.input_path, self.output_path])
        self.assertTarExtractable()

    def test_004_simple_sparse5(self):
        # tar header contains info about 4 chunks, check for off-by-one errors
        self.write_sparse_chunks(5)
        qubes.tarwriter.main([self.input_path, self.output_path])
        self.assertTarExtractable()

    def test_005_simple_sparse24(self):
        # tar header contains info about 4 chunks, next header contains 21 of
        # them, check for off-by-one errors
        self.write_sparse_chunks(24)
        qubes.tarwriter.main([self.input_path, self.output_path])
        self.assertTarExtractable()

    def test_006_simple_sparse25(self):
        # tar header contains info about 4 chunks, next header contains 21 of
        # them, check for off-by-one errors
        self.write_sparse_chunks(25)
        qubes.tarwriter.main([self.input_path, self.output_path])
        self.assertTarExtractable()

    def test_007_simple_sparse26(self):
        # tar header contains info about 4 chunks, next header contains 21 of
        # them, check for off-by-one errors
        self.write_sparse_chunks(26)
        qubes.tarwriter.main([self.input_path, self.output_path])
        self.assertTarExtractable()

    def test_010_override_name(self):
        """--override-name stores the file under a different member name."""
        self.write_sparse_chunks(1)
        qubes.tarwriter.main(['--override-name',
                              'different-name', self.input_path, self.output_path])
        self.assertTarExtractable(expected_name='different-name')

    def test_011_empty(self):
        """A zero-length file round-trips."""
        self.write_sparse_chunks(0)
        qubes.tarwriter.main([self.input_path, self.output_path])
        self.assertTarExtractable()

    def test_012_gzip(self):
        """--use-compress-program pipes the archive through gzip."""
        self.write_sparse_chunks(0)
        qubes.tarwriter.main([
            '--use-compress-program=gzip', self.input_path, self.output_path])
        with self.assertNotRaises(subprocess.CalledProcessError):
            subprocess.check_call(['gzip', '--test', self.output_path])
        self.assertTarExtractable()
| gpl-2.0 |
foobarbazblarg/stayclean | stayclean-2020-december/venv/lib/python3.8/site-packages/pip/_vendor/toml/decoder.py | 11 | 38954 | import datetime
import io
from os import linesep
import re
import sys
from pip._vendor.toml.tz import TomlTz
# Python 2/3 compatibility shims: on Python 3, alias the removed Python 2
# builtins that the rest of the decoder still references.
if sys.version_info < (3,):
    _range = xrange  # noqa: F821
else:
    unicode = str
    _range = range
    basestring = str
    unichr = chr
def _detect_pathlib_path(p):
if (3, 4) <= sys.version_info:
import pathlib
if isinstance(p, pathlib.PurePath):
return True
return False
def _ispath(p):
    """Return True if *p* can be treated as a filesystem path.

    Accepts byte strings, (unicode) strings and ``pathlib`` path objects.
    """
    return isinstance(p, (bytes, basestring)) or _detect_pathlib_path(p)
def _getpath(p):
if (3, 6) <= sys.version_info:
import os
return os.fspath(p)
if _detect_pathlib_path(p):
return str(p)
return p
# FileNotFoundError only exists on Python 3; fall back to IOError (its
# Python 2 ancestor-equivalent) when the name is missing.
try:
    FNFError = FileNotFoundError
except NameError:
    FNFError = IOError

# HH:MM:SS with an optional fractional-seconds part of 3-6 digits.
TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
class TomlDecodeError(ValueError):
    """Base toml Exception / Error."""

    def __init__(self, msg, doc, pos):
        """Store *msg* plus the line/column derived from offset *pos*."""
        # Line number is 1-based; column is the distance past the last
        # newline before pos (also 1-based).
        line = doc.count('\n', 0, pos) + 1
        col = pos - doc.rfind('\n', 0, pos)
        ValueError.__init__(
            self, '{} (line {} column {} char {})'.format(msg, line, col, pos))
        self.msg = msg
        self.doc = doc
        self.pos = pos
        self.lineno = line
        self.colno = col
# Matches a TOML number, which allows underscores for readability
# (each underscore must be surrounded by digits, e.g. 1_000_000).
_number_with_underscores = re.compile('([0-9])(_([0-9]))*')
class CommentValue(object):
    """Wrap a parsed value together with the comment attached to it."""

    def __init__(self, val, comment, beginline, _dict):
        self.val = val
        # A comment on its own line is re-attached after a newline; an
        # inline comment is separated from the value by a single space.
        if beginline:
            self.comment = "\n" + comment
        else:
            self.comment = " " + comment
        self._dict = _dict

    def __getitem__(self, key):
        return self.val[key]

    def __setitem__(self, key, value):
        self.val[key] = value

    def dump(self, dump_value_func):
        """Serialize the wrapped value and splice its comment back in."""
        rendered = dump_value_func(self.val)
        if isinstance(self.val, self._dict):
            # Tables get the comment before them, on its own line.
            return self.comment + "\n" + unicode(rendered)
        return unicode(rendered) + self.comment
def _strictly_valid_num(n):
n = n.strip()
if not n:
return False
if n[0] == '_':
return False
if n[-1] == '_':
return False
if "_." in n or "._" in n:
return False
if len(n) == 1:
return True
if n[0] == '0' and n[1] not in ['.', 'o', 'b', 'x']:
return False
if n[0] == '+' or n[0] == '-':
n = n[1:]
if len(n) > 1 and n[0] == '0' and n[1] != '.':
return False
if '__' in n:
return False
return True
def load(f, _dict=dict, decoder=None):
    """Parses named file or files as toml and returns a dictionary

    Args:
        f: Path to the file to open, array of files to read into single dict
           or a file descriptor
        _dict: (optional) Specifies the class of the returned toml dictionary
        decoder: The decoder to use

    Returns:
        Parsed toml file represented as a dictionary

    Raises:
        TypeError -- When f is invalid type
        TomlDecodeError: Error while decoding toml
        IOError / FileNotFoundError -- When an array with no valid (existing)
        (Python 2 / Python 3) file paths is passed
    """

    if _ispath(f):
        # Single path (str/bytes/pathlib): read it as UTF-8 and parse.
        with io.open(_getpath(f), encoding='utf-8') as ffile:
            return loads(ffile.read(), _dict, decoder)
    elif isinstance(f, list):
        from os import path as op
        from warnings import warn
        # At least one entry must exist; otherwise there is nothing to parse.
        if not [path for path in f if op.exists(path)]:
            error_msg = "Load expects a list to contain filenames only."
            error_msg += linesep
            error_msg += ("The list needs to contain the path of at least one "
                          "existing file.")
            raise FNFError(error_msg)
        if decoder is None:
            decoder = TomlDecoder(_dict)
        d = decoder.get_empty_table()
        # Merge every existing file into one table; later files win on
        # conflicting top-level keys (dict.update semantics).
        for l in f:  # noqa: E741
            if op.exists(l):
                d.update(load(l, _dict, decoder))
            else:
                warn("Non-existent filename in list with at least one valid "
                     "filename")
        return d
    else:
        # Assume a file-like object; a missing read() means the caller
        # passed something unsupported.
        try:
            return loads(f.read(), _dict, decoder)
        except AttributeError:
            raise TypeError("You can only load a file descriptor, filename or "
                            "list")
# Bare (unquoted) TOML keys / table names: ASCII letters, digits, '_', '-'.
_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$')
def loads(s, _dict=dict, decoder=None):
    """Parses string as toml

    Args:
        s: String to be parsed
        _dict: (optional) Specifies the class of the returned toml dictionary

    Returns:
        Parsed toml file represented as a dictionary

    Raises:
        TypeError: When a non-string is passed
        TomlDecodeError: Error while decoding toml
    """

    implicitgroups = []
    if decoder is None:
        decoder = TomlDecoder(_dict)
    retval = decoder.get_empty_table()
    currentlevel = retval
    if not isinstance(s, basestring):
        raise TypeError("Expecting something like a string")

    if not isinstance(s, unicode):
        s = s.decode('utf8')

    original = s
    sl = list(s)
    # State for the first, character-by-character scanning pass.
    openarr = 0            # depth of currently open inline arrays
    openstring = False     # inside a quoted string?
    openstrchar = ""       # which quote character opened it
    multilinestr = False   # inside a triple-quoted string?
    arrayoftables = False  # inside a [[...]] header?
    beginline = True       # at the (logical) start of a line?
    keygroup = False       # inside a [...] table header?
    dottedkey = False
    keyname = 0            # 0 = not in a key, 1 = in key, 2 = after key
    key = ''
    prev_key = ''
    line_no = 1

    # First pass: walk every character to blank out comments, validate key
    # names / quoting, and fold multi-line constructs onto single lines so
    # the second pass can work line-by-line.
    for i, item in enumerate(sl):
        if item == '\r' and sl[i + 1] == '\n':
            sl[i] = ' '
            continue
        if keyname:
            key += item
            if item == '\n':
                raise TomlDecodeError("Key name found without value."
                                      " Reached end of line.", original, i)
            if openstring:
                if item == openstrchar:
                    # A quote only closes the key if it is not escaped by an
                    # odd number of preceding backslashes.
                    oddbackslash = False
                    k = 1
                    while i >= k and sl[i - k] == '\\':
                        oddbackslash = not oddbackslash
                        k += 1
                    if not oddbackslash:
                        keyname = 2
                        openstring = False
                        openstrchar = ""
                continue
            elif keyname == 1:
                if item.isspace():
                    keyname = 2
                    continue
                elif item == '.':
                    dottedkey = True
                    continue
                elif item.isalnum() or item == '_' or item == '-':
                    continue
                elif (dottedkey and sl[i - 1] == '.' and
                      (item == '"' or item == "'")):
                    openstring = True
                    openstrchar = item
                    continue
            elif keyname == 2:
                if item.isspace():
                    if dottedkey:
                        nextitem = sl[i + 1]
                        if not nextitem.isspace() and nextitem != '.':
                            keyname = 1
                    continue
                if item == '.':
                    dottedkey = True
                    nextitem = sl[i + 1]
                    if not nextitem.isspace() and nextitem != '.':
                        keyname = 1
                    continue
            if item == '=':
                keyname = 0
                prev_key = key[:-1].rstrip()
                key = ''
                dottedkey = False
            else:
                raise TomlDecodeError("Found invalid character in key name: '" +
                                      item + "'. Try quoting the key name.",
                                      original, i)
        # Track single-quoted ('' and ''' ''') string state.
        if item == "'" and openstrchar != '"':
            k = 1
            try:
                while sl[i - k] == "'":
                    k += 1
                    if k == 3:
                        break
            except IndexError:
                pass
            if k == 3:
                multilinestr = not multilinestr
                openstring = multilinestr
            else:
                openstring = not openstring
            if openstring:
                openstrchar = "'"
            else:
                openstrchar = ""
        # Track double-quoted ("" and """ """) string state, honoring
        # backslash escapes before the quote.
        if item == '"' and openstrchar != "'":
            oddbackslash = False
            k = 1
            tripquote = False
            try:
                while sl[i - k] == '"':
                    k += 1
                    if k == 3:
                        tripquote = True
                        break
                if k == 1 or (k == 3 and tripquote):
                    while sl[i - k] == '\\':
                        oddbackslash = not oddbackslash
                        k += 1
            except IndexError:
                pass
            if not oddbackslash:
                if tripquote:
                    multilinestr = not multilinestr
                    openstring = multilinestr
                else:
                    openstring = not openstring
            if openstring:
                openstrchar = '"'
            else:
                openstrchar = ""
        # Blank out comments (outside strings/headers) so the second pass
        # never sees them; optionally hand them to the decoder for
        # comment-preserving round trips.
        if item == '#' and (not openstring and not keygroup and
                            not arrayoftables):
            j = i
            comment = ""
            try:
                while sl[j] != '\n':
                    comment += s[j]
                    sl[j] = ' '
                    j += 1
            except IndexError:
                break
            if not openarr:
                decoder.preserve_comment(line_no, prev_key, comment, beginline)
        if item == '[' and (not openstring and not keygroup and
                            not arrayoftables):
            if beginline:
                if len(sl) > i + 1 and sl[i + 1] == '[':
                    arrayoftables = True
                else:
                    keygroup = True
            else:
                openarr += 1
        if item == ']' and not openstring:
            if keygroup:
                keygroup = False
            elif arrayoftables:
                if sl[i - 1] == ']':
                    arrayoftables = False
            else:
                openarr -= 1
        if item == '\n':
            if openstring or multilinestr:
                if not multilinestr:
                    raise TomlDecodeError("Unbalanced quotes", original, i)
                if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
                        sl[i - 2] == sl[i - 1])):
                    sl[i] = sl[i - 1]
                    if sl[i - 3] == sl[i - 1]:
                        sl[i - 3] = ' '
            elif openarr:
                # Newlines inside arrays are folded into spaces so the whole
                # array ends up on one logical line.
                sl[i] = ' '
            else:
                beginline = True
            line_no += 1
        elif beginline and sl[i] != ' ' and sl[i] != '\t':
            beginline = False
            if not keygroup and not arrayoftables:
                if sl[i] == '=':
                    raise TomlDecodeError("Found empty keyname. ", original, i)
                keyname = 1
                key += item
    if keyname:
        raise TomlDecodeError("Key name found without value."
                              " Reached end of file.", original, len(s))
    if openstring:  # reached EOF and have an unterminated string
        raise TomlDecodeError("Unterminated string found."
                              " Reached end of file.", original, len(s))
    s = ''.join(sl)
    s = s.split('\n')
    # Second pass: interpret the cleaned-up document line by line.
    multikey = None
    multilinestr = ""
    multibackslash = False
    pos = 0
    for idx, line in enumerate(s):
        if idx > 0:
            pos += len(s[idx - 1]) + 1

        decoder.embed_comments(idx, currentlevel)

        if not multilinestr or multibackslash or '\n' not in multilinestr:
            line = line.strip()
        if line == "" and (not multikey or multibackslash):
            continue
        if multikey:
            # Continuation of a multi-line string started on an earlier line.
            if multibackslash:
                multilinestr += line
            else:
                multilinestr += line
            multibackslash = False
            closed = False
            if multilinestr[0] == '[':
                closed = line[-1] == ']'
            elif len(line) > 2:
                closed = (line[-1] == multilinestr[0] and
                          line[-2] == multilinestr[0] and
                          line[-3] == multilinestr[0])
            if closed:
                try:
                    value, vtype = decoder.load_value(multilinestr)
                except ValueError as err:
                    raise TomlDecodeError(str(err), original, pos)
                currentlevel[multikey] = value
                multikey = None
                multilinestr = ""
            else:
                # Trailing backslash (odd count) means "join with next line".
                k = len(multilinestr) - 1
                while k > -1 and multilinestr[k] == '\\':
                    multibackslash = not multibackslash
                    k -= 1
                if multibackslash:
                    multilinestr = multilinestr[:-1]
                else:
                    multilinestr += "\n"
            continue
        if line[0] == '[':
            # Table header: [group] or array-of-tables [[group]].
            arrayoftables = False
            if len(line) == 1:
                raise TomlDecodeError("Opening key group bracket on line by "
                                      "itself.", original, pos)
            if line[1] == '[':
                arrayoftables = True
                line = line[2:]
                splitstr = ']]'
            else:
                line = line[1:]
                splitstr = ']'
            i = 1
            quotesplits = decoder._get_split_on_quotes(line)
            quoted = False
            for quotesplit in quotesplits:
                if not quoted and splitstr in quotesplit:
                    break
                i += quotesplit.count(splitstr)
                quoted = not quoted
            line = line.split(splitstr, i)
            if len(line) < i + 1 or line[-1].strip() != "":
                raise TomlDecodeError("Key group not on a line by itself.",
                                      original, pos)
            groups = splitstr.join(line[:-1]).split('.')
            i = 0
            # Re-join quoted group names that contained dots.
            while i < len(groups):
                groups[i] = groups[i].strip()
                if len(groups[i]) > 0 and (groups[i][0] == '"' or
                                           groups[i][0] == "'"):
                    groupstr = groups[i]
                    j = i + 1
                    while ((not groupstr[0] == groupstr[-1]) or
                           len(groupstr) == 1):
                        j += 1
                        if j > len(groups) + 2:
                            raise TomlDecodeError("Invalid group name '" +
                                                  groupstr + "' Something " +
                                                  "went wrong.", original, pos)
                        groupstr = '.'.join(groups[i:j]).strip()
                    groups[i] = groupstr[1:-1]
                    groups[i + 1:j] = []
                else:
                    if not _groupname_re.match(groups[i]):
                        raise TomlDecodeError("Invalid group name '" +
                                              groups[i] + "'. Try quoting it.",
                                              original, pos)
                i += 1
            # Descend (creating tables as needed) to the named level.
            currentlevel = retval
            for i in _range(len(groups)):
                group = groups[i]
                if group == "":
                    raise TomlDecodeError("Can't have a keygroup with an empty "
                                          "name", original, pos)
                try:
                    currentlevel[group]
                    if i == len(groups) - 1:
                        if group in implicitgroups:
                            implicitgroups.remove(group)
                            if arrayoftables:
                                raise TomlDecodeError("An implicitly defined "
                                                      "table can't be an array",
                                                      original, pos)
                        elif arrayoftables:
                            currentlevel[group].append(decoder.get_empty_table()
                                                       )
                        else:
                            raise TomlDecodeError("What? " + group +
                                                  " already exists?" +
                                                  str(currentlevel),
                                                  original, pos)
                except TypeError:
                    # currentlevel is a list (array of tables): use its last
                    # element as the table to extend.
                    currentlevel = currentlevel[-1]
                    if group not in currentlevel:
                        currentlevel[group] = decoder.get_empty_table()
                        if i == len(groups) - 1 and arrayoftables:
                            currentlevel[group] = [decoder.get_empty_table()]
                except KeyError:
                    if i != len(groups) - 1:
                        implicitgroups.append(group)
                    currentlevel[group] = decoder.get_empty_table()
                    if i == len(groups) - 1 and arrayoftables:
                        currentlevel[group] = [decoder.get_empty_table()]
                currentlevel = currentlevel[group]
                if arrayoftables:
                    try:
                        currentlevel = currentlevel[-1]
                    except KeyError:
                        pass
        elif line[0] == "{":
            # Inline table at top level of a line.
            if line[-1] != "}":
                raise TomlDecodeError("Line breaks are not allowed in inline"
                                      "objects", original, pos)
            try:
                decoder.load_inline_object(line, currentlevel, multikey,
                                           multibackslash)
            except ValueError as err:
                raise TomlDecodeError(str(err), original, pos)
        elif "=" in line:
            # Ordinary key = value assignment.
            try:
                ret = decoder.load_line(line, currentlevel, multikey,
                                        multibackslash)
            except ValueError as err:
                raise TomlDecodeError(str(err), original, pos)
            if ret is not None:
                multikey, multilinestr, multibackslash = ret
    return retval
def _load_date(val):
microsecond = 0
tz = None
try:
if len(val) > 19:
if val[19] == '.':
if val[-1].upper() == 'Z':
subsecondval = val[20:-1]
tzval = "Z"
else:
subsecondvalandtz = val[20:]
if '+' in subsecondvalandtz:
splitpoint = subsecondvalandtz.index('+')
subsecondval = subsecondvalandtz[:splitpoint]
tzval = subsecondvalandtz[splitpoint:]
elif '-' in subsecondvalandtz:
splitpoint = subsecondvalandtz.index('-')
subsecondval = subsecondvalandtz[:splitpoint]
tzval = subsecondvalandtz[splitpoint:]
else:
tzval = None
subsecondval = subsecondvalandtz
if tzval is not None:
tz = TomlTz(tzval)
microsecond = int(int(subsecondval) *
(10 ** (6 - len(subsecondval))))
else:
tz = TomlTz(val[19:])
except ValueError:
tz = None
if "-" not in val[1:]:
return None
try:
if len(val) == 10:
d = datetime.date(
int(val[:4]), int(val[5:7]),
int(val[8:10]))
else:
d = datetime.datetime(
int(val[:4]), int(val[5:7]),
int(val[8:10]), int(val[11:13]),
int(val[14:16]), int(val[17:19]), microsecond, tz)
except ValueError:
return None
return d
def _load_unicode_escapes(v, hexbytes, prefix):
    """Re-assemble a string that was split on *prefix* (``\\u`` or ``\\U``),
    decoding each escape's hex digits into the corresponding character.

    *v* is the text before the first escape; *hexbytes* are the fragments
    that followed each occurrence of *prefix*.
    """
    # Determine whether the text before the first fragment ends in an odd
    # number of backslashes, i.e. whether that "escape" was itself escaped.
    skip = False
    i = len(v) - 1
    while i > -1 and v[i] == '\\':
        skip = not skip
        i -= 1
    for hx in hexbytes:
        if skip:
            # The prefix was escaped (e.g. "\\\\u..."): keep it literally and
            # recompute the backslash parity for the next fragment.
            skip = False
            i = len(hx) - 1
            while i > -1 and hx[i] == '\\':
                skip = not skip
                i -= 1
            v += prefix
            v += hx
            continue
        hxb = ""
        i = 0
        hxblen = 4
        if prefix == "\\U":
            hxblen = 8
        hxb = ''.join(hx[i:i + hxblen]).lower()
        # Every escape digit must be hex.
        if hxb.strip('0123456789abcdef'):
            raise ValueError("Invalid escape sequence: " + hxb)
        # Reject UTF-16 surrogate code points (U+D800-U+DFFF).
        if hxb[0] == "d" and hxb[1].strip('01234567'):
            raise ValueError("Invalid escape sequence: " + hxb +
                             ". Only scalar unicode points are allowed.")
        v += unichr(int(hxb, 16))
        v += unicode(hx[len(hxb):])
    return v
# Unescape TOML string values.

# content after the \ (the recognized single-character escape codes)
_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"']
# What it should be replaced by
_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"']
# Used for substitution: escape code -> literal character
_escape_to_escapedchars = dict(zip(_escapes, _escapedchars))
def _unescape(v):
    """Unescape characters in a TOML string.

    Rewrites *v* in place (index by index) so that each backslash escape is
    replaced by its literal character; ``\\u``/``\\U`` escapes are left for
    _load_unicode_escapes, which runs before this function.
    """
    i = 0
    backslash = False
    while i < len(v):
        if backslash:
            backslash = False
            if v[i] in _escapes:
                # Replace the two-character escape with its single character.
                v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:]
            elif v[i] == '\\':
                # "\\\\" collapses to a single backslash.
                v = v[:i - 1] + v[i:]
            elif v[i] == 'u' or v[i] == 'U':
                # Unicode escapes were already expanded; skip the marker.
                i += 1
            else:
                raise ValueError("Reserved escape sequence used")
            continue
        elif v[i] == '\\':
            backslash = True
        i += 1
    return v
class InlineTableDict(object):
    """Marker (mixin) class identifying dicts parsed from inline tables."""
class TomlDecoder(object):
    """Decoder turning (pre-scanned) TOML lines into Python values.

    ``loads`` drives this class: it calls ``load_line`` for assignments,
    ``load_value`` for individual values, and the ``preserve_comment`` /
    ``embed_comments`` hooks (no-ops here, overridden by the
    comment-preserving subclass).
    """

    def __init__(self, _dict=dict):
        # _dict is the mapping class used for every produced table.
        self._dict = _dict

    def get_empty_table(self):
        """Return a new, empty table of the configured dict class."""
        return self._dict()

    def get_empty_inline_table(self):
        """Return a new, empty table tagged as coming from an inline table."""
        class DynamicInlineTableDict(self._dict, InlineTableDict):
            """Concrete sentinel subclass for inline tables.
            It is a subclass of _dict which is passed in dynamically at load
            time
            It is also a subclass of InlineTableDict
            """

        return DynamicInlineTableDict()

    def load_inline_object(self, line, currentlevel, multikey=False,
                           multibackslash=False):
        """Parse an inline table ``{k = v, ...}`` into *currentlevel*."""
        candidate_groups = line[1:-1].split(",")
        groups = []
        if len(candidate_groups) == 1 and not candidate_groups[0].strip():
            candidate_groups.pop()
        # Re-join fragments whose comma was actually inside a value.
        while len(candidate_groups) > 0:
            candidate_group = candidate_groups.pop(0)
            try:
                _, value = candidate_group.split('=', 1)
            except ValueError:
                raise ValueError("Invalid inline table encountered")
            value = value.strip()
            # A fragment is complete when its value looks like a full
            # string / number / bool / array / inline table.
            if ((value[0] == value[-1] and value[0] in ('"', "'")) or (
                    value[0] in '-0123456789' or
                    value in ('true', 'false') or
                    (value[0] == "[" and value[-1] == "]") or
                    (value[0] == '{' and value[-1] == '}'))):
                groups.append(candidate_group)
            elif len(candidate_groups) > 0:
                candidate_groups[0] = (candidate_group + "," +
                                       candidate_groups[0])
            else:
                raise ValueError("Invalid inline table value encountered")
        for group in groups:
            status = self.load_line(group, currentlevel, multikey,
                                    multibackslash)
            if status is not None:
                break

    def _get_split_on_quotes(self, line):
        """Split *line* so that even-indexed chunks are outside quotes and
        odd-indexed chunks are inside a quoted string."""
        doublequotesplits = line.split('"')
        quoted = False
        quotesplits = []
        # Handle a leading single-quoted section that itself contains
        # double quotes.
        if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]:
            singlequotesplits = doublequotesplits[0].split("'")
            doublequotesplits = doublequotesplits[1:]
            while len(singlequotesplits) % 2 == 0 and len(doublequotesplits):
                singlequotesplits[-1] += '"' + doublequotesplits[0]
                doublequotesplits = doublequotesplits[1:]
                if "'" in singlequotesplits[-1]:
                    singlequotesplits = (singlequotesplits[:-1] +
                                         singlequotesplits[-1].split("'"))
            quotesplits += singlequotesplits
        for doublequotesplit in doublequotesplits:
            if quoted:
                quotesplits.append(doublequotesplit)
            else:
                quotesplits += doublequotesplit.split("'")
            quoted = not quoted
        return quotesplits

    def load_line(self, line, currentlevel, multikey, multibackslash):
        """Parse one ``key = value`` line into *currentlevel*.

        Returns a (multikey, multilinestr, multibackslash) triple when the
        value starts a multi-line string, otherwise None.
        """
        # Find the '=' that separates key from value, skipping any '='
        # that occurs inside a quoted key.
        i = 1
        quotesplits = self._get_split_on_quotes(line)
        quoted = False
        for quotesplit in quotesplits:
            if not quoted and '=' in quotesplit:
                break
            i += quotesplit.count('=')
            quoted = not quoted
        pair = line.split('=', i)
        strictly_valid = _strictly_valid_num(pair[-1])
        if _number_with_underscores.match(pair[-1]):
            pair[-1] = pair[-1].replace('_', '')
        # Extend the split rightwards while the candidate value is not a
        # recognizable literal (covers dates/times containing '=' splits).
        while len(pair[-1]) and (pair[-1][0] != ' ' and pair[-1][0] != '\t' and
                                 pair[-1][0] != "'" and pair[-1][0] != '"' and
                                 pair[-1][0] != '[' and pair[-1][0] != '{' and
                                 pair[-1].strip() != 'true' and
                                 pair[-1].strip() != 'false'):
            try:
                float(pair[-1])
                break
            except ValueError:
                pass
            if _load_date(pair[-1]) is not None:
                break
            if TIME_RE.match(pair[-1]):
                break
            i += 1
            prev_val = pair[-1]
            pair = line.split('=', i)
            if prev_val == pair[-1]:
                raise ValueError("Invalid date or number")
            if strictly_valid:
                strictly_valid = _strictly_valid_num(pair[-1])
        pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()]
        if '.' in pair[0]:
            # Dotted key: walk/create the intermediate tables.
            if '"' in pair[0] or "'" in pair[0]:
                quotesplits = self._get_split_on_quotes(pair[0])
                quoted = False
                levels = []
                for quotesplit in quotesplits:
                    if quoted:
                        levels.append(quotesplit)
                    else:
                        levels += [level.strip() for level in
                                   quotesplit.split('.')]
                    quoted = not quoted
            else:
                levels = pair[0].split('.')
            while levels[-1] == "":
                levels = levels[:-1]
            for level in levels[:-1]:
                if level == "":
                    continue
                if level not in currentlevel:
                    currentlevel[level] = self.get_empty_table()
                currentlevel = currentlevel[level]
            pair[0] = levels[-1].strip()
        elif (pair[0][0] == '"' or pair[0][0] == "'") and \
                (pair[0][-1] == pair[0][0]):
            pair[0] = _unescape(pair[0][1:-1])
        k, koffset = self._load_line_multiline_str(pair[1])
        if k > -1:
            # The value starts a multi-line string: report state back to
            # loads() so following lines get appended.
            while k > -1 and pair[1][k + koffset] == '\\':
                multibackslash = not multibackslash
                k -= 1
            if multibackslash:
                multilinestr = pair[1][:-1]
            else:
                multilinestr = pair[1] + "\n"
            multikey = pair[0]
        else:
            value, vtype = self.load_value(pair[1], strictly_valid)
        try:
            currentlevel[pair[0]]
            raise ValueError("Duplicate keys!")
        except TypeError:
            raise ValueError("Duplicate keys!")
        except KeyError:
            # KeyError means the key is new -- the expected case.
            if multikey:
                return multikey, multilinestr, multibackslash
            else:
                currentlevel[pair[0]] = value

    def _load_line_multiline_str(self, p):
        """Return (index, offset) if *p* opens an unterminated multi-line
        string, else (-1, offset)."""
        poffset = 0
        if len(p) < 3:
            return -1, poffset
        if p[0] == '[' and (p.strip()[-1] != ']' and
                            self._load_array_isstrarray(p)):
            # Inside a string array: only the last element can be open.
            newp = p[1:].strip().split(',')
            while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'":
                newp = newp[:-2] + [newp[-2] + ',' + newp[-1]]
            newp = newp[-1]
            poffset = len(p) - len(newp)
            p = newp
        if p[0] != '"' and p[0] != "'":
            return -1, poffset
        if p[1] != p[0] or p[2] != p[0]:
            return -1, poffset
        if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]:
            return -1, poffset
        return len(p) - 1, poffset

    def load_value(self, v, strictly_valid=True):
        """Parse a single TOML value; return (value, type-name) tuple."""
        if not v:
            raise ValueError("Empty value is invalid")
        if v == 'true':
            return (True, "bool")
        elif v.lower() == 'true':
            raise ValueError("Only all lowercase booleans allowed")
        elif v == 'false':
            return (False, "bool")
        elif v.lower() == 'false':
            raise ValueError("Only all lowercase booleans allowed")
        elif v[0] == '"' or v[0] == "'":
            # String value: validate quoting/escapes, then strip quotes.
            quotechar = v[0]
            testv = v[1:].split(quotechar)
            triplequote = False
            triplequotecount = 0
            if len(testv) > 1 and testv[0] == '' and testv[1] == '':
                testv = testv[2:]
                triplequote = True
            closed = False
            for tv in testv:
                if tv == '':
                    if triplequote:
                        triplequotecount += 1
                    else:
                        closed = True
                else:
                    # A quote preceded by an odd number of backslashes does
                    # not close the string.
                    oddbackslash = False
                    try:
                        i = -1
                        j = tv[i]
                        while j == '\\':
                            oddbackslash = not oddbackslash
                            i -= 1
                            j = tv[i]
                    except IndexError:
                        pass
                    if not oddbackslash:
                        if closed:
                            raise ValueError("Found tokens after a closed " +
                                             "string. Invalid TOML.")
                        else:
                            if not triplequote or triplequotecount > 1:
                                closed = True
                            else:
                                triplequotecount = 0
            if quotechar == '"':
                # Basic strings: validate and expand escape sequences.
                escapeseqs = v.split('\\')[1:]
                backslash = False
                for i in escapeseqs:
                    if i == '':
                        backslash = not backslash
                    else:
                        if i[0] not in _escapes and (i[0] != 'u' and
                                                     i[0] != 'U' and
                                                     not backslash):
                            raise ValueError("Reserved escape sequence used")
                        if backslash:
                            backslash = False
                for prefix in ["\\u", "\\U"]:
                    if prefix in v:
                        hexbytes = v.split(prefix)
                        v = _load_unicode_escapes(hexbytes[0], hexbytes[1:],
                                                  prefix)
                v = _unescape(v)
            # Strip triple quotes down to single quotes before slicing.
            if len(v) > 1 and v[1] == quotechar and (len(v) < 3 or
                                                     v[1] == v[2]):
                v = v[2:-2]
            return (v[1:-1], "str")
        elif v[0] == '[':
            return (self.load_array(v), "array")
        elif v[0] == '{':
            inline_object = self.get_empty_inline_table()
            self.load_inline_object(v, inline_object)
            return (inline_object, "inline_object")
        elif TIME_RE.match(v):
            h, m, s, _, ms = TIME_RE.match(v).groups()
            time = datetime.time(int(h), int(m), int(s), int(ms) if ms else 0)
            return (time, "time")
        else:
            parsed_date = _load_date(v)
            if parsed_date is not None:
                return (parsed_date, "date")
            if not strictly_valid:
                raise ValueError("Weirdness with leading zeroes or "
                                 "underscores in your number.")
            # Remaining possibility: an int or float literal.
            itype = "int"
            neg = False
            if v[0] == '-':
                neg = True
                v = v[1:]
            elif v[0] == '+':
                v = v[1:]
            v = v.replace('_', '')
            lowerv = v.lower()
            if '.' in v or ('x' not in v and ('e' in v or 'E' in v)):
                if '.' in v and v.split('.', 1)[1] == '':
                    raise ValueError("This float is missing digits after "
                                     "the point")
                if v[0] not in '0123456789':
                    raise ValueError("This float doesn't have a leading "
                                     "digit")
                v = float(v)
                itype = "float"
            elif len(lowerv) == 3 and (lowerv == 'inf' or lowerv == 'nan'):
                v = float(v)
                itype = "float"
            if itype == "int":
                # base 0 lets int() accept 0x/0o/0b prefixes.
                v = int(v, 0)
            if neg:
                return (0 - v, itype)
            return (v, itype)

    def bounded_string(self, s):
        """Return True if *s* ends with the same (unescaped) quote it
        starts with."""
        if len(s) == 0:
            return True
        if s[-1] != s[0]:
            return False
        i = -2
        backslash = False
        while len(s) + i > 0:
            if s[i] == "\\":
                backslash = not backslash
                i -= 1
            else:
                break
        return not backslash

    def _load_array_isstrarray(self, a):
        """Return True if array literal *a* holds string elements."""
        a = a[1:-1].strip()
        if a != '' and (a[0] == '"' or a[0] == "'"):
            return True
        return False

    def load_array(self, a):
        """Parse an array literal into a Python list (homogeneous types)."""
        atype = None
        retval = []
        a = a.strip()
        if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip():
            strarray = self._load_array_isstrarray(a)
            if not a[1:-1].strip().startswith('{'):
                a = a[1:-1].split(',')
            else:
                # a is an inline object, we must find the matching parenthesis
                # to define groups
                new_a = []
                start_group_index = 1
                end_group_index = 2
                open_bracket_count = 1 if a[start_group_index] == '{' else 0
                in_str = False
                while end_group_index < len(a[1:]):
                    if a[end_group_index] == '"' or a[end_group_index] == "'":
                        if in_str:
                            # Ignore escaped quotes while inside a string.
                            backslash_index = end_group_index - 1
                            while (backslash_index > -1 and
                                   a[backslash_index] == '\\'):
                                in_str = not in_str
                                backslash_index -= 1
                        in_str = not in_str
                    if not in_str and a[end_group_index] == '{':
                        open_bracket_count += 1
                    if in_str or a[end_group_index] != '}':
                        end_group_index += 1
                        continue
                    elif a[end_group_index] == '}' and open_bracket_count > 1:
                        open_bracket_count -= 1
                        end_group_index += 1
                        continue

                    # Increase end_group_index by 1 to get the closing bracket
                    end_group_index += 1

                    new_a.append(a[start_group_index:end_group_index])

                    # The next start index is at least after the closing
                    # bracket, a closing bracket can be followed by a comma
                    # since we are in an array.
                    start_group_index = end_group_index + 1
                    while (start_group_index < len(a[1:]) and
                           a[start_group_index] != '{'):
                        start_group_index += 1
                    end_group_index = start_group_index + 1
                a = new_a
            b = 0
            if strarray:
                # Re-join string elements that were split on a comma that
                # actually sat inside the string.
                while b < len(a) - 1:
                    ab = a[b].strip()
                    while (not self.bounded_string(ab) or
                           (len(ab) > 2 and
                            ab[0] == ab[1] == ab[2] and
                            ab[-2] != ab[0] and
                            ab[-3] != ab[0])):
                        a[b] = a[b] + ',' + a[b + 1]
                        ab = a[b].strip()
                        if b < len(a) - 2:
                            a = a[:b + 1] + a[b + 2:]
                        else:
                            a = a[:b + 1]
                    b += 1
        else:
            # Nested arrays: split on commas only at bracket depth zero.
            al = list(a[1:-1])
            a = []
            openarr = 0
            j = 0
            for i in _range(len(al)):
                if al[i] == '[':
                    openarr += 1
                elif al[i] == ']':
                    openarr -= 1
                elif al[i] == ',' and not openarr:
                    a.append(''.join(al[j:i]))
                    j = i + 1
            a.append(''.join(al[j:]))
        for i in _range(len(a)):
            a[i] = a[i].strip()
            if a[i] != '':
                nval, ntype = self.load_value(a[i])
                if atype:
                    if ntype != atype:
                        raise ValueError("Not a homogeneous array")
                else:
                    atype = ntype
                retval.append(nval)
        return retval

    def preserve_comment(self, line_no, key, comment, beginline):
        # Hook for comment-preserving subclasses; base decoder drops comments.
        pass

    def embed_comments(self, idx, currentlevel):
        # Hook for comment-preserving subclasses; base decoder does nothing.
        pass
class TomlPreserveCommentDecoder(TomlDecoder):
    """Decoder that records comments so an encoder can re-emit them."""

    def __init__(self, _dict=dict):
        # Maps source line number -> (key, comment text, beginline flag).
        self.saved_comments = {}
        super(TomlPreserveCommentDecoder, self).__init__(_dict)

    def preserve_comment(self, line_no, key, comment, beginline):
        # Called by loads() during its first scanning pass.
        self.saved_comments[line_no] = (key, comment, beginline)

    def embed_comments(self, idx, currentlevel):
        """Wrap the value for line *idx*'s key in a CommentValue, if saved."""
        if idx not in self.saved_comments:
            return

        key, comment, beginline = self.saved_comments[idx]
        currentlevel[key] = CommentValue(currentlevel[key], comment, beginline,
                                         self._dict)
| mit |
samedder/azure-cli | src/command_modules/azure-cli-sf/azure/cli/command_modules/sf/tests/test_sf_node.py | 2 | 1601 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
import azure.cli.command_modules.sf.custom as sf_c
from azure.cli.core.util import CLIError
class SfNodeTests(unittest.TestCase):
    """Unit tests for sf_c.parse_package_sharing_policies.

    NOTE(fix): the original method names ended in ``_test`` instead of
    starting with ``test``, so neither unittest's default TestLoader
    (testMethodPrefix == 'test') nor pytest ever collected them -- the
    whole suite silently never ran. They are renamed with the ``test_``
    prefix so discovery picks them up.
    """

    def test_none_package_sharing_policies_returns_none(self):
        # No policies supplied -> None passthrough.
        self.assertIs(sf_c.parse_package_sharing_policies(None), None)

    def test_empty_package_sharing_policies_returns_none(self):
        # An empty list is treated the same as None.
        self.assertIs(sf_c.parse_package_sharing_policies([]), None)

    def test_empty_scope_package_sharing_policy_returns_none_scope(self):
        # A policy without a "scope" key yields a policy with scope None.
        res = sf_c.parse_package_sharing_policies([{"name": "derp_a"}])
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0].shared_package_name, "derp_a")
        self.assertEqual(res[0].package_sharing_scope, None)

    def test_invalid_scope_package_sharing_policy_raises_error(self):
        # Unknown scope values must be rejected with a CLIError.
        with self.assertRaises(CLIError):
            sf_c.parse_package_sharing_policies([{"name": "derp_a", "scope": "InIn"}])

    def test_single_scope_package_sharing_policy_returns_single_policy(self):
        res = sf_c.parse_package_sharing_policies([{"name": "derp_a", "scope": "All"}])
        self.assertEqual(len(res), 1)
        self.assertEqual(res[0].shared_package_name, "derp_a")
        self.assertEqual(res[0].package_sharing_scope, "All")
| mit |
beckastar/django | tests/select_related_onetoone/tests.py | 59 | 9624 | from __future__ import unicode_literals
import unittest
from django.test import TestCase
from .models import (User, UserProfile, UserStat, UserStatResult, StatDetails,
AdvancedUserStat, Image, Product, Parent1, Parent2, Child1, Child2, Child3,
Child4)
class ReverseSelectRelatedTestCase(TestCase):
    """Tests for select_related() across reverse one-to-one relations.

    Each test wraps its queryset work in assertNumQueries to prove that
    select_related() really collapsed the related lookups into a single
    query (or deliberately did not, for the negative cases).
    """

    def setUp(self):
        # "test": user with a profile and a plain UserStat (+ details).
        user = User.objects.create(username="test")
        UserProfile.objects.create(user=user, state="KS", city="Lawrence")
        results = UserStatResult.objects.create(results='first results')
        userstat = UserStat.objects.create(user=user, posts=150,
                                           results=results)
        StatDetails.objects.create(base_stats=userstat, comments=259)
        # "bob": user with an AdvancedUserStat subclass instance.
        user2 = User.objects.create(username="bob")
        results2 = UserStatResult.objects.create(results='moar results')
        advstat = AdvancedUserStat.objects.create(user=user2, posts=200, karma=5,
                                                  results=results2)
        StatDetails.objects.create(base_stats=advstat, comments=250)
        # Multi-table-inheritance fixtures (Parent1/Parent2, Child1/Child2).
        p1 = Parent1(name1="Only Parent1")
        p1.save()
        c1 = Child1(name1="Child1 Parent1", name2="Child1 Parent2", value=1)
        c1.save()
        p2 = Parent2(name2="Child2 Parent2")
        p2.save()
        c2 = Child2(name1="Child2 Parent1", parent2=p2, value=2)
        c2.save()

    def test_basic(self):
        with self.assertNumQueries(1):
            u = User.objects.select_related("userprofile").get(username="test")
            self.assertEqual(u.userprofile.state, "KS")

    def test_follow_next_level(self):
        with self.assertNumQueries(1):
            u = User.objects.select_related("userstat__results").get(username="test")
            self.assertEqual(u.userstat.posts, 150)
            self.assertEqual(u.userstat.results.results, 'first results')

    def test_follow_two(self):
        with self.assertNumQueries(1):
            u = User.objects.select_related("userprofile", "userstat").get(username="test")
            self.assertEqual(u.userprofile.state, "KS")
            self.assertEqual(u.userstat.posts, 150)

    def test_follow_two_next_level(self):
        with self.assertNumQueries(1):
            u = User.objects.select_related("userstat__results", "userstat__statdetails").get(username="test")
            self.assertEqual(u.userstat.results.results, 'first results')
            self.assertEqual(u.userstat.statdetails.comments, 259)

    def test_forward_and_back(self):
        with self.assertNumQueries(1):
            stat = UserStat.objects.select_related("user__userprofile").get(user__username="test")
            self.assertEqual(stat.user.userprofile.state, 'KS')
            self.assertEqual(stat.user.userstat.posts, 150)

    def test_back_and_forward(self):
        with self.assertNumQueries(1):
            u = User.objects.select_related("userstat").get(username="test")
            self.assertEqual(u.userstat.user.username, 'test')

    def test_not_followed_by_default(self):
        # Bare select_related() does NOT traverse reverse one-to-ones,
        # so touching userstat costs a second query.
        with self.assertNumQueries(2):
            u = User.objects.select_related().get(username="test")
            self.assertEqual(u.userstat.posts, 150)

    def test_follow_from_child_class(self):
        with self.assertNumQueries(1):
            stat = AdvancedUserStat.objects.select_related('user', 'statdetails').get(posts=200)
            self.assertEqual(stat.statdetails.comments, 250)
            self.assertEqual(stat.user.username, 'bob')

    def test_follow_inheritance(self):
        with self.assertNumQueries(1):
            stat = UserStat.objects.select_related('user', 'advanceduserstat').get(posts=200)
            self.assertEqual(stat.advanceduserstat.posts, 200)
            self.assertEqual(stat.user.username, 'bob')
            self.assertEqual(stat.advanceduserstat.user.username, 'bob')

    def test_nullable_relation(self):
        im = Image.objects.create(name="imag1")
        p1 = Product.objects.create(name="Django Plushie", image=im)
        p2 = Product.objects.create(name="Talking Django Plushie")

        with self.assertNumQueries(1):
            result = sorted(Product.objects.select_related("image"), key=lambda x: x.name)
            self.assertEqual([p.name for p in result], ["Django Plushie", "Talking Django Plushie"])

            self.assertEqual(p1.image, im)
            # Check for ticket #13839
            self.assertIsNone(p2.image)

    def test_missing_reverse(self):
        """
        Ticket #13839: select_related() should NOT cache None
        for missing objects on a reverse 1-1 relation.
        """
        with self.assertNumQueries(1):
            user = User.objects.select_related('userprofile').get(username='bob')
            with self.assertRaises(UserProfile.DoesNotExist):
                user.userprofile

    def test_nullable_missing_reverse(self):
        """
        Ticket #13839: select_related() should NOT cache None
        for missing objects on a reverse 0-1 relation.
        """
        Image.objects.create(name="imag1")

        with self.assertNumQueries(1):
            image = Image.objects.select_related('product').get()
            with self.assertRaises(Product.DoesNotExist):
                image.product

    def test_parent_only(self):
        with self.assertNumQueries(1):
            p = Parent1.objects.select_related('child1').get(name1="Only Parent1")
        # Missing child must raise without issuing another query.
        with self.assertNumQueries(0):
            with self.assertRaises(Child1.DoesNotExist):
                p.child1

    def test_multiple_subclass(self):
        with self.assertNumQueries(1):
            p = Parent1.objects.select_related('child1').get(name1="Child1 Parent1")
            self.assertEqual(p.child1.name2, 'Child1 Parent2')

    def test_onetoone_with_subclass(self):
        with self.assertNumQueries(1):
            p = Parent2.objects.select_related('child2').get(name2="Child2 Parent2")
            self.assertEqual(p.child2.name1, 'Child2 Parent1')

    def test_onetoone_with_two_subclasses(self):
        with self.assertNumQueries(1):
            p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child2 Parent2")
            self.assertEqual(p.child2.name1, 'Child2 Parent1')
            with self.assertRaises(Child3.DoesNotExist):
                p.child2.child3
        p3 = Parent2(name2="Child3 Parent2")
        p3.save()
        c2 = Child3(name1="Child3 Parent1", parent2=p3, value=2, value3=3)
        c2.save()
        with self.assertNumQueries(1):
            p = Parent2.objects.select_related('child2', "child2__child3").get(name2="Child3 Parent2")
            self.assertEqual(p.child2.name1, 'Child3 Parent1')
            self.assertEqual(p.child2.child3.value3, 3)
            self.assertEqual(p.child2.child3.value, p.child2.value)
            self.assertEqual(p.child2.name1, p.child2.child3.name1)

    def test_multiinheritance_two_subclasses(self):
        with self.assertNumQueries(1):
            p = Parent1.objects.select_related('child1', 'child1__child4').get(name1="Child1 Parent1")
            self.assertEqual(p.child1.name2, 'Child1 Parent2')
            self.assertEqual(p.child1.name1, p.name1)
            with self.assertRaises(Child4.DoesNotExist):
                p.child1.child4
        Child4(name1='n1', name2='n2', value=1, value4=4).save()
        with self.assertNumQueries(1):
            p = Parent2.objects.select_related('child1', 'child1__child4').get(name2="n2")
            self.assertEqual(p.name2, 'n2')
            self.assertEqual(p.child1.name1, 'n1')
            self.assertEqual(p.child1.name2, p.name2)
            self.assertEqual(p.child1.value, 1)
            self.assertEqual(p.child1.child4.name1, p.child1.name1)
            self.assertEqual(p.child1.child4.name2, p.child1.name2)
            self.assertEqual(p.child1.child4.value, p.child1.value)
            self.assertEqual(p.child1.child4.value4, 4)

    # Known-broken: deferred fields + inherited select_related (see the
    # expectedFailure markers below).
    @unittest.expectedFailure
    def test_inheritance_deferred(self):
        c = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4)
        with self.assertNumQueries(1):
            p = Parent2.objects.select_related('child1').only(
                'id2', 'child1__value').get(name2="n2")
            self.assertEqual(p.id2, c.id2)
            self.assertEqual(p.child1.value, 1)
        p = Parent2.objects.select_related('child1').only(
            'id2', 'child1__value').get(name2="n2")
        with self.assertNumQueries(1):
            self.assertEqual(p.name2, 'n2')
        p = Parent2.objects.select_related('child1').only(
            'id2', 'child1__value').get(name2="n2")
        with self.assertNumQueries(1):
            self.assertEqual(p.child1.name2, 'n2')

    @unittest.expectedFailure
    def test_inheritance_deferred2(self):
        c = Child4.objects.create(name1='n1', name2='n2', value=1, value4=4)
        qs = Parent2.objects.select_related('child1', 'child4').only(
            'id2', 'child1__value', 'child1__child4__value4')
        with self.assertNumQueries(1):
            p = qs.get(name2="n2")
            self.assertEqual(p.id2, c.id2)
            self.assertEqual(p.child1.value, 1)
            self.assertEqual(p.child1.child4.value4, 4)
            self.assertEqual(p.child1.child4.id2, c.id2)
        p = qs.get(name2="n2")
        with self.assertNumQueries(1):
            self.assertEqual(p.child1.name2, 'n2')
        p = qs.get(name2="n2")
        with self.assertNumQueries(1):
            self.assertEqual(p.child1.name1, 'n1')
        with self.assertNumQueries(1):
            self.assertEqual(p.child1.child4.name1, 'n1')
| bsd-3-clause |
hans/lightblue-0.4 | build/lib/lightblue/_LightAquaBlue.py | 83 | 2357 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
"""
Provides a python interface to the LightAquaBlue Framework classes, through
PyObjC.
See http://pyobjc.sourceforge.net for details on how to access Objective-C
classes through PyObjC.
"""
import objc
import os.path
# Absolute path where the LightAquaBlue Objective-C framework must be
# installed; there is no fallback search path.
_FRAMEWORK_PATH = u'/Library/Frameworks/LightAquaBlue.framework'
if not os.path.isdir(_FRAMEWORK_PATH):
    raise ImportError("Cannot load LightAquaBlue framework, not found at" + \
        _FRAMEWORK_PATH)
try:
    # mac os 10.5 loads frameworks using bridgesupport metadata
    __bundle__ = objc.initFrameworkWrapper("LightAquaBlue",
        frameworkIdentifier="com.blammit.LightAquaBlue",
        frameworkPath=objc.pathForFramework(_FRAMEWORK_PATH),
        globals=globals())
except AttributeError:
    # earlier versions use loadBundle() and setSignatureForSelector()
    objc.loadBundle("LightAquaBlue", globals(),
        bundle_path=objc.pathForFramework(_FRAMEWORK_PATH))
    # return int, take (object, object, object, output unsigned char, output int)
    # i.e. in python: return (int, char, int), take (object, object, object)
    objc.setSignatureForSelector("BBServiceAdvertiser",
        "addRFCOMMServiceDictionary:withName:UUID:channelID:serviceRecordHandle:",
        "i@0:@@@o^Co^I")
    # set to take (6-char array, unsigned char, object)
    # this seems to work even though the selector doesn't take a char array,
    # it takes a struct 'BluetoothDeviceAddress' which contains a char array.
    objc.setSignatureForSelector("BBBluetoothOBEXClient",
        "initWithRemoteDeviceAddress:channelID:delegate:",
        '@@:r^[6C]C@')
# Loading injected the framework classes into globals(); drop the objc
# module reference so it is not re-exported from this module.
del objc
| gpl-3.0 |
abhattad4/Digi-Menu | digimenu2/django/core/servers/fastcgi.py | 170 | 6631 | """
FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol.
Uses the flup python package: http://www.saddi.com/software/flup/
This is an adaptation of the flup package to add FastCGI server support
to run Django apps from Web servers that support the FastCGI protocol.
This module can be run standalone or from the django-admin / manage.py
scripts using the "runfcgi" directive.
Run with the extra option "help" for a list of additional options you can
pass to this server.
"""
import importlib
import os
import sys
__version__ = "0.1"
# Public API: only the server entry point is exported.
__all__ = ["runfastcgi"]
# Default value for every recognized runfcgi setting.  runfastcgi() copies
# this dict and overlays user-supplied "key=value" arguments on top of it;
# None means "not set" for most options.
FASTCGI_OPTIONS = {
    'protocol': 'fcgi',
    'host': None,
    'port': None,
    'socket': None,
    'method': 'fork',
    'daemonize': None,
    'workdir': '/',
    'pidfile': None,
    'maxspare': 5,
    'minspare': 2,
    'maxchildren': 50,
    'maxrequests': 0,
    'debug': None,
    'outlog': None,
    'errlog': None,
    'umask': None,
}
FASTCGI_HELP = r"""
Run this project as a fastcgi (or some other protocol supported
by flup) application. To do this, the flup package from
http://www.saddi.com/software/flup/ is required.
runfcgi [options] [fcgi settings]
Optional Fcgi settings: (setting=value)
protocol=PROTOCOL fcgi, scgi, ajp, ... (default %(protocol)s)
host=HOSTNAME hostname to listen on.
port=PORTNUM port to listen on.
socket=FILE UNIX socket to listen on.
method=IMPL prefork or threaded (default %(method)s).
maxrequests=NUMBER number of requests a child handles before it is
killed and a new child is forked (0 = no limit).
maxspare=NUMBER max number of spare processes / threads (default %(maxspare)s).
minspare=NUMBER min number of spare processes / threads (default %(minspare)s).
maxchildren=NUMBER hard limit number of processes / threads (default %(maxchildren)s).
daemonize=BOOL whether to detach from terminal.
pidfile=FILE write the spawned process-id to this file.
workdir=DIRECTORY change to this directory when daemonizing (default %(workdir)s).
debug=BOOL set to true to enable flup tracebacks.
outlog=FILE write stdout to this file.
errlog=FILE write stderr to this file.
umask=UMASK umask to use when daemonizing, in octal notation (default 022).
Examples:
Run a "standard" fastcgi process on a file-descriptor
(for Web servers which spawn your processes for you)
$ manage.py runfcgi method=threaded
Run a scgi server on a TCP host/port
$ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025
Run a fastcgi server on a UNIX domain socket (posix platforms only)
$ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock
Run a fastCGI as a daemon and write the spawned PID in a file
$ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \
daemonize=true pidfile=/var/run/django-fcgi.pid
""" % FASTCGI_OPTIONS
def fastcgi_help(message=None):
    """Print the runfcgi usage text, then *message* if one was given.

    Always returns False so callers can write ``return fastcgi_help(...)``
    to report an error and bail out in a single statement.
    """
    output = [FASTCGI_HELP]
    if message:
        output.append(message)
    for text in output:
        print(text)
    return False
def runfastcgi(argset=[], **kwargs):
    """Parse runfcgi settings and run a flup WSGI server for this project.

    ``argset`` is a list of "key=value" strings (a bare key becomes True);
    ``kwargs`` are applied first, then argset entries override them.
    Returns False on any configuration error, otherwise blocks in the
    server's run loop.
    NOTE: the mutable default ``argset=[]`` is safe here -- the list is
    only iterated, never mutated.
    """
    options = FASTCGI_OPTIONS.copy()
    options.update(kwargs)
    for x in argset:
        if "=" in x:
            k, v = x.split('=', 1)
        else:
            k, v = x, True
        options[k.lower()] = v
    if "help" in options:
        return fastcgi_help()
    try:
        import flup  # NOQA
    except ImportError as e:
        sys.stderr.write("ERROR: %s\n" % e)
        sys.stderr.write(" Unable to load the flup package. In order to run django\n")
        sys.stderr.write(" as a FastCGI application, you will need to get flup from\n")
        sys.stderr.write(" http://www.saddi.com/software/flup/ If you've already\n")
        sys.stderr.write(" installed flup, then make sure you have it in your PYTHONPATH.\n")
        return False
    # Pick the flup server module, e.g. flup.server.fcgi_fork.
    flup_module = 'server.' + options['protocol']
    if options['method'] in ('prefork', 'fork'):
        wsgi_opts = {
            'maxSpare': int(options["maxspare"]),
            'minSpare': int(options["minspare"]),
            'maxChildren': int(options["maxchildren"]),
            'maxRequests': int(options["maxrequests"]),
        }
        flup_module += '_fork'
    elif options['method'] in ('thread', 'threaded'):
        wsgi_opts = {
            'maxSpare': int(options["maxspare"]),
            'minSpare': int(options["minspare"]),
            'maxThreads': int(options["maxchildren"]),
        }
    else:
        return fastcgi_help("ERROR: Implementation must be one of prefork or "
                            "thread.")
    wsgi_opts['debug'] = options['debug'] is not None
    try:
        module = importlib.import_module('.%s' % flup_module, 'flup')
        WSGIServer = module.WSGIServer
    except Exception:
        print("Can't import flup." + flup_module)
        return False
    # Prep up and go
    from django.core.servers.basehttp import get_internal_wsgi_application
    # Exactly one of (host+port) or socket may be set; neither means the
    # Web server hands us a pre-bound file descriptor (bindAddress=None).
    if options["host"] and options["port"] and not options["socket"]:
        wsgi_opts['bindAddress'] = (options["host"], int(options["port"]))
    elif options["socket"] and not options["host"] and not options["port"]:
        wsgi_opts['bindAddress'] = options["socket"]
    elif not options["socket"] and not options["host"] and not options["port"]:
        wsgi_opts['bindAddress'] = None
    else:
        return fastcgi_help("Invalid combination of host, port, socket.")
    if options["daemonize"] is None:
        # Default to daemonizing if we're running on a socket/named pipe.
        daemonize = (wsgi_opts['bindAddress'] is not None)
    else:
        if options["daemonize"].lower() in ('true', 'yes', 't'):
            daemonize = True
        elif options["daemonize"].lower() in ('false', 'no', 'f'):
            daemonize = False
        else:
            return fastcgi_help("ERROR: Invalid option for daemonize "
                                "parameter.")
    daemon_kwargs = {}
    if options['outlog']:
        daemon_kwargs['out_log'] = options['outlog']
    if options['errlog']:
        daemon_kwargs['err_log'] = options['errlog']
    if options['umask']:
        # umask is given in octal notation, e.g. "022".
        daemon_kwargs['umask'] = int(options['umask'], 8)
    if daemonize:
        from django.utils.daemonize import become_daemon
        become_daemon(our_home_dir=options["workdir"], **daemon_kwargs)
    # Write the pidfile after daemonizing so it records the daemon's pid.
    if options["pidfile"]:
        with open(options["pidfile"], "w") as fp:
            fp.write("%d\n" % os.getpid())
    WSGIServer(get_internal_wsgi_application(), **wsgi_opts).run()
if __name__ == '__main__':
    # Allow running this module directly; forward the command-line args.
    runfastcgi(sys.argv[1:])
| bsd-3-clause |
janewangfb/spark | examples/src/main/python/ml/normalizer_example.py | 123 | 1807 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import Normalizer
from pyspark.ml.linalg import Vectors
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    spark = SparkSession\
        .builder\
        .appName("NormalizerExample")\
        .getOrCreate()
    # $example on$
    # Three toy rows of dense 3-vectors to demonstrate normalization.
    dataFrame = spark.createDataFrame([
        (0, Vectors.dense([1.0, 0.5, -1.0]),),
        (1, Vectors.dense([2.0, 1.0, 1.0]),),
        (2, Vectors.dense([4.0, 10.0, 2.0]),)
    ], ["id", "features"])
    # Normalize each Vector using $L^1$ norm.
    normalizer = Normalizer(inputCol="features", outputCol="normFeatures", p=1.0)
    l1NormData = normalizer.transform(dataFrame)
    print("Normalized using L^1 norm")
    l1NormData.show()
    # Normalize each Vector using $L^\infty$ norm.
    # The param-map argument overrides p for this one transform() call only.
    lInfNormData = normalizer.transform(dataFrame, {normalizer.p: float("inf")})
    print("Normalized using L^inf norm")
    lInfNormData.show()
    # $example off$
    spark.stop()
| apache-2.0 |
jasondunsmore/python-heatclient | heatclient/tests/test_software_configs.py | 4 | 3527 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from heatclient.v1.software_configs import SoftwareConfig
from heatclient.v1.software_configs import SoftwareConfigManager
class SoftwareConfigTest(testtools.TestCase):
    """Unit tests for the SoftwareConfig resource wrapper.

    The manager is a MagicMock, so each test just verifies that the
    resource delegates to the manager with the expected arguments.
    """
    def setUp(self):
        super(SoftwareConfigTest, self).setUp()
        uuid = 'bca6871d-86c0-4aff-b792-58a1f6947b57'
        self.config_id = uuid
        self.config = SoftwareConfig(mock.MagicMock(), info={'id': uuid})
    def test_delete(self):
        # delete() must forward the resource's id to the manager.
        manager = self.config.manager
        manager.delete.return_value = None
        result = self.config.delete()
        self.assertIsNone(result)
        passed_kwargs = manager.delete.call_args[1]
        self.assertEqual(self.config_id, passed_kwargs['config_id'])
    def test_data(self):
        # str() shows the info dict; data() forwards kwargs to the manager.
        expected_repr = "<SoftwareConfig {'id': '%s'}>" % self.config_id
        self.assertEqual(expected_repr, str(self.config))
        manager = self.config.manager
        manager.data.return_value = None
        self.config.data(name='config_mysql')
        passed_kwargs = manager.data.call_args[1]
        self.assertEqual('config_mysql', passed_kwargs['name'])
class SoftwareConfigManagerTest(testtools.TestCase):
    """Unit tests for SoftwareConfigManager's REST request building."""
    def setUp(self):
        super(SoftwareConfigManagerTest, self).setUp()
        # The HTTP client is mocked; tests inspect json_request/delete calls.
        self.manager = SoftwareConfigManager(mock.MagicMock())
    def test_get(self):
        """get() issues GET /software_configs/<id> and wraps the payload."""
        config_id = 'bca6871d-86c0-4aff-b792-58a1f6947b57'
        data = {
            'id': config_id,
            'name': 'config_mysql',
            'group': 'Heat::Shell',
            'config': '#!/bin/bash',
            'inputs': [],
            # NOTE(review): 'ouputs' looks like a typo for 'outputs' -- left
            # as-is because it is test payload data; verify against the API.
            'ouputs': [],
            'options': []}
        self.manager.client.json_request.return_value = (
            {}, {'software_config': data})
        result = self.manager.get(config_id=config_id)
        self.assertEqual(SoftwareConfig(self.manager, data), result)
        call_args = self.manager.client.json_request.call_args
        # call_args is (args, kwargs); *-unpacking passes kwargs as the
        # assertEqual msg argument, so only positional args are compared.
        self.assertEqual(
            ('GET', '/software_configs/%s' % config_id), *call_args)
    def test_create(self):
        """create() POSTs {'data': body} and returns the wrapped response."""
        config_id = 'bca6871d-86c0-4aff-b792-58a1f6947b57'
        body = {
            'name': 'config_mysql',
            'group': 'Heat::Shell',
            'config': '#!/bin/bash',
            'inputs': [],
            'ouputs': [],
            'options': []}
        data = body.copy()
        data['id'] = config_id
        self.manager.client.json_request.return_value = (
            {}, {'software_config': data})
        result = self.manager.create(**body)
        self.assertEqual(SoftwareConfig(self.manager, data), result)
        args, kargs = self.manager.client.json_request.call_args
        self.assertEqual('POST', args[0])
        self.assertEqual('/software_configs', args[1])
        self.assertEqual({'data': body}, kargs)
    def test_delete(self):
        """delete() issues DELETE /software_configs/<id>."""
        config_id = 'bca6871d-86c0-4aff-b792-58a1f6947b57'
        self.manager.delete(config_id)
        call_args = self.manager.client.delete.call_args
        self.assertEqual(
            ('/software_configs/%s' % config_id,), *call_args)
| apache-2.0 |
cheertarts/filter-comments | filter-comments.py | 1 | 4693 | #!/usr/bin/env python3
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from getopt import getopt
import sys
class FilterComments:
    """Extract documentation comments from source files.

    Reads each input file, keeps only the lines belonging to specially
    marked comment blocks (see ``comment_types``), strips the comment
    markers and writes the collected text to the output file (or stdout).

    Fixes over the previous revision:
      * no longer closes ``sys.stdout`` when no output file was given;
      * input files are closed deterministically via ``with``.
    """

    # Per-style markers: [block-opener line, text prefix, tab prefix,
    # empty-comment line].  A block starts at a line exactly equal to the
    # opener and ends at the first line matching none of the prefixes.
    comment_types = {
        'Haskell' : ['---\n', '-- ', '--\t', '--\n'],
        'Shell' : ['##\n', '# ', '#\t', '#\n'],
        'Lisp' : [';;;\n', ';; ', ';;\t', ';;\n'],
        'Java' : ['///\n', '// ', '//\t', '//\n'],
        'C' : ['/**\n', ' * ', ' *\t', ' *\n']
    }

    # Short and long option spellings accepted by parse_arguments().
    flags = 'hksljco:'
    full_flags = ['help', 'haskell-style', 'shell-style', 'lisp-style',
                  'java-style', 'c-style', 'output=']

    def __init__(self, args):
        """Store argv-style ``args``; default to C-style comments, stdout."""
        self.args = args
        self.style = self.comment_types['C']
        self.output = None

    def print_help(self):
        """Print usage information for the command-line interface."""
        print(self.args[0] + " -[flags]o [output] [inputs]")
        print("Flags include: ")
        print("")
        print(" -h, --help:")
        print(" Gives this help screen.")
        print(" -k, --haskell-style")
        print(" Uses haskell style comments.")
        print(" -s, --shell-style")
        print(" Uses shell style comments.")
        print(" -l, --lisp-style")
        print(" Uses lisp style comments.")
        print(" -j, --java-style")
        print(" Uses java (C++ single line comments) style comments.")
        print(" -c, --c-style")
        print(" Uses c style comments.")
        print(" -o, --output")
        print(" Sets output file, if none defaults to stdout.")
        print("")

    def parse_arguments(self):
        """Parse self.args, setting self.style/self.output/self.inputs.

        Exits the process after printing help when -h/--help is given.
        """
        sorted_args, self.inputs = getopt(self.args[1:], self.flags, self.full_flags)
        for option, value in sorted_args:
            if option in ('--haskell-style', '-k'):
                self.style = self.comment_types['Haskell']
            elif option in ('--shell-style', '-s'):
                self.style = self.comment_types['Shell']
            elif option in ('--lisp-style', '-l'):
                self.style = self.comment_types['Lisp']
            elif option in ('--java-style', '-j'):
                self.style = self.comment_types['Java']
            elif option in ('--c-style', '-c'):
                self.style = self.comment_types['C']
            elif option in ('--output', '-o'):
                self.output = value
            elif option in ('--help', '-h'):
                self.print_help()
                sys.exit()

    def filter_line(self, line, file_buffer, in_comment):
        """Process one input line; return the updated (buffer, in_comment).

        Outside a comment block, only an exact opener line starts one.
        Inside a block, marker prefixes are stripped; any other line ends
        the block and appends a separating newline.
        """
        opener, text_prefix, tab_prefix, empty_line = self.style
        if not in_comment:
            if line == opener:
                in_comment = True
        elif line.startswith(text_prefix):
            file_buffer += line[len(text_prefix):]
        elif line.startswith(tab_prefix):
            file_buffer += '\t'
            file_buffer += line[len(tab_prefix):]
        elif line.startswith(empty_line):
            file_buffer += '\n'
        else:
            file_buffer += '\n'
            in_comment = False
        return file_buffer, in_comment

    def filter_comments(self):
        """Filter all input files and write the result to the output.

        Requires parse_arguments() to have run (it sets self.inputs).
        Exits the process if an input file is missing.  Only closes the
        output stream when this method opened it, so stdout stays usable.
        """
        in_comment = False
        owns_output = self.output is not None
        output_file = open(self.output, 'w') if owns_output else sys.stdout
        file_buffer = '\n'
        for filename in self.inputs:
            try:
                input_file = open(filename, 'r')
            except FileNotFoundError:
                print("An input file has not been found.", file=sys.stderr)
                sys.exit()
            with input_file:
                for line in input_file:
                    file_buffer, in_comment = self.filter_line(
                        line,
                        file_buffer,
                        in_comment)
        output_file.write(file_buffer)
        if owns_output:
            output_file.close()
def main(args=None):
    """Command-line entry point.

    :param args: argv-style list (program name first).  Defaults to
        ``sys.argv`` looked up at call time -- the old ``args=sys.argv``
        default bound the list at import time, which would go stale if
        ``sys.argv`` were later replaced.
    Exits the process if no input files are supplied.
    """
    if args is None:
        args = sys.argv
    if len(args) < 2:
        print("Need more arguments.", file=sys.stderr)
        sys.exit()
    app = FilterComments(args)
    app.parse_arguments()
    app.filter_comments()
if __name__ == '__main__':
    # Run the filter when executed as a script.
    main()
| gpl-3.0 |
die88/ardupilot | Tools/autotest/common.py | 32 | 9718 | from __future__ import print_function
import math
import time
from pymavlink import mavwp
from pysim import util
# a list of pexpect objects to read while waiting for
# messages. This keeps the output to stdout flowing
expect_list = []
def expect_list_clear():
    """Remove every pexpect object from the global expect list (in place)."""
    global expect_list
    while expect_list:
        expect_list.pop()
def expect_list_extend(list_to_add):
    """Append each pexpect object in *list_to_add* to the global expect list."""
    global expect_list
    for pexpect_obj in list_to_add:
        expect_list.append(pexpect_obj)
def idle_hook(mav):
    """Drain pending output of every registered pexpect object.

    Keeps child-process output flowing to stdout while we block waiting
    for mavlink messages; *mav* is unused but kept for hook parity.
    """
    global expect_list
    for pexpect_obj in expect_list:
        util.pexpect_drain(pexpect_obj)
def message_hook(mav, msg):
    """mavlink per-message hook: drain subprocess output via idle_hook.

    *msg* is unused; the hook signature is dictated by pymavlink.
    """
    idle_hook(mav)
def expect_callback(e):
    """Drain every registered pexpect object except *e*.

    Called while *e* itself is blocked matching an expect pattern, so the
    other children's output keeps flowing.
    """
    global expect_list
    for pexpect_obj in expect_list:
        if pexpect_obj != e:
            util.pexpect_drain(pexpect_obj)
def get_distance(loc1, loc2):
    """Return the approximate ground distance in metres between two locations.

    Uses a flat-earth approximation (~111319.5 m per degree), so it is only
    meaningful over short ranges; altitude is ignored.
    """
    delta_lat = loc2.lat - loc1.lat
    delta_lng = loc2.lng - loc1.lng
    squared = (delta_lat * delta_lat) + (delta_lng * delta_lng)
    return math.sqrt(squared) * 1.113195e5
def get_bearing(loc1, loc2):
    """Return the compass bearing in degrees from loc1 towards loc2.

    Flat-earth approximation; result is normalized into [0, 360).
    """
    east = loc2.lng - loc1.lng
    north = loc2.lat - loc1.lat
    angle = math.atan2(-north, east) * 57.2957795
    bearing = 90.00 + angle
    if bearing < 0:
        bearing += 360.00
    return bearing
def wait_seconds(mav, seconds_to_wait):
    """Wait until simulator time has advanced by *seconds_to_wait* seconds.

    Polls get_sim_time(), which consumes SYSTEM_TIME messages, so this
    also keeps the mavlink stream draining while waiting.
    """
    start = get_sim_time(mav)
    now = start
    while now < start + seconds_to_wait:
        now = get_sim_time(mav)
def get_sim_time(mav):
    """Return the autopilot's time-since-boot in seconds.

    Blocks until a SYSTEM_TIME message arrives and converts its
    millisecond timestamp to float seconds.
    """
    msg = mav.recv_match(type='SYSTEM_TIME', blocking=True)
    return msg.time_boot_ms * 1.0e-3
def wait_altitude(mav, alt_min, alt_max, timeout=30):
    """Wait until VFR_HUD altitude is within [alt_min, alt_max] metres.

    Returns True on success, False if *timeout* sim-seconds elapse first.
    """
    climb_rate = 0
    previous_alt = 0
    tstart = get_sim_time(mav)
    print("Waiting for altitude between %u and %u" % (alt_min, alt_max))
    while get_sim_time(mav) < tstart + timeout:
        m = mav.recv_match(type='VFR_HUD', blocking=True)
        # Climb rate here is just the altitude delta between two messages.
        climb_rate = m.alt - previous_alt
        previous_alt = m.alt
        print("Wait Altitude: Cur:%u, min_alt:%u, climb_rate: %u" % (m.alt, alt_min, climb_rate))
        if m.alt >= alt_min and m.alt <= alt_max:
            print("Altitude OK")
            return True
    print("Failed to attain altitude range")
    return False
def wait_groundspeed(mav, gs_min, gs_max, timeout=30):
    """Wait until VFR_HUD groundspeed is within [gs_min, gs_max] m/s.

    Returns True on success, False if *timeout* sim-seconds elapse first.
    """
    tstart = get_sim_time(mav)
    print("Waiting for groundspeed between %.1f and %.1f" % (gs_min, gs_max))
    while get_sim_time(mav) < tstart + timeout:
        m = mav.recv_match(type='VFR_HUD', blocking=True)
        print("Wait groundspeed %.1f, target:%.1f" % (m.groundspeed, gs_min))
        if m.groundspeed >= gs_min and m.groundspeed <= gs_max:
            return True
    print("Failed to attain groundspeed range")
    return False
def wait_roll(mav, roll, accuracy, timeout=30):
    """Wait until the vehicle's roll is within *accuracy* degrees of *roll*.

    Returns True on success, False if *timeout* sim-seconds elapse first.
    """
    tstart = get_sim_time(mav)
    print("Waiting for roll of %d at %s" % (roll, time.ctime()))
    while get_sim_time(mav) < tstart + timeout:
        m = mav.recv_match(type='ATTITUDE', blocking=True)
        # ATTITUDE angles arrive in radians; convert for comparison/printing.
        p = math.degrees(m.pitch)
        r = math.degrees(m.roll)
        print("Roll %d Pitch %d" % (r, p))
        if math.fabs(r - roll) <= accuracy:
            print("Attained roll %d" % roll)
            return True
    print("Failed to attain roll %d" % roll)
    return False
def wait_pitch(mav, pitch, accuracy, timeout=30):
    """Wait until the vehicle's pitch is within *accuracy* degrees of *pitch*.

    Returns True on success, False if *timeout* sim-seconds elapse first.
    """
    tstart = get_sim_time(mav)
    print("Waiting for pitch of %u at %s" % (pitch, time.ctime()))
    while get_sim_time(mav) < tstart + timeout:
        m = mav.recv_match(type='ATTITUDE', blocking=True)
        # ATTITUDE angles arrive in radians; convert for comparison/printing.
        p = math.degrees(m.pitch)
        r = math.degrees(m.roll)
        print("Pitch %d Roll %d" % (p, r))
        if math.fabs(p - pitch) <= accuracy:
            print("Attained pitch %d" % pitch)
            return True
    print("Failed to attain pitch %d" % pitch)
    return False
def wait_heading(mav, heading, accuracy=5, timeout=30):
    """Wait until VFR_HUD heading is within *accuracy* degrees of *heading*.

    NOTE(review): the comparison does not wrap around 0/360, so a target
    near north can fail even when the vehicle is on heading -- verify.
    Returns True on success, False on timeout.
    """
    tstart = get_sim_time(mav)
    print("Waiting for heading %u with accuracy %u" % (heading, accuracy))
    while get_sim_time(mav) < tstart + timeout:
        m = mav.recv_match(type='VFR_HUD', blocking=True)
        print("Heading %u" % m.heading)
        if math.fabs(m.heading - heading) <= accuracy:
            print("Attained heading %u" % heading)
            return True
    print("Failed to attain heading %u" % heading)
    return False
def wait_distance(mav, distance, accuracy=5, timeout=30):
    """Wait until the vehicle has flown *distance* metres from its
    position at call time (within *accuracy* metres).

    Returns True on success; False on overshoot past distance+accuracy
    or on timeout.
    """
    tstart = get_sim_time(mav)
    start = mav.location()
    while get_sim_time(mav) < tstart + timeout:
        pos = mav.location()
        delta = get_distance(start, pos)
        print("Distance %.2f meters" % delta)
        if math.fabs(delta - distance) <= accuracy:
            print("Attained distance %.2f meters OK" % delta)
            return True
        if delta > (distance + accuracy):
            # Flew past the window between two position samples.
            print("Failed distance - overshoot delta=%f distance=%f" % (delta, distance))
            return False
    print("Failed to attain distance %u" % distance)
    return False
def wait_location(mav, loc, accuracy=5, timeout=30, target_altitude=None, height_accuracy=-1):
    """Wait for arrival within *accuracy* metres of location *loc*.

    If *height_accuracy* >= 0, the altitude must also be within that many
    metres of *target_altitude* (defaults to loc.alt).  Returns True on
    arrival, False on timeout.
    """
    tstart = get_sim_time(mav)
    if target_altitude is None:
        target_altitude = loc.alt
    print("Waiting for location %.4f,%.4f at altitude %.1f height_accuracy=%.1f" % (
        loc.lat, loc.lng, target_altitude, height_accuracy))
    while get_sim_time(mav) < tstart + timeout:
        pos = mav.location()
        delta = get_distance(loc, pos)
        print("Distance %.2f meters alt %.1f" % (delta, pos.alt))
        if delta <= accuracy:
            # Horizontally there; keep waiting if the altitude check fails.
            if height_accuracy != -1 and math.fabs(pos.alt - target_altitude) > height_accuracy:
                continue
            print("Reached location (%.2f meters)" % delta)
            return True
    print("Failed to attain location")
    return False
def wait_waypoint(mav, wpnum_start, wpnum_end, allow_skip=True, max_dist=2, timeout=400):
    """Wait for the mission to progress from wpnum_start to wpnum_end.

    The timeout is per-waypoint: it restarts every time a new waypoint is
    reached.  Fails (returns False) on mode change, on a skipped waypoint
    when allow_skip is False, or on timeout.  Succeeds when the final
    waypoint is within max_dist metres, or when seq >= 255 (end marker).
    """
    tstart = get_sim_time(mav)
    # this message arrives after we set the current WP
    start_wp = mav.waypoint_current()
    current_wp = start_wp
    mode = mav.flightmode
    print("\ntest: wait for waypoint ranges start=%u end=%u\n\n" % (wpnum_start, wpnum_end))
    # if start_wp != wpnum_start:
    #     print("test: Expected start waypoint %u but got %u" % (wpnum_start, start_wp))
    #     return False
    while get_sim_time(mav) < tstart + timeout:
        seq = mav.waypoint_current()
        m = mav.recv_match(type='NAV_CONTROLLER_OUTPUT', blocking=True)
        wp_dist = m.wp_dist
        m = mav.recv_match(type='VFR_HUD', blocking=True)
        # if we changed mode, fail
        if mav.flightmode != mode:
            print('Exited %s mode' % mode)
            return False
        print("test: WP %u (wp_dist=%u Alt=%d), current_wp: %u, wpnum_end: %u" % (seq, wp_dist, m.alt, current_wp, wpnum_end))
        if seq == current_wp+1 or (seq > current_wp+1 and allow_skip):
            print("test: Starting new waypoint %u" % seq)
            # Restart the timeout clock for each new waypoint.
            tstart = get_sim_time(mav)
            current_wp = seq
            # the wp_dist check is a hack until we can sort out the right seqnum
            # for end of mission
        # if current_wp == wpnum_end or (current_wp == wpnum_end-1 and wp_dist < 2):
        if (current_wp == wpnum_end and wp_dist < max_dist):
            print("Reached final waypoint %u" % seq)
            return True
        if (seq >= 255):
            print("Reached final waypoint %u" % seq)
            return True
        if seq > current_wp+1:
            # Only reachable when allow_skip is False.
            print("Failed: Skipped waypoint! Got wp %u expected %u" % (seq, current_wp+1))
            return False
    print("Failed: Timed out waiting for waypoint %u of %u" % (wpnum_end, wpnum_end))
    return False
def save_wp(mavproxy, mav):
    """Trigger the save-waypoint function by pulsing RC channel 7.

    Toggles channel 7 low -> high -> low, waiting for each value to be
    reported back and then holding it for one sim-second, which the
    autopilot interprets as a "save current position as WP" command.
    """
    mavproxy.send('rc 7 1000\n')
    mav.recv_match(condition='RC_CHANNELS.chan7_raw==1000', blocking=True)
    wait_seconds(mav, 1)
    mavproxy.send('rc 7 2000\n')
    mav.recv_match(condition='RC_CHANNELS.chan7_raw==2000', blocking=True)
    wait_seconds(mav, 1)
    mavproxy.send('rc 7 1000\n')
    mav.recv_match(condition='RC_CHANNELS.chan7_raw==1000', blocking=True)
    wait_seconds(mav, 1)
def wait_mode(mav, mode, timeout=None):
    """Block until the autopilot reports flight mode *mode*; return the
    flight mode actually recorded on the connection afterwards."""
    print("Waiting for mode %s" % mode)
    condition = 'MAV.flightmode.upper()=="%s".upper()' % mode
    mav.recv_match(condition=condition, timeout=timeout, blocking=True)
    print("Got mode %s" % mode)
    return mav.flightmode
def mission_count(filename):
    """Return the number of waypoints stored in mission file *filename*."""
    loader = mavwp.MAVWPLoader()
    loader.load(filename)
    return loader.count()
def sim_location(mav):
    """Return the simulator's current position as a mavutil.location.

    Consumes one SIMSTATE message; lat/lng arrive as degrees * 1e7 and
    yaw in radians.
    """
    from pymavlink import mavutil
    state = mav.recv_match(type='SIMSTATE', blocking=True)
    return mavutil.location(state.lat * 1.0e-7, state.lng * 1.0e-7, 0,
                            math.degrees(state.yaw))
def log_download(mavproxy, mav, filename, timeout=360):
    """Download the latest dataflash log to *filename* via MAVProxy.

    The heartbeat waits before and after keep the mavlink link alive and
    let queued messages flush around the (slow) download.  Always returns
    True; a failed download raises via the pexpect timeout instead.
    """
    mavproxy.send("log list\n")
    mavproxy.expect("numLogs")
    mav.wait_heartbeat()
    mav.wait_heartbeat()
    mavproxy.send("set shownoise 0\n")
    mavproxy.send("log download latest %s\n" % filename)
    mavproxy.expect("Finished downloading", timeout=timeout)
    mav.wait_heartbeat()
    mav.wait_heartbeat()
    return True
| gpl-3.0 |
metaplinius/still-lambda | pyglet/media/drivers/openal/__init__.py | 2 | 23315 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# $Id$
import ctypes
import heapq
import threading
import time
import queue
import atexit
from . import lib_openal as al
from . import lib_alc as alc
from pyglet.media import MediaException, MediaEvent, AbstractAudioPlayer, \
AbstractAudioDriver, AbstractListener, MediaThread
import pyglet
# Media debugging switches, read once at import time from pyglet.options.
_debug = pyglet.options['debug_media']
# .get() with a default because 'debug_media_buffers' may be absent.
_debug_buffers = pyglet.options.get('debug_media_buffers', False)
class OpenALException(MediaException):
    """Raised for OpenAL-driver specific errors (e.g. unsupported formats)."""
    pass
# TODO move functions into context/driver?
def _split_nul_strings(s):
# NUL-separated list of strings, double-NUL-terminated.
nul = False
i = 0
while True:
if s[i] == '\0':
if nul:
break
else:
nul = True
else:
nul = False
i += 1
s = s[:i - 1]
return [_f for _f in [ss.strip() for ss in s.split('\0')] if _f]
# (channel count, bits per sample) -> OpenAL buffer format enum.
format_map = {
    (1, 8): al.AL_FORMAT_MONO8,
    (1, 16): al.AL_FORMAT_MONO16,
    (2, 8): al.AL_FORMAT_STEREO8,
    (2, 16): al.AL_FORMAT_STEREO16,
}
class OpenALWorker(MediaThread):
    """Background thread that refills the neediest player's AL buffers.

    Each iteration picks the registered player with the largest writable
    space and refills it; all shared state is guarded by self.condition
    (inherited from MediaThread).
    """
    # Minimum size to bother refilling (bytes)
    _min_write_size = 512
    # Time to wait if there are players, but they're all full.
    _nap_time = 0.05
    # Time to wait if there are no players.
    # NOTE(review): None appears to mean "sleep until notified" in
    # MediaThread.sleep -- confirm against the MediaThread implementation.
    _sleep_time = None
    def __init__(self):
        super(OpenALWorker, self).__init__()
        # Players currently managed by this worker; guarded by condition.
        self.players = set()
    def run(self):
        while True:
            # This is a big lock, but ensures a player is not deleted while
            # we're processing it -- this saves on extra checks in the
            # player's methods that would otherwise have to check that it's
            # still alive.
            self.condition.acquire()
            if self.stopped:
                self.condition.release()
                break
            # -1 is a sentinel meaning "refilled a player; don't sleep via
            # MediaThread.sleep, just nap below".
            sleep_time = -1
            # Refill player with least write_size
            if self.players:
                player = None
                write_size = 0
                for p in self.players:
                    s = p.get_write_size()
                    if s > write_size:
                        player = p
                        write_size = s
                if write_size > self._min_write_size:
                    player.refill(write_size)
                else:
                    sleep_time = self._nap_time
            else:
                sleep_time = self._sleep_time
            self.condition.release()
            if sleep_time != -1:
                self.sleep(sleep_time)
            else:
                # We MUST sleep, or we will starve pyglet's main loop. It
                # also looks like if we don't sleep enough, we'll starve out
                # various updates that stop us from properly removing players
                # that should be removed.
                time.sleep(self._nap_time)
    def add(self, player):
        """Register *player* for refilling and wake the worker."""
        self.condition.acquire()
        self.players.add(player)
        self.condition.notify()
        self.condition.release()
    def remove(self, player):
        """Deregister *player*; safe to call if it was never added."""
        self.condition.acquire()
        if player in self.players:
            self.players.remove(player)
            self.condition.notify()
        self.condition.release()
class OpenALBufferPool(object):
    """At least Mac OS X doesn't free buffers when a source is deleted; it just
    detaches them from the source. So keep our own recycled queue.
    """
    def __init__(self):
        self._buffers = [] # list of free buffer names
        self._sources = {} # { sourceId : [ buffer names used ] }
    def getBuffer(self, alSource):
        """Convenience for returning one buffer name"""
        return self.getBuffers(alSource, 1)[0]
    def getBuffers(self, alSource, i):
        """Returns an array containing i buffer names. The returned list must
        not be modified in any way, and may get changed by subsequent calls to
        getBuffers.
        """
        # Caller must hold the (module-global) context lock around AL calls.
        assert context._lock.locked()
        buffs = []
        try:
            # First try to recycle free buffers.
            while i > 0:
                b = self._buffers.pop()
                if not al.alIsBuffer(b):
                    # Protect against implementations that DO free buffers
                    # when they delete a source - carry on.
                    if _debug_buffers:
                        print("Found a bad buffer")
                    continue
                buffs.append(b)
                i -= 1
        except IndexError:
            # Free list exhausted; generate the remainder fresh.
            while i > 0:
                buffer = al.ALuint()
                al.alGenBuffers(1, buffer)
                if _debug_buffers:
                    error = al.alGetError()
                    if error != 0:
                        print(("GEN BUFFERS: " + str(error)))
                buffs.append(buffer)
                i -= 1
        # Record which buffers this source now owns, keyed by source id.
        alSourceVal = alSource.value
        if alSourceVal not in self._sources:
            self._sources[alSourceVal] = buffs
        else:
            self._sources[alSourceVal].extend(buffs)
        return buffs
    def deleteSource(self, alSource):
        """Delete a source pointer (self._al_source) and free its buffers"""
        assert context._lock.locked()
        # Move every buffer owned by the source back onto the free list.
        for buffer in self._sources.pop(alSource.value):
            self._buffers.append(buffer)
    def dequeueBuffer(self, alSource, buffer):
        """A buffer has finished playing, free it."""
        assert context._lock.locked()
        sourceBuffs = self._sources[alSource.value]
        if buffer in sourceBuffs:
            sourceBuffs.remove(buffer)
            self._buffers.append(buffer)
        elif _debug_buffers:
            # This seems to be the problem with Mac OS X - The buffers are
            # dequeued, but they're not _actually_ buffers. In other words,
            # there's some leakage, so after awhile, things break.
            print(("Bad buffer: " + str(buffer)))
    def delete(self):
        """Delete all sources and free all buffers"""
        assert context._lock.locked()
        for source, buffers in list(self._sources.items()):
            al.alDeleteSources(1, ctypes.byref(ctypes.c_uint(source)))
            for b in buffers:
                if not al.alIsBuffer(b):
                    # Protect against implementations that DO free buffers
                    # when they delete a source - carry on.
                    if _debug_buffers:
                        print("Found a bad buffer")
                    continue
                al.alDeleteBuffers(1, ctypes.byref(b))
        for b in self._buffers:
            al.alDeleteBuffers(1, ctypes.byref(b))
        self._buffers = []
        self._sources = {}
# Module-level pool shared by every OpenAL audio player in this process.
bufferPool = OpenALBufferPool()
class OpenALAudioPlayer(AbstractAudioPlayer):
    """Streams a source group through a single OpenAL source.

    Buffers are obtained from the module-level ``bufferPool`` and refilled by
    the driver's worker thread via :meth:`refill`.
    """
    #: Minimum size of an OpenAL buffer worth bothering with, in bytes
    _min_buffer_size = 512
    #: Aggregate (desired) buffer size, in bytes
    _ideal_buffer_size = 44800
    def __init__(self, source_group, player):
        super(OpenALAudioPlayer, self).__init__(source_group, player)
        audio_format = source_group.audio_format
        try:
            self._al_format = format_map[(audio_format.channels,
                                          audio_format.sample_size)]
        except KeyError:
            raise OpenALException('Unsupported audio format.')
        self._al_source = al.ALuint()
        al.alGenSources(1, self._al_source)
        # Lock policy: lock all instance vars (except constants). (AL calls
        # are locked on context).
        self._lock = threading.RLock()
        # Cursor positions, like DSound and Pulse drivers, refer to a
        # hypothetical infinite-length buffer. Cursor units are in bytes.
        # Cursor position of current (head) AL buffer
        self._buffer_cursor = 0
        # Estimated playback cursor position (last seen)
        self._play_cursor = 0
        # Cursor position of end of queued AL buffer.
        self._write_cursor = 0
        # List of currently queued buffer sizes (in bytes)
        self._buffer_sizes = []
        # List of currently queued buffer timestamps
        self._buffer_timestamps = []
        # Timestamp at end of last written buffer (timestamp to return in case
        # of underrun)
        self._underrun_timestamp = None
        # List of (cursor, MediaEvent)
        self._events = []
        # Desired play state (True even if stopped due to underrun)
        self._playing = False
        # Has source group EOS been seen (and hence, event added to queue)?
        self._eos = False
        # OpenAL 1.0 timestamp interpolation: system time of current buffer
        # playback (best guess)
        if not context.have_1_1:
            self._buffer_system_time = time.time()
        self.refill(self._ideal_buffer_size)
    def __del__(self):
        # May run at interpreter shutdown when globals are torn down; swallow
        # any error from delete().
        try:
            self.delete()
        except:
            pass
    def delete(self):
        """Release the AL source and return its buffers to the pool.

        Safe to call more than once; later calls are no-ops.
        """
        if _debug:
            print('OpenALAudioPlayer.delete()')
        if not self._al_source:
            return
        context.worker.remove(self)
        # Lock order: instance lock first, then the context (AL) lock.
        self._lock.acquire()
        context.lock()
        al.alDeleteSources(1, self._al_source)
        bufferPool.deleteSource(self._al_source)
        if _debug_buffers:
            error = al.alGetError()
            if error != 0:
                print(("DELETE ERROR: " + str(error)))
        context.unlock()
        self._al_source = None
        self._lock.release()
    def play(self):
        """Begin or resume playback and register with the worker thread."""
        if self._playing:
            return
        if _debug:
            print('OpenALAudioPlayer.play()')
        self._playing = True
        self._al_play()
        if not context.have_1_1:
            self._buffer_system_time = time.time()
        context.worker.add(self)
    def _al_play(self):
        """Start the AL source unless it is already in the PLAYING state."""
        if _debug:
            print('OpenALAudioPlayer._al_play()')
        context.lock()
        state = al.ALint()
        al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
        if state.value != al.AL_PLAYING:
            al.alSourcePlay(self._al_source)
        context.unlock()
    def stop(self):
        """Pause playback; the source can be resumed with play()."""
        if not self._playing:
            return
        if _debug:
            print('OpenALAudioPlayer.stop()')
        # NOTE(review): _pause_timestamp is not initialised in __init__ and is
        # not read anywhere in this class — presumably consumed by the player;
        # confirm before removing.
        self._pause_timestamp = self.get_time()
        context.lock()
        al.alSourcePause(self._al_source)
        context.unlock()
        self._playing = False
        context.worker.remove(self)
    def clear(self):
        """Stop the source and invalidate pending events and timestamps."""
        if _debug:
            print('OpenALAudioPlayer.clear()')
        self._lock.acquire()
        context.lock()
        al.alSourceStop(self._al_source)
        self._playing = False
        del self._events[:]
        self._underrun_timestamp = None
        # Keep list length: timestamps are invalidated (set to None) rather
        # than removed so bookkeeping stays aligned with queued AL buffers.
        self._buffer_timestamps = [None for _ in self._buffer_timestamps]
        context.unlock()
        self._lock.release()
    def _update_play_cursor(self):
        """Reclaim processed AL buffers, advance the play-cursor estimate and
        dispatch any media events whose cursor has been passed.
        """
        if not self._al_source:
            return
        self._lock.acquire()
        context.lock()
        # Release spent buffers
        processed = al.ALint()
        al.alGetSourcei(self._al_source, al.AL_BUFFERS_PROCESSED, processed)
        processed = processed.value
        if processed:
            buffers = (al.ALuint * processed)()
            al.alSourceUnqueueBuffers(self._al_source, len(buffers), buffers)
            error = al.alGetError()
            if error != 0:
                if _debug_buffers:
                    print(("Source unqueue error: " + str(error)))
            else:
                for b in buffers:
                    bufferPool.dequeueBuffer(self._al_source, b)
        context.unlock()
        if processed:
            if (len(self._buffer_timestamps) == processed
                and self._buffer_timestamps[-1] is not None):
                # Underrun, take note of timestamp.
                # We check that the timestamp is not None, because otherwise
                # our source could have been cleared.
                self._underrun_timestamp = \
                    self._buffer_timestamps[-1] + \
                    self._buffer_sizes[-1] / \
                    float(self.source_group.audio_format.bytes_per_second)
            self._buffer_cursor += sum(self._buffer_sizes[:processed])
            del self._buffer_sizes[:processed]
            del self._buffer_timestamps[:processed]
            if not context.have_1_1:
                self._buffer_system_time = time.time()
        # Update play cursor using buffer cursor + estimate into current
        # buffer
        if context.have_1_1:
            bytes = al.ALint()
            context.lock()
            al.alGetSourcei(self._al_source, al.AL_BYTE_OFFSET, bytes)
            context.unlock()
            if _debug:
                print('got bytes offset', bytes.value)
            self._play_cursor = self._buffer_cursor + bytes.value
        else:
            # Interpolate system time past buffer timestamp
            self._play_cursor = \
                self._buffer_cursor + int(
                    (time.time() - self._buffer_system_time) * \
                    self.source_group.audio_format.bytes_per_second)
        # Process events
        while self._events and self._events[0][0] < self._play_cursor:
            _, event = self._events.pop(0)
            event._sync_dispatch_to_player(self.player)
        self._lock.release()
    def get_write_size(self):
        """Return how many bytes refill() should fetch next (0 after EOS)."""
        self._lock.acquire()
        self._update_play_cursor()
        write_size = self._ideal_buffer_size - \
            (self._write_cursor - self._play_cursor)
        if self._eos:
            write_size = 0
        self._lock.release()
        return write_size
    def refill(self, write_size):
        """Pull up to write_size bytes from the source group, queue them on
        the AL source, and restart playback if an underrun stopped it.
        """
        if _debug:
            print('refill', write_size)
        self._lock.acquire()
        while write_size > self._min_buffer_size:
            audio_data = self.source_group.get_audio_data(write_size)
            if not audio_data:
                # End of the source group: queue EOS events at the current
                # write cursor and stop refilling permanently.
                self._eos = True
                self._events.append(
                    (self._write_cursor, MediaEvent(0, 'on_eos')))
                self._events.append(
                    (self._write_cursor, MediaEvent(0, 'on_source_group_eos')))
                break
            for event in audio_data.events:
                cursor = self._write_cursor + event.timestamp * \
                    self.source_group.audio_format.bytes_per_second
                self._events.append((cursor, event))
            context.lock()
            buffer = bufferPool.getBuffer(self._al_source)
            al.alBufferData(buffer,
                            self._al_format,
                            audio_data.data,
                            audio_data.length,
                            self.source_group.audio_format.sample_rate)
            if _debug_buffers:
                error = al.alGetError()
                if error != 0:
                    print(("BUFFER DATA ERROR: " + str(error)))
            al.alSourceQueueBuffers(self._al_source, 1, ctypes.byref(buffer))
            if _debug_buffers:
                error = al.alGetError()
                if error != 0:
                    print(("QUEUE BUFFER ERROR: " + str(error)))
            context.unlock()
            self._write_cursor += audio_data.length
            self._buffer_sizes.append(audio_data.length)
            self._buffer_timestamps.append(audio_data.timestamp)
            write_size -= audio_data.length
        # Check for underrun stopping playback
        if self._playing:
            state = al.ALint()
            context.lock()
            al.alGetSourcei(self._al_source, al.AL_SOURCE_STATE, state)
            if state.value != al.AL_PLAYING:
                if _debug:
                    print('underrun')
                al.alSourcePlay(self._al_source)
            context.unlock()
        self._lock.release()
    def get_time(self):
        """Return the estimated playback timestamp in seconds, or None if it
        cannot be determined (e.g. after clear()).
        """
        try:
            buffer_timestamp = self._buffer_timestamps[0]
        except IndexError:
            # No buffers queued: report the timestamp at which we underran.
            return self._underrun_timestamp
        if buffer_timestamp is None:
            return None
        return buffer_timestamp + \
            (self._play_cursor - self._buffer_cursor) / \
            float(self.source_group.audio_format.bytes_per_second)
    def set_volume(self, volume):
        """Set source gain; negative values are clamped to 0."""
        context.lock()
        al.alSourcef(self._al_source, al.AL_GAIN, max(0, volume))
        context.unlock()
    def set_position(self, position):
        """Set the 3D position of the source from an (x, y, z) tuple."""
        x, y, z = position
        context.lock()
        al.alSource3f(self._al_source, al.AL_POSITION, x, y, z)
        context.unlock()
    def set_min_distance(self, min_distance):
        """Set the AL reference distance for attenuation."""
        context.lock()
        al.alSourcef(self._al_source, al.AL_REFERENCE_DISTANCE, min_distance)
        context.unlock()
    def set_max_distance(self, max_distance):
        """Set the distance beyond which no further attenuation occurs."""
        context.lock()
        al.alSourcef(self._al_source, al.AL_MAX_DISTANCE, max_distance)
        context.unlock()
    def set_pitch(self, pitch):
        """Set playback pitch; negative values are clamped to 0."""
        context.lock()
        al.alSourcef(self._al_source, al.AL_PITCH, max(0, pitch))
        context.unlock()
    def set_cone_orientation(self, cone_orientation):
        """Set the direction vector of the source's sound cone."""
        x, y, z = cone_orientation
        context.lock()
        al.alSource3f(self._al_source, al.AL_DIRECTION, x, y, z)
        context.unlock()
    def set_cone_inner_angle(self, cone_inner_angle):
        """Set the inner angle (degrees) of the sound cone."""
        context.lock()
        al.alSourcef(self._al_source, al.AL_CONE_INNER_ANGLE, cone_inner_angle)
        context.unlock()
    def set_cone_outer_angle(self, cone_outer_angle):
        """Set the outer angle (degrees) of the sound cone."""
        context.lock()
        al.alSourcef(self._al_source, al.AL_CONE_OUTER_ANGLE, cone_outer_angle)
        context.unlock()
    def set_cone_outer_gain(self, cone_outer_gain):
        """Set the gain applied outside the cone's outer angle."""
        context.lock()
        al.alSourcef(self._al_source, al.AL_CONE_OUTER_GAIN, cone_outer_gain)
        context.unlock()
class OpenALDriver(AbstractAudioDriver):
    """Audio driver for the OpenAL backend.

    Owns the ALC device and context, a lock serialising AL calls, the
    listener object and the worker thread that keeps players refilled.
    """
    _forward_orientation = (0, 0, -1)
    _up_orientation = (0, 1, 0)
    def __init__(self, device_name=None):
        super(OpenALDriver, self).__init__()
        # TODO devices must be enumerated on Windows, otherwise 1.0 context is
        # returned.
        self._device = alc.alcOpenDevice(device_name)
        if not self._device:
            raise Exception('No OpenAL device.')
        self._context = alc.alcCreateContext(self._device, None)
        alc.alcMakeContextCurrent(self._context)
        # NOTE(review): "and False" deliberately forces the OpenAL 1.0 code
        # path (system-time interpolation) even when 1.1 is available —
        # confirm before re-enabling the AL_BYTE_OFFSET path.
        self.have_1_1 = self.have_version(1, 1) and False
        self._lock = threading.Lock()
        self._listener = OpenALListener(self)
        # Start worker thread
        self.worker = OpenALWorker()
        self.worker.start()
    def create_audio_player(self, source_group, player):
        """Create an OpenALAudioPlayer streaming source_group for player."""
        assert self._device is not None, "Device was closed"
        return OpenALAudioPlayer(source_group, player)
    def delete(self):
        """Stop the worker and tear down the ALC context and device."""
        self.worker.stop()
        self.lock()
        alc.alcMakeContextCurrent(None)
        alc.alcDestroyContext(self._context)
        alc.alcCloseDevice(self._device)
        self._device = None
        self.unlock()
    def lock(self):
        """Acquire the driver lock; hold it around every AL call."""
        self._lock.acquire()
    def unlock(self):
        """Release the driver lock."""
        self._lock.release()
    def have_version(self, major, minor):
        """Return True if the OpenAL version is at least major.minor."""
        return (major, minor) <= self.get_version()
    def get_version(self):
        """Return the (major, minor) OpenAL version of the open device."""
        major = alc.ALCint()
        minor = alc.ALCint()
        alc.alcGetIntegerv(self._device, alc.ALC_MAJOR_VERSION,
                           ctypes.sizeof(major), major)
        alc.alcGetIntegerv(self._device, alc.ALC_MINOR_VERSION,
                           ctypes.sizeof(minor), minor)
        return major.value, minor.value
    def get_extensions(self):
        """Return the list of ALC extension names as text strings.

        Bug fix: ``ctypes.c_char_p.value`` is ``bytes`` under Python 3, and
        ``bytes.split(' ')`` raises TypeError; decode before splitting so
        :meth:`have_extension` can compare against ``str`` names on both
        Python 2 and 3.
        """
        extensions = alc.alcGetString(self._device, alc.ALC_EXTENSIONS)
        if pyglet.compat_platform == 'darwin' or pyglet.compat_platform.startswith('linux'):
            extensions = ctypes.cast(extensions, ctypes.c_char_p).value
            if isinstance(extensions, bytes):
                extensions = extensions.decode('ascii')
            return extensions.split(' ')
        else:
            return _split_nul_strings(extensions)
    def have_extension(self, extension):
        """Return True if the named ALC extension is available."""
        return extension in self.get_extensions()
    def get_listener(self):
        """Return the OpenALListener for this driver."""
        return self._listener
class OpenALListener(AbstractListener):
    """Maps AbstractListener properties onto OpenAL listener state."""
    def __init__(self, driver):
        self._driver = driver
    def _set_volume(self, volume):
        """Apply the gain to the AL listener, then record it."""
        drv = self._driver
        drv.lock()
        al.alListenerf(al.AL_GAIN, volume)
        drv.unlock()
        self._volume = volume
    def _set_position(self, position):
        """Move the AL listener to the given (x, y, z) position."""
        px, py, pz = position
        drv = self._driver
        drv.lock()
        al.alListener3f(al.AL_POSITION, px, py, pz)
        drv.unlock()
        self._position = position
    def _set_forward_orientation(self, orientation):
        """Update the forward vector, keeping the current up vector."""
        vectors = orientation + self._up_orientation
        packed = (al.ALfloat * 6)(*vectors)
        drv = self._driver
        drv.lock()
        al.alListenerfv(al.AL_ORIENTATION, packed)
        drv.unlock()
        self._forward_orientation = orientation
    def _set_up_orientation(self, orientation):
        """Update the up vector, keeping the current forward vector."""
        vectors = self._forward_orientation + orientation
        packed = (al.ALfloat * 6)(*vectors)
        drv = self._driver
        drv.lock()
        al.alListenerfv(al.AL_ORIENTATION, packed)
        drv.unlock()
        self._up_orientation = orientation
# The one OpenALDriver instance for the process; set by create_audio_driver()
# and cleared by cleanup_audio_driver().
context = None
def create_audio_driver(device_name=None):
    """Create the module-level OpenAL driver singleton and return it."""
    global context
    driver = OpenALDriver(device_name)
    if _debug:
        print('OpenAL', driver.get_version())
    context = driver
    return driver
def cleanup_audio_driver():
    """Tear down the driver singleton and free all pooled AL objects."""
    global context
    if _debug:
        print("Cleaning up audio driver")
    if context is not None:
        context.lock()
        bufferPool.delete()
        context.unlock()
        context.delete()
        context = None
    if _debug:
        print("Cleaning done")
# Ensure AL resources are released even if the application never cleans up.
atexit.register(cleanup_audio_driver)
| bsd-3-clause |
wteiken/letsencrypt | certbot/tests/auth_handler_test.py | 4 | 16114 | """Tests for certbot.auth_handler."""
import functools
import logging
import unittest
import mock
from acme import challenges
from acme import client as acme_client
from acme import messages
from certbot import achallenges
from certbot import errors
from certbot import le_util
from certbot.tests import acme_util
class ChallengeFactoryTest(unittest.TestCase):
    """Tests for AuthHandler._challenge_factory."""
    # pylint: disable=protected-access
    def setUp(self):
        from certbot.auth_handler import AuthHandler
        # Only the account's key attribute is consulted, so a Mock suffices.
        self.handler = AuthHandler(None, None, mock.Mock(key="mock_key"))
        self.dom = "test"
        pending = [messages.STATUS_PENDING] * 6
        self.handler.authzr[self.dom] = acme_util.gen_authzr(
            messages.STATUS_PENDING, self.dom, acme_util.CHALLENGES,
            pending, False)
    def test_all(self):
        achalls = self.handler._challenge_factory(
            self.dom, range(len(acme_util.CHALLENGES)))
        self.assertEqual([achall.chall for achall in achalls],
                         acme_util.CHALLENGES)
    def test_one_tls_sni(self):
        achalls = self.handler._challenge_factory(self.dom, [1])
        self.assertEqual([achall.chall for achall in achalls],
                         [acme_util.TLSSNI01])
    def test_unrecognized(self):
        unknown = mock.Mock(chall="chall", typ="unrecognized")
        self.handler.authzr["failure.com"] = acme_util.gen_authzr(
            messages.STATUS_PENDING, "failure.com", [unknown],
            [messages.STATUS_PENDING])
        self.assertRaises(
            errors.Error, self.handler._challenge_factory, "failure.com", [0])
class GetAuthorizationsTest(unittest.TestCase):
    """get_authorizations test.

    This tests everything except for all functions under _poll_challenges.
    """
    def setUp(self):
        from certbot.auth_handler import AuthHandler
        self.mock_auth = mock.MagicMock(name="ApacheConfigurator")
        self.mock_auth.get_chall_pref.return_value = [challenges.TLSSNI01]
        self.mock_auth.perform.side_effect = gen_auth_resp
        self.mock_account = mock.Mock(key=le_util.Key("file_path", "PEM"))
        self.mock_net = mock.MagicMock(spec=acme_client.Client)
        self.handler = AuthHandler(
            self.mock_auth, self.mock_net, self.mock_account)
        logging.disable(logging.CRITICAL)
    def tearDown(self):
        logging.disable(logging.NOTSET)
    @mock.patch("certbot.auth_handler.AuthHandler._poll_challenges")
    def test_name1_tls_sni_01_1(self, mock_poll):
        """One domain solved with a single tls-sni-01 challenge."""
        self.mock_net.request_domain_challenges.side_effect = functools.partial(
            gen_dom_authzr, challs=acme_util.CHALLENGES)
        mock_poll.side_effect = self._validate_all
        authzr = self.handler.get_authorizations(["0"])
        self.assertEqual(self.mock_net.answer_challenge.call_count, 1)
        self.assertEqual(mock_poll.call_count, 1)
        chall_update = mock_poll.call_args[0][0]
        # Fix: compare as a list so this also passes on Python 3, where
        # dict.keys() is a view object that never equals a list.
        self.assertEqual(list(chall_update.keys()), ["0"])
        self.assertEqual(len(chall_update.values()), 1)
        self.assertEqual(self.mock_auth.cleanup.call_count, 1)
        # Test if list first element is TLSSNI01, use typ because it is an achall
        self.assertEqual(
            self.mock_auth.cleanup.call_args[0][0][0].typ, "tls-sni-01")
        self.assertEqual(len(authzr), 1)
    @mock.patch("certbot.auth_handler.AuthHandler._poll_challenges")
    def test_name1_tls_sni_01_1_http_01_1_dns_1(self, mock_poll):
        """One domain with no combos: all three challenge types answered."""
        self.mock_net.request_domain_challenges.side_effect = functools.partial(
            gen_dom_authzr, challs=acme_util.CHALLENGES, combos=False)
        mock_poll.side_effect = self._validate_all
        self.mock_auth.get_chall_pref.return_value.append(challenges.HTTP01)
        self.mock_auth.get_chall_pref.return_value.append(challenges.DNS)
        authzr = self.handler.get_authorizations(["0"])
        self.assertEqual(self.mock_net.answer_challenge.call_count, 3)
        self.assertEqual(mock_poll.call_count, 1)
        chall_update = mock_poll.call_args[0][0]
        # Fix: list() the keys view for a Python 3-safe comparison.
        self.assertEqual(list(chall_update.keys()), ["0"])
        self.assertEqual(len(chall_update.values()), 1)
        self.assertEqual(self.mock_auth.cleanup.call_count, 1)
        # Test if list first element is TLSSNI01, use typ because it is an achall
        for achall in self.mock_auth.cleanup.call_args[0][0]:
            self.assertTrue(achall.typ in ["tls-sni-01", "http-01", "dns"])
        # Length of authorizations list
        self.assertEqual(len(authzr), 1)
    @mock.patch("certbot.auth_handler.AuthHandler._poll_challenges")
    def test_name3_tls_sni_01_3(self, mock_poll):
        """Three domains, one tls-sni-01 challenge answered for each."""
        self.mock_net.request_domain_challenges.side_effect = functools.partial(
            gen_dom_authzr, challs=acme_util.CHALLENGES)
        mock_poll.side_effect = self._validate_all
        authzr = self.handler.get_authorizations(["0", "1", "2"])
        self.assertEqual(self.mock_net.answer_challenge.call_count, 3)
        # Check poll call
        self.assertEqual(mock_poll.call_count, 1)
        chall_update = mock_poll.call_args[0][0]
        self.assertEqual(len(chall_update.keys()), 3)
        # Idiomatic membership tests (equivalent on Python 2 and 3).
        self.assertIn("0", chall_update)
        self.assertEqual(len(chall_update["0"]), 1)
        self.assertIn("1", chall_update)
        self.assertEqual(len(chall_update["1"]), 1)
        self.assertIn("2", chall_update)
        self.assertEqual(len(chall_update["2"]), 1)
        self.assertEqual(self.mock_auth.cleanup.call_count, 1)
        self.assertEqual(len(authzr), 3)
    def test_perform_failure(self):
        """An authenticator perform() error propagates to the caller."""
        self.mock_net.request_domain_challenges.side_effect = functools.partial(
            gen_dom_authzr, challs=acme_util.CHALLENGES)
        self.mock_auth.perform.side_effect = errors.AuthorizationError
        self.assertRaises(
            errors.AuthorizationError, self.handler.get_authorizations, ["0"])
    def test_no_domains(self):
        """Requesting authorizations for no domains is an error."""
        self.assertRaises(errors.AuthorizationError, self.handler.get_authorizations, [])
    def _validate_all(self, unused_1, unused_2):
        """Mark every tracked authzr (and all its challenges) as valid."""
        # Iterate over a snapshot: values are reassigned during the loop.
        for dom in list(self.handler.authzr):
            azr = self.handler.authzr[dom]
            self.handler.authzr[dom] = acme_util.gen_authzr(
                messages.STATUS_VALID,
                dom,
                [challb.chall for challb in azr.body.challenges],
                [messages.STATUS_VALID] * len(azr.body.challenges),
                azr.body.combinations)
class PollChallengesTest(unittest.TestCase):
    # pylint: disable=protected-access
    """Test poll challenges."""
    def setUp(self):
        from certbot.auth_handler import challb_to_achall
        from certbot.auth_handler import AuthHandler
        # Account and network are mocked...
        self.mock_net = mock.MagicMock()
        self.handler = AuthHandler(
            None, self.mock_net, mock.Mock(key="mock_key"))
        # Three pending authorizations: dom "0" with two challenges, doms
        # "1" and "2" with the full challenge set.
        self.doms = ["0", "1", "2"]
        self.handler.authzr[self.doms[0]] = acme_util.gen_authzr(
            messages.STATUS_PENDING, self.doms[0],
            [acme_util.HTTP01, acme_util.TLSSNI01],
            [messages.STATUS_PENDING] * 2, False)
        self.handler.authzr[self.doms[1]] = acme_util.gen_authzr(
            messages.STATUS_PENDING, self.doms[1],
            acme_util.CHALLENGES, [messages.STATUS_PENDING] * 3, False)
        self.handler.authzr[self.doms[2]] = acme_util.gen_authzr(
            messages.STATUS_PENDING, self.doms[2],
            acme_util.CHALLENGES, [messages.STATUS_PENDING] * 3, False)
        # Map of domain -> annotated challenges, as _poll_challenges expects.
        self.chall_update = {}
        for dom in self.doms:
            self.chall_update[dom] = [
                challb_to_achall(challb, mock.Mock(key="dummy_key"), dom)
                for challb in self.handler.authzr[dom].body.challenges]
    @mock.patch("certbot.auth_handler.time")
    def test_poll_challenges(self, unused_mock_time):
        """All authzrs eventually become valid when polling succeeds."""
        self.mock_net.poll.side_effect = self._mock_poll_solve_one_valid
        self.handler._poll_challenges(self.chall_update, False)
        for authzr in self.handler.authzr.values():
            self.assertEqual(authzr.body.status, messages.STATUS_VALID)
    @mock.patch("certbot.auth_handler.time")
    def test_poll_challenges_failure_best_effort(self, unused_mock_time):
        """With best_effort=True, invalid challenges do not raise."""
        self.mock_net.poll.side_effect = self._mock_poll_solve_one_invalid
        self.handler._poll_challenges(self.chall_update, True)
        for authzr in self.handler.authzr.values():
            self.assertEqual(authzr.body.status, messages.STATUS_PENDING)
    @mock.patch("certbot.auth_handler.time")
    @mock.patch("certbot.auth_handler.zope.component.getUtility")
    def test_poll_challenges_failure(self, unused_mock_time, unused_mock_zope):
        """Without best_effort, invalid challenges raise AuthorizationError."""
        self.mock_net.poll.side_effect = self._mock_poll_solve_one_invalid
        self.assertRaises(
            errors.AuthorizationError, self.handler._poll_challenges,
            self.chall_update, False)
    @mock.patch("certbot.auth_handler.time")
    def test_unable_to_find_challenge_status(self, unused_mock_time):
        """A challenge not present in the authzr cannot be resolved."""
        from certbot.auth_handler import challb_to_achall
        self.mock_net.poll.side_effect = self._mock_poll_solve_one_valid
        self.chall_update[self.doms[0]].append(
            challb_to_achall(acme_util.DNS_P, "key", self.doms[0]))
        self.assertRaises(
            errors.AuthorizationError, self.handler._poll_challenges,
            self.chall_update, False)
    def test_verify_authzr_failure(self):
        """verify_authzr_complete fails while authzrs are still pending."""
        self.assertRaises(
            errors.AuthorizationError, self.handler.verify_authzr_complete)
    def _mock_poll_solve_one_valid(self, authzr):
        # Pending here because my dummy script won't change the full status.
        # Basically it didn't raise an error and it stopped earlier than
        # Making all challenges invalid which would make mock_poll_solve_one
        # change authzr to invalid
        return self._mock_poll_solve_one_chall(authzr, messages.STATUS_VALID)
    def _mock_poll_solve_one_invalid(self, authzr):
        return self._mock_poll_solve_one_chall(authzr, messages.STATUS_INVALID)
    def _mock_poll_solve_one_chall(self, authzr, desired_status):
        # pylint: disable=no-self-use
        """Dummy method that solves one chall at a time to desired_status.

        When all are solved.. it changes authzr.status to desired_status
        """
        new_challbs = authzr.body.challenges
        # Flip at most one challenge per call to the desired status.
        for challb in authzr.body.challenges:
            if challb.status != desired_status:
                new_challbs = tuple(
                    challb_temp if challb_temp != challb
                    else acme_util.chall_to_challb(challb.chall, desired_status)
                    for challb_temp in authzr.body.challenges
                )
                break
        if all(test_challb.status == desired_status
               for test_challb in new_challbs):
            status_ = desired_status
        else:
            status_ = authzr.body.status
        new_authzr = messages.AuthorizationResource(
            uri=authzr.uri,
            new_cert_uri=authzr.new_cert_uri,
            body=messages.Authorization(
                identifier=authzr.body.identifier,
                challenges=new_challbs,
                combinations=authzr.body.combinations,
                status=status_,
            ),
        )
        return (new_authzr, "response")
class ChallbToAchallTest(unittest.TestCase):
    """Tests for certbot.auth_handler.challb_to_achall."""
    def _call(self, challb):
        from certbot.auth_handler import challb_to_achall
        return challb_to_achall(challb, "account_key", "domain")
    def test_it(self):
        expected = achallenges.KeyAuthorizationAnnotatedChallenge(
            challb=acme_util.HTTP01_P, account_key="account_key",
            domain="domain")
        self.assertEqual(self._call(acme_util.HTTP01_P), expected)
class GenChallengePathTest(unittest.TestCase):
    """Tests for certbot.auth_handler.gen_challenge_path.

    .. todo:: Add more tests for dumb_path... depending on what we want to do.
    """
    def setUp(self):
        # Bug fix: logging.disable() expects an int level; the original
        # passed the *function* logging.fatal, which is not a level. Use
        # logging.CRITICAL, consistent with the other test classes here.
        logging.disable(logging.CRITICAL)
    def tearDown(self):
        logging.disable(logging.NOTSET)
    @classmethod
    def _call(cls, challbs, preferences, combinations):
        from certbot.auth_handler import gen_challenge_path
        return gen_challenge_path(challbs, preferences, combinations)
    def test_common_case(self):
        """Given TLSSNI01 and HTTP01 with appropriate combos."""
        challbs = (acme_util.TLSSNI01_P, acme_util.HTTP01_P)
        prefs = [challenges.TLSSNI01, challenges.HTTP01]
        combos = ((0,), (1,))
        # Smart then trivial dumb path test
        self.assertEqual(self._call(challbs, prefs, combos), (0,))
        self.assertTrue(self._call(challbs, prefs, None))
        # Rearrange order...
        self.assertEqual(self._call(challbs[::-1], prefs, combos), (1,))
        self.assertTrue(self._call(challbs[::-1], prefs, None))
    def test_not_supported(self):
        """No path exists when preferences cannot satisfy the combos."""
        challbs = (acme_util.DNS_P, acme_util.TLSSNI01_P)
        prefs = [challenges.TLSSNI01]
        combos = ((0, 1),)
        # smart path fails because no challs in perfs satisfies combos
        self.assertRaises(
            errors.AuthorizationError, self._call, challbs, prefs, combos)
        # dumb path fails because all challbs are not supported
        self.assertRaises(
            errors.AuthorizationError, self._call, challbs, prefs, None)
class ReportFailedChallsTest(unittest.TestCase):
    """Tests for certbot.auth_handler._report_failed_challs."""
    # pylint: disable=protected-access
    def setUp(self):
        # NB: kwargs is mutated in place between constructions below, so the
        # three annotated challenges share all fields except those reassigned.
        kwargs = {
            "chall": acme_util.HTTP01,
            "uri": "uri",
            "status": messages.STATUS_INVALID,
            "error": messages.Error(typ="urn:acme:error:tls", detail="detail"),
        }
        # Prevent future regressions if the error type changes
        self.assertTrue(kwargs["error"].description is not None)
        self.http01 = achallenges.KeyAuthorizationAnnotatedChallenge(
            # pylint: disable=star-args
            challb=messages.ChallengeBody(**kwargs),
            domain="example.com",
            account_key="key")
        # Same domain and error as http01, but a TLS-SNI challenge type.
        kwargs["chall"] = acme_util.TLSSNI01
        self.tls_sni_same = achallenges.KeyAuthorizationAnnotatedChallenge(
            # pylint: disable=star-args
            challb=messages.ChallengeBody(**kwargs),
            domain="example.com",
            account_key="key")
        # Different error and different domain from both challenges above.
        kwargs["error"] = messages.Error(typ="dnssec", detail="detail")
        self.tls_sni_diff = achallenges.KeyAuthorizationAnnotatedChallenge(
            # pylint: disable=star-args
            challb=messages.ChallengeBody(**kwargs),
            domain="foo.bar",
            account_key="key")
    @mock.patch("certbot.auth_handler.zope.component.getUtility")
    def test_same_error_and_domain(self, mock_zope):
        """Challenges sharing (error, domain) are merged into one message."""
        from certbot import auth_handler
        auth_handler._report_failed_challs([self.http01, self.tls_sni_same])
        call_list = mock_zope().add_message.call_args_list
        self.assertTrue(len(call_list) == 1)
        self.assertTrue("Domain: example.com\nType: tls\nDetail: detail" in call_list[0][0][0])
    @mock.patch("certbot.auth_handler.zope.component.getUtility")
    def test_different_errors_and_domains(self, mock_zope):
        """Distinct (error, domain) pairs produce separate messages."""
        from certbot import auth_handler
        auth_handler._report_failed_challs([self.http01, self.tls_sni_diff])
        self.assertTrue(mock_zope().add_message.call_count == 2)
def gen_auth_resp(chall_list):
    """Generate a dummy authorization response."""
    responses = []
    for chall in chall_list:
        responses.append("%s%s" % (type(chall).__name__, chall.domain))
    return responses
def gen_dom_authzr(domain, unused_new_authzr_uri, challs, combos=True):
    """Generates new authzr for domains."""
    statuses = [messages.STATUS_PENDING] * len(challs)
    return acme_util.gen_authzr(
        messages.STATUS_PENDING, domain, challs, statuses, combos)
# Allow running this module directly as a test script.
if __name__ == "__main__":
    unittest.main() # pragma: no cover
| apache-2.0 |
HeraclesHX/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 264 | 1804 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
# NOTE(review): n_neighbors is never used below; it looks like a leftover
# from the related nearest-neighbors example — confirm before removing.
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# Fit a NearestCentroid model without shrinkage and with a shrink threshold
# of 0.1, printing training accuracy and plotting the regions for each.
for shrinkage in [None, 0.1]:
    # we create an instance of Neighbours Classifier and fit the data.
    clf = NearestCentroid(shrink_threshold=shrinkage)
    clf.fit(X, y)
    y_pred = clf.predict(X)
    # Training accuracy for this shrinkage setting.
    print(shrinkage, np.mean(y == y_pred))
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.figure()
    plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold)
    plt.title("3-Class classification (shrink_threshold=%r)"
              % shrinkage)
    plt.axis('tight')
plt.show()
| bsd-3-clause |
sysalexis/kbengine | kbe/res/scripts/common/Lib/idlelib/PathBrowser.py | 73 | 3111 | import os
import sys
import importlib.machinery
from idlelib.TreeWidget import TreeItem
from idlelib.ClassBrowser import ClassBrowser, ModuleBrowserTreeItem
from idlelib.PyShell import PyShellFileList
class PathBrowser(ClassBrowser):
    """Class browser rooted at sys.path instead of a single module."""
    def __init__(self, flist, _htest=False):
        """
        _htest - bool, change box location when running htest
        """
        self._htest = _htest
        self.init(flist)
    def settitle(self):
        """Set the window title and icon name."""
        title = "Path Browser"
        self.top.wm_title(title)
        self.top.wm_iconname(title)
    def rootnode(self):
        """Return the root tree item representing sys.path."""
        return PathBrowserTreeItem()
class PathBrowserTreeItem(TreeItem):
    """Root tree item whose children are the entries of sys.path."""
    def GetText(self):
        """Return the label shown for the root node."""
        return "sys.path"
    def GetSubList(self):
        """Return one DirBrowserTreeItem per sys.path entry, in order."""
        return [DirBrowserTreeItem(directory) for directory in sys.path]
class DirBrowserTreeItem(TreeItem):
    """Tree item for a directory (or package directory) on sys.path."""
    def __init__(self, dir, packages=None):
        """dir - directory path.
        packages - ancestor package name components, innermost last.
        """
        self.dir = dir
        # Bug fix: the original used a mutable default argument ([]), which
        # is shared across calls; use None as the sentinel instead.
        self.packages = [] if packages is None else packages
    def GetText(self):
        """Return the label: the path itself, or 'name: package'."""
        if not self.packages:
            return self.dir
        else:
            return self.packages[-1] + ": package"
    def GetSubList(self):
        """Return child items: sub-packages first, then modules, sorted."""
        try:
            names = os.listdir(self.dir or os.curdir)
        except OSError:
            # Unreadable or missing directory: show no children.
            return []
        packages = []
        for name in names:
            file = os.path.join(self.dir, name)
            if self.ispackagedir(file):
                nn = os.path.normcase(name)
                packages.append((nn, name, file))
        packages.sort()
        sublist = []
        for nn, name, file in packages:
            item = DirBrowserTreeItem(file, self.packages + [name])
            sublist.append(item)
        for nn, name in self.listmodules(names):
            item = ModuleBrowserTreeItem(os.path.join(self.dir, name))
            sublist.append(item)
        return sublist
    def ispackagedir(self, file):
        """Return whether file is a directory containing __init__.py."""
        if not os.path.isdir(file):
            # Fix: return a proper bool instead of 0.
            return False
        init = os.path.join(file, "__init__.py")
        return os.path.exists(init)
    def listmodules(self, allnames):
        """Return sorted (normcased, name) pairs for module files in allnames.

        Matched names are removed from allnames in place so the caller does
        not also list them as plain files.
        """
        modules = {}
        suffixes = importlib.machinery.EXTENSION_SUFFIXES[:]
        suffixes += importlib.machinery.SOURCE_SUFFIXES[:]
        suffixes += importlib.machinery.BYTECODE_SUFFIXES[:]
        # Renamed from "sorted" to avoid shadowing the builtin.
        found = []
        for suff in suffixes:
            i = -len(suff)
            for name in allnames[:]:
                normed_name = os.path.normcase(name)
                if normed_name[i:] == suff:
                    mod_name = name[:i]
                    if mod_name not in modules:
                        modules[mod_name] = None
                        found.append((normed_name, name))
                        allnames.remove(name)
        found.sort()
        return found
def _path_browser(parent):
    """Open a PathBrowser under parent for the human-test runner."""
    file_list = PyShellFileList(parent)
    PathBrowser(file_list, _htest=True)
    parent.mainloop()
# When run as a script: run the unit tests, then show the human-test window.
if __name__ == "__main__":
    from unittest import main
    main('idlelib.idle_test.test_pathbrowser', verbosity=2, exit=False)
    from idlelib.idle_test.htest import run
    run(_path_browser)
| lgpl-3.0 |
brchiu/tensorflow | tensorflow/python/grappler/layout_optimizer_test.py | 2 | 58174 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Grappler LayoutOptimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import device_properties_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.grappler import cluster as gcluster
from tensorflow.python.grappler import tf_optimizer
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import saver as saver_lib
def _weight(shape):
  """Generates a weight of a given shape."""
  initial = random_ops.truncated_normal(shape, seed=0, stddev=0.1)
  return initial
def _bias(shape):
  """Creates a constant bias tensor of `shape`, filled with 0.1."""
  initial = constant_op.constant(0.1, shape=shape)
  return initial
def _conv2d(x, w):
  """Convolves `x` with filter `w`: stride 1 in every dimension, SAME padding."""
  unit_strides = [1, 1, 1, 1]
  return nn.conv2d(x, w, strides=unit_strides, padding='SAME')
def _max_pool_2x2(x):
  """Max-pools `x` with a 2x2 window and 2x2 stride (halves H and W)."""
  window = [1, 2, 2, 1]
  return nn.max_pool(x, ksize=window, strides=window, padding='SAME')
# Taken from tensorflow/examples/tutorials/mnist/mnist_deep.py
def _two_layer_model(x):
  """Builds a conv/relu/pool -> conv/relu/pool model on a flat 28x28 input.

  NOTE: the tests below assert on auto-generated op names (e.g. 'Conv2D',
  'Relu_1'); those names depend on the exact op-creation order here, so do
  not reorder or restructure these calls.
  """
  x_image = array_ops.reshape(x, [-1, 28, 28, 1])
  w_conv1 = _weight([5, 5, 1, 32])
  b_conv1 = _bias([32])
  h_conv1 = nn.relu(_conv2d(x_image, w_conv1) + b_conv1)
  h_pool1 = _max_pool_2x2(h_conv1)
  w_conv2 = _weight([5, 5, 32, 64])
  b_conv2 = _bias([64])
  h_conv2 = nn.relu(_conv2d(h_pool1, w_conv2) + b_conv2)
  h_pool2 = _max_pool_2x2(h_conv2)
  return h_pool2
def _model_with_second_port():
  """Builds a model that also consumes fused_batch_norm's second output.

  The add combines the normalized output `y` with the batch `mean` (output
  port 1 of FusedBatchNorm), exercising layout handling of a non-primary
  output port.
  """
  random_seed.set_random_seed(0)
  x = random_ops.truncated_normal([2, 5, 5, 4], seed=0)
  scale = constant_op.constant(0.1, shape=[4])
  offset = constant_op.constant(0.3, shape=[4])
  y, mean, _ = nn.fused_batch_norm(x, scale, offset)
  # Renamed local from `mul`: the op is an add, and the old name was
  # misleading. Op creation order (and thus generated node names) unchanged.
  add = math_ops.add(y, mean)
  output = array_ops.identity(add)
  return output
def _model_with_branch(x):
  """Builds two parallel convolutions of the same input, joined by an add.

  Used to exercise layout optimization across branching data flow.
  """
  x_image = array_ops.reshape(x, [-1, 28, 28, 1])
  w_conv1 = _weight([5, 5, 1, 32])
  w_conv2 = _weight([5, 5, 1, 32])
  c_conv1 = _conv2d(x_image, w_conv1)
  c_conv2 = _conv2d(x_image, w_conv2)
  add = math_ops.add(c_conv1, c_conv2)
  return add
def _model_with_vec_and_4d(x):
  """Builds a conv whose 4-D output is added to a 1-D (vector) constant.

  Exercises layout optimization when a 4-D tensor is combined with a
  broadcastable vector.
  """
  x_image = array_ops.reshape(x, [-1, 28, 28, 1])
  w_conv1 = _weight([5, 5, 1, 32])
  c_conv1 = _conv2d(x_image, w_conv1)
  vector = constant_op.constant(6.4, shape=[32])
  add = math_ops.add(c_conv1, vector)
  return add
def _loop():
  """Runs _two_layer_model over four random inputs via functional map_fn."""
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(_two_layer_model, elems, dtype=dtypes.float32)
  return outputs
def _loop_with_branch():
  """Runs _model_with_branch over four random inputs via functional map_fn."""
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(
      _model_with_branch, elems, dtype=dtypes.float32)
  return outputs
def _loop_with_vec_and_4d():
  """Runs _model_with_vec_and_4d over four random inputs via map_fn."""
  random_seed.set_random_seed(0)
  x1 = random_ops.truncated_normal([1, 784], seed=0)
  x2 = random_ops.truncated_normal([1, 784], seed=0)
  x3 = random_ops.truncated_normal([1, 784], seed=0)
  x4 = random_ops.truncated_normal([1, 784], seed=0)
  elems = (x1, x2, x3, x4)
  outputs = functional_ops.map_fn(
      _model_with_vec_and_4d, elems, dtype=dtypes.float32)
  return outputs
def _get_config(layout_optimizer=True):
  """Builds a ConfigProto that toggles the Grappler layout optimizer.

  Args:
    layout_optimizer: whether the layout optimizer rewrite pass is enabled.

  Returns:
    A config_pb2.ConfigProto with layout optimization ON or OFF, arithmetic
    optimization disabled (so duplicated nodes are not removed), cost-model
    building enabled, and other graph optimizations turned off.
  """
  # The two original branches duplicated the whole RewriterConfig
  # construction; only the layout_optimizer toggle differed.
  toggle = (rewriter_config_pb2.RewriterConfig.ON
            if layout_optimizer else rewriter_config_pb2.RewriterConfig.OFF)
  rewrite_options = rewriter_config_pb2.RewriterConfig(
      layout_optimizer=toggle,
      # Do not remove duplicated nodes.
      arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF)
  # Run Grappler even on tiny graphs.
  rewrite_options.min_graph_nodes = -1
  graph_options = config_pb2.GraphOptions(
      rewrite_options=rewrite_options, build_cost_model=1)
  config = config_pb2.ConfigProto(graph_options=graph_options)
  config.graph_options.optimizer_options.opt_level = -1
  return config
def _simple_metagraph(depthwise=False):
  """Builds a two-conv-layer training graph and exports it as a MetaGraphDef.

  Args:
    depthwise: if True, use separable (depthwise) convolutions instead of
      regular conv2d layers.

  Returns:
    A MetaGraphDef exported from the constructed default graph, with the
    train op registered under the 'train_op' collection.
  """
  random_seed.set_random_seed(0)
  x = variables.Variable(random_ops.truncated_normal([1, 200, 200, 3], seed=0))
  conv = conv_layers.separable_conv2d if depthwise else conv_layers.conv2d
  y = conv(x, 32, [3, 3])
  z = conv(y, 32, [3, 3])
  optimizer = gradient_descent.GradientDescentOptimizer(1e-4)
  loss = math_ops.reduce_mean(z)
  train_op = optimizer.minimize(loss)
  graph = ops.get_default_graph()
  graph.add_to_collection('train_op', train_op)
  meta_graph = saver_lib.export_meta_graph(graph_def=graph.as_graph_def())
  return meta_graph
def _get_cluster():
  """Builds a single-GPU virtual cluster description for Grappler."""
  device = device_properties_pb2.NamedDevice()
  device.name = '/GPU:0'
  device.properties.type = 'GPU'
  device.properties.frequency = 1000
  device.properties.num_cores = 24
  device.properties.environment['architecture'] = '4'
  return gcluster.Cluster(devices=[device])
def _is_transpose(node):
return node.endswith('TransposeNHWCToNCHW-LayoutOptimizer') or node.endswith(
'TransposeNCHWToNHWC-LayoutOptimizer')
def _is_permute(node):
return node.endswith('VecPermuteNHWCToNCHW-LayoutOptimizer') or node.endswith(
'VecPermuteNCHWToNHWC-LayoutOptimizer')
class LayoutOptimizerTest(test.TestCase):
"""Tests the Grappler layout optimizer."""
def _assert_trans_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-TransposeNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_trans_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-TransposeNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_map_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-DimMapNHWCToNCHW-LayoutOptimizer', nodes)
def _assert_vec_nchw_to_nhwc(self, name, nodes):
self.assertIn(name + '-VecPermuteNCHWToNHWC-LayoutOptimizer', nodes)
def _assert_vec_nhwc_to_nchw(self, name, nodes):
self.assertIn(name + '-VecPermuteNHWCToNCHW-LayoutOptimizer', nodes)
  def _train(self, checkpoint_path, layout_optimizer=False, restore=False):
    """Trains a small two-conv model for two steps, saving or restoring state.

    Args:
      checkpoint_path: path used for saving (restore=False) or restoring
        (restore=True) the V2 checkpoint.
      layout_optimizer: whether the Grappler layout optimizer is enabled for
        the training session.
      restore: if True, restore variables from `checkpoint_path`, train, and
        return the resulting variable values; if False, initialize variables,
        train, and save a checkpoint (returns None).

    Returns:
      A list of variable values (numpy arrays) when restore=True, else None.
    """
    ops.reset_default_graph()
    graph = ops.get_default_graph()
    with session.Session(
        config=_get_config(layout_optimizer), graph=graph) as sess:
      batch = 2
      height = 6
      width = 7
      input_channels = 3
      shape = [batch, height, width, input_channels]
      image = array_ops.placeholder(dtype='float32', shape=shape)
      conv1 = conv_layers.conv2d(image, 32, [3, 3])
      conv2 = conv_layers.conv2d(conv1, 32, [3, 3])
      optimizer = gradient_descent.GradientDescentOptimizer(0.01)
      loss = math_ops.reduce_mean(conv2)
      train_op = optimizer.minimize(loss)
      saver = saver_lib.Saver(write_version=saver_pb2.SaverDef.V2)
      if restore:
        saver.restore(sess, checkpoint_path)
      else:
        sess.run(variables.global_variables_initializer())
      # Fixed numpy seed so both save and restore runs feed identical data.
      np.random.seed(0)
      for _ in range(2):
        image_val = np.random.rand(*shape).astype(np.float32)
        sess.run([loss, train_op], feed_dict={image: image_val})
      if restore:
        all_vars = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES)
        all_vars_values = [var.eval(session=sess) for var in all_vars]
        return all_vars_values
      else:
        saver.save(sess, checkpoint_path)
  def testTwoConvLayers(self):
    """Two-conv model: exactly two transposes survive layout optimization."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      output = _two_layer_model(x)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Relu_1-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSplitWithNonConstAxis(self):
    """Split with a fed axis: axis input gets a DimMap node, not a transpose."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dim = array_ops.placeholder(dtype='int32')
      split = array_ops.split(conv, 2, axis=dim)
      scale = constant_op.constant(0.1, shape=[32])
      offset = constant_op.constant(0.3, shape=[32])
      bn0 = nn.fused_batch_norm(split[0], scale, offset)
      bn1 = nn.fused_batch_norm(split[1], scale, offset)
      add = bn0[0] + bn1[0]
      output = array_ops.identity(add)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={dim: 3})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('add_2-0-0', nodes)
      self._assert_map_nhwc_to_nchw('split-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSplitVWithNonConstAxis(self):
    """SplitV with a fed axis: axis input gets a DimMap node after layout opt."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dim = array_ops.placeholder(dtype='int32')
      sizes = constant_op.constant([50, 10, 4], shape=[3])
      split = gen_array_ops.split_v(
          value=conv, size_splits=sizes, axis=dim, num_split=3)
      output = math_ops.reduce_sum(split[0])
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={dim: 3})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata, feed_dict={dim: 3})
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('SplitV-0-0', nodes)
      self._assert_map_nhwc_to_nchw('SplitV-2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testPadWithConstPaddings(self):
    """Pad with constant paddings: a permuted paddings const is generated."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
      paddings = constant_op.constant(
          paddings_val, dtype='int32', name='PaddingsConst')
      pad = array_ops.pad(conv, paddings)
      output = array_ops.identity(pad)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
      self.assertIn('Pad-1-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSum(self):
    """Full reduce_sum: only one transpose survives layout optimization."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv)
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testCast(self):
    """Cast after conv: the cast output gets a NCHW->NHWC transpose."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      cast = math_ops.cast(conv, dtype='bool')
      output = array_ops.identity(cast)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Cast-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSqueeze(self):
    """Squeeze of a spatial reduce_sum: one transpose survives."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2])
      squeeze = array_ops.squeeze(reduce_sum)
      output = array_ops.identity(squeeze)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSqueezeAlongHW(self):
    """Squeeze along H and W (keepdims reduce first): one transpose survives."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2], keepdims=True)
      squeeze = array_ops.squeeze(reduce_sum, axis=[1, 2])
      output = array_ops.identity(squeeze)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSqueezeAlongNHW(self):
    """Squeeze along N, H and W: one transpose survives layout optimization."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2], keepdims=True)
      squeeze = array_ops.squeeze(reduce_sum, axis=[0, 1, 2])
      output = array_ops.identity(squeeze)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongHWC(self):
    """reduce_sum over H, W and C: one transpose survives layout optimization."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[1, 2, 3])
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongNHW(self):
    """reduce_sum over N, H and W: one transpose survives layout optimization."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[0, 1, 2])
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongC(self):
    """reduce_sum over C only: one transpose survives layout optimization."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[3])
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Three transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongCKeepDims(self):
    """reduce_sum over C with keepdims: output stays 4-D, two transposes remain."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[3], keepdims=True)
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Sum-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongHKeepDims(self):
    """reduce_sum over one spatial axis with keepdims: two transposes remain."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[2], keepdims=True)
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReduceSumAlongWCKeepDims(self):
    """reduce_sum over W and C with keepdims: two transposes remain."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      reduce_sum = math_ops.reduce_sum(conv, axis=[2, 3], keepdims=True)
      output = array_ops.identity(reduce_sum)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testConcatWithControlDependency(self):
    """Concat under a control dependency: axis const gets a permuted replacement."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      axis = constant_op.constant(3)
      var = variables.Variable(3)
      assign = state_ops.assign(var, 6)
      with ops.control_dependencies([assign]):
        concat = array_ops.concat([conv, conv], axis)
      output = array_ops.identity(concat)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('concat-0-0', nodes)
      self.assertIn('concat-2-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testFill(self):
    """Fill fed by Shape: vector permutes cancel, two transposes remain."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = array_ops.placeholder(dtype='float32')
      conv = _two_layer_model(x)
      shape = array_ops.shape(conv)
      scalar = array_ops.constant(5.7)
      fill = array_ops.fill(shape, scalar)
      output = array_ops.identity(fill)
      x_val = [3.4] * 784
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={x: x_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                x: x_val
            })
      nodes = []
      num_transposes = 0
      num_vec_permute = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        if _is_permute(node.name):
          num_vec_permute += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      # Two vector permute nodes were initially added in the Expand phase of
      # LayoutOptimizer; they cancelled out each other in the Collapse phase.
      expected_vec_permute = 0
      self.assertEqual(expected_vec_permute, num_vec_permute)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Fill-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testTile(self):
    """Tile with fed multiples: multiples input gets a vector permute node."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      multiple = array_ops.placeholder(dtype='int32')
      tile = array_ops.tile(conv, multiple)
      output = array_ops.identity(tile)
      multiple_val = [2, 3, 4, 1]
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={multiple: multiple_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                multiple: multiple_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Tile-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('Tile-1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReverseWithConstDims(self):
    """ReverseV2 with constant dims: a permuted dims const is generated."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dims = constant_op.constant([3, 1], name='DimsConst')
      reverse = array_ops.reverse(conv, dims)
      output = array_ops.identity(reverse)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
      self.assertIn('ReverseV2-1-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testReverseWithNonConstDims(self):
    """ReverseV2 with fed dims: dims input gets a DimMap node."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      dims = array_ops.placeholder(dtype='int32')
      reverse = array_ops.reverse(conv, dims)
      output = array_ops.identity(reverse)
      dims_val = [2, 3]
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={dims: dims_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                dims: dims_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('ReverseV2-0-0', nodes)
      self._assert_map_nhwc_to_nchw('ReverseV2-1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSelectOp(self):
    """Select with a 4-D condition: two transposes survive layout optimization."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      add = math_ops.add(conv, conv)
      mean = math_ops.reduce_mean(conv)
      condition = math_ops.less(conv, mean)
      select = gen_math_ops.select(condition, conv, add)
      output = array_ops.identity(select)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSelectOpConditionUnknownShape(self):
    """Select with an unknown-shape condition: three transposes remain."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      add = math_ops.add(conv, conv)
      condition = array_ops.placeholder(dtype='bool')
      select = gen_math_ops.select(condition, conv, add)
      output = array_ops.identity(select)
      condition_val = np.zeros((1, 7, 7, 64))
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={condition: condition_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={condition: condition_val})
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 3
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSelectOpScalarCondition(self):
    """Select with a scalar condition: two transposes survive."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      add = math_ops.add(conv, conv)
      condition = constant_op.constant(True)
      select = gen_math_ops.select(condition, conv, add)
      output = array_ops.identity(select)
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Select-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testPadWithNonConstPaddings(self):
    """Pad with fed paddings: paddings input gets a vector permute node."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      paddings = array_ops.placeholder(dtype='int32')
      pad = array_ops.pad(conv, paddings)
      output = array_ops.identity(pad)
      paddings_val = [[1, 2], [3, 4], [5, 6], [7, 8]]
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={paddings: paddings_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                paddings: paddings_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Pad-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('Pad-1', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testMaxPoolV2(self):
    """MaxPoolV2 with placeholder strides: two transposes survive and the
    strides vector gets an NHWC→NCHW permutation node."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
      strides = array_ops.placeholder(dtype='int32', shape=[4])
      max_pool = gen_nn_ops.max_pool_v2(conv, ksize, strides, 'VALID')
      output = array_ops.identity(max_pool)
      strides_val = [1, 3, 2, 1]
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={strides: strides_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                strides: strides_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('MaxPoolV2-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('MaxPoolV2-2', nodes)
      # The constant ksize input is rewritten in place by the optimizer.
      self.assertIn('MaxPoolV2-1-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testMaxPoolGradV2(self):
    """MaxPoolGradV2 with placeholder strides under layout optimization."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      ksize = constant_op.constant([1, 2, 3, 1], shape=[4])
      strides = array_ops.placeholder(dtype='int32', shape=[4])
      max_pool_grad = gen_nn_ops.max_pool_grad_v2(conv, conv, conv, ksize,
                                                  strides, 'VALID')
      output = array_ops.identity(max_pool_grad)
      strides_val = [1, 3, 2, 1]
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={strides: strides_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                strides: strides_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('MaxPoolGradV2-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('MaxPoolGradV2-4', nodes)
      # The constant ksize input is rewritten in place by the optimizer.
      self.assertIn('MaxPoolGradV2-3-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testSliceWithNonConstAxis(self):
    """Slice with a placeholder size vector: size input gets a vector
    permutation while only two data transposes remain."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      size = array_ops.placeholder(dtype='int32')
      s = array_ops.slice(conv, [0, 0, 0, 0], size)
      output = array_ops.identity(s)
      size_val = [1, 2, 3, 4]
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={size: size_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                size: size_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('Slice-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('Slice-2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testStridedSliceWithNonConstAxis(self):
    """StridedSlice with a placeholder end vector under layout optimization;
    the constant begin/strides inputs are rewritten in place."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      end = array_ops.placeholder(dtype='int32')
      s = array_ops.strided_slice(conv, [0, 0, 0, 0], end, strides=[1, 2, 3, 1])
      output = array_ops.identity(s)
      end_val = [1, 2, 3, 4]
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={end: end_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                end: end_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('StridedSlice-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('StridedSlice-2', nodes)
      self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
      self.assertIn('StridedSlice-3-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testStridedSliceWithMask1011(self):
    """Python slicing `[:, :, 1:-1, :]` produces a StridedSlice with
    begin/end mask 1011; all three constant inputs get rewritten."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      # This will generate a StridedSlice op with begin mask and
      # end mask 11(1011).
      s = conv[:, :, 1:-1, :]
      output = array_ops.identity(s)
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
      self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
      self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
      self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testStridedSliceWithMask0111(self):
    """Python slicing `[:, :, :, 1:-1]` produces a StridedSlice with
    begin/end mask 0111; all three constant inputs get rewritten."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      # This will generate a StridedSlice op with begin mask and
      # end mask 7(0111).
      s = conv[:, :, :, 1:-1]
      output = array_ops.identity(s)
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('strided_slice-0-0', nodes)
      self.assertIn('strided_slice-1-LayoutOptimizer', nodes)
      self.assertIn('strided_slice-2-LayoutOptimizer', nodes)
      self.assertIn('strided_slice-3-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testStridedSliceGradWithNonConstAxis(self):
    """StridedSliceGrad with a placeholder end vector under layout
    optimization."""
    if test.is_gpu_available(cuda_only=True):
      random_seed.set_random_seed(0)
      x = random_ops.truncated_normal([1, 784], seed=0)
      conv = _two_layer_model(x)
      end = array_ops.placeholder(dtype='int32')
      shape = array_ops.shape(conv)
      end_val = [1, 2, 3, 4]
      s = array_ops.strided_slice(
          conv, [0, 0, 0, 0], end_val, strides=[1, 2, 3, 1])
      s_grad = array_ops.strided_slice_grad(shape, [0, 0, 0, 0], end,
                                            [1, 2, 3, 1], s)
      output = array_ops.identity(s_grad)
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={end: end_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                end: end_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      # Four transposes were initially added in the Expand phase of
      # LayoutOptimizer; two of them are cancelled out in the Collapse phase.
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('StridedSliceGrad-0-0', nodes)
      self._assert_vec_nhwc_to_nchw('StridedSliceGrad-2', nodes)
      self.assertIn('StridedSlice-1-LayoutOptimizer', nodes)
      self.assertIn('StridedSlice-2-LayoutOptimizer', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testShapeN(self):
    """ShapeN outputs get a vector NCHW→NHWC permutation rather than a data
    transpose, so only the Conv2D input transpose survives."""
    if test.is_gpu_available(cuda_only=True):
      x = array_ops.placeholder(dtype='float32')
      conv = _two_layer_model(x)
      shapen = array_ops.shape_n([conv, conv])
      output = math_ops.add(shapen[0], shapen[1])
      x_val = [1.7] * 784
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={x: x_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={
                x: x_val
            })
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 1
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self._assert_vec_nchw_to_nhwc('ShapeN-0-0', nodes)
      # Shapes are integers; results must match exactly.
      self.assertAllEqual(output_val_ref, output_val)
  def testShapeNFollowedByNotConvertibleNodeReshape(self):
    """ShapeN feeding a Reshape consumer (not layout-convertible): only the
    Conv2D transpose pair remains."""
    if test.is_gpu_available(cuda_only=True):
      x = array_ops.placeholder(dtype='float32')
      conv = _two_layer_model(x)
      conv_reshape = array_ops.reshape(conv, [1, 1, 1, -1])
      shapen = array_ops.shape_n([conv, conv_reshape])
      shape = array_ops.identity(shapen[1])
      ones = array_ops.ones(shape)
      output = math_ops.add_n([conv_reshape, ones])
      x_val = [1.7] * 784
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output, feed_dict={x: x_val})
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(
            output, run_metadata=metadata, feed_dict={x: x_val})
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('Conv2D-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testLoop(self):
if test.is_gpu_available(cuda_only=True):
output = _loop()
with session.Session(config=_get_config(False)) as sess:
output_val_ref = sess.run(output)
with session.Session(config=_get_config()) as sess:
metadata = config_pb2.RunMetadata()
output_val = sess.run(output, run_metadata=metadata)
nodes = []
num_transposes = 0
for node in metadata.cost_graph.node:
if _is_transpose(node.name):
num_transposes += 1
nodes.append(node.name)
# Four transposes were initially added in the Expand phase of
# LayoutOptimizer; two of them are cancelled out in the Collapse phase.
expected_num_transposes = 2
self.assertEqual(expected_num_transposes, num_transposes)
self.assertEqual(expected_num_transposes, num_transposes)
self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
self._assert_trans_nchw_to_nhwc('map/while/MaxPool_1-0-2', nodes)
self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testLoopWithBranch(self):
    """While-loop containing a conditional branch: three transposes remain."""
    if test.is_gpu_available(cuda_only=True):
      output = _loop_with_branch()
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 3
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testLoopWithVecAnd4D(self):
    """While-loop mixing vector and 4-D tensors: two transposes remain."""
    if test.is_gpu_available(cuda_only=True):
      output = _loop_with_vec_and_4d()
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('map/while/Conv2D-0', nodes)
      self._assert_trans_nchw_to_nhwc('map/while/Add_1-0-2', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
  def testBinaryOpSecondPort(self):
    """4-D tensor arriving on the second input of a binary op (Add after
    FusedBatchNorm) still gets correct transpose placement."""
    if test.is_gpu_available(cuda_only=True):
      output = _model_with_second_port()
      # Reference run without the layout optimizer.
      with session.Session(config=_get_config(False)) as sess:
        output_val_ref = sess.run(output)
      with session.Session(config=_get_config()) as sess:
        metadata = config_pb2.RunMetadata()
        output_val = sess.run(output, run_metadata=metadata)
      nodes = []
      num_transposes = 0
      for node in metadata.cost_graph.node:
        if _is_transpose(node.name):
          num_transposes += 1
        nodes.append(node.name)
      expected_num_transposes = 2
      self.assertEqual(expected_num_transposes, num_transposes)
      self._assert_trans_nhwc_to_nchw('FusedBatchNorm-0', nodes)
      self._assert_trans_nchw_to_nhwc('Add-0-0', nodes)
      self.assertAllClose(output_val_ref, output_val, atol=1e-3)
def testGradient(self):
meta_graph = _simple_metagraph()
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in ['Conv2D', 'Conv2DBackpropFilter', 'Conv2DBackpropInput']:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 5)
def testDepthwise(self):
meta_graph = _simple_metagraph(depthwise=True)
config = config_pb2.ConfigProto()
config.graph_options.rewrite_options.CopyFrom(
rewriter_config_pb2.RewriterConfig(
layout_optimizer=rewriter_config_pb2.RewriterConfig.ON,
min_graph_nodes=-1))
optimized_graph = tf_optimizer.OptimizeGraph(
config, meta_graph, cluster=_get_cluster())
found = 0
for node in optimized_graph.node:
if node.op in [
'DepthwiseConv2dNative', 'DepthwiseConv2dNativeBackpropFilter',
'DepthwiseConv2dNativeBackpropInput'
]:
found += 1
self.assertEqual(node.attr['data_format'].s, b'NCHW')
self.assertEqual(found, 6)
def testCheckpointCompatibility(self):
if not test.is_gpu_available(cuda_only=True):
self.skipTest('GPU required')
checkpoint_path = self.get_temp_dir()
self._train(checkpoint_path)
vars_expected = self._train(checkpoint_path, restore=True)
vars_layout_optimized = self._train(
checkpoint_path, restore=True, layout_optimizer=True)
for var_expected, var_layout_optimized in zip(vars_expected,
vars_layout_optimized):
self.assertAllClose(var_expected, var_layout_optimized, atol=1e-6)
# Run the full test suite when this module is executed as a script.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
justinHowlett/KivaServer | node_modules/jitsu/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 115 | 13925 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Stuio."""
import errno
import os
import re
import subprocess
import sys
import gyp
class VisualStudioVersion(object):
  """Information regarding a version of Visual Studio."""

  def __init__(self, short_name, description,
               solution_version, project_version, flat_sln, uses_vcxproj,
               path, sdk_based, default_toolset=None):
    """Record the static facts about one Visual Studio release.

    short_name: e.g. '2010' or '2010e' (trailing 'e' marks express edition).
    description: human-readable name, e.g. 'Visual Studio 2010'.
    solution_version: format version string written into .sln files.
    project_version: format version string for .vcproj/.vcxproj files.
    flat_sln: whether to generate a flat (non-nested) solution.
    uses_vcxproj: True for MSBuild-era .vcxproj project files.
    path: Visual Studio installation root (may be None).
    sdk_based: True when driving the standalone Windows SDK toolchain.
    default_toolset: msbuild toolset, e.g. 'v110', or None.
    """
    self.short_name = short_name
    self.description = description
    self.solution_version = solution_version
    self.project_version = project_version
    self.flat_sln = flat_sln
    self.uses_vcxproj = uses_vcxproj
    self.path = path
    self.sdk_based = sdk_based
    self.default_toolset = default_toolset

  def ShortName(self):
    """Get the short name of the version, e.g. '2010e'."""
    return self.short_name

  def Description(self):
    """Get the full description of the version."""
    return self.description

  def SolutionVersion(self):
    """Get the version number of the sln files."""
    return self.solution_version

  def ProjectVersion(self):
    """Get the version number of the vcproj or vcxproj files."""
    return self.project_version

  def FlatSolution(self):
    """Whether generated solutions should be flat (non-nested)."""
    return self.flat_sln

  def UsesVcxproj(self):
    """Returns true if this version uses a vcxproj file."""
    return self.uses_vcxproj

  def ProjectExtension(self):
    """Returns the file extension for the project."""
    return self.uses_vcxproj and '.vcxproj' or '.vcproj'

  def Path(self):
    """Returns the path to Visual Studio installation."""
    return self.path

  def ToolPath(self, tool):
    """Returns the path to a given compiler tool. """
    return os.path.normpath(os.path.join(self.path, "VC/bin", tool))

  def DefaultToolset(self):
    """Returns the msbuild toolset version that will be used in the absence
    of a user override."""
    return self.default_toolset

  def SetupScript(self, target_arch):
    """Returns a command (with arguments) to be used to set up the
    environment."""
    # Check if we are running in the SDK command line environment and use
    # the setup script from the SDK if so. |target_arch| should be either
    # 'x86' or 'x64'.
    assert target_arch in ('x86', 'x64')
    sdk_dir = os.environ.get('WindowsSDKDir')
    if self.sdk_based and sdk_dir:
      return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
              '/' + target_arch]
    else:
      # We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
      # vcvars32, which it can only find if VS??COMNTOOLS is set, which it
      # isn't always.
      if target_arch == 'x86':
        return [os.path.normpath(
          os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
      else:
        assert target_arch == 'x64'
        arg = 'x86_amd64'
        if (os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
            os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
          # Use the 64-on-64 compiler if we can.
          arg = 'amd64'
        return [os.path.normpath(
            os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
  """Use reg.exe to read a particular key through _RegistryQueryBase.

  First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
  that fails, it falls back to System32.  Sysnative is available on Vista and
  up and available on Windows Server 2003 and XP through KB patch 942589. Note
  that Sysnative will always fail if using 64-bit python due to it being a
  virtual directory and System32 will work correctly in the first place.
  KB 942589 - http://support.microsoft.com/kb/942589/en-us.

  Arguments:
    key: The registry key.
    value: The particular registry value to read (optional).
  Return:
    stdout from reg.exe, or None for failure.
  """
  text = None
  try:
    text = _RegistryQueryBase('Sysnative', key, value)
  # 'except ... as' (rather than the legacy 'except ..., e') works on
  # Python 2.6+ and keeps this module importable under Python 3.
  except OSError as e:
    if e.errno == errno.ENOENT:
      # The Sysnative virtual directory does not exist; retry via System32.
      text = _RegistryQueryBase('System32', key, value)
    else:
      raise
  return text
def _RegistryGetValue(key, value):
  """Use reg.exe to obtain the value of a registry key.

  Args:
    key: The registry key.
    value: The particular registry value to read.
  Return:
    contents of the registry key's value, or None on failure.
  """
  output = _RegistryQuery(key, value)
  if not output:
    return None
  # reg.exe echoes lines like '    <name>    REG_SZ    <data>\r\n'; extract
  # the data column.
  match = re.search(r'REG_\w+\s+([^\r]+)\r\n', output)
  return match.group(1) if match else None
def _RegistryKeyExists(key):
  """Use reg.exe to see if a key exists.

  Args:
    key: The registry key to check.
  Return:
    True if the key exists
  """
  # A successful query yields non-empty reg.exe output; anything falsy
  # (None or empty) means the key could not be read.
  return bool(_RegistryQuery(key))
def _CreateVersion(name, path, sdk_based=False):
  """Sets up MSVS project generation.

  Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
  autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions python will throw a error.
  """
  if path:
    path = os.path.normpath(path)
  # One entry per supported release; the 'e' variants are express editions
  # and differ only in flat_sln=True.
  versions = {
      '2012': VisualStudioVersion('2012',
                                  'Visual Studio 2012',
                                  solution_version='12.00',
                                  project_version='4.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based,
                                  default_toolset='v110'),
      '2012e': VisualStudioVersion('2012e',
                                   'Visual Studio 2012',
                                   solution_version='12.00',
                                   project_version='4.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based,
                                   default_toolset='v110'),
      '2010': VisualStudioVersion('2010',
                                  'Visual Studio 2010',
                                  solution_version='11.00',
                                  project_version='4.0',
                                  flat_sln=False,
                                  uses_vcxproj=True,
                                  path=path,
                                  sdk_based=sdk_based),
      '2010e': VisualStudioVersion('2010e',
                                   'Visual Studio 2010',
                                   solution_version='11.00',
                                   project_version='4.0',
                                   flat_sln=True,
                                   uses_vcxproj=True,
                                   path=path,
                                   sdk_based=sdk_based),
      '2008': VisualStudioVersion('2008',
                                  'Visual Studio 2008',
                                  solution_version='10.00',
                                  project_version='9.00',
                                  flat_sln=False,
                                  uses_vcxproj=False,
                                  path=path,
                                  sdk_based=sdk_based),
      '2008e': VisualStudioVersion('2008e',
                                   'Visual Studio 2008',
                                   solution_version='10.00',
                                   project_version='9.00',
                                   flat_sln=True,
                                   uses_vcxproj=False,
                                   path=path,
                                   sdk_based=sdk_based),
      '2005': VisualStudioVersion('2005',
                                  'Visual Studio 2005',
                                  solution_version='9.00',
                                  project_version='8.00',
                                  flat_sln=False,
                                  uses_vcxproj=False,
                                  path=path,
                                  sdk_based=sdk_based),
      '2005e': VisualStudioVersion('2005e',
                                   'Visual Studio 2005',
                                   solution_version='9.00',
                                   project_version='8.00',
                                   flat_sln=True,
                                   uses_vcxproj=False,
                                   path=path,
                                   sdk_based=sdk_based),
  }
  # Unknown names raise KeyError here, which is the documented error above.
  return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
  """Collect the list of installed visual studio versions.

  Arguments:
    versions_to_check: internal version strings (e.g. '10.0') to probe for,
        in descending order of preference.
    force_express: when True, only express editions are considered even if a
        full install (devenv.exe) is present.

  Returns:
    A list of visual studio versions installed in descending order of
    usage preference.

  Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-10 are considered.
  Possibilities are:
    2005(e) - Visual Studio 2005 (8)
    2008(e) - Visual Studio 2008 (9)
    2010(e) - Visual Studio 2010 (10)
    2012(e) - Visual Studio 2012 (11)
  Where (e) is e for express editions of MSVS and blank otherwise.
  """
  version_to_year = {
      '8.0': '2005', '9.0': '2008', '10.0': '2010', '11.0': '2012'}
  versions = []
  for version in versions_to_check:
    # Old method of searching for which VS version is installed
    # We don't use the 2010-encouraged-way because we also want to get the
    # path to the binaries, which it doesn't offer.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
            r'HKLM\Software\Microsoft\VCExpress\%s' % version,
            r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
    for index in range(len(keys)):
      path = _RegistryGetValue(keys[index], 'InstallDir')
      if not path:
        continue
      path = _ConvertToCygpath(path)
      # Check for full.
      full_path = os.path.join(path, 'devenv.exe')
      express_path = os.path.join(path, 'vcexpress.exe')
      if not force_express and os.path.exists(full_path):
        # Add this one.
        # InstallDir points at Common7/IDE; the install root is two up.
        versions.append(_CreateVersion(version_to_year[version],
            os.path.join(path, '..', '..')))
      # Check for express.
      elif os.path.exists(express_path):
        # Add this one.
        versions.append(_CreateVersion(version_to_year[version] + 'e',
            os.path.join(path, '..', '..')))
    # The old method above does not work when only SDK is installed.
    keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
            r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
    for index in range(len(keys)):
      path = _RegistryGetValue(keys[index], version)
      if not path:
        continue
      path = _ConvertToCygpath(path)
      # SDK-only installs are treated as express ('e') versions.
      versions.append(_CreateVersion(version_to_year[version] + 'e',
          os.path.join(path, '..'), sdk_based=True))
  return versions
def SelectVisualStudioVersion(version='auto'):
  """Select which version of Visual Studio projects to generate.

  Arguments:
    version: Hook to allow caller to force a particular version (vs auto).
  Returns:
    An object representing a visual studio project format version.
  """
  # In auto mode, check environment variable for override.
  if version == 'auto':
    version = os.environ.get('GYP_MSVS_VERSION', 'auto')
  # Probe order for 'auto' deliberately prefers 2010, then 2008/2005,
  # trying 2012 last.
  version_map = {
    'auto': ('10.0', '9.0', '8.0', '11.0'),
    '2005': ('8.0',),
    '2005e': ('8.0',),
    '2008': ('9.0',),
    '2008e': ('9.0',),
    '2010': ('10.0',),
    '2010e': ('10.0',),
    '2012': ('11.0',),
    '2012e': ('11.0',),
  }
  override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
  if override_path:
    msvs_version = os.environ.get('GYP_MSVS_VERSION')
    # Overriding the path only makes sense for SDK-based express setups.
    if not msvs_version or 'e' not in msvs_version:
      raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
                       'set to an "e" version (e.g. 2010e)')
    return _CreateVersion(msvs_version, override_path, sdk_based=True)
  version = str(version)
  versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
  if not versions:
    if version == 'auto':
      # Default to 2005 if we couldn't find anything
      return _CreateVersion('2005', None)
    else:
      return _CreateVersion(version, None)
  # Detection returns versions in preference order; take the best one.
  return versions[0]
| mit |
heromod/migrid | mig/shared/functionality/rmvgridowner.py | 1 | 24999 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# --- BEGIN_HEADER ---
#
# rmvgridowner - remove a vgrid owner
# Copyright (C) 2003-2015 The MiG Project lead by Brian Vinter
#
# This file is part of MiG.
#
# MiG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# MiG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# -- END_HEADER ---
#
"""Remove an owner from a given vgrid"""
import os
import subprocess
from binascii import hexlify
import shared.returnvalues as returnvalues
from shared.base import client_id_dir
from shared.fileio import remove_rec
from shared.functional import validate_input_and_cert, REJECT_UNSET
from shared.handlers import correct_handler
from shared.html import html_post_helper
from shared.init import initialize_main_variables, find_entry
from shared.parseflags import force
from shared.useradm import distinguished_name_to_user
from shared.vgrid import init_vgrid_script_add_rem, vgrid_is_owner, \
vgrid_is_member, vgrid_owners, vgrid_members, vgrid_resources, \
vgrid_list_subvgrids, vgrid_remove_owners, vgrid_list_parents
from shared.vgridaccess import unmap_vgrid, unmap_inheritance
def signature():
    """Signature of the main function"""
    defaults = {
        'vgrid_name': REJECT_UNSET,
        'cert_id': REJECT_UNSET,
        'flags': [],
    }
    return ['text', defaults]
def rm_tracker_admin(configuration, cert_id, vgrid_name, tracker_dir,
                     output_objects):
    """Remove Trac issue tracker owner.

    Revokes TRAC_ADMIN from the user identified by cert_id on the tracker
    rooted at tracker_dir by invoking the trac-admin command line tool.
    Appends status entries to output_objects.

    NOTE(review): return types are inconsistent here - the missing-tracker
    branch returns an (output_objects, SYSTEM_ERROR) tuple (which is truthy)
    while the other paths return plain True/False. Confirm what callers
    expect before relying on the boolean result.
    """
    cgi_tracker_var = os.path.join(tracker_dir, 'var')
    if not os.path.isdir(cgi_tracker_var):
        output_objects.append(
            {'object_type': 'text', 'text'
             : 'No tracker (%s) for %s %s - skipping tracker admin rights' \
             % (tracker_dir, configuration.site_vgrid_label, vgrid_name)
             })
        return (output_objects, returnvalues.SYSTEM_ERROR)
    try:
        # admin_id is the Trac login attribute extracted from the cert DN.
        admin_user = distinguished_name_to_user(cert_id)
        admin_id = admin_user.get(configuration.trac_id_field, 'unknown_id')
        # Remove admin rights for owner using trac-admin command:
        # trac-admin tracker_dir deploy cgi_tracker_bin
        perms_cmd = [configuration.trac_admin_path, cgi_tracker_var,
                     'permission', 'remove', admin_id, 'TRAC_ADMIN']
        configuration.logger.info('remove admin rights from owner: %s' % \
                                  perms_cmd)
        proc = subprocess.Popen(perms_cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        proc.wait()
        if proc.returncode != 0:
            raise Exception("tracker permissions %s failed: %s (%d)" % \
                            (perms_cmd, proc.stdout.read(),
                             proc.returncode))
        return True
    except Exception, exc:
        output_objects.append(
            {'object_type': 'error_text', 'text'
             : 'Could not remove %s tracker admin rights: %s' % (cert_id, exc)
             })
        return False
def unlink_share(user_dir, vgrid):
    """Utility function to remove link to shared vgrid folder.

    user_dir: the full path to the user home where deletion should happen

    vgrid: the name of the vgrid to delete

    Returns boolean success indicator and potential messages as a pair.

    Note: Removed links are hard-coded (as in other modules)
        user_dir/vgrid
    In case of a sub-vgrid, enclosing empty directories are removed as well.
    """
    success = True
    msg = ""
    path = os.path.join(user_dir, vgrid)
    try:
        if os.path.exists(path):
            os.remove(path)
            path = os.path.dirname(path)
            if os.path.isdir(path) and os.listdir(path) == []:
                # Prune now-empty parent directories of a sub-vgrid link.
                os.removedirs(path)
    # 'except ... as' (Python 2.6+/3 compatible) replaces the legacy
    # 'except Exception, err' comma syntax that breaks under Python 3.
    except Exception as err:
        success = False
        msg += "\nCould not remove link %s: %s" % (path, err)
    # msg starts with a newline separator when non-empty; strip it.
    return (success, msg[1:])
def unlink_web_folders(user_dir, vgrid):
    """Utility function to remove links to shared vgrid web folders.

    user_dir: the full path to the user home where deletion should happen

    vgrid: the name of the vgrid to delete

    Returns boolean success indicator and potential messages as a pair.

    Note: Removed links are hard-coded (as in other modules)
        user_dir/private_base/vgrid
        user_dir/public_base/vgrid
    In case of a sub-vgrid, enclosing empty directories are removed as well.
    """
    success = True
    msg = ""
    for infix in ["private_base", "public_base"]:
        path = os.path.join(user_dir, infix, vgrid)
        try:
            if os.path.exists(path):
                os.remove(path)
                path = os.path.dirname(path)
                if os.path.isdir(path) and os.listdir(path) == []:
                    # Prune now-empty parent directories of a sub-vgrid link.
                    os.removedirs(path)
        # 'except ... as' (Python 2.6+/3 compatible) replaces the legacy
        # 'except Exception, err' comma syntax that breaks under Python 3.
        except Exception as err:
            success = False
            msg += "\nCould not remove link %s: %s" % (path, err)
    # msg starts with a newline separator when non-empty; strip it.
    return (success, msg[1:])
def abandon_vgrid_files(vgrid, configuration):
    """Remove all files which belong to the given VGrid (parameter).

    This corresponds to the functionality in createvgrid.py, but we
    can make our life easy by removing recursively, using a function
    in fileio.py for this purpose. The VGrid is assumed to be abandoned
    entirely.

    The following directories are removed recursively:
        configuration.vgrid_public_base/<vgrid>
        configuration.vgrid_private_base/<vgrid>
        configuration.vgrid_files_home/<vgrid>
    plus the soft link (if it is a link, not a directory)
        configuration.wwwpublic/vgrid/<vgrid>

    Note: the entry for the VGrid itself, configuration.vgrid_home/<vgrid>,
    is removed separately, see remove_vgrid_entry.

    Returns: Success indicator and potential messages as a pair.
    """
    logger = configuration.logger
    logger.debug('Deleting all files for %s %s' %
                 (configuration.site_vgrid_label, vgrid))
    errors = []
    # removing this soft link may fail, since it is a directory for sub-VGrids
    try:
        os.remove(os.path.join(configuration.wwwpublic, 'vgrid', vgrid))
    except Exception as err:
        logger.debug(
            'not removing soft link to public %s pages for %s: %s' %
            (configuration.site_vgrid_label, vgrid, err))
    base_dirs = (configuration.vgrid_public_base,
                 configuration.vgrid_private_base,
                 configuration.vgrid_files_home)
    for base in base_dirs:
        target = os.path.join(base, vgrid)
        if not remove_rec(target, configuration):
            errors.append("Error while removing %s." % target)
    msg = "".join(errors)
    logger.debug('Messages: %s.' % msg)
    return (not errors, msg)
def remove_vgrid_entry(vgrid, configuration):
    """Remove an entry for a VGrid in the vgrid configuration directory.

        configuration.vgrid_home/<vgrid>

    The VGrid contents (shared files and web pages) are assumed to either
    be abandoned entirely, or become subdirectory of another vgrid (for
    sub-vgrids). Wiki and SCM are deleted as well, as they would be unusable
    and undeletable.

    vgrid: the name of the VGrid to delete
    configuration: to determine configuration.vgrid_home

    Returns: Success indicator and potential messages as a pair.
    """
    logger = configuration.logger
    logger.debug('Removing entry for %s %s' %
                 (configuration.site_vgrid_label, vgrid))
    msg = ''
    success = remove_rec(os.path.join(configuration.vgrid_home, vgrid),
                         configuration)
    if success:
        # best-effort clean-up of public, member and owner scms/trackers:
        # results are deliberately ignored, matching the original behavior
        scm_enabled = configuration.hg_path and configuration.hgweb_scripts
        tracker_enabled = configuration.trac_admin_path
        for base in (configuration.vgrid_public_base,
                     configuration.vgrid_private_base,
                     configuration.vgrid_files_home):
            if scm_enabled:
                remove_rec(os.path.join(base, vgrid, '.vgridscm'),
                           configuration)
            if tracker_enabled:
                remove_rec(os.path.join(base, vgrid, '.vgridtracker'),
                           configuration)
    else:
        logger.debug('Error while removing %s.' % vgrid)
        msg += "Error while removing entry for %s." % vgrid
    return (success, msg)
def main(client_id, user_arguments_dict):
    """Main function used by front end.

    Removes cert_id as owner of vgrid_name. If other direct owners remain,
    only the ownership (plus the user's share/web links and tracker admin
    rights) is dropped. If the last direct owner leaves, the whole vgrid is
    deleted after explicit confirmation via the force flag.
    """
    (configuration, logger, output_objects, op_name) = \
        initialize_main_variables(client_id, op_header=False)
    defaults = signature()[1]
    (validate_status, accepted) = validate_input_and_cert(
        user_arguments_dict,
        defaults,
        output_objects,
        client_id,
        configuration,
        allow_rejects=False,
        )
    if not validate_status:
        return (accepted, returnvalues.CLIENT_ERROR)

    if not correct_handler('POST'):
        output_objects.append(
            {'object_type': 'error_text', 'text'
             : 'Only accepting POST requests to prevent unintended updates'})
        return (output_objects, returnvalues.CLIENT_ERROR)

    vgrid_name = accepted['vgrid_name'][-1]
    flags = ''.join(accepted['flags'])
    cert_id = accepted['cert_id'][-1]
    cert_dir = client_id_dir(cert_id)

    # inherited vgrid membership
    inherit_vgrid_member = False

    title_entry = find_entry(output_objects, 'title')
    title_entry['text'] = 'Remove %s' % configuration.site_vgrid_label
    output_objects.append({'object_type': 'header', 'text'
                           : 'Remove %s Owner' %
                           configuration.site_vgrid_label})

    # Validity of user and vgrid names is checked in this init function so
    # no need to worry about illegal directory traversal through variables
    (ret_val, msg, _) = \
        init_vgrid_script_add_rem(vgrid_name, client_id, cert_id,
                                  'owner', configuration)
    if not ret_val:
        output_objects.append({'object_type': 'error_text', 'text': msg})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # don't remove if not already an owner
    if not vgrid_is_owner(vgrid_name, cert_id, configuration):
        output_objects.append({'object_type': 'error_text', 'text'
                               : '%s is not an owner of %s or a parent %s.'
                               % (cert_id, vgrid_name,
                                  configuration.site_vgrid_label)})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # we need the local owners file to detect inherited ownerships
    (status, owners_direct) = vgrid_owners(vgrid_name, configuration, False)
    (all_status, owners) = vgrid_owners(vgrid_name, configuration, True)
    if not status or not all_status:
        logger.error('Error loading owners for %s: %s / %s'
                     % (vgrid_name, owners_direct, owners))
        output_objects.append({'object_type': 'error_text', 'text'
                               : 'An internal error occurred, error conditions have been logged.'})
        output_objects.append({'object_type': 'text', 'text'
                               : '''
You can help us fix the problem by notifying the administrators
via mail about what you wanted to do when the error happened.'''})
        return (output_objects, returnvalues.CLIENT_ERROR)

    # find out whether to just remove an owner or delete the whole thing.
    # ask about delete if last or no direct owners.

    if len(owners_direct) > 1:
        logger.debug('Removing %s, one of several owners, from %s.' %
                     (cert_id, vgrid_name))
        if cert_id not in owners_direct:
            # the owner owns an upper vgrid, ownership is inherited
            # cannot remove, not last (inherited) owner
            logger.debug('Cannot delete: Inherited ownership.' +
                         '\n Owners: %s,\n Direct owners: %s.'
                         % (owners, owners_direct))
            output_objects.append({'object_type': 'error_text', 'text'
                                   : '''%s is owner of a parent %s.
Owner removal has to be performed at the topmost vgrid''' %
                                   (cert_id, configuration.site_vgrid_label)})
            return (output_objects, returnvalues.CLIENT_ERROR)
        else:
            # Remove any tracker admin rights
            if configuration.trac_admin_path:
                public_tracker_dir = \
                    os.path.abspath(os.path.join(
                        configuration.vgrid_public_base, vgrid_name,
                        '.vgridtracker'))
                private_tracker_dir = \
                    os.path.abspath(os.path.join(
                        configuration.vgrid_private_base, vgrid_name,
                        '.vgridtracker'))
                vgrid_tracker_dir = \
                    os.path.abspath(os.path.join(
                        configuration.vgrid_files_home, vgrid_name,
                        '.vgridtracker'))
                for tracker_dir in [public_tracker_dir, private_tracker_dir,
                                    vgrid_tracker_dir]:
                    if not rm_tracker_admin(configuration, cert_id,
                                            vgrid_name, tracker_dir,
                                            output_objects):
                        return (output_objects, returnvalues.SYSTEM_ERROR)

            user_dir = os.path.abspath(os.path.join(configuration.user_home,
                                                    cert_dir)) + os.sep

            # Do not touch vgrid share if still a member of a parent vgrid
            if vgrid_is_member(vgrid_name, cert_id, configuration):
                # list is in top-down order
                parent_vgrids = vgrid_list_parents(vgrid_name, configuration)
                inherit_vgrid_member = vgrid_name
                for parent in parent_vgrids:
                    if vgrid_is_member(parent, cert_id, configuration,
                                       recursive=False):
                        inherit_vgrid_member = parent
                        break
                output_objects.append(
                    {'object_type': 'text', 'text'
                     : '''NOTE: %s is still a member of parent %s %s.
Preserving access to corresponding %s.''' %
                     (cert_id, configuration.site_vgrid_label,
                      inherit_vgrid_member, configuration.site_vgrid_label)
                     })
            else:
                (success, msg) = unlink_share(user_dir, vgrid_name)
                if not success:
                    logger.error('Could not remove share link: %s.' % msg)
                    output_objects.append({'object_type': 'error_text', 'text'
                                           : 'Could not remove share links: %s.'
                                           % msg})
                    return (output_objects, returnvalues.SYSTEM_ERROR)

                # unlink shared web folders
                (success, msg) = unlink_web_folders(user_dir, vgrid_name)
                if not success:
                    logger.error('Could not remove web links: %s.' % msg)
                    output_objects.append({'object_type': 'error_text', 'text'
                                           : 'Could not remove web links: %s.'
                                           % msg})
                    return (output_objects, returnvalues.SYSTEM_ERROR)

            # remove user from saved owners list
            (rm_status, rm_msg) = vgrid_remove_owners(configuration, vgrid_name,
                                                      [cert_id])
            if not rm_status:
                output_objects.append({'object_type': 'error_text', 'text'
                                       : '%s of owners of %s'
                                       % (rm_msg, vgrid_name)})
                return (output_objects, returnvalues.SYSTEM_ERROR)

            # Any parent vgrid membership is left untouched here as we only
            # force a normal refresh in unmap_inheritance
            unmap_inheritance(configuration, vgrid_name, cert_id)

            output_objects.append({'object_type': 'text', 'text'
                                   : '%s successfully removed as owner of %s!'
                                   % (cert_id, vgrid_name)})
            output_objects.append({'object_type': 'link', 'destination':
                                   'adminvgrid.py?vgrid_name=%s' % vgrid_name,
                                   'text':
                                   'Back to administration for %s' % vgrid_name})
            return (output_objects, returnvalues.OK)
    else:
        # no more direct owners - we try to remove this VGrid
        logger.debug('Leave %s from %s with no more direct owners: delete' %
                     (vgrid_name, cert_id))

        if not force(flags):
            output_objects.append({'object_type': 'text', 'text': '''
No more direct owners of %s - leaving will result in the %s getting
deleted. Please use either of the links below to confirm or cancel.
''' % (vgrid_name, configuration.site_vgrid_label)})
            js_name = 'rmvgridowner%s' % hexlify(vgrid_name)
            helper = html_post_helper(js_name, 'rmvgridowner.py',
                                      {'vgrid_name': vgrid_name,
                                       'cert_id': cert_id, 'flags': 'f'})
            output_objects.append({'object_type': 'html_form', 'text': helper})
            output_objects.append({'object_type': 'link', 'destination':
                                   "javascript: %s();" % js_name, 'class':
                                   'removelink', 'text':
                                   'Really leave and delete %s' % vgrid_name})
            output_objects.append({'object_type': 'text', 'text': ''})
            output_objects.append({'object_type': 'link', 'destination':
                                   'adminvgrid.py?vgrid_name=%s' % vgrid_name,
                                   'text': 'Back to administration for %s'
                                   % vgrid_name})
            return (output_objects, returnvalues.OK)

        # check if any resources participate or sub-vgrids depend on this one
        (status, subs) = vgrid_list_subvgrids(vgrid_name, configuration)
        if not status:
            logger.error('Error loading sub-%ss for %s: %s)'
                         % (configuration.site_vgrid_label, vgrid_name, subs))
            output_objects.append({'object_type': 'error_text', 'text': '''
An internal error occurred, error conditions have been logged.'''})
            output_objects.append({'object_type': 'text', 'text': '''
You can help us fix the problem by notifying the administrators
via mail about what you wanted to do when the error happened.'''})
            return (output_objects, returnvalues.CLIENT_ERROR)

        if len(subs) > 0:
            logger.debug('Cannot delete: still has sub-%ss %s.'
                         % (configuration.site_vgrid_label, subs))
            output_objects.append({'object_type': 'error_text', 'text':
                                   '%s has sub-structures and cannot be deleted.' % vgrid_name})
            output_objects.append({'object_type': 'text', 'text': '''
To leave (and delete) %s, first remove its sub-structures: %s.'''
                                   % (vgrid_name, ', '.join(subs))})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # we consider the local members and resources here, not inherited ones
        (member_status, members_direct) = vgrid_members(vgrid_name,
                                                        configuration,
                                                        False)
        (resource_status, resources_direct) = vgrid_resources(vgrid_name,
                                                              configuration,
                                                              False)
        if not member_status or not resource_status:
            logger.warning('failed to load %s members or resources: %s %s'
                           % (vgrid_name, members_direct, resources_direct))
            output_objects.append({'object_type': 'error_text', 'text':
                                   'could not load %s members or resources for %s.' %
                                   (configuration.site_vgrid_label,
                                    vgrid_name)})
            return (output_objects, returnvalues.SYSTEM_ERROR)

        if len(resources_direct) > 0:
            logger.debug('Cannot delete: still has direct resources %s.'
                         % resources_direct)
            output_objects.append({'object_type': 'error_text', 'text':
                                   '%s still has resources and cannot be deleted.' % vgrid_name})
            output_objects.append({'object_type': 'text', 'text': '''
To leave (and delete) %s, first remove the participating resources.'''
                                   % vgrid_name})
            return (output_objects, returnvalues.CLIENT_ERROR)

        if len(members_direct) > 0:
            logger.debug('Cannot delete: still has direct members %s.'
                         % members_direct)
            output_objects.append({'object_type': 'error_text', 'text':
                                   '%s still has members and cannot be deleted.' % vgrid_name})
            output_objects.append({'object_type': 'text', 'text': '''
To leave (and delete) %s, first remove all members.'''
                                   % vgrid_name})
            return (output_objects, returnvalues.CLIENT_ERROR)

        # When reaching here, OK to remove the VGrid.
        # if top-level: unlink, remove all files and directories,
        # in all cases: remove configuration entry for the VGrid
        if cert_id in owners_direct:
            # owner owns this vgrid, direct ownership
            # BUGFIX: the format arguments were swapped (label before name),
            # producing e.g. "VGrid looks like a top-level myvgrid" - compare
            # the sub-vgrid log message below for the intended order.
            logger.debug('%s looks like a top-level %s.' %
                         (vgrid_name, configuration.site_vgrid_label))
            logger.debug('Deleting all related files.')

            user_dir = os.path.abspath(os.path.join(configuration.user_home,
                                                    cert_dir)) + os.sep
            (share_lnk, msg1) = unlink_share(user_dir, vgrid_name)
            # BUGFIX: this result previously clobbered msg1, silently
            # dropping any unlink_share error from the joined log below.
            (web_lnk, msg2) = unlink_web_folders(user_dir, vgrid_name)
            (abandoned, msg3) = abandon_vgrid_files(vgrid_name, configuration)
        else:
            # owner owns an upper vgrid, ownership is inherited
            logger.debug('%s looks like a sub-%s, ownership inherited.'
                         % (vgrid_name, configuration.site_vgrid_label))
            logger.debug('Only removing entry, leaving files in place.')
            share_lnk = True
            web_lnk = True
            abandoned = True
            msg1 = ''
            msg2 = ''
            msg3 = ''
        (removed, msg4) = remove_vgrid_entry(vgrid_name, configuration)

        output_objects.append({'object_type': 'text', 'text'
                               : '%s has been removed with last owner.'
                               % vgrid_name})

        output_objects.append({'object_type': 'link',
                               'destination': 'vgridadmin.py',
                               'text': 'Back to the overview.'})

        if not share_lnk or not web_lnk or not abandoned or not removed:
            logger.error('Errors while removing %s:\n%s.'
                         % (vgrid_name, '\n'.join([msg1, msg2, msg3, msg4])))
            output_objects.append({'object_type': 'error_text', 'text': '''
An internal error occurred, error conditions have been logged.'''})
            output_objects.append({'object_type': 'text', 'text': '''
You can help us fix the problem by notifying the administrators
via mail about what you wanted to do when the error happened.'''})
            return (output_objects, returnvalues.CLIENT_ERROR)
        else:
            # Remove vgrid from vgrid cache (after deleting all)
            unmap_vgrid(configuration, vgrid_name)
            return (output_objects, returnvalues.OK)
| gpl-2.0 |
alexgorban/models | official/vision/detection/modeling/architecture/heads.py | 1 | 44483 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes to build various prediction heads in all supported models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import pickle
from absl import logging
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow.python.keras import backend
from official.vision.detection.modeling.architecture import nn_ops
from official.vision.detection.ops import spatial_transform_ops
class RpnHead(tf.keras.layers.Layer):
  """Region Proposal Network head.

  Applies one shared 3x3 conv followed by 1x1 objectness-score and box-delta
  convs to every FPN level; only the batch norms are per-level.
  """

  def __init__(self,
               min_level,
               max_level,
               anchors_per_location,
               num_convs=2,
               num_filters=256,
               use_separable_conv=False,
               use_batch_norm=True,
               batch_norm_relu=nn_ops.BatchNormRelu):
    """Initialize params to build Region Proposal Network head.

    Args:
      min_level: `int` number of minimum feature level.
      max_level: `int` number of maximum feature level.
      anchors_per_location: `int` number of number of anchors per pixel
        location.
      num_convs: `int` number that represents the number of the intermediate
        conv layers before the prediction.
      num_filters: `int` number that represents the number of filters of the
        intermediate conv layers.
      use_separable_conv: `bool`, indicating whether the separable conv layers
        is used.
      use_batch_norm: 'bool', indicating whether batchnorm layers are added.
      batch_norm_relu: an operation that includes a batch normalization layer
        followed by a relu layer(optional).
    """
    # BUGFIX: a tf.keras.layers.Layer subclass must call the base-class
    # constructor before assigning attributes on self; otherwise TF2 raises
    # a RuntimeError on the first attribute assignment.
    super(RpnHead, self).__init__()
    # NOTE(review): num_convs is accepted but currently unused - a single
    # shared conv (self._rpn_conv) is always applied. Kept for interface
    # compatibility.
    self._min_level = min_level
    self._max_level = max_level
    self._anchors_per_location = anchors_per_location
    self._use_batch_norm = use_batch_norm
    if use_separable_conv:
      self._conv2d_op = functools.partial(
          tf.keras.layers.SeparableConv2D,
          depth_multiplier=1,
          bias_initializer=tf.zeros_initializer())
    else:
      self._conv2d_op = functools.partial(
          tf.keras.layers.Conv2D,
          kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
          bias_initializer=tf.zeros_initializer())

    self._rpn_conv = self._conv2d_op(
        num_filters,
        kernel_size=(3, 3),
        strides=(1, 1),
        # When batch norm is used, the activation comes from batch_norm_relu.
        activation=(None if self._use_batch_norm else tf.nn.relu),
        padding='same',
        name='rpn')
    self._rpn_class_conv = self._conv2d_op(
        anchors_per_location,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding='valid',
        name='rpn-class')
    self._rpn_box_conv = self._conv2d_op(
        4 * anchors_per_location,
        kernel_size=(1, 1),
        strides=(1, 1),
        padding='valid',
        name='rpn-box')

    self._batch_norm_relus = {}
    if self._use_batch_norm:
      for level in range(self._min_level, self._max_level + 1):
        self._batch_norm_relus[level] = batch_norm_relu(name='rpn-l%d-bn' %
                                                        level)

  def _shared_rpn_heads(self, features, anchors_per_location, level,
                        is_training):
    """Shared RPN heads."""
    features = self._rpn_conv(features)
    if self._use_batch_norm:
      # The batch normalization layers are not shared between levels.
      features = self._batch_norm_relus[level](
          features, is_training=is_training)
    # Proposal classification scores
    scores = self._rpn_class_conv(features)
    # Proposal bbox regression deltas
    bboxes = self._rpn_box_conv(features)
    return scores, bboxes

  def __call__(self, features, is_training=None):
    """Returns per-level dicts of objectness scores and box deltas."""
    scores_outputs = {}
    box_outputs = {}
    with backend.get_graph().as_default(), tf.name_scope('rpn_head'):
      for level in range(self._min_level, self._max_level + 1):
        scores_output, box_output = self._shared_rpn_heads(
            features[level], self._anchors_per_location, level, is_training)
        scores_outputs[level] = scores_output
        box_outputs[level] = box_output
      return scores_outputs, box_outputs
class FastrcnnHead(tf.keras.layers.Layer):
  """Fast R-CNN box head."""

  def __init__(self,
               num_classes,
               num_convs=0,
               num_filters=256,
               use_separable_conv=False,
               num_fcs=2,
               fc_dims=1024,
               use_batch_norm=True,
               batch_norm_relu=nn_ops.BatchNormRelu):
    """Initialize params to build Fast R-CNN box head.

    Args:
      num_classes: a integer for the number of classes.
      num_convs: `int` number that represents the number of the intermediate
        conv layers before the FC layers.
      num_filters: `int` number that represents the number of filters of the
        intermediate conv layers.
      use_separable_conv: `bool`, indicating whether the separable conv layers
        is used.
      num_fcs: `int` number that represents the number of FC layers before the
        predictions.
      fc_dims: `int` number that represents the number of dimension of the FC
        layers.
      use_batch_norm: 'bool', indicating whether batchnorm layers are added.
      batch_norm_relu: an operation that includes a batch normalization layer
        followed by a relu layer(optional).
    """
    # BUGFIX: a tf.keras.layers.Layer subclass must call the base-class
    # constructor before assigning attributes on self; otherwise TF2 raises
    # a RuntimeError on the first attribute assignment.
    super(FastrcnnHead, self).__init__()
    self._num_classes = num_classes

    self._num_convs = num_convs
    self._num_filters = num_filters
    if use_separable_conv:
      self._conv2d_op = functools.partial(
          tf.keras.layers.SeparableConv2D,
          depth_multiplier=1,
          bias_initializer=tf.zeros_initializer())
    else:
      self._conv2d_op = functools.partial(
          tf.keras.layers.Conv2D,
          kernel_initializer=tf.keras.initializers.VarianceScaling(
              scale=2, mode='fan_out', distribution='untruncated_normal'),
          bias_initializer=tf.zeros_initializer())

    self._num_fcs = num_fcs
    self._fc_dims = fc_dims
    self._use_batch_norm = use_batch_norm
    self._batch_norm_relu = batch_norm_relu

    self._conv_ops = []
    self._conv_bn_ops = []
    for i in range(self._num_convs):
      self._conv_ops.append(
          self._conv2d_op(
              self._num_filters,
              kernel_size=(3, 3),
              strides=(1, 1),
              padding='same',
              dilation_rate=(1, 1),
              activation=(None if self._use_batch_norm else tf.nn.relu),
              name='conv_{}'.format(i)))
      if self._use_batch_norm:
        self._conv_bn_ops.append(self._batch_norm_relu())

    self._fc_ops = []
    self._fc_bn_ops = []
    for i in range(self._num_fcs):
      self._fc_ops.append(
          tf.keras.layers.Dense(
              units=self._fc_dims,
              activation=(None if self._use_batch_norm else tf.nn.relu),
              name='fc{}'.format(i)))
      if self._use_batch_norm:
        self._fc_bn_ops.append(self._batch_norm_relu(fused=False))

    self._class_predict = tf.keras.layers.Dense(
        self._num_classes,
        kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
        bias_initializer=tf.zeros_initializer(),
        name='class-predict')
    self._box_predict = tf.keras.layers.Dense(
        self._num_classes * 4,
        kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.001),
        bias_initializer=tf.zeros_initializer(),
        name='box-predict')

  def __call__(self, roi_features, is_training=None):
    """Box and class branches for the Mask-RCNN model.

    Args:
      roi_features: A ROI feature tensor of shape
        [batch_size, num_rois, height_l, width_l, num_filters].
      is_training: `boolean`, if True if model is in training mode.

    Returns:
      class_outputs: a tensor with a shape of
        [batch_size, num_rois, num_classes], representing the class predictions.
      box_outputs: a tensor with a shape of
        [batch_size, num_rois, num_classes * 4], representing the box
        predictions.
    """
    with backend.get_graph().as_default(), tf.name_scope('fast_rcnn_head'):
      # Flatten the ROI dimension into the batch dimension before the convs.
      _, num_rois, height, width, filters = roi_features.get_shape().as_list()
      net = tf.reshape(roi_features, [-1, height, width, filters])

      for i in range(self._num_convs):
        net = self._conv_ops[i](net)
        if self._use_batch_norm:
          net = self._conv_bn_ops[i](net, is_training=is_training)

      filters = self._num_filters if self._num_convs > 0 else filters
      # Restore the ROI dimension and flatten spatial dims for the FCs.
      net = tf.reshape(net, [-1, num_rois, height * width * filters])

      for i in range(self._num_fcs):
        net = self._fc_ops[i](net)
        if self._use_batch_norm:
          net = self._fc_bn_ops[i](net, is_training=is_training)

      class_outputs = self._class_predict(net)
      box_outputs = self._box_predict(net)
      return class_outputs, box_outputs
class MaskrcnnHead(tf.keras.layers.Layer):
  """Mask R-CNN head."""

  def __init__(self,
               num_classes,
               mask_target_size,
               num_convs=4,
               num_filters=256,
               use_separable_conv=False,
               use_batch_norm=True,
               batch_norm_relu=nn_ops.BatchNormRelu):
    """Initialize params to build Fast R-CNN head.

    Args:
      num_classes: a integer for the number of classes.
      mask_target_size: a integer that is the resolution of masks.
      num_convs: `int` number that represents the number of the intermediate
        conv layers before the prediction.
      num_filters: `int` number that represents the number of filters of the
        intermediate conv layers.
      use_separable_conv: `bool`, indicating whether the separable conv layers
        is used.
      use_batch_norm: 'bool', indicating whether batchnorm layers are added.
      batch_norm_relu: an operation that includes a batch normalization layer
        followed by a relu layer(optional).
    """
    # BUGFIX: a tf.keras.layers.Layer subclass must call the base-class
    # constructor before assigning attributes on self; otherwise TF2 raises
    # a RuntimeError on the first attribute assignment.
    super(MaskrcnnHead, self).__init__()
    self._num_classes = num_classes
    self._mask_target_size = mask_target_size

    self._num_convs = num_convs
    self._num_filters = num_filters
    if use_separable_conv:
      self._conv2d_op = functools.partial(
          tf.keras.layers.SeparableConv2D,
          depth_multiplier=1,
          bias_initializer=tf.zeros_initializer())
    else:
      self._conv2d_op = functools.partial(
          tf.keras.layers.Conv2D,
          kernel_initializer=tf.keras.initializers.VarianceScaling(
              scale=2, mode='fan_out', distribution='untruncated_normal'),
          bias_initializer=tf.zeros_initializer())
    self._use_batch_norm = use_batch_norm
    self._batch_norm_relu = batch_norm_relu

    self._conv2d_ops = []
    for i in range(self._num_convs):
      self._conv2d_ops.append(
          self._conv2d_op(
              self._num_filters,
              kernel_size=(3, 3),
              strides=(1, 1),
              padding='same',
              dilation_rate=(1, 1),
              activation=(None if self._use_batch_norm else tf.nn.relu),
              name='mask-conv-l%d' % i))
    # 2x upsampling from the ROI feature resolution to the mask resolution.
    self._mask_conv_transpose = tf.keras.layers.Conv2DTranspose(
        self._num_filters,
        kernel_size=(2, 2),
        strides=(2, 2),
        padding='valid',
        activation=(None if self._use_batch_norm else tf.nn.relu),
        kernel_initializer=tf.keras.initializers.VarianceScaling(
            scale=2, mode='fan_out', distribution='untruncated_normal'),
        bias_initializer=tf.zeros_initializer(),
        name='conv5-mask')

  def __call__(self, roi_features, class_indices, is_training=None):
    """Mask branch for the Mask-RCNN model.

    Args:
      roi_features: A ROI feature tensor of shape
        [batch_size, num_rois, height_l, width_l, num_filters].
      class_indices: a Tensor of shape [batch_size, num_rois], indicating
        which class the ROI is.
      is_training: `boolean`, if True if model is in training mode.

    Returns:
      mask_outputs: a tensor with a shape of
        [batch_size, num_masks, mask_height, mask_width, num_classes],
        representing the mask predictions.
      fg_gather_indices: a tensor with a shape of [batch_size, num_masks, 2],
        representing the fg mask targets.

    Raises:
      ValueError: If boxes is not a rank-3 tensor or the last dimension of
        boxes is not 4.
    """
    # NOTE(review): self._batch_norm_relu() and the final self._conv2d_op(...)
    # below instantiate NEW layers on every invocation rather than reusing
    # layers created in __init__ - confirm whether weight reuse across calls
    # is intended before changing, since hoisting them would alter variable
    # names and break existing checkpoints.
    with backend.get_graph().as_default():
      with tf.name_scope('mask_head'):
        _, num_rois, height, width, filters = roi_features.get_shape().as_list()
        net = tf.reshape(roi_features, [-1, height, width, filters])

        for i in range(self._num_convs):
          net = self._conv2d_ops[i](net)
          if self._use_batch_norm:
            net = self._batch_norm_relu()(net, is_training=is_training)

        net = self._mask_conv_transpose(net)
        if self._use_batch_norm:
          net = self._batch_norm_relu()(net, is_training=is_training)

        mask_outputs = self._conv2d_op(
            self._num_classes,
            kernel_size=(1, 1),
            strides=(1, 1),
            padding='valid',
            name='mask_fcn_logits')(
                net)
        mask_outputs = tf.reshape(mask_outputs, [
            -1, num_rois, self._mask_target_size, self._mask_target_size,
            self._num_classes
        ])

      with tf.name_scope('masks_post_processing'):
        # TODO(pengchong): Figure out the way not to use the static inferred
        # batch size.
        batch_size, num_masks = class_indices.get_shape().as_list()
        mask_outputs = tf.transpose(a=mask_outputs, perm=[0, 1, 4, 2, 3])
        # Contructs indices for gather.
        batch_indices = tf.tile(
            tf.expand_dims(tf.range(batch_size), axis=1), [1, num_masks])
        mask_indices = tf.tile(
            tf.expand_dims(tf.range(num_masks), axis=0), [batch_size, 1])
        gather_indices = tf.stack(
            [batch_indices, mask_indices, class_indices], axis=2)
        # Select, per ROI, only the mask channel of its predicted class.
        mask_outputs = tf.gather_nd(mask_outputs, gather_indices)
      return mask_outputs
class RetinanetHead(object):
  """RetinaNet head.

  Builds class and box prediction subnets whose conv layers are shared
  across all FPN levels in [min_level, max_level]; only the batch norms
  are created per level.
  """

  def __init__(self,
               min_level,
               max_level,
               num_classes,
               anchors_per_location,
               num_convs=4,
               num_filters=256,
               use_separable_conv=False,
               batch_norm_relu=nn_ops.BatchNormRelu):
    """Initialize params to build RetinaNet head.

    Args:
      min_level: `int` number of minimum feature level.
      max_level: `int` number of maximum feature level.
      num_classes: `int` number of classification categories.
      anchors_per_location: `int` number of anchors per pixel location.
      num_convs: `int` number of stacked convolution before the last prediction
        layer.
      num_filters: `int` number of filters used in the head architecture.
      use_separable_conv: `bool` to indicate whether to use separable
        convolution.
      batch_norm_relu: an operation that includes a batch normalization layer
        followed by a relu layer(optional).
    """
    self._min_level = min_level
    self._max_level = max_level

    self._num_classes = num_classes
    self._anchors_per_location = anchors_per_location

    self._num_convs = num_convs
    self._num_filters = num_filters
    self._use_separable_conv = use_separable_conv
    # Capture the two name scopes once so repeated __call__ invocations
    # reuse the same scope names instead of getting uniquified copies.
    with tf.name_scope('class_net') as scope_name:
      self._class_name_scope = tf.name_scope(scope_name)
    with tf.name_scope('box_net') as scope_name:
      self._box_name_scope = tf.name_scope(scope_name)
    self._build_class_net_layers(batch_norm_relu)
    self._build_box_net_layers(batch_norm_relu)

  def _class_net_batch_norm_name(self, i, level):
    # One batch norm per (conv index, FPN level) pair.
    return 'class-%d-%d' % (i, level)

  def _box_net_batch_norm_name(self, i, level):
    # One batch norm per (conv index, FPN level) pair.
    return 'box-%d-%d' % (i, level)

  def _build_class_net_layers(self, batch_norm_relu):
    """Build re-usable layers for class prediction network."""
    if self._use_separable_conv:
      # Bias -log((1-0.01)/0.01) makes the initial sigmoid output ~0.01,
      # the focal-loss prior initialization from the RetinaNet paper.
      self._class_predict = tf.keras.layers.SeparableConv2D(
          self._num_classes * self._anchors_per_location,
          kernel_size=(3, 3),
          bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
          padding='same',
          name='class-predict')
    else:
      self._class_predict = tf.keras.layers.Conv2D(
          self._num_classes * self._anchors_per_location,
          kernel_size=(3, 3),
          bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
          kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5),
          padding='same',
          name='class-predict')

    self._class_conv = []
    self._class_batch_norm_relu = {}
    for i in range(self._num_convs):
      if self._use_separable_conv:
        self._class_conv.append(
            tf.keras.layers.SeparableConv2D(
                self._num_filters,
                kernel_size=(3, 3),
                bias_initializer=tf.zeros_initializer(),
                activation=None,
                padding='same',
                name='class-' + str(i)))
      else:
        self._class_conv.append(
            tf.keras.layers.Conv2D(
                self._num_filters,
                kernel_size=(3, 3),
                bias_initializer=tf.zeros_initializer(),
                kernel_initializer=tf.keras.initializers.RandomNormal(
                    stddev=0.01),
                activation=None,
                padding='same',
                name='class-' + str(i)))
      for level in range(self._min_level, self._max_level + 1):
        name = self._class_net_batch_norm_name(i, level)
        self._class_batch_norm_relu[name] = batch_norm_relu(name=name)

  def _build_box_net_layers(self, batch_norm_relu):
    """Build re-usable layers for box prediction network."""
    if self._use_separable_conv:
      self._box_predict = tf.keras.layers.SeparableConv2D(
          4 * self._anchors_per_location,
          kernel_size=(3, 3),
          bias_initializer=tf.zeros_initializer(),
          padding='same',
          name='box-predict')
    else:
      self._box_predict = tf.keras.layers.Conv2D(
          4 * self._anchors_per_location,
          kernel_size=(3, 3),
          bias_initializer=tf.zeros_initializer(),
          kernel_initializer=tf.keras.initializers.RandomNormal(stddev=1e-5),
          padding='same',
          name='box-predict')

    self._box_conv = []
    self._box_batch_norm_relu = {}
    for i in range(self._num_convs):
      if self._use_separable_conv:
        self._box_conv.append(
            tf.keras.layers.SeparableConv2D(
                self._num_filters,
                kernel_size=(3, 3),
                activation=None,
                bias_initializer=tf.zeros_initializer(),
                padding='same',
                name='box-' + str(i)))
      else:
        self._box_conv.append(
            tf.keras.layers.Conv2D(
                self._num_filters,
                kernel_size=(3, 3),
                activation=None,
                bias_initializer=tf.zeros_initializer(),
                kernel_initializer=tf.keras.initializers.RandomNormal(
                    stddev=0.01),
                padding='same',
                name='box-' + str(i)))
      for level in range(self._min_level, self._max_level + 1):
        name = self._box_net_batch_norm_name(i, level)
        self._box_batch_norm_relu[name] = batch_norm_relu(name=name)

  def __call__(self, fpn_features, is_training=None):
    """Returns outputs of RetinaNet head.

    Returns per-level dicts of class logits and box regression outputs,
    keyed by FPN level.
    """
    class_outputs = {}
    box_outputs = {}
    with backend.get_graph().as_default(), tf.name_scope('retinanet'):
      for level in range(self._min_level, self._max_level + 1):
        features = fpn_features[level]

        class_outputs[level] = self.class_net(
            features, level, is_training=is_training)
        box_outputs[level] = self.box_net(
            features, level, is_training=is_training)
    return class_outputs, box_outputs

  def class_net(self, features, level, is_training):
    """Class prediction network for RetinaNet."""
    with self._class_name_scope:
      for i in range(self._num_convs):
        features = self._class_conv[i](features)
        # The convolution layers in the class net are shared among all levels,
        # but each level has its batch normlization to capture the statistical
        # difference among different levels.
        name = self._class_net_batch_norm_name(i, level)
        features = self._class_batch_norm_relu[name](
            features, is_training=is_training)

      classes = self._class_predict(features)
    return classes

  def box_net(self, features, level, is_training=None):
    """Box regression network for RetinaNet."""
    with self._box_name_scope:
      for i in range(self._num_convs):
        features = self._box_conv[i](features)
        # The convolution layers in the box net are shared among all levels,
        # but each level has its batch normlization to capture the statistical
        # difference among different levels.
        name = self._box_net_batch_norm_name(i, level)
        features = self._box_batch_norm_relu[name](
            features, is_training=is_training)

      boxes = self._box_predict(features)
    return boxes
# TODO(yeqing): Refactor this class when it is ready for var_scope reuse.
class ShapemaskPriorHead(object):
  """ShapeMask Prior head.

  Predicts a per-instance shape prior by classifying each detection crop into
  one of K clustered prototype masks and blending the prototypes by the
  predicted probabilities (Fig. 4 of https://arxiv.org/pdf/1904.03239.pdf).
  """

  def __init__(self,
               num_classes,
               num_downsample_channels,
               mask_crop_size,
               use_category_for_mask,
               num_of_instances,
               min_mask_level,
               max_mask_level,
               num_clusters,
               temperature,
               shape_prior_path=None):
    """Initialize params to build the ShapeMask prior head.

    Args:
      num_classes: Number of output classes.
      num_downsample_channels: number of channels in mask branch.
      mask_crop_size: feature crop size.
      use_category_for_mask: use class information in mask branch.
      num_of_instances: number of instances to sample in training time.
      min_mask_level: minimum FPN level to crop mask feature from.
      max_mask_level: maximum FPN level to crop mask feature from.
      num_clusters: number of clusters to use in K-Means.
      temperature: the temperature for shape prior learning.
      shape_prior_path: the path to load shape priors.
    """
    self._mask_num_classes = num_classes
    self._num_downsample_channels = num_downsample_channels
    self._mask_crop_size = mask_crop_size
    self._use_category_for_mask = use_category_for_mask
    self._num_of_instances = num_of_instances
    self._min_mask_level = min_mask_level
    self._max_mask_level = max_mask_level
    self._num_clusters = num_clusters
    self._temperature = temperature
    # If None, a zero prior is used instead of pre-clustered shapes.
    self._shape_prior_path = shape_prior_path

  def __call__(self,
               fpn_features,
               boxes,
               outer_boxes,
               classes,
               is_training=None):
    """Generate the detection priors from the box detections and FPN features.

    This corresponds to the Fig. 4 of the ShapeMask paper at
    https://arxiv.org/pdf/1904.03239.pdf

    Args:
      fpn_features: a dictionary of FPN features.
      boxes: a float tensor of shape [batch_size, num_instances, 4]
        representing the tight gt boxes from dataloader/detection.
      outer_boxes: a float tensor of shape [batch_size, num_instances, 4]
        representing the loose gt boxes from dataloader/detection.
      classes: a int Tensor of shape [batch_size, num_instances]
        of instance classes.
      is_training: training mode or not.

    Returns:
      crop_features: a float Tensor of shape [batch_size * num_instances,
        mask_crop_size, mask_crop_size, num_downsample_channels]. This is the
        instance feature crop.
      detection_priors: A float Tensor of shape [batch_size * num_instances,
        mask_size, mask_size, 1].
    """
    with backend.get_graph().as_default():
      # loads class specific or agnostic shape priors
      if self._shape_prior_path:
        if self._use_category_for_mask:
          # Class-specific priors: pickled list of (class_id, _, masks) tuples.
          fid = tf.io.gfile.GFile(self._shape_prior_path, 'rb')
          # The encoding='bytes' options is for incompatibility between python2
          # and python3 pickle.
          class_tups = pickle.load(fid, encoding='bytes')
          # Class ids are assumed sorted; the last tuple defines the max id.
          max_class_id = class_tups[-1][0] + 1
          class_masks = np.zeros((max_class_id, self._num_clusters,
                                  self._mask_crop_size, self._mask_crop_size),
                                 dtype=np.float32)
          for cls_id, _, cls_mask in class_tups:
            assert cls_mask.shape == (self._num_clusters,
                                      self._mask_crop_size**2)
            class_masks[cls_id] = cls_mask.reshape(self._num_clusters,
                                                   self._mask_crop_size,
                                                   self._mask_crop_size)

          self.class_priors = tf.convert_to_tensor(
              value=class_masks, dtype=tf.float32)
        else:
          # Class-agnostic priors stored as a single .npy array.
          npy_path = tf.io.gfile.GFile(self._shape_prior_path)
          class_np_masks = np.load(npy_path)
          assert class_np_masks.shape == (
              self._num_clusters, self._mask_crop_size,
              self._mask_crop_size), 'Invalid priors!!!'
          self.class_priors = tf.convert_to_tensor(
              value=class_np_masks, dtype=tf.float32)
      else:
        # No file given: fall back to an all-zero prior.
        self.class_priors = tf.zeros(
            [self._num_clusters, self._mask_crop_size, self._mask_crop_size],
            tf.float32)

      batch_size = boxes.get_shape()[0]
      min_level_shape = fpn_features[self._min_mask_level].get_shape().as_list()
      # NOTE(review): assumes square features; width (index 2) is not checked.
      self._max_feature_size = min_level_shape[1]
      detection_prior_levels = self._compute_box_levels(boxes)
      # Scale outer boxes down to the coordinate frame of their chosen level.
      level_outer_boxes = outer_boxes / tf.pow(
          2., tf.expand_dims(detection_prior_levels, -1))
      detection_prior_levels = tf.cast(detection_prior_levels, tf.int32)
      # Uniform (all-ones) priors cropped to each instance's box region.
      uniform_priors = spatial_transform_ops.crop_mask_in_target_box(
          tf.ones([
              batch_size, self._num_of_instances, self._mask_crop_size,
              self._mask_crop_size
          ], tf.float32), boxes, outer_boxes, self._mask_crop_size)

      # Prepare crop features.
      multi_level_features = self._get_multilevel_features(fpn_features)
      crop_features = spatial_transform_ops.single_level_feature_crop(
          multi_level_features, level_outer_boxes, detection_prior_levels,
          self._min_mask_level, self._mask_crop_size)

      # Predict and fuse shape priors.
      shape_weights = self._classify_and_fuse_detection_priors(
          uniform_priors, classes, crop_features)
      fused_shape_priors = self._fuse_priors(shape_weights, classes)
      fused_shape_priors = tf.reshape(fused_shape_priors, [
          batch_size, self._num_of_instances, self._mask_crop_size,
          self._mask_crop_size
      ])
      # Re-crop the fused prior into the instance's box region.
      predicted_detection_priors = spatial_transform_ops.crop_mask_in_target_box(
          fused_shape_priors, boxes, outer_boxes, self._mask_crop_size)
      predicted_detection_priors = tf.reshape(
          predicted_detection_priors,
          [-1, self._mask_crop_size, self._mask_crop_size, 1])

    return crop_features, predicted_detection_priors

  def _get_multilevel_features(self, fpn_features):
    """Get multilevel features from FPN feature dictionary into one tensor.

    Args:
      fpn_features: a dictionary of FPN features.

    Returns:
      features: a float tensor of shape [batch_size, num_levels,
        max_feature_size, max_feature_size, num_downsample_channels].
    """
    # TODO(yeqing): Recover reuse=tf.AUTO_REUSE logic.
    with tf.name_scope('masknet'):
      mask_feats = {}
      # Reduce the feature dimension at each FPN level by convolution.
      for feat_level in range(self._min_mask_level, self._max_mask_level + 1):
        mask_feats[feat_level] = tf.keras.layers.Conv2D(
            self._num_downsample_channels,
            kernel_size=(1, 1),
            bias_initializer=tf.zeros_initializer(),
            kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
            padding='same',
            name='mask-downsample')(
                fpn_features[feat_level])

      # Concat features through padding to the max size.
      # The min level has the largest spatial size, so it needs no padding.
      features = [mask_feats[self._min_mask_level]]
      for feat_level in range(self._min_mask_level + 1,
                              self._max_mask_level + 1):
        features.append(tf.image.pad_to_bounding_box(
            mask_feats[feat_level], 0, 0,
            self._max_feature_size, self._max_feature_size))
      features = tf.stack(features, axis=1)
    return features

  def _compute_box_levels(self, boxes):
    """Compute the box FPN levels.

    Args:
      boxes: a float tensor of shape [batch_size, num_instances, 4].

    Returns:
      levels: a int tensor of shape [batch_size, num_instances].
    """
    # Use the longer side of each box to pick its level.
    object_sizes = tf.stack([
        boxes[:, :, 2] - boxes[:, :, 0],
        boxes[:, :, 3] - boxes[:, :, 1],
    ], axis=2)
    object_sizes = tf.reduce_max(input_tensor=object_sizes, axis=2)
    ratios = object_sizes / self._mask_crop_size
    # level = ceil(log2(size / crop_size)), clamped to the valid level range.
    levels = tf.math.ceil(tf.math.log(ratios) / tf.math.log(2.))
    levels = tf.maximum(tf.minimum(levels, self._max_mask_level),
                        self._min_mask_level)
    return levels

  def _classify_and_fuse_detection_priors(self, uniform_priors,
                                          detection_prior_classes,
                                          crop_features):
    """Classify the uniform prior by predicting the shape modes.

    Classify the object crop features into K modes of the clusters for each
    category.

    Args:
      uniform_priors: A float Tensor of shape [batch_size, num_instances,
        mask_size, mask_size] representing the uniform detection priors.
      detection_prior_classes: A int Tensor of shape [batch_size, num_instances]
        of detection class ids.
      crop_features: A float Tensor of shape [batch_size * num_instances,
        mask_size, mask_size, num_channels].

    Returns:
      shape_weights: A float Tensor of shape
        [batch_size * num_instances, num_clusters] representing the classifier
        output probability over all possible shapes.
    """
    location_detection_priors = tf.reshape(
        uniform_priors, [-1, self._mask_crop_size, self._mask_crop_size, 1])
    # Generate image embedding to shape.
    # Mask out features outside the box, then average-pool spatially.
    fused_shape_features = crop_features * location_detection_priors
    shape_embedding = tf.reduce_mean(
        input_tensor=fused_shape_features, axis=(1, 2))

    if not self._use_category_for_mask:
      # TODO(weicheng) use custom op for performance
      shape_logits = tf.keras.layers.Dense(
          self._num_clusters,
          kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01))(
              shape_embedding)
      # Temperature sharpens/softens the softmax over cluster assignments.
      shape_logits = tf.reshape(shape_logits,
                                [-1, self._num_clusters]) / self._temperature
      shape_weights = tf.nn.softmax(shape_logits, name='shape_prior_weights')
    else:
      shape_logits = tf.keras.layers.Dense(
          self._mask_num_classes * self._num_clusters,
          kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01))(
              shape_embedding)
      shape_logits = tf.reshape(
          shape_logits, [-1, self._mask_num_classes, self._num_clusters])
      training_classes = tf.reshape(detection_prior_classes, [-1])
      # Pick each instance's own class row; class ids are 1-based here.
      class_idx = tf.stack(
          [tf.range(tf.size(input=training_classes)), training_classes - 1],
          axis=1)
      shape_logits = tf.gather_nd(shape_logits, class_idx) / self._temperature
      shape_weights = tf.nn.softmax(shape_logits, name='shape_prior_weights')
    return shape_weights

  def _fuse_priors(self, shape_weights, detection_prior_classes):
    """Fuse shape priors by the predicted shape probability.

    Args:
      shape_weights: A float Tensor of shape [batch_size * num_instances,
        num_clusters] of predicted shape probability distribution.
      detection_prior_classes: A int Tensor of shape [batch_size, num_instances]
        of detection class ids.

    Returns:
      detection_priors: A float Tensor of shape [batch_size * num_instances,
        mask_size, mask_size, 1].
    """
    if self._use_category_for_mask:
      # One prototype bank per class: select each instance's bank.
      object_class_priors = tf.gather(
          self.class_priors, detection_prior_classes)
    else:
      # Single shared bank: broadcast it to every instance.
      num_batch_instances = shape_weights.get_shape()[0]
      object_class_priors = tf.tile(
          tf.expand_dims(self.class_priors, 0),
          [num_batch_instances, 1, 1, 1])
    vector_class_priors = tf.reshape(
        object_class_priors,
        [-1, self._num_clusters,
         self._mask_crop_size * self._mask_crop_size])
    # Weighted sum of the K prototype masks per instance.
    detection_priors = tf.matmul(
        tf.expand_dims(shape_weights, 1), vector_class_priors)[:, 0, :]
    detection_priors = tf.reshape(
        detection_priors, [-1, self._mask_crop_size, self._mask_crop_size, 1])
    return detection_priors
class ShapemaskCoarsemaskHead(object):
  """ShapemaskCoarsemaskHead head.

  Refines the detection prior into a coarse instance mask (Fig. 5-6 of
  https://arxiv.org/pdf/1904.03239.pdf).
  """

  def __init__(self,
               num_classes,
               num_downsample_channels,
               mask_crop_size,
               use_category_for_mask,
               num_convs):
    """Initialize params to build ShapeMask coarse and fine prediction head.

    Args:
      num_classes: `int` number of mask classification categories.
      num_downsample_channels: `int` number of filters at mask head.
      mask_crop_size: feature crop size.
      use_category_for_mask: use class information in mask branch.
      num_convs: `int` number of stacked convolution before the last prediction
        layer.
    """
    self._mask_num_classes = num_classes
    self._num_downsample_channels = num_downsample_channels
    self._mask_crop_size = mask_crop_size
    self._use_category_for_mask = use_category_for_mask
    self._num_convs = num_convs
    # Class-agnostic masks only make sense with a single output channel.
    if not use_category_for_mask:
      assert num_classes == 1

  def __call__(self,
               crop_features,
               detection_priors,
               inst_classes,
               is_training=None):
    """Generate instance masks from FPN features and detection priors.

    This corresponds to the Fig. 5-6 of the ShapeMask paper at
    https://arxiv.org/pdf/1904.03239.pdf

    Args:
      crop_features: a float Tensor of shape [batch_size * num_instances,
        mask_crop_size, mask_crop_size, num_downsample_channels]. This is the
        instance feature crop.
      detection_priors: a float Tensor of shape [batch_size * num_instances,
        mask_crop_size, mask_crop_size, 1]. This is the detection prior for
        the instance.
      inst_classes: a int Tensor of shape [batch_size, num_instances]
        of instance classes.
      is_training: a bool indicating whether in training mode.

    Returns:
      mask_outputs: instance mask prediction as a float Tensor of shape
        [batch_size * num_instances, mask_size, mask_size, num_classes].
    """
    # Embed the anchor map into some feature space for anchor conditioning.
    detection_prior_features = tf.keras.layers.Conv2D(
        self._num_downsample_channels,
        kernel_size=(1, 1),
        bias_initializer=tf.zeros_initializer(),
        kernel_initializer=tf.keras.initializers.RandomNormal(
            mean=0., stddev=0.01),
        padding='same',
        name='anchor-conv')(
            detection_priors)

    # Condition the feature crop on the prior by simple addition.
    prior_conditioned_features = crop_features + detection_prior_features
    coarse_output_features = self.coarsemask_decoder_net(
        prior_conditioned_features, is_training)
    coarse_mask_classes = tf.keras.layers.Conv2D(
        self._mask_num_classes,
        kernel_size=(1, 1),
        # Focal loss bias initialization to have foreground 0.01 probability.
        bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
        kernel_initializer=tf.keras.initializers.RandomNormal(
            mean=0, stddev=0.01),
        padding='same',
        name='class-predict')(
            coarse_output_features)

    if self._use_category_for_mask:
      # Keep only the mask channel matching each instance's class.
      inst_classes = tf.cast(tf.reshape(inst_classes, [-1]), tf.int32)
      coarse_mask_classes_t = tf.transpose(
          a=coarse_mask_classes, perm=(0, 3, 1, 2))
      # pylint: disable=g-long-lambda
      # Guard against an empty batch: gather_nd on zero instances would fail.
      coarse_mask_logits = tf.cond(
          pred=tf.size(input=inst_classes) > 0,
          true_fn=lambda: tf.gather_nd(
              coarse_mask_classes_t,
              tf.stack(
                  [tf.range(tf.size(input=inst_classes)), inst_classes - 1],
                  axis=1)),
          false_fn=lambda: coarse_mask_classes_t[:, 0, :, :])
      # pylint: enable=g-long-lambda
      coarse_mask_logits = tf.expand_dims(coarse_mask_logits, -1)
    else:
      coarse_mask_logits = coarse_mask_classes

    coarse_class_probs = tf.nn.sigmoid(coarse_mask_logits)
    class_probs = tf.cast(coarse_class_probs, prior_conditioned_features.dtype)

    return coarse_mask_classes, class_probs, prior_conditioned_features

  def coarsemask_decoder_net(self,
                             images,
                             is_training=None,
                             batch_norm_relu=nn_ops.BatchNormRelu):
    """Coarse mask decoder network architecture.

    Args:
      images: A tensor of size [batch, height_in, width_in, channels_in].
      is_training: Whether batch_norm layers are in training mode.
      batch_norm_relu: an operation that includes a batch normalization layer
        followed by a relu layer(optional).

    Returns:
      images: A feature tensor of size [batch, output_size, output_size,
        num_channels]
    """
    # Stack of conv -> batch-norm -> relu blocks; spatial size is preserved.
    for i in range(self._num_convs):
      images = tf.keras.layers.Conv2D(
          self._num_downsample_channels,
          kernel_size=(3, 3),
          bias_initializer=tf.zeros_initializer(),
          kernel_initializer=tf.keras.initializers.RandomNormal(stddev=0.01),
          activation=None,
          padding='same',
          name='coarse-class-%d' % i)(
              images)
      images = batch_norm_relu(name='coarse-class-%d-bn' % i)(
          images, is_training=is_training)

    return images
class ShapemaskFinemaskHead(object):
  """ShapemaskFinemaskHead head.

  Refines the coarse mask into a fine (optionally upsampled) instance mask
  using instance-conditioned features (Fig. 5-6 of
  https://arxiv.org/pdf/1904.03239.pdf).
  """

  def __init__(self,
               num_classes,
               num_downsample_channels,
               mask_crop_size,
               num_convs,
               coarse_mask_thr,
               gt_upsample_scale,
               batch_norm_relu=nn_ops.BatchNormRelu):
    """Initialize params to build ShapeMask coarse and fine prediction head.

    Args:
      num_classes: `int` number of mask classification categories.
      num_downsample_channels: `int` number of filters at mask head.
      mask_crop_size: feature crop size.
      num_convs: `int` number of stacked convolution before the last prediction
        layer.
      coarse_mask_thr: the threshold for suppressing noisy coarse prediction.
      gt_upsample_scale: scale for upsampling groundtruths.
      batch_norm_relu: an operation that includes a batch normalization layer
        followed by a relu layer(optional).
    """
    self._mask_num_classes = num_classes
    self._num_downsample_channels = num_downsample_channels
    self._mask_crop_size = mask_crop_size
    self._num_convs = num_convs
    self._coarse_mask_thr = coarse_mask_thr
    self._gt_upsample_scale = gt_upsample_scale
    # Layers are created once here and reused across calls.
    self._class_predict_conv = tf.keras.layers.Conv2D(
        self._mask_num_classes,
        kernel_size=(1, 1),
        # Focal loss bias initialization to have foreground 0.01 probability.
        bias_initializer=tf.constant_initializer(-np.log((1 - 0.01) / 0.01)),
        kernel_initializer=tf.keras.initializers.RandomNormal(
            mean=0, stddev=0.01),
        padding='same',
        name='affinity-class-predict')
    # Transposed conv upsamples by gt_upsample_scale in each spatial dim.
    self._upsample_conv = tf.keras.layers.Conv2DTranspose(
        self._num_downsample_channels // 2,
        (self._gt_upsample_scale, self._gt_upsample_scale),
        (self._gt_upsample_scale, self._gt_upsample_scale))
    self._fine_class_conv = []
    self._fine_class_bn = []
    for i in range(self._num_convs):
      self._fine_class_conv.append(
          tf.keras.layers.Conv2D(
              self._num_downsample_channels,
              kernel_size=(3, 3),
              bias_initializer=tf.zeros_initializer(),
              kernel_initializer=tf.keras.initializers.RandomNormal(
                  stddev=0.01),
              activation=None,
              padding='same',
              name='fine-class-%d' % i))
      self._fine_class_bn.append(batch_norm_relu(name='fine-class-%d-bn' % i))

  def __call__(self, prior_conditioned_features, class_probs, is_training=None):
    """Generate instance masks from FPN features and detection priors.

    This corresponds to the Fig. 5-6 of the ShapeMask paper at
    https://arxiv.org/pdf/1904.03239.pdf

    Args:
      prior_conditioned_features: a float Tensor of shape [batch_size *
        num_instances, mask_crop_size, mask_crop_size, num_downsample_channels].
        This is the instance feature crop.
      class_probs: a float Tensor of shape [batch_size * num_instances,
        mask_crop_size, mask_crop_size, 1]. This is the class probability of
        instance segmentation.
      is_training: a bool indicating whether in training mode.

    Returns:
      mask_outputs: instance mask prediction as a float Tensor of shape
        [batch_size * num_instances, mask_size, mask_size, num_classes].
    """
    with backend.get_graph().as_default(), tf.name_scope('affinity-masknet'):
      # Extract the foreground mean features
      # coarse_mask_thr is a logit threshold; convert it to probability space.
      point_samp_prob_thr = 1. / (1. + tf.exp(-self._coarse_mask_thr))
      point_samp_prob_thr = tf.cast(point_samp_prob_thr, class_probs.dtype)
      # Zero out low-confidence pixels to suppress noisy coarse predictions.
      class_probs = tf.where(
          tf.greater(class_probs, point_samp_prob_thr), class_probs,
          tf.zeros_like(class_probs))
      weighted_features = class_probs * prior_conditioned_features
      # Probability-weighted spatial mean; epsilon avoids division by zero
      # when an instance has no foreground pixels.
      sum_class_vector = tf.reduce_sum(
          input_tensor=class_probs, axis=(1, 2)) + tf.constant(
              1e-20, class_probs.dtype)
      instance_embedding = tf.reduce_sum(
          input_tensor=weighted_features, axis=(1, 2)) / sum_class_vector

      # Take the difference between crop features and mean instance features.
      instance_features = prior_conditioned_features - tf.reshape(
          instance_embedding, (-1, 1, 1, self._num_downsample_channels))

      # Decoder to generate upsampled segmentation mask.
      affinity_output_features = self.finemask_decoder_net(
          instance_features, is_training)

      # Predict per-class instance masks.
      affinity_mask_classes = self._class_predict_conv(affinity_output_features)
      return affinity_mask_classes

  def finemask_decoder_net(self, images, is_training=None):
    """Fine mask decoder network architecture.

    Args:
      images: A tensor of size [batch, height_in, width_in, channels_in].
      is_training: Whether batch_norm layers are in training mode.

    Returns:
      images: A feature tensor of size [batch, output_size, output_size,
        num_channels], where output size is self._gt_upsample_scale times
        that of input.
    """
    for i in range(self._num_convs):
      images = self._fine_class_conv[i](images)
      images = self._fine_class_bn[i](images, is_training=is_training)

    # Only upsample when a scale > 1 was requested.
    if self._gt_upsample_scale > 1:
      images = self._upsample_conv(images)

    return images
| apache-2.0 |
julienr/vispy | vispy/gloo/gl/tests/test_use.py | 18 | 1865 | """ Test the use function.
"""
from vispy.testing import assert_is, requires_pyopengl
from vispy.gloo import gl
from vispy.testing import run_tests_if_main
def teardown_module():
    """Pytest module-level teardown: restore the default GL backend."""
    gl.use_gl()  # Reset to default
@requires_pyopengl()
def test_use_desktop():
    """ Testing that gl.use injects all names in gl namespace """

    def _assert_gl_names_from(backend):
        # Every ES2 name (those starting with 'gl') must be the exact same
        # object in the top-level gl namespace as in the given backend.
        for name in dir(gl.gl2):
            if name.lower().startswith('gl'):
                assert_is(getattr(gl, name), getattr(backend, name))

    # Use desktop
    gl.use_gl('gl2')
    _assert_gl_names_from(gl.gl2)

    # Use pyopengl
    gl.use_gl('pyopengl2')
    _assert_gl_names_from(gl.pyopengl2)

    # Use gl+
    gl.use_gl('gl+')
    # uses all ES2 names from pyopengl2 backend
    _assert_gl_names_from(gl.pyopengl2)
    # But provides extra names too
    for name in dir(gl.glplus):
        if name.lower().startswith('gl'):
            assert_is(getattr(gl, name), getattr(gl.glplus, name))

    # Use dummy
    gl.use_gl('dummy')
    _assert_gl_names_from(gl.dummy)

    # Touch debug wrapper stuff
    gl.use_gl('gl2 debug')

    # Use desktop again
    gl.use_gl('gl2')
    _assert_gl_names_from(gl.gl2)
# Run the tests in this module when it is executed as a script.
run_tests_if_main()
| bsd-3-clause |
supercollider/supercollider | editors/sced/scedwin/py/ConfigurationDialog.py | 44 | 6061 | # sced (SuperCollider mode for gedit)
#
# Copyright 2012 Jakob Leben
# Copyright 2009 Artem Popov and other contributors (see AUTHORS)
#
# sced is free software:
# you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gobject
import gtk
from Settings import Settings
def on_pref_widget_notify_sensitive(widget, spec):
    """Keep a preference widget's label in sync with the widget's sensitivity.

    The label (if any) was attached to the widget via set_data("pref-label")
    when the preference row was built.
    """
    paired_label = widget.get_data("pref-label")
    if paired_label is None:
        return
    paired_label.set_sensitive(widget.props.sensitive)
# FIXME: implement custom widget (or custom widget sequence) as well
def create_pref_section(title, wlabels=(), custom=()):
    """Build a titled preference section as a vertical box.

    Args:
        title: section heading text (rendered bold via Pango markup).
        wlabels: sequence of (label_text, widget) pairs; label_text may be
            None to let the widget span both table columns.
        custom: reserved for custom widgets (only its length is used to size
            the table; see FIXME above).

    Returns:
        A gtk.VBox containing the heading and a two-column table of rows.
    """
    # NOTE: defaults are immutable tuples; the previous mutable-list defaults
    # were an anti-pattern (shared between calls), even if never mutated here.
    vbox = gtk.VBox(spacing=6)

    label = gobject.new(gtk.Label, label="<b>%s</b>" % title,
                        use_markup=True,
                        xalign=0)
    vbox.pack_start(label, expand=False)
    label.show()

    # Indent the section body relative to the heading.
    align = gobject.new(gtk.Alignment, left_padding=12)
    vbox.pack_start(align, expand=False)
    align.show()

    table = gobject.new(gtk.Table,
                        n_rows=len(wlabels) + len(custom),
                        n_columns=2,
                        row_spacing=6,
                        column_spacing=12)
    align.add(table)
    table.show()

    for i, (l, widget) in enumerate(wlabels):
        label = gobject.new(gtk.Label, label=l, xalign=0)
        # Keep the label's sensitivity mirroring the widget's.
        widget.connect("notify::sensitive", on_pref_widget_notify_sensitive)
        widget.set_data("pref-label", label)
        if l is not None:
            table.attach(label, 0, 1, i, i + 1,
                         xoptions=gtk.FILL, yoptions=gtk.FILL)
            table.attach(widget, 1, 2, i, i + 1,
                         xoptions=gtk.EXPAND | gtk.FILL, yoptions=gtk.FILL)
        else:
            # No label: let the widget span both columns.
            table.attach(widget, 0, 2, i, i + 1,
                         xoptions=gtk.EXPAND | gtk.FILL, yoptions=gtk.FILL)
    table.show_all()
    return vbox
# FIXME: fix notification
class ConfigurationDialog(gtk.Dialog):
    """Modal settings dialog for the sced plugin.

    Lets the user pick the SuperCollider folder and, optionally, advanced
    interpreter settings (sclang command and runtime folder). Settings are
    saved only when the dialog is accepted.
    """

    __gsignals__ = {
        "response": "override",
    } # __gsignals__

    def __init__(self, plugin):
        gtk.Dialog.__init__(self, title=_("Sced configuration"),
            flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
            buttons = (
                gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                gtk.STOCK_OK, gtk.RESPONSE_ACCEPT
            ))
        self.set_default_response(gtk.RESPONSE_ACCEPT)

        # Settings object shared with the plugin; written back in do_response.
        self.__settings = plugin.settings()
        self.__create_page_general()

    def __create_filesystem_entry( self, chooser, action, stock = gtk.STOCK_OPEN ):
        """Build an entry + browse-button row backed by a shared file chooser.

        Returns a (container_box, entry) pair; the entry holds the chosen path.
        """
        entry = gtk.Entry()
        btn = gtk.Button(stock=stock)
        box = gtk.HBox()
        box.add(entry)
        box.add(btn)
        def run_dialog(btn):
            # Reconfigure the shared chooser for this row's purpose.
            chooser.set_action(action)
            chooser.set_filename(entry.get_text())
            response = chooser.run()
            chooser.hide()
            if response == gtk.RESPONSE_ACCEPT:
                entry.set_text(chooser.get_filename())
        btn.connect("clicked", run_dialog)
        return (box, entry)

    def __create_page_general(self):
        """Build the dialog's only page: basic and advanced interpreter options."""
        # create views
        # One chooser dialog is shared by all filesystem entries.
        chooser = gtk.FileChooserDialog(
            parent = self,
            title = "Choose interpreter program",
            buttons = (
                gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                gtk.STOCK_OK, gtk.RESPONSE_ACCEPT
            )
        )
        chooser.set_select_multiple(False)

        sc_dir_view, sc_dir_entry = self.__create_filesystem_entry (
            chooser,
            gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER )

        adv_view = gtk.CheckButton()

        cmd_view, cmd_entry = self.__create_filesystem_entry (
            chooser,
            gtk.FILE_CHOOSER_ACTION_OPEN )

        wd_view, wd_entry = self.__create_filesystem_entry (
            chooser,
            gtk.FILE_CHOOSER_ACTION_SELECT_FOLDER )

        def toggle_advanced(advanced):
            # Basic folder entry and advanced entries are mutually exclusive.
            sc_dir_view.set_sensitive(not advanced)
            cmd_view.set_sensitive(advanced)
            wd_view.set_sensitive(advanced)

        adv_view.connect("toggled", lambda btn: toggle_advanced(btn.get_active()) )

        # fill in the data
        sets = self.__settings
        if sets.sc_dir is not None:
            sc_dir_entry.set_text(sets.sc_dir)
        adv_view.set_active(sets.advanced is True)
        if sets.sclang_cmd is not None:
            cmd_entry.set_text(sets.sclang_cmd)
        if sets.sclang_work_dir is not None:
            wd_entry.set_text(sets.sclang_work_dir)
        toggle_advanced(sets.advanced is True)

        # Keep references for do_response to read the edited values back.
        self.__adv_check = adv_view
        self.__sc_dir_entry = sc_dir_entry
        self.__cmd_entry = cmd_entry
        self.__wd_entry = wd_entry

        # lay out
        section = create_pref_section("Basic", [
            ("SuperCollider folder:", sc_dir_view),
            ("Advanced settings:", adv_view),
        ])
        section.props.border_width = 12
        self.vbox.add(section)
        section.show()

        section = create_pref_section("Interpreter options", [
            ("Command:", cmd_view),
            ("Runtime folder:", wd_view)
        ])
        section.props.border_width = 12
        self.vbox.add(section)
        section.show()

    def do_response(self, response):
        """Persist settings on accept; destroy the dialog either way."""
        if response == gtk.RESPONSE_ACCEPT:
            sets = self.__settings
            sets.sc_dir = self.__sc_dir_entry.get_text()
            sets.advanced = self.__adv_check.get_active()
            sets.sclang_work_dir = self.__wd_entry.get_text()
            sets.sclang_cmd = self.__cmd_entry.get_text()
            sets.save()
        self.destroy()
| gpl-3.0 |
Jaimenms/google-python-exercises | basic/string1.py | 1 | 3750 | #!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Basic string exercises
# Fill in the code for the functions below. main() is already set up
# to call the functions with a few different inputs,
# printing 'OK' when each function is correct.
# The starter code for each function includes a 'return'
# which is just a placeholder for your code.
# It's ok if you do not complete all the functions, and there
# are some additional functions to try in string2.py.
# A. donuts
# Given an int count of a number of donuts, return a string
# of the form 'Number of donuts: <count>', where <count> is the number
# passed in. However, if the count is 10 or more, then use the word 'many'
# instead of the actual count.
# So donuts(5) returns 'Number of donuts: 5'
# and donuts(23) returns 'Number of donuts: many'
def donuts(count):
    """Return 'Number of donuts: <count>', using 'many' when count >= 10."""
    quantity = 'many' if count >= 10 else str(count)
    return 'Number of donuts: ' + quantity
# B. both_ends
# Given a string s, return a string made of the first 2
# and the last 2 chars of the original string,
# so 'spring' yields 'spng'. However, if the string length
# is less than 2, return instead the empty string.
def both_ends(s):
    """Return the first two plus last two chars of s, or '' if len(s) < 2."""
    if len(s) < 2:
        return ''
    return s[:2] + s[-2:]
# C. fix_start
# Given a string s, return a string
# where all occurences of its first char have
# been changed to '*', except do not change
# the first char itself.
# e.g. 'babble' yields 'ba**le'
# Assume that the string is length 1 or more.
def fix_start(s):
    """Replace every later occurrence of s's first char with '*'."""
    first = s[0]
    # Only the tail is rewritten, so the leading char is left untouched.
    return first + s[1:].replace(first, '*')
# D. MixUp
# Given strings a and b, return a single string with a and b separated
# by a space '<a> <b>', except swap the first 2 chars of each string.
# e.g.
#   'mix', pod' -> 'pox mid'
#   'dog', 'dinner' -> 'dig donner'
# Assume a and b are length 2 or more.
def mix_up(a, b):
    """Return a and b joined by a space, with their 2-char heads swapped."""
    swapped_a = b[:2] + a[2:]
    swapped_b = a[:2] + b[2:]
    return ' '.join((swapped_a, swapped_b))
# Provided simple test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))
# Provided main() calls the above functions with interesting inputs,
# using test() to check if each result is correct or not.
def main():
    """Run the provided checks for every exercise function, printing results."""
    print('donuts')
    # Each line calls donuts, compares its result to the expected for that call.
    test(donuts(4), 'Number of donuts: 4')
    test(donuts(9), 'Number of donuts: 9')
    test(donuts(10), 'Number of donuts: many')
    test(donuts(99), 'Number of donuts: many')

    print()
    print('both_ends')
    test(both_ends('spring'), 'spng')
    test(both_ends('Hello'), 'Helo')
    test(both_ends('a'), '')
    test(both_ends('xyz'), 'xyyz')

    print()
    print('fix_start')
    test(fix_start('babble'), 'ba**le')
    test(fix_start('aardvark'), 'a*rdv*rk')
    test(fix_start('google'), 'goo*le')
    test(fix_start('donut'), 'donut')

    print()
    print('mix_up')
    test(mix_up('mix', 'pod'), 'pox mid')
    test(mix_up('dog', 'dinner'), 'dig donner')
    test(mix_up('gnash', 'sport'), 'spash gnort')
    test(mix_up('pezzy', 'firm'), 'fizzy perm')
# Standard boilerplate to call the main() function.
# Guard so importing this module does not run the exercises.
if __name__ == '__main__':
    main()
| apache-2.0 |
hajicj/safire | safire/data/composite_corpus.py | 1 | 11234 | """
This module contains classes that combine several pipelines into one
composite pipeline: the :class:`Zipper` transformer and the
:class:`CompositeCorpus` block it produces.
"""
import logging
import copy
import gensim
import itertools
import numpy
# import safire.datasets.transformations
import safire.utils.transcorp
from safire.utils import IndexedTransformedCorpus, flatten_composite_item
__author__ = "Jan Hajic jr."
class Zipper(gensim.interfaces.TransformationABC):
"""This transformer combines several pipelines into one. Applying a Zipper
to a tuple of pipelines creates a CompositeCorpus block.
While a Zipper instance should theoretically be independent of what it is
actually zipping and this should be determined at the time of the ``_apply``
call, in practice, we need to instantiate the zipper with a specific
combination of pipelines in mind to avoid a multi-parameter ``__getitem__``
call. Therefore, once a Zipper is created, it already knows its output
dimension.
The Zipper works like this::
>>> x = [[1, 10], [2, 20], [3, 30]]
>>> y = [[-1, -10], [-2, -20], [-3, -30]]
>>> z = Zipper((x, y))
>>> composite = z._apply((x, y))
>>> composite[0]
([1, 10], [-1, -10])
>>> composite[0:2]
[([1, 10], [-1, -10]), ([2, 20], [-2, -20])]
The Zipper by default returns output structured in tuples that
correspond to the structure of the input combination of pipelines.
This can be changed using the ``flatten`` parameter::
>>> z_flatten = Zipper((x, y), flatten=True)
>>> composite_flat = z_flatten._apply((x, y))
>>> composite_flat[0:2]
[[1, 10, -1, -10], [2, 20, -2, -20]]
**The Zipper expects all its input pipelines to be of the same length.**
(The :class:`ReorderingCorpus` is useful to meet this condition.)
Zippers are an essential part of Safire. Do get to know them.
"""
def __init__(self, corpora, flatten=False, dim=None, names=None):
# Check that 'corpora' is a tuple
if not isinstance(corpora, tuple):
raise TypeError('Input to zipper must be a tuple of corpora, got:'
' {0} with type {1}'.format(corpora, type(corpora)))
self.flatten = flatten
self.names = names
# Generate proposed dim from corpora
proposed_dim = tuple(safire.utils.transcorp.dimension(c) for c in corpora)
if dim is not None and dim != proposed_dim:
logging.warn('Supplied dimension {0} and proposed dimension {1} do '
'not match, defaulting to supplied dim.'
''.format(dim, proposed_dim))
if dim is None:
dim = proposed_dim
self.orig_dim = dim
if self.flatten:
flat_dim = safire.utils.transcorp.flattened_dimension(dim)
dim = flat_dim
self.dim = dim
def __getitem__(self, item):
# The check for _apply is quite fragile.
if isinstance(item[0], gensim.interfaces.CorpusABC):
return self._apply(item)
if self.flatten:
if isinstance(item[0], numpy.ndarray):
# Only works for ndarrays so far, support for gensim not implemented
output = self.flatten_numpy(item)
else:
output = self.flatten_gensim(item, self.orig_dim)
return output
else:
return item
def _apply(self, corpora):
    # Zipping a tuple of pipelines produces a CompositeCorpus that
    # carries this Zipper's (possibly flattened) dimension and names.
    return CompositeCorpus(corpora, dim=self.dim, names=self.names)
@staticmethod
def flatten_gensim_and_dim(vectors, dim):
"""Flattens the given gensim vectors, using the supplied dim.
Works on both non-recursive and recursive composite vectors.
>>> v1 = [(0, 1), (1, 2), (4, 1)]
>>> v2 = [(1, 1), (2, 4), (3, 1)]
>>> v3 = [(0, 2), (3, 1)]
>>> dim = ((5, 5), 4)
>>> vectors = ((v1, v2), v3)
>>> Zipper.flatten_gensim_and_dim(vectors, dim)
([(0, 1), (1, 2), (4, 1), (6, 1), (7, 4), (8, 1), (10, 2), (13, 1)], 14)
"""
# logging.warn('Vectors: {0}, dim: {1}'.format(vectors, dim))
if isinstance(dim, int):
return vectors, dim
if len(vectors) != len(dim):
raise ValueError('Item length {0} and dimension length {1} do not '
'match.'.format(vectors, dim))
output = []
dim_offset = 0
for item, d in itertools.izip(vectors, dim):
to_add, to_add_dim = Zipper.flatten_gensim_and_dim(item, d)
# logging.warn('To add: {0}, dim_offset: {1}'.format(to_add, dim_offset))
for key, value in to_add:
output.append((key + dim_offset, value))
dim_offset += to_add_dim
total_dim = dim_offset
return output, total_dim
@staticmethod
def flatten_numpy(vectors):
    """Flatten a composite item made of numpy vectors into one ndarray.

    Relies on the module-level ``flatten_composite_item`` helper to walk
    the nested structure; the resulting pieces are stacked horizontally.
    """
    flattened_vectors = list(flatten_composite_item(vectors))
    output = numpy.hstack(flattened_vectors)
    return output
@staticmethod
def flatten_gensim(vectors, dim):
    """Flatten a composite item made of gensim sparse vectors.

    Same as :meth:`flatten_gensim_and_dim`, but discards the total
    dimension and returns only the flattened vector.
    """
    flat_vector, _ = Zipper.flatten_gensim_and_dim(vectors, dim)
    return flat_vector
class CompositeCorpus(IndexedTransformedCorpus):
    """Allows combining pipelines from multiple sources into one, like a more
    flexible version of ``itertools.izip()``. A CompositeCorpus can either be
    created directly, or through a :class:`Zipper` transformer. [NOT
    IMPLEMENTED]

    Also allows naming pipelines (this is useful for train/dev/test splits and
    features/targets splits).

    Initialized with a tuple of pipelines, indexing is available:

    >>> from safire.datasets.dataset import DatasetABC
    >>> features = DatasetABC([[1], [2], [3]], dim=1)
    >>> targets = DatasetABC([[-1], [-2], [-3]], dim=1)
    >>> composite = CompositeCorpus((features, targets), names=('features', 'targets'))
    >>> composite[1:3]
    ([[2], [3]], [[-2], [-3]])
    >>> composite['targets'][:2]
    [[-1], [-2]]
    >>> composite.dim
    (1, 1)

    Can also be recursive:

    >>> recursive = CompositeCorpus((composite, composite), names=('first', 'second'))
    >>> recursive.dim
    ((1, 1), (1, 1))
    >>> recursive[1:3]
    (([[2], [3]], [[-2], [-3]]), ([[2], [3]], [[-2], [-3]]))

    However, it only currently supports building this tree-like structure one
    by one. Trying ``composite = CompositeDataset(((data1, data2), data3))``
    will fail.
    """
    def __init__(self, corpora, dim=None, names=None,
                 aligned=True):
        """Initializes a CompositeCorpus.

        :param corpora: a tuple of pipelines to combine.

        :param dim: the composite dimension. If None, it is derived from
            ``corpora``; if supplied and inconsistent with the derived
            value, the supplied value wins (with a warning).

        :param names: optional tuple of component names, one per corpus.

        :type aligned: bool
        :param aligned: If set, will expect that all the individual datasets
            from ``corpora`` have the same length. If unset, will not check this
            and advertise the length of the first given dataset as its length;
            only do this if you are flattening the dataset immediately after
            initialization (and using indexes to flatten)!
        """
        self.aligned = aligned
        # Check lengths??
        self.length = len(corpora[0])  # TODO: This is very ugly.
        self.corpus = corpora
        self.obj = None  # No obj so far, waiting to implement ZipPipelines.
        self.chunksize = None
        # Derive the dimension exactly once (the original implementation
        # computed it a second time in the dim-is-None branch).
        derived_dim = self.derive_dimension(corpora)
        if dim is None:
            dim = derived_dim
        elif dim != derived_dim:
            # logging.warn() is a deprecated alias of logging.warning().
            logging.warning('Supplied dimension {0} inconsistent with '
                            'dimension {1} derived from given corpora. '
                            'Using supplied dimension (and hoping you know '
                            'what you are doing).'
                            ''.format(dim, derived_dim))
        self.dim = dim
        if self.aligned:
            # Every component must advertise the same length.
            for d in corpora:
                if len(d) != self.length:
                    raise ValueError('All composite corpus components must '
                                     'have the same length. (Lengths: '
                                     '{0}) Are you sure the CompositeCorpus'
                                     'should be aligned?\nStacks:{1}'
                                     ''.format(tuple((len(d) for d in corpora)),
                                               '\n\n'.join([safire.utils.transcorp.log_corpus_stack(d, with_length=True) for d in corpora]))
                                     )
        if names:
            if len(names) != len(corpora):
                raise ValueError('Corpus names too many or too few'
                                 ' ({0}) for {1} component'
                                 ' corpora.'.format(len(names),
                                                    len(corpora)))
        else:
            names = []
        self.names = names
        # Maps a component name to its position in self.corpus.
        self.names_dict = {name: i for i, name in enumerate(self.names)}

    def __getitem__(self, item):
        """Retrieval from a composite corpus has several modes:

        >>> from safire.datasets.dataset import DatasetABC
        >>> features = DatasetABC([[1], [2], [3]], dim=1)
        >>> targets = DatasetABC([[-1], [-2], [-3]], dim=1)
        >>> composite = CompositeCorpus((features, targets), names=('features', 'targets'))
        >>> composite[1:3]
        ([[2], [3]], [[-2], [-3]])
        >>> composite.__getitem__((1, 2))
        ([2], [-3])
        """
        try:
            # For retrieving a different index from each data point
            if isinstance(item, tuple):
                return tuple([d[item[i]] for i, d in enumerate(self.corpus)])
            else:
                return tuple([d[item] for d in self.corpus])
        except (TypeError, IndexError):
            # Fall back to name-based component lookup, e.g. c['targets'].
            if isinstance(item, str):
                return self.corpus[self.names_dict[item]]
            else:
                raise

    def __len__(self):
        # Ugly hack - returns a structure instead of a number... doesn't work
        # with test_p and devel_p, though, so I disabled it for now.
        # if not self.aligned:
        #     return tuple([len(d) for d in self.data])
        return self.length

    @staticmethod
    def derive_dimension(corpora):
        """Derive the composite dimension as a tuple of the component
        dimensions."""
        return tuple(safire.utils.transcorp.dimension(d) for d in corpora)

    def as_composite_dim(self, idx):
        """Formats the given index as the composite dimension. Suppose self.dim
        is ``{10, (10, 20), 50)``, then ``as_composite_dim(4)`` will return
        ``(4, (4, 4), 4)``.

        Used as a utility function when creating indexes for flattening an
        aligned CompositeCorpus.

        :param idx: An integer.
        """
        output = []
        for component in self.corpus:
            if isinstance(component, CompositeCorpus):
                # Recurse into nested composites to mirror their shape.
                output.append(component.as_composite_dim(idx))
            else:
                output.append(idx)
        if len(output) == 1:
            return output[0]
        else:
            return tuple(output)
# There's a problem with id2doc.
#
# Flattening a CompositeCorpus: same as flattening a CompositeDataset, as the
# CompositeCorpus already guarantees a dimension. | gpl-3.0 |
maritaria/Isomurphy | tests/ColorAndBranchTest.py | 2 | 2391 | import unittest
from graph.graphIO import loadgraphs, writeDOT
from isomorphism.IndividualizationRefinementChecker import IndividualizationRefinementChecker
class IsomorphTest(unittest.TestCase):
    """Checks IndividualizationRefinementChecker against known graph pairs.

    Each test loads a ``.grl`` file and verifies, for selected pairs of
    graph indices, that the checker's isomorphism verdict matches the
    known answer. Paths use raw strings: the original plain literals
    relied on backslash sequences like ``\\c`` not being escapes, which
    raises a DeprecationWarning and is a future SyntaxError in CPython.
    """

    def setUp(self):
        self._checker = IndividualizationRefinementChecker()

    def _check_pairs(self, filename, cases):
        """Load *filename*, then run every ``(index1, index2, expected)``
        case through :meth:`runTest`."""
        self._graphs = loadgraphs(filename)
        for index1, index2, expected in cases:
            self.runTest(index1, index2, expected)

    def test_Quick(self):
        self._check_pairs(r'data\colorref_smallexample_4_7.grl',
                          [(0, 1, False), (0, 2, True), (0, 3, False),
                           (1, 2, False), (1, 3, True), (2, 3, False)])

    def test_Quick1(self):
        self._check_pairs(r'data\colorref_smallexample_4_16.grl',
                          [(0, 1, True), (0, 2, False), (0, 3, False),
                           (1, 2, False), (1, 3, False), (2, 3, True)])

    def test_Quick3(self):
        self._check_pairs(r'data\colorref_smallexample_6_15.grl',
                          [(0, 1, True), (0, 2, False), (0, 3, False),
                           (0, 4, False), (0, 5, False),
                           (1, 2, False), (1, 3, False), (1, 4, False),
                           (1, 5, False),
                           (2, 3, True), (2, 4, False), (2, 5, False),
                           (3, 4, False), (3, 5, False),
                           (4, 5, True)])

    def test_cubes(self):
        self._check_pairs(r'data\cubes3.grl',
                          [(0, 1, False), (0, 2, True), (0, 3, False),
                           (1, 2, False), (1, 3, True), (2, 3, False)])

    def test_cubes2(self):
        self._check_pairs(r'data\cubes4.grl',
                          [(0, 1, False), (0, 2, False), (0, 3, False),
                           (1, 2, False), (1, 3, True), (2, 3, False)])

    def test_cubes3(self):
        self._check_pairs(r'data\cubes5.grl',
                          [(0, 1, True), (0, 2, False), (0, 3, False),
                           (1, 2, False), (1, 3, False), (2, 3, False)])

    def runTest(self, index1: int, index2: int, expectedResult: bool):
        """Clone graphs *index1* and *index2*, compare them, and assert
        the checker's verdict equals *expectedResult*.

        Isomorphic pairs are additionally written out as DOT files for
        manual inspection.
        """
        g1 = self._graphs[0][index1].clone()
        g2 = self._graphs[0][index2].clone()
        result = self._checker.isIsomorphic(g1, g2)
        print(index1, index2, result)
        self.assertEqual(expectedResult, result)
        if result:
            writeDOT(g1, 'cubes_' + str(index1) + '.dot')
            writeDOT(g2, 'cubes_' + str(index2) + '.dot')
| mit |
saffroncoin/csfrd | docs/conf.py | 1 | 7704 | # -*- coding: utf-8 -*-
#
# csfrd documentation build configuration file, created by
# sphinx-quickstart on Mon Jan 20 15:45:40 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# Sphinx extension modules to load (none needed for this project).
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'csfrd'
copyright = u'2014, cSFR Team'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# Build output lives under _build; never scan it for sources.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# Pygments style used for syntax highlighting in code samples.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Built-in Sphinx HTML theme.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# Custom static files (CSS, images); copied after the builtin static
# files, so a file named "default.css" overrides the builtin one.
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# Output file base name for the HTML help builder.
htmlhelp_basename = 'csfrddoc'
# -- Options for LaTeX output --------------------------------------------------
# LaTeX builder tweaks; all options left at their defaults.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'csfrd.tex', u'csfrd Documentation',
     u'cSFR Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# NOTE(review): author 'SFRDirect Team' differs from the project
# copyright holder 'cSFR Team' above — confirm which is intended.
man_pages = [
    ('index', 'csfrd', u'csfrd Documentation',
     [u'SFRDirect Team'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# TODO: 'One line description of project.' is the sphinx-quickstart
# placeholder — replace with a real description.
texinfo_documents = [
    ('index', 'csfrd', u'csfrd Documentation',
     u'SFRDirect Team', 'csfrd', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
pnavarro/neutron | neutron/tests/unit/agent/linux/test_ipset_manager.py | 8 | 4469 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.agent.linux import ipset_manager
from neutron.tests import base
# Fixture constants shared by all test cases below.
TEST_SET_ID = 'fake_sgid'
ETHERTYPE = 'IPv4'
# Real set names exactly as IpsetManager derives them (the literal
# 'NETIPv4fake_sgid-new' in expect_set below must stay in sync).
TEST_SET_NAME = ipset_manager.IpsetManager.get_name(TEST_SET_ID, ETHERTYPE)
TEST_SET_NAME_NEW = TEST_SET_NAME + ipset_manager.SWAP_SUFFIX
FAKE_IPS = ['10.0.0.1', '10.0.0.2', '10.0.0.3', '10.0.0.4',
            '10.0.0.5', '10.0.0.6']
class BaseIpsetManagerTest(base.BaseTestCase):
    """Shared fixture: an IpsetManager whose ``execute`` is mocked out.

    The ``expect_*`` helpers record the exact ipset command invocations
    the manager is expected to issue; ``verify_mock_calls`` then asserts
    they happened in that order.
    """

    def setUp(self):
        super(BaseIpsetManagerTest, self).setUp()
        self.ipset = ipset_manager.IpsetManager()
        # Intercept command execution so no real ipset calls are made.
        self.execute = mock.patch.object(self.ipset, "execute").start()
        self.expected_calls = []
        # Every scenario starts with the manager creating the base set.
        self.expect_create()

    def verify_mock_calls(self):
        # any_order=False: creation/restore/swap/destroy ordering matters.
        self.execute.assert_has_calls(self.expected_calls, any_order=False)

    def expect_set(self, addresses):
        # A full refresh: restore all members into a '-new' set, swap it
        # in atomically, then destroy the old (now '-new'-named) set.
        temp_input = ['create NETIPv4fake_sgid-new hash:net family inet']
        temp_input.extend('add NETIPv4fake_sgid-new %s' % ip
                          for ip in addresses)
        input = '\n'.join(temp_input)  # NOTE: shadows the builtin 'input'
        self.expected_calls.extend([
            mock.call(['ipset', 'restore', '-exist'],
                      process_input=input,
                      run_as_root=True),
            mock.call(['ipset', 'swap', TEST_SET_NAME_NEW, TEST_SET_NAME],
                      process_input=None,
                      run_as_root=True),
            mock.call(['ipset', 'destroy', TEST_SET_NAME_NEW],
                      process_input=None,
                      run_as_root=True)])

    def expect_add(self, addresses):
        # Incremental addition: one 'ipset add' per address.
        self.expected_calls.extend(
            mock.call(['ipset', 'add', '-exist', TEST_SET_NAME, ip],
                      process_input=None,
                      run_as_root=True) for ip in addresses)

    def expect_del(self, addresses):
        # Incremental removal: one 'ipset del' per address.
        self.expected_calls.extend(
            mock.call(['ipset', 'del', TEST_SET_NAME, ip],
                      process_input=None,
                      run_as_root=True) for ip in addresses)

    def expect_create(self):
        self.expected_calls.append(
            mock.call(['ipset', 'create', '-exist', TEST_SET_NAME,
                       'hash:net', 'family', 'inet'],
                      process_input=None,
                      run_as_root=True))

    def expect_destroy(self):
        self.expected_calls.append(
            mock.call(['ipset', 'destroy', TEST_SET_NAME],
                      process_input=None,
                      run_as_root=True))

    def add_first_ip(self):
        # Seed the set with a single member.
        self.expect_set([FAKE_IPS[0]])
        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, [FAKE_IPS[0]])

    def add_all_ips(self):
        self.expect_set(FAKE_IPS)
        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS)
class IpsetManagerTestCase(BaseIpsetManagerTest):
    """Exercises IpsetManager.set_members/destroy against expected calls."""

    def test_set_exists(self):
        self.add_first_ip()
        self.assertTrue(self.ipset.set_exists(TEST_SET_ID, ETHERTYPE))

    def test_set_members_with_first_add_member(self):
        self.add_first_ip()
        self.verify_mock_calls()

    def test_set_members_adding_less_than_5(self):
        # A small delta is applied incrementally via 'ipset add'.
        self.add_first_ip()
        subset = FAKE_IPS[:5]
        self.expect_add(reversed(subset[1:]))
        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, subset)
        self.verify_mock_calls()

    def test_set_members_deleting_less_than_5(self):
        # A small shrink is applied incrementally via 'ipset del'.
        self.add_all_ips()
        self.expect_del(reversed(FAKE_IPS[4:5]))
        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS[:3])
        self.verify_mock_calls()

    def test_set_members_adding_more_than_5(self):
        # A large delta triggers a full set refresh instead.
        self.add_first_ip()
        self.expect_set(FAKE_IPS)
        self.ipset.set_members(TEST_SET_ID, ETHERTYPE, FAKE_IPS)
        self.verify_mock_calls()

    def test_destroy(self):
        self.add_first_ip()
        self.expect_destroy()
        self.ipset.destroy(TEST_SET_ID, ETHERTYPE)
        self.verify_mock_calls()
| apache-2.0 |
javilonas/NCam | cross/android-toolchain/lib/python2.7/encodings/cp1026.py | 593 | 13369 | """ Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP1026 codec backed by the module's charmap tables.

    Machine-generated (see module docstring); keep in canonical form.
    """

    def encode(self, input, errors='strict'):
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so 'final' needs no special handling.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input, self.errors, encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so 'final' needs no special handling.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input, self.errors, decoding_table)[0]
class StreamWriter(Codec, codecs.StreamWriter):
    # Stream support comes entirely from the two base classes.
    pass
class StreamReader(Codec, codecs.StreamReader):
    # Stream support comes entirely from the two base classes.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo entry that registers this codec as 'cp1026'."""
    return codecs.CodecInfo(
        name='cp1026',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'{' # 0x48 -> LEFT CURLY BRACKET
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'[' # 0x68 -> LEFT SQUARE BRACKET
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
u':' # 0x7A -> COLON
u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'}' # 0x8C -> RIGHT CURLY BRACKET
u'`' # 0x8D -> GRAVE ACCENT
u'\xa6' # 0x8E -> BROKEN BAR
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u']' # 0xAC -> RIGHT SQUARE BRACKET
u'$' # 0xAD -> DOLLAR SIGN
u'@' # 0xAE -> COMMERCIAL AT
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'~' # 0xCC -> TILDE
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\\' # 0xDC -> REVERSE SOLIDUS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'#' # 0xEC -> NUMBER SIGN
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'"' # 0xFC -> QUOTATION MARK
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
# Inverse mapping (character -> byte) built from the decoding table above.
encoding_table = codecs.charmap_build(decoding_table)
| gpl-3.0 |
mattupstate/flask-social | tests/functional_tests.py | 3 | 14205 | import unittest
import mock
from tests.test_app.sqlalchemy import create_app as create_sql_app
from tests.test_app.mongoengine import create_app as create_mongo_app
from tests.test_app.peewee_app import create_app as create_peewee_app
def get_mock_twitter_response():
    """Canned payload mimicking Twitter's OAuth1 access-token response."""
    return dict(
        oauth_token_secret='the_oauth_token_secret',
        user_id='1234',
        oauth_token='the_oauth_token',
        screen_name='twitter_username',
        name='twitter_name',
    )
def get_mock_twitter_connection_values():
    """Connection record built from the mock Twitter response above."""
    return dict(
        provider_id='twitter',
        provider_user_id='1234',
        access_token='the_oauth_token',
        secret='the_oauth_token_secret',
        display_name='@twitter_username',
        full_name='twitter_name',
        profile_url='http://twitter.com/twitter_username',
        image_url='https://cdn.twitter.com/something.png',
    )
def get_mock_twitter_token_pair():
    """(access token, secret) pair matching the mock Twitter response."""
    return dict(access_token='the_oauth_token',
                secret='the_oauth_token_secret')
def get_mock_twitter_updated_token_pair():
    """Token pair as it looks after Twitter rotates the credentials."""
    return dict(access_token='the_updated_oauth_token',
                secret='the_updated_oauth_token_secret')
class SocialTest(unittest.TestCase):
    """Base class for functional tests: builds a test app and client.

    Subclasses may override SOCIAL_CONFIG and APP_TYPE to pick the
    Flask-Social configuration and the backing store.
    """

    SOCIAL_CONFIG = None
    APP_TYPE = None

    def setUp(self):
        super(SocialTest, self).setUp()
        self.app = self._create_app(self.SOCIAL_CONFIG or None)
        self.app.debug = False
        self.app.config['TESTING'] = True
        self.client = self.app.test_client()

    def tearDown(self):
        super(SocialTest, self).tearDown()
        # Make sure no session leaks into the next test.
        self.client.get('/logout')

    def _create_app(self, auth_config):
        """Build the test app for the configured backend (default: SQL)."""
        factories = {
            'sql': create_sql_app,
            'mongo': create_mongo_app,
            'peewee': create_peewee_app,
        }
        factory = factories.get(self.APP_TYPE or 'sql')
        if factory is not None:
            return factory(auth_config, False)

    def _post(self, route, data=None, content_type=None,
              follow_redirects=True, headers=None):
        return self.client.post(
            route, data=data, follow_redirects=follow_redirects,
            content_type=content_type or 'application/x-www-form-urlencoded',
            headers=headers)

    def _get(self, route, content_type=None, follow_redirects=None,
             headers=None):
        mimetype = content_type or 'text/html'
        return self.client.get(route, follow_redirects=follow_redirects,
                               content_type=mimetype, headers=headers)

    def authenticate(self, email="matt@lp.com", password="password",
                     endpoint=None, **kwargs):
        payload = dict(email=email, password=password, remember='y')
        return self._post(endpoint or '/login', data=payload, **kwargs)

    # The assert* fallbacks below support very old unittest versions
    # that lack these helpers.

    def assertIn(self, member, container, msg=None):
        parent = unittest.TestCase
        if hasattr(parent, 'assertIn'):
            return parent.assertIn(self, member, container, msg)
        return self.assertTrue(member in container)

    def assertNotIn(self, member, container, msg=None):
        parent = unittest.TestCase
        if hasattr(parent, 'assertNotIn'):
            return parent.assertNotIn(self, member, container, msg)
        return self.assertFalse(member in container)

    def assertIsNotNone(self, obj, msg=None):
        parent = unittest.TestCase
        if hasattr(parent, 'assertIsNotNone'):
            return parent.assertIsNotNone(self, obj, msg)
        return self.assertTrue(obj is not None)
class TwitterSocialTests(SocialTest):
    """End-to-end tests of the Twitter provider flows.

    No real OAuth1 handshake happens: the flask-oauthlib client entry
    points and the provider helper functions are patched with the canned
    values defined at module level.  Note that @mock.patch decorators
    are applied bottom-up, so the mock arguments arrive in reverse
    decorator order.
    """
    # A fresh connection attempt should succeed and report it.
    @mock.patch('flask_social.providers.twitter.get_connection_values')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.handle_oauth1_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.authorize')
    def test_connect_twitter(self,
                             mock_authorize,
                             mock_handle_oauth1_response,
                             mock_get_connection_values):
        mock_get_connection_values.return_value = get_mock_twitter_connection_values()
        mock_authorize.return_value = 'Should be a redirect'
        mock_handle_oauth1_response.return_value = get_mock_twitter_response()
        r = self.authenticate()
        self.assertIn('Hello', r.data)
        self._post('/connect/twitter')
        r = self._get('/connect/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        self.assertIn('Connection established to Twitter', r.data)
    # Connecting twice must report the existing connection instead of
    # creating a duplicate.
    @mock.patch('flask_social.providers.twitter.get_connection_values')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.handle_oauth1_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.authorize')
    def test_double_connect_twitter(self,
                                    mock_authorize,
                                    mock_handle_oauth1_response,
                                    mock_get_connection_values):
        mock_get_connection_values.return_value = get_mock_twitter_connection_values()
        mock_authorize.return_value = 'Should be a redirect'
        mock_handle_oauth1_response.return_value = get_mock_twitter_response()
        r = self.authenticate()
        for x in range(2):
            self._post('/connect/twitter')
            r = self._get('/connect/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        self.assertIn('A connection is already established with', r.data)
    # Logging in via Twitter without a stored connection must fail.
    @mock.patch('flask_social.providers.twitter.get_connection_values')
    @mock.patch('flask_social.providers.twitter.get_token_pair_from_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.handle_oauth1_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.authorize')
    def test_unconnected_twitter_login(self,
                                       mock_authorize,
                                       mock_handle_oauth1_response,
                                       mock_get_token_pair_from_response,
                                       mock_get_connection_values):
        mock_get_connection_values.return_value = get_mock_twitter_connection_values()
        mock_get_token_pair_from_response.return_value = get_mock_twitter_token_pair()
        mock_authorize.return_value = 'Should be a redirect'
        mock_handle_oauth1_response.return_value = get_mock_twitter_response()
        self._post('/login/twitter')
        r = self._get('/login/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        self.assertIn('Twitter account not associated with an existing user', r.data)
    # Connect first, log out, then a Twitter login should find the user.
    @mock.patch('flask_social.providers.twitter.get_api')
    @mock.patch('flask_social.providers.twitter.get_connection_values')
    @mock.patch('flask_social.providers.twitter.get_token_pair_from_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.handle_oauth1_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.authorize')
    def test_connected_twitter_login(self,
                                     mock_authorize,
                                     mock_handle_oauth1_response,
                                     mock_get_token_pair_from_response,
                                     mock_get_connection_values,
                                     mock_get_twitter_api):
        mock_get_connection_values.return_value = get_mock_twitter_connection_values()
        mock_get_token_pair_from_response.return_value = get_mock_twitter_token_pair()
        mock_authorize.return_value = 'Should be a redirect'
        mock_handle_oauth1_response.return_value = get_mock_twitter_response()
        mock_get_twitter_api.return_value = get_mock_twitter_connection_values()
        self.authenticate()
        self._post('/connect/twitter')
        r = self._get('/connect/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        self.assertIn('Connection established to Twitter', r.data)
        self._get('/logout')
        self._post('/login/twitter')
        r = self._get('/login/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        self.assertIn("Hello matt@lp.com", r.data)
    # A login whose handshake yields new tokens must update the stored pair.
    @mock.patch('flask_social.providers.twitter.get_api')
    @mock.patch('flask_social.providers.twitter.get_connection_values')
    @mock.patch('flask_social.providers.twitter.get_token_pair_from_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.handle_oauth1_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.authorize')
    def test_connected_twitter_login_update_token(self,
                                                  mock_authorize,
                                                  mock_handle_oauth1_response,
                                                  mock_get_token_pair_from_response,
                                                  mock_get_connection_values,
                                                  mock_get_twitter_api):
        mock_get_connection_values.return_value = get_mock_twitter_connection_values()
        mock_get_token_pair_from_response.return_value = get_mock_twitter_updated_token_pair()
        mock_authorize.return_value = 'Should be a redirect'
        mock_handle_oauth1_response.return_value = get_mock_twitter_response()
        mock_get_twitter_api.return_value = get_mock_twitter_connection_values()
        self.authenticate()
        self._post('/connect/twitter')
        r = self._get('/connect/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        self.assertIn('Connection established to Twitter', r.data)
        # Initial connect stores the original token pair.
        user = self.app.get_user()
        connection = [c for c in user.connections if c.provider_id == 'twitter'][0]
        self.assertEqual(connection.access_token,
                         get_mock_twitter_connection_values()['access_token'])
        self.assertEqual(connection.secret,
                         get_mock_twitter_connection_values()['secret'])
        self._get('/logout')
        self._post('/login/twitter')
        r = self._get('/login/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        self.assertIn("Hello matt@lp.com", r.data)
        # After login the rotated pair must have replaced the stored one.
        user = self.app.get_user()
        connection = [c for c in user.connections if c.provider_id == 'twitter'][0]
        self.assertEqual(connection.access_token,
                         get_mock_twitter_updated_token_pair()['access_token'])
        self.assertEqual(connection.secret,
                         get_mock_twitter_updated_token_pair()['secret'])
    # Explicit /reconnect must also refresh the stored token pair.
    @mock.patch('flask_social.providers.twitter.get_api')
    @mock.patch('flask_social.providers.twitter.get_connection_values')
    @mock.patch('flask_social.providers.twitter.get_token_pair_from_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.handle_oauth1_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.authorize')
    def test_reconnect_twitter_token(self,
                                     mock_authorize,
                                     mock_handle_oauth1_response,
                                     mock_get_token_pair_from_response,
                                     mock_get_connection_values,
                                     mock_get_twitter_api):
        mock_get_connection_values.return_value = get_mock_twitter_connection_values()
        mock_get_token_pair_from_response.return_value = get_mock_twitter_updated_token_pair()
        mock_authorize.return_value = 'Should be a redirect'
        mock_handle_oauth1_response.return_value = get_mock_twitter_response()
        mock_get_twitter_api.return_value = get_mock_twitter_connection_values()
        self.authenticate()
        self._post('/connect/twitter')
        r = self._get('/connect/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        self.assertIn('Connection established to Twitter', r.data)
        user = self.app.get_user()
        connection = [c for c in user.connections if c.provider_id == 'twitter'][0]
        self.assertEqual(connection.access_token,
                         get_mock_twitter_connection_values()['access_token'])
        self.assertEqual(connection.secret,
                         get_mock_twitter_connection_values()['secret'])
        self._post('/reconnect/twitter')
        r = self._get('/login/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        self.assertIn("Hello matt@lp.com", r.data)
        user = self.app.get_user()
        connection = [c for c in user.connections if c.provider_id == 'twitter'][0]
        self.assertEqual(connection.access_token,
                         get_mock_twitter_updated_token_pair()['access_token'])
        self.assertEqual(connection.secret,
                         get_mock_twitter_updated_token_pair()['secret'])
    # DELETE on the connection endpoint removes the stored connection.
    @mock.patch('flask_social.providers.twitter.get_connection_values')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.handle_oauth1_response')
    @mock.patch('flask_oauthlib.client.OAuthRemoteApp.authorize')
    def test_remove_connection(self,
                               mock_authorize,
                               mock_handle_oauth1_response,
                               mock_get_connection_values):
        mock_get_connection_values.return_value = get_mock_twitter_connection_values()
        mock_authorize.return_value = 'Should be a redirect'
        mock_handle_oauth1_response.return_value = get_mock_twitter_response()
        self._post('/login', data=dict(email='matt@lp.com', password='password'))
        self._post('/connect/twitter')
        r = self._get('/connect/twitter?oauth_token=oauth_token&oauth_verifier=oauth_verifier', follow_redirects=True)
        r = self.client.delete('/connect/twitter/1234', follow_redirects=True)
        self.assertIn('Connection to Twitter removed', r.data)
class MongoEngineTwitterSocialTests(TwitterSocialTests):
    # Re-run the full Twitter suite against the MongoEngine-backed app.
    APP_TYPE = 'mongo'
class PeeweeTwitterSocialTests(TwitterSocialTests):
    # Re-run the full Twitter suite against the Peewee-backed app.
    APP_TYPE = 'peewee'
| mit |
monibu1548/linux | scripts/checkkconfigsymbols.py | 221 | 9497 | #!/usr/bin/env python
"""Find Kconfig symbols that are referenced but not defined."""
# (c) 2014-2015 Valentin Rothberg <Valentin.Rothberg@lip6.fr>
# (c) 2014 Stefan Hengelein <stefan.hengelein@fau.de>
#
# Licensed under the terms of the GNU GPL License version 2
import os
import re
import sys
from subprocess import Popen, PIPE, STDOUT
from optparse import OptionParser
# regex expressions
OPERATORS = r"&|\(|\)|\||\!"
FEATURE = r"(?:\w*[A-Z0-9]\w*){2,}"
DEF = r"^\s*(?:menu){,1}config\s+(" + FEATURE + r")\s*"
EXPR = r"(?:" + OPERATORS + r"|\s|" + FEATURE + r")+"
STMT = r"^\s*(?:if|select|depends\s+on)\s+" + EXPR
SOURCE_FEATURE = r"(?:\W|\b)+[D]{,1}CONFIG_(" + FEATURE + r")"
# regex objects
REGEX_FILE_KCONFIG = re.compile(r".*Kconfig[\.\w+\-]*$")
REGEX_FEATURE = re.compile(r"(" + FEATURE + r")")
REGEX_SOURCE_FEATURE = re.compile(SOURCE_FEATURE)
REGEX_KCONFIG_DEF = re.compile(DEF)
REGEX_KCONFIG_EXPR = re.compile(EXPR)
REGEX_KCONFIG_STMT = re.compile(STMT)
REGEX_KCONFIG_HELP = re.compile(r"^\s+(help|---help---)\s*$")
REGEX_FILTER_FEATURES = re.compile(r"[A-Za-z0-9]$")
def parse_options():
    """The user interface of this module.

    Builds the command line parser, validates the combination of the
    given options and returns the parsed options.  Calls sys.exit()
    with an error message on invalid input.
    """
    usage = "%prog [options]\n\n" \
            "Run this tool to detect Kconfig symbols that are referenced but " \
            "not defined in\nKconfig. The output of this tool has the " \
            "format \'Undefined symbol\\tFile list\'\n\n" \
            "If no option is specified, %prog will default to check your\n" \
            "current tree. Please note that specifying commits will " \
            "\'git reset --hard\'\nyour current tree! You may save " \
            "uncommitted changes to avoid losing data."

    parser = OptionParser(usage=usage)

    parser.add_option('-c', '--commit', dest='commit', action='store',
                      default="",
                      help="Check if the specified commit (hash) introduces "
                           "undefined Kconfig symbols.")

    # NOTE: the range format string used to read "commmit1..commit2"
    # (typo); it is spelled correctly now.
    parser.add_option('-d', '--diff', dest='diff', action='store',
                      default="",
                      help="Diff undefined symbols between two commits. The "
                           "input format bases on Git log's "
                           "\'commit1..commit2\'.")

    parser.add_option('', '--force', dest='force', action='store_true',
                      default=False,
                      help="Reset current Git tree even when it's dirty.")

    (opts, _) = parser.parse_args()

    # --commit and --diff are mutually exclusive.
    if opts.commit and opts.diff:
        sys.exit("Please specify only one option at once.")

    # The diff argument must look like '<rev>..<rev>'.
    if opts.diff and not re.match(r"^[\w\-\.]+\.\.[\w\-\.]+$", opts.diff):
        sys.exit("Please specify valid input in the following format: "
                 "\'commit1..commit2\'")

    # Both modes run 'git reset --hard'; refuse to clobber uncommitted
    # work unless --force was given.
    if opts.commit or opts.diff:
        if not opts.force and tree_is_dirty():
            sys.exit("The current Git tree is dirty (see 'git status'). "
                     "Running this script may\ndelete important data since it "
                     "calls 'git reset --hard' for some performance\nreasons. "
                     " Please run this script in a clean Git tree or pass "
                     "'--force' if you\nwant to ignore this warning and "
                     "continue.")

    return opts
def main():
    """Main function of this module.

    With --commit or --diff, hard-resets the tree to each end of the
    commit range, diffs the undefined-symbol sets and prints symbols
    (with their referencing files) that the newer commit introduces.
    Without options, reports all undefined symbols of the current tree.
    (Python 2 print statements.)
    """
    opts = parse_options()
    if opts.commit or opts.diff:
        head = get_head()
        # get commit range
        commit_a = None
        commit_b = None
        if opts.commit:
            # single commit: compare against its parent
            commit_a = opts.commit + "~"
            commit_b = opts.commit
        elif opts.diff:
            split = opts.diff.split("..")
            commit_a = split[0]
            commit_b = split[1]
        undefined_a = {}
        undefined_b = {}
        # get undefined items before the commit
        execute("git reset --hard %s" % commit_a)
        undefined_a = check_symbols()
        # get undefined items for the commit
        execute("git reset --hard %s" % commit_b)
        undefined_b = check_symbols()
        # report cases that are present for the commit but not before
        for feature in sorted(undefined_b):
            # feature has not been undefined before
            if not feature in undefined_a:
                files = sorted(undefined_b.get(feature))
                print "%s\t%s" % (feature, ", ".join(files))
            # check if there are new files that reference the undefined feature
            else:
                files = sorted(undefined_b.get(feature) -
                               undefined_a.get(feature))
                if files:
                    print "%s\t%s" % (feature, ", ".join(files))
        # reset to head
        execute("git reset --hard %s" % head)
    # default to check the entire tree
    else:
        undefined = check_symbols()
        for feature in sorted(undefined):
            files = sorted(undefined.get(feature))
            print "%s\t%s" % (feature, ", ".join(files))
def execute(cmd):
    """Execute %cmd in a shell and return its stdout (stderr is merged
    into stdout).  Exit with the output as message in case of error."""
    proc = Popen(cmd, stdout=PIPE, stderr=STDOUT, shell=True)
    output, _ = proc.communicate()  # block until the command finishes
    if proc.returncode:  # non-zero (or negative on signal) means failure
        sys.exit(output)
    return output
def tree_is_dirty():
    """Return true if the current working tree is dirty (i.e., if any file has
    been added, deleted, modified, renamed or copied but not committed)."""
    stdout = execute("git status --porcelain")
    # BUGFIX: iterating a string yields single *characters*, so the old
    # 'for line in stdout' checked every character of the whole output
    # (including file names) against the status-code set.  Iterate real
    # lines instead; the first two columns hold the porcelain status codes.
    for line in stdout.splitlines():
        if re.findall(r"[URMADC]{1}", line[:2]):
            return True
    return False
def get_head():
    """Return commit hash of current HEAD."""
    return execute("git rev-parse HEAD").strip('\n')
def check_symbols():
    """Find undefined Kconfig symbols and return a dict with the symbol as key
    and a list of referencing files as value.

    Walks every file tracked by git: Kconfig files contribute symbol
    definitions (and references via if/select/depends), all other files
    contribute CONFIG_* references.  A referenced symbol with no
    definition is reported.
    """
    source_files = []
    kconfig_files = []
    defined_features = set()
    referenced_features = dict()  # {feature: [files]}
    # use 'git ls-files' to get the worklist
    stdout = execute("git ls-files")
    if len(stdout) > 0 and stdout[-1] == "\n":
        stdout = stdout[:-1]
    for gitfile in stdout.rsplit("\n"):
        # skip git internals, logs, directories and the tools/ tree
        if ".git" in gitfile or "ChangeLog" in gitfile or \
                ".log" in gitfile or os.path.isdir(gitfile) or \
                gitfile.startswith("tools/"):
            continue
        if REGEX_FILE_KCONFIG.match(gitfile):
            kconfig_files.append(gitfile)
        else:
            # all non-Kconfig files are checked for consistency
            source_files.append(gitfile)
    for sfile in source_files:
        parse_source_file(sfile, referenced_features)
    for kfile in kconfig_files:
        parse_kconfig_file(kfile, defined_features, referenced_features)
    undefined = {}  # {feature: [files]}
    for feature in sorted(referenced_features):
        # filter some false positives
        if feature == "FOO" or feature == "BAR" or \
                feature == "FOO_BAR" or feature == "XXX":
            continue
        if feature not in defined_features:
            if feature.endswith("_MODULE"):
                # avoid false positives for kernel modules
                if feature[:-len("_MODULE")] in defined_features:
                    continue
            undefined[feature] = referenced_features.get(feature)
    return undefined
def parse_source_file(sfile, referenced_features):
    """Parse @sfile for referenced Kconfig features and record them in
    @referenced_features ({feature: set of referencing files})."""
    with open(sfile, "r") as stream:
        content = stream.readlines()
    for line in content:
        # cheap pre-filter before running the regex
        if "CONFIG_" not in line:
            continue
        for feature in REGEX_SOURCE_FEATURE.findall(line):
            if not REGEX_FILTER_FEATURES.search(feature):
                continue
            referenced_features.setdefault(feature, set()).add(sfile)
def get_features_in_line(line):
    """Return mentioned Kconfig features in @line as a list."""
    matches = REGEX_FEATURE.findall(line)
    return matches
def parse_kconfig_file(kfile, defined_features, referenced_features):
    """Parse @kfile and update feature definitions and references.

    @defined_features collects symbols declared via (menu)config;
    @referenced_features maps symbols mentioned in if/select/depends
    statements to the set of Kconfig files that reference them.
    """
    lines = []
    skip = False
    with open(kfile, "r") as stream:
        lines = stream.readlines()
    for i in range(len(lines)):
        line = lines[i]
        line = line.strip('\n')
        line = line.split("#")[0]  # ignore comments
        if REGEX_KCONFIG_DEF.match(line):
            feature_def = REGEX_KCONFIG_DEF.findall(line)
            defined_features.add(feature_def[0])
            skip = False
        elif REGEX_KCONFIG_HELP.match(line):
            skip = True
        elif skip:
            # ignore content of help messages
            pass
        elif REGEX_KCONFIG_STMT.match(line):
            features = get_features_in_line(line)
            # multi-line statements
            # NOTE: bumping 'i' here does not advance the outer for-loop,
            # so continuation lines are visited again by the main loop;
            # that is harmless because they cannot match the statement
            # regex on their own and the reference sets deduplicate.
            while line.endswith("\\"):
                i += 1
                line = lines[i]
                line = line.strip('\n')
                features.extend(get_features_in_line(line))
            for feature in set(features):
                paths = referenced_features.get(feature, set())
                paths.add(kfile)
                referenced_features[feature] = paths
# Entry point when executed as a script (no effect on import).
if __name__ == "__main__":
    main()
| gpl-2.0 |
arvinsingla/CouchPotatoServer | libs/CodernityDB/migrate.py | 81 | 1317 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2011-2013 Codernity (http://codernity.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from CodernityDB.database import Database
import shutil
import os
def migrate(source, destination):
    """Copy all records and custom index definitions from the database at
    *source* into a freshly created database at *destination*.

    Returns True on completion.  Very basic for now.
    """
    src_db = Database(source)
    dst_db = Database(destination)
    src_db.open()
    dst_db.create()
    dst_db.close()
    # Copy custom index definitions; '00id.py' is skipped (presumably the
    # default id index that create() already provides).
    src_indexes = os.path.join(src_db.path, '_indexes')
    dst_indexes = os.path.join(dst_db.path, '_indexes')
    for index_file in os.listdir(src_indexes):
        if index_file == '00id.py':
            continue
        shutil.copyfile(os.path.join(src_indexes, index_file),
                        os.path.join(dst_indexes, index_file))
    dst_db.open()
    for record in src_db.all('id'):
        del record['_rev']  # revisions are local to the source database
        dst_db.insert(record)
    return True
# Command line usage: migrate.py <source_path> <destination_path>
if __name__ == '__main__':
    import sys
    migrate(sys.argv[1], sys.argv[2])
| gpl-3.0 |
kerr-huang/SL4A | python/src/Lib/test/test_types.py | 56 | 25078 | # Python test set -- part 6, built-in types
from test.test_support import run_unittest, have_unicode, run_with_locale
import unittest
import sys
import locale
class TypesTests(unittest.TestCase):
    def test_truth_values(self):
        """Zeros, empties and None are false; non-zero values, non-empty
        containers, functions, classes, modules and instances are true.
        (Python 2: uses long literals such as 0L.)"""
        if None: self.fail('None is true instead of false')
        if 0: self.fail('0 is true instead of false')
        if 0L: self.fail('0L is true instead of false')
        if 0.0: self.fail('0.0 is true instead of false')
        if '': self.fail('\'\' is true instead of false')
        if not 1: self.fail('1 is false instead of true')
        if not 1L: self.fail('1L is false instead of true')
        if not 1.0: self.fail('1.0 is false instead of true')
        if not 'x': self.fail('\'x\' is false instead of true')
        if not {'x': 1}: self.fail('{\'x\': 1} is false instead of true')
        def f(): pass
        class C: pass
        import sys
        x = C()
        if not f: self.fail('f is false instead of true')
        if not C: self.fail('C is false instead of true')
        if not sys: self.fail('sys is false instead of true')
        if not x: self.fail('x is false instead of true')
def test_boolean_ops(self):
if 0 or 0: self.fail('0 or 0 is true instead of false')
if 1 and 1: pass
else: self.fail('1 and 1 is false instead of true')
if not 1: self.fail('not 1 is true instead of false')
    def test_comparisons(self):
        """Chained comparisons over int, long, float and str, plus `is`.
        (Python 2: uses long literals such as 0L.)"""
        if 0 < 1 <= 1 == 1 >= 1 > 0 != 1: pass
        else: self.fail('int comparisons failed')
        if 0L < 1L <= 1L == 1L >= 1L > 0L != 1L: pass
        else: self.fail('long int comparisons failed')
        if 0.0 < 1.0 <= 1.0 == 1.0 >= 1.0 > 0.0 != 1.0: pass
        else: self.fail('float comparisons failed')
        if '' < 'a' <= 'a' == 'a' < 'abc' < 'abd' < 'b': pass
        else: self.fail('string comparisons failed')
        if None is None: pass
        else: self.fail('identity test failed')
def test_float_constructor(self):
self.assertRaises(ValueError, float, '')
self.assertRaises(ValueError, float, '5\0')
    def test_zero_division(self):
        """Division, floor division and modulo by zero raise
        ZeroDivisionError for floats and int/long mixes.
        (Python 2: uses long literals such as 0L.)"""
        try: 5.0 / 0.0
        except ZeroDivisionError: pass
        else: self.fail("5.0 / 0.0 didn't raise ZeroDivisionError")
        try: 5.0 // 0.0
        except ZeroDivisionError: pass
        else: self.fail("5.0 // 0.0 didn't raise ZeroDivisionError")
        try: 5.0 % 0.0
        except ZeroDivisionError: pass
        else: self.fail("5.0 % 0.0 didn't raise ZeroDivisionError")
        try: 5 / 0L
        except ZeroDivisionError: pass
        else: self.fail("5 / 0L didn't raise ZeroDivisionError")
        try: 5 // 0L
        except ZeroDivisionError: pass
        else: self.fail("5 // 0L didn't raise ZeroDivisionError")
        try: 5 % 0L
        except ZeroDivisionError: pass
        else: self.fail("5 % 0L didn't raise ZeroDivisionError")
    def test_numeric_types(self):
        """Cross-type equality of int/long/float, no-argument constructor
        defaults, and truncation behavior of int()/long().
        (Python 2: long() and long literals.)"""
        if 0 != 0L or 0 != 0.0 or 0L != 0.0: self.fail('mixed comparisons')
        if 1 != 1L or 1 != 1.0 or 1L != 1.0: self.fail('mixed comparisons')
        if -1 != -1L or -1 != -1.0 or -1L != -1.0:
            self.fail('int/long/float value not equal')
        # calling built-in types without argument must return 0
        if int() != 0: self.fail('int() does not return 0')
        if long() != 0L: self.fail('long() does not return 0L')
        if float() != 0.0: self.fail('float() does not return 0.0')
        if int(1.9) == 1 == int(1.1) and int(-1.1) == -1 == int(-1.9): pass
        else: self.fail('int() does not round properly')
        if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
        else: self.fail('long() does not round properly')
        if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
        else: self.fail('float() does not work properly')
def test_float_to_string(self):
def test(f, result):
self.assertEqual(f.__format__('e'), result)
self.assertEqual('%e' % f, result)
# test all 2 digit exponents, both with __format__ and with
# '%' formatting
for i in range(-99, 100):
test(float('1.5e'+str(i)), '1.500000e{0:+03d}'.format(i))
# test some 3 digit exponents
self.assertEqual(1.5e100.__format__('e'), '1.500000e+100')
self.assertEqual('%e' % 1.5e100, '1.500000e+100')
self.assertEqual(1.5e101.__format__('e'), '1.500000e+101')
self.assertEqual('%e' % 1.5e101, '1.500000e+101')
self.assertEqual(1.5e-100.__format__('e'), '1.500000e-100')
self.assertEqual('%e' % 1.5e-100, '1.500000e-100')
self.assertEqual(1.5e-101.__format__('e'), '1.500000e-101')
self.assertEqual('%e' % 1.5e-101, '1.500000e-101')
    def test_normal_integers(self):
        """Small-int caching, basic int arithmetic, and the automatic
        int-to-long overflow at the sys.maxint boundary.
        (Python 2: sys.maxint and the long type.)"""
        # Ensure the first 256 integers are shared
        a = 256
        b = 128*2
        if a is not b: self.fail('256 is not shared')
        if 12 + 24 != 36: self.fail('int op')
        if 12 + (-24) != -12: self.fail('int op')
        if (-12) + 24 != 12: self.fail('int op')
        if (-12) + (-24) != -36: self.fail('int op')
        if not 12 < 24: self.fail('int op')
        if not -24 < -12: self.fail('int op')
        # Test for a particular bug in integer multiply
        xsize, ysize, zsize = 238, 356, 4
        if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
            self.fail('int mul commutativity')
        # And another.
        m = -sys.maxint - 1
        for divisor in 1, 2, 4, 8, 16, 32:
            j = m // divisor
            prod = divisor * j
            if prod != m:
                self.fail("%r * %r == %r != %r" % (divisor, j, prod, m))
            if type(prod) is not int:
                self.fail("expected type(prod) to be int, not %r" %
                          type(prod))
        # Check for expected * overflow to long.
        for divisor in 1, 2, 4, 8, 16, 32:
            j = m // divisor - 1
            prod = divisor * j
            if type(prod) is not long:
                self.fail("expected type(%r) to be long, not %r" %
                          (prod, type(prod)))
        # Check for expected * overflow to long.
        m = sys.maxint
        for divisor in 1, 2, 4, 8, 16, 32:
            j = m // divisor + 1
            prod = divisor * j
            if type(prod) is not long:
                self.fail("expected type(%r) to be long, not %r" %
                          (prod, type(prod)))
    def test_long_integers(self):
        """Long arithmetic, int()/long() conversions around sys.maxint,
        and ValueError on negative shift counts.
        (Python 2: the long type and L-suffixed literals.)"""
        if 12L + 24L != 36L: self.fail('long op')
        if 12L + (-24L) != -12L: self.fail('long op')
        if (-12L) + 24L != 12L: self.fail('long op')
        if (-12L) + (-24L) != -36L: self.fail('long op')
        if not 12L < 24L: self.fail('long op')
        if not -24L < -12L: self.fail('long op')
        x = sys.maxint
        if int(long(x)) != x: self.fail('long op')
        try: y = int(long(x)+1L)
        except OverflowError: self.fail('long op')
        if not isinstance(y, long): self.fail('long op')
        x = -x
        if int(long(x)) != x: self.fail('long op')
        x = x-1
        if int(long(x)) != x: self.fail('long op')
        try: y = int(long(x)-1L)
        except OverflowError: self.fail('long op')
        if not isinstance(y, long): self.fail('long op')
        try: 5 << -5
        except ValueError: pass
        else: self.fail('int negative shift <<')
        try: 5L << -5L
        except ValueError: pass
        else: self.fail('long negative shift <<')
        try: 5 >> -5
        except ValueError: pass
        else: self.fail('int negative shift >>')
        try: 5L >> -5L
        except ValueError: pass
        else: self.fail('long negative shift >>')
def test_floats(self):
if 12.0 + 24.0 != 36.0: self.fail('float op')
if 12.0 + (-24.0) != -12.0: self.fail('float op')
if (-12.0) + 24.0 != 12.0: self.fail('float op')
if (-12.0) + (-24.0) != -36.0: self.fail('float op')
if not 12.0 < 24.0: self.fail('float op')
if not -24.0 < -12.0: self.fail('float op')
    def test_strings(self):
        """len, concatenation, repetition, membership, %-formatting and
        extended slicing for str (and unicode when available).
        (Python 2: unicode() and L-suffixed slice indices.)"""
        if len('') != 0: self.fail('len(\'\')')
        if len('a') != 1: self.fail('len(\'a\')')
        if len('abcdef') != 6: self.fail('len(\'abcdef\')')
        if 'xyz' + 'abcde' != 'xyzabcde': self.fail('string concatenation')
        if 'xyz'*3 != 'xyzxyzxyz': self.fail('string repetition *3')
        if 0*'abcde' != '': self.fail('string repetition 0*')
        if min('abc') != 'a' or max('abc') != 'c': self.fail('min/max string')
        if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
        else: self.fail('in/not in string')
        x = 'x'*103
        if '%s!'%x != x+'!': self.fail('nasty string formatting bug')
        #extended slices for strings
        a = '0123456789'
        self.assertEqual(a[::], a)
        self.assertEqual(a[::2], '02468')
        self.assertEqual(a[1::2], '13579')
        self.assertEqual(a[::-1],'9876543210')
        self.assertEqual(a[::-2], '97531')
        self.assertEqual(a[3::-2], '31')
        self.assertEqual(a[-100:100:], a)
        self.assertEqual(a[100:-100:-1], a[::-1])
        self.assertEqual(a[-100L:100L:2L], '02468')
        if have_unicode:
            # the same slicing behavior must hold for unicode strings
            a = unicode('0123456789', 'ascii')
            self.assertEqual(a[::], a)
            self.assertEqual(a[::2], unicode('02468', 'ascii'))
            self.assertEqual(a[1::2], unicode('13579', 'ascii'))
            self.assertEqual(a[::-1], unicode('9876543210', 'ascii'))
            self.assertEqual(a[::-2], unicode('97531', 'ascii'))
            self.assertEqual(a[3::-2], unicode('31', 'ascii'))
            self.assertEqual(a[-100:100:], a)
            self.assertEqual(a[100:-100:-1], a[::-1])
            self.assertEqual(a[-100L:100L:2L], unicode('02468', 'ascii'))
def test_type_function(self):
self.assertRaises(TypeError, type, 1, 2)
self.assertRaises(TypeError, type, 1, 2, 3, 4)
    def test_buffers(self):
        """The Python 2 buffer type: construction, hashing, repetition,
        concatenation, nesting with offset/size, and its read-only nature."""
        self.assertRaises(ValueError, buffer, 'asdf', -1)
        cmp(buffer("abc"), buffer("def")) # used to raise a warning: tp_compare didn't return -1, 0, or 1
        self.assertRaises(TypeError, buffer, None)
        a = buffer('asdf')
        hash(a)
        b = a * 5
        if a == b:
            self.fail('buffers should not be equal')
        if str(b) != ('asdf' * 5):
            self.fail('repeated buffer has wrong content')
        if str(a * 0) != '':
            self.fail('repeated buffer zero times has wrong content')
        if str(a + buffer('def')) != 'asdfdef':
            self.fail('concatenation of buffers yields wrong content')
        if str(buffer(a)) != 'asdf':
            self.fail('composing buffers failed')
        if str(buffer(a, 2)) != 'df':
            self.fail('specifying buffer offset failed')
        if str(buffer(a, 0, 2)) != 'as':
            self.fail('specifying buffer size failed')
        if str(buffer(a, 1, 2)) != 'sd':
            self.fail('specifying buffer offset and size failed')
        self.assertRaises(ValueError, buffer, buffer('asdf', 1), -1)
        if str(buffer(buffer('asdf', 0, 2), 0)) != 'as':
            self.fail('composing length-specified buffer failed')
        if str(buffer(buffer('asdf', 0, 2), 0, 5000)) != 'as':
            self.fail('composing length-specified buffer failed')
        if str(buffer(buffer('asdf', 0, 2), 0, -1)) != 'as':
            self.fail('composing length-specified buffer failed')
        if str(buffer(buffer('asdf', 0, 2), 1, 2)) != 's':
            self.fail('composing length-specified buffer failed')
        # buffers are read-only: item and slice assignment must fail
        try: a[1] = 'g'
        except TypeError: pass
        else: self.fail("buffer assignment should raise TypeError")
        try: a[0:1] = 'g'
        except TypeError: pass
        else: self.fail("buffer slice assignment should raise TypeError")
        # array.array() returns an object that does not implement a char buffer,
        # something which int() uses for conversion.
        import array
        try: int(buffer(array.array('c')))
        except TypeError: pass
        else: self.fail("char buffer (at C level) not working")
    def test_int__format__(self):
        """int.__format__ with d/c/x/X/o/b type codes, sign and alignment
        options, the alternate '#' form, zero padding, and rejection of
        invalid format specs.  (Python 2: also checks unicode specs.)"""
        def test(i, format_spec, result):
            # just make sure I'm not accidentally checking longs
            assert type(i) == int
            assert type(format_spec) == str
            self.assertEqual(i.__format__(format_spec), result)
            self.assertEqual(i.__format__(unicode(format_spec)), result)
        test(123456789, 'd', '123456789')
        test(123456789, 'd', '123456789')
        test(1, 'c', '\01')
        # sign and aligning are interdependent
        test(1, "-", '1')
        test(-1, "-", '-1')
        test(1, "-3", '  1')
        test(-1, "-3", ' -1')
        test(1, "+3", ' +1')
        test(-1, "+3", ' -1')
        test(1, " 3", '  1')
        test(-1, " 3", ' -1')
        test(1, " ", ' 1')
        test(-1, " ", '-1')
        # hex
        test(3, "x", "3")
        test(3, "X", "3")
        test(1234, "x", "4d2")
        test(-1234, "x", "-4d2")
        test(1234, "8x", "     4d2")
        test(-1234, "8x", "    -4d2")
        test(1234, "x", "4d2")
        test(-1234, "x", "-4d2")
        test(-3, "x", "-3")
        test(-3, "X", "-3")
        test(int('be', 16), "x", "be")
        test(int('be', 16), "X", "BE")
        test(-int('be', 16), "x", "-be")
        test(-int('be', 16), "X", "-BE")
        # octal
        test(3, "o", "3")
        test(-3, "o", "-3")
        test(65, "o", "101")
        test(-65, "o", "-101")
        test(1234, "o", "2322")
        test(-1234, "o", "-2322")
        test(1234, "-o", "2322")
        test(-1234, "-o", "-2322")
        test(1234, " o", " 2322")
        test(-1234, " o", "-2322")
        test(1234, "+o", "+2322")
        test(-1234, "+o", "-2322")
        # binary
        test(3, "b", "11")
        test(-3, "b", "-11")
        test(1234, "b", "10011010010")
        test(-1234, "b", "-10011010010")
        test(1234, "-b", "10011010010")
        test(-1234, "-b", "-10011010010")
        test(1234, " b", " 10011010010")
        test(-1234, " b", "-10011010010")
        test(1234, "+b", "+10011010010")
        test(-1234, "+b", "-10011010010")
        # alternate (#) formatting
        test(0, "#b", '0b0')
        test(0, "-#b", '0b0')
        test(1, "-#b", '0b1')
        test(-1, "-#b", '-0b1')
        test(-1, "-#5b", ' -0b1')
        test(1, "+#5b", ' +0b1')
        test(100, "+#b", '+0b1100100')
        test(100, "#012b", '0b0001100100')
        test(-100, "#012b", '-0b001100100')
        test(0, "#o", '0o0')
        test(0, "-#o", '0o0')
        test(1, "-#o", '0o1')
        test(-1, "-#o", '-0o1')
        test(-1, "-#5o", ' -0o1')
        test(1, "+#5o", ' +0o1')
        test(100, "+#o", '+0o144')
        test(100, "#012o", '0o0000000144')
        test(-100, "#012o", '-0o000000144')
        test(0, "#x", '0x0')
        test(0, "-#x", '0x0')
        test(1, "-#x", '0x1')
        test(-1, "-#x", '-0x1')
        test(-1, "-#5x", ' -0x1')
        test(1, "+#5x", ' +0x1')
        test(100, "+#x", '+0x64')
        test(100, "#012x", '0x0000000064')
        test(-100, "#012x", '-0x000000064')
        test(123456, "#012x", '0x000001e240')
        test(-123456, "#012x", '-0x00001e240')
        test(0, "#X", '0X0')
        test(0, "-#X", '0X0')
        test(1, "-#X", '0X1')
        test(-1, "-#X", '-0X1')
        test(-1, "-#5X", ' -0X1')
        test(1, "+#5X", ' +0X1')
        test(100, "+#X", '+0X64')
        test(100, "#012X", '0X0000000064')
        test(-100, "#012X", '-0X000000064')
        test(123456, "#012X", '0X000001E240')
        test(-123456, "#012X", '-0X00001E240')
        # make sure these are errors
        # precision disallowed
        self.assertRaises(ValueError, 3 .__format__, "1.3")
        # sign not allowed with 'c'
        self.assertRaises(ValueError, 3 .__format__, "+c")
        # format spec must be string
        self.assertRaises(TypeError, 3 .__format__, None)
        self.assertRaises(TypeError, 3 .__format__, 0)
        # ensure that only int and float type specifiers work
        for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
                            [chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if not format_spec in 'bcdoxXeEfFgGn%':
                self.assertRaises(ValueError, 0 .__format__, format_spec)
                self.assertRaises(ValueError, 1 .__format__, format_spec)
                self.assertRaises(ValueError, (-1) .__format__, format_spec)
        # ensure that float type specifiers work; format converts
        # the int to a float
        for format_spec in 'eEfFgG%':
            for value in [0, 1, -1, 100, -100, 1234567890, -1234567890]:
                self.assertEqual(value.__format__(format_spec),
                                 float(value).__format__(format_spec))
    def test_long__format__(self):
        # Exercise long.__format__ (Python 2): decimal, hex, octal, binary and
        # character presentation types, plus sign handling and error cases.
        def test(i, format_spec, result):
            # make sure we're not accidentally checking ints
            assert type(i) == long
            assert type(format_spec) == str
            # A str spec and its unicode equivalent must format identically.
            self.assertEqual(i.__format__(format_spec), result)
            self.assertEqual(i.__format__(unicode(format_spec)), result)

        # Values too large for a machine int.
        test(10**100, 'd', '1' + '0' * 100)
        test(10**100+100, 'd', '1' + '0' * 97 + '100')
        test(123456789L, 'd', '123456789')
        test(123456789L, 'd', '123456789')

        # sign and aligning are interdependent
        test(1L, "-", '1')
        test(-1L, "-", '-1')
        test(1L, "-3", ' 1')
        test(-1L, "-3", ' -1')
        test(1L, "+3", ' +1')
        test(-1L, "+3", ' -1')
        test(1L, " 3", ' 1')
        test(-1L, " 3", ' -1')
        test(1L, " ", ' 1')
        test(-1L, " ", '-1')

        # 'c' formats the value as the corresponding character.
        test(1L, 'c', '\01')

        # hex
        test(3L, "x", "3")
        test(3L, "X", "3")
        test(1234L, "x", "4d2")
        test(-1234L, "x", "-4d2")
        test(1234L, "8x", " 4d2")
        test(-1234L, "8x", " -4d2")
        test(1234L, "x", "4d2")
        test(-1234L, "x", "-4d2")
        test(-3L, "x", "-3")
        test(-3L, "X", "-3")
        test(long('be', 16), "x", "be")
        test(long('be', 16), "X", "BE")
        test(-long('be', 16), "x", "-be")
        test(-long('be', 16), "X", "-BE")

        # octal
        test(3L, "o", "3")
        test(-3L, "o", "-3")
        test(65L, "o", "101")
        test(-65L, "o", "-101")
        test(1234L, "o", "2322")
        test(-1234L, "o", "-2322")
        test(1234L, "-o", "2322")
        test(-1234L, "-o", "-2322")
        test(1234L, " o", " 2322")
        test(-1234L, " o", "-2322")
        test(1234L, "+o", "+2322")
        test(-1234L, "+o", "-2322")

        # binary
        test(3L, "b", "11")
        test(-3L, "b", "-11")
        test(1234L, "b", "10011010010")
        test(-1234L, "b", "-10011010010")
        test(1234L, "-b", "10011010010")
        test(-1234L, "-b", "-10011010010")
        test(1234L, " b", " 10011010010")
        test(-1234L, " b", "-10011010010")
        test(1234L, "+b", "+10011010010")
        test(-1234L, "+b", "-10011010010")

        # make sure these are errors
        # precision disallowed
        self.assertRaises(ValueError, 3L .__format__, "1.3")
        # sign not allowed with 'c'
        self.assertRaises(ValueError, 3L .__format__, "+c")
        # format spec must be string
        self.assertRaises(TypeError, 3L .__format__, None)
        self.assertRaises(TypeError, 3L .__format__, 0)
        # alternate specifier in wrong place
        self.assertRaises(ValueError, 1L .__format__, "#+5x")
        self.assertRaises(ValueError, 1L .__format__, "+5#x")

        # ensure that only int and float type specifiers work
        for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
                            [chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if not format_spec in 'bcdoxXeEfFgGn%':
                self.assertRaises(ValueError, 0L .__format__, format_spec)
                self.assertRaises(ValueError, 1L .__format__, format_spec)
                self.assertRaises(ValueError, (-1L) .__format__, format_spec)

        # ensure that float type specifiers work; format converts
        # the long to a float
        for format_spec in 'eEfFgG%':
            for value in [0L, 1L, -1L, 100L, -100L, 1234567890L, -1234567890L]:
                self.assertEqual(value.__format__(format_spec),
                                 float(value).__format__(format_spec))
    @run_with_locale('LC_NUMERIC', 'en_US.UTF8')
    def test_float__format__locale(self):
        # test locale support for __format__ code 'n'
        # The 'n' type must agree with locale-aware '%g' / '%.10g' formatting
        # across twenty orders of magnitude.
        for i in range(-10, 10):
            x = 1234567890.0 * (10.0 ** i)
            self.assertEqual(locale.format('%g', x, grouping=True), format(x, 'n'))
            self.assertEqual(locale.format('%.10g', x, grouping=True), format(x, '.10n'))
    @run_with_locale('LC_NUMERIC', 'en_US.UTF8')
    def test_int__format__locale(self):
        # test locale support for __format__ code 'n' for integers
        x = 123456789012345678901234567890
        for i in range(0, 30):
            self.assertEqual(locale.format('%d', x, grouping=True), format(x, 'n'))

            # move to the next integer to test
            x = x // 10

        # With a fixed field width, values of different lengths must all pad
        # to the same total width regardless of alignment (>, <, ^).
        rfmt = ">20n"
        lfmt = "<20n"
        cfmt = "^20n"
        for x in (1234, 12345, 123456, 1234567, 12345678, 123456789, 1234567890, 12345678900):
            self.assertEqual(len(format(0, rfmt)), len(format(x, rfmt)))
            self.assertEqual(len(format(0, lfmt)), len(format(x, lfmt)))
            self.assertEqual(len(format(0, cfmt)), len(format(x, cfmt)))
    def test_float__format__(self):
        # these should be rewritten to use both format(x, spec) and
        # x.__format__(spec)
        def test(f, format_spec, result):
            assert type(f) == float
            assert type(format_spec) == str
            # A str spec and its unicode equivalent must format identically.
            self.assertEqual(f.__format__(format_spec), result)
            self.assertEqual(f.__format__(unicode(format_spec)), result)

        test(0.0, 'f', '0.000000')

        # the default is 'g', except for empty format spec
        test(0.0, '', '0.0')
        test(0.01, '', '0.01')
        test(0.01, 'g', '0.01')

        # test for issue 3411
        test(1.23, '1', '1.23')
        test(-1.23, '1', '-1.23')
        test(1.23, '1g', '1.23')
        test(-1.23, '1g', '-1.23')

        # Sign handling under the 'g' presentation type.
        test( 1.0, ' g', ' 1')
        test(-1.0, ' g', '-1')
        test( 1.0, '+g', '+1')
        test(-1.0, '+g', '-1')
        test(1.1234e200, 'g', '1.1234e+200')
        test(1.1234e200, 'G', '1.1234E+200')

        # Fixed-point 'f'/'F'.
        test(1.0, 'f', '1.000000')
        test(-1.0, 'f', '-1.000000')
        test( 1.0, ' f', ' 1.000000')
        test(-1.0, ' f', '-1.000000')
        test( 1.0, '+f', '+1.000000')
        test(-1.0, '+f', '-1.000000')
        test(1.1234e90, 'f', '1.1234e+90')
        test(1.1234e90, 'F', '1.1234e+90')
        test(1.1234e200, 'f', '1.1234e+200')
        test(1.1234e200, 'F', '1.1234e+200')

        # Scientific 'e'/'E'.
        test( 1.0, 'e', '1.000000e+00')
        test(-1.0, 'e', '-1.000000e+00')
        test( 1.0, 'E', '1.000000E+00')
        test(-1.0, 'E', '-1.000000E+00')
        test(1.1234e20, 'e', '1.123400e+20')
        test(1.1234e20, 'E', '1.123400E+20')

        # No format code means use g, but must have a decimal
        # and a number after the decimal. This is tricky, because
        # a totaly empty format specifier means something else.
        # So, just use a sign flag
        test(1e200, '+g', '+1e+200')
        test(1e200, '+', '+1.0e+200')
        test(1.1e200, '+g', '+1.1e+200')
        test(1.1e200, '+', '+1.1e+200')

        # % formatting
        test(-1.0, '%', '-100.000000%')

        # format spec must be string
        self.assertRaises(TypeError, 3.0.__format__, None)
        self.assertRaises(TypeError, 3.0.__format__, 0)

        # other format specifiers shouldn't work on floats,
        # in particular int specifiers
        for format_spec in ([chr(x) for x in range(ord('a'), ord('z')+1)] +
                            [chr(x) for x in range(ord('A'), ord('Z')+1)]):
            if not format_spec in 'eEfFgGn%':
                self.assertRaises(ValueError, format, 0.0, format_spec)
                self.assertRaises(ValueError, format, 1.0, format_spec)
                self.assertRaises(ValueError, format, -1.0, format_spec)
                self.assertRaises(ValueError, format, 1e100, format_spec)
                self.assertRaises(ValueError, format, -1e100, format_spec)
                self.assertRaises(ValueError, format, 1e-100, format_spec)
                self.assertRaises(ValueError, format, -1e-100, format_spec)

        # Alternate formatting is not supported
        self.assertRaises(ValueError, format, 0.0, '#')
        self.assertRaises(ValueError, format, 0.0, '#20f')
def test_main():
    # Entry point used by the regression-test framework: run the whole suite.
    run_unittest(TypesTests)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    test_main()
| apache-2.0 |
gwsu2008/automation | python/rotate-iam-keys.py | 1 | 4429 | import json
import base64
import os
import sys
import time
from datetime import datetime

import boto3
from botocore.exceptions import ClientError
# AWS service clients, created once at import time (reused across warm
# Lambda invocations).
iam_client = boto3.client('iam')
secret_client = boto3.client('secretsmanager')
kms_client = boto3.client('kms')
sqs_client = boto3.client('sqs')

# NOTE(review): captured at import time, not per invocation — key ages are
# measured against container start, not handler invocation time.
now = datetime.now()

# Configuration from environment variables.
iam_users = os.getenv('IAM_USER_LIST', None)    # comma-separated IAM user names
secret_id = os.getenv('SECRET_ARN', None)       # Secrets Manager secret holding the keys
kms_key_id = os.getenv('CD_KMS_KEYID', None)    # KMS key used to encrypt the secret blob
qurl = os.getenv('SQS_URL', None)               # SQS queue notified of keys tagged for deletion
def create_new_key(username):
    """Create a fresh IAM access key for *username*.

    Returns a ``(access_key_id, secret_access_key)`` tuple.  Any boto3
    ``ClientError`` is re-raised as a plain ``Exception`` carrying its text.
    """
    try:
        new_key = iam_client.create_access_key(UserName=username)['AccessKey']
    except ClientError as err:
        raise Exception(str(err))
    return new_key['AccessKeyId'], new_key['SecretAccessKey']
def main(event, context):
    """Lambda handler: rotate access keys for every user in IAM_USER_LIST.

    For each user: old keys are tagged for deletion (and actually deleted on
    the *next* run, via the ``DeleteAccessKey`` tag), a new key pair is
    generated, and the full key map is KMS-encrypted and stored back into
    Secrets Manager.  Replaced key ids are announced on an SQS queue.
    """
    if iam_users is None:
        print('IAM_USER_LIST is empty. No IAM key rotation required')
        return

    # Load and decrypt the current user -> key map from Secrets Manager.
    # NOTE(review): base64.decodestring/encodestring are deprecated aliases
    # (removed in Python 3.9) — should be decodebytes/encodebytes.
    try:
        response = secret_client.get_secret_value(SecretId=secret_id)
        secret_text = kms_client.decrypt(CiphertextBlob=base64.decodestring(response['SecretBinary']))['Plaintext'].decode('utf-8')
        secret_json = json.loads(secret_text)
    except ClientError as e:
        raise Exception(str(e))

    for username in iam_users.split(','):
        try:
            keys = iam_client.list_access_keys(UserName=username)
        except ClientError as e:
            # Skip users we cannot query rather than failing the whole run.
            print(str(e))
            continue

        if len(keys['AccessKeyMetadata']) == 0:
            print("User {} has no accessKey. Generate new accessKey...".format(username))
            accessKey, secretKey = create_new_key(username)
            secret_json[username] = {"accessKey":accessKey, "secretKey":secretKey}
            # NOTE(review): this `return` exits main() entirely — remaining
            # users are not rotated and the new key above is never persisted
            # to Secrets Manager. Probably intended to persist and `continue`.
            return

        # Drop any key already marked Inactive.
        for key in keys['AccessKeyMetadata']:
            if key['Status']=='Inactive':
                print('Delete inactive key {} for user {}'.format(key['AccessKeyId'], username))
                iam_client.delete_access_key(UserName=username, AccessKeyId=key['AccessKeyId'])

        # Delete any accessKeys with tag DeleteAccessKey
        # (keys tagged by the previous rotation run).
        keys = iam_client.list_access_keys(UserName=username)
        tags = iam_client.list_user_tags(UserName=username, MaxItems=100)
        for tag in tags['Tags']:
            if tag['Key'] == 'DeleteAccessKey':
                print('Tag delete accessKey {} for user {}'.format(tag['Value'], username))
                try:
                    iam_client.delete_access_key(UserName=username, AccessKeyId=tag['Value'])
                except ClientError as e:
                    # Best effort: the key may already be gone.
                    print('Can not delete {} for user {}'.format(tag['Value'], username))
                    pass
                iam_client.untag_user(UserName=username, TagKeys=['DeleteAccessKey'])

        # IAM allows at most two access keys per user; clear both to make
        # room for the new one.
        keys = iam_client.list_access_keys(UserName=username)
        if len(keys['AccessKeyMetadata']) == 2:
            print("User {} already reach maximum accessKey of 2. Deleting both keys")
            for key in keys['AccessKeyMetadata']:
                iam_client.delete_access_key(UserName=username, AccessKeyId=key['AccessKeyId'])

        # Tag every remaining key for deletion on the next rotation run.
        keys = iam_client.list_access_keys(UserName=username)
        delete_access_keys_list = list()
        for key in keys['AccessKeyMetadata']:
            duration = now - key['CreateDate'].replace(tzinfo=None)
            iam_client.tag_user(UserName=username, Tags=[{'Key': 'DeleteAccessKey', 'Value': key['AccessKeyId']}])
            print("Tag user {} accessKey {} with age {} days to be deleted".format(username, key['AccessKeyId'], duration.days))
            delete_access_keys_list.append(key['AccessKeyId'])

        print("Generate new accessKey and secretKey for user {} ...".format(username))
        accessKey, secretKey = create_new_key(username)
        secret_json[username] = {"accessKey":accessKey, "secretKey":secretKey}

        # Persist the updated key map (KMS-encrypted, base64-wrapped) and
        # announce the replaced keys on SQS.
        ciphertext_blob = kms_client.encrypt(KeyId=kms_key_id, Plaintext=json.dumps(secret_json).encode('utf-8'))['CiphertextBlob']
        try:
            response = secret_client.update_secret(SecretId=secret_id,SecretBinary=base64.encodestring(ciphertext_blob))
            print(response)
            for accessKey in delete_access_keys_list:
                sqs_client.send_message(QueueUrl=qurl,MessageBody=json.dumps({'user': username, 'accessKey': accessKey}), DelaySeconds=1)
        except ClientError as e:
            raise Exception(e)
    return
# BUG FIX: the guard compared __name__ against "main" (never true, so the
# script could not be run from the command line), and main() was called
# without the (event, context) arguments its signature requires.  `sys` was
# also never imported.
if __name__ == "__main__":
    sys.exit(main(None, None))
| gpl-2.0 |
run2/citytour | 4symantec/Lib/site-packages/pip/_vendor/lockfile/symlinklockfile.py | 487 | 2613 | from __future__ import absolute_import
import time
import os
from . import (LockBase, LockFailed, NotLocked, NotMyLock, LockTimeout,
AlreadyLocked)
class SymlinkLockFile(LockBase):
    """Lock access to a file using symlink(2).

    The lock is held while ``self.lock_file`` is a symlink pointing at this
    process's ``unique_name``; symlink creation is atomic, which gives the
    mutual exclusion.
    """

    def __init__(self, path, threaded=True, timeout=None):
        # super(SymlinkLockFile).__init(...)
        LockBase.__init__(self, path, threaded, timeout)
        # split it back!
        # Keep only the basename: the symlink target is interpreted relative
        # to the lock file's directory.
        self.unique_name = os.path.split(self.unique_name)[1]

    def acquire(self, timeout=None):
        # Hopefully unnecessary for symlink.
        #try:
        #    open(self.unique_name, "wb").close()
        #except IOError:
        #    raise LockFailed("failed to create %s" % self.unique_name)
        # NOTE(review): the and/or idiom means an explicit timeout of 0 is
        # falsy and silently falls back to self.timeout.
        timeout = timeout is not None and timeout or self.timeout
        end_time = time.time()
        if timeout is not None and timeout > 0:
            end_time += timeout

        while True:
            # Try and create a symbolic link to it.
            try:
                os.symlink(self.unique_name, self.lock_file)
            except OSError:
                # Link creation failed.  Maybe we've double-locked?
                if self.i_am_locking():
                    # Linked to out unique name. Proceed.
                    return
                else:
                    # Otherwise the lock creation failed.
                    if timeout is not None and time.time() > end_time:
                        if timeout > 0:
                            raise LockTimeout("Timeout waiting to acquire"
                                              " lock for %s" %
                                              self.path)
                        else:
                            # Non-positive timeout means fail immediately.
                            raise AlreadyLocked("%s is already locked" %
                                                self.path)
                    time.sleep(timeout/10 if timeout is not None else 0.1)
            else:
                # Link creation succeeded.  We're good to go.
                return

    def release(self):
        # Only the owner may release; raise otherwise.
        if not self.is_locked():
            raise NotLocked("%s is not locked" % self.path)
        elif not self.i_am_locking():
            raise NotMyLock("%s is locked, but not by me" % self.path)
        os.unlink(self.lock_file)

    def is_locked(self):
        # Any symlink at the lock path counts as "locked" (by anyone).
        return os.path.islink(self.lock_file)

    def i_am_locking(self):
        # We hold the lock iff the symlink points at our unique name.
        return os.path.islink(self.lock_file) and \
            os.readlink(self.lock_file) == self.unique_name

    def break_lock(self):
        # Forcibly remove the lock regardless of owner.
        if os.path.islink(self.lock_file):  # exists && link
            os.unlink(self.lock_file)
| mit |
xyrnwbtj/dockerAutomatedBuild | pyspider/database/sqlalchemy/taskdb.py | 5 | 5778 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<roy@binux.me>
# http://binux.me
# Created on 2014-12-04 22:33:43
import re
import six
import time
import json
from sqlalchemy import (create_engine, MetaData, Table, Column, Index,
Integer, String, Float, LargeBinary, sql, func)
from pyspider.libs import utils
from pyspider.database.base.taskdb import TaskDB as BaseTaskDB
from .sqlalchemybase import SplitTableMixin, result2dict
# Encoding used for values compared in WHERE clauses: utf-8 bytes on
# Python 3, text on Python 2.
if six.PY3:
    where_type = utils.utf8
else:
    where_type = utils.text
class TaskDB(SplitTableMixin, BaseTaskDB):
    """SQLAlchemy-backed task database.

    One physical table per project (split-table scheme); ``self.table`` is a
    single Table object whose ``.name`` is re-pointed at the current
    project's table before each statement.
    """
    __tablename__ = ''

    def __init__(self, url):
        # Template table; '__tablename__' is a placeholder replaced per
        # project via self.table.name = self._tablename(project).
        self.table = Table('__tablename__', MetaData(),
                           Column('taskid', String(64), primary_key=True, nullable=False),
                           Column('project', String(64)),
                           Column('url', String(1024)),
                           Column('status', Integer),
                           Column('schedule', LargeBinary),
                           Column('fetch', LargeBinary),
                           Column('process', LargeBinary),
                           Column('track', LargeBinary),
                           Column('lastcrawltime', Float(16)),
                           Column('updatetime', Float(16)),
                           )

        self.engine = create_engine(url, convert_unicode=True)
        self._list_project()

    def _create_project(self, project):
        # Create the backing table (plus a status index) for a new project.
        # Project names are restricted to word characters since they become
        # part of the table name.
        assert re.match(r'^\w+$', project) is not None
        if project in self.projects:
            return
        self.table.name = self._tablename(project)
        Index('status_%s_index' % self.table.name, self.table.c.status)
        self.table.create(self.engine)
        # Drop the index from the shared template so it is not re-created
        # for the next project.
        self.table.indexes.clear()

    @staticmethod
    def _parse(data):
        """Decode a DB row: bytes -> text, JSON blobs -> dicts."""
        for key, value in list(six.iteritems(data)):
            if isinstance(value, six.binary_type):
                data[key] = utils.text(value)
        for each in ('schedule', 'fetch', 'process', 'track'):
            if each in data:
                if data[each]:
                    if isinstance(data[each], bytearray):
                        data[each] = str(data[each])
                    data[each] = json.loads(data[each])
                else:
                    data[each] = {}
        return data

    @staticmethod
    def _stringify(data):
        """Inverse of _parse: dicts -> JSON strings, text -> utf-8 bytes (PY3)."""
        for each in ('schedule', 'fetch', 'process', 'track'):
            if each in data:
                data[each] = json.dumps(data[each])
        if six.PY3:
            for key, value in list(six.iteritems(data)):
                if isinstance(value, six.string_types):
                    data[key] = utils.utf8(value)
        return data

    def load_tasks(self, status, project=None, fields=None):
        # Yield tasks with the given status from one project (or all).
        if project and project not in self.projects:
            return
        if project:
            projects = [project, ]
        else:
            projects = self.projects

        columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
        for project in projects:
            self.table.name = self._tablename(project)
            for task in self.engine.execute(self.table.select()
                                            .with_only_columns(columns)
                                            .where(self.table.c.status == status)):
                yield self._parse(result2dict(columns, task))

    def get_task(self, project, taskid, fields=None):
        # Fetch a single task by id; returns None for unknown projects or
        # missing tasks.
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            return None

        self.table.name = self._tablename(project)
        columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
        for each in self.engine.execute(self.table.select()
                                        .with_only_columns(columns)
                                        .limit(1)
                                        .where(self.table.c.taskid == where_type(taskid))):
            return self._parse(result2dict(columns, each))

    def status_count(self, project):
        """Return a {status: count} mapping for the project's tasks."""
        result = dict()
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            return result
        self.table.name = self._tablename(project)
        for status, count in self.engine.execute(
                self.table.select()
                .with_only_columns((self.table.c.status, func.count(1)))
                .group_by(self.table.c.status)):
            result[status] = count
        return result

    def insert(self, project, taskid, obj={}):
        # Insert a new task row, creating the project's table on first use.
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            self._create_project(project)
            self._list_project()
        obj = dict(obj)
        obj['taskid'] = taskid
        obj['project'] = project
        obj['updatetime'] = time.time()
        self.table.name = self._tablename(project)
        return self.engine.execute(self.table.insert()
                                   .values(**self._stringify(obj)))

    def update(self, project, taskid, obj={}, **kwargs):
        # Update an existing task row; raises LookupError for an unknown
        # project.  kwargs are merged over obj.
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            raise LookupError
        self.table.name = self._tablename(project)
        obj = dict(obj)
        obj.update(kwargs)
        obj['updatetime'] = time.time()
        return self.engine.execute(self.table.update()
                                   .where(self.table.c.taskid == where_type(taskid))
                                   .values(**self._stringify(obj)))
| apache-2.0 |
chienlieu2017/it_management | odoo/odoo/workflow/instance.py | 20 | 4912 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import workitem
from odoo.workflow.helpers import Session
from odoo.workflow.helpers import Record
from odoo.workflow.workitem import WorkflowItem
class WorkflowInstance(object):
    """One running instance of a workflow, bound to a model record.

    ``self.instance`` holds the ``wkf_instance`` row as a dict (at minimum
    its ``id``).  Python 2 code (uses the ``<>`` operator).
    """
    def __init__(self, session, record, values):
        assert isinstance(session, Session)
        assert isinstance(record, Record)

        self.session = session
        self.record = record

        if not values:
            values = {}

        assert isinstance(values, dict)
        self.instance = values

    @classmethod
    def create(cls, session, record, workflow_id):
        """Insert a wkf_instance row, spawn workitems for every start
        activity, then run the instance once and return it."""
        assert isinstance(session, Session)
        assert isinstance(record, Record)
        assert isinstance(workflow_id, (int, long))

        cr = session.cr
        cr.execute('insert into wkf_instance (res_type,res_id,uid,wkf_id,state) values (%s,%s,%s,%s,%s) RETURNING id', (record.model, record.id, session.uid, workflow_id, 'active'))
        instance_id = cr.fetchone()[0]

        cr.execute('select * from wkf_activity where flow_start=True and wkf_id=%s', (workflow_id,))
        stack = []

        activities = cr.dictfetchall()
        for activity in activities:
            WorkflowItem.create(session, record, activity, instance_id, stack)

        cr.execute('SELECT * FROM wkf_instance WHERE id = %s', (instance_id,))
        values = cr.dictfetchone()
        wi = WorkflowInstance(session, record, values)
        wi.update()

        return wi

    def delete(self):
        # Remove every instance attached to this record (not just this one).
        self.session.cr.execute('delete from wkf_instance where res_id=%s and res_type=%s', (self.record.id, self.record.model))

    def validate(self, signal, force_running=False):
        """Deliver *signal* to every workitem of this instance; returns the
        first action pushed on the stack, or False."""
        assert isinstance(signal, basestring)
        assert isinstance(force_running, bool)

        cr = self.session.cr
        cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
        stack = []
        for i, work_item_values in enumerate(cr.dictfetchall()):
            if i > 0:
                # test if previous workitem has already processed this one
                cr.execute("select id from wkf_workitem where id=%s", (work_item_values['id'],))
                if not cr.fetchone():
                    continue
            wi = WorkflowItem(self.session, self.record, work_item_values)
            wi.process(signal=signal, force_running=force_running, stack=stack)
        # An action is returned
        self._update_end()
        return stack and stack[0] or False

    def update(self):
        # Re-process every workitem of the instance (no signal).
        cr = self.session.cr

        cr.execute("select * from wkf_workitem where inst_id=%s", (self.instance['id'],))
        for work_item_values in cr.dictfetchall():
            stack = []
            WorkflowItem(self.session, self.record, work_item_values).process(stack=stack)
        return self._update_end()

    def _update_end(self):
        """Mark the instance complete when every workitem has reached a
        flow_stop activity in state 'complete'; then wake up any parent
        instances waiting on this subflow."""
        cr = self.session.cr
        instance_id = self.instance['id']
        cr.execute('select wkf_id from wkf_instance where id=%s', (instance_id,))
        wkf_id = cr.fetchone()[0]
        cr.execute('select state,flow_stop from wkf_workitem w left join wkf_activity a on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
        ok=True
        for r in cr.fetchall():
            if (r[0]<>'complete') or not r[1]:
                ok=False
                break

        if ok:
            cr.execute('select distinct a.name from wkf_activity a left join wkf_workitem w on (a.id=w.act_id) where w.inst_id=%s', (instance_id,))
            act_names = cr.fetchall()
            cr.execute("update wkf_instance set state='complete' where id=%s", (instance_id,))
            cr.execute("update wkf_workitem set state='complete' where subflow_id=%s", (instance_id,))
            cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (instance_id,))
            for cur_instance_id, cur_model_name, cur_record_id in cr.fetchall():
                cur_record = Record(cur_model_name, cur_record_id)
                for act_name in act_names:
                    # Propagate a subflow-completion signal to each parent.
                    WorkflowInstance(self.session, cur_record, {'id':cur_instance_id}).validate('subflow.%s' % act_name[0])

        return ok
def create(session, record, workflow_id):
    """Module-level convenience wrapper around WorkflowInstance.create.

    BUG FIX: the original called ``WorkflowInstance(session, record)`` —
    the constructor requires a ``values`` argument, and ``create`` is a
    classmethod taking (session, record, workflow_id), so the call raised
    TypeError.  Dispatch to the classmethod directly instead.
    """
    return WorkflowInstance.create(session, record, workflow_id)
def delete(session, record):
    """Delete all workflow instances attached to *record*.

    BUG FIX: the constructor requires a ``values`` argument; the original
    two-argument call raised TypeError.  An empty dict is sufficient since
    ``delete`` only uses the session and record.
    """
    return WorkflowInstance(session, record, {}).delete()
def validate(session, record, instance_id, signal, force_running=False):
    """Send *signal* to workflow instance *instance_id*.

    BUG FIX: the original constructed the instance without the required
    ``values`` dict and passed ``instance_id`` as the ``signal`` argument of
    the instance method (whose signature is ``validate(signal,
    force_running)``).  Route the id through ``values`` instead.
    """
    return WorkflowInstance(session, record, {'id': instance_id}).validate(signal, force_running)
def update(session, record, instance_id):
    """Re-process all workitems of instance *instance_id*.

    BUG FIX: the instance method ``update()`` takes no arguments and reads
    the id from ``self.instance['id']``; the original passed ``instance_id``
    to ``update`` and omitted the required ``values`` constructor argument.
    """
    return WorkflowInstance(session, record, {'id': instance_id}).update()
def _update_end(session, record, instance_id):
    """Check/mark completion of instance *instance_id*.

    BUG FIX: as with ``update`` above — ``_update_end()`` takes no
    arguments and reads the id from ``self.instance['id']``.
    """
    return WorkflowInstance(session, record, {'id': instance_id})._update_end()
| gpl-3.0 |
ericfc/django | tests/custom_pk/models.py | 99 | 1256 | # -*- coding: utf-8 -*-
"""
Using a custom primary key
By default, Django adds an ``"id"`` field to each model. But you can override
this behavior by explicitly adding ``primary_key=True`` to a field.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from .fields import MyAutoField
@python_2_unicode_compatible
class Employee(models.Model):
    # ``employee_code`` replaces Django's implicit ``id`` primary key and is
    # stored in the database column ``code``.
    employee_code = models.IntegerField(primary_key=True, db_column='code')
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)

    class Meta:
        ordering = ('last_name', 'first_name')

    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)
@python_2_unicode_compatible
class Business(models.Model):
    # A CharField serving as the primary key.
    name = models.CharField(max_length=20, primary_key=True)
    employees = models.ManyToManyField(Employee)

    class Meta:
        # Override the default pluralization ("businesss").
        verbose_name_plural = 'businesses'

    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Bar(models.Model):
    # Custom auto-incrementing field (defined in .fields) used as the pk.
    id = MyAutoField(primary_key=True, db_index=True)

    def __str__(self):
        return repr(self.pk)
class Foo(models.Model):
    # Foreign key targeting a model whose primary key is a custom field.
    bar = models.ForeignKey(Bar)
| bsd-3-clause |
jstoxrocky/statsmodels | statsmodels/sandbox/tsa/fftarma.py | 30 | 16438 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 14 19:53:25 2009
Author: josef-pktd
generate arma sample using fft with all the lfilter it looks slow
to get the ma representation first
apply arma filter (in ar representation) to time series to get white noise
but seems slow to be useful for fast estimation for nobs=10000
change/check: instead of using marep, use fft-transform of ar and ma
separately, use ratio check theory is correct and example works
DONE : feels much faster than lfilter
-> use for estimation of ARMA
-> use pade (scipy.misc) approximation to get starting polynomial
from autocorrelation (is autocorrelation of AR(p) related to marep?)
check if pade is fast, not for larger arrays ?
maybe pade doesn't do the right thing for this, not tried yet
scipy.pade([ 1. , 0.6, 0.25, 0.125, 0.0625, 0.1],2)
raises LinAlgError: singular matrix
also doesn't have roots inside unit circle ??
-> even without initialization, it might be fast for estimation
-> how do I enforce stationarity and invertibility,
need helper function
get function drop imag if close to zero from numpy/scipy source, where?
"""
from __future__ import print_function
import numpy as np
import numpy.fft as fft
#import scipy.fftpack as fft
from scipy import signal
#from try_var_convolve import maxabs
from statsmodels.sandbox.archive.linalg_decomp_1 import OneTimeProperty
from statsmodels.tsa.arima_process import ArmaProcess
#trying to convert old experiments to a class
class ArmaFft(ArmaProcess):
    '''fft tools for arma processes

    This class contains several methods that are providing the same or similar
    returns to try out and test different implementations.

    Notes
    -----
    TODO:
    check whether we don't want to fix maxlags, and create new instance if
    maxlag changes. usage for different lengths of timeseries ?
    or fix frequency and length for fft

    check default frequencies w, terminology norw  n_or_w

    some ffts are currently done without padding with zeros

    returns for spectral density methods needs checking, is it always the power
    spectrum hw*hw.conj()

    normalization of the power spectrum, spectral density: not checked yet, for
    example no variance of underlying process is used

    '''

    def __init__(self, ar, ma, n):
        #duplicates now that are subclassing ArmaProcess
        super(ArmaFft, self).__init__(ar, ma)

        self.ar = np.asarray(ar)
        self.ma = np.asarray(ma)
        self.nobs = n
        #could make the polynomials into cached attributes
        self.arpoly = np.polynomial.Polynomial(ar)
        self.mapoly = np.polynomial.Polynomial(ma)
        self.nar = len(ar)  #1d only currently
        self.nma = len(ma)

    def padarr(self, arr, maxlag, atend=True):
        '''pad 1d array with zeros at end to have length maxlag
        function that is a method, no self used

        Parameters
        ----------
        arr : array_like, 1d
            array that will be padded with zeros
        maxlag : int
            length of array after padding
        atend : boolean
            If True (default), then the zeros are added to the end, otherwise
            to the front of the array

        Returns
        -------
        arrp : ndarray
            zero-padded array

        Notes
        -----
        This is mainly written to extend coefficient arrays for the lag-polynomials.
        It returns a copy.

        '''
        if atend:
            return np.r_[arr, np.zeros(maxlag-len(arr))]
        else:
            return np.r_[np.zeros(maxlag-len(arr)), arr]

    def pad(self, maxlag):
        '''construct AR and MA polynomials that are zero-padded to a common length

        Parameters
        ----------
        maxlag : int
            new length of lag-polynomials

        Returns
        -------
        ar : ndarray
            extended AR polynomial coefficients
        ma : ndarray
            extended AR polynomial coefficients

        '''
        arpad = np.r_[self.ar, np.zeros(maxlag-self.nar)]
        mapad = np.r_[self.ma, np.zeros(maxlag-self.nma)]
        return arpad, mapad

    def fftar(self, n=None):
        '''Fourier transform of AR polynomial, zero-padded at end to n

        Parameters
        ----------
        n : int
            length of array after zero-padding

        Returns
        -------
        fftar : ndarray
            fft of zero-padded ar polynomial
        '''
        if n is None:
            n = len(self.ar)
        return fft.fft(self.padarr(self.ar, n))

    def fftma(self, n):
        '''Fourier transform of MA polynomial, zero-padded at end to n

        Parameters
        ----------
        n : int
            length of array after zero-padding

        Returns
        -------
        fftar : ndarray
            fft of zero-padded ar polynomial
        '''
        # NOTE(review): despite the signature, n is effectively required;
        # the None-fallback below uses len(self.ar), mirroring fftar.
        if n is None:
            n = len(self.ar)
        return fft.fft(self.padarr(self.ma, n))

    #@OneTimeProperty  # not while still debugging things
    def fftarma(self, n=None):
        '''Fourier transform of ARMA polynomial, zero-padded at end to n

        The Fourier transform of the ARMA process is calculated as the ratio
        of the fft of the MA polynomial divided by the fft of the AR polynomial.

        Parameters
        ----------
        n : int
            length of array after zero-padding

        Returns
        -------
        fftarma : ndarray
            fft of zero-padded arma polynomial
        '''
        if n is None:
            n = self.nobs
        return (self.fftma(n) / self.fftar(n))

    def spd(self, npos):
        '''raw spectral density, returns Fourier transform

        n is number of points in positive spectrum, the actual number of points
        is twice as large. different from other spd methods with fft
        '''
        n = npos
        w = fft.fftfreq(2*n) * 2 * np.pi
        hw = self.fftarma(2*n)  #not sure, need to check normalization
        #return (hw*hw.conj()).real[n//2-1:]  * 0.5 / np.pi #doesn't show in plot
        return (hw*hw.conj()).real * 0.5 / np.pi, w

    def spdshift(self, n):
        '''power spectral density using fftshift

        currently returns two-sided according to fft frequencies, use first half
        '''
        #size = s1+s2-1
        mapadded = self.padarr(self.ma, n)
        arpadded = self.padarr(self.ar, n)
        hw = fft.fft(fft.fftshift(mapadded)) / fft.fft(fft.fftshift(arpadded))
        #return np.abs(spd)[n//2-1:]
        w = fft.fftfreq(n) * 2 * np.pi
        wslice = slice(n//2-1, None, None)
        #return (hw*hw.conj()).real[wslice], w[wslice]
        return (hw*hw.conj()).real, w

    def spddirect(self, n):
        '''power spectral density using padding to length n done by fft

        currently returns two-sided according to fft frequencies, use first half
        '''
        #size = s1+s2-1
        #abs looks wrong
        hw = fft.fft(self.ma, n) / fft.fft(self.ar, n)
        w = fft.fftfreq(n) * 2 * np.pi
        wslice = slice(None, n//2, None)
        #return (np.abs(hw)**2)[wslice], w[wslice]
        return (np.abs(hw)**2) * 0.5/np.pi, w

    def _spddirect2(self, n):
        '''this looks bad, maybe with an fftshift
        '''
        #size = s1+s2-1
        hw = (fft.fft(np.r_[self.ma[::-1],self.ma], n)
              / fft.fft(np.r_[self.ar[::-1],self.ar], n))
        return (hw*hw.conj())  #.real[n//2-1:]

    def spdroots(self, w):
        '''spectral density for frequency using polynomial roots

        builds two arrays (number of roots, number of frequencies)
        '''
        return self.spdroots_(self.arroots, self.maroots, w)

    def spdroots_(self, arroots, maroots, w):
        '''spectral density for frequency using polynomial roots

        builds two arrays (number of roots, number of frequencies)

        Parameters
        ----------
        arroots : ndarray
            roots of ar (denominator) lag-polynomial
        maroots : ndarray
            roots of ma (numerator) lag-polynomial
        w : array_like
            frequencies for which spd is calculated

        Notes
        -----
        this should go into a function
        '''
        w = np.atleast_2d(w).T
        cosw = np.cos(w)
        #Greene 5th edt. p626, section 20.2.7.a.
        # Work with the inverse roots.
        maroots = 1./maroots
        arroots = 1./arroots
        num = 1 + maroots**2 - 2* maroots * cosw
        den = 1 + arroots**2 - 2* arroots * cosw
        #print 'num.shape, den.shape', num.shape, den.shape
        hw = 0.5 / np.pi * num.prod(-1) / den.prod(-1)  #or use expsumlog
        return np.squeeze(hw), w.squeeze()

    def spdpoly(self, w, nma=50):
        '''spectral density from MA polynomial representation for ARMA process

        References
        ----------
        Cochrane, section 8.3.3
        '''
        mpoly = np.polynomial.Polynomial(self.arma2ma(nma))
        hw = mpoly(np.exp(1j * w))
        spd = np.real_if_close(hw * hw.conj() * 0.5/np.pi)
        return spd, w

    def filter(self, x):
        '''
        filter a timeseries with the ARMA filter

        padding with zero is missing, in example I needed the padding to get
        initial conditions identical to direct filter

        Initial filtered observations differ from filter2 and signal.lfilter, but
        at end they are the same.

        See Also
        --------
        tsa.filters.fftconvolve

        '''
        n = x.shape[0]
        # NOTE(review): this compares an int to the bound method
        # self.fftarma, which is always False — the cached branch is never
        # taken. Probably meant to compare against self.nobs.
        if n == self.fftarma:
            fftarma = self.fftarma
        else:
            fftarma = self.fftma(n) / self.fftar(n)
        tmpfft = fftarma * fft.fft(x)
        return fft.ifft(tmpfft)

    def filter2(self, x, pad=0):
        '''filter a time series using fftconvolve3 with ARMA filter

        padding of x currently works only if x is 1d
        in example it produces same observations at beginning as lfilter even
        without padding.

        TODO: this returns 1 additional observation at the end
        '''
        from statsmodels.tsa.filters import fftconvolve3
        if not pad:
            pass
        elif pad == 'auto':
            #just guessing how much padding
            x = self.padarr(x, x.shape[0] + 2*(self.nma+self.nar), atend=False)
        else:
            x = self.padarr(x, x.shape[0] + int(pad), atend=False)

        return fftconvolve3(x, self.ma, self.ar)

    def acf2spdfreq(self, acovf, nfreq=100, w=None):
        '''
        not really a method
        just for comparison, not efficient for large n or long acf

        this is also similarly use in tsa.stattools.periodogram with window
        '''
        if w is None:
            w = np.linspace(0, np.pi, nfreq)[:, None]
        nac = len(acovf)
        hw = 0.5 / np.pi * (acovf[0] +
                            2 * (acovf[1:] * np.cos(w*np.arange(1,nac))).sum(1))
        return hw

    def invpowerspd(self, n):
        '''autocovariance from spectral density

        scaling is correct, but n needs to be large for numerical accuracy
        maybe padding with zero in fft would be faster
        without slicing it returns 2-sided autocovariance with fftshift

        >>> ArmaFft([1, -0.5], [1., 0.4], 40).invpowerspd(2**8)[:10]
        array([ 2.08    ,  1.44    ,  0.72    ,  0.36    ,  0.18    ,  0.09    ,
                0.045   ,  0.0225  ,  0.01125 ,  0.005625])
        >>> ArmaFft([1, -0.5], [1., 0.4], 40).acovf(10)
        array([ 2.08    ,  1.44    ,  0.72    ,  0.36    ,  0.18    ,  0.09    ,
                0.045   ,  0.0225  ,  0.01125 ,  0.005625])
        '''
        hw = self.fftarma(n)
        return np.real_if_close(fft.ifft(hw*hw.conj()), tol=200)[:n]

    def spdmapoly(self, w, twosided=False):
        '''ma only, need division for ar, use LagPolynomial
        '''
        # NOTE(review): `nfreq` is not defined in this scope — the w-is-None
        # branch raises NameError if taken.
        if w is None:
            w = np.linspace(0, np.pi, nfreq)
        return 0.5 / np.pi * self.mapoly(np.exp(w*1j))

    def plot4(self, fig=None, nobs=100, nacf=20, nfreq=100):
        """Four-panel diagnostic plot: sample path, ACF, spectrum, PACF.

        NOTE(review): the ``nobs`` argument is ignored — generate_sample is
        called with a hard-coded nsample=100.
        """
        rvs = self.generate_sample(nsample=100, burnin=500)
        acf = self.acf(nacf)[:nacf]  #TODO: check return length
        pacf = self.pacf(nacf)
        w = np.linspace(0, np.pi, nfreq)
        spdr, wr = self.spdroots(w)

        if fig is None:
            import matplotlib.pyplot as plt
            fig = plt.figure()
        ax = fig.add_subplot(2,2,1)
        ax.plot(rvs)
        ax.set_title('Random Sample \nar=%s, ma=%s' % (self.ar, self.ma))

        ax = fig.add_subplot(2,2,2)
        ax.plot(acf)
        ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (self.ar, self.ma))

        ax = fig.add_subplot(2,2,3)
        ax.plot(wr, spdr)
        ax.set_title('Power Spectrum \nar=%s, ma=%s' % (self.ar, self.ma))

        ax = fig.add_subplot(2,2,4)
        ax.plot(pacf)
        ax.set_title('Partial Autocorrelation \nar=%s, ma=%s' % (self.ar, self.ma))

        return fig
def spdar1(ar, w):
    '''spectral density of an AR(1) process at frequencies w

    `ar` may be either the scalar coefficient rho, or an AR lag
    polynomial [1, -rho] from which rho is extracted.
    '''
    rho = ar if np.ndim(ar) == 0 else -ar[1]
    denom = 1 + rho * rho - 2 * rho * np.cos(w)
    return 0.5 / np.pi / denom
if __name__ == '__main__':
    # Exploratory script: checks round-trip equivalence between
    # lfilter-based ARMA filtering and fft-based filtering, then plots
    # the spectral density of an ArmaFft process computed several ways.

    def maxabs(x, y):
        # maximum absolute elementwise difference, used as an error measure
        return np.max(np.abs(x - y))

    nobs = 200  #10000
    ar = [1, 0.0]
    ma = [1, 0.0]

    # AR(1) with rho = 0.9, stored as a full-length coefficient array
    ar2 = np.zeros(nobs)
    ar2[:2] = [1, -0.9]

    # unit impulse, used to read off impulse-response representations
    uni = np.zeros(nobs)
    uni[0] = 1.
    #arrep = signal.lfilter(ma, ar, ar2)
    #marep = signal.lfilter([1],arrep, uni)
    # same faster:
    arcomb = np.convolve(ar, ar2, mode='same')
    # MA(infinity) representation via impulse response of the ARMA filter
    marep = signal.lfilter(ma, arcomb, uni)  #[len(ma):]
    print(marep[:10])
    mafr = fft.fft(marep)

    rvs = np.random.normal(size=nobs)
    datafr = fft.fft(rvs)
    # filter white noise in the frequency domain
    y = fft.ifft(mafr * datafr)
    print(np.corrcoef(np.c_[y[2:], y[1:-1], y[:-2]], rowvar=0))

    # AR representation: invert the MA(infinity) impulse response
    arrep = signal.lfilter([1], marep, uni)
    print(arrep[:20])  # roundtrip to ar
    arfr = fft.fft(arrep)
    yfr = fft.fft(y)
    x = fft.ifft(arfr * yfr).real  #imag part is e-15
    # the next two are equal, roundtrip works
    print(x[:5])
    print(rvs[:5])
    print(np.corrcoef(np.c_[x[2:], x[1:-1], x[:-2]], rowvar=0))

    # ARMA filter using fft with ratio of fft of ma/ar lag polynomial
    # seems much faster than using lfilter
    #padding, note arcomb is already full length
    arcombp = np.zeros(nobs)
    arcombp[:len(arcomb)] = arcomb
    map_ = np.zeros(nobs)  #rename: map was shadowing builtin
    map_[:len(ma)] = ma
    ar0fr = fft.fft(arcombp)
    ma0fr = fft.fft(map_)
    # direct frequency-domain ARMA filter: H(w) = MA(w)/AR(w)
    y2 = fft.ifft(ma0fr / ar0fr * datafr)
    #the next two are (almost) equal in real part, almost zero but different in imag
    print(y2[:10])
    print(y[:10])
    print(maxabs(y, y2))  # from chfdiscrete
    #1.1282071239631782e-014

    # --- spectral density comparisons for an ARMA(7, 1) process ---
    ar = [1, -0.4]
    ma = [1, 0.2]

    arma1 = ArmaFft([1, -0.5, 0, 0, 0, 00, -0.7, 0.3], [1, 0.8], nobs)

    nfreq = nobs
    w = np.linspace(0, np.pi, nfreq)
    w2 = np.linspace(0, 2 * np.pi, nfreq)

    import matplotlib.pyplot as plt
    plt.close('all')

    # each figure below plots the same spectral density computed by a
    # different ArmaFft method, for visual comparison
    plt.figure()
    spd1, w1 = arma1.spd(2 ** 10)
    print(spd1.shape)
    _ = plt.plot(spd1)
    plt.title('spd fft complex')

    plt.figure()
    spd2, w2 = arma1.spdshift(2 ** 10)
    print(spd2.shape)
    _ = plt.plot(w2, spd2)
    plt.title('spd fft shift')

    plt.figure()
    spd3, w3 = arma1.spddirect(2 ** 10)
    print(spd3.shape)
    _ = plt.plot(w3, spd3)
    plt.title('spd fft direct')

    plt.figure()
    spd3b = arma1._spddirect2(2 ** 10)
    print(spd3b.shape)
    _ = plt.plot(spd3b)
    plt.title('spd fft direct mirrored')

    plt.figure()
    spdr, wr = arma1.spdroots(w)
    print(spdr.shape)
    plt.plot(w, spdr)
    plt.title('spd from roots')

    plt.figure()
    spdar1_ = spdar1(arma1.ar, w)
    print(spdar1_.shape)
    _ = plt.plot(w, spdar1_)
    plt.title('spd ar1')

    plt.figure()
    wper, spdper = arma1.periodogram(nfreq)
    print(spdper.shape)
    _ = plt.plot(w, spdper)
    plt.title('periodogram')

    # long simulated sample; burn-in dropped before spectral estimation
    startup = 1000
    rvs = arma1.generate_sample(startup + 10000)[startup:]
    import matplotlib.mlab as mlb

    plt.figure()
    sdm, wm = mlb.psd(x)
    print('sdm.shape', sdm.shape)
    sdm = sdm.ravel()
    plt.plot(wm, sdm)
    plt.title('matplotlib')

    # comparison against nitime's Levinson-Durbin AR spectral estimate
    from nitime.algorithms import LD_AR_est
    #yule_AR_est(s, order, Nfreqs)
    wnt, spdnt = LD_AR_est(rvs, 10, 512)
    plt.figure()
    print('spdnt.shape', spdnt.shape)
    _ = plt.plot(spdnt.ravel())
    print(spdnt[:10])
    plt.title('nitime')

    fig = plt.figure()
    arma1.plot4(fig)

    #plt.show()
| bsd-3-clause |
dimitri-justeau/niamoto-core | niamoto/api/publish_api.py | 2 | 1717 | # coding: utf-8
import sys
from niamoto.data_publishers.base_data_publisher import PUBLISHER_REGISTRY
from niamoto.exceptions import WrongPublisherKeyError, \
UnavailablePublishFormat
def publish(publisher_key, publish_format, *args, destination=sys.stdout,
            **kwargs):
    """
    Api method for processing and publishing data.

    :param publisher_key: key identifying the registered publisher to use.
    :param publish_format: output format; must be supported by the publisher.
    :param destination: target of the publication (defaults to stdout).
    :raises UnavailablePublishFormat: if the publisher does not accept
        the requested format.
    """
    publisher = get_publisher_class(publisher_key)()
    if publish_format not in publisher.get_publish_formats():
        msg = "The publish format '{}' is unavailable with the '{}' publisher."
        raise UnavailablePublishFormat(
            msg.format(publish_format, publisher_key)
        )
    # process() may return extra positional/keyword arguments that the
    # publish step needs; caller kwargs take part in the merge untouched
    data, extra_args, extra_kwargs = publisher.process(*args, **kwargs)
    merged_kwargs = dict(kwargs)
    merged_kwargs.update(extra_kwargs)
    publisher.publish(
        data,
        publish_format,
        *extra_args,
        destination=destination,
        **merged_kwargs
    )
def list_publish_formats(publisher_key):
    """
    Return the publish formats accepted by a publisher.

    :param publisher_key: key identifying the registered publisher.
    :return: the formats supported by that publisher class.
    """
    return get_publisher_class(publisher_key).get_publish_formats()
def get_publisher_class(publisher_key):
    """
    Return a publisher class from its key.

    :param publisher_key: The key of the publisher.
    :return: The publisher class corresponding to the key.
    :raises WrongPublisherKeyError: if the key is not in the registry.
    """
    if publisher_key in PUBLISHER_REGISTRY:
        return PUBLISHER_REGISTRY[publisher_key]['class']
    raise WrongPublisherKeyError(
        "The publisher key '{}' does not exist.".format(publisher_key)
    )
| gpl-3.0 |
RalphBariz/RalphsDotNet | Old/RalphsDotNet.Apps.OptimizationStudio/Resources/PyLib/numpy/core/tests/test_errstate.py | 58 | 1715 | # The following exec statement (or something like it) is needed to
# prevent SyntaxError on Python < 2.5. Even though this is a test,
# SyntaxErrors are not acceptable; on Debian systems, they block
# byte-compilation during install and thus cause the package to fail
# to install.
import sys

# NOTE: the whole test suite is wrapped in an `exec` of a string so that
# this module still byte-compiles on Python < 2.5, where the `with`
# statement is a SyntaxError.  On such interpreters the tests are simply
# skipped (see the header comment of this file).
if sys.version_info[:2] >= (2, 5):
    exec """
from __future__ import with_statement
from numpy.core import *
from numpy.random import rand, randint
from numpy.testing import *

class TestErrstate(TestCase):
    def test_invalid(self):
        with errstate(all='raise', under='ignore'):
            a = -arange(3)
            # This should work
            with errstate(invalid='ignore'):
                sqrt(a)
            # While this should fail!
            try:
                sqrt(a)
            except FloatingPointError:
                pass
            else:
                self.fail()

    def test_divide(self):
        with errstate(all='raise', under='ignore'):
            a = -arange(3)
            # This should work
            with errstate(divide='ignore'):
                a // 0
            # While this should fail!
            try:
                a // 0
            except FloatingPointError:
                pass
            else:
                self.fail()

    def test_errcall(self):
        def foo(*args):
            print(args)
        olderrcall = geterrcall()
        with errstate(call=foo):
            assert(geterrcall() is foo), 'call is not foo'
            with errstate(call=None):
                assert(geterrcall() is None), 'call is not None'
        assert(geterrcall() is olderrcall), 'call is not olderrcall'
"""

if __name__ == "__main__":
    run_module_suite()
| gpl-3.0 |
shtouff/django | tests/gis_tests/geos_tests/test_geos.py | 38 | 44711 | from __future__ import unicode_literals
import ctypes
import json
import random
import unittest
from binascii import a2b_hex, b2a_hex
from io import BytesIO
from unittest import skipUnless
from django.contrib.gis import gdal
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import (
HAS_GEOS, GeometryCollection, GEOSException, GEOSGeometry, GEOSIndexError,
LinearRing, LineString, MultiLineString, MultiPoint, MultiPolygon, Point,
Polygon, fromfile, fromstr, geos_version_info,
)
from django.contrib.gis.geos.base import GEOSBase
from django.contrib.gis.shortcuts import numpy
from django.utils import six
from django.utils.encoding import force_bytes
from django.utils.six.moves import range
from ..test_data import TestDataMixin
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(unittest.TestCase, TestDataMixin):
def test_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
# preferable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
# Anything that is either not None or the acceptable pointer type will
# result in a TypeError when trying to assign it to the `ptr` property.
# Thus, memory addresses (integers) and pointers of the incorrect type
# (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
if geom.hasz and geos_version_info()['version'] >= '3.3.0':
self.assertEqual(g.ewkt, geom.wkt)
def test_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex.decode())
def test_hexewkb(self):
"Testing (HEX)EWKB output."
# For testing HEX(EWKB).
ogc_hex = b'01010000000000000000000000000000000000F03F'
ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex_3d, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use an
# a WKBWriter w/dimension set accordingly, else GEOS will insert
# garbage into 3D coordinate if there is none.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
# Same for EWKB.
self.assertEqual(six.memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
self.assertEqual(six.memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml:
self.assertEqual(kml, geom.kml)
def test_errors(self):
"Testing the Error handlers."
# string-based
for err in self.geometries.errors:
with self.assertRaises((GEOSException, ValueError)):
fromstr(err.wkt)
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, six.memoryview(b'0'))
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test_wkb(self):
"Testing WKB output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)
def test_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_create_wkb(self):
"Testing creation from WKB."
for g in self.geometries.hex_wkt:
wkb = six.memoryview(a2b_hex(g.hex.encode()))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalized
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test_ewkt(self):
"Testing EWKT."
srids = (-1, 32140)
for srid in srids:
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_json(self):
"Testing GeoJSON input/output (via GDAL)."
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
# Loading jsons to prevent decimal differences
self.assertEqual(json.loads(g.json), json.loads(geom.json))
self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test_fromfile(self):
"Testing the fromfile() factory."
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = BytesIO()
wkt_f.write(force_bytes(ref_pnt.wkt))
wkb_f = BytesIO()
wkb_f.write(bytes(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# Error shouldn't be raise on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo': 'bar'})
self.assertNotEqual(g, False)
def test_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(pnt, fromstr(p.wkt))
self.assertEqual(False, pnt == prev) # Use assertEqual to test __eq__
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertIsNone(pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(pnt, pnt2)
self.assertEqual(pnt, pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(ls, fromstr(l.wkt))
self.assertEqual(False, ls == prev) # Use assertEqual to test __eq__
self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
# Point individual arguments
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)
if numpy:
self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
def test_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(ml, fromstr(l.wkt))
self.assertEqual(False, ml == prev) # Use assertEqual to test __eq__
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy:
self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test_polygons_from_bbox(self):
"Testing `from_bbox` class method."
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox(bbox)
self.assertEqual(bbox, p.extent)
# Testing numerical precision
x = 3.14159265358979323
bbox = (0, 0, 1, x)
p = Polygon.from_bbox(bbox)
y = p.extent[-1]
self.assertEqual(format(x, '.13f'), format(y, '.13f'))
def test_polygons(self):
"Testing Polygon objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(poly, fromstr(p.wkt))
# Should not be equal to previous geometry
self.assertEqual(False, poly == prev) # Use assertEqual to test __eq__
self.assertNotEqual(poly, prev) # Use assertNotEqual to test __ne__
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test_polygon_comparison(self):
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
self.assertGreater(p1, p2)
self.assertLess(p2, p1)
p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
self.assertGreater(p4, p3)
self.assertLess(p3, p4)
def test_multipolygons(self):
"Testing MultiPolygon objects."
fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
def test_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
# #### Memory issues with rings and poly
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
str(ring1)
str(ring2)
def test_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in range(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2:
tset = (5, 23)
else:
tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test_intersection(self):
"Testing intersects() and intersection()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test_union(self):
"Testing union()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test_difference(self):
"Testing difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test_symdifference(self):
"Testing sym_difference()."
for i in range(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in range(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in range(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly:
self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)):
self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
p2 = fromstr(p1.hex)
self.assertIsNone(p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_custom_srid(self):
""" Test with a srid unknown from GDAL """
pnt = Point(111200, 220900, srid=999999)
self.assertTrue(pnt.ewkt.startswith("SRID=999999;POINT (111200.0"))
self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
self.assertIsNone(pnt.srs)
# Test conversion from custom to a known srid
c2w = gdal.CoordTransform(
gdal.SpatialReference(
'+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 '
'+datum=WGS84 +units=m +no_defs'
),
gdal.SpatialReference(4326))
new_pnt = pnt.transform(c2w, clone=True)
self.assertEqual(new_pnt.srid, 4326)
self.assertAlmostEqual(new_pnt.x, 1, 3)
self.assertAlmostEqual(new_pnt.y, 2, 3)
def test_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
# ### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup:
new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
# ### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(21, 100), random.randint(21, 100))
# Testing the assignment
mp[i] = new
str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in range(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting the each ring in the polygon by 500.
for j in range(len(poly)):
r = poly[j]
for k in range(len(r)):
r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
# mpoly[0][0][0] = (3.14, 2.71)
# self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
# self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
# del mpoly
def test_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2., 3., 8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1., 2.))
pnt.coords = (1., 2., 3.)
self.assertEqual((1., 2., 3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2., 3., 8.), (50., 250., -117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1., 2.))
ls[0] = (1., 2., 3.)
self.assertEqual((1., 2., 3.), ls[0])
def test_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be circumference of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test_emptyCollections(self):
"Testing empty geometries and collections."
gc1 = GeometryCollection([])
gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
pnt = fromstr('POINT EMPTY')
ls = fromstr('LINESTRING EMPTY')
poly = fromstr('POLYGON EMPTY')
mls = fromstr('MULTILINESTRING EMPTY')
mpoly1 = fromstr('MULTIPOLYGON EMPTY')
mpoly2 = MultiPolygon(())
for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
self.assertRaises(GEOSIndexError, g.get_x)
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
else:
self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend(mls.wkt for mls in self.geometries.multilinestrings)
coll.extend(p.wkt for p in self.geometries.polygons)
coll.extend(mp.wkt for mp in self.geometries.multipoints)
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_gdal(self):
        "Testing `ogr` and `srs` properties."
        g1 = fromstr('POINT(5 23)')
        self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
        # No SRID was given, so `srs` is None.
        self.assertIsNone(g1.srs)
        # The z coordinate survives conversion to an OGR geometry.
        g1_3d = fromstr('POINT(5 23 8)')
        self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
        self.assertEqual(g1_3d.ogr.z, 8)
        # With an SRID, `srs` is populated and the OGR geometry matches.
        g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
        self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
        self.assertIsInstance(g2.srs, gdal.SpatialReference)
        self.assertEqual(g2.hex, g2.ogr.hex)
        self.assertEqual('WGS 84', g2.srs.name)
def test_copy(self):
"Testing use with the Python `copy` module."
import copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
    @skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
    def test_transform(self):
        "Testing `transform` method."
        orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
        # Using a srid, a SpatialReference object, and a CoordTransform object
        # for transformations.
        t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
        t1.transform(trans.srid)
        t2.transform(gdal.SpatialReference('EPSG:2774'))
        ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
        t3.transform(ct)
        # Testing use of the `clone` keyword: the original stays untouched
        # and a new, different geometry is returned.
        k1 = orig.clone()
        k2 = k1.transform(trans.srid, clone=True)
        self.assertEqual(k1, orig)
        self.assertNotEqual(k1, k2)
        # All transformed points should agree with the reference to 3 places.
        prec = 3
        for p in (t1, t2, t3, k2):
            self.assertAlmostEqual(trans.x, p.x, prec)
            self.assertAlmostEqual(trans.y, p.y, prec)
    @skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
    def test_transform_3d(self):
        # Transforming a 3D point should reproject x/y but leave the
        # z coordinate untouched.
        p3d = GEOSGeometry('POINT (5 23 100)', 4326)
        p3d.transform(2774)
        self.assertEqual(p3d.z, 100)
@skipUnless(HAS_GDAL, "GDAL is required.")
def test_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
if gdal.HAS_GDAL:
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertIsNot(g1, g, "Clone didn't happen")
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assertIsNot(g1, g, "Clone didn't happen")
finally:
gdal.HAS_GDAL = old_has_gdal
def test_transform_nosrid(self):
""" Testing `transform` method (no SRID or negative SRID) """
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_transform_nogdal(self):
        """ Testing `transform` method (GDAL not available) """
        # Temporarily pretend GDAL is missing: transform() to a different
        # SRID must then raise, for the in-place and clone variants alike.
        old_has_gdal = gdal.HAS_GDAL
        try:
            gdal.HAS_GDAL = False
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774)
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774, clone=True)
        finally:
            # Always restore the real flag.
            gdal.HAS_GDAL = old_has_gdal
def test_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
    def test_pickle(self):
        "Testing pickling and unpickling support."
        # Using both pickle and cPickle -- just 'cause.
        from django.utils.six.moves import cPickle
        import pickle
        # Creating a list of test geometries for pickling,
        # and setting the SRID on some of them.
        def get_geoms(lst, srid=None):
            return [GEOSGeometry(tg.wkt, srid) for tg in lst]
        tgeoms = get_geoms(self.geometries.points)
        tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
        tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
        tgeoms.extend(get_geoms(self.geometries.multipolygons, 3857))
        for geom in tgeoms:
            # Round-trip through both picklers; geometry and SRID must
            # survive unchanged.
            s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
            g1, g2 = cPickle.loads(s1), pickle.loads(s2)
            for tmpg in (g1, g2):
                self.assertEqual(geom, tmpg)
                self.assertEqual(geom.srid, tmpg.srid)
    def test_prepared(self):
        "Testing PreparedGeometry support."
        # Creating a simple multipolygon and getting a prepared version.
        mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
        prep = mpoly.prepared
        # A set of test points.
        pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
        covers = [True, True, False]  # No `covers` op for regular GEOS geoms.
        for pnt, c in zip(pnts, covers):
            # Results should be the same (but faster)
            self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
            self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
            self.assertEqual(c, prep.covers(pnt))
        # These predicates are only exercised on newer GEOS
        # (version string > '3.3.0').
        if geos_version_info()['version'] > '3.3.0':
            self.assertTrue(prep.crosses(fromstr('LINESTRING(1 1, 15 15)')))
            self.assertTrue(prep.disjoint(Point(-5, -5)))
            poly = Polygon(((-1, -1), (1, 1), (1, 0), (-1, -1)))
            self.assertTrue(prep.overlaps(poly))
            poly = Polygon(((-5, 0), (-5, 5), (0, 5), (-5, 0)))
            self.assertTrue(prep.touches(poly))
            poly = Polygon(((-1, -1), (-1, 11), (11, 11), (11, -1), (-1, -1)))
            self.assertTrue(prep.within(poly))
        # Original geometry deletion should not crash the prepared one (#21662)
        del mpoly
        self.assertTrue(prep.covers(Point(5, 5)))
def test_line_merge(self):
"Testing line merge support"
ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
)
ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
fromstr('LINESTRING (1 1, 3 3, 4 2)'),
)
for geom, merged in zip(ref_geoms, ref_merged):
self.assertEqual(merged, geom.merged)
    def test_valid_reason(self):
        "Testing IsValidReason support"
        # A valid geometry reports the canonical "Valid Geometry" reason.
        g = GEOSGeometry("POINT(0 0)")
        self.assertTrue(g.valid)
        self.assertIsInstance(g.valid_reason, six.string_types)
        self.assertEqual(g.valid_reason, "Valid Geometry")
        # A degenerate linestring is invalid and the reason explains why.
        g = GEOSGeometry("LINESTRING(0 0, 0 0)")
        self.assertFalse(g.valid)
        self.assertIsInstance(g.valid_reason, six.string_types)
        self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))
    @skipUnless(HAS_GEOS, "Geos is required.")
    def test_linearref(self):
        "Testing linear referencing"
        ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
        mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')
        # project() gives the distance along the line of the point closest
        # to the argument; interpolate() is the inverse operation.
        self.assertEqual(ls.project(Point(0, 20)), 10.0)
        self.assertEqual(ls.project(Point(7, 6)), 24)
        self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0 / 3)
        self.assertEqual(ls.interpolate(10), Point(0, 10))
        self.assertEqual(ls.interpolate(24), Point(10, 6))
        self.assertEqual(ls.interpolate_normalized(1.0 / 3), Point(0, 10))
        # The same operations also work on multi-linestrings.
        self.assertEqual(mls.project(Point(0, 20)), 10)
        self.assertEqual(mls.project(Point(7, 6)), 16)
        self.assertEqual(mls.interpolate(9), Point(0, 9))
        self.assertEqual(mls.interpolate(17), Point(10, 7))
def test_geos_version(self):
"""Testing the GEOS version regular expression."""
from django.contrib.gis.geos.libgeos import version_regex
versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0')]
for v_init, v_geos, v_capi in versions:
m = version_regex.match(v_init)
self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init)
self.assertEqual(m.group('version'), v_geos)
self.assertEqual(m.group('capi_version'), v_capi)
| bsd-3-clause |
kyleconroy/vogeltron | tests/test_sports.py | 1 | 6742 | import mock
from datetime import datetime, timezone
import pytz
from nose.tools import assert_equals, assert_raises
from vogeltron import baseball
from bs4 import BeautifulSoup
YEAR = datetime.today().year
def date_for_month(month, day, hour, minute):
    # Build a US/Pacific-localized datetime in the current year for the
    # given month/day and clock time.
    timez = pytz.timezone('US/Pacific')
    return timez.localize(datetime(YEAR, month, day, hour, minute))
def april(day, hour, minute):
    # Convenience wrapper: a Pacific datetime in April of the current year.
    return date_for_month(4, day, hour, minute)
def june(day, hour, minute):
    # Convenience wrapper: a Pacific datetime in June of the current year.
    return date_for_month(6, day, hour, minute)
# Shared fixture used by the pretty-printing tests below.
game = baseball.Result('LA Dodgers', april(1, 20, 5), False, False, '4-0')
def test_game_date():
    # pretty_date renders as "<Month> <day>" with no leading zero.
    assert_equals(game.pretty_date, 'April 1')
def test_game_time():
    # pretty_time renders as a 12-hour clock with an AM/PM suffix.
    assert_equals(game.pretty_time, '08:05PM')
def test_game_description():
    # description reads "at <opponent>" for this fixture -- presumably the
    # away-game form; confirm against baseball.Result.
    assert_equals(game.description, 'at LA Dodgers')
@mock.patch('requests.get')
def test_all_teams(_get):
    # Serve a canned ESPN teams page instead of hitting the network.
    _get().content = open('tests/fixtures/teams.html').read()
    teams = baseball.teams()
    # All 30 MLB teams should be parsed out of the fixture.
    assert_equals(len(teams), 30)
@mock.patch('requests.get')
def test_first_teams(_get):
    _get().content = open('tests/fixtures/teams.html').read()
    team = baseball.teams()[0]
    # Spot-check the fields parsed for the first team entry.
    assert_equals(team['name'], 'Baltimore Orioles')
    assert_equals(team['league'], 'AMERICAN')
    assert_equals(team['division'], 'EAST')
    assert_equals(team['links']['schedule'],
                  'http://espn.go.com/mlb/teams/schedule?team=bal')
@mock.patch('requests.get')
def test_results(_get):
    _get().content = open('tests/fixtures/schedule.html').read()
    results, _ = baseball.schedule('WEST', 'http://example.com')
    # Past games from the fixture, in schedule order.
    assert_equals(results, [
        baseball.Result('LA Dodgers', april(1, 13, 5), False, False, '4-0'),
        baseball.Result('LA Dodgers', april(2, 13, 5), False, True, '3-0'),
        baseball.Result('LA Dodgers', april(3, 13, 5), False, True, '5-3'),
        baseball.Result('St. Louis', april(5, 13, 5), True, True, '1-0'),
    ])
@mock.patch('requests.get')
def test_no_next_game(_get):
    # A schedule page with a game currently in progress: next_game should
    # still resolve to a game id.
    _get().content = open('tests/fixtures/schedule_current_game.html').read()
    game_time, game_id = baseball.next_game('http://example.com')
    assert_equals(game_id, '330406126')
@mock.patch('requests.get')
def test_next_game_against_bluejays(_get):
    # Fixture contains a double header; next_game must still pick one game.
    _get().content = \
        open('tests/fixtures/bluejays_with_double_header.html').read()
    game_time, game_id = baseball.next_game('http://example.com')
    assert game_time is not None
    assert_equals('330604126', game_id)
@mock.patch('requests.get')
def test_next_game(_get):
    _get().content = open('tests/fixtures/schedule.html').read()
    game_time, game_id = baseball.next_game('http://example.com')
    # The first upcoming game in the fixture is on April 6 at 1:05 PM.
    assert_equals(game_id, '330406126')
    assert_equals(game_time, april(6, 13, 5))
@mock.patch('requests.get')
def test_upcoming(_get):
    _get().content = open('tests/fixtures/schedule.html').read()
    _, upcoming = baseball.schedule('WEST', 'http://example.com')
    # Upcoming games carry a 0-0 score and an unknown (None) result flag.
    assert_equals(upcoming, [
        baseball.Result('St. Louis', april(6, 13, 5), True, None, '0-0'),
        baseball.Result('St. Louis', april(7, 13, 5), True, None, '0-0'),
        baseball.Result('Colorado', april(8, 19, 15), True, None, '0-0'),
        baseball.Result('Colorado', april(9, 19, 15), True, None, '0-0'),
        baseball.Result('Colorado', april(10, 12, 45), True, None, '0-0'),
    ])
@mock.patch('requests.get')
def test_upcoming_with_skipped(_get):
    # Upcoming games are parsed correctly when the schedule page contains
    # a double header (i.e. some rows must be skipped).
    # NOTE: a leftover debug print of upcoming[0].opponent was removed.
    webpage = open('tests/fixtures/bluejays_with_double_header.html').read()
    _get().content = webpage
    _, upcoming = baseball.schedule('WEST', 'http://example.com')
    assert_equals(upcoming, [
        baseball.Result('Toronto', june(4, 19, 15), True, None, '0-0'),
        baseball.Result('Toronto', june(5, 12, 45), True, None, '0-0'),
        baseball.Result('Arizona', june(7, 18, 40), False, None, '0-0'),
        baseball.Result('Arizona', june(8, 19, 10), False, None, '0-0'),
        baseball.Result('Arizona', june(9, 13, 10), False, None, '0-0'),
    ])
@mock.patch('requests.get')
def test_standings(_get):
    _get().content = open('tests/fixtures/standings.html').read()
    standings = baseball.current_standings('NATIONAL', 'WEST')
    # Expected NL West standings parsed from the fixture page.
    examples = [
        baseball.Standing('San Francisco', 'SF', 3, 1, .75, 0.0, 'Won 3'),
        baseball.Standing('Colorado', 'COL', 3, 1, .75, 0.0, 'Won 3'),
        baseball.Standing('Arizona', 'ARI', 2, 1, .667, 0.5, 'Won 1'),
        baseball.Standing('LA Dodgers', 'LAD', 1, 2, .333, 1.5, 'Lost 2'),
        baseball.Standing('San Diego', 'SD', 1, 3, .250, 2.0, 'Lost 1'),
    ]
    assert_equals(standings, examples)
def test_parse_gametime_tba():
    # A TBA game time is mapped to the same 20:05 UTC value as below.
    gt = baseball.parse_gametime("Mon, Apr 1", "TBA")
    assert_equals(pytz.utc.localize(datetime(YEAR, 4, 1, 20, 5)), gt)
def test_parse_gametime_postponed():
    # A POSTPONED game behaves like TBA.
    gt = baseball.parse_gametime("Mon, Apr 1", "POSTPONED")
    assert_equals(pytz.utc.localize(datetime(YEAR, 4, 1, 20, 5)), gt)
def test_parse_gametime():
    # An explicit local game time is parsed and returned as UTC.
    gt = baseball.parse_gametime("Mon, Apr 1", "4:05 PM")
    assert_equals(pytz.utc.localize(datetime(YEAR, 4, 1, 20, 5)), gt)
def test_no_team_info():
    # An unknown team name should raise.
    with assert_raises(Exception):
        baseball.team_info('Giantssjk')
def test_team_info():
    team = baseball.team_info('Giants')
    assert_equals(team['name'], 'San Francisco Giants')
def test_normalize():
    # normalize() upper-cases and strips spaces and dashes.
    assert_equals(baseball.normalize('Giants'), 'GIANTS')
    assert_equals(baseball.normalize('Francisco Giants'), 'FRANCISCOGIANTS')
    assert_equals(baseball.normalize('Red-Sox'), 'REDSOX')
def test_preview_weather():
    # Weather string scraped from the in-game preview page fixture.
    soup = BeautifulSoup(open('tests/fixtures/preview_during.html'))
    assert_equals(baseball.parse_weather(soup), '40° Broken Clouds')
def test_preview_gametime():
    soup = BeautifulSoup(open('tests/fixtures/preview_during.html'))
    # The parsed game time is timezone-aware (UTC).
    assert_equals(baseball.parse_game_time(soup),
                  datetime(2013, 4, 13, 17, 5, tzinfo=timezone.utc))
def test_preview_teamname():
    soup = BeautifulSoup(open('tests/fixtures/preview_during.html'))
    # Index 0 selects the first of the two teams on the preview page.
    name, record = baseball.parse_team_info(soup, 0)
    assert_equals(name, "Giants")
    assert_equals(record, "7-4")
def test_preview_pitcher():
    soup = BeautifulSoup(open('tests/fixtures/preview_during.html'))
    pitcher = baseball.parse_starting_pitcher(soup, 0)
    assert_equals(pitcher.name, "Bumgarner")
    assert_equals(pitcher.era, 0.96)
    assert_equals(pitcher.record, '2-0')
def test_preview_lineup():
    soup = BeautifulSoup(open('tests/fixtures/preview_during.html'))
    lineup = baseball.parse_starting_lineup(soup, 0)
    blanco = lineup[0]
    # A full starting lineup has nine batters.
    assert_equals(len(lineup), 9)
    assert_equals(blanco.name, 'Blanco')
    assert_equals(blanco.position, 'CF')
| mit |
junhuac/MQUIC | src/tools/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    # Accumulated rule specifications; starts with the section tag.
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies, outputs, extensions):
    """Adds a custom build rule to the tool file.

    Args:
      name: Name of the rule.
      cmd: Command line of the rule.
      description: Description of the rule.
      additional_dependencies: Other files which may trigger the rule.
      outputs: Outputs of the rule.
      extensions: Extensions handled by the rule.
    """
    attributes = {
        'Name': name,
        'ExecutionDescription': description,
        'CommandLine': cmd,
        'Outputs': ';'.join(outputs),
        'FileExtensions': ';'.join(extensions),
        'AdditionalDependencies': ';'.join(additional_dependencies),
    }
    self.rules_section.append(['CustomBuildRule', attributes])

  def WriteIfChanged(self):
    """Writes the tool file, but only if its contents have changed."""
    content = [
        'VisualStudioToolFile',
        {'Version': '8.00',
         'Name': self.name
        },
        self.rules_section,
    ]
    easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
                               encoding="Windows-1252")
| mit |
YathishReddy/Robust_ECN_Signalling_With_Nonces | src/wifi/doc/source/conf.py | 92 | 7432 | # -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# Sphinx build configuration for the ns-3 Wi-Fi module documentation.
# Values not set here fall back to the Sphinx defaults.

# -- General configuration -----------------------------------------------

# Sphinx extension modules used by this document set.
extensions = ['sphinx.ext.pngmath']

# Paths (relative to this directory) that contain templates.
templates_path = ['_templates']

# Suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'wifi'

# General information about the project.
project = u'ns-3'
copyright = u'ns-3 project'

# Short (X.Y) and full version strings; both track the development tree.
version = 'ns-3-dev'
release = 'ns-3-dev'

# Patterns (relative to the source directory) of files and directories to
# ignore when looking for source files.
exclude_patterns = []

# The Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------

# Theme for HTML and HTML Help pages.
html_theme = 'default'

# -- Options for LaTeX output --------------------------------------------

# Grouping of the document tree into LaTeX files.  One tuple per document:
# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('wifi', 'wifi-module-doc.tex', u'The ns-3 Wi-Fi Module Documentation',
     u'ns-3 project', 'manual'),
]

# Add page breaks in the PDF down to this sectioning level
# (1 = top-level sections, 2 = subsections, and so on).
pdf_break_level = 4

# -- Options for manual page output --------------------------------------

# One entry per manual page:
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ns-3-model-library', u'ns-3 Model Library',
     [u'ns-3 project'], 1)
]
| gpl-2.0 |
kanpol/hk | hooker_xp/hooker_xp/release.py | 2 | 5522 | # -*- coding: utf-8 -*-
#+---------------------------------------------------------------------------+
#| |
#| Android's Hooker |
#| |
#+---------------------------------------------------------------------------+
#| Copyright (C) 2011 Georges Bossert and Dimitri Kirchner |
#| This program is free software: you can redistribute it and/or modify |
#| it under the terms of the GNU General Public License as published by |
#| the Free Software Foundation, either version 3 of the License, or |
#| (at your option) any later version. |
#| |
#| This program is distributed in the hope that it will be useful, |
#| but WITHOUT ANY WARRANTY; without even the implied warranty of |
#| MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
#| GNU General Public License for more details. |
#| |
#| You should have received a copy of the GNU General Public License |
#| along with this program. If not, see <http://www.gnu.org/licenses/>. |
#+---------------------------------------------------------------------------+
#| @url : http://www.amossys.fr |
#| @contact : android-hooker@amossys.fr |
#| @sponsors : Amossys, http://www.amossys.fr |
#+---------------------------------------------------------------------------+
#+---------------------------------------------------------------------------+
#| Official name of the project :
#+---------------------------------------------------------------------------+
# Project metadata for hooker_xp: name, version, authorship and license
# information consumed by packaging and the application itself.

# Official name of the project and of the application.
name = "hooker_xp"
appname = name

# Current OFFICIAL version of the application.  The version number must be
# changed during the last commit before the tag release; development
# versions have their number increased and are postfixed with ~git.
version = "0.1~git"
versionName = "TODO"

# Copyright mention.
copyright = "Copyright (C) 2014 Georges Bossert and Dimitri Kirchner"

# Short description of the application.
description = "Dynamic Analysis of Android Applications"

# Platforms on which the application can be executed.
platforms = "Linux_x86, Linux_x64"

# Author names and the email address to contact them.
author = "Georges Bossert, Dimitri Kirchner"
author_email = "android-hooker@amossys.fr"

# Official website and download location of the application.
url = "https://github.com/AndroidHooker/hooker"
download_url = "https://github.com/AndroidHooker/hooker.git"

# Keywords describing the application.
keywords = ["Android", "Reverse Engineering", "Security", "Dynamic Analysis"]

# License used to publish the tool.
licenseName = "GPLv3"
license = """This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>."""
| gpl-3.0 |
sundisee/django-dynamic-scraper | dynamic_scraper/management/commands/run_checker_tests.py | 6 | 3056 | from optparse import make_option
from subprocess import Popen, PIPE
from django.conf import settings
from django.core.mail import mail_admins
from django.core.management.base import BaseCommand
from dynamic_scraper.models import Scraper
class Command(BaseCommand):
    """
    Management command which runs the Scrapy ``checker_test`` spider for
    every scraper that has a checker configured (both ``checker_x_path``
    and ``checker_ref_url`` set) and optionally mails a failure report to
    the Django admins.
    """
    help = 'Runs all checker tests'
    option_list = BaseCommand.option_list + (
        make_option(
            '--only-active',
            action="store_true",
            dest="only_active",
            default=False,
            help="Run checker tests only for active scrapers"),
        make_option(
            '--report-only-errors',
            action="store_true",
            dest="report_only_errors",
            default=False,
            help="Report only if checker is returning ERROR (default: WARNING/ERROR)"),
        make_option(
            '--send-admin-mail',
            action="store_true",
            dest="send_admin_mail",
            default=False,
            help="Send report mail to Django admins if errors occured"),
    )
    def handle(self, *args, **options):
        # Select scrapers with a complete checker configuration; with
        # --only-active, restrict further to scrapers in status 'A'.
        if options.get('only_active'):
            scraper_list = Scraper.objects.filter(
                checker_x_path__isnull=False,
                checker_ref_url__isnull=False,
                status='A'
            )
        else:
            scraper_list = Scraper.objects.filter(
                checker_x_path__isnull=False,
                checker_ref_url__isnull=False
            )
        mail_to_admins = False
        msg = ''
        for scraper in scraper_list:
            scraper_str = unicode(scraper) + " "
            scraper_str += "(ID:" + unicode(scraper.pk) + ", Status: " + scraper.get_status_display() + ")"
            print "Run checker test for scraper %s..." % scraper_str
            # Run the checker_test spider in a subprocess; -L controls the
            # scrapy log level, so with --report-only-errors only ERROR
            # output (instead of WARNING and above) reaches stderr.
            cmd = 'scrapy crawl checker_test '
            if options.get('report_only_errors'):
                cmd += '-L ERROR '
            else:
                cmd += '-L WARNING '
            cmd += '-a id=' + str(scraper.pk)
            p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
            stderr = p.communicate()[1]
            # Any stderr output at the chosen log level is treated as a
            # failed checker configuration for this scraper.
            if stderr != '':
                print stderr
                msg += 'Checker test for scraper %s failed:\n' % scraper_str
                msg += stderr + '\n\n'
                mail_to_admins = True
            else:
                print "Checker configuration working."
        if options.get('send_admin_mail') and mail_to_admins:
            print "Send mail to admins..."
            # Use the current Site name for the mail subject when the sites
            # framework is installed, otherwise a generic fallback.
            if 'django.contrib.sites' in settings.INSTALLED_APPS:
                from django.contrib.sites.models import Site
                subject = Site.objects.get_current().name
            else:
                subject = 'DDS Scraper Site'
            subject += " - Errors while running checker configuration tests"
            mail_admins(subject, msg)
| bsd-3-clause |
sudheesh001/oh-mainline | vendor/packages/twisted/twisted/web/test/test_webclient.py | 18 | 46700 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.web.client}.
"""
import os
from errno import ENOSPC
from urlparse import urlparse
from twisted.trial import unittest
from twisted.web import server, static, client, error, util, resource, http_headers
from twisted.internet import reactor, defer, interfaces
from twisted.python.failure import Failure
from twisted.python.filepath import FilePath
from twisted.python.log import msg
from twisted.protocols.policies import WrappingFactory
from twisted.test.proto_helpers import StringTransport
from twisted.test.proto_helpers import MemoryReactor
from twisted.internet.address import IPv4Address
from twisted.internet.task import Clock
from twisted.internet.error import ConnectionRefusedError
from twisted.internet.protocol import Protocol
from twisted.internet.defer import Deferred, succeed
from twisted.web.client import Request
from twisted.web._newclient import HTTP11ClientProtocol
from twisted.web.error import SchemeNotSupported
try:
from twisted.internet import ssl
except:
ssl = None
else:
from OpenSSL.SSL import ContextType
class ExtendedRedirect(resource.Resource):
    """
    Redirection resource.
    The HTTP status code is set according to the C{code} query parameter.
    @type lastMethod: C{str}
    @ivar lastMethod: Last handled HTTP request method
    """
    isLeaf = 1
    lastMethod = None
    def __init__(self, url):
        resource.Resource.__init__(self)
        self.url = url
    def render(self, request):
        # The first request is redirected (and its method remembered); the
        # follow-up request after the redirect just records its method and
        # succeeds, so tests can inspect C{lastMethod}.
        if self.lastMethod:
            self.lastMethod = request.method
            return "OK Thnx!"
        else:
            self.lastMethod = request.method
            code = int(request.args['code'][0])
            return self.redirectTo(self.url, request, code)
    def getChild(self, name, request):
        # Every child path resolves to this same resource (isLeaf behaviour).
        return self
    def redirectTo(self, url, request, code):
        # Issue a redirect with the caller-chosen status code rather than
        # a fixed 302.
        request.setResponseCode(code)
        request.setHeader("location", url)
        return "OK Bye!"
class ForeverTakingResource(resource.Resource):
    """
    L{ForeverTakingResource} is a resource which never finishes responding
    to requests.
    """
    def __init__(self, write=False):
        # write: if True, emit some body bytes before hanging, so timeout
        # handling after a partial response can be exercised.
        resource.Resource.__init__(self)
        self._write = write
    def render(self, request):
        if self._write:
            request.write('some bytes')
        # request.finish() is never called; the response stays open forever.
        return server.NOT_DONE_YET
class CookieMirrorResource(resource.Resource):
    """
    A resource which renders itself as the C{repr} of the sorted
    (name, value) pairs of the cookies received with the request.
    """
    def render(self, request):
        pairs = sorted(request.received_cookies.items())
        return repr(pairs)
class RawCookieMirrorResource(resource.Resource):
    """
    A resource which renders itself as the C{repr} of the raw I{Cookie}
    request header, or C{repr(None)} when no such header was sent.
    """
    def render(self, request):
        rawCookieHeader = request.getHeader('cookie')
        return repr(rawCookieHeader)
class ErrorResource(resource.Resource):
    """
    A resource which always responds with a 401 status and an empty body,
    optionally advertising an explicit zero I{Content-Length} when the
    C{showlength} query argument is present.
    """
    def render(self, request):
        request.setResponseCode(401)
        wantsLength = request.args.get("showlength")
        if wantsLength:
            request.setHeader("content-length", "0")
        return ""
class NoLengthResource(resource.Resource):
    """
    A resource with a fixed short body.  NOTE(review): the name suggests it
    is served without a predetermined I{Content-Length}; confirm against
    twisted.web.server's handling of string render results.
    """
    def render(self, request):
        return "nolength"
class HostHeaderResource(resource.Resource):
    """
    A testing resource which renders itself as the value of the host header
    from the request.
    """
    def render(self, request):
        # Echo back exactly what the client sent in the Host header.
        return request.received_headers['host']
class PayloadResource(resource.Resource):
    """
    A testing resource which renders itself as the contents of the request body
    as long as the request body is 100 bytes long, otherwise which renders
    itself as C{"ERROR"}.
    """
    def render(self, request):
        data = request.content.read()
        contentLength = request.received_headers['content-length']
        # Both the actual body and the advertised Content-Length must be
        # exactly 100 bytes; any mismatch is reported as "ERROR".
        if len(data) != 100 or int(contentLength) != 100:
            return "ERROR"
        return data
class BrokenDownloadResource(resource.Resource):
    """
    A resource which advertises a five byte body but writes only three bytes
    before finishing, producing a truncated (broken) download.
    """
    def render(self, request):
        # only sends 3 bytes even though it claims to send 5
        request.setHeader("content-length", "5")
        request.write('abc')
        return ''
class CountingRedirect(util.Redirect):
    """
    A L{util.Redirect} resource that keeps track of the number of times the
    resource has been accessed.
    """
    def __init__(self, *a, **kw):
        util.Redirect.__init__(self, *a, **kw)
        # Number of times render() has been invoked.
        self.count = 0
    def render(self, request):
        self.count += 1
        return util.Redirect.render(self, request)
class CountingResource(resource.Resource):
    """
    A resource that keeps track of the number of times it has been accessed.
    """
    def __init__(self):
        resource.Resource.__init__(self)
        # Number of times render() has been invoked.
        self.count = 0
    def render(self, request):
        self.count += 1
        return "Success"
class ParseUrlTestCase(unittest.TestCase):
    """
    Test URL parsing facility and defaults values.
    """
    def test_parse(self):
        """
        L{client._parse} correctly parses a URL into its various components.
        """
        # The default port for HTTP is 80.
        self.assertEqual(
            client._parse('http://127.0.0.1/'),
            ('http', '127.0.0.1', 80, '/'))
        # The default port for HTTPS is 443.
        self.assertEqual(
            client._parse('https://127.0.0.1/'),
            ('https', '127.0.0.1', 443, '/'))
        # Specifying a port.
        self.assertEqual(
            client._parse('http://spam:12345/'),
            ('http', 'spam', 12345, '/'))
        # Weird (but commonly accepted) structure uses default port.
        self.assertEqual(
            client._parse('http://spam:/'),
            ('http', 'spam', 80, '/'))
        # Spaces in the hostname are trimmed, the default path is /.
        self.assertEqual(
            client._parse('http://foo '),
            ('http', 'foo', 80, '/'))
    def test_externalUnicodeInterference(self):
        """
        L{client._parse} should return C{str} for the scheme, host, and path
        elements of its return tuple, even when passed an URL which has
        previously been passed to L{urlparse} as a C{unicode} string.
        """
        badInput = u'http://example.com/path'
        goodInput = badInput.encode('ascii')
        # NOTE(review): parsing the unicode form first appears intended to
        # exercise urlparse's internal caching, which could otherwise leak
        # unicode components into later parses - confirm against the
        # urlparse module's implementation.
        urlparse(badInput)
        scheme, host, port, path = client._parse(goodInput)
        self.assertIsInstance(scheme, str)
        self.assertIsInstance(host, str)
        self.assertIsInstance(path, str)
class HTTPPageGetterTests(unittest.TestCase):
    """
    Tests for L{HTTPPagerGetter}, the HTTP client protocol implementation
    used to implement L{getPage}.
    """
    def test_earlyHeaders(self):
        """
        When a connection is made, L{HTTPPagerGetter} sends the headers from
        its factory's C{headers} dict. If I{Host} or I{Content-Length} is
        present in this dict, the values are not sent, since they are sent with
        special values before the C{headers} dict is processed. If
        I{User-Agent} is present in the dict, it overrides the value of the
        C{agent} attribute of the factory. If I{Cookie} is present in the
        dict, its value is added to the values from the factory's C{cookies}
        attribute.
        """
        factory = client.HTTPClientFactory(
            'http://foo/bar',
            agent="foobar",
            cookies={'baz': 'quux'},
            postdata="some data",
            headers={
                'Host': 'example.net',
                'User-Agent': 'fooble',
                'Cookie': 'blah blah',
                'Content-Length': '12981',
                'Useful': 'value'})
        transport = StringTransport()
        protocol = client.HTTPPageGetter()
        protocol.factory = factory
        protocol.makeConnection(transport)
        # Note: Content-Length reflects the real postdata length (9), not
        # the bogus value from the headers dict, and User-Agent from the
        # headers dict wins over the factory's agent argument.
        self.assertEqual(
            transport.value(),
            "GET /bar HTTP/1.0\r\n"
            "Host: example.net\r\n"
            "User-Agent: foobar\r\n"
            "Content-Length: 9\r\n"
            "Useful: value\r\n"
            "connection: close\r\n"
            "Cookie: blah blah; baz=quux\r\n"
            "\r\n"
            "some data")
class WebClientTestCase(unittest.TestCase):
    """
    End-to-end tests for L{client.getPage} and L{client.downloadPage} run
    against a real L{server.Site} listening on an ephemeral loopback port.
    """
    def _listen(self, site):
        # Subclasses override this to serve over SSL instead of plain TCP.
        return reactor.listenTCP(0, site, interface="127.0.0.1")
    def setUp(self):
        # Build a resource tree covering every server behaviour the tests
        # need (redirects, hangs, errors, cookies, broken downloads, ...).
        self.cleanupServerConnections = 0
        name = self.mktemp()
        os.mkdir(name)
        FilePath(name).child("file").setContent("0123456789")
        r = static.File(name)
        r.putChild("redirect", util.Redirect("/file"))
        self.infiniteRedirectResource = CountingRedirect("/infiniteRedirect")
        r.putChild("infiniteRedirect", self.infiniteRedirectResource)
        r.putChild("wait", ForeverTakingResource())
        r.putChild("write-then-wait", ForeverTakingResource(write=True))
        r.putChild("error", ErrorResource())
        r.putChild("nolength", NoLengthResource())
        r.putChild("host", HostHeaderResource())
        r.putChild("payload", PayloadResource())
        r.putChild("broken", BrokenDownloadResource())
        r.putChild("cookiemirror", CookieMirrorResource())
        self.afterFoundGetCounter = CountingResource()
        r.putChild("afterFoundGetCounter", self.afterFoundGetCounter)
        r.putChild("afterFoundGetRedirect", util.Redirect("/afterFoundGetCounter"))
        miscasedHead = static.Data("miscased-head GET response content", "major/minor")
        miscasedHead.render_Head = lambda request: "miscased-head content"
        r.putChild("miscased-head", miscasedHead)
        self.extendedRedirect = ExtendedRedirect('/extendedRedirect')
        r.putChild("extendedRedirect", self.extendedRedirect)
        self.site = server.Site(r, timeout=None)
        self.wrapper = WrappingFactory(self.site)
        self.port = self._listen(self.wrapper)
        self.portno = self.port.getHost().port
    def tearDown(self):
        # If the test indicated it might leave some server-side connections
        # around, clean them up.
        connections = self.wrapper.protocols.keys()
        # If there are fewer server-side connections than requested,
        # that's okay.  Some might have noticed that the client closed
        # the connection and cleaned up after themselves.
        for n in range(min(len(connections), self.cleanupServerConnections)):
            proto = connections.pop()
            msg("Closing %r" % (proto,))
            proto.transport.loseConnection()
        if connections:
            msg("Some left-over connections; this test is probably buggy.")
        return self.port.stopListening()
    def getURL(self, path):
        # Absolute URL for C{path} on the test server.
        return "http://127.0.0.1:%d/%s" % (self.portno, path)
    def testPayload(self):
        # 100 bytes of postdata are echoed back verbatim by PayloadResource.
        s = "0123456789" * 10
        return client.getPage(self.getURL("payload"), postdata=s
            ).addCallback(self.assertEquals, s
            )
    def test_getPageBrokenDownload(self):
        """
        If the connection is closed before the number of bytes indicated by
        I{Content-Length} have been received, the L{Deferred} returned by
        L{getPage} fails with L{PartialDownloadError}.
        """
        d = client.getPage(self.getURL("broken"))
        d = self.assertFailure(d, client.PartialDownloadError)
        d.addCallback(lambda exc: self.assertEquals(exc.response, "abc"))
        return d
    def test_downloadPageBrokenDownload(self):
        """
        If the connection is closed before the number of bytes indicated by
        I{Content-Length} have been received, the L{Deferred} returned by
        L{downloadPage} fails with L{PartialDownloadError}.
        """
        # test what happens when download gets disconnected in the middle
        path = FilePath(self.mktemp())
        d = client.downloadPage(self.getURL("broken"), path.path)
        d = self.assertFailure(d, client.PartialDownloadError)
        def checkResponse(response):
            """
            The HTTP status code from the server is propagated through the
            C{PartialDownloadError}.
            """
            self.assertEquals(response.status, "200")
            self.assertEquals(response.message, "OK")
            return response
        d.addCallback(checkResponse)
        def cbFailed(ignored):
            self.assertEquals(path.getContent(), "abc")
        d.addCallback(cbFailed)
        return d
    def test_downloadPageLogsFileCloseError(self):
        """
        If there is an exception closing the file being written to after the
        connection is prematurely closed, that exception is logged.
        """
        class BrokenFile:
            def write(self, bytes):
                pass
            def close(self):
                raise IOError(ENOSPC, "No file left on device")
        d = client.downloadPage(self.getURL("broken"), BrokenFile())
        d = self.assertFailure(d, client.PartialDownloadError)
        def cbFailed(ignored):
            self.assertEquals(len(self.flushLoggedErrors(IOError)), 1)
        d.addCallback(cbFailed)
        return d
    def testHostHeader(self):
        # if we pass Host header explicitly, it should be used, otherwise
        # it should extract from url
        return defer.gatherResults([
            client.getPage(self.getURL("host")).addCallback(self.assertEquals, "127.0.0.1:%s" % (self.portno,)),
            client.getPage(self.getURL("host"), headers={"Host": "www.example.com"}).addCallback(self.assertEquals, "www.example.com")])
    def test_getPage(self):
        """
        L{client.getPage} returns a L{Deferred} which is called back with
        the body of the response if the default method B{GET} is used.
        """
        d = client.getPage(self.getURL("file"))
        d.addCallback(self.assertEquals, "0123456789")
        return d
    def test_getPageHEAD(self):
        """
        L{client.getPage} returns a L{Deferred} which is called back with
        the empty string if the method is I{HEAD} and there is a successful
        response code.
        """
        d = client.getPage(self.getURL("file"), method="HEAD")
        d.addCallback(self.assertEquals, "")
        return d
    def test_getPageNotQuiteHEAD(self):
        """
        If the request method is a different casing of I{HEAD} (ie, not all
        capitalized) then it is not a I{HEAD} request and the response body
        is returned.
        """
        d = client.getPage(self.getURL("miscased-head"), method='Head')
        d.addCallback(self.assertEquals, "miscased-head content")
        return d
    def test_timeoutNotTriggering(self):
        """
        When a non-zero timeout is passed to L{getPage} and the page is
        retrieved before the timeout period elapses, the L{Deferred} is
        called back with the contents of the page.
        """
        d = client.getPage(self.getURL("host"), timeout=100)
        d.addCallback(self.assertEquals, "127.0.0.1:%s" % (self.portno,))
        return d
    def test_timeoutTriggering(self):
        """
        When a non-zero timeout is passed to L{getPage} and that many
        seconds elapse before the server responds to the request. the
        L{Deferred} is errbacked with a L{error.TimeoutError}.
        """
        # This will probably leave some connections around.
        self.cleanupServerConnections = 1
        return self.assertFailure(
            client.getPage(self.getURL("wait"), timeout=0.000001),
            defer.TimeoutError)
    def testDownloadPage(self):
        # Download two resources concurrently and verify the file contents.
        downloads = []
        downloadData = [("file", self.mktemp(), "0123456789"),
                        ("nolength", self.mktemp(), "nolength")]
        for (url, name, data) in downloadData:
            d = client.downloadPage(self.getURL(url), name)
            d.addCallback(self._cbDownloadPageTest, data, name)
            downloads.append(d)
        return defer.gatherResults(downloads)
    def _cbDownloadPageTest(self, ignored, data, name):
        # The downloaded file must contain exactly the expected bytes.
        bytes = file(name, "rb").read()
        self.assertEquals(bytes, data)
    def testDownloadPageError1(self):
        # A file object whose write() raises makes downloadPage fail.
        class errorfile:
            def write(self, data):
                raise IOError, "badness happened during write"
            def close(self):
                pass
        ef = errorfile()
        return self.assertFailure(
            client.downloadPage(self.getURL("file"), ef),
            IOError)
    def testDownloadPageError2(self):
        # A file object whose close() raises makes downloadPage fail.
        class errorfile:
            def write(self, data):
                pass
            def close(self):
                raise IOError, "badness happened during close"
        ef = errorfile()
        return self.assertFailure(
            client.downloadPage(self.getURL("file"), ef),
            IOError)
    def testDownloadPageError3(self):
        # make sure failures in open() are caught too. This is tricky.
        # Might only work on posix.
        tmpfile = open("unwritable", "wb")
        tmpfile.close()
        os.chmod("unwritable", 0) # make it unwritable (to us)
        d = self.assertFailure(
            client.downloadPage(self.getURL("file"), "unwritable"),
            IOError)
        d.addBoth(self._cleanupDownloadPageError3)
        return d
    def _cleanupDownloadPageError3(self, ignored):
        # Restore permissions so the temp file can be deleted.
        os.chmod("unwritable", 0700)
        os.unlink("unwritable")
        return ignored
    def _downloadTest(self, method):
        # Request several error-producing URLs via C{method} and check each
        # fails with error.Error carrying the expected HTTP status code.
        dl = []
        for (url, code) in [("nosuchfile", "404"), ("error", "401"),
                            ("error?showlength=1", "401")]:
            d = method(url)
            d = self.assertFailure(d, error.Error)
            d.addCallback(lambda exc, code=code: self.assertEquals(exc.args[0], code))
            dl.append(d)
        return defer.DeferredList(dl, fireOnOneErrback=True)
    def testServerError(self):
        return self._downloadTest(lambda url: client.getPage(self.getURL(url)))
    def testDownloadServerError(self):
        return self._downloadTest(lambda url: client.downloadPage(self.getURL(url), url.split('?')[0]))
    def testFactoryInfo(self):
        # Drive an HTTPClientFactory directly over TCP and inspect the
        # response metadata it records.
        url = self.getURL('file')
        scheme, host, port, path = client._parse(url)
        factory = client.HTTPClientFactory(url)
        reactor.connectTCP(host, port, factory)
        return factory.deferred.addCallback(self._cbFactoryInfo, factory)
    def _cbFactoryInfo(self, ignoredResult, factory):
        self.assertEquals(factory.status, '200')
        self.assert_(factory.version.startswith('HTTP/'))
        self.assertEquals(factory.message, 'OK')
        self.assertEquals(factory.response_headers['content-length'][0], '10')
    def testRedirect(self):
        return client.getPage(self.getURL("redirect")).addCallback(self._cbRedirect)
    def _cbRedirect(self, pageData):
        # The redirect was followed; now check that followRedirect=0
        # instead raises PageRedirect.
        self.assertEquals(pageData, "0123456789")
        d = self.assertFailure(
            client.getPage(self.getURL("redirect"), followRedirect=0),
            error.PageRedirect)
        d.addCallback(self._cbCheckLocation)
        return d
    def _cbCheckLocation(self, exc):
        self.assertEquals(exc.location, "/file")
    def test_infiniteRedirection(self):
        """
        When more than C{redirectLimit} HTTP redirects are encountered, the
        page request fails with L{InfiniteRedirection}.
        """
        def checkRedirectCount(*a):
            self.assertEquals(f._redirectCount, 13)
            self.assertEquals(self.infiniteRedirectResource.count, 13)
        f = client._makeGetterFactory(
            self.getURL('infiniteRedirect'),
            client.HTTPClientFactory,
            redirectLimit=13)
        d = self.assertFailure(f.deferred, error.InfiniteRedirection)
        d.addCallback(checkRedirectCount)
        return d
    def test_isolatedFollowRedirect(self):
        """
        C{client.HTTPPagerGetter} instances each obey the C{followRedirect}
        value passed to the L{client.getPage} call which created them.
        """
        d1 = client.getPage(self.getURL('redirect'), followRedirect=True)
        d2 = client.getPage(self.getURL('redirect'), followRedirect=False)
        d = self.assertFailure(d2, error.PageRedirect
            ).addCallback(lambda dummy: d1)
        return d
    def test_afterFoundGet(self):
        """
        Enabling unsafe redirection behaviour overwrites the method of
        redirected C{POST} requests with C{GET}.
        """
        url = self.getURL('extendedRedirect?code=302')
        f = client.HTTPClientFactory(url, followRedirect=True, method="POST")
        self.assertFalse(
            f.afterFoundGet,
            "By default, afterFoundGet must be disabled")
        def gotPage(page):
            self.assertEquals(
                self.extendedRedirect.lastMethod,
                "GET",
                "With afterFoundGet, the HTTP method must change to GET")
        d = client.getPage(
            url, followRedirect=True, afterFoundGet=True, method="POST")
        d.addCallback(gotPage)
        return d
    def test_downloadAfterFoundGet(self):
        """
        Passing C{True} for C{afterFoundGet} to L{client.downloadPage} invokes
        the same kind of redirect handling as passing that argument to
        L{client.getPage} invokes.
        """
        url = self.getURL('extendedRedirect?code=302')
        def gotPage(page):
            self.assertEquals(
                self.extendedRedirect.lastMethod,
                "GET",
                "With afterFoundGet, the HTTP method must change to GET")
        d = client.downloadPage(url, "downloadTemp",
            followRedirect=True, afterFoundGet=True, method="POST")
        d.addCallback(gotPage)
        return d
    def test_afterFoundGetMakesOneRequest(self):
        """
        When C{afterFoundGet} is C{True}, L{client.getPage} only issues one
        request to the server when following the redirect. This is a regression
        test, see #4760.
        """
        def checkRedirectCount(*a):
            self.assertEquals(self.afterFoundGetCounter.count, 1)
        url = self.getURL('afterFoundGetRedirect')
        d = client.getPage(
            url, followRedirect=True, afterFoundGet=True, method="POST")
        d.addCallback(checkRedirectCount)
        return d
    def testPartial(self):
        # Seed the target file with 4 bytes, then download with and without
        # supportPartial and compare the resulting contents.
        name = self.mktemp()
        f = open(name, "wb")
        f.write("abcd")
        f.close()
        partialDownload = [(True, "abcd456789"),
                           (True, "abcd456789"),
                           (False, "0123456789")]
        d = defer.succeed(None)
        for (partial, expectedData) in partialDownload:
            d.addCallback(self._cbRunPartial, name, partial)
            d.addCallback(self._cbPartialTest, expectedData, name)
        return d
    testPartial.skip = "Cannot test until webserver can serve partial data properly"
    def _cbRunPartial(self, ignored, name, partial):
        return client.downloadPage(self.getURL("file"), name, supportPartial=partial)
    def _cbPartialTest(self, ignored, expectedData, filename):
        bytes = file(filename, "rb").read()
        self.assertEquals(bytes, expectedData)
    def test_downloadTimeout(self):
        """
        If the timeout indicated by the C{timeout} parameter to
        L{client.HTTPDownloader.__init__} elapses without the complete response
        being received, the L{defer.Deferred} returned by
        L{client.downloadPage} fires with a L{Failure} wrapping a
        L{defer.TimeoutError}.
        """
        self.cleanupServerConnections = 2
        # Verify the behavior if no bytes are ever written.
        first = client.downloadPage(
            self.getURL("wait"),
            self.mktemp(), timeout=0.01)
        # Verify the behavior if some bytes are written but then the request
        # never completes.
        second = client.downloadPage(
            self.getURL("write-then-wait"),
            self.mktemp(), timeout=0.01)
        return defer.gatherResults([
            self.assertFailure(first, defer.TimeoutError),
            self.assertFailure(second, defer.TimeoutError)])
    def test_downloadHeaders(self):
        """
        After L{client.HTTPDownloader.deferred} fires, the
        L{client.HTTPDownloader} instance's C{status} and C{response_headers}
        attributes are populated with the values from the response.
        """
        def checkHeaders(factory):
            self.assertEquals(factory.status, '200')
            self.assertEquals(factory.response_headers['content-type'][0], 'text/html')
            self.assertEquals(factory.response_headers['content-length'][0], '10')
            os.unlink(factory.fileName)
        factory = client._makeGetterFactory(
            self.getURL('file'),
            client.HTTPDownloader,
            fileOrName=self.mktemp())
        return factory.deferred.addCallback(lambda _: checkHeaders(factory))
    def test_downloadCookies(self):
        """
        The C{cookies} dict passed to the L{client.HTTPDownloader}
        initializer is used to populate the I{Cookie} header included in the
        request sent to the server.
        """
        output = self.mktemp()
        factory = client._makeGetterFactory(
            self.getURL('cookiemirror'),
            client.HTTPDownloader,
            fileOrName=output,
            cookies={'foo': 'bar'})
        def cbFinished(ignored):
            self.assertEqual(
                FilePath(output).getContent(),
                "[('foo', 'bar')]")
        factory.deferred.addCallback(cbFinished)
        return factory.deferred
    def test_downloadRedirectLimit(self):
        """
        When more than C{redirectLimit} HTTP redirects are encountered, the
        page request fails with L{InfiniteRedirection}.
        """
        def checkRedirectCount(*a):
            self.assertEquals(f._redirectCount, 7)
            self.assertEquals(self.infiniteRedirectResource.count, 7)
        f = client._makeGetterFactory(
            self.getURL('infiniteRedirect'),
            client.HTTPDownloader,
            fileOrName=self.mktemp(),
            redirectLimit=7)
        d = self.assertFailure(f.deferred, error.InfiniteRedirection)
        d.addCallback(checkRedirectCount)
        return d
class WebClientSSLTestCase(WebClientTestCase):
    """
    The full L{WebClientTestCase} suite run over HTTPS, using the
    self-signed server.pem certificate shipped alongside twisted.test.
    """
    def _listen(self, site):
        from twisted import test
        return reactor.listenSSL(0, site,
            contextFactory=ssl.DefaultOpenSSLContextFactory(
                FilePath(test.__file__).sibling('server.pem').path,
                FilePath(test.__file__).sibling('server.pem').path,
                ),
            interface="127.0.0.1")
    def getURL(self, path):
        return "https://127.0.0.1:%d/%s" % (self.portno, path)
    def testFactoryInfo(self):
        url = self.getURL('file')
        scheme, host, port, path = client._parse(url)
        factory = client.HTTPClientFactory(url)
        reactor.connectSSL(host, port, factory, ssl.ClientContextFactory())
        # The base class defines _cbFactoryInfo correctly for this
        return factory.deferred.addCallback(self._cbFactoryInfo, factory)
class WebClientRedirectBetweenSSLandPlainText(unittest.TestCase):
    """
    L{getPage} follows redirects across scheme changes, hopping between a
    plain-text HTTP server and an HTTPS server until the final resource
    (http one -> https two -> http three -> https four) is reached.
    """
    def getHTTPS(self, path):
        return "https://127.0.0.1:%d/%s" % (self.tlsPortno, path)
    def getHTTP(self, path):
        return "http://127.0.0.1:%d/%s" % (self.plainPortno, path)
    def setUp(self):
        # One plain-text site and one TLS site, cross-redirecting into each
        # other; the chain terminates at "four" on the TLS site.
        plainRoot = static.Data('not me', 'text/plain')
        tlsRoot = static.Data('me neither', 'text/plain')
        plainSite = server.Site(plainRoot, timeout=None)
        tlsSite = server.Site(tlsRoot, timeout=None)
        from twisted import test
        self.tlsPort = reactor.listenSSL(0, tlsSite,
            contextFactory=ssl.DefaultOpenSSLContextFactory(
                FilePath(test.__file__).sibling('server.pem').path,
                FilePath(test.__file__).sibling('server.pem').path,
                ),
            interface="127.0.0.1")
        self.plainPort = reactor.listenTCP(0, plainSite, interface="127.0.0.1")
        self.plainPortno = self.plainPort.getHost().port
        self.tlsPortno = self.tlsPort.getHost().port
        plainRoot.putChild('one', util.Redirect(self.getHTTPS('two')))
        tlsRoot.putChild('two', util.Redirect(self.getHTTP('three')))
        plainRoot.putChild('three', util.Redirect(self.getHTTPS('four')))
        tlsRoot.putChild('four', static.Data('FOUND IT!', 'text/plain'))
    def tearDown(self):
        ds = map(defer.maybeDeferred,
                 [self.plainPort.stopListening, self.tlsPort.stopListening])
        return defer.gatherResults(ds)
    def testHoppingAround(self):
        return client.getPage(self.getHTTP("one")
            ).addCallback(self.assertEquals, "FOUND IT!"
            )
class FakeTransport:
    """
    A minimal transport stand-in which simply records everything written
    to it.

    @ivar data: A C{list} of the strings passed to L{write}, in order.
    """
    disconnecting = False
    def __init__(self):
        self.data = []
    def write(self, chunk):
        self.data.append(chunk)
class CookieTestCase(unittest.TestCase):
    """
    Tests for cookie handling in L{client.getPage} and
    L{client.HTTPClientFactory}: cookies supplied by the caller are sent to
    the server, and I{Set-Cookie} response headers are collected into the
    factory's C{cookies} dict.
    """
    def _listen(self, site):
        return reactor.listenTCP(0, site, interface="127.0.0.1")
    def setUp(self):
        root = static.Data('El toro!', 'text/plain')
        root.putChild("cookiemirror", CookieMirrorResource())
        root.putChild("rawcookiemirror", RawCookieMirrorResource())
        site = server.Site(root, timeout=None)
        self.port = self._listen(site)
        self.portno = self.port.getHost().port
    def tearDown(self):
        return self.port.stopListening()
    def getHTTP(self, path):
        # Absolute http:// URL for C{path} on the test server.
        return "http://127.0.0.1:%d/%s" % (self.portno, path)
    def testNoCookies(self):
        return client.getPage(self.getHTTP("cookiemirror")
            ).addCallback(self.assertEquals, "[]"
            )
    def testSomeCookies(self):
        cookies = {'foo': 'bar', 'baz': 'quux'}
        return client.getPage(self.getHTTP("cookiemirror"), cookies=cookies
            ).addCallback(self.assertEquals, "[('baz', 'quux'), ('foo', 'bar')]"
            )
    def testRawNoCookies(self):
        return client.getPage(self.getHTTP("rawcookiemirror")
            ).addCallback(self.assertEquals, "None"
            )
    def testRawSomeCookies(self):
        cookies = {'foo': 'bar', 'baz': 'quux'}
        return client.getPage(self.getHTTP("rawcookiemirror"), cookies=cookies
            ).addCallback(self.assertEquals, "'foo=bar; baz=quux'"
            )
    def testCookieHeaderParsing(self):
        # Feed a canned response to the protocol and verify that all three
        # Set-Cookie headers end up parsed into the factory's cookies dict.
        factory = client.HTTPClientFactory('http://foo.example.com/')
        proto = factory.buildProtocol('127.42.42.42')
        proto.transport = FakeTransport()
        proto.connectionMade()
        for line in [
            '200 Ok',
            'Squash: yes',
            'Hands: stolen',
            'Set-Cookie: CUSTOMER=WILE_E_COYOTE; path=/; expires=Wednesday, 09-Nov-99 23:12:40 GMT',
            'Set-Cookie: PART_NUMBER=ROCKET_LAUNCHER_0001; path=/',
            'Set-Cookie: SHIPPING=FEDEX; path=/foo',
            '',
            'body',
            'more body',
            ]:
            proto.dataReceived(line + '\r\n')
        self.assertEquals(proto.transport.data,
                          ['GET / HTTP/1.0\r\n',
                           'Host: foo.example.com\r\n',
                           'User-Agent: Twisted PageGetter\r\n',
                           '\r\n'])
        self.assertEquals(factory.cookies,
                          {
            'CUSTOMER': 'WILE_E_COYOTE',
            'PART_NUMBER': 'ROCKET_LAUNCHER_0001',
            'SHIPPING': 'FEDEX',
            })
class TestHostHeader(unittest.TestCase):
    """
    Test that L{HTTPClientFactory} includes the port in the host header
    if needed.
    """
    def _getHost(self, bytes):
        """
        Retrieve the value of the I{Host} header from the serialized
        request given by C{bytes}.
        """
        for line in bytes.splitlines():
            try:
                name, value = line.split(':', 1)
                if name.strip().lower() == 'host':
                    return value.strip()
            except ValueError:
                # Lines without a colon (the request line, the blank
                # separator) are simply skipped.
                pass
    def test_HTTPDefaultPort(self):
        """
        No port should be included in the host header when connecting to the
        default HTTP port.
        """
        factory = client.HTTPClientFactory('http://foo.example.com/')
        proto = factory.buildProtocol('127.42.42.42')
        proto.makeConnection(StringTransport())
        self.assertEquals(self._getHost(proto.transport.value()),
                          'foo.example.com')
    def test_HTTPPort80(self):
        """
        No port should be included in the host header when connecting to the
        default HTTP port even if it is in the URL.
        """
        factory = client.HTTPClientFactory('http://foo.example.com:80/')
        proto = factory.buildProtocol('127.42.42.42')
        proto.makeConnection(StringTransport())
        self.assertEquals(self._getHost(proto.transport.value()),
                          'foo.example.com')
    def test_HTTPNotPort80(self):
        """
        The port should be included in the host header when connecting to
        a non-default HTTP port.
        """
        factory = client.HTTPClientFactory('http://foo.example.com:8080/')
        proto = factory.buildProtocol('127.42.42.42')
        proto.makeConnection(StringTransport())
        self.assertEquals(self._getHost(proto.transport.value()),
                          'foo.example.com:8080')
    def test_HTTPSDefaultPort(self):
        """
        No port should be included in the host header when connecting to the
        default HTTPS port.
        """
        factory = client.HTTPClientFactory('https://foo.example.com/')
        proto = factory.buildProtocol('127.42.42.42')
        proto.makeConnection(StringTransport())
        self.assertEquals(self._getHost(proto.transport.value()),
                          'foo.example.com')
    def test_HTTPSPort443(self):
        """
        No port should be included in the host header when connecting to the
        default HTTPS port even if it is in the URL.
        """
        factory = client.HTTPClientFactory('https://foo.example.com:443/')
        proto = factory.buildProtocol('127.42.42.42')
        proto.makeConnection(StringTransport())
        self.assertEquals(self._getHost(proto.transport.value()),
                          'foo.example.com')
    def test_HTTPSNotPort443(self):
        """
        The port should be included in the host header when connecting to
        a non-default HTTPS port.
        """
        # Bug fix: this previously used an http:// URL, making the test a
        # duplicate of test_HTTPNotPort80 and leaving the HTTPS
        # non-default-port case untested.  The URL must use the https
        # scheme so the 443 default (not 80) is what port 8080 differs from.
        factory = client.HTTPClientFactory('https://foo.example.com:8080/')
        proto = factory.buildProtocol('127.42.42.42')
        proto.makeConnection(StringTransport())
        self.assertEquals(self._getHost(proto.transport.value()),
                          'foo.example.com:8080')
class StubHTTPProtocol(Protocol):
    """
    A protocol like L{HTTP11ClientProtocol} but which does not actually know
    HTTP/1.1 and only collects requests in a list.
    @ivar requests: A C{list} of two-tuples.  Each time a request is made, a
        tuple consisting of the request and the L{Deferred} returned from the
        request method is appended to this list.
    """
    def __init__(self):
        self.requests = []
    def request(self, request):
        """
        Capture the given request for later inspection.
        @return: A L{Deferred} which this code will never fire.
        """
        # The Deferred is intentionally left unfired; tests fire or
        # inspect it themselves.
        result = Deferred()
        self.requests.append((request, result))
        return result
class AgentTests(unittest.TestCase):
"""
Tests for the new HTTP client API provided by L{Agent}.
"""
def setUp(self):
"""
Create an L{Agent} wrapped around a fake reactor.
"""
class Reactor(MemoryReactor, Clock):
def __init__(self):
MemoryReactor.__init__(self)
Clock.__init__(self)
self.reactor = Reactor()
self.agent = client.Agent(self.reactor)
def completeConnection(self):
"""
Do whitebox stuff to finish any outstanding connection attempts the
agent may have initiated.
This spins the fake reactor clock just enough to get L{ClientCreator},
which agent is implemented in terms of, to fire its Deferreds.
"""
self.reactor.advance(0)
def test_unsupportedScheme(self):
"""
L{Agent.request} returns a L{Deferred} which fails with
L{SchemeNotSupported} if the scheme of the URI passed to it is not
C{'http'}.
"""
return self.assertFailure(
self.agent.request('GET', 'mailto:alice@example.com'),
SchemeNotSupported)
def test_connectionFailed(self):
"""
The L{Deferred} returned by L{Agent.request} fires with a L{Failure} if
the TCP connection attempt fails.
"""
result = self.agent.request('GET', 'http://foo/')
# Cause the connection to be refused
host, port, factory = self.reactor.tcpClients.pop()[:3]
factory.clientConnectionFailed(None, Failure(ConnectionRefusedError()))
self.completeConnection()
return self.assertFailure(result, ConnectionRefusedError)
def test_connectHTTP(self):
"""
L{Agent._connect} uses C{connectTCP} to set up a connection to
a server when passed a scheme of C{'http'} and returns a
L{Deferred} which fires (when that connection is established)
with the protocol associated with that connection.
"""
expectedHost = 'example.com'
expectedPort = 1234
d = self.agent._connect('http', expectedHost, expectedPort)
host, port, factory = self.reactor.tcpClients.pop()[:3]
self.assertEquals(host, expectedHost)
self.assertEquals(port, expectedPort)
protocol = factory.buildProtocol(IPv4Address('TCP', '10.0.0.1', port))
self.assertIsInstance(protocol, HTTP11ClientProtocol)
self.completeConnection()
d.addCallback(self.assertIdentical, protocol)
return d
def test_connectHTTPS(self):
"""
L{Agent._connect} uses C{connectSSL} to set up a connection to
a server when passed a scheme of C{'https'} and returns a
L{Deferred} which fires (when that connection is established)
with the protocol associated with that connection.
"""
expectedHost = 'example.com'
expectedPort = 4321
d = self.agent._connect('https', expectedHost, expectedPort)
host, port, factory, contextFactory = self.reactor.sslClients.pop()[:4]
self.assertEquals(host, expectedHost)
self.assertEquals(port, expectedPort)
context = contextFactory.getContext()
# This is a pretty weak assertion. It's true that the context must be
# an instance of OpenSSL.SSL.Context, Unfortunately these are pretty
# opaque and there's not much more than checking its type that we could
# do here. It would be nice if the SSL APIs involved more testable (ie,
# inspectable) objects.
self.assertIsInstance(context, ContextType)
protocol = factory.buildProtocol(IPv4Address('TCP', '10.0.0.1', port))
self.assertIsInstance(protocol, HTTP11ClientProtocol)
self.completeConnection()
d.addCallback(self.assertIdentical, protocol)
return d
if ssl is None:
test_connectHTTPS.skip = "OpenSSL not present"
def test_connectHTTPSCustomContextFactory(self):
    """
    If a context factory is passed to L{Agent.__init__} it will be used to
    determine the SSL parameters for HTTPS requests.  When an HTTPS request
    is made, the hostname and port number of the request URL will be passed
    to the context factory's C{getContext} method.  The resulting context
    object will be used to establish the SSL connection.
    """
    expectedHost = 'example.org'
    expectedPort = 20443
    expectedContext = object()
    # Record the arguments getContext is called with so they can be
    # asserted on below.
    contextArgs = []
    class StubWebContextFactory(object):
        def getContext(self, hostname, port):
            contextArgs.append((hostname, port))
            return expectedContext
    agent = client.Agent(self.reactor, StubWebContextFactory())
    d = agent._connect('https', expectedHost, expectedPort)
    host, port, factory, contextFactory = self.reactor.sslClients.pop()[:4]
    context = contextFactory.getContext()
    # The custom factory's context must be used, and it must have been
    # asked for a context for exactly the requested host and port.
    self.assertEquals(context, expectedContext)
    self.assertEquals(contextArgs, [(expectedHost, expectedPort)])
    protocol = factory.buildProtocol(IPv4Address('TCP', '10.0.0.1', port))
    self.assertIsInstance(protocol, HTTP11ClientProtocol)
    self.completeConnection()
    d.addCallback(self.assertIdentical, protocol)
    return d
def _dummyConnect(self, scheme, host, port):
    """
    Fake implementation of L{Agent._connect} which synchronously
    succeeds with an instance of L{StubHTTPProtocol} for ease of
    testing.
    """
    stub = StubHTTPProtocol()
    stub.makeConnection(None)
    # Remember the protocol so individual tests can inspect the requests
    # that were issued over it.
    self.protocol = stub
    return succeed(stub)
def test_request(self):
    """
    L{Agent.request} establishes a new connection to the host indicated by
    the host part of the URI passed to it and issues a request using the
    method, the path portion of the URI, the headers, and the body producer
    passed to it.  It returns a L{Deferred} which fires with a L{Response}
    from the server.
    """
    # Replace the real connection logic with the synchronous stub so the
    # request is captured instead of going over the network.
    self.agent._connect = self._dummyConnect
    headers = http_headers.Headers({'foo': ['bar']})
    # Just going to check the body for identity, so it doesn't need to be
    # real.
    body = object()
    self.agent.request(
        'GET', 'http://example.com:1234/foo?bar', headers, body)
    protocol = self.protocol
    # The request should be issued.
    self.assertEquals(len(protocol.requests), 1)
    req, res = protocol.requests.pop()
    self.assertIsInstance(req, Request)
    self.assertEquals(req.method, 'GET')
    # Only the path and query are sent in the request line; the host and
    # port go into the Host header.
    self.assertEquals(req.uri, '/foo?bar')
    self.assertEquals(
        req.headers,
        http_headers.Headers({'foo': ['bar'],
                              'host': ['example.com:1234']}))
    self.assertIdentical(req.bodyProducer, body)
def test_hostProvided(self):
    """
    If C{None} is passed to L{Agent.request} for the C{headers}
    parameter, a L{Headers} instance is created for the request and a
    I{Host} header added to it.
    """
    self.agent._connect = self._dummyConnect
    # No headers argument at all - the agent must synthesize them.
    self.agent.request('GET', 'http://example.com/foo')
    protocol = self.protocol
    # The request should have been issued with a host header based on
    # the request URL.
    self.assertEquals(len(protocol.requests), 1)
    req, res = protocol.requests.pop()
    self.assertEquals(req.headers.getRawHeaders('host'), ['example.com'])
def test_hostOverride(self):
    """
    If the headers passed to L{Agent.request} includes a value for the
    I{Host} header, that value takes precedence over the one which would
    otherwise be automatically provided.
    """
    self.agent._connect = self._dummyConnect
    # Explicit 'host' header that differs from the URL's host.
    headers = http_headers.Headers({'foo': ['bar'], 'host': ['quux']})
    body = object()
    self.agent.request(
        'GET', 'http://example.com/baz', headers, body)
    protocol = self.protocol
    # The request should have been issued with the host header specified
    # above, not one based on the request URI.
    self.assertEquals(len(protocol.requests), 1)
    req, res = protocol.requests.pop()
    self.assertEquals(req.headers.getRawHeaders('host'), ['quux'])
def test_headersUnmodified(self):
    """
    If a I{Host} header must be added to the request, the L{Headers}
    instance passed to L{Agent.request} is not modified.
    """
    self.agent._connect = self._dummyConnect
    headers = http_headers.Headers()
    body = object()
    self.agent.request(
        'GET', 'http://example.com/foo', headers, body)
    protocol = self.protocol
    # The request should have been issued.
    self.assertEquals(len(protocol.requests), 1)
    # And the headers object passed in should not have changed - the agent
    # must have copied it before adding the Host header.
    self.assertEquals(headers, http_headers.Headers())
def test_hostValueStandardHTTP(self):
    """
    When passed a scheme of C{'http'} and a port of C{80},
    L{Agent._computeHostValue} returns a string giving just the
    host name passed to it.
    """
    # Port 80 is the HTTP default, so it must be omitted from the value.
    value = self.agent._computeHostValue('http', 'example.com', 80)
    self.assertEquals(value, 'example.com')
def test_hostValueNonStandardHTTP(self):
    """
    When passed a scheme of C{'http'} and a port other than C{80},
    L{Agent._computeHostValue} returns a string giving the host
    passed to it joined together with the port number by C{":"}.
    """
    # A non-default port must be included explicitly.
    value = self.agent._computeHostValue('http', 'example.com', 54321)
    self.assertEquals(value, 'example.com:54321')
def test_hostValueStandardHTTPS(self):
    """
    When passed a scheme of C{'https'} and a port of C{443},
    L{Agent._computeHostValue} returns a string giving just the
    host name passed to it.
    """
    # Port 443 is the HTTPS default, so it must be omitted from the value.
    value = self.agent._computeHostValue('https', 'example.com', 443)
    self.assertEquals(value, 'example.com')
def test_hostValueNonStandardHTTPS(self):
    """
    When passed a scheme of C{'https'} and a port other than
    C{443}, L{Agent._computeHostValue} returns a string giving the
    host passed to it joined together with the port number by
    C{":"}.
    """
    # A non-default port must be included explicitly.
    value = self.agent._computeHostValue('https', 'example.com', 54321)
    self.assertEquals(value, 'example.com:54321')
if ssl is None or not hasattr(ssl, 'DefaultOpenSSLContextFactory'):
for case in [WebClientSSLTestCase, WebClientRedirectBetweenSSLandPlainText]:
case.skip = "OpenSSL not present"
if not interfaces.IReactorSSL(reactor, None):
for case in [WebClientSSLTestCase, WebClientRedirectBetweenSSLandPlainText]:
case.skip = "Reactor doesn't support SSL"
| agpl-3.0 |
jpasosa/agroapex | plugins/apostrophePlugin/web/js/fckeditor/editor/filemanager/connectors/py/zope.py | 89 | 5685 | #!/usr/bin/env python
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2009 Frederico Caldeira Knabben
== BEGIN LICENSE ==
Licensed under the terms of any of the following licenses at your
choice:
- GNU General Public License Version 2 or later (the "GPL")
http://www.gnu.org/licenses/gpl.html
- GNU Lesser General Public License Version 2.1 or later (the "LGPL")
http://www.gnu.org/licenses/lgpl.html
- Mozilla Public License Version 1.1 or later (the "MPL")
http://www.mozilla.org/MPL/MPL-1.1.html
== END LICENSE ==
Connector for Python and Zope.
This code was not tested at all.
It just was ported from pre 2.5 release, so for further reference see
\editor\filemanager\browser\default\connectors\py\connector.py in previous
releases.
"""
from fckutil import *
from connector import *
import config as Config
class FCKeditorConnectorZope(FCKeditorConnector):
    """
    Zope version of FCKeditorConnector.

    Browses folders and stores uploaded files as objects inside the Zope
    object database (ZODB) instead of on the filesystem.
    """
    # Let Zope's security machinery publish this object.
    __allow_access_to_unprotected_subobjects__ = 1

    def __init__(self, context=None):
        """
        Constructor.

        context -- the Zope context object; its REQUEST is wrapped so the
        generic connector code can read request parameters from it.
        """
        FCKeditorConnector.__init__(self, environ=None)  # call superclass constructor
        # Instance Attributes
        self.context = context
        self.request = FCKeditorRequest(context)
        # Lazily-resolved Zope containers.  getattr() preserves any value the
        # base class may already have set; previously these attributes were
        # never initialised, so the getters below raised AttributeError.
        self.zopeRootContext = getattr(self, 'zopeRootContext', None)
        self.zopeUploadContext = getattr(self, 'zopeUploadContext', None)

    def getZopeRootContext(self):
        """Return (and cache) the physical root object of the Zope instance."""
        if self.zopeRootContext is None:
            self.zopeRootContext = self.context.getPhysicalRoot()
        return self.zopeRootContext

    def getZopeUploadContext(self):
        """Return (and cache) the folder configured as the upload root."""
        if self.zopeUploadContext is None:
            folderNames = self.userFilesFolder.split("/")
            c = self.getZopeRootContext()
            for folderName in folderNames:
                if folderName != "":
                    c = c[folderName]
            self.zopeUploadContext = c
        return self.zopeUploadContext

    def setHeader(self, key, value):
        """Set a header on the Zope response object."""
        self.context.REQUEST.RESPONSE.setHeader(key, value)

    def getFolders(self, resourceType, currentFolder):
        """Return a <Folders> XML fragment listing the sub-folders."""
        # Open the folders node
        s = ""
        s += """<Folders>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["Folder"]):
            s += """<Folder name="%s" />""" % (
                convertToXmlAttribute(name)
            )
        # Close the folders node
        s += """</Folders>"""
        return s

    def getZopeFoldersAndFiles(self, resourceType, currentFolder):
        """Return the combined <Folders> and <Files> XML fragments."""
        # NOTE(review): this used to call self.getZopeFolders(), which is not
        # defined anywhere; getFolders() is the folder-listing method.
        folders = self.getFolders(resourceType, currentFolder)
        files = self.getZopeFiles(resourceType, currentFolder)
        return folders + files

    def getZopeFiles(self, resourceType, currentFolder):
        """Return a <Files> XML fragment listing files with their size in KB."""
        # Open the files node
        s = ""
        s += """<Files>"""
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        for (name, o) in zopeFolder.objectItems(["File", "Image"]):
            s += """<File name="%s" size="%s" />""" % (
                convertToXmlAttribute(name),
                ((o.get_size() / 1024) + 1)
            )
        # Close the files node
        s += """</Files>"""
        return s

    def findZopeFolder(self, resourceType, folderName):
        """Return the Zope folder object addressed by resourceType/folderName.

        The per-resource-type folder is created on demand if it is missing.
        """
        zopeFolder = self.getZopeUploadContext()
        folderName = self.removeFromStart(folderName, "/")
        folderName = self.removeFromEnd(folderName, "/")
        if resourceType != "":
            try:
                zopeFolder = zopeFolder[resourceType]
            except Exception:
                # Missing resource-type folder: create it, then descend.
                zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=resourceType, title=resourceType)
                zopeFolder = zopeFolder[resourceType]
        if folderName != "":
            for name in folderName.split("/"):
                zopeFolder = zopeFolder[name]
        return zopeFolder

    def createFolder(self, resourceType, currentFolder):
        """Create the folder named by the request's NewFolderName parameter."""
        # Find out where we are
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        errorNo = 0
        errorMsg = ""
        if self.request.has_key("NewFolderName"):
            newFolder = self.request.get("NewFolderName", None)
            zopeFolder.manage_addProduct["OFSP"].manage_addFolder(id=newFolder, title=newFolder)
        else:
            # Error 102: invalid folder name (none supplied).
            errorNo = 102
        return self.sendErrorNode(errorNo, errorMsg)

    def uploadFile(self, resourceType, currentFolder, count=None):
        """Store the uploaded NewFile in the target folder.

        On an id collision the upload is retried as "name.<count>.ext" with
        an increasing counter until a free id is found.
        """
        zopeFolder = self.findZopeFolder(resourceType, currentFolder)
        file = self.request.get("NewFile", None)
        fileName = self.getFileName(file.filename)
        fileNameOnly = self.removeExtension(fileName)
        fileExtension = self.getExtension(fileName).lower()
        if count:
            nid = "%s.%s.%s" % (fileNameOnly, count, fileExtension)
        else:
            nid = fileName
        title = nid
        try:
            zopeFolder.manage_addProduct['OFSP'].manage_addFile(
                id=nid,
                title=title,
                file=file.read()
            )
        except Exception:
            if count:
                count += 1
            else:
                count = 1
            # NOTE(review): the original recursed into self.zopeFileUpload(),
            # a method that does not exist anywhere; retry through
            # uploadFile itself.
            return self.uploadFile(resourceType, currentFolder, count)
        return self.sendUploadResults(0)
class FCKeditorRequest(object):
    """A thin wrapper presenting the Zope REQUEST as a simple mapping."""

    def __init__(self, context=None):
        # Keep a direct reference to the underlying Zope request object.
        self.request = context.REQUEST

    def has_key(self, key):
        """Return whether *key* is present in the wrapped request."""
        return self.request.has_key(key)

    def get(self, key, default=None):
        """Return the request value for *key*, or *default* if absent."""
        return self.request.get(key, default)
"""
Running from zope, you will need to modify this connector.
If you have uploaded the FCKeditor into Zope (like me), you need to
move this connector out of Zope, and replace the "connector" with an
alias as below. The key to it is to pass the Zope context in, as
we then have a link to the Zope context.
## Script (Python) "connector.py"
##bind container=container
##bind context=context
##bind namespace=
##bind script=script
##bind subpath=traverse_subpath
##parameters=*args, **kws
##title=ALIAS
##
import Products.zope as connector
return connector.FCKeditorConnectorZope(context=context).doResponse()
"""
| mit |
ds-hwang/chromium-crosswalk | third_party/WebKit/Source/devtools/scripts/modular_build.py | 32 | 6644 | #!/usr/bin/env python
#
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Utilities for the modular DevTools build.
"""
from os import path
import os
try:
import simplejson as json
except ImportError:
import json
def read_file(filename):
    """Return the entire text content of *filename*."""
    normalized = path.normpath(filename)
    with open(normalized, 'rt') as source:
        return source.read()
def write_file(filename, content):
    """Write *content* to *filename*, first removing any existing file."""
    if path.exists(filename):
        os.remove(filename)
    with open(filename, 'wt') as destination:
        destination.write(content)
def bail_error(message):
    """Abort the build by raising an Exception carrying *message*."""
    raise Exception(message)
def load_and_parse_json(filename):
    """Read *filename* and return the parsed JSON value.

    On a read or parse failure the offending filename is reported before
    the exception is re-raised, which makes build errors easy to locate.
    """
    try:
        return json.loads(read_file(filename))
    except Exception:
        # A bare "except:" would also trap KeyboardInterrupt/SystemExit;
        # Exception is broad enough since the error is re-raised anyway.
        # print() with a single argument is valid Python 2 and 3.
        print('ERROR: Failed to parse %s' % filename)
        raise
def concatenate_scripts(file_names, module_dir, output_dir, output):
    """Append each script in *file_names* to *output*, terminated by ';'.

    Each chunk is preceded by a comment naming its source file.  Scripts
    are looked up in *module_dir* first, falling back to the generated
    copy under *output_dir*.
    """
    for name in file_names:
        output.write('/* %s */\n' % name)
        source_path = path.join(module_dir, name)
        if not path.isfile(source_path):
            source_path = path.join(output_dir, path.basename(module_dir), name)
        output.write(read_file(source_path))
        output.write(';')
class Descriptors:
    """Holds the parsed application descriptor plus all module descriptors.

    Answers the build's queries: which scripts get compiled, which
    resources each module owns, and a dependency-ordered module list.
    """

    def __init__(self, application_dir, application_descriptor, module_descriptors):
        self.application_dir = application_dir
        self.application = application_descriptor
        self.modules = module_descriptors
        # Memoized result of sorted_modules().
        self._cached_sorted_modules = None

    def application_json(self):
        """Return the application descriptor values serialized as JSON."""
        # list() is required on Python 3, where dict.values() is a view
        # that json.dumps cannot serialize; it is a no-op cost on Python 2.
        return json.dumps(list(self.application.values()))

    def all_compiled_files(self):
        """Return the normalized paths of every script subject to compilation."""
        files = {}
        for name in self.modules:
            module = self.modules[name]
            skipped_files = set(module.get('skip_compilation', []))
            for script in module.get('scripts', []):
                if script not in skipped_files:
                    files[path.normpath(path.join(self.application_dir, name, script))] = True
        # list() so callers get a real list on Python 3 as well.
        return list(files.keys())

    def module_compiled_files(self, name):
        """Return the scripts of module *name* that are subject to compilation."""
        files = []
        module = self.modules.get(name)
        skipped_files = set(module.get('skip_compilation', []))
        for script in module.get('scripts', []):
            if script not in skipped_files:
                files.append(script)
        return files

    def module_resources(self, name):
        """Return 'module/resource' paths for all resources of module *name*."""
        return [name + '/' + resource for resource in self.modules[name].get('resources', [])]

    def sorted_modules(self):
        """Return all module names topologically sorted by their dependencies.

        Dependencies come before dependents.  Aborts via bail_error() on a
        dependency cycle or an unknown module.  The result is cached.
        """
        if self._cached_sorted_modules:
            return self._cached_sorted_modules
        result = []
        unvisited_modules = set(self.modules)
        temp_modules = set()

        def visit(parent, name):
            # Returns None on success, or (parent, name) when *name* is not
            # a known module, so the caller can report who referenced it.
            if name not in self.modules:
                # NOTE(review): this check must come before the
                # unvisited_modules test - unknown names are never in that
                # set, so the original ordering made this branch unreachable.
                return (parent, name)
            if name not in unvisited_modules:
                return None
            if name in temp_modules:
                bail_error('Dependency cycle found at module "%s"' % name)
            temp_modules.add(name)
            deps = self.modules[name].get('dependencies')
            if deps:
                for dep_name in deps:
                    bad_dep = visit(name, dep_name)
                    if bad_dep:
                        return bad_dep
            unvisited_modules.remove(name)
            temp_modules.remove(name)
            result.append(name)
            return None

        while len(unvisited_modules):
            # Pick an arbitrary not-yet-visited module as the next root
            # ("start" avoids shadowing the next() builtin).
            for start in unvisited_modules:
                break
            failure = visit(None, start)
            if failure:
                # failure[0] can never be None here.
                bail_error('Unknown module "%s" encountered in dependencies of "%s"' % (failure[1], failure[0]))
        self._cached_sorted_modules = result
        return result

    def sorted_dependencies_closure(self, module_name):
        """Return the dependency closure of *module_name*, dependencies first."""
        visited = set()

        def sorted_deps_for_module(name):
            result = []
            desc = self.modules[name]
            deps = desc.get('dependencies', [])
            for dep in deps:
                result += sorted_deps_for_module(dep)
            if name not in visited:
                result.append(name)
                visited.add(name)
            return result

        return sorted_deps_for_module(module_name)
class DescriptorLoader:
    """Reads application descriptor JSON files and their module descriptors."""

    def __init__(self, application_dir):
        self.application_dir = application_dir

    def load_application(self, application_descriptor_name):
        """Convenience wrapper: load a single application descriptor."""
        return self.load_applications([application_descriptor_name])

    def load_applications(self, application_descriptor_names):
        """Merge several application descriptors into one Descriptors object.

        Each referenced module's module.json is loaded exactly once, and
        every module dependency must be listed in the application
        descriptor that references the module.
        """
        merged_application_descriptor = {}
        all_module_descriptors = {}
        for application_descriptor_name in application_descriptor_names:
            # Modules first seen in this particular application file; used
            # to detect duplicates within a single descriptor.
            module_descriptors = {}
            application_descriptor_filename = path.join(self.application_dir, application_descriptor_name)
            application_descriptor = {desc['name']: desc for desc in load_and_parse_json(application_descriptor_filename)}
            for name in application_descriptor:
                merged_application_descriptor[name] = application_descriptor[name]
            for (module_name, module) in application_descriptor.items():
                if module_descriptors.get(module_name):
                    bail_error('Duplicate definition of module "%s" in %s' % (module_name, application_descriptor_filename))
                if not all_module_descriptors.get(module_name):
                    # Load the module's own descriptor only the first time
                    # any application references it.
                    module_descriptors[module_name] = self._read_module_descriptor(module_name, application_descriptor_filename)
                    all_module_descriptors[module_name] = module_descriptors[module_name]
            for module in module_descriptors.values():
                deps = module.get('dependencies', [])
                for dep in deps:
                    if dep not in application_descriptor:
                        bail_error('Module "%s" (dependency of "%s") not listed in application descriptor %s' % (dep, module['name'], application_descriptor_filename))
        return Descriptors(self.application_dir, merged_application_descriptor, all_module_descriptors)

    def _read_module_descriptor(self, module_name, application_descriptor_filename):
        """Load <module>/module.json and stamp the module's name into it."""
        json_filename = path.join(self.application_dir, module_name, 'module.json')
        if not path.exists(json_filename):
            bail_error('Module descriptor %s referenced in %s is missing' % (json_filename, application_descriptor_filename))
        module_json = load_and_parse_json(json_filename)
        module_json['name'] = module_name
        return module_json
| bsd-3-clause |
altsen/diandiyun-platform | lms/djangoapps/django_comment_client/permissions.py | 7 | 4151 | """
Module for checking permissions with the comment_client backend
"""
import logging
from django.core import cache
CACHE = cache.get_cache('default')
CACHE_LIFESPAN = 60
def cached_has_permission(user, permission, course_id=None):
    """
    Call has_permission if it's not cached. A change in a user's role or
    a role's permissions will only become effective after CACHE_LIFESPAN seconds.
    """
    cache_key = u"permission_{user_id:d}_{course_id}_{permission}".format(
        user_id=user.id, course_id=course_id, permission=permission)
    cached = CACHE.get(cache_key, None)
    # Only a genuine boolean counts as a cache hit; anything else (e.g. the
    # None default) means we have to recompute and refresh the cache.
    if cached in (True, False):
        return cached
    result = has_permission(user, permission, course_id=course_id)
    CACHE.set(cache_key, result, CACHE_LIFESPAN)
    return result
def has_permission(user, permission, course_id=None):
    """Return True if any of the user's roles for the course grants *permission*."""
    roles = user.roles.filter(course_id=course_id)
    return any(role.has_permission(permission) for role in roles)
CONDITIONS = ['is_open', 'is_author']
def check_condition(user, condition, course_id, data):
    """Evaluate a named content condition ('is_open' or 'is_author')."""

    def _is_open(user, condition, course_id, data):
        # Open means: we have content data and it is not marked closed.
        try:
            return data and not data['content']['closed']
        except KeyError:
            return False

    def _is_author(user, condition, course_id, data):
        # Author means: the content was created by this very user.
        try:
            return data and data['content']['user_id'] == str(user.id)
        except KeyError:
            return False

    dispatch = {
        'is_open': _is_open,
        'is_author': _is_author,
    }
    return dispatch[condition](user, condition, course_id, data)
def check_conditions_permissions(user, permissions, course_id, **kwargs):
    """
    Accepts a list of permissions and proceed if any of the permission is valid.
    Note that ["can_view", "can_edit"] will proceed if the user has either
    "can_view" or "can_edit" permission. To use AND operator in between, wrap them in
    a list.
    """
    def test(user, per, operator="or"):
        # A bare string is either a condition name ('is_open'/'is_author'),
        # evaluated against the request data in kwargs, or a permission name
        # looked up against the user's (cached) roles.
        if isinstance(per, basestring):
            if per in CONDITIONS:
                return check_condition(user, per, course_id, kwargs)
            return cached_has_permission(user, per, course_id=course_id)
        elif isinstance(per, list) and operator in ["and", "or"]:
            # A nested list flips the operator: members of an inner list are
            # AND-ed together, members of the outer list are OR-ed.
            results = [test(user, x, operator="and") for x in per]
            if operator == "or":
                return True in results
            elif operator == "and":
                return not False in results
    return test(user, permissions, operator="or")
# Maps each discussion view name to the permission expression required to
# invoke it.  Entries are permission names or condition names ('is_open',
# 'is_author'); a nested list is AND-ed together, while the outer list is
# OR-ed (see check_conditions_permissions).
VIEW_PERMISSIONS = {
    'update_thread': ['edit_content', ['update_thread', 'is_open', 'is_author']],
    'create_comment': [["create_comment", "is_open"]],
    'delete_thread': ['delete_thread', ['update_thread', 'is_author']],
    'update_comment': ['edit_content', ['update_comment', 'is_open', 'is_author']],
    'endorse_comment': ['endorse_comment'],
    'openclose_thread': ['openclose_thread'],
    'create_sub_comment': [['create_sub_comment', 'is_open']],
    'delete_comment': ['delete_comment', ['update_comment', 'is_open', 'is_author']],
    'vote_for_comment': [['vote', 'is_open']],
    'undo_vote_for_comment': [['unvote', 'is_open']],
    'vote_for_thread': [['vote', 'is_open']],
    'flag_abuse_for_thread': [['vote', 'is_open']],
    'un_flag_abuse_for_thread': [['vote', 'is_open']],
    'flag_abuse_for_comment': [['vote', 'is_open']],
    'un_flag_abuse_for_comment': [['vote', 'is_open']],
    'undo_vote_for_thread': [['unvote', 'is_open']],
    'pin_thread': ['create_comment'],
    'un_pin_thread': ['create_comment'],
    'follow_thread': ['follow_thread'],
    'follow_commentable': ['follow_commentable'],
    'follow_user': ['follow_user'],
    'unfollow_thread': ['unfollow_thread'],
    'unfollow_commentable': ['unfollow_commentable'],
    'unfollow_user': ['unfollow_user'],
    'create_thread': ['create_thread'],
}
def check_permissions_by_view(user, course_id, content, name):
    """Check whether *user* may invoke the view *name* on *content*.

    Returns the result of evaluating the view's registered permission
    expression, or False when the view has no entry in VIEW_PERMISSIONS.
    """
    try:
        permissions = VIEW_PERMISSIONS[name]
    except KeyError:
        logging.warning("Permission for view named %s does not exist in permissions.py" % name)
        # Fail closed: an unregistered view grants nothing.  (Previously
        # execution fell through after logging and raised NameError on the
        # undefined permission list.)
        return False
    return check_conditions_permissions(user, permissions, course_id, content=content)
| agpl-3.0 |
jvassev/dd-agent | tests/checks/integration/test_http_check.py | 10 | 8334 | # stdlibb
import time
# 3p
import mock
from nose.plugins.attrib import attr
# project
from config import AGENT_VERSION
from tests.checks.common import AgentCheckTest
from util import headers as agent_headers
RESULTS_TIMEOUT = 10
AGENT_CONFIG = {
'version': AGENT_VERSION,
'api_key': 'toto'
}
CONFIG = {
'instances': [{
'name': 'conn_error',
'url': 'https://thereisnosuchlink.com',
'check_certificate_expiration': False,
'timeout': 1,
}, {
'name': 'http_error_status_code',
'url': 'http://httpbin.org/404',
'check_certificate_expiration': False,
'timeout': 1,
}, {
'name': 'status_code_match',
'url': 'http://httpbin.org/404',
'http_response_status_code': '4..',
'check_certificate_expiration': False,
'timeout': 1,
'tags': ["foo:bar"]
}, {
'name': 'cnt_mismatch',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': False,
'content_match': 'thereisnosuchword'
}, {
'name': 'cnt_match',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': False,
'content_match': '(thereisnosuchword|github)'
}
]
}
CONFIG_SSL_ONLY = {
'instances': [{
'name': 'good_cert',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14
}, {
'name': 'cert_exp_soon',
'url': 'https://google.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 9999
}, {
'name': 'conn_error',
'url': 'https://thereisnosuchlink.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14
}
]
}
CONFIG_EXPIRED_SSL = {
'instances': [{
'name': 'expired_cert',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14
},
]
}
CONFIG_UNORMALIZED_INSTANCE_NAME = {
'instances': [{
'name': '_need-to__be_normalized-',
'url': 'https://github.com',
'timeout': 1,
'check_certificate_expiration': True,
'days_warning': 14
},
]
}
SIMPLE_CONFIG = {
'instances': [{
'name': 'simple_config',
'url': 'http://httpbin.org',
'check_certificate_expiration': False,
},
]
}
CONFIG_HTTP_HEADERS = {
'instances': [{
'url': 'https://google.com',
'name': 'UpService',
'timeout': 1,
'headers': {"X-Auth-Token": "SOME-AUTH-TOKEN"}
}]
}
FAKE_CERT = {'notAfter': 'Apr 12 12:00:00 2006 GMT'}
@attr(requires='network')
class HTTPCheckTest(AgentCheckTest):
    # Integration tests for the http_check agent check; they hit real
    # network endpoints (httpbin.org, github.com, google.com).
    CHECK_NAME = 'http_check'

    def tearDown(self):
        # Stop the check's background worker threads, if the check ran.
        if self.check:
            self.check.stop()

    def wait_for_async(self, method, attribute, count):
        """
        Loop on `self.check.method` until `self.check.attribute >= count`.
        Raise after RESULTS_TIMEOUT seconds if the count is never reached.
        """
        i = 0
        while i < RESULTS_TIMEOUT:
            # Drain results produced by the check's async workers.
            self.check._process_results()
            if len(getattr(self.check, attribute)) >= count:
                return getattr(self.check, method)()
            time.sleep(1)
            i += 1
        raise Exception("Didn't get the right count of service checks in time, {0}/{1} in {2}s: {3}"
                        .format(len(getattr(self.check, attribute)), count, i,
                                getattr(self.check, attribute)))

    def test_http_headers(self):
        """
        Headers format.
        """
        # Run the check
        self.load_check(CONFIG_HTTP_HEADERS, AGENT_CONFIG)
        # _load_conf returns the full parsed instance tuple; only the
        # headers are of interest here.
        url, username, password, http_response_status_code, timeout,\
            include_content, headers, response_time, content_match,\
            tags, ssl, ssl_expiration,\
            instance_ca_certs = self.check._load_conf(CONFIG_HTTP_HEADERS['instances'][0])
        self.assertEqual(headers["X-Auth-Token"], "SOME-AUTH-TOKEN", headers)
        # The agent's default User-Agent must be preserved alongside the
        # custom header.
        expected_headers = agent_headers(AGENT_CONFIG).get('User-Agent')
        self.assertEqual(expected_headers, headers.get('User-Agent'), headers)

    def test_check(self):
        """
        Check coverage.
        """
        self.run_check(CONFIG)
        # Overrides self.service_checks attribute when values are available\
        self.service_checks = self.wait_for_async('get_service_checks', 'service_checks', 5)
        # HTTP connection error
        tags = ['url:https://thereisnosuchlink.com', 'instance:conn_error']
        self.assertServiceCheckCritical("http.can_connect", tags=tags)
        # Wrong HTTP response status code
        tags = ['url:http://httpbin.org/404', 'instance:http_error_status_code']
        self.assertServiceCheckCritical("http.can_connect", tags=tags)
        self.assertServiceCheckOK("http.can_connect", tags=tags, count=0)
        # HTTP response status code match
        tags = ['url:http://httpbin.org/404', 'instance:status_code_match', 'foo:bar']
        self.assertServiceCheckOK("http.can_connect", tags=tags)
        # Content match & mismatching
        tags = ['url:https://github.com', 'instance:cnt_mismatch']
        self.assertServiceCheckCritical("http.can_connect", tags=tags)
        self.assertServiceCheckOK("http.can_connect", tags=tags, count=0)
        tags = ['url:https://github.com', 'instance:cnt_match']
        self.assertServiceCheckOK("http.can_connect", tags=tags)
        self.coverage_report()

    def test_check_ssl(self):
        # Exercises the certificate-expiration service check for a valid
        # cert, a cert "expiring soon" (days_warning=9999), and a
        # connection error.
        self.run_check(CONFIG_SSL_ONLY)
        # Overrides self.service_checks attribute when values are available
        self.service_checks = self.wait_for_async('get_service_checks', 'service_checks', 6)
        tags = ['url:https://github.com', 'instance:good_cert']
        self.assertServiceCheckOK("http.can_connect", tags=tags)
        self.assertServiceCheckOK("http.ssl_cert", tags=tags)
        tags = ['url:https://google.com', 'instance:cert_exp_soon']
        self.assertServiceCheckOK("http.can_connect", tags=tags)
        self.assertServiceCheckWarning("http.ssl_cert", tags=tags)
        tags = ['url:https://thereisnosuchlink.com', 'instance:conn_error']
        self.assertServiceCheckCritical("http.can_connect", tags=tags)
        self.assertServiceCheckCritical("http.ssl_cert", tags=tags)
        self.coverage_report()

    @mock.patch('ssl.SSLSocket.getpeercert', return_value=FAKE_CERT)
    def test_mock_case(self, getpeercert_func):
        # FAKE_CERT carries a notAfter date in the past, so the ssl_cert
        # service check must go critical even though the connection works.
        self.run_check(CONFIG_EXPIRED_SSL)
        # Overrides self.service_checks attribute when values are av
        # Needed for the HTTP headers
        self.service_checks = self.wait_for_async('get_service_checks', 'service_checks', 2)
        tags = ['url:https://github.com', 'instance:expired_cert']
        self.assertServiceCheckOK("http.can_connect", tags=tags)
        self.assertServiceCheckCritical("http.ssl_cert", tags=tags)
        self.coverage_report()

    def test_service_check_instance_name_normalization(self):
        """
        Service check `instance` tag value is normalized.
        Note: necessary to avoid mismatch and backward incompatiblity.
        """
        # Run the check
        self.run_check(CONFIG_UNORMALIZED_INSTANCE_NAME)
        # Overrides self.service_checks attribute when values are available
        self.service_checks = self.wait_for_async('get_service_checks', 'service_checks', 2)
        # Assess instance name normalization
        tags = ['url:https://github.com', 'instance:need_to_be_normalized']
        self.assertServiceCheckOK("http.can_connect", tags=tags)
        self.assertServiceCheckOK("http.ssl_cert", tags=tags)

    def test_warnings(self):
        """
        Deprecate events usage for service checks.
        """
        self.run_check(SIMPLE_CONFIG)
        # Overrides self.service_checks attribute when values are available\
        self.warnings = self.wait_for_async('get_warnings', 'warnings', 1)
        # Assess warnings
        self.assertWarning(
            "Using events for service checks is deprecated in "
            "favor of monitors and will be removed in future versions of the "
            "Datadog Agent.",
            count=1
        )
| bsd-3-clause |
insidenothing/3D-Printing-Software | skein_engines/skeinforge-47/fabmetheus_utilities/geometry/manipulation_meta/_copy.py | 12 | 2877 | """
Boolean geometry copy.
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.geometry.creation import lineation
from fabmetheus_utilities.geometry.creation import solid
from fabmetheus_utilities.geometry.geometry_utilities import evaluate
from fabmetheus_utilities.geometry.geometry_utilities import matrix
from fabmetheus_utilities import euclidean
__author__ = 'Enrique Perez (perez_enrique@yahoo.com)'
__credits__ = 'Art of Illusion <http://www.artofillusion.org/>'
__date__ = '$Date: 2008/02/05 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getNewDerivation(elementNode):
    'Get new derivation holding the copy target of the element.'
    return CopyDerivation(elementNode)
def processElementNode(elementNode):
    'Process the xml element.'
    # Derivation is built lazily inside processElementNodeByDerivation.
    processElementNodeByDerivation(None, elementNode)
def processElementNodeByDerivation(derivation, elementNode):
    'Process the xml element by derivation.'
    if derivation == None:
        derivation = CopyDerivation(elementNode)
    if derivation.target == None:
        print('Warning, copy could not get target for:')
        print(elementNode)
        return
    # The 'target' attribute is consumed here; the element becomes a clone
    # of the target with its own attributes layered on top.
    del elementNode.attributes['target']
    copyMatrix = matrix.getBranchMatrixSetElementNode(elementNode)
    targetMatrix = matrix.getBranchMatrixSetElementNode(derivation.target)
    targetDictionaryCopy = evaluate.removeIdentifiersFromDictionary(derivation.target.attributes.copy())
    # The copy's own attributes take precedence over the target's.
    targetDictionaryCopy.update(elementNode.attributes)
    elementNode.attributes = targetDictionaryCopy
    euclidean.removeTrueFromDictionary(elementNode.attributes, 'visible')
    elementNode.localName = derivation.target.localName
    derivation.target.copyXMLChildNodes(elementNode.getIDSuffix(), elementNode)
    elementNode.getXMLProcessor().processElementNode(elementNode)
    # Combine both transforms so the copy is placed relative to the
    # target's own matrix.
    if copyMatrix != None and targetMatrix != None:
        elementNode.xmlObject.matrix4X4 = copyMatrix.getSelfTimesOther(targetMatrix.tetragrid)
    if elementNode.xmlObject == None:
        return
    # Path-based objects are handled by the lineation manipulations.
    if len(elementNode.xmlObject.getPaths()) > 0:
        lineation.processElementNode(elementNode)
        return
    geometryOutput = elementNode.xmlObject.getGeometryOutput()
    if geometryOutput == None:
        return
    solidMatchingPlugins = solid.getSolidMatchingPlugins(elementNode)
    if len(solidMatchingPlugins) == 0:
        return
    # Solid objects get their geometry regenerated through the solid
    # manipulation plugins and are re-converted in place.
    geometryOutput = solid.getGeometryOutputByManipulation(elementNode, geometryOutput)
    elementNode.xmlObject.transformGeometryOutput(geometryOutput)
    lineation.removeChildNodesFromElementObject(elementNode)
    elementNode.getXMLProcessor().convertElementNode(elementNode, geometryOutput)
class CopyDerivation:
    "Class to hold copy variables."

    def __init__(self, elementNode):
        'Set defaults.'
        # The element referenced by this element's 'target' attribute, or
        # None when the attribute is missing or unresolvable.
        self.target = evaluate.getElementNodeByKey(elementNode, 'target')
| gpl-2.0 |
HiSPARC/station-software | user/python/Lib/site-packages/pip/_vendor/chardet/eucjpprober.py | 289 | 3749 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .enums import ProbingState, MachineState
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJP_SM_MODEL
class EUCJPProber(MultiByteCharSetProber):
    """Multi-byte prober specialised for the EUC-JP Japanese encoding.

    Combines a coding state machine (byte-sequence legality) with a
    character-distribution and a character-context analyser to compute a
    confidence that the input is EUC-JP.
    """

    def __init__(self):
        super(EUCJPProber, self).__init__()
        self.coding_sm = CodingStateMachine(EUCJP_SM_MODEL)
        self.distribution_analyzer = EUCJPDistributionAnalysis()
        self.context_analyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        super(EUCJPProber, self).reset()
        self.context_analyzer.reset()

    @property
    def charset_name(self):
        return "EUC-JP"

    @property
    def language(self):
        return "Japanese"

    def feed(self, byte_str):
        # Feed bytes one at a time through the state machine; on each
        # completed character, update the context and distribution models.
        for i in range(len(byte_str)):
            # PY3K: byte_str is a byte array, so byte_str[i] is an int, not a byte
            coding_state = self.coding_sm.next_state(byte_str[i])
            if coding_state == MachineState.ERROR:
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    # The first byte of this buffer completes a character
                    # begun in the previous feed() call (see _last_char).
                    self._last_char[1] = byte_str[0]
                    self.context_analyzer.feed(self._last_char, char_len)
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    self.context_analyzer.feed(byte_str[i - 1:i + 1],
                                               char_len)
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)
        # Remember the trailing byte for the next feed() call.
        self._last_char[0] = byte_str[-1]
        if self.state == ProbingState.DETECTING:
            # Short-circuit once the context analyser has seen enough data
            # and confidence clears the threshold.
            if (self.context_analyzer.got_enough_data() and
                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                self._state = ProbingState.FOUND_IT
        return self.state

    def get_confidence(self):
        # Report the stronger of the two analysers' confidences.
        context_conf = self.context_analyzer.get_confidence()
        distrib_conf = self.distribution_analyzer.get_confidence()
        return max(context_conf, distrib_conf)
| gpl-3.0 |
Nick-Hall/gramps | gramps/plugins/lib/libprogen.py | 4 | 84334 | # -*- coding: utf-8 -*-
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2008-2011 Kees Bakker
# Copyright (C) 2008 Brian G. Matherly
# Copyright (C) 2013-2017 Alois Poettker
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"Import from Pro-Gen"
#-------------------------------------------------------------------------
#
# standard python modules
#
#-------------------------------------------------------------------------
import re, os, struct, sys, time
#------------------------------------------------------------------------
#
# Set up logging
#
#------------------------------------------------------------------------
import logging
LOG = logging.getLogger('.ImportProGen')
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
from gramps.gen.datehandler import displayer
from gramps.gen.db.txn import DbTxn
from gramps.gen.db.dbconst import (PERSON_KEY, FAMILY_KEY, EVENT_KEY, PLACE_KEY,
NOTE_KEY, TAG_KEY, CITATION_KEY, SOURCE_KEY)
from gramps.gen.errors import HandleError
from gramps.gen.lib import (Address, Attribute, AttributeType, ChildRef,
Citation, Date, Event, EventRef, EventType, Family,
FamilyRelType, Name, NameType, NameOriginType, Note,
NoteType, Person, Place, PlaceName, Source,
SrcAttribute, Surname, Tag)
from gramps.gen.updatecallback import UpdateCallback
from gramps.gen.utils.id import create_id
from gramps.gui.utils import ProgressMeter
from gramps.plugins.importer.importxml import ImportInfo
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
# Primary Pro-Gen record categories that can be imported
PRIMOBJECTS = ('person', 'family', 'child')
# Gramps object categories that may receive the configured import tag
TAGOBJECTS = ('person', 'family', 'event', 'place', 'source', 'citation', 'note')
# Names of the optional import switches
OPTOBJECTS = (
    'person-ident', 'family-ident',
    'surname-male', 'surname-female',
    'birth-date', 'death-date', 'death-cause',
    'refn-code'
)
# Month-name abbreviations (lower case) to month number, covering the three
# supported Pro-Gen languages: German (de), English (en), Dutch (nl)
MONTHES = {
    'jan' : 1, # de, en, nl
    'feb' : 2, 'febr' : 2, # de, en, nl
    'mrz' : 3, # de
    'mar' : 3, 'march' : 3, # en
    'maa' : 3, 'mrt' : 3, 'maart' : 3, # nl
    'apr' : 4, 'april' : 4, # de, en, nl
    'mai' : 5, 'may' : 5, 'mei' : 5, # de, en, nl
    'jun' : 6, 'june' : 6, 'juni' : 6, # de, en, nl
    'jul' : 7, 'july' : 7, 'juli' : 7, # de, en, nl
    'aug' : 8, # de, en, nl
    'sep' : 9, 'sept' : 9, # de, en, nl
    'okt' : 10, 'oct' : 10, 'ok' : 10, # de, en, nl
    'nov' : 11, # de, en, nl
    'dez' : 12, 'dec' : 12 # de, en, nl
}
# Surname prefixes split off by _split_surname(); note each entry carries a
# trailing space and that matching is first-hit (tuple order matters)
PREFIXES = (
    't ', # nl
    'den ', 'der ', 'de ', # de, nl
    'het ', # nl
    'in den ', # nl
    'ten ', 'ter ', 'te ', # nl
    'van ', 'van den ', 'van der ', 'van de ', # nl
    'von ', 'von der ', # de
    'zu ' # de
)
class ProgenError(Exception):
    """
    Exception reporting Pro-Gen specific problems (mostly errors).
    """

    def __init__(self, value=''):
        """Store the human readable error description."""
        Exception.__init__(self)
        self.value = value

    def __str__(self):
        """Return the stored description."""
        return self.value
def _read_mem(bname):
    """
    Read all records of a Pro-Gen .MEM (memo) file.

    Each record is 32 bytes: a 4-byte little-endian link to the next record
    followed by 28 bytes of text.  Records form chains that end at a link
    <= 0.  Inside the text two escape sequences occur:
        <ESC><CR>   hard return
        <ESC><^Z>   end of the memo field
    Returns a list of [link, raw_text] pairs.
    """
    # Prefer the upper-case extension, fall back to lower case
    fname = bname + '.MEM'
    if not os.path.exists(fname):
        fname = bname + '.mem'
    mems = []
    with open(fname, "rb") as file_:
        LOG.debug("The current system is %s-endian", sys.byteorder)
        # The file comes from [what was originally] a DOS machine, so it is
        # little-endian regardless of the host's native byte order
        recfmt = "<i28s"
        reclen = struct.calcsize(str(recfmt))
        while True:
            buf = file_.read(reclen)
            if not buf:
                break
            link, text = struct.unpack(recfmt, buf)
            mems.append([link, text])
    return mems
def _read_recs(table, bname, mems):
    """
    Read the raw records of a .PER or .REL file described by *table*.

    Records whose bytes are all identical (empty/deleted slots) are
    skipped.  Every kept record is the unpacked field tuple converted to a
    list, with its cp850-encoded sequence number prepended — that number
    later becomes the essential Gramps ID.
    """
    # Prefer the upper-case extension, fall back to lower case
    fname = bname + table.fileext
    if not os.path.exists(fname):
        fname = bname + table.fileext.lower()
    recs = []
    pos_recs = 0   # records that actually hold data
    tot_recs = 0   # records read altogether
    with open(fname, "rb") as file_:
        recfmt = table.recfmt
        LOG.info("# %s - recfmt = %s", table['name1'], recfmt)
        reclen = struct.calcsize(str(recfmt))
        LOG.info("# %s - reclen = %d", table['name1'], reclen)
        while True:
            buf = file_.read(reclen)
            if not buf:
                break
            tot_recs += 1
            # A record made of one repeated byte is an empty slot — skip it
            if buf.count(buf[0]) != len(buf):
                pos_recs += 1
                fields = list(struct.unpack(recfmt, buf))
                # Prepend the sequence number: the essential Gramps ID!
                fields.insert(0, str(tot_recs).encode('cp850'))
                recs.append(fields)
    LOG.info("# length %s.recs[] = %d", table['name1'], len(recs))
    LOG.info("# total %d, pos. %d, null %d recs",
             tot_recs, pos_recs, tot_recs - pos_recs)
    return recs
def _get_defname(fname):
"""
Get the name of the PG30.DEF file by looking at the user DEF file.
"""
# Return the name of the DEF file. <fname> is expected to be somewhere in
# the PG30 tree. Contents of <fname> is always something like:
# => \\0
# => C:\\PG30\\NL\\PG30-1.DEF
# We will strip the C: and convert the rest to a native pathname. Next,
# this pathname is compared with <fname>.
with open(fname, buffering=1, encoding='cp437', errors='strict') as file_:
lines = file_.readlines()
if not lines[0].startswith(r'\0') or len(lines) != 2:
return None, fname
defname = lines[1]
defname = defname.strip()
# Strip drive, if any
defname = re.sub(r'^\w:', '', defname)
defname = defname.replace('\\', os.sep)
# Strip leading slash, if any.
if defname.startswith(os.sep):
defname = defname[1:]
# LOG.warning('_get_defname: fname=%(fname)s => defname=%(defname)s', vars())
# Using directory of <fname>, go to parent directory until the DEF is found
dir_, file_ = os.path.split(os.path.abspath(fname))
while dir_ and dir_ != os.sep:
# LOG.warning('_get_defname: dir=%(dir_)s => defname=%(defname)s', vars())
newdefname = os.path.join(dir_, defname)
if os.path.exists(newdefname):
return newdefname, defname
newdefname = newdefname.lower()
if os.path.exists(newdefname):
return newdefname, defname
# One level up
dir_, file_ = os.path.split(dir_)
return None, defname
# Example field:
# ['First name', '47', '64', '4', '2', '15', '""', '""']
# item 0
# item 1 is a number indicating the fieldtype
# item 2
# item 3 is the size of the field
class PG30DefTableField(object):
    """
    One field definition from a table section of the DEF file.

    Example value: 'First name, 47, 64, 4, 2, 15, "", ""'
    where item 0 is the display name, item 1 the field type and
    item 3 the field size in bytes.
    """

    def __init__(self, name, value):
        """Parse the comma separated definition *value* of field *name*."""
        self.fieldname = name
        self.fields = [part.strip() for part in value.split(',')]
        # We have seen some case insensitivity in DEF files ...
        self.name = self.fields[0].lower()
        self.type_ = int(self.fields[1])
        self.size = int(self.fields[3])

    def __repr__(self):
        return self.fieldname + ' -> ' + ', '.join(self.fields)
# ESC (0x1B) followed by Ctrl-Z (0x1A) marks the end of a memo field;
# everything from the sequence onwards is discarded by get_mem_text()
ESC_CTRLZ = re.compile(r'\033\032.*')
class PG30DefTable(object):
    """
    One table section of the DEF file.

    A section is a list of "key = value" parameter lines.  When it contains
    an 'n_fields' parameter it also describes the binary record layout of a
    data file (.PER or .REL); the field list is then parsed and a struct
    format string (self.recfmt) is derived from the field types.
    """
    def __init__(self, name, lines):
        """Parse the parameter *lines* of section *name*."""
        self.name = name
        self.flds = []
        self.parms = {}
        self.recfmt = None
        # Example line:
        # f02=Person_last_change ,32,10,10, 1,68,"","INDI CHAN DATE"
        line_pat = re.compile(r'(\w+) = (.*)', re.VERBOSE)
        for lne in lines:
            mtch = line_pat.match(lne)
            if mtch: # Catch duplicates?
                self.parms[mtch.group(1)] = mtch.group(2)
        self.fileext = self.parms.get('fileext', None)
        # If there is a n_fields entry then this is a table that
        # has details about the record format of another file (PER or REL).
        if 'n_fields' in self.parms:
            self.flds = self.get_fields()
            self.recfmt = self.get_recfmt()
            self.nam2fld = {}        # field name -> PG30DefTableField
            self.nam2idx = {}        # field name -> index into a record tuple
            self.recflds = [] # list of fields that use up space in a record
            j = 0
            for i, fld in enumerate(self.flds):
                nam = fld.name
                self.nam2fld[nam] = fld
                # fld.size == 0: Field will not be acknowleged!
                if (i == 0) or (fld.size != 0):
                    self.nam2idx[nam] = j
                    self.recflds.append(fld)
                    j += 1
    def __getitem__(self, i):
        # Parameter lookup by name; None when the key is absent
        return self.parms.get(i, None)
    def get_recfmt(self):
        """ Get the record format for struct.unpack """
        # Example field:
        # ['First Name', '47', '64', '4', '2', '15', '""', '""']
        # item 0
        # item 1 is a number indicating the fieldtype
        # item 2
        # item 3 is the size of the field
        # ...
        flds = self.flds
        # The input file comes from [what was originally] a DOS machine so will
        # be little-endian, regardless of 'native' byte order of the host system
        fmt = '<'
        for fld in flds:
            fldtyp = fld.type_
            if fldtyp == 2 or fldtyp == 3 or fldtyp == 22 or fldtyp == 23:
                # record-number references: 32-bit int
                fmt += 'i'
            elif fldtyp == 31:
                # takes no space in the record
                pass
            elif fldtyp == 32 or fldtyp == 44 or fldtyp == 45:
                # fixed-size text field
                fmt += '%ds' % fld.size
            elif fldtyp == 41:
                # 16-bit int
                fmt += 'h'
            elif fldtyp == 42 or fldtyp == 43 or fldtyp == 46 or fldtyp == 47:
                # 32-bit int (46/47 are memo references)
                fmt += 'i'
            else:
                pass # ???? Do we want to know?
        return fmt
    def get_fields(self):
        """ Get the fields """
        # For example from PG30-1.DEF
        # n_fields=58
        # f01=Person ID    , 31, 6, 0, 1, 17, "", "INDI RFN"
        # f02=Person change, 32, 10,10, 1, 68, "", "INDI CHAN DATE"
        # f03=First name   , 47, 64, 4, 2, 15, "", ""
        n_fields = int(self.parms['n_fields'])
        flds = []
        for i in range(n_fields):
            fld_name = 'f%02d' % (i+1)
            fld = self.parms.get(fld_name, None)
            flds.append(PG30DefTableField(fld_name, fld))
        return flds
    def get_mem_text(self, mems, i):
        """ Normalize text. """
        # Notice that Pro-Gen starts the mem numbering at 1.
        if i <= 0:
            # MEM index 0, just return an empty string
            return ""
        i -= 1
        # Follow the record chain, concatenating the cp850 text pieces
        recno = mems[i][0] - 1
        text = mems[i][1].decode('cp850')
        while recno >= 0:
            text += mems[recno][1].decode('cp850')
            recno = mems[recno][0] - 1
        text = text.replace('\033\r', '\n') # ESC-^M is newline
        text = ESC_CTRLZ.sub('', text) # ESC-^Z is end of string
        text = text.replace('\0', '') # There can be nul bytes. Remove them.
        text = text.strip() # Strip leading/trailing whitespace
        return text
    def get_record_field_index(self, fldname):
        """ Return the index number in the record tuple, based on the name. """
        if not fldname in self.nam2idx:
            raise ProgenError(_("Field '%(fldname)s' not found") % locals())
        return self.nam2idx[fldname]
    def convert_record_to_list(self, rec, mems):
        """ Convert records to list. """
        flds = []
        for i in range(len(rec)):
            typ = self.recflds[i].type_
            if typ == 2 or typ == 3 or typ == 22 or typ == 23:
                # Record field is record number
                flds.append("%d" % rec[i])
            elif typ == 46 or typ == 47:
                # Record field is memory type
                flds.append(self.get_mem_text(mems, rec[i]))
            else:
                # Not a record number, not a memory type. It must be just text.
                fld = rec[i].strip()
                fld = fld.decode('cp850') # Convert to unicode
                flds.append(fld)
        return flds
    def get_field_names(self):
        """ Return field names. """
        # Only fields that actually occupy space in a record
        ret = []
        for fld in self.flds:
            if fld.size != 0:
                ret.append(fld.name)
        return ret
    def diag(self):
        """ Diagnostic ... """
        txt = self.name + '\n'
        if 'n_fields' in self.parms:
            txt += 'n_fields = %s\n' % self.parms['n_fields']
            # Just grab a field
            txt += '"%s"\n' % self.flds[1]
            txt += 'recfmt = %s (length=%d)' % \
                   (self.recfmt, struct.calcsize(str(self.recfmt)))
        return txt
class PG30Def(object):
    """
    Reader for PG30-1.DEF: exposes its sections as PG30DefTable objects.

    The DEF file consists of sections introduced by "[<section name>]",
    for example::

        [general]
        dateformat=DD-MM-YYYY
        pointerlength=4
        tables=2
    """

    def __init__(self, fname):
        """Read and split the DEF file *fname* (may raise IOError)."""
        with open(fname, buffering=1, encoding='cp437', errors='strict') as frame:
            raw = frame.readlines()
        # Normalize the lines and re-split at every line starting with '['
        content = '\n'.join(line.strip() for line in raw)
        self.parts = {}
        self.tables = {}
        for section in re.split(r'\n(?=\[)', content):
            sec_lines = section.splitlines()
            # Section names are typically "PRO-GEN", "general",
            # "Table_1", "Table_2", "Genealogical"
            key = re.sub(r'\[(.*)\]', r'\1', sec_lines[0])
            # Keep the section body and wrap it in a table object
            self.parts[key] = sec_lines[1:]
            self.tables[key] = PG30DefTable(key, self.parts[key])

    def __getitem__(self, i):
        return self.tables.get(i, None)

    def __repr__(self):
        return '\n'.join(self.tables[t].diag() for t in self.tables)
# Split surname prefixes
def _split_surname(surname):
""" Divides prefix from surname. """
for prefix in PREFIXES:
if surname.startswith(prefix):
return prefix.strip(), surname[len(prefix):].strip()
return '', surname
class ProgenParser(UpdateCallback):
"""
Main class to parse and import Pro-Gen files.
"""
    def parse_progen_file(self):
        """
        Parse and analyse the Pro-Gen file.

        Reads the DEF/MEM/PER/REL files, verifies the file language
        (only 'de', 'en' and 'nl' are supported) and imports persons,
        families and children inside one batch transaction.

        :returns: ImportInfo with import statistics, or None when nothing
            was imported or an error was reported to the user.
        """
        if not (self.option['prim_person'] or
                self.option['prim_family'] or
                self.option['prim_child']):
            # Nothing to import
            return None
        # Read the stub DEF file (maybe throw a IOError)
        self.fname, dname = _get_defname(self.fname)
        if not self.fname:
            error_msg = ProgenError(_("Not a (right) DEF file: %(dname)s") % locals())
            self.user.notify_error(_("Pro-Gen data error"), str(error_msg))
            # close feedback about import progress (GUI)!
            if self.uistate: self.progress.close()
            return None
        # start feedback about import progress (GUI / TXT)
        self.__display_message(_('Initializing.'), _('Import from Pro-Gen'))
        self.def_ = PG30Def(self.fname)
        # Check correct languages (only 'de', 'en' and 'nl' accepted):
        # the localized father/mother field names identify the language
        male_text = self.def_.tables['Genealogical']
        male_text = male_text.parms['field_father'].lower()
        female_text = self.def_.tables['Genealogical']
        female_text = female_text.parms['field_mother'].lower()
        # Double check on keywords
        if male_text == "vater" and female_text == "mutter":
            self.language = 0   # language = 'de'
        elif male_text == "father" and female_text == "mother":
            self.language = 1   # language = 'en'
        elif male_text == "vader" and female_text == "moeder":
            self.language = 2   # language = 'nl'
        else:
            # Raise a error message
            error_msg = ProgenError(_("Not a supported Pro-Gen import file language"))
            self.user.notify_error(_("Pro-Gen data error"), str(error_msg))
            # close feedback about import progress (GUI)
            if self.uistate: self.progress.close()
            return None
        # Read the raw memo, person and relation data
        self.mems = _read_mem(self.bname)
        self.pers = _read_recs(self.def_['Table_1'], self.bname, self.mems)
        self.rels = _read_recs(self.def_['Table_2'], self.bname, self.mems)
        # calculate total amount of data
        if not self.uistate:
            # approx. (1x father, 1x mother) + 1.5x child & families
            self.set_total(2.5 * len(self.pers) + len(self.rels))
        # Import everything inside one batch transaction with signals off
        self.dbase.disable_signals()
        with DbTxn(_("Pro-Gen import"), self.dbase, batch=True) as self.trans:
            self.create_tags()
            if self.option['prim_person']:
                self.create_persons()
            if self.option['prim_family']:
                self.create_families()
            if self.option['prim_child']:
                self.add_children()
            self.__display_message(_('Saving.'))
        self.dbase.enable_signals()
        self.dbase.request_rebuild()
        # close feedback about import progress (GUI)
        if self.uistate: self.progress.close()
        return self.info
    def __init__(self, data_base, file_name, user, option):
        """
        Prepare the importer state and the language dependent field maps.

        Pro-Gen defines its own set of (static) person and family
        identifiers.  Sometimes they match the Gramps localisation,
        sometimes not.  To be on a safe and uniform path, the person and
        family identifiers for the (alphabetic) German (de), English (en)
        and Dutch (nl) languages are defined here.

        :param data_base: Gramps database the data is imported into
        :param file_name: path of a Pro-Gen .def/.per/.rel/.mem file
        :param user: user interaction object (supplies uistate / callback)
        :param option: dict with the import options
        """
        # Any of the data files may be passed; normalize to the .def stub
        self.bname, ext = os.path.splitext(file_name)
        if ext.lower() in ('.per', '.rel', '.mem'):
            file_name = self.bname + '.def'
        self.dbase = data_base
        self.fname = file_name
        self.user = user
        self.uistate = user.uistate
        self.info = ImportInfo()
        self.option = option
        self.language = 0   # index into the identifier lists (de/en/nl)
        self.mems = None   # Memory area
        self.pers = []   # List for raw person data
        self.rels = []   # List for raw relation data
        self.gid2id = {}   # Maps person id to id
        self.fid2id = {}   # Maps family id to id
        self.fm2fam = {}   # Maps family id to family
        self.pkeys = {}   # Caching place handles
        self.skeys = {}   # Caching source handles
        self.ckeys = {}   # Caching citation handles
        # Miscellaneous
        self.trans = None   # Transaction identifier
        self.def_ = None   # PG30 definitions
        self.high_fam_id = -1
        # Add Config import tag?
        self.tagobject_list = {}
        # Records in the PER file using PG30-1.DEF contain the following
        # fields; each entry lists the [de, en, nl] field names:
        self.person_identifier = [
            # F00: None
            [""],   # F00
            # F01 - F15: Person ID, Change, First / Last Name, Gender,
            #            Call Name, Alias, Person Code, Titel 1/2/3,
            #            Father, Mother, Occupation
            ["Person_ID", "Person_record", "Persoon record"],   # F01
            ["Person_Änderung", "Person_last_change", "Persoon gewijzigd"],   # F02
            ["Vorname", "Given_name", "Voornaam"],   # F03
            ["Nachname", "Surname", "Achternaam"],   # F04
            ["Geschlecht", "Sex", "Geslacht"],   # F05
            ["Patronym", "Patronym", "Patroniem"],   # F06
            ["Rufname", "Call_name", "Roepnaam"],   # F07
            ["Alias", "Alias", "Alias"],   # F08
            ["Person_Code", "Person_code", "Persoon code"],   # F09
            ["Titel1", "Title1", "Titel1"],   # F10
            ["Titel2", "Title2", "Titel2"],   # F11
            ["Titel3", "Title3", "Titel3"],   # F12
            ["Vater", "Father", "Vader"],   # F13
            ["Mutter", "Mother", "Moeder"],   # F14
            ["Beruf", "Occupation", "Beroep"],   # F15
            # F16 - F17: Person Note, Info
            ["Person_Notiz", "Person_scratch", "Persoon klad"],   # F16
            ["Person_Info", "Person_info", "Persoon info"],   # F17
            # F18 - F24: Address Date, Street, ZIP, Place, Country, Phone, Info
            ["Anschrift_Datum", "Address_date", "Adres datum"],   # F18
            ["Anschrift_Straße", "Address_street", "Adres straat"],   # F19
            ["Anschrift_PLZ", "Address_zip", "Adres postcode"],   # F20
            ["Anschrift_Ort", "Address_place", "Adres plaats"],   # F21
            ["Anschrift_Land", "Address_country", "Adres land"],   # F22
            ["Anschrift_Telefon", "Address_phone", "Adres telefoon"],   # F23
            ["Anschrift_Info", "Address_info", "Adres info"],   # F24
            # F25 - F31: Birth Date, Place, Time, Source, Reference, Text, Info
            ["Geburt_Datum", "Birth_date", "Geboorte datum"],   # F25
            ["Geburt_Ort", "Birth_place", "Geboorte plaats"],   # F26
            ["Geburt_Zeit", "Birth_time", "Geboorte tijd"],   # F27
            ["Geburt_Quelle", "Birth_source", "Geboorte bron"],   # F28
            ["Geburt_Akte", "Birth_ref", "Geboorte akte"],   # F29
            ["Geburt_Text", "Birth_text", "Geboorte brontekst"],   # F30
            ["Geburt_Info", "Birth_info", "Geboorte info"],   # F31
            # F32 - F39: Christening Date, Place, Religion, Witness, Source,
            #            Reference, Text, Info
            ["Taufe_Datum", "Christening_date", "Doop datum"],   # F32
            ["Taufe_Ort", "Christening_place", "Doop plaats"],   # F33
            ["Religion", "Religion", "Gezindte"],   # F34
            ["Taufe_Paten", "Christening_witness", "Doop getuigen"],   # F35
            ["Taufe_Quelle", "Christening_source", "Doop bron"],   # F36
            ["Taufe_Akte", "Christening_ref", "Doop akte"],   # F37
            ["Taufe_Text", "Christening_text", "Doop brontekst"],   # F38
            ["Taufe_Info", "Christening_info", "Doop info"],   # F39
            # F40 - F46: Death Date, Place, Time, Source, Reference, Text, Info
            ["Sterbe_Datum", "Death_date", "Overlijden datum"],   # F40
            ["Sterbe_Ort", "Death_place", "Overlijden plaats"],   # F41
            ["Sterbe_Zeit", "Death_time", "Overlijden tijd"],   # F42
            ["Sterbe_Quelle", "Death_source", "Overlijden bron"],   # F43
            ["Sterbe_Akte", "Death_ref", "Overlijden akte"],   # F44
            ["Sterbe_Text", "Death_text", "Overlijden brontekst"],   # F45
            ["Sterbe_Info", "Death_info", "Overlijden info"],   # F46
            # F47 - F52: Cremation Date, Place, Source, Reference, Text, Info
            ["Einäscherung_Datum", "Cremation_date", "Crematie datum"],   # F47
            ["Einäscherung_Ort", "Cremation_place", "Crematie plaats"],   # F48
            ["Einäscherung_Quelle", "Cremation_source", "Crematie bron"],   # F49
            ["Einäscherung_Akte", "Cremation_ref", "Crematie akte"],   # F50
            ["Einäscherung_Text", "Cremation_text", "Crematie brontekst"],   # F51
            ["Einäscherung_Info", "Cremation_info", "Crematie info"],   # F52
            # F53 - F58: Burial Date, Place, Source, Reference, Text, Info
            ["Beerdigung_Datum", "Burial_date", "Begrafenis datum"],   # F53
            ["Beerdigung_Ort", "Burial_place", "Begrafenis plaats"],   # F54
            ["Beerdigung_Quelle", "Burial_source", "Begrafenis bron"],   # F55
            ["Beerdigung_Akte", "Burial_ref", "Begrafenis akte"],   # F56
            ["Beerdigung_Text", "Burial_text", "Begrafenis brontekst"],   # F57
            ["Beerdigung_Info", "Burial_info", "Begrafenis info"],   # F58
        ]
        # Records in the REL file using PG30-1.DEF contain the following
        # fields; each entry lists the [de, en, nl] field names:
        self.family_identifier = [
            # F00: None
            [""],   # F00
            # F01 - F07: Relation ID, Change, Husband, Wife, Code, Note, Info
            ["Ehe_ID", "Relation_record", "Relatie record"],   # F01
            ["Ehe_Änderung", "Relation_last_change", "Relatie gewijzigd"],   # F02
            ["Ehemann", "Husband", "Man"],   # F03
            ["Ehefrau", "Wife", "Vrouw"],   # F04
            ["Ehe_Code", "Relation_code", "Relatie code"],   # F05
            ["Ehe_Notiz", "Relation_scratch", "Relatie klad"],   # F06
            ["Ehe_Info", "Relation_info", "Relatie info"],   # F07
            # F08 - F13: Civil Union Date, Place, Source, Reference, Text, Info
            ["Lebensgem_Datum", "Living_date", "Samenwonen datum"],   # F08
            ["Lebensgem_Ort", "Living_place", "Samenwonen plaats"],   # F09
            ["Lebensgem_Quelle", "Living_source", "Samenwonen bron"],   # F10
            ["Lebensgem_Akte", "Living_ref", "Samenwonen akte"],   # F11
            ["Lebensgem_Text", "Living_text", "Samenwonen brontekst"],   # F12
            ["Lebensgem_Info", "Living_info", "Samenwonen info"],   # F13
            # F14 - F20: Marriage License Date, Place, Witness, Source, Record,
            #            Text, Info
            ["Aufgebot_Datum", "Banns_date", "Ondertrouw datum"],   # F14
            ["Aufgebot_Ort", "Banns_place", "Ondertrouw plaats"],   # F15
            ["Aufgebot_Zeugen", "Banns_witnesses", "Ondertrouw getuigen"],   # F16
            ["Aufgebot_Quelle", "Banns_source", "Ondertrouw bron"],   # F17
            ["Aufgebot_Akte", "Banns_ref", "Ondertrouw akte"],   # F18
            ["Aufgebot_Text", "Banns_text", "Ondertrouw brontekst"],   # F19
            ["Aufgebot_Info", "Banns_info", "Ondertrouw info"],   # F20
            # F21 - F27: Civil Marriage Date, Place, Witness, Source, Record,
            #            Text, Info
            ["Standesamt_Datum", "Civil_date", "Wettelijk datum"],   # F21
            ["Standesamt_Ort", "Civil_place", "Wettelijk plaats"],   # F22
            ["Standesamt_Zeugen", "Civil_witnesses", "Wettelijk getuigen"],   # F23
            ["Standesamt_Quelle", "Civil_source", "Wettelijk bron"],   # F24
            ["Standesamt_Akte", "Civil_ref", "Wettelijk akte"],   # F25
            ["Standesamt_Text", "Civil_text", "Wettelijk brontekst"],   # F26
            ["Standesamt_Info", "Civil_info", "Wettelijk info"],   # F27
            # F28 - F35: Church Wedding Date, Place, Church Name, Witness,
            #            Source, Reference, Text, Info
            ["Kirche_Datum", "Church_date", "Kerkelijk datum"],   # F28
            ["Kirche_Ort", "Church_place", "Kerkelijk plaats"],   # F29
            ["Kirche", "Church", "Kerk"],   # F30
            ["Kirche_Zeugen", "Church_witnesses", "Kerkelijk getuigen"],   # F31
            ["Kirche_Quelle", "Church_source", "Kerkelijk bron"],   # F32
            ["Kirche_Akte", "Church_ref", "Kerkelijk akte"],   # F33
            ["Kirche_Text", "Church_text", "Kerkelijk brontekst"],   # F34
            ["Kirche_Info", "Church_info", "Kerkelijk info"],   # F35
            # F36 - F41: Divorce Date, Place, Source, Reference, Text, Info
            ["Scheidung_Datum", "Divorce_date", "Scheiding datum"],   # F36
            ["Scheidung_Ort", "Divorce_place", "Scheiding plaats"],   # F37
            ["Scheidung_Quelle", "Divorce_source", "Scheiding bron"],   # F38
            ["Scheidung_Akte", "Divorce_ref", "Scheiding akte"],   # F39
            ["Scheidung_Text", "Divorce_text", "Scheiding brontekst"],   # F40
            ["Scheidung_Info", "Divorce_info", "Scheiding info"],   # F41
        ]
        # provide feedback about import progress (GUI / TXT)
        if self.uistate:
            self.progress = ProgressMeter(_("Import from Pro-Gen"), '',
                                          parent=self.uistate.window)
        else:
            UpdateCallback.__init__(self, user.callback)
def __add_name(self, person, citationhandle, nametype,
firstname, prefix, surname, suffix):
"""
Add a new name to the object.
"""
name = Name()
name.set_type(nametype)
name.set_first_name(firstname)
sur_name = Surname()
sur_name.set_prefix(prefix)
sur_name.set_surname(surname)
name.add_surname(sur_name)
name.set_suffix(suffix)
if citationhandle:
name.add_citation(citationhandle)
person.add_alternate_name(name)
def __add_tag(self, tag, obj):
"""
Add the default tag to the object.
"""
if self.tagobject_list and (tag in self.tagobject_list):
obj.add_tag(self.tagobject_list[tag].handle)
def __find_from_handle(self, progen_id, table):
"""
Find a handle corresponding to the specified Pro-Gen ID.
"""
# The passed table contains the mapping. If the value is found, we
# return it, otherwise we create a new handle, store it, and return it.
intid = table.get(progen_id)
if not intid:
intid = create_id()
table[progen_id] = intid
return intid
def __find_person_handle(self, progen_id):
"""
Return the database handle associated with the person's Pro-Gen ID
"""
return self.__find_from_handle(progen_id, self.gid2id)
def __find_family_handle(self, progen_id):
"""
Return the database handle associated with the family's Pro-Gen ID
"""
return self.__find_from_handle(progen_id, self.fid2id)
def __find_or_create_person(self, progen_id):
"""
Finds or creates a Person based on the Pro-Gen ID.
"""
# If the ID is already used (= is in the database), return the item in
# DB. Otherwise, create a new person, assign the handle and Gramps ID.
person = Person()
intid = self.gid2id.get(progen_id)
if self.dbase.has_person_handle(intid):
person.unserialize(self.dbase.get_raw_person_data(intid))
else:
# create a new Person
gramps_id = self.dbase.id2user_format("I%06d" % progen_id)
if self.dbase.has_person_gramps_id(gramps_id):
gramps_id = self.dbase.find_next_person_gramps_id()
intid = self.__find_from_handle(progen_id, self.gid2id)
person.set_handle(intid)
person.set_gramps_id(gramps_id)
# add info for import statistic
self.info.add('new-object', PERSON_KEY, None)
return person
def __find_or_create_family(self, progen_id):
"""
Finds or creates a Family based on the Pro-Gen ID.
"""
family = Family()
intid = self.fid2id.get(progen_id)
if self.dbase.has_family_handle(intid):
family.unserialize(self.dbase.get_raw_family_data(intid))
else:
# create a new Family
gramps_id = self.dbase.fid2user_format("F%04d" % progen_id)
if self.dbase.has_family_gramps_id(gramps_id):
gramps_id = self.dbase.find_next_family_gramps_id()
intid = self.__find_from_handle(progen_id, self.fid2id)
family.set_handle(intid)
family.set_gramps_id(gramps_id)
# add info for import statistic
self.info.add('new-object', FAMILY_KEY, None)
return family
def __get_or_create_place(self, place_name):
"""
Finds or creates a Place based on the place name.
"""
if not place_name:
return None
if place_name in self.pkeys:
place = self.dbase.get_place_from_handle(self.pkeys[place_name])
else:
# create a new Place
place = Place()
place.set_name(PlaceName(value=place_name))
place.set_title(place_name)
self.__add_tag('place', place) # add tag to 'Place'
self.dbase.add_place(place, self.trans) # add & commit ...
self.pkeys[place_name] = place.get_handle()
# add info for import statistic
self.info.add('new-object', PLACE_KEY, None)
return place
def __get_or_create_citation(self, source_title, date_text,
page_text='', page_ref=''):
"""
Finds or creates Source & Citation based on:
Source, Name, Date, Page, Note, Attribute.
"""
if not source_title:
return None
# process Source
if not self.option['imp_source']: # No Source enabled
return None
if source_title in self.skeys: # source exists
source = self.dbase.get_source_from_handle(self.skeys[source_title])
else: # create a new source
source = Source()
source.set_title(source_title)
source.private = self.option['imp_source_priv']
self.__add_tag('source', source) # add tag to 'Source'
# process Attribute
if self.option['imp_source_attr']:
sattr = SrcAttribute()
sattr.set_type(_("Source"))
sattr.set_value(self.option['imp_source_attr'])
source.add_attribute(sattr)
self.dbase.add_source(source, self.trans) # add & commit ...
self.skeys[source_title] = source.get_handle()
# add info for import statistic
self.info.add('new-object', SOURCE_KEY, None)
# process Citation
if not self.option['imp_citation']: # No Citation enabled
return None
# process Volume/Page
page = source_title
if page_text or page_ref:
page = '%s %s' % (page_text, page_ref)
if page in self.ckeys: # citation exists
citation = self.dbase.get_citation_from_handle(self.ckeys[page])
else: # create a new citation
citation = Citation()
citation.set_reference_handle(source.get_handle())
citation.private = self.option['imp_citation_priv']
self.__add_tag('citation', citation) # add tag to 'Citation'
# process Date
date = self.__create_date_from_text(date_text)
if date:
citation.set_date_object(date)
# process Confidence
citation.set_confidence_level(self.option['imp_citation_conf'])
# process Page (substitute string directives)
if ('%Y' or '%m' or '%d' or '%H' or '%M' or '%S') in page:
page = time.strftime(page)
citation.set_page('%s' % page)
# process Note
imp_citation_note = '' # Not yet used
if imp_citation_note:
note = self.__create_note(imp_citation_note, NoteType.CUSTOM,
_("Pro-Gen Import"))
if note and note.handle:
citation.add_note(note.handle)
# process Attribute
if self.option['imp_citation_attr']:
sattr = SrcAttribute()
sattr.set_type(_("Citation"))
sattr.set_value(self.option['imp_citation_attr'])
citation.add_attribute(sattr)
self.dbase.add_citation(citation, self.trans) # add & commit ...
self.ckeys[page] = citation.get_handle()
# add info for import statistic
self.info.add('new-object', CITATION_KEY, None)
return citation
def __create_note(self, note_text, note_type, note_cust=''):
"""
Create an note base on Type and Text.
"""
if not note_text:
return None
if isinstance(note_text, list):
note_text = '\n'.join(note_text)
note = Note()
note.set(note_text)
note_type = NoteType()
note_type.set((note_type, note_cust))
self.__add_tag('note', note) # add tag to 'Note'
self.dbase.add_note(note, self.trans) # add & commit ...
# add info for import statistic
self.info.add('new-object', NOTE_KEY, None)
return note
def __create_attribute(self, attr_text, attr_type, attr_cust=''):
"""
Creates an attribute base on (Custom-)Type and Text.
"""
if not attr_text:
return None
attr = Attribute()
attr.set_type((attr_type, attr_cust))
attr.set_value(attr_text)
return attr
def __create_event_and_ref(self, type_, desc=None, date=None, place=None,
citation=None, note_text=None,
attr_text=None, attr_type=None, attr_cust=None):
"""
Finds or creates an Event based on the Type, Description, Date, Place,
Citation, Note and Time.
"""
event = Event()
event.set_type(EventType(type_))
self.__add_tag('event', event) # add tag to 'Event'
if desc:
event.set_description(desc)
if date:
event.set_date_object(date)
if place:
event.set_place_handle(place.get_handle())
if citation:
event.add_citation(citation.handle)
attr = self.__create_attribute(attr_text, attr_type, attr_cust)
if attr:
event.add_attribute(attr)
note = self.__create_note(note_text, NoteType.CUSTOM, "Info")
if note and note.handle:
event.add_note(note.handle)
self.dbase.add_event(event, self.trans) # add & commit ...
# add info for import statistic
self.info.add('new-object', EVENT_KEY, None)
event_ref = EventRef()
event_ref.set_reference_handle(event.get_handle())
return event, event_ref
__date_pat1 = re.compile(r'(?P<day>\d{1,2}) (.|-|=) (?P<month>\d{1,2}) (.|-|=) (?P<year>\d{2,4})',
re.VERBOSE)
__date_pat2 = re.compile(r'(?P<month>\d{1,2}) (.|-|=) (?P<year>\d{4})',
re.VERBOSE)
__date_pat3 = re.compile(r'(?P<year>\d{3,4})', re.VERBOSE)
__date_pat4_de = re.compile(r'(v|vor|n|nach|ca|circa|etwa|in|um|±) (\.|\s)* (?P<year>\d{3,4})',
re.VERBOSE)
__date_pat4_en = re.compile(r'(b|before|a|after|ab|about|between|±) (\.|\s)* (?P<year>\d{3,4})',
re.VERBOSE)
__date_pat4_nl = re.compile(r'(v|voor|vóór|na|ca|circa|rond|±) (\.|\s)* (?P<year>\d{3,4})',
re.VERBOSE)
__date_pat5 = re.compile(r'(oo|OO) (-|=) (oo|OO) (-|=) (?P<year>\d{2,4})',
re.VERBOSE)
__date_pat6 = re.compile(r'(?P<month>(%s)) (\.|\s)* (?P<year>\d{3,4})' % \
'|'.join(list(MONTHES.keys())),
re.VERBOSE | re.IGNORECASE)
    def __create_date_from_text(self, date_text, diag_msg=None):
        """
        Finds or creates a Date based on Text, an Offset and a Message.

        :param date_text: free-form Pro-Gen date string.
        :param diag_msg: optional context string used in the warning log.
        :returns: a Gramps Date (MOD_TEXTONLY when nothing matched), or
            None when the text is empty or '??'.
        """
        # Pro-Gen has a text field for the date.
        # It can be anything (it should be dd-mm-yyyy), but we have seen:
        # yyyy
        # mm-yyyy
        # before yyyy
        # dd=mm-yyyy (typo I guess)
        # 00-00-yyyy
        # oo-oo-yyyy
        # dd-mm-00 (does this mean we do not know about the year?)
        # Function tries to parse the text and create a proper Gramps Date()
        # object. If all else fails create a MOD_TEXTONLY Date() object.
        dte_txt = date_text == _("Unknown")
        # NOTE(review): when date_text equals _("Unknown") the flag above
        # is True, so 'not (dte_txt or date_text)' is False and "Unknown"
        # falls through to the pattern matching (ending up as a text-only
        # date) instead of returning None.  Looks inverted -- confirm.
        if not (dte_txt or date_text) or date_text == '??':
            return None
        date = Date()
        # dd-mm-yyyy
        dte_mtch = self.__date_pat1.match(date_text)
        if dte_mtch:
            day = int(dte_mtch.group('day'))
            month = int(dte_mtch.group('month'))
            if month > 12:
                # NOTE(review): a month of 24 maps to 0 here, not 12
                month %= 12
            year = int(dte_mtch.group('year'))
            if day and month and year:
                date.set_yr_mon_day(year, month, day)
            else:
                # incomplete dates (day, month or year 0) become 'about'
                date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
                         (day, month, year, 0))
            return date
        # mm-yyyy
        dte_mtch = self.__date_pat2.match(date_text)
        if dte_mtch:
            month = int(dte_mtch.group('month'))
            year = int(dte_mtch.group('year'))
            date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
                     (0, month, year, 0))
            return date
        # yyy or yyyy
        dte_mtch = self.__date_pat3.match(date_text)
        if dte_mtch:
            year = int(dte_mtch.group('year'))
            date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
                     (0, 0, year, 0))
            return date
        # before|after|... yyyy
        # NOTE: for any other language value dte_mtch stays None (the
        # failed __date_pat3 match) and this whole branch is skipped.
        if self.language == 0: # 'de' language
            dte_mtch = self.__date_pat4_de.match(date_text)
        elif self.language == 1: # 'en' language
            dte_mtch = self.__date_pat4_en.match(date_text)
        elif self.language == 2: # 'nl' language
            dte_mtch = self.__date_pat4_nl.match(date_text)
        if dte_mtch:
            year = int(dte_mtch.group('year'))
            # group(1) is the qualifier word matched by the language pattern
            if dte_mtch.group(1) == 'v' or dte_mtch.group(1) == 'vor' or \
               dte_mtch.group(1) == 'before' or \
               dte_mtch.group(1) == 'voor' or dte_mtch.group(1) == 'vóór':
                date.set(Date.QUAL_NONE, Date.MOD_BEFORE, Date.CAL_GREGORIAN,
                         (0, 0, year, 0))
            elif dte_mtch.group(1) == 'n' or dte_mtch.group(1) == 'nach' or \
                 dte_mtch.group(1) == 'after' or \
                 dte_mtch.group(1) == 'na':
                date.set(Date.QUAL_NONE, Date.MOD_AFTER, Date.CAL_GREGORIAN,
                         (0, 0, year, 0))
            else:
                # 'about'/'circa'/... qualifiers
                date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
                         (0, 0, year, 0))
            return date
        # oo-oo-yyyy
        dte_mtch = self.__date_pat5.match(date_text)
        if dte_mtch:
            year = int(dte_mtch.group('year'))
            date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
                     (0, 0, year, 0))
            return date
        # mmm yyyy (textual month)
        dte_mtch = self.__date_pat6.match(date_text)
        if dte_mtch:
            year = int(dte_mtch.group('year'))
            month = MONTHES.get(dte_mtch.group('month'), 0)
            date.set(Date.QUAL_NONE, Date.MOD_ABOUT, Date.CAL_GREGORIAN,
                     (0, month, year, 0))
            return date
        # Hmmm. Just use the plain text.
        # NOTE(review): encode('utf-8') yields bytes, so under Python 3 the
        # logged message shows a b'...' repr -- confirm this is intended.
        LOG.warning(_("Date did not match: '%(text)s' (%(msg)s)"), \
                    {'text' : date_text.encode('utf-8'), 'msg' : diag_msg or ''})
        date.set_as_text(date_text)
        return date
def __create_desc_from_text(self, desc_txt):
"""
Creates a variation of a description depending on language
"""
desc = None
if desc_txt:
if self.language == 0: # 'de' language
desc = desc_txt + ' Uhr'
else:
desc = _('Time: %s') % desc_txt
return desc
def __display_message(self, gui_mesg, txt_mesg=None, gui_max=None):
"""
Display messaging depending of GUI / TXT.
"""
if self.uistate:
if gui_max: self.progress.set_pass(gui_mesg, gui_max)
else: self.progress.set_pass(gui_mesg)
else:
if txt_mesg: self.set_text(txt_mesg)
else: self.set_text(gui_mesg)
def create_tags(self):
"""
Creates tags to objects (if provide)
"""
for tagobj in TAGOBJECTS:
tagname = 'tag_%s' % tagobj
if self.option[tagname]:
# process tagname (substitute string directives)
tagname = '%s %s' % (_(tagobj).capitalize(), \
self.option[tagname])
tag = self.dbase.get_tag_from_name(tagname)
if not tag:
tag = Tag()
tag.set_name(tagname)
self.dbase.add_tag(tag, self.trans)
# add info for import statistic
self.info.add('new-object', TAG_KEY, None)
self.tagobject_list[tagobj] = tag
__rel_pat = re.compile(r'(r|w|)', re.VERBOSE)
def create_persons(self):
"""
Method to import Persons
"""
table = self.def_['Table_1']
LOG.info(table.get_field_names())
# We'll start with F02: Person last change
# Note: We like this to be computed just once.
person_ix = [0, 0]
for count in range(2, len(self.person_identifier)):
# We have seen some case insensitivity in DEF files ...
pid = self.person_identifier[count][self.language].lower()
pix = table.get_record_field_index(pid)
person_ix.append(pix)
# start feedback about import progress (GUI/TXT)
self.__display_message(_('Importing persons.'), gui_max=len(self.pers))
# Male / Female symbols
male_sym = self.def_.tables['Genealogical'].parms['male']
female_sym = self.def_.tables['Genealogical'].parms['female']
ind_id = 0
for i, rec in enumerate(self.pers):
# Update at the begin
self.progress.step() if self.uistate else self.update()
recflds = table.convert_record_to_list(rec, self.mems)
# Option: Original Individuals IDs
if self.option['opt_person-ident']:
ind_id = int(recflds[person_ix[1]]) # F01: INDI RFN
else:
ind_id += 1
# print(("Ind ID %d " % ind_id) + " ".join(("%s" % r) for r in rec))
person = self.__find_or_create_person(ind_id)
# process F03 Given Name, F07 Call Name
name = Name()
name.set_type(NameType.BIRTH)
first_name = recflds[person_ix[3]] # F03: TBD
if first_name:
# replace if necessary separators with ' '
first_name = re.sub(r'[,;]', ' ', first_name)
else:
# default first name 'Nomen nominandum'
first_name = 'N.N.'
name.set_first_name(first_name)
# process F04 Last Name
sur_prefix, sur_name = '', ''
if recflds[person_ix[4]]:
# F04: INDI NAME
sur_prefix, sur_name = _split_surname(recflds[person_ix[4]])
if not sur_name:
# default surname 'Nomen nominandum'
sur_name = 'N.N.'
surname = Surname()
surname.set_surname(sur_name)
if sur_prefix:
surname.set_prefix(sur_prefix)
name.add_surname(surname)
# process F06 Patronym
patronym = recflds[person_ix[6]] # F06: INDI _PATR
if patronym:
patronym_name = Surname()
patronym_name.set_surname(patronym)
patronym_name.set_origintype(NameOriginType.PATRONYMIC)
name.add_surname(patronym_name)
# process F10 - F12 Title(s)
title1 = recflds[person_ix[10]] # F10: INDI TITL
title2 = recflds[person_ix[11]] # F11: INDI _TITL2
title3 = recflds[person_ix[12]] # F12: INDI _TITL3
title = [_f for _f in [title1, title2, title3] if _f]
if title:
name.set_title(", ".join(title))
# General config: addtional individual citation
if self.option['imp_source_title']:
# Original individual ID from source
pageref = '[ID: I%06d] %s, %s' % (i +1, sur_name, first_name)
citation = self.__get_or_create_citation \
(self.option['imp_source_title'],
recflds[person_ix[2]], # F02: INDI CHAN DATE
self.option['imp_citation_page'], pageref)
if citation and citation.handle:
person.add_citation(citation.handle)
name.add_citation(citation.handle)
# add tag to 'Person'
self.__add_tag('person', person)
# create diagnose message
diag_msg = "%s: %s %s" % (person.gramps_id,
first_name.encode('utf-8'),
sur_name.encode('utf-8'))
# prcesss F25 Birth Date
birth_date = self.__create_date_from_text \
(recflds[person_ix[25]], diag_msg) # F25: ... DATE
# process F07 Call Name
if recflds[person_ix[7]]:
# F07: INDI NAME NICK/INDI NAME ALIA/INDI CHR NICK
name.set_call_name(recflds[person_ix[7]])
else:
nick_name = first_name.split(' ')
if birth_date and len(nick_name) > 1: # Two or more firstnames
number = 0 # Firstname number
if birth_date.dateval[2] < 1900: # 1900: Common edge date
number = 1
name.set_call_name(nick_name[number])
# set the Person in database
person.set_primary_name(name)
# process F05 Gender
gender = recflds[person_ix[5]] # F05: INDI SEX
if gender == male_sym:
gender = Person.MALE
elif gender == female_sym:
gender = Person.FEMALE
else:
gender = Person.UNKNOWN
person.set_gender(gender)
# process F08 Alias
# F08: INDI NAME _ALIA / INDI NAME COMM
alias = recflds[person_ix[8]]
if alias:
# expand separator with ' '
alias = re.sub(r'\.', '. ', alias)
alias_text = alias.split()
# two ways: Attribute-Nickname or AKA-Name
if len(alias_text) == 1:
attr = self.__create_attribute(alias,
AttributeType.NICKNAME)
if attr:
person.add_attribute(attr)
else:
self.__add_name(
person, citation.handle if citation else None,
NameType.AKA, ' '.join(alias_text[0:-1]),
'', alias_text[-1], '')
# process F09 Person Code
refn_code = recflds[person_ix[9]] # F09: INDI REFN/INDI CODE
if refn_code:
# We have seen some artefacts ...
rel_cde = self.__rel_pat.match(refn_code)
# Option: Reference code contains one/two letters
if self.option['opt_refn-code'] and rel_cde:
attr = self.__create_attribute(refn_code,
AttributeType.CUSTOM,
"REFN")
if attr:
person.add_attribute(attr)
# process F15 Occupation
occupation = recflds[person_ix[15]] # F15: INDI OCCU
if occupation:
dummy, event_ref = self.__create_event_and_ref \
(EventType.OCCUPATION, occupation)
if event_ref:
person.add_event_ref(event_ref)
# process F16 Person Comment, F17 Person Note
comm = recflds[person_ix[16]] # F16: INDI _COMM / INDI COMM
note = recflds[person_ix[17]] # F17: INDI NOTE
note_text = [_f for _f in [comm, note] if _f]
note = self.__create_note(note_text, NoteType.PERSON)
if note and note.handle:
person.add_note(note.handle)
# process F18 - F24 Address Date, Place, Street, ZIP, Country,
# Phone, Info
# GEDCOM symbols: INDI RESI ...
date = self.__create_date_from_text \
(recflds[person_ix[18]], diag_msg) # F18: ... DATE
street = recflds[person_ix[19]] # F19: ... ADDR
# F20: ... ADDR POST/INDI RESI POST
postal_code = recflds[person_ix[20]]
# F21: ... ADDR CITY/INDI RESI PLAC
place = self.__get_or_create_place(recflds[person_ix[21]])
# F22: ... ADDR CTRY/INDI RESI CTRY
country = recflds[person_ix[22]]
# F23: ... PHON/INDI PHON
phone = recflds[person_ix[23]]
# F24: I... NOTE / INDI ADDR
info = recflds[person_ix[24]]
address = None
if street or postal_code or country or phone:
# Create address
address = Address()
if date:
address.set_date_object(date)
if street:
address.set_street(street)
if recflds[person_ix[21]]:
address.set_city(recflds[person_ix[21]])
if postal_code: # Debugging!
address.set_postal_code(postal_code)
if country:
address.set_country(country)
if phone:
address.set_phone(phone)
# Option 1: add Notes to Address
note = self.__create_note(info, NoteType.ADDRESS)
if note and note.handle:
address.add_note(note.handle)
info = None
person.add_address(address)
if place:
desc = ''
if address and date:
desc = _('see address on ')
desc += displayer.display(date)
elif address:
desc = _('see also address')
dummy, resi_ref = self.__create_event_and_ref \
(EventType.RESIDENCE, desc, date, place, '', info)
if resi_ref:
person.add_event_ref(resi_ref)
# process F25 - F31 Birth Date, Place, Time, Source, Reference,
# Text, Info
# GEDCOM symbols: INDI BIRT ...
# date = self.__create_date_from_text \ # Birth Date processed above
# (recflds[person_ix[25]], diag_msg) # F25: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[26]]) # F26: ... PLAC
birth_time = recflds[person_ix[27]] # F27: ... TIME
source = recflds[person_ix[28]] # F28: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[29]] # F29: ... SOUR REFN
source_text = recflds[person_ix[30]] # F30: ... SOUR TEXT
info = recflds[person_ix[31]] # F31: INDI ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[person_ix[25]], source_refn)
if birth_date or place or info or citation:
desc = source_text
# Option: Birth time in description
if self.option['opt_birth-date']:
time_text = self.__create_desc_from_text(birth_time)
desc += '; %s' % time_text
dummy, birth_ref = self.__create_event_and_ref \
(EventType.BIRTH, desc, birth_date, place, citation, info,
birth_time, AttributeType.TIME)
if birth_ref:
person.set_birth_ref(birth_ref)
# process F32 - F37 Baptism / Christening Date, Place, Religion,
# Source, Reference, Text, Info
# GEDCOM symbols: INDI CHR ...
date = self.__create_date_from_text \
(recflds[person_ix[32]], diag_msg) # F32: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[33]]) # F33: ... PLAC
religion = recflds[person_ix[36]] # F34: ... RELI / INDI RELI
witness = recflds[person_ix[35]] # F35: ... _WITN / ... WITN
source = recflds[person_ix[36]] # F36: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[37]] # F37: ... SOUR REFN
source_text = recflds[person_ix[38]] # F38: ... SOUR TEXT
info = recflds[person_ix[39]] # F39: ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[person_ix[32]], source_refn)
if date or place or info or citation:
dummy, chris_ref = self.__create_event_and_ref \
(EventType.CHRISTEN, source_text, date, place, citation,
info, witness, AttributeType.CUSTOM, _("Godfather"))
if chris_ref:
person.add_event_ref(chris_ref)
# process F34 Religion
if religion:
citation = None
if source != religion:
citation = self.__get_or_create_citation \
(religion, recflds[person_ix[32]], source_refn)
dummy, reli_ref = self.__create_event_and_ref \
(EventType.RELIGION, '', date, '', citation)
if reli_ref:
person.add_event_ref(reli_ref)
# process F40 - F46 Death Date, Place, Time, Source, Reference,
# Text, Info
# GEDCOM symbols: INDI DEAT ...
date = self.__create_date_from_text \
(recflds[person_ix[40]], diag_msg) # F40: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[41]]) # F41: ... PLAC
death_time = recflds[person_ix[42]] # F42: ... TIME
source = recflds[person_ix[43]] # F43: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[44]] # F44: ... SOUR REFN
source_text = recflds[person_ix[45]] # F45: ... SOUR TEXT
info = recflds[person_ix[46]] # F46: ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[person_ix[40]], source_refn)
if date or place or info or citation:
desc = source_text
# Option: Death time in description
if self.option['opt_death-date']:
time_text = self.__create_desc_from_text(death_time)
desc += '; %s' % time_text
if not self.option['opt_death-cause']:
desc += ' (%s)' % info
dummy, death_ref = self.__create_event_and_ref \
(EventType.DEATH, desc, date, place, citation, None,
death_time, AttributeType.TIME)
if death_ref:
person.set_death_ref(death_ref)
# Option: Death info to Death cause
if source_text or (self.option['opt_death-cause'] and info):
desc = [_f for _f in [source_text, info] if _f]
desc = desc and '; '.join(desc) or None
if _('Death cause') in desc:
desc = desc[13:].strip()
dummy, event_ref = self.__create_event_and_ref \
(EventType.CAUSE_DEATH, desc)
if event_ref:
person.add_event_ref(event_ref)
# process F47 - F52 Cremation Date, Place, Source, Reference,
# Text, Info
# GEDCOM symbols: INDI CREM ...
date = self.__create_date_from_text \
(recflds[person_ix[47]], diag_msg) # F47: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[48]]) # F48: ... PLAC
source = recflds[person_ix[49]] # F49: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[50]] # F50: ... SOUR REFN
source_text = recflds[person_ix[51]] # F51: ... SOUR TEXT
info = recflds[person_ix[52]] # F52: ... INFO
citation = self.__get_or_create_citation \
(source, recflds[person_ix[47]], source_refn)
if date or place or info or citation:
dummy, cremation_ref = self.__create_event_and_ref \
(EventType.CREMATION, source_text, date, place, citation,
info)
if cremation_ref:
person.add_event_ref(cremation_ref)
# process F53 Burial Date, F54 Burial Place, F55 Burial Source,
# F56 Burial Reference, F57 Burial Text, F58 Burial Info
# GEDCOM symbols: INDI BURI ...
date = self.__create_date_from_text \
(recflds[person_ix[53]], diag_msg) # F53: ... DATE
place = self.__get_or_create_place \
(recflds[person_ix[54]]) # F54: ... PLAC
source = recflds[person_ix[55]] # F49: ... SOUR / ... SOUR TITL
source_refn = recflds[person_ix[56]] # F50: ... SOUR REFN
source_text = recflds[person_ix[57]] # F51: ... SOUR TEXT
info = recflds[person_ix[58]] # F58: ... INFO
citation = self.__get_or_create_citation \
(source, recflds[person_ix[53]], source_refn)
if date or place or info or citation:
dummy, buri_ref = self.__create_event_and_ref \
(EventType.BURIAL, source_text, date, place, citation, info)
if buri_ref:
person.add_event_ref(buri_ref)
# commit the Person
self.dbase.commit_person(person, self.trans)
def create_families(self):
"""
Method to import Families
"""
table = self.def_['Table_2']
LOG.info(table.get_field_names())
# We'll start with F03: Husband
# Note: We like this to be computed just once.
family_ix = [0, 0]
for count in range(2, len(self.family_identifier)):
# We've seen some case insensitivity in DEF files ...
fid = self.family_identifier[count][self.language].lower()
fix = table.get_record_field_index(fid)
family_ix.append(fix)
# start feedback about import progress (GUI/TXT)
self.__display_message(_('Importing families.'), gui_max=len(self.rels))
fam_id = 0
for i, rec in enumerate(self.rels):
# Update at the begin
self.progress.step() if self.uistate else self.update()
husband = rec[family_ix[3]] # F03: FAM HUSB
wife = rec[family_ix[4]] # F04: FAM WIFE
if husband > 0 or wife > 0:
recflds = table.convert_record_to_list(rec, self.mems)
# Option: Original family IDs
if self.option['opt_family-ident']:
fam_id = int(recflds[family_ix[1]]) # F01: FAM RFN
else:
fam_id += 1
self.high_fam_id = fam_id
family = self.__find_or_create_family(fam_id)
# process F03 / F04 Husband / Wife
husband_handle = None
if husband > 0:
husband_handle = self.__find_person_handle(husband)
family.set_father_handle(husband_handle)
husband_person = self.dbase.get_person_from_handle(husband_handle)
husband_person.add_family_handle(family.get_handle())
self.dbase.commit_person(husband_person, self.trans)
wife_handle = None
if wife > 0:
wife_handle = self.__find_person_handle(wife)
family.set_mother_handle(wife_handle)
wife_person = self.dbase.get_person_from_handle(wife_handle)
wife_person.add_family_handle(family.get_handle())
self.dbase.commit_person(wife_person, self.trans)
# Optional: Husband changes Surname (e.g. marriage)
if (husband > 0) and self.option['opt_surname-male']:
citation_handle = wife_person.get_citation_list()[0] \
if husband_person.citation_list else None
self.__add_name(husband_person, citation_handle,
NameType.MARRIED,
husband_person.primary_name.get_first_name(),
husband_person.primary_name.surname_list[0].prefix,
wife_person.primary_name.get_surname(),
husband_person.primary_name.get_suffix())
# commit the Person
self.dbase.commit_person(husband_person, self.trans)
# Optional: Wife changes Surname (e.g. marriage)
if (wife > 0) and self.option['opt_surname-female']:
citation_handle = wife_person.get_citation_list()[0] \
if wife_person.citation_list else None
self.__add_name(wife_person, citation_handle,
NameType.MARRIED,
wife_person.primary_name.get_first_name(),
wife_person.primary_name.surname_list[0].prefix,
husband_person.primary_name.get_surname(),
wife_person.primary_name.get_suffix())
# commit the Person
self.dbase.commit_person(wife_person, self.trans)
self.fm2fam[husband_handle, wife_handle] = family
diag_msg = "%s: %s %s" % \
(family.gramps_id,
husband_person.gramps_id if husband_handle else "",
wife_person.gramps_id if wife_handle else "")
# Option: Addtional family citation
if self.option['imp_source_title']:
husband_name = husband_person.get_primary_name()
husband_name = husband_name.get_surname()
wife_name = wife_person.get_primary_name()
wife_name = wife_name.get_surname()
# Original family ID from source
pageref = '[ID: F%05d] %s -- %s' % \
(i +1, husband_name, wife_name)
citation = self.__get_or_create_citation \
(self.option['imp_source_title'],
recflds[family_ix[2]], # F02: FAM CHAN DATE
self.option['imp_citation_page'], pageref)
if citation and citation.handle:
family.add_citation(citation.handle)
# add tag to 'Family'
self.__add_tag('family', family)
# process F08 - F13 Civil Union Date, Place, Source,
# Reference, Text, Info
# GEDCOM symbols: FAM _LIV ...
date = self.__create_date_from_text \
(recflds[family_ix[8]], diag_msg) # F08: ... DATE
place = self.__get_or_create_place \
(recflds[family_ix[9]]) # F09: ... PLAC
# F10: ... SOUR/FAM _LIV SOUR TITL
source = recflds[family_ix[10]]
source_refn = recflds[family_ix[11]] # F11: ... SOUR REFN
source_text = recflds[family_ix[12]] # F12: ... SOUR TEXT
info = recflds[family_ix[13]] # F13: ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[family_ix[8]], source_refn)
if date or place or info or citation:
evt_type = _('Civil union')
event, civu_ref = self.__create_event_and_ref \
(EventType.UNKNOWN, source_text, date, place, citation,
info)
event.set_type((EventType.CUSTOM, evt_type))
if civu_ref:
family.add_event_ref(civu_ref)
# Type of relation
famreltype = FamilyRelType.CIVIL_UNION
family.set_relationship(FamilyRelType(famreltype))
# process F14 - F20 Marriage License Date, Place, Witness,
# Source, Reference, Text, Info
# GEDCOM symbols: FAM MARB ...
# F14: ... DATE/FAM REGS DATE
date = self.__create_date_from_text \
(recflds[family_ix[14]], diag_msg)
# F15: ... PLAC/FAM REGS PLAC
place = self.__get_or_create_place(recflds[family_ix[15]])
# F16: ... _WITN/FAM MARB WITN
witness = recflds[family_ix[16]]
# F17: ... SOUR/FAM MARB SOUR TITL/FAM REGS SOUR
source = recflds[family_ix[17]]
# F18: ... SOUR REFN/FAM REGS SOUR REFN
source_refn = recflds[family_ix[18]]
# F19: ... SOUR TEXT
source_text = recflds[family_ix[19]]
# F20: ... NOTE
info = recflds[family_ix[20]]
citation = self.__get_or_create_citation \
(source, recflds[family_ix[14]], source_refn)
if date or place or info or citation:
desc = source_text
desc = [_f for _f in [source_text, info] if _f]
desc = desc and '; '.join(desc) or None
dummy, marl_ref = self.__create_event_and_ref \
(EventType.MARR_BANNS, desc, date, place, citation, '',
witness, AttributeType.WITNESS)
if marl_ref:
family.add_event_ref(marl_ref)
# process F21 - F27 Civil Marriage Date, Place, Witness,
# Source, Reference, Text, Info
# GEDCOM symbols: FAM MARR(Civil) ...
# F21: ... DATE/FAM MARR DATE
date = self.__create_date_from_text \
(recflds[family_ix[21]], diag_msg)
# F22: ... PLAC/FAM MARR PLAC
place = self.__get_or_create_place(recflds[family_ix[22]])
# F23: ... _WITN/FAM MARR _WITN/FAM MARR WITN/FAM WITN
witness = recflds[family_ix[23]]
# F24: ... SOUR/FAM MARR SOUR/FAM MARR SOUR TITL
source = recflds[family_ix[24]]
# F25: ... SOUR REFN/FAM MARR SOUR REFN
source_refn = recflds[family_ix[25]]
# F26: ... SOUR TEXT/FAM MARR SOUR TEXT
source_text = recflds[family_ix[26]]
info = recflds[family_ix[27]] # F27: ... NOTE
citation = self.__get_or_create_citation \
(source, recflds[family_ix[21]], source_refn)
if date or place or info or citation:
desc = source_text
if not desc:
# 'Civil' is widely accepted and language independent
desc = "Civil"
dummy, mar_ref = self.__create_event_and_ref \
(EventType.MARRIAGE, desc, date, place, citation, info,
witness, AttributeType.WITNESS)
if mar_ref:
family.add_event_ref(mar_ref)
# Type of relation
famreltype = FamilyRelType.MARRIED
family.set_relationship(FamilyRelType(famreltype))
# process F28 - F35 Church Wedding Date, Place, Church, Witness,
# Source, Reference, Text, Info
# GEDCOM symbols: FAM MARR(Church) ...
# F28: ... DATE / FAM ORDI DATE
wedding_date = self.__create_date_from_text \
(recflds[family_ix[28]], diag_msg)
# F29: ... DATE / FAM ORDI PLACE
place = self.__get_or_create_place(recflds[family_ix[29]])
# F30: ... _CHUR / FAM ORDI _CHUR / FAM ORDI RELI
church = recflds[family_ix[30]]
# F31: ... _WITN / FAM ORDI _WITN / FAM ORDI WITN
witness = recflds[family_ix[31]]
# F32: ... SOUR / FAM ORDI SOUR / FAM ORDI SOUR TITL
source = recflds[family_ix[32]]
# F33: ... SOUR REFN / FAM ORDI SOUR REFN
source_refn = recflds[family_ix[33]]
# F34: ... SOUR TEXT / FAM ORDI SOUR TEXT
source_text = recflds[family_ix[34]]
# F35 ... INFO
info = recflds[family_ix[35]]
citation = self.__get_or_create_citation \
(source, recflds[family_ix[28]], source_refn)
if wedding_date or place or info or citation:
desc = [_f for _f in [church, source_text] if _f]
desc = desc and '; '.join(desc) or None
if not desc:
desc = _('Wedding')
dummy, marc_ref = self.__create_event_and_ref \
(EventType.MARRIAGE, desc, wedding_date, place,
citation, info, witness, AttributeType.WITNESS)
if marc_ref:
family.add_event_ref(marc_ref)
# Type of relation
famreltype = FamilyRelType.MARRIED
family.set_relationship(FamilyRelType(famreltype))
# process F05 - F07 Relation Code, Note, Info
refn_code = recflds[family_ix[5]] # F05: FAM REFN / FAM CODE
if refn_code:
# We have seen some artefacts ...
rel_cde = self.__rel_pat.match(refn_code)
# Option: Reference code contains one/two letters
if self.option['opt_refn-code'] and rel_cde:
attr = self.__create_attribute(refn_code,
AttributeType.CUSTOM,
"REFN")
if attr:
family.add_attribute(attr)
comm = recflds[family_ix[6]] # F06: FAM _COMM/FAM COMM
note = recflds[family_ix[7]] # F07: FAM NOTE
note_text = [_f for _f in [comm, note] if _f]
if note_text:
cnt = None
if len(note_text) > 0:
note_cont = (' '.join(note_text)).split(' ')
else:
note_cont = note_text.split(' ')
if note_cont[0] == _('Residence'):
cnt = 1
elif note_cont[0] == _('future') and \
note_cont[1] == _('Residence'):
cnt = 2
else:
note = self.__create_note(note_text, NoteType.FAMILY)
if note and note.handle:
family.add_note(note.handle)
if cnt:
if wedding_date:
date_text = _('after') + ' ' + \
str(wedding_date.dateval[2]) # Wedding Year
# F28: ... DATE / FAM ORDI DATE
date = self.__create_date_from_text \
(date_text, diag_msg)
place_text = ''
# Add all elements of Note Content
for i in range(cnt, len(note_cont)):
place_text += note_cont[i] + ' '
place_text = place_text.rstrip() # Strip whitespace
place = self.__get_or_create_place(place_text)
dummy, place_ref = self.__create_event_and_ref \
(EventType.RESIDENCE, None, date, place, citation)
if place_ref:
family.add_event_ref(place_ref)
# process F36 - F41 Divorce Date, Place, Source, Text,
# Reference, Info
# GEDCOM symbols: FAM DIV ...
# F36: ... DATE / FAM DIVO DATE
date = self.__create_date_from_text \
(recflds[family_ix[36]], diag_msg)
# F37: ... PLAC / FAM DIVO PlAC
place = self.__get_or_create_place(recflds[family_ix[37]])
# F38: ... SOUR / FAM DIV SOUR TITL
source = recflds[family_ix[38]]
# F39: ... SOUR REFN
source_refn = recflds[family_ix[39]]
# F40: ... SOUR TEXT
source_text = recflds[family_ix[40]]
# F41: ... INFO
info = recflds[family_ix[41]]
citation = self.__get_or_create_citation \
(source, recflds[family_ix[36]], source_refn)
if date or place or info or citation:
desc = source_text
dummy, div_ref = self.__create_event_and_ref \
(EventType.DIVORCE, desc, date, place, citation, info)
if div_ref:
family.add_event_ref(div_ref)
# commit the Family
self.dbase.commit_family(family, self.trans)
# add info for import statistic
self.info.add('new-object', FAMILY_KEY, None)
def add_children(self):
"""
Method to add Children.
"""
# Once more to record the father and mother
table = self.def_['Table_1']
# We have seen some case insensitivity in DEF files ...
person_F13 = table.get_record_field_index \
(self.person_identifier[13][self.language].lower()) # F13: Father
person_F14 = table.get_record_field_index \
(self.person_identifier[14][self.language].lower()) # F14: Mother
# start feedback about import progress (GUI/TXT)
self.__display_message(_('Adding children.'),
gui_max=len(self.pers) *0.6)
ind_id = 0
for dummy, rec in enumerate(self.pers):
# Update at the begin
self.progress.step() if self.uistate else self.update()
father = rec[person_F13] # F13: Father
mother = rec[person_F14] # F14: Mother
if father > 0 or mother > 0:
recflds = table.convert_record_to_list(rec, self.mems)
# Option: Original Individuals IDs
if self.option['opt_person-ident']:
ind_id = int(recflds[0]) # F01: INDI RFN
else:
ind_id += 1
# Find the family with this Father and Mother
child_handle = self.__find_person_handle(ind_id)
father_handle = father > 0 and \
self.__find_person_handle(father) or None
mother_handle = mother > 0 and \
self.__find_person_handle(mother) or None
if father > 0 and not father_handle:
LOG.warning(_("Cannot find father for I%(person)s (Father=%(father))"), \
{'person':ind_id, 'father':father})
elif mother > 0 and not mother_handle:
LOG.warning(_("Cannot find mother for I%(person)s (Mother=%(mother))"), \
{'person':ind_id, 'mother':mother})
else:
family = self.fm2fam.get((father_handle, mother_handle), None)
if not family:
# Family not present in REL. Create a new one.
self.high_fam_id += 1
fam_id = self.high_fam_id
family = self.__find_or_create_family(fam_id)
if father_handle:
family.set_father_handle(father_handle)
try:
father_person = self.dbase.get_person_from_handle \
(father_handle)
father_person.add_family_handle(family.get_handle())
# commit the Father
self.dbase.commit_person(father_person, self.trans)
except HandleError:
LOG.warning("Failed to add father %s to child %s", \
father, ind_id)
if mother_handle:
family.set_mother_handle(mother_handle)
try:
mother_person = self.dbase.get_person_from_handle \
(mother_handle)
mother_person.add_family_handle(family.get_handle())
# commit the Mother
self.dbase.commit_person(mother_person, self.trans)
except HandleError:
LOG.warning("Failed to add mother %s to child %s", \
mother, ind_id)
if family:
childref = ChildRef()
childref.set_reference_handle(child_handle)
if childref:
family.add_child_ref(childref)
# commit the Family
self.dbase.commit_family(family, self.trans)
try:
child = self.dbase.get_person_from_handle(child_handle)
if child:
child.add_parent_family_handle(family.get_handle())
# commit the Child
self.dbase.commit_person(child, self.trans)
except HandleError:
LOG.warning("Failed to add child %s to family", ind_id)
| gpl-2.0 |
mmaelicke/scikit-gstat | skgstat/plotting/stvariogram_plot3d.py | 1 | 3989 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
try:
import plotly.graph_objects as go
except ImportError:
pass
def __calculate_plot_data(stvariogram, **kwargs):
    """
    Collect experimental and modeled semivariance grids for the 3D plots.

    Returns the transposed experimental bin meshgrid with its
    semivariances, plus a dense space/time meshgrid with the fitted
    model evaluated on it. Grid resolution is taken from the
    'x_resolution' / 't_resolution' kwargs (default 100 each).
    """
    exp_xx, exp_yy = stvariogram.meshbins
    exp_z = stvariogram.experimental
    # resolution of the modeled surface
    n_space = kwargs.get('x_resolution', 100)
    n_time = kwargs.get('t_resolution', 100)
    # dense grid spanning the experimental space and time lag ranges
    mod_xx, mod_yy = np.mgrid[
        0:np.nanmax(stvariogram.xbins):n_space * 1j,
        0:np.nanmax(stvariogram.tbins):n_time * 1j
    ]
    # evaluate the fitted model on every (space, time) lag pair
    lags = np.vstack((mod_xx.flatten(), mod_yy.flatten())).T
    mod_z = stvariogram.fitted_model(lags)
    return exp_xx.T, exp_yy.T, exp_z, mod_xx, mod_yy, mod_z
def matplotlib_plot_3d(stvariogram, kind='scatter', ax=None, elev=30, azim=220, **kwargs):
    """Plot the spatio-temporal variogram in 3D using matplotlib.

    Parameters
    ----------
    stvariogram : skgstat.SpaceTimeVariogram
        Variogram instance to plot.
    kind : str
        ``'scatter'`` (points) or ``'surf'`` (triangulated surface) for the
        experimental semivariances.
    ax : mpl_toolkits.mplot3d.Axes3D, optional
        Existing 3D axis to draw into; a new figure is created if None.
    elev, azim : int
        Camera elevation and azimuth passed to ``view_init``.

    Returns
    -------
    matplotlib.figure.Figure

    Raises
    ------
    ValueError
        If ``ax`` is not an ``Axes3D`` instance, or ``kind`` is unknown.
    """
    # get the data, spanned over a bin meshgrid
    xx, yy, z, _xx, _yy, _z = __calculate_plot_data(stvariogram, **kwargs)
    x = xx.flatten()
    y = yy.flatten()
    # some settings
    c = kwargs.get('color', kwargs.get('c', 'b'))
    cmap = kwargs.get('model_color', kwargs.get('cmap', 'terrain'))
    alpha = kwargs.get('alpha', 0.8)
    depthshade = kwargs.get('depthshade', False)
    # handle the axes
    if ax is not None:
        if not isinstance(ax, Axes3D):
            raise ValueError('The passed ax object is not an instance of mpl_toolkis.mplot3d.Axes3D.')
        fig = ax.get_figure()
    else:
        fig = plt.figure(figsize=kwargs.get('figsize', (10, 10)))
        ax = fig.add_subplot(111, projection='3d')
    # do the plot
    ax.view_init(elev=elev, azim=azim)
    if kind == 'surf':
        ax.plot_trisurf(x, y, z, color=c, alpha=alpha)
    elif kind == 'scatter':
        ax.scatter(x, y, z, c=c, depthshade=depthshade)
    else:
        raise ValueError('%s is not a valid 3D plot' % kind)
    # add the model surface unless explicitly suppressed
    if not kwargs.get('no_model', False):
        ax.plot_trisurf(_xx.flatten(), _yy.flatten(), _z, cmap=cmap, alpha=alpha)
    # labels:
    ax.set_xlabel('space')
    ax.set_ylabel('time')
    ax.set_zlabel('semivariance [%s]' % stvariogram.estimator.__name__)
    # return
    return fig
def plotly_plot_3d(stvariogram, kind='scatter', fig=None, **kwargs):
    """Plot the spatio-temporal variogram in 3D using plotly.

    The experimental semivariances are drawn as markers
    (``kind='scatter'``) or as a surface (``kind='surf'``); the fitted
    model is overlaid as a second surface unless ``no_model=True``.

    Parameters
    ----------
    stvariogram : skgstat.SpaceTimeVariogram
        Variogram instance to plot.
    kind : str
        ``'scatter'`` or ``'surf'``.
    fig : plotly.graph_objects.Figure, optional
        Existing figure to add the traces to; a new one is created if None.

    Returns
    -------
    plotly.graph_objects.Figure

    Raises
    ------
    ValueError
        If ``kind`` is neither ``'scatter'`` nor ``'surf'`` — consistent
        with ``matplotlib_plot_3d``, which rejects unknown kinds too.
    """
    if kind not in ('scatter', 'surf'):
        raise ValueError('%s is not a valid 3D plot' % kind)
    # get the data spanned over a bin meshgrid
    xx, yy, z, _xx, _yy, _z = __calculate_plot_data(stvariogram, **kwargs)
    # get some settings
    c = kwargs.get('color', kwargs.get('c', 'black'))
    cmap = kwargs.get('model_color', kwargs.get('colorscale', kwargs.get('cmap', 'Electric')))
    alpha = kwargs.get('opacity', kwargs.get('alpha', 0.6))
    # handle the figure
    if fig is None:
        fig = go.Figure()
    # plot the experimental semivariances
    if kind == 'surf':
        fig.add_trace(
            go.Surface(
                x=xx,
                y=yy,
                z=z.reshape(xx.shape),
                opacity=0.8 * alpha,
                colorscale=[[0, c], [1, c]],
                name='experimental variogram'
            )
        )
    # 'if' (not 'elif') so that add_points=True can overlay the raw
    # points on top of a 'surf' plot as well
    if kind == 'scatter' or kwargs.get('add_points', False):
        fig.add_trace(
            go.Scatter3d(
                x=xx.flatten(),
                y=yy.flatten(),
                z=z,
                mode='markers',
                opacity=alpha,
                marker=dict(color=c, size=kwargs.get('size', 4)),
                name='experimental variogram'
            )
        )
    # add the model; plotly requires opacity in [0, 1], so the boosted
    # value must be capped with min() — max(1, ...) always yielded >= 1
    # and silently ignored the user's alpha setting
    if not kwargs.get('no_model', False):
        fig.add_trace(
            go.Surface(
                x=_xx,
                y=_yy,
                z=_z.reshape(_xx.shape),
                opacity=min(1.0, alpha * 1.2),
                colorscale=cmap,
                name='%s model' % stvariogram.model.__name__
            )
        )
    # set some labels
    fig.update_layout(scene=dict(
        xaxis_title='space',
        yaxis_title='time',
        zaxis_title='semivariance [%s]' % stvariogram.estimator.__name__
    ))
    # return
    return fig
| mit |
shaufi10/odoo | addons/hr_timesheet_sheet/wizard/__init__.py | 443 | 1075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_current
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/scipy/weave/examples/binary_search.py | 97 | 6783 | # Offers example of inline C for binary search algorithm.
# Borrowed from Kalle Svensson in the Python Cookbook.
# The results are nearly in the "not worth it" category.
#
# C:\home\ej\wrk\scipy\compiler\examples>python binary_search.py
# Binary search for 3000 items in 100000 length list of integers:
# speed in python: 0.139999985695
# speed in c: 0.0900000333786
# speed up: 1.41
# search(a,3450) 3450 3450
# search(a,-1) -1 -1
# search(a,10001) 10001 10001
#
# Note -- really need to differentiate between conversion errors and
# run time errors. This would reduce useless compiles and provide a
# more intelligent control of things.
from __future__ import absolute_import, print_function
import sys
sys.path.insert(0,'..')
# from compiler import inline_tools
import scipy.weave.inline_tools as inline_tools
from bisect import bisect_left as bisect
import types
def c_int_search(seq,t,chk=1):
    """Binary search over a Python list via inline C (scipy.weave).

    Returns the index of ``t`` in the sorted list ``seq`` or -1 if absent.
    The C code validates the argument types itself, so the Python-level
    checks are commented out.
    """
    # do partial type checking in Python.
    # checking that list items are ints should happen in py_to_scalar<int>
    # if chk:
    #     assert(type(t) is int)
    #     assert(type(seq) is list)
    # the #line directive keeps compiler errors pointing at this file
    code = """
           #line 33 "binary_search.py"
           if (!PyList_Check(py_seq))
               py::fail(PyExc_TypeError, "seq must be a list");
           if (!PyInt_Check(py_t))
               py::fail(PyExc_TypeError, "t must be an integer");
           int val, m, min = 0;
           int max = seq.len()- 1;
           for(;;)
           {
               if (max < min )
               {
                   return_val = -1;
                   break;
               }
               m = (min + max) / 2;
               val = py_to_int(PyList_GET_ITEM(py_seq,m),"val");
               if (val < t)
                   min = m + 1;
               else if (val > t)
                   max = m - 1;
               else
               {
                   return_val = m;
                   break;
               }
           }
           """
    # return inline_tools.inline(code,['seq','t'],compiler='msvc')
    return inline_tools.inline(code,['seq','t'],verbose=2)
def c_int_search_scxx(seq,t,chk=1):
    """Binary search via inline C using the scxx sequence wrappers.

    Same contract as ``c_int_search`` (index of ``t`` or -1), but element
    access goes through ``seq[m]`` on the scxx object instead of the raw
    CPython list API, so the type checks are done in Python when ``chk``.
    """
    # do partial type checking in Python.
    # checking that list items are ints should happen in py_to_scalar<int>
    if chk:
        assert(type(t) is int)
        assert(type(seq) is list)
    code = """
           #line 67 "binary_search.py"
           int val, m, min = 0;
           int max = seq.len()- 1;
           for(;;)
           {
               if (max < min )
               {
                   return_val = -1;
                   break;
               }
               m = (min + max) / 2;
               val = seq[m];
               if (val < t)
                   min = m + 1;
               else if (val > t)
                   max = m - 1;
               else
               {
                   return_val = m;
                   break;
               }
           }
           """
    # return inline_tools.inline(code,['seq','t'],compiler='msvc')
    return inline_tools.inline(code,['seq','t'],verbose=2)
try:
    from numpy import *

    def c_array_int_search(seq,t):
        """Binary search over a numpy int array via inline C.

        Uses the raw data pointer (``seq``/``Nseq`` as exposed by weave)
        for element access, avoiding per-element Python conversions.
        """
        code = """
               #line 62 "binary_search.py"
               int val, m, min = 0;
               int max = Nseq[0] - 1;
               PyObject *py_val;
               for(;;)
               {
                   if (max < min )
                   {
                       return_val = -1;
                       break;
                   }
                   m = (min + max) / 2;
                   val = seq[m];
                   if (val < t)
                       min = m + 1;
                   else if (val > t)
                       max = m - 1;
                   else
                   {
                       return_val = m;
                       break;
                   }
               }
               """
        # return inline_tools.inline(code,['seq','t'],compiler='msvc')
        return inline_tools.inline(code,['seq','t'],verbose=2,
                                   extra_compile_args=['-O2','-G6'])
except:
    # NOTE(review): bare except makes the numpy-backed variant strictly
    # best-effort — it is silently skipped if numpy is missing or the
    # definition fails. Intentional in this example script.
    pass
def py_int_search(seq, t):
    """Pure-Python binary search.

    Return the index of ``t`` in the sorted sequence ``seq``, or -1 if
    ``t`` is not present.
    """
    lo = 0                      # avoid shadowing builtins min/max
    hi = len(seq) - 1
    while 1:
        if hi < lo:
            return -1
        # floor division keeps the midpoint an int on both Python 2 and
        # Python 3 — plain '/' yields a float index under Python 3
        # (the file does not import division from __future__) and would
        # raise "list indices must be integers"
        m = (lo + hi) // 2
        if seq[m] < t:
            lo = m + 1
        elif seq[m] > t:
            hi = m - 1
        else:
            return m
import time
def search_compare(a,n):
    """Benchmark every binary-search variant with ``n`` lookups on ``a``.

    Prints wall-clock timings for the pure-Python version, ``bisect``,
    the two inline-C variants (with and without Python-side asserts) and,
    when available, the numpy-array variant.

    NOTE(review): reads the module-level global ``m`` (list length) for
    the banner, and reuses the loop variable ``i`` after each timing loop
    to warm the weave compile cache — both are quirks of this example.
    """
    print('Binary search for %d items in %d length list of integers:' % (n,m))
    t1 = time.time()
    for i in range(n):
        py_int_search(a,i)
    t2 = time.time()
    py = (t2-t1)
    print(' speed in python:', (t2 - t1))
    # bisect
    t1 = time.time()
    for i in range(n):
        bisect(a,i)
    t2 = time.time()
    bi = (t2-t1) + 1e-20  # protect against div by zero
    print(' speed of bisect:', bi)
    print(' speed up: %3.2f' % (py/bi))
    # get it in cache (first call triggers the weave compile)
    c_int_search(a,i)
    t1 = time.time()
    for i in range(n):
        c_int_search(a,i,chk=1)
    t2 = time.time()
    sp = (t2-t1)+1e-20  # protect against div by zero
    print(' speed in c:',sp)
    print(' speed up: %3.2f' % (py/sp))
    # get it in cache
    c_int_search(a,i)
    t1 = time.time()
    for i in range(n):
        c_int_search(a,i,chk=0)
    t2 = time.time()
    sp = (t2-t1)+1e-20  # protect against div by zero
    print(' speed in c(no asserts):',sp)
    print(' speed up: %3.2f' % (py/sp))
    # get it in cache
    c_int_search_scxx(a,i)
    t1 = time.time()
    for i in range(n):
        c_int_search_scxx(a,i,chk=1)
    t2 = time.time()
    sp = (t2-t1)+1e-20  # protect against div by zero
    print(' speed for scxx:',sp)
    print(' speed up: %3.2f' % (py/sp))
    # get it in cache
    c_int_search_scxx(a,i)
    t1 = time.time()
    for i in range(n):
        c_int_search_scxx(a,i,chk=0)
    t2 = time.time()
    sp = (t2-t1)+1e-20  # protect against div by zero
    print(' speed for scxx(no asserts):',sp)
    print(' speed up: %3.2f' % (py/sp))
    # get it in cache; numpy variant is best-effort (see definition above)
    a = array(a)
    try:
        a = array(a)
        c_array_int_search(a,i)
        t1 = time.time()
        for i in range(n):
            c_array_int_search(a,i)
        t2 = time.time()
        sp = (t2-t1)+1e-20  # protect against div by zero
        print(' speed in c(numpy arrays):',sp)
        print(' speed up: %3.2f' % (py/sp))
    except:
        pass
if __name__ == "__main__":
    # note bisect returns index+1 compared to other algorithms
    m = 100000
    a = range(m)
    n = 50000
    search_compare(a,n)
    # sanity check: all three implementations should agree on hits and misses
    print('search(a,3450)', c_int_search(a,3450), py_int_search(a,3450), bisect(a,3450))
    print('search(a,-1)', c_int_search(a,-1), py_int_search(a,-1), bisect(a,-1))
    print('search(a,10001)', c_int_search(a,10001), py_int_search(a,10001),bisect(a,10001))
| bsd-3-clause |
ibressler/pyqtgraph | examples/Draw.py | 28 | 1142 | # -*- coding: utf-8 -*-
"""
Demonstrate ability of ImageItem to be used as a canvas for painting with
the mouse.
"""
import initExample ## Add path to library (just for examples; you do not need this)
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
import pyqtgraph as pg
app = QtGui.QApplication([])
## Create window with GraphicsView widget
w = pg.GraphicsView()
w.show()
w.resize(800,800)
w.setWindowTitle('pyqtgraph example: Draw')
view = pg.ViewBox()
w.setCentralItem(view)
## lock the aspect ratio
view.setAspectLocked(True)
## Create image item
img = pg.ImageItem(np.zeros((200,200)))
view.addItem(img)
## Set initial view bounds
view.setRange(QtCore.QRectF(0, 0, 200, 200))
## start drawing with 3x3 brush
kern = np.array([
[0.0, 0.5, 0.0],
[0.5, 1.0, 0.5],
[0.0, 0.5, 0.0]
])
img.setDrawKernel(kern, mask=kern, center=(1,1), mode='add')
img.setLevels([0, 10])
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit |
Imaginashion/cloud-vision | .fr-d0BNfn/django-jquery-file-upload/venv/lib/python3.5/encodings/raw_unicode_escape.py | 852 | 1208 | """ Python 'raw-unicode-escape' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless encoder/decoder pair for 'raw-unicode-escape'."""

    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.raw_unicode_escape_encode
    decode = codecs.raw_unicode_escape_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental encoder for the 'raw-unicode-escape' codec."""

    def encode(self, input, final=False):
        # The C helper returns an (encoded_bytes, length_consumed)
        # tuple; only the encoded output is needed here.
        output, _consumed = codecs.raw_unicode_escape_encode(input, self.errors)
        return output
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental decoder for the 'raw-unicode-escape' codec."""

    def decode(self, input, final=False):
        # The C helper returns a (decoded_string, bytes_consumed)
        # tuple; only the decoded string is relevant here.
        decoded, _consumed = codecs.raw_unicode_escape_decode(input, self.errors)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer: combines the stateless Codec with StreamWriter."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader: combines the stateless Codec with StreamReader."""
    pass
### encodings module API
def getregentry():
    """Assemble the CodecInfo record registered for this codec."""
    components = dict(
        name='raw-unicode-escape',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
    return codecs.CodecInfo(**components)
| mit |
timchen86/ntulifeguardapp | gdata-2.0.18/samples/apps/marketplace_sample/appengine_utilities/settings_default.py | 26 | 3607 | """
Copyright (c) 2008, appengine-utilities project
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
- Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
- Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
- Neither the name of the appengine-utilities project nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
__author__="jbowman"
__date__ ="$Sep 11, 2009 4:20:11 PM$"

# Configuration settings for the session class.
session = {
    "COOKIE_NAME": "gaeutilities_session",
    "DEFAULT_COOKIE_PATH": "/",
    "DEFAULT_COOKIE_DOMAIN": False, # Set to False if you do not want this value
                                    # set on the cookie, otherwise put the
                                    # domain value you wish used.
    "SESSION_EXPIRE_TIME": 7200,    # sessions are valid for 7200 seconds
                                    # (2 hours)
    "INTEGRATE_FLASH": True,        # integrate functionality from flash module?
    "SET_COOKIE_EXPIRES": True,     # Set to True to add expiration field to
                                    # cookie
    "WRITER":"datastore",           # Use the datastore writer by default.
                                    # cookie is the other option.
    "CLEAN_CHECK_PERCENT": 50,      # By default, 50% of all requests will clean
                                    # the datastore of expired sessions
    "CHECK_IP": True,               # validate sessions by IP
    "CHECK_USER_AGENT": True,       # validate sessions by user agent
    "SESSION_TOKEN_TTL": 5,         # Number of seconds a session token is valid
                                    # for.
    "UPDATE_LAST_ACTIVITY": 60,     # Number of seconds that may pass before
                                    # last_activity is updated
}

# Configuration settings for the cache class
cache = {
    "DEFAULT_TIMEOUT": 3600,    # cache expires after one hour (3600 sec)
    "CLEAN_CHECK_PERCENT": 50,  # 50% of all requests will clean the database
    "MAX_HITS_TO_CLEAN": 20,    # the maximum number of cache hits to clean
}

# Configuration settings for the flash class
flash = {
    "COOKIE_NAME": "appengine-utilities-flash",
}

# Configuration settings for the paginator class
paginator = {
    "DEFAULT_COUNT": 10,
    "CACHE": 10,
    "DEFAULT_SORT_ORDER": "ASC",
}

# Configuration settings for the ROTModel (retry-on-timeout) class
rotmodel = {
    "RETRY_ATTEMPTS": 3,
    "RETRY_INTERVAL": .2,
}

# NOTE(review): Python 2 print statement — this module targets the
# (Python 2 only) Google App Engine runtime of its era.
if __name__ == "__main__":
    print "Hello World";
| apache-2.0 |
mjvakili/ccppabc | ccppabc/code/test_data.py | 1 | 4243 | '''
Test the data.py module
'''
import numpy as np
import matplotlib.pyplot as plt
import util
import data as Data
# --- Halotools ---
from halotools.empirical_models import PrebuiltHodModelFactory
from ChangTools.plotting import prettyplot
from ChangTools.plotting import prettycolors
def PlotCovariance(obvs, Mr=21, b_normal=0.25, inference='mcmc'):
    ''' Plot the covariance matrix for a specified obvs

    Parameters
    ----------
    obvs : str
        Observable to plot: 'xi' (two-point correlation) or 'gmf'
        (group multiplicity function). Any other value raises a
        NameError because obvs_cov is never assigned.
    Mr : int
        Absolute r-band magnitude threshold of the sample.
    b_normal : float
        Normalization parameter forwarded to Data.data_cov.
    inference : str
        Inference method tag used for the covariance file and figure name.
    '''
    # import the covariance matrix
    covar = Data.data_cov(Mr=Mr, b_normal=b_normal, inference=inference)
    # slice out the sub-matrix of the requested observable
    # NOTE(review): the index ranges 1:16 / 17: encode the layout of the
    # combined data vector — confirm against Data.data_cov if it changes
    if obvs == 'xi':
        obvs_cov = covar[1:16 , 1:16]
        r_bin = Data.xi_binedges()
    elif obvs == 'gmf':
        obvs_cov = covar[17:, 17:]
        binedges = Data.data_gmf_bins()
        r_bin = 0.5 * (binedges[:-1] + binedges[1:])
    n_bin = int(np.sqrt(obvs_cov.size))
    # calculate the reduced covariance (correlation matrix) for plotting
    red_covar = np.zeros([n_bin, n_bin])
    for ii in range(n_bin):
        for jj in range(n_bin):
            red_covar[ii][jj] = obvs_cov[ii][jj]/np.sqrt(obvs_cov[ii][ii] * obvs_cov[jj][jj])
    prettyplot()
    fig = plt.figure()
    sub = fig.add_subplot(111)
    cont = sub.pcolormesh(r_bin, r_bin, red_covar, cmap=plt.cm.afmhot_r)
    plt.colorbar(cont)
    sub.set_xlim([r_bin[0], r_bin[-1]])
    sub.set_ylim([r_bin[0], r_bin[-1]])
    sub.set_xscale('log')
    sub.set_yscale('log')
    sub.set_xlabel(r'$\mathtt{r}\;[\mathtt{Mpc/h}$]', fontsize=25)
    sub.set_ylabel(r'$\mathtt{r}\;[\mathtt{Mpc/h}$]', fontsize=25)
    # write the figure to the project's figure directory
    fig_file = ''.join([util.fig_dir(),
        obvs.upper(), 'covariance',
        '.Mr', str(Mr),
        '.bnorm', str(round(b_normal,2)),
        '.', inference, '_inf.png'])
    fig.savefig(fig_file, bbox_inches='tight')
    plt.close()
    return None
# ---- Plotting ----
def xi(Mr=20, Nmock=500):
    '''
    Plot xi(r) of the fake observations

    Saves r * xi(r) with error bars (sqrt of the diagonal covariance)
    to the project's figure directory.
    '''
    prettyplot()
    pretty_colors = prettycolors()
    # fake-observation correlation function and its variance
    xir, cii = Data.data_xi(Mr=Mr, Nmock=Nmock)
    rbin = Data.data_xi_bins(Mr=Mr)
    fig = plt.figure(1)
    sub = fig.add_subplot(111)
    sub.plot(rbin, rbin*xir, c='k', lw=1)
    sub.errorbar(rbin, rbin*xir, yerr = rbin*cii**0.5 , fmt="ok", ms=1, capsize=2, alpha=1.)
    sub.set_xlim([0.1, 15])
    sub.set_ylim([1, 10])
    sub.set_yscale("log")
    sub.set_xscale("log")
    sub.set_xlabel(r'$\mathtt{r}\; (\mathtt{Mpc})$', fontsize=25)
    sub.set_ylabel(r'$\mathtt{r} \xi_{\rm gg}$', fontsize=25)
    fig_file = ''.join([util.fig_dir(),
        'xi.Mr', str(Mr), '.Nmock', str(Nmock), '.png'])
    fig.savefig(fig_file, bbox_inches='tight')
    plt.close()
    return None
def gmf(Mr=20, Nmock=500):
    '''
    Plot Group Multiplicty Function of fake observations

    Points are placed at the bin centers with the mock-to-mock scatter
    as error bars; the figure is written to the project figure directory.
    '''
    prettyplot()
    pretty_colors = prettycolors()
    # import fake obs GMF
    gmf, sig_gmf = Data.data_gmf(Mr=Mr, Nmock=Nmock)
    # group richness bins
    gmf_bin = Data.data_gmf_bins()
    fig = plt.figure(1)
    sub = fig.add_subplot(111)
    # plot at bin centers
    sub.errorbar(
            0.5*(gmf_bin[:-1]+gmf_bin[1:]), gmf, yerr=sig_gmf,
            fmt="ok", capsize=1.0
            )
    sub.set_xlim([1, 60])
    sub.set_yscale('log')
    sub.set_ylabel(r"Group Multiplicity Function (h$^{3}$ Mpc$^{-3}$)", fontsize=20)
    sub.set_xlabel(r"$\mathtt{Group\;\;Richness}$", fontsize=20)
    # save to file
    fig_file = ''.join([util.fig_dir(),
        'gmf.Mr', str(Mr), '.Nmock', str(Nmock), '.png'])
    fig.savefig(fig_file, bbox_inches='tight')
    return None
# ---- tests -----
def xi_binning_tests(Mr=20):
    """Print xi(r) for repeated mock populations to eyeball bin stability.

    NOTE(review): Python 2 print statements and xrange — this module
    targets Python 2.
    """
    model = PrebuiltHodModelFactory('zheng07', threshold = -1.0*np.float(Mr))
    # custom binning: one small-scale bin plus log-spaced bins to 20 Mpc/h
    rbins = np.concatenate([np.array([0.1]), np.logspace(np.log10(0.5), np.log10(20.), 15)])
    print 'R bins = ', rbins
    for ii in xrange(10):
        model.populate_mock() # population mock realization
        #rbins = np.logspace(-1, np.log10(20.), 16)
        r_bin, xi_r = model.mock.compute_galaxy_clustering(rbins=rbins)
        print xi_r
def test_nbar(Mr=21, b_normal=0.25):
    # print the fake-observation number density for a quick sanity check
    print Data.data_nbar(Mr=Mr, b_normal=b_normal)
# ad-hoc entry point: run one of the checks above by (un)commenting
if __name__=='__main__':
    PlotCovariance('gmf', inference='mcmc')
    #test_nbar()
    #xi_cov(Mr=20, Nmock=500)
    #xi_binning_tests(Mr=20)
| mit |
michealcarrerweb/LHVent_app | time_log/migrations/0006_auto_20170731_1738.py | 1 | 4838 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-07-31 17:38
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated schema migration (2017-07-31).

    Creates GenericActivity, LoggedDay, ScheduledTimeSlotEntry and
    StaffLogDetailsForDay, and rewires AvailabilityForDay.

    NOTE(review): 'Sscheduled Time Slots' below looks like a typo for
    'Scheduled Time Slots'; migrations are historical records, so it is
    preserved here — correct it in the model's Meta via a new migration.
    """

    dependencies = [
        ('work_order', '0007_order_time_requirements_filled'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('time_log', '0005_auto_20170728_1831'),
    ]

    operations = [
        migrations.CreateModel(
            name='GenericActivity',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(blank=True)),
                ('activity', models.CharField(max_length=50, verbose_name='activity')),
            ],
            options={
                'verbose_name': 'Activity Type',
                'verbose_name_plural': 'Activity Types',
                'ordering': ['activity'],
            },
        ),
        migrations.CreateModel(
            name='LoggedDay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(blank=True)),
                ('day', models.DateField(unique=True)),
            ],
        ),
        migrations.CreateModel(
            name='ScheduledTimeSlotEntry',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('activity', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='slotactivity', to='time_log.GenericActivity', verbose_name='activity')),
            ],
            options={
                'verbose_name': 'Scheduled Time Slot',
                'verbose_name_plural': 'Sscheduled Time Slots',
                'ordering': ['work_order'],
            },
        ),
        migrations.CreateModel(
            name='StaffLogDetailsForDay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('slug', models.SlugField(blank=True, max_length=150)),
                ('origin', models.DateTimeField(auto_now_add=True, null=True)),
                ('last_modified', models.DateTimeField(auto_now=True, null=True)),
                ('actual_start', models.TimeField(verbose_name='actual start')),
                ('actual_end', models.TimeField(verbose_name='actual end')),
                ('forecasted_hours_for_day', models.DecimalField(decimal_places=3, max_digits=5)),
                ('actual_hours_for_day', models.DecimalField(decimal_places=3, max_digits=5)),
                ('full', models.BooleanField(default=False, verbose_name='full')),
                ('over_time', models.BooleanField(default=False, verbose_name='over time')),
                ('day', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='daylogdetail', to='time_log.LoggedDay', verbose_name='Day for log')),
                ('staff', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='staffday', to=settings.AUTH_USER_MODEL, verbose_name='Staff Member')),
            ],
            options={
                'verbose_name': "Staff's Overall Time For Day",
                'verbose_name_plural': "Staff's Overall Times For Day",
                'ordering': ['staff', 'day'],
            },
        ),
        migrations.AlterModelOptions(
            name='availabilityforday',
            options={'ordering': ['staff', 'day'], 'verbose_name': "Staff's Scheduled Hours For Day Of The Week", 'verbose_name_plural': "Staff's Scheduled Hours For Days Of The Week"},
        ),
        migrations.AlterField(
            model_name='availabilityforday',
            name='staff',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='employeeavail', to=settings.AUTH_USER_MODEL, verbose_name='Staff Member'),
        ),
        migrations.AddField(
            model_name='scheduledtimeslotentry',
            name='staff_day',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='staffslot', to='time_log.StaffLogDetailsForDay', verbose_name='Staff Day'),
        ),
        migrations.AddField(
            model_name='scheduledtimeslotentry',
            name='work_order',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='workorder', to='work_order.Order', verbose_name='work order'),
        ),
        migrations.AlterUniqueTogether(
            name='stafflogdetailsforday',
            unique_together=set([('day', 'staff')]),
        ),
    ]
| mit |
ramitalat/odoo | addons/crm_claim/report/__init__.py | 446 | 1080 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_claim_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zhengwsh/InplusTrader_Linux | InplusTrader/backtestEngine/api/helper.py | 2 | 1115 | # -*- coding: utf-8 -*-
#
# Copyright 2017 Ricequant, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import api_base, api_future, api_stock
from ..const import ACCOUNT_TYPE
def get_apis(account_list):
    """Collect the public API callables for the given account types.

    The base API is always included; stock- and future-specific
    functions are layered on top for every matching entry in
    ``account_list``.
    """
    def _exports(module):
        # map each name listed in __all__ to the actual attribute
        return {name: getattr(module, name) for name in module.__all__}

    apis = _exports(api_base)
    extensions = {
        ACCOUNT_TYPE.STOCK: api_stock,
        ACCOUNT_TYPE.FUTURE: api_future,
    }
    for account_type in account_list:
        module = extensions.get(account_type)
        if module is not None:
            apis.update(_exports(module))
    return apis
| mit |
shawnadelic/shuup | shuup/campaigns/models/context_conditions.py | 2 | 2074 | # This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.db import models
from django.utils.translation import ugettext_lazy as _
from polymorphic.models import PolymorphicModel
from shuup.core.models import AnonymousContact, Contact, ContactGroup
class ContextCondition(PolymorphicModel):
    """Polymorphic base for campaign context conditions.

    Subclasses override ``matches`` to decide whether a campaign applies
    in a given context; the base implementation never matches.
    """
    # value model this condition operates on (set by subclasses)
    model = None

    identifier = "context_condition"
    name = _("Context Condition")
    description = _("Context Condition")
    active = models.BooleanField(default=True)

    def matches(self, context):
        """Return True if this condition holds for ``context``."""
        return False
class ContactGroupCondition(ContextCondition):
    """Condition matching customers by contact-group membership."""
    model = ContactGroup
    identifier = "contact_group_condition"
    name = _("Contact Group")

    contact_groups = models.ManyToManyField(ContactGroup, verbose_name=_("contact groups"))

    def matches(self, context):
        """Return True if the context's customer belongs to any selected group.

        A missing customer is treated as an AnonymousContact, so campaigns
        targeting the anonymous group still match guests.
        """
        customer = (context.customer if context.customer is not None else AnonymousContact())
        customers_groups = customer.groups.all()
        return self.contact_groups.filter(pk__in=customers_groups).exists()

    @property
    def description(self):
        return _("Limit the campaign to members of the selected contact groups.")

    @property
    def values(self):
        return self.contact_groups

    @values.setter
    def values(self, values):
        self.contact_groups = values
class ContactCondition(ContextCondition):
    """Condition matching specific, explicitly selected contacts."""
    model = Contact
    identifier = "contact_condition"
    name = _("Contact")

    contacts = models.ManyToManyField(Contact, verbose_name=_("contacts"))

    def matches(self, context):
        """Return True if the context's customer is one of the selected contacts.

        Unlike ContactGroupCondition, an anonymous (None) customer can
        never match here.
        """
        customer = context.customer
        return bool(customer and self.contacts.filter(pk=customer.pk).exists())

    @property
    def description(self):
        return _("Limit the campaign to selected contacts.")

    @property
    def values(self):
        return self.contacts

    @values.setter
    def values(self, values):
        self.contacts = values
| agpl-3.0 |
timonwong/OmniMarkupPreviewer | OmniMarkupLib/Renderers/libs/markdown/blockprocessors.py | 16 | 22431 | """
CORE MARKDOWN BLOCKPARSER
===========================================================================
This parser handles basic parsing of Markdown blocks. It doesn't concern itself
with inline elements such as **bold** or *italics*, but rather just catches
blocks, lists, quotes, etc.
The BlockParser is made up of a bunch of BlockProssors, each handling a
different type of block. Extensions may add/replace/remove BlockProcessors
as they need to alter how markdown blocks are parsed.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import logging
import re
from . import util
from .blockparser import BlockParser
logger = logging.getLogger('MARKDOWN')
def build_block_parser(md_instance, **kwargs):
    """ Build the default block parser used by Markdown. """
    parser = BlockParser(md_instance)
    # NOTE(review): registration order appears to determine the order in
    # which processors are tried for each block — confirm against the
    # BlockParser.blockprocessors container before reordering.
    parser.blockprocessors['empty'] = EmptyBlockProcessor(parser)
    parser.blockprocessors['indent'] = ListIndentProcessor(parser)
    parser.blockprocessors['code'] = CodeBlockProcessor(parser)
    parser.blockprocessors['hashheader'] = HashHeaderProcessor(parser)
    parser.blockprocessors['setextheader'] = SetextHeaderProcessor(parser)
    parser.blockprocessors['hr'] = HRProcessor(parser)
    parser.blockprocessors['olist'] = OListProcessor(parser)
    parser.blockprocessors['ulist'] = UListProcessor(parser)
    parser.blockprocessors['quote'] = BlockQuoteProcessor(parser)
    parser.blockprocessors['paragraph'] = ParagraphProcessor(parser)
    return parser
class BlockProcessor:
    """ Base class for block processors.

    Each subclass will provide the methods below to work with the source and
    tree. Each processor will need to define it's own ``test`` and ``run``
    methods. The ``test`` method should return True or False, to indicate
    whether the current block should be processed by this processor. If the
    test passes, the parser will call the processors ``run`` method.

    """

    def __init__(self, parser):
        self.parser = parser
        # number of spaces that count as one level of indentation
        self.tab_length = parser.markdown.tab_length

    def lastChild(self, parent):
        """ Return the last child of an etree element. """
        if len(parent):
            return parent[-1]
        else:
            return None

    def detab(self, text):
        """ Remove a tab from the front of each line of the given text.

        Returns a 2-tuple: the detabbed leading run of lines, and the
        untouched remainder starting at the first non-blank line that
        is not indented by a full tab.
        """
        newtext = []
        lines = text.split('\n')
        for line in lines:
            if line.startswith(' '*self.tab_length):
                newtext.append(line[self.tab_length:])
            elif not line.strip():
                # blank lines do not end the indented run; keep them empty
                newtext.append('')
            else:
                break
        return '\n'.join(newtext), '\n'.join(lines[len(newtext):])

    def looseDetab(self, text, level=1):
        """ Remove a tab from front of lines but allowing dedented lines.

        Unlike ``detab``, lines without the full ``level`` tabs of indent
        are kept as-is rather than ending the run.
        """
        lines = text.split('\n')
        for i in range(len(lines)):
            if lines[i].startswith(' '*self.tab_length*level):
                lines[i] = lines[i][self.tab_length*level:]
        return '\n'.join(lines)

    def test(self, parent, block):
        """ Test for block type. Must be overridden by subclasses.

        As the parser loops through processors, it will call the ``test`` method
        on each to determine if the given block of text is of that type. This
        method must return a boolean ``True`` or ``False``. The actual method of
        testing is left to the needs of that particular block type. It could
        be as simple as ``block.startswith(some_string)`` or a complex regular
        expression. As the block type may be different depending on the parent
        of the block (i.e. inside a list), the parent etree element is also
        provided and may be used as part of the test.

        Keywords:

        * ``parent``: A etree element which will be the parent of the block.
        * ``block``: A block of text from the source which has been split at
            blank lines.
        """
        pass

    def run(self, parent, blocks):
        """ Run processor. Must be overridden by subclasses.

        When the parser determines the appropriate type of a block, the parser
        will call the corresponding processor's ``run`` method. This method
        should parse the individual lines of the block and append them to
        the etree.

        Note that both the ``parent`` and ``etree`` keywords are pointers
        to instances of the objects which should be edited in place. Each
        processor must make changes to the existing objects as there is no
        mechanism to return new/different objects to replace them.

        This means that this method should be adding SubElements or adding text
        to the parent, and should remove (``pop``) or add (``insert``) items to
        the list of blocks.

        Keywords:

        * ``parent``: A etree element which is the parent of the current block.
        * ``blocks``: A list of all remaining blocks of the document.
        """
        pass
class ListIndentProcessor(BlockProcessor):
    """ Process children of list items.
    Handles blocks indented one tab-width relative to an enclosing list
    item, parsing them as children of that item rather than as code.
    Example:
        * a list item
           process this part
           or this part
    """
    # Tags which may directly hold the indented content.
    ITEM_TYPES = ['li']
    # List container tags whose last child item may adopt the content.
    LIST_TYPES = ['ul', 'ol']
    def __init__(self, *args):
        BlockProcessor.__init__(self, *args)
        # Matches one or more leading runs of tab_length spaces; used by
        # get_level to measure indentation depth.
        self.INDENT_RE = re.compile(r'^(([ ]{%s})+)'% self.tab_length)
    def test(self, parent, block):
        # Indented block, not already detabbed by an outer call, and the
        # parent (or its last child) is list-related.
        return block.startswith(' '*self.tab_length) and \
            not self.parser.state.isstate('detabbed') and \
            (parent.tag in self.ITEM_TYPES or \
                (len(parent) and parent[-1] and \
                    (parent[-1].tag in self.LIST_TYPES)
                )
            )
    def run(self, parent, blocks):
        block = blocks.pop(0)
        level, sibling = self.get_level(parent, block)
        # Strip ``level`` levels of indentation before re-parsing.
        block = self.looseDetab(block, level)
        self.parser.state.set('detabbed')
        if parent.tag in self.ITEM_TYPES:
            # It's possible that this parent has a 'ul' or 'ol' child list
            # with a member. If that is the case, then that should be the
            # parent. This is intended to catch the edge case of an indented
            # list whose first member was parsed previous to this point
            # see OListProcessor
            if len(parent) and parent[-1].tag in self.LIST_TYPES:
                self.parser.parseBlocks(parent[-1], [block])
            else:
                # The parent is already a li. Just parse the child block.
                self.parser.parseBlocks(parent, [block])
        elif sibling.tag in self.ITEM_TYPES:
            # The sibling is a li. Use it as parent.
            self.parser.parseBlocks(sibling, [block])
        elif len(sibling) and sibling[-1].tag in self.ITEM_TYPES:
            # The parent is a list (``ol`` or ``ul``) which has children.
            # Assume the last child li is the parent of this block.
            if sibling[-1].text:
                # If the parent li has text, that text needs to be moved to a p
                # The p must be 'inserted' at beginning of list in the event
                # that other children already exist i.e.; a nested sublist.
                p = util.etree.Element('p')
                p.text = sibling[-1].text
                sibling[-1].text = ''
                sibling[-1].insert(0, p)
            self.parser.parseChunk(sibling[-1], block)
        else:
            self.create_item(sibling, block)
        self.parser.state.reset()
    def create_item(self, parent, block):
        """ Create a new li and parse the block with it as the parent. """
        li = util.etree.SubElement(parent, 'li')
        self.parser.parseBlocks(li, [block])
    def get_level(self, parent, block):
        """ Get level of indent based on list level.
        Returns ``(level, parent)`` where ``parent`` is the deepest list
        element matching the block's indentation.
        """
        # Get indent level
        m = self.INDENT_RE.match(block)
        if m:
            indent_level = len(m.group(1))/self.tab_length
        else:
            indent_level = 0
        if self.parser.state.isstate('list'):
            # We're in a tightlist - so we already are at correct parent.
            level = 1
        else:
            # We're in a looselist - so we need to find parent.
            level = 0
        # Step through children of tree to find matching indent level.
        while indent_level > level:
            child = self.lastChild(parent)
            if child is not None and (child.tag in self.LIST_TYPES or child.tag in self.ITEM_TYPES):
                if child.tag in self.LIST_TYPES:
                    level += 1
                parent = child
            else:
                # No more child levels. If we're short of indent_level,
                # we have a code block. So we stop here.
                break
        return level, parent
class CodeBlockProcessor(BlockProcessor):
    """ Process code blocks.
    A code block is any block indented by at least one tab width.  A
    block immediately following an existing ``<pre><code>`` sibling is
    appended to it, so blank lines do not split a code block apart.
    """
    def test(self, parent, block):
        return block.startswith(' '*self.tab_length)
    def run(self, parent, blocks):
        sibling = self.lastChild(parent)
        block = blocks.pop(0)
        theRest = ''
        if sibling is not None and sibling.tag == "pre" and len(sibling) \
                and sibling[0].tag == "code":
            # The previous block was a code block. As blank lines do not start
            # new code blocks, append this block to the previous, adding back
            # linebreaks removed from the split into a list.
            code = sibling[0]
            block, theRest = self.detab(block)
            code.text = util.AtomicString('%s\n%s\n' % (code.text, block.rstrip()))
        else:
            # This is a new codeblock. Create the elements and insert text.
            pre = util.etree.SubElement(parent, 'pre')
            code = util.etree.SubElement(pre, 'code')
            block, theRest = self.detab(block)
            # AtomicString keeps inline processors from altering code text.
            code.text = util.AtomicString('%s\n' % block.rstrip())
        if theRest:
            # This block contained unindented line(s) after the first indented
            # line. Insert these lines as the first block of the master blocks
            # list for future processing.
            blocks.insert(0, theRest)
class BlockQuoteProcessor(BlockProcessor):
    """ Process blockquotes (lines prefixed with ``>``). """
    # Matches a quoted line anywhere in the block; group(2) is the line
    # content with the ``> `` prefix removed.
    RE = re.compile(r'(^|\n)[ ]{0,3}>[ ]?(.*)')
    def test(self, parent, block):
        return bool(self.RE.search(block))
    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # Lines before blockquote
            # Pass lines before blockquote in recursively for parsing first.
            self.parser.parseBlocks(parent, [before])
            # Remove ``> `` from beginning of each line.
            block = '\n'.join([self.clean(line) for line in
                            block[m.start():].split('\n')])
        sibling = self.lastChild(parent)
        if sibling is not None and sibling.tag == "blockquote":
            # Previous block was a blockquote so set that as this blocks parent
            quote = sibling
        else:
            # This is a new blockquote. Create a new parent element.
            quote = util.etree.SubElement(parent, 'blockquote')
        # Recursively parse block with blockquote as parent.
        # change parser state so blockquotes embedded in lists use p tags
        self.parser.state.set('blockquote')
        self.parser.parseChunk(quote, block)
        self.parser.state.reset()
    def clean(self, line):
        """ Remove ``>`` from beginning of a line. """
        m = self.RE.match(line)
        if line.strip() == ">":
            # A bare ``>`` marks an intentionally blank quoted line.
            return ""
        elif m:
            return m.group(2)
        else:
            return line
class OListProcessor(BlockProcessor):
    """ Process ordered list blocks.
    Also serves as the base class for :class:`UListProcessor`, which
    overrides ``TAG`` and ``RE`` only.
    """
    TAG = 'ol'
    # Detect an item (``1. item``). ``group(1)`` contains contents of item.
    RE = re.compile(r'^[ ]{0,3}\d+\.[ ]+(.*)')
    # Detect items on secondary lines. they can be of either list type.
    CHILD_RE = re.compile(r'^[ ]{0,3}((\d+\.)|[*+-])[ ]+(.*)')
    # Detect indented (nested) items of either type
    INDENT_RE = re.compile(r'^[ ]{4,7}((\d+\.)|[*+-])[ ]+.*')
    # The integer (python string) with which the lists starts (default=1)
    # Eg: If list is initialized as:
    #   3. Item
    # The ol tag will get starts="3" attribute
    STARTSWITH = '1'
    # List of allowed sibling tags.
    SIBLING_TAGS = ['ol', 'ul']
    def test(self, parent, block):
        return bool(self.RE.match(block))
    def run(self, parent, blocks):
        # Check for multiple items in one block.
        items = self.get_items(blocks.pop(0))
        sibling = self.lastChild(parent)
        if sibling is not None and sibling.tag in self.SIBLING_TAGS:
            # Previous block was a list item, so set that as parent
            lst = sibling
            # make sure previous item is in a p - if the item has text,
            # then it isn't in a p
            if lst[-1].text:
                # since it's possible there are other children for this
                # sibling, we can't just SubElement the p, we need to
                # insert it as the first item
                p = util.etree.Element('p')
                p.text = lst[-1].text
                lst[-1].text = ''
                lst[-1].insert(0, p)
            # if the last item has a tail, then the tail needs to be put in a p
            # likely only when a header is not followed by a blank line
            lch = self.lastChild(lst[-1])
            if lch is not None and lch.tail:
                p = util.etree.SubElement(lst[-1], 'p')
                p.text = lch.tail.lstrip()
                lch.tail = ''
            # parse first block differently as it gets wrapped in a p.
            li = util.etree.SubElement(lst, 'li')
            self.parser.state.set('looselist')
            firstitem = items.pop(0)
            self.parser.parseBlocks(li, [firstitem])
            self.parser.state.reset()
        elif parent.tag in ['ol', 'ul']:
            # this catches the edge case of a multi-item indented list whose
            # first item is in a blank parent-list item:
            # * * subitem1
            #     * subitem2
            # see also ListIndentProcessor
            lst = parent
        else:
            # This is a new list so create parent with appropriate tag.
            lst = util.etree.SubElement(parent, self.TAG)
            # Check if a custom start integer is set
            if not self.parser.markdown.lazy_ol and self.STARTSWITH !='1':
                lst.attrib['start'] = self.STARTSWITH
        self.parser.state.set('list')
        # Loop through items in block, recursively parsing each with the
        # appropriate parent.
        for item in items:
            if item.startswith(' '*self.tab_length):
                # Item is indented. Parse with last item as parent
                self.parser.parseBlocks(lst[-1], [item])
            else:
                # New item. Create li and parse with it as parent
                li = util.etree.SubElement(lst, 'li')
                self.parser.parseBlocks(li, [item])
        self.parser.state.reset()
    def get_items(self, block):
        """ Break a block into list items. """
        items = []
        for line in block.split('\n'):
            m = self.CHILD_RE.match(line)
            if m:
                # This is a new list item
                # Check first item for the start index
                if not items and self.TAG=='ol':
                    # Detect the integer value of first list item
                    INTEGER_RE = re.compile('(\d+)')
                    self.STARTSWITH = INTEGER_RE.match(m.group(1)).group()
                # Append to the list
                items.append(m.group(3))
            elif self.INDENT_RE.match(line):
                # This is an indented (possibly nested) item.
                if items[-1].startswith(' '*self.tab_length):
                    # Previous item was indented. Append to that item.
                    items[-1] = '%s\n%s' % (items[-1], line)
                else:
                    items.append(line)
            else:
                # This is another line of previous item. Append to that item.
                items[-1] = '%s\n%s' % (items[-1], line)
        return items
class UListProcessor(OListProcessor):
    """ Process unordered list blocks.
    Identical to :class:`OListProcessor` except for the ``ul`` tag and
    the bullet item pattern (``*``, ``+`` or ``-``).
    """
    TAG = 'ul'
    RE = re.compile(r'^[ ]{0,3}[*+-][ ]+(.*)')
class HashHeaderProcessor(BlockProcessor):
    """ Process Hash Headers.
    Handles ATX-style headers (``# H1`` through ``###### H6``), which
    may occur on any line of a block; text before and after the header
    line is re-queued for normal parsing.
    """
    # Detect a header at start of any line in block
    RE = re.compile(r'(^|\n)(?P<level>#{1,6})(?P<header>.*?)#*(\n|$)')
    def test(self, parent, block):
        return bool(self.RE.search(block))
    def run(self, parent, blocks):
        block = blocks.pop(0)
        m = self.RE.search(block)
        if m:
            before = block[:m.start()]  # All lines before header
            after = block[m.end():]     # All lines after header
            if before:
                # As the header was not the first line of the block and the
                # lines before the header must be parsed first,
                # recursively parse these lines as a block.
                self.parser.parseBlocks(parent, [before])
            # Create header using named groups from RE
            h = util.etree.SubElement(parent, 'h%d' % len(m.group('level')))
            h.text = m.group('header').strip()
            if after:
                # Insert remaining lines as first block for future parsing.
                blocks.insert(0, after)
        else:
            # This should never happen, but just in case...
            # ``warning`` replaces the deprecated ``warn`` alias; lazy
            # %-style args defer formatting until the record is emitted.
            logger.warning("We've got a problem header: %r", block)
class SetextHeaderProcessor(BlockProcessor):
    """ Process Setext-style Headers.
    A Setext header is a line of text underlined with ``=`` (h1) or
    ``-`` (h2) on the block's second line.
    """
    # Detect Setext-style header. Must be first 2 lines of block.
    RE = re.compile(r'^.*?\n[=-]+[ ]*(\n|$)', re.MULTILINE)
    def test(self, parent, block):
        return self.RE.match(block) is not None
    def run(self, parent, blocks):
        lines = blocks.pop(0).split('\n')
        # ``=`` underline maps to h1, ``-`` underline to h2.
        level = 1 if lines[1].startswith('=') else 2
        heading = util.etree.SubElement(parent, 'h%d' % level)
        heading.text = lines[0].strip()
        if len(lines) > 2:
            # Re-queue any remaining lines as a block for later parsing.
            blocks.insert(0, '\n'.join(lines[2:]))
class HRProcessor(BlockProcessor):
    """ Process Horizontal Rules.
    NOTE: ``test`` stores its match object on ``self.match`` and ``run``
    reads it back, so the two calls are stateful and must be paired.
    """
    # Three or more -, _ or * (each optionally followed by up to two
    # spaces), after at most three spaces of indentation.
    RE = r'^[ ]{0,3}((-+[ ]{0,2}){3,}|(_+[ ]{0,2}){3,}|(\*+[ ]{0,2}){3,})[ ]*'
    # Detect hr on any line of a block.
    SEARCH_RE = re.compile(RE, re.MULTILINE)
    def test(self, parent, block):
        m = self.SEARCH_RE.search(block)
        # No atomic grouping in python so we simulate it here for performance.
        # The regex only matches what would be in the atomic group - the HR.
        # Then check if we are at end of block or if next char is a newline.
        if m and (m.end() == len(block) or block[m.end()] == '\n'):
            # Save match object on class instance so we can use it later.
            self.match = m
            return True
        return False
    def run(self, parent, blocks):
        block = blocks.pop(0)
        # Check for lines in block before hr.
        prelines = block[:self.match.start()].rstrip('\n')
        if prelines:
            # Recursively parse lines before hr so they get parsed first.
            self.parser.parseBlocks(parent, [prelines])
        # create hr
        util.etree.SubElement(parent, 'hr')
        # check for lines in block after hr.
        postlines = block[self.match.end():].lstrip('\n')
        if postlines:
            # Add lines after hr to master blocks for later parsing.
            blocks.insert(0, postlines)
class EmptyBlockProcessor(BlockProcessor):
    """ Process blocks that are empty or start with an empty line.
    Empty blocks are normally discarded, but when the previous sibling
    is a code block the blank line(s) are appended to it so whitespace
    inside code is preserved.
    """
    def test(self, parent, block):
        return not block or block.startswith('\n')
    def run(self, parent, blocks):
        block = blocks.pop(0)
        filler = '\n\n'
        if block:
            # Starts with empty line
            # Only replace a single line.
            filler = '\n'
            # Save the rest for later.
            theRest = block[1:]
            if theRest:
                # Add remaining lines to master blocks for later.
                blocks.insert(0, theRest)
        sibling = self.lastChild(parent)
        if sibling is not None and sibling.tag == 'pre' and len(sibling) and sibling[0].tag == 'code':
            # Last block is a codeblock. Append to preserve whitespace.
            sibling[0].text = util.AtomicString('%s%s' % (sibling[0].text, filler))
class ParagraphProcessor(BlockProcessor):
    """ Process Paragraph blocks.
    The catch-all processor: ``test`` always matches, so any block not
    claimed by an earlier processor becomes a paragraph (or bare list
    item text when inside a tight list).
    """
    def test(self, parent, block):
        return True
    def run(self, parent, blocks):
        block = blocks.pop(0)
        if block.strip():
            # Not a blank block. Add to parent, otherwise throw it away.
            if self.parser.state.isstate('list'):
                # The parent is a tight-list.
                #
                # Check for any children. This will likely only happen in a
                # tight-list when a header isn't followed by a blank line.
                # For example:
                #
                #     * # Header
                #     Line 2 of list item - not part of header.
                sibling = self.lastChild(parent)
                if sibling is not None:
                    # Insert after sibling.
                    if sibling.tail:
                        sibling.tail = '%s\n%s' % (sibling.tail, block)
                    else:
                        sibling.tail = '\n%s' % block
                else:
                    # Append to parent.text
                    if parent.text:
                        parent.text = '%s\n%s' % (parent.text, block)
                    else:
                        parent.text = block.lstrip()
            else:
                # Create a regular paragraph
                p = util.etree.SubElement(parent, 'p')
                p.text = block.lstrip()
| mit |
pdufour/sqlalchemy | lib/sqlalchemy/sql/dml.py | 40 | 31506 | # sql/dml.py
# Copyright (C) 2009-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Provide :class:`.Insert`, :class:`.Update` and :class:`.Delete`.
"""
from .base import Executable, _generative, _from_objects, DialectKWArgs
from .elements import ClauseElement, _literal_as_text, Null, and_, _clone, \
_column_as_key
from .selectable import _interpret_as_from, _interpret_as_select, HasPrefixes
from .. import util
from .. import exc
class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement):
    """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements.
    """
    __visit_name__ = 'update_base'
    # DML statements autocommit by default when executed outside an
    # explicit transaction.
    _execution_options = \
        Executable._execution_options.union({'autocommit': True})
    _hints = util.immutabledict()
    _prefixes = ()
    def _process_colparams(self, parameters):
        # Normalize the values argument; returns a 2-tuple
        # ``(parameters, has_multi_parameters)``.  A tuple/list value is
        # matched positionally against the table's columns to build a
        # dict; a list whose first element is itself a list/tuple/dict
        # is treated as a "multiple parameter sets" construct.
        def process_single(p):
            if isinstance(p, (list, tuple)):
                return dict(
                    (c.key, pval)
                    for c, pval in zip(self.table.c, p)
                )
            else:
                return p
        if (isinstance(parameters, (list, tuple)) and parameters and
                isinstance(parameters[0], (list, tuple, dict))):
            if not self._supports_multi_parameters:
                raise exc.InvalidRequestError(
                    "This construct does not support "
                    "multiple parameter sets.")
            return [process_single(p) for p in parameters], True
        else:
            return process_single(parameters), False
    def params(self, *arg, **kw):
        """Set the parameters for the statement.
        This method raises ``NotImplementedError`` on the base class,
        and is overridden by :class:`.ValuesBase` to provide the
        SET/VALUES clause of UPDATE and INSERT.
        """
        raise NotImplementedError(
            "params() is not supported for INSERT/UPDATE/DELETE statements."
            " To set the values for an INSERT or UPDATE statement, use"
            " stmt.values(**parameters).")
    def bind(self):
        """Return a 'bind' linked to this :class:`.UpdateBase`
        or a :class:`.Table` associated with it.
        """
        return self._bind or self.table.bind
    def _set_bind(self, bind):
        self._bind = bind
    # Expose ``bind`` as a read/write property built from the two
    # methods above.
    bind = property(bind, _set_bind)
    @_generative
    def returning(self, *cols):
        """Add a :term:`RETURNING` or equivalent clause to this statement.
        e.g.::
            stmt = table.update().\\
                where(table.c.data == 'value').\\
                values(status='X').\\
                returning(table.c.server_flag,
                          table.c.updated_timestamp)
            for server_flag, updated_timestamp in connection.execute(stmt):
                print(server_flag, updated_timestamp)
        The given collection of column expressions should be derived from
        the table that is
        the target of the INSERT, UPDATE, or DELETE.  While :class:`.Column`
        objects are typical, the elements can also be expressions::
            stmt = table.insert().returning(
                (table.c.first_name + " " + table.c.last_name).
                label('fullname'))
        Upon compilation, a RETURNING clause, or database equivalent,
        will be rendered within the statement.  For INSERT and UPDATE,
        the values are the newly inserted/updated values.  For DELETE,
        the values are those of the rows which were deleted.
        Upon execution, the values of the columns to be returned are made
        available via the result set and can be iterated using
        :meth:`.ResultProxy.fetchone` and similar.  For DBAPIs which do not
        natively support returning values (i.e. cx_oracle), SQLAlchemy will
        approximate this behavior at the result level so that a reasonable
        amount of behavioral neutrality is provided.
        Note that not all databases/DBAPIs
        support RETURNING.  For those backends with no support,
        an exception is raised upon compilation and/or execution.
        For those who do support it, the functionality across backends
        varies greatly, including restrictions on executemany()
        and other statements which return multiple rows. Please
        read the documentation notes for the database in use in
        order to determine the availability of RETURNING.
        .. seealso::
            :meth:`.ValuesBase.return_defaults` - an alternative method
            tailored towards efficient fetching of server-side defaults and
            triggers for single-row INSERTs or UPDATEs.
        """
        self._returning = cols
    @_generative
    def with_hint(self, text, selectable=None, dialect_name="*"):
        """Add a table hint for a single table to this
        INSERT/UPDATE/DELETE statement.
        .. note::
            :meth:`.UpdateBase.with_hint` currently applies only to
            Microsoft SQL Server.  For MySQL INSERT/UPDATE/DELETE hints, use
            :meth:`.UpdateBase.prefix_with`.
        The text of the hint is rendered in the appropriate
        location for the database backend in use, relative
        to the :class:`.Table` that is the subject of this
        statement, or optionally to that of the given
        :class:`.Table` passed as the ``selectable`` argument.
        The ``dialect_name`` option will limit the rendering of a particular
        hint to a particular backend.  Such as, to add a hint
        that only takes effect for SQL Server::
            mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql")
        .. versionadded:: 0.7.6
        :param text: Text of the hint.
        :param selectable: optional :class:`.Table` that specifies
         an element of the FROM clause within an UPDATE or DELETE
         to be the subject of the hint - applies only to certain backends.
        :param dialect_name: defaults to ``*``, if specified as the name
         of a particular dialect, will apply these hints only when
         that dialect is in use.
        """
        if selectable is None:
            # Hint applies to the statement's own table by default.
            selectable = self.table
        self._hints = self._hints.union(
            {(selectable, dialect_name): text})
class ValuesBase(UpdateBase):
    """Supplies support for :meth:`.ValuesBase.values` to
    INSERT and UPDATE constructs."""
    __visit_name__ = 'values_base'
    # subclasses (e.g. Insert) override these to enable multi-row VALUES
    _supports_multi_parameters = False
    _has_multi_parameters = False
    select = None
    def __init__(self, table, values, prefixes):
        self.table = _interpret_as_from(table)
        self.parameters, self._has_multi_parameters = \
            self._process_colparams(values)
        if prefixes:
            self._setup_prefixes(prefixes)
    @_generative
    def values(self, *args, **kwargs):
        """specify a fixed VALUES clause for an INSERT statement, or the SET
        clause for an UPDATE.
        Note that the :class:`.Insert` and :class:`.Update` constructs support
        per-execution time formatting of the VALUES and/or SET clauses,
        based on the arguments passed to :meth:`.Connection.execute`.
        However, the :meth:`.ValuesBase.values` method can be used to "fix" a
        particular set of parameters into the statement.
        Multiple calls to :meth:`.ValuesBase.values` will produce a new
        construct, each one with the parameter list modified to include
        the new parameters sent.  In the typical case of a single
        dictionary of parameters, the newly passed keys will replace
        the same keys in the previous construct.  In the case of a list-based
        "multiple values" construct, each new list of values is extended
        onto the existing list of values.
        :param \**kwargs: key value pairs representing the string key
         of a :class:`.Column` mapped to the value to be rendered into the
         VALUES or SET clause::
            users.insert().values(name="some name")
            users.update().where(users.c.id==5).values(name="some name")
        :param \*args: Alternatively, a dictionary, tuple or list
         of dictionaries or tuples can be passed as a single positional
         argument in order to form the VALUES or
         SET clause of the statement.  The single dictionary form
         works the same as the kwargs form::
            users.insert().values({"name": "some name"})
         If a tuple is passed, the tuple should contain the same number
         of columns as the target :class:`.Table`::
            users.insert().values((5, "some name"))
        The :class:`.Insert` construct also supports multiply-rendered VALUES
        construct, for those backends which support this SQL syntax
        (SQLite, Postgresql, MySQL).  This mode is indicated by passing a
        list of one or more dictionaries/tuples::
            users.insert().values([
                {"name": "some name"},
                {"name": "some other name"},
                {"name": "yet another name"},
            ])
        In the case of an :class:`.Update`
        construct, only the single dictionary/tuple form is accepted,
        else an exception is raised.  It is also an exception case to
        attempt to mix the single-/multiple- value styles together,
        either through multiple :meth:`.ValuesBase.values` calls
        or by sending a list + kwargs at the same time.
        .. note::
            Passing a multiple values list is *not* the same
            as passing a multiple values list to the
            :meth:`.Connection.execute` method.  Passing a list of parameter
            sets to :meth:`.ValuesBase.values` produces a construct of this
            form::
                INSERT INTO table (col1, col2, col3) VALUES
                                (col1_0, col2_0, col3_0),
                                (col1_1, col2_1, col3_1),
                                ...
            whereas a multiple list passed to :meth:`.Connection.execute`
            has the effect of using the DBAPI
            `executemany() <http://www.python.org/dev/peps/pep-0249/#id18>`_
            method, which provides a high-performance system of invoking
            a single-row INSERT or single-criteria UPDATE or DELETE statement
            many times against a series
            of parameter sets.  The "executemany" style is supported by
            all database backends, and works equally well for INSERT,
            UPDATE, and DELETE, as it does not depend on a special SQL
            syntax.  See :ref:`execute_multiple` for an introduction to
            the traditional Core method of multiple parameter set invocation
            using this system.
        .. versionadded:: 0.8
            Support for multiple-VALUES INSERT statements.
        .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES
           clause, even a list of length one,
           implies that the :paramref:`.Insert.inline` flag is set to
           True, indicating that the statement will not attempt to fetch
           the "last inserted primary key" or other defaults.  The statement
           deals with an arbitrary number of rows, so the
           :attr:`.ResultProxy.inserted_primary_key` accessor does not apply.
        .. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports
           columns with Python side default values and callables in the
           same way as that of an "executemany" style of invocation; the
           callable is invoked for each row.   See :ref:`bug_3288`
           for other details.
        .. seealso::
            :ref:`inserts_and_updates` - SQL Expression
            Language Tutorial
            :func:`~.expression.insert` - produce an ``INSERT`` statement
            :func:`~.expression.update` - produce an ``UPDATE`` statement
        """
        if self.select is not None:
            raise exc.InvalidRequestError(
                "This construct already inserts from a SELECT")
        if self._has_multi_parameters and kwargs:
            raise exc.InvalidRequestError(
                "This construct already has multiple parameter sets.")
        if args:
            if len(args) > 1:
                raise exc.ArgumentError(
                    "Only a single dictionary/tuple or list of "
                    "dictionaries/tuples is accepted positionally.")
            v = args[0]
        else:
            v = {}
        if self.parameters is None:
            self.parameters, self._has_multi_parameters = \
                self._process_colparams(v)
        else:
            # Merging new values into existing ones: multi-parameter
            # constructs extend the list; single dicts are copied and
            # updated.  Mixing the two styles is rejected.
            if self._has_multi_parameters:
                self.parameters = list(self.parameters)
                p, self._has_multi_parameters = self._process_colparams(v)
                if not self._has_multi_parameters:
                    raise exc.ArgumentError(
                        "Can't mix single-values and multiple values "
                        "formats in one statement")
                self.parameters.extend(p)
            else:
                self.parameters = self.parameters.copy()
                p, self._has_multi_parameters = self._process_colparams(v)
                if self._has_multi_parameters:
                    raise exc.ArgumentError(
                        "Can't mix single-values and multiple values "
                        "formats in one statement")
                self.parameters.update(p)
        if kwargs:
            if self._has_multi_parameters:
                # fix typo in user-facing message ("simultaenously")
                raise exc.ArgumentError(
                    "Can't pass kwargs and multiple parameter sets "
                    "simultaneously")
            else:
                self.parameters.update(kwargs)
    @_generative
    def return_defaults(self, *cols):
        """Make use of a :term:`RETURNING` clause for the purpose
        of fetching server-side expressions and defaults.
        E.g.::
            stmt = table.insert().values(data='newdata').return_defaults()
            result = connection.execute(stmt)
            server_created_at = result.returned_defaults['created_at']
        When used against a backend that supports RETURNING, all column
        values generated by SQL expression or server-side-default will be
        added to any existing RETURNING clause, provided that
        :meth:`.UpdateBase.returning` is not used simultaneously.  The column
        values will then be available on the result using the
        :attr:`.ResultProxy.returned_defaults` accessor as a dictionary,
        referring to values keyed to the :class:`.Column` object as well as
        its ``.key``.
        This method differs from :meth:`.UpdateBase.returning` in these ways:
        1. :meth:`.ValuesBase.return_defaults` is only intended for use with
           an INSERT or an UPDATE statement that matches exactly one row.
           While the RETURNING construct in the general sense supports
           multiple rows for a multi-row UPDATE or DELETE statement, or for
           special cases of INSERT that return multiple rows (e.g. INSERT from
           SELECT, multi-valued VALUES clause),
           :meth:`.ValuesBase.return_defaults` is intended only for an
           "ORM-style" single-row INSERT/UPDATE statement.  The row returned
           by the statement is also consumed implcitly when
           :meth:`.ValuesBase.return_defaults` is used.  By contrast,
           :meth:`.UpdateBase.returning` leaves the RETURNING result-set
           intact with a collection of any number of rows.
        2. It is compatible with the existing logic to fetch auto-generated
           primary key values, also known as "implicit returning".  Backends
           that support RETURNING will automatically make use of RETURNING in
           order to fetch the value of newly generated primary keys; while the
           :meth:`.UpdateBase.returning` method circumvents this behavior,
           :meth:`.ValuesBase.return_defaults` leaves it intact.
        3. It can be called against any backend.  Backends that don't support
           RETURNING will skip the usage of the feature, rather than raising
           an exception.  The return value of
           :attr:`.ResultProxy.returned_defaults` will be ``None``
        :meth:`.ValuesBase.return_defaults` is used by the ORM to provide
        an efficient implementation for the ``eager_defaults`` feature of
        :func:`.mapper`.
        :param cols: optional list of column key names or :class:`.Column`
         objects.  If omitted, all column expressions evaluated on the server
         are added to the returning list.
        .. versionadded:: 0.9.0
        .. seealso::
            :meth:`.UpdateBase.returning`
            :attr:`.ResultProxy.returned_defaults`
        """
        # ``True`` means "all server-evaluated columns".
        self._return_defaults = cols or True
class Insert(ValuesBase):
    """Represent an INSERT construct.
    The :class:`.Insert` object is created using the
    :func:`~.expression.insert()` function.
    .. seealso::
        :ref:`coretutorial_insert_expressions`
    """
    __visit_name__ = 'insert'
    # INSERT is the only DML construct that accepts a list of parameter
    # sets for a multi-row VALUES clause.
    _supports_multi_parameters = True
    def __init__(self,
                 table,
                 values=None,
                 inline=False,
                 bind=None,
                 prefixes=None,
                 returning=None,
                 return_defaults=False,
                 **dialect_kw):
        """Construct an :class:`.Insert` object.
        Similar functionality is available via the
        :meth:`~.TableClause.insert` method on
        :class:`~.schema.Table`.
        :param table: :class:`.TableClause` which is the subject of the
         insert.
        :param values: collection of values to be inserted; see
         :meth:`.Insert.values` for a description of allowed formats here.
         Can be omitted entirely; a :class:`.Insert` construct will also
         dynamically render the VALUES clause at execution time based on
         the parameters passed to :meth:`.Connection.execute`.
        :param inline: if True, no attempt will be made to retrieve the
         SQL-generated default values to be provided within the statement;
         in particular,
         this allows SQL expressions to be rendered 'inline' within the
         statement without the need to pre-execute them beforehand; for
         backends that support "returning", this turns off the "implicit
         returning" feature for the statement.
        If both `values` and compile-time bind parameters are present, the
        compile-time bind parameters override the information specified
        within `values` on a per-key basis.
        The keys within `values` can be either
        :class:`~sqlalchemy.schema.Column` objects or their string
        identifiers. Each key may reference one of:
        * a literal data value (i.e. string, number, etc.);
        * a Column object;
        * a SELECT statement.
        If a ``SELECT`` statement is specified which references this
        ``INSERT`` statement's table, the statement will be correlated
        against the ``INSERT`` statement.
        .. seealso::
            :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial
            :ref:`inserts_and_updates` - SQL Expression Tutorial
        """
        ValuesBase.__init__(self, table, values, prefixes)
        self._bind = bind
        # ``select``/``select_names`` are populated by from_select()
        self.select = self.select_names = None
        self.include_insert_from_select_defaults = False
        self.inline = inline
        self._returning = returning
        self._validate_dialect_kwargs(dialect_kw)
        self._return_defaults = return_defaults
    def get_children(self, **kwargs):
        # The embedded SELECT (if INSERT..FROM SELECT) is the only child.
        if self.select is not None:
            return self.select,
        else:
            return ()
    @_generative
    def from_select(self, names, select, include_defaults=True):
        """Return a new :class:`.Insert` construct which represents
        an ``INSERT...FROM SELECT`` statement.
        e.g.::
            sel = select([table1.c.a, table1.c.b]).where(table1.c.c > 5)
            ins = table2.insert().from_select(['a', 'b'], sel)
        :param names: a sequence of string column names or :class:`.Column`
         objects representing the target columns.
        :param select: a :func:`.select` construct, :class:`.FromClause`
         or other construct which resolves into a :class:`.FromClause`,
         such as an ORM :class:`.Query` object, etc.  The order of
         columns returned from this FROM clause should correspond to the
         order of columns sent as the ``names`` parameter;  while this
         is not checked before passing along to the database, the database
         would normally raise an exception if these column lists don't
         correspond.
        :param include_defaults: if True, non-server default values and
         SQL expressions as specified on :class:`.Column` objects
         (as documented in :ref:`metadata_defaults_toplevel`) not
         otherwise specified in the list of names will be rendered
         into the INSERT and SELECT statements, so that these values are also
         included in the data to be inserted.
         .. note:: A Python-side default that uses a Python callable function
            will only be invoked **once** for the whole statement, and **not
            per row**.
         .. versionadded:: 1.0.0 - :meth:`.Insert.from_select` now renders
            Python-side and SQL expression column defaults into the
            SELECT statement for columns otherwise not included in the
            list of column names.
        .. versionchanged:: 1.0.0 an INSERT that uses FROM SELECT
           implies that the :paramref:`.insert.inline` flag is set to
           True, indicating that the statement will not attempt to fetch
           the "last inserted primary key" or other defaults.  The statement
           deals with an arbitrary number of rows, so the
           :attr:`.ResultProxy.inserted_primary_key` accessor does not apply.
        .. versionadded:: 0.8.3
        """
        if self.parameters:
            raise exc.InvalidRequestError(
                "This construct already inserts value expressions")
        # Placeholder Null() per target column; the SELECT supplies the
        # actual values at execution time.
        self.parameters, self._has_multi_parameters = \
            self._process_colparams(
                dict((_column_as_key(n), Null()) for n in names))
        self.select_names = names
        self.inline = True
        self.include_insert_from_select_defaults = include_defaults
        self.select = _interpret_as_select(select)
    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        self.parameters = self.parameters.copy()
        if self.select is not None:
            self.select = _clone(self.select)
class Update(ValuesBase):
    """Represent an UPDATE construct.

    Instances are created via the :func:`update()` function or the
    :meth:`~.TableClause.update` method on :class:`.Table`.
    """

    __visit_name__ = 'update'

    def __init__(self,
                 table,
                 whereclause=None,
                 values=None,
                 inline=False,
                 bind=None,
                 prefixes=None,
                 returning=None,
                 return_defaults=False,
                 **dialect_kw):
        """Construct an :class:`.Update` object.

        E.g.::

            from sqlalchemy import update

            stmt = update(users).where(users.c.id==5).\\
                    values(name='user #5')

        The same statement can be built generatively via
        :meth:`~.TableClause.update` on :class:`.Table`.

        :param table: A :class:`.Table` object representing the database
         table to be updated.

        :param whereclause: Optional SQL expression describing the ``WHERE``
         condition of the ``UPDATE`` statement; the generative
         :meth:`~Update.where` method may be used instead.  The WHERE clause
         may refer to multiple tables; on backends that support it an
         ``UPDATE FROM`` clause (or, on MySQL, a multi-table update) is
         emitted.  A SQL-standard alternative is a correlated subquery::

            users.update().values(name='ed').where(
                    users.c.name==select([addresses.c.email_address]).\\
                                where(addresses.c.user_id==users.c.id).\\
                                as_scalar()
                    )

        :param values: Optional dictionary giving the ``SET`` conditions of
         the ``UPDATE``.  If ``None``, the SET conditions are taken from the
         parameters passed at execution/compile time (or, compiled standalone
         with no parameters, all columns are rendered).  Keys may be
         :class:`.Column` objects or their string keys; values may be literal
         data or SQL expressions such as a correlated scalar
         :func:`.select`.  Compile-time bind parameters override ``values``
         on a per-key basis.  The generative :meth:`.Update.values` method
         is the modern spelling.

        :param inline: if True, SQL defaults declared on :class:`.Column`
         objects via the ``default`` keyword are rendered inline in the
         statement rather than pre-executed, and therefore will not appear
         in :meth:`.ResultProxy.last_updated_params`.

        .. seealso::

            :ref:`inserts_and_updates` - SQL Expression
            Language Tutorial

        """
        ValuesBase.__init__(self, table, values, prefixes)
        self._bind = bind
        self._returning = returning
        self._whereclause = (_literal_as_text(whereclause)
                             if whereclause is not None
                             else None)
        self.inline = inline
        self._validate_dialect_kwargs(dialect_kw)
        self._return_defaults = return_defaults

    def get_children(self, **kwargs):
        # The WHERE clause is the only child element, when present.
        if self._whereclause is None:
            return ()
        return (self._whereclause,)

    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        self._whereclause = clone(self._whereclause, **kw)
        self.parameters = self.parameters.copy()

    @_generative
    def where(self, whereclause):
        """return a new update() construct with the given expression added to
        its WHERE clause, joined to the existing clause via AND, if any.

        """
        criterion = _literal_as_text(whereclause)
        if self._whereclause is None:
            self._whereclause = criterion
        else:
            self._whereclause = and_(self._whereclause, criterion)

    @property
    def _extra_froms(self):
        # TODO: this could be made memoized
        # if the memoization is reset on each generative call.
        extra = []
        covered = set([self.table])
        if self._whereclause is not None:
            for from_obj in _from_objects(self._whereclause):
                # skip the target table and anything already represented
                # by a clone of a seen selectable
                if not covered.intersection(from_obj._cloned_set):
                    extra.append(from_obj)
                    covered.update(from_obj._cloned_set)
        return extra
class Delete(UpdateBase):
    """Represent a DELETE construct.

    Instances are created via the :func:`delete()` function or the
    :meth:`~.TableClause.delete` method on :class:`~.schema.Table`.
    """

    __visit_name__ = 'delete'

    def __init__(self,
                 table,
                 whereclause=None,
                 bind=None,
                 returning=None,
                 prefixes=None,
                 **dialect_kw):
        """Construct :class:`.Delete` object.

        :param table: The table to delete rows from.

        :param whereclause: A :class:`.ClauseElement` describing the
         ``WHERE`` condition of the ``DELETE`` statement; the generative
         :meth:`~Delete.where` method may be used instead.

        .. seealso::

            :ref:`deletes` - SQL Expression Tutorial

        """
        self._bind = bind
        self.table = _interpret_as_from(table)
        self._returning = returning
        if prefixes:
            self._setup_prefixes(prefixes)
        self._whereclause = (_literal_as_text(whereclause)
                             if whereclause is not None
                             else None)
        self._validate_dialect_kwargs(dialect_kw)

    def get_children(self, **kwargs):
        # The WHERE clause is the only child element, when present.
        if self._whereclause is None:
            return ()
        return (self._whereclause,)

    @_generative
    def where(self, whereclause):
        """Add the given WHERE clause to a newly returned delete construct."""
        criterion = _literal_as_text(whereclause)
        if self._whereclause is None:
            self._whereclause = criterion
        else:
            self._whereclause = and_(self._whereclause, criterion)

    def _copy_internals(self, clone=_clone, **kw):
        # TODO: coverage
        self._whereclause = clone(self._whereclause, **kw)
# ---- End of preceding file (license: MIT) ----
# ---- File: src/experiment_HaloIndep_Band.py (repo: Andreea-G/Codds_DarkMatter) ----
"""
Copyright (c) 2015 Andreea Georgescu
Created on Wed Mar 4 00:47:37 2015
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
# TODO! This only works for CDMSSi!
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from experiment_HaloIndep import *
import interp_uniform as unif
# from interp import interp1d
from scipy import interpolate
from scipy.optimize import brentq, minimize
from basinhopping import *
import matplotlib.pyplot as plt
import os # for speaking
import parallel_map as par
# Module-level switches controlling verbosity and minimization strategy.
# Spelled as literal booleans rather than the single-letter T/F aliases
# pulled in by the star import, which obscure intent and shadow easily.
DEBUG = False            # print diagnostic info from the optimizers
DEBUG_FULL = False       # print per-call likelihood/constraint details
USE_BASINHOPPING = True  # run basinhopping on top of the local minimizer
ADAPT_KWARGS = False     # randomly perturb minimizer kwargs between hops
ALLOW_MOVE = True        # allow vminStar_index to shift in constrained fits
class ConstraintsFunction(object):
    """ Class to implement the constraints function that will be passed as an
    argument to the minimization routines.
    Input:
        args: Arguments needed for calculating the constraints:
            vminStar, logetaStar, vminStar_index
    """
    def __init__(self, *args):
        self.vminStar = args[0]
        self.logetaStar = args[1]
        self.vminStar_index = args[2]
        # Upper bound allowed for any vmin step.
        self.vmin_max = 2000

    def __call__(self, x, close=True):
        """
        Input:
            x: ndarray
                Concatenation [vmin_1..vmin_No, logeta_1..logeta_No].
        Returns:
            constraints: ndarray
                Constraints vector, where each value must be >= 0 for the
                constraint to be satisfied. Contains:
                    0 - 8: bounds: 3 * (x.size/2) constraints = 9 for x.size/2 = 3
                    9 - 12: sorted array: 2 * (x.size/2 - 1) constraints = 4 for x.size/2 = 3
                    13 - 15: vminStar_index: x.size/2 constraints = 3 for x.size/2 = 3
                    16 - 18: vminStar and logetaStar: x.size/2 constraints = 3 for x.size/2 = 3
        """
        # NOTE: with `from __future__ import division` in effect, x.size/2 is
        # a float and float slice indices raise on modern numpy; use floor
        # division to split x into its vmin and logeta halves.
        half = x.size // 2
        constraints = np.concatenate([
            x[:half], self.vmin_max - x[:half], -x[half:],
            np.diff(x[:half]), np.diff(-x[half:]),
            (x[:half] - self.vminStar) * (-x[half:] + self.logetaStar),
            self.vminStar - x[:self.vminStar_index],
            x[self.vminStar_index: half] - self.vminStar,
            x[half: half + self.vminStar_index] - self.logetaStar,
            self.logetaStar - x[half + self.vminStar_index:]])
        if close:
            # Treat tiny violations (|c| < 1e-5) as satisfied by flipping
            # their sign, except for the first 3*half hard bounds.
            is_not_close = np.logical_not(
                np.isclose(constraints, np.zeros_like(constraints), atol=1e-5))
            is_not_close[:3 * half] = True
            constraints = np.where(is_not_close, constraints, np.abs(constraints))
        if np.any(np.isnan(constraints)):
            raise ValueError
        return constraints
class Experiment_EHI(Experiment_HaloIndep):
""" Class implementing the extended maximum likelihood halo-independent (EHI)
method to obtain the confidence band for experiments with potential signals and
unbinned data (arXiv:1507.03902).
Input:
expername: string
The name of the experiment.
scattering_type: string
The type of scattering. Can be
- 'SI' (spin-independent)
- 'SDAV' (spin-dependent, axial-vector)
- 'SDPS' (spin-dependent, pseudo-scalar)
mPhi: float, optional
The mass of the mediator.
method: str, optional
Type of minimization solver to be passed as a parameter to the minimization
routine. Can be 'SLSQP' or 'COBYLA'.
"""
def __init__(self, expername, scattering_type, mPhi=mPhiRef, method='SLSQP'):
super().__init__(expername, scattering_type, mPhi)
module = import_file(INPUT_DIR + expername + ".py")
self.ERecoilList = module.ERecoilList
self.mu_BKG_i = module.mu_BKG_i
self.NBKG = module.NBKG
self.method = method
def _VMinSortedList(self, mx, fp, fn, delta):
""" Computes the list of vmin corresponsing to measured recoil energies,
sorted in increasing order. Will be useful as starting guesses.
"""
self.vmin_sorted_list = np.sort(VMin(self.ERecoilList, self.mT[0], mx, delta))
return
def ResponseTables(self, vmin_min, vmin_max, vmin_step, mx, fp, fn, delta,
output_file_tail):
""" Computes response tables
- self.diff_response_tab is a table of [vmin, DifferentialResponse(Eee_i)]
pairs for each vmin in the range [vminmin, vminmax], corresponding to measured
recoil energies Eee_i. It is a 3D matrix where
axis = 0 has dimension self.ERecoilList.size()
axis = 1 has dimension vmin_list.size() + 1 (where + 1 is because we
prepend zeros for vmin = 0)
axis = 2 has dimension 2 for the pairs of [vmin, diff_response].
- self.response_tab is a table of [vmin, Response] pairs for each vmin
in the range [vminmin, vminmax], corresponding to DifferentialResponse
integrated over the full energy range. It is a 2D matrix where
axis = 1 has dimension vmin_list.size() + 1 (where +1 is because we
prepend zeros for vmin = 0)
axis = 2 has dimension 2 for the pairs of [vmin, diff_response].
Input:
vmin_min, vmin_max, vmin_step: float
Vmin range and vmin step size.
mx, fp, fn, delta: float
output_file_tail: string
Tag to be added to the file name since the results for
self.vmin_sorted_list, self.diff_response_tab and self.response_tab
are each written to files.
"""
self._VMinSortedList(mx, fp, fn, delta)
file = output_file_tail + "_VminSortedList.dat"
print(file)
np.savetxt(file, self.vmin_sorted_list)
if delta == 0:
branches = [1]
else:
branches = [1, -1]
self.vmin_linspace = np.linspace(vmin_min, vmin_max,
(vmin_max - vmin_min)/vmin_step + 1)
self.diff_response_tab = np.zeros((self.ERecoilList.size, 1))
self.response_tab = np.zeros(1)
self.curly_H_tab = np.zeros((self.ERecoilList.size, 1))
self.xi_tab = np.zeros(1)
xi = 0
vmin_prev = 0
for vmin in self.vmin_linspace:
print("vmin =", vmin)
diff_resp_list = np.zeros((1, len(self.ERecoilList)))
resp = 0
curly_H = np.zeros((1, len(self.ERecoilList)))
for sign in branches:
(ER, qER, const_factor) = self.ConstFactor(vmin, mx, fp, fn, delta, sign)
v_delta = min(VminDelta(self.mT, mx, delta))
diff_resp_list += np.array([self.DifferentialResponse(Eee, qER, const_factor)
for Eee in self.ERecoilList])
resp += integrate.quad(self.DifferentialResponse, self.Ethreshold, self.Emaximum,
args=(qER, const_factor), epsrel=PRECISSION, epsabs=0)[0]
curly_H += np.array([[integrate.quad(self.DifferentialResponse_Full, v_delta, vmin,
args=(Eee, mx, fp, fn, delta, sign),
epsrel=PRECISSION, epsabs=0)[0]
for Eee in self.ERecoilList]])
xi += self.Exposure * \
self.IntegratedResponse(vmin_prev, vmin,
self.Ethreshold, self.Emaximum,
mx, fp, fn, delta)
vmin_prev = vmin
self.diff_response_tab = \
np.append(self.diff_response_tab, diff_resp_list.transpose(), axis=1)
self.response_tab = np.append(self.response_tab, [resp], axis=0)
self.curly_H_tab = np.append(self.curly_H_tab, curly_H.transpose(), axis=1)
# counts/kg/keVee
self.xi_tab = np.append(self.xi_tab, [xi], axis=0)
# counts * day
self.vmin_linspace = np.insert(self.vmin_linspace, 0., 0)
file = output_file_tail + "_VminLinspace.dat"
print(file)
np.savetxt(file, self.vmin_linspace)
file = output_file_tail + "_DiffRespTable.dat"
print(file)
np.savetxt(file, self.diff_response_tab)
file = output_file_tail + "_RespTable.dat"
print(file)
np.savetxt(file, self.response_tab)
file = output_file_tail + "_CurlyHTable.dat"
print(file)
np.savetxt(file, self.curly_H_tab)
file = output_file_tail + "_XiTable.dat"
print(file)
np.savetxt(file, self.xi_tab)
os.system("say Finished response tables.")
return
def PlotTable(self, func, dimension=0, xlim=None, ylim=None,
title=None, plot_close=True, plot_show=True, show_zero_axis=False):
""" Plots response tables.
Input:
func: callable
Function or list of functions of v that should be plotted.
dimension: int
0 (if there's only one function) or
1 (if there are a list of functions).
xlim, ylim: float
Axis limits for the plots.
title: string
Plot title.
plot_close, plot_show: bool
Whether to call plt.close() before and plt.show() after.
show_zero_axis: bool
Whether to show a horizontal line at zero.
"""
if plot_close:
plt.close()
if dimension == 0:
# only one function
plt.plot(self.vmin_linspace, np.array([func(v)
for v in self.vmin_linspace]))
elif dimension == 1:
# list of interpolated functions for each energy in self.ERecoilList
for i in range(self.ERecoilList.size):
plt.plot(self.vmin_linspace, np.array([func[i](v)
for v in self.vmin_linspace]))
else:
print("Wrong dimension")
raise TypeError
if show_zero_axis:
plt.plot(self.vmin_linspace, np.zeros(self.vmin_linspace.size))
if xlim is not None:
plt.xlim(xlim)
if ylim is not None:
plt.ylim(ylim)
if title is not None:
plt.title(title)
if plot_show:
plt.show()
def ImportResponseTables(self, output_file_tail, plot=True):
""" Imports the data for the response tables from files.
"""
file = output_file_tail + "_VminSortedList.dat"
with open(file, 'r') as f_handle:
self.vmin_sorted_list = np.loadtxt(f_handle)
file = output_file_tail + "_VminLinspace.dat"
with open(file, 'r') as f_handle:
self.vmin_linspace = np.loadtxt(f_handle)
file = output_file_tail + "_DiffRespTable.dat"
with open(file, 'r') as f_handle:
self.diff_response_tab = np.loadtxt(f_handle)
file = output_file_tail + "_RespTable.dat"
with open(file, 'r') as f_handle:
self.response_tab = np.loadtxt(f_handle)
file = output_file_tail + "_CurlyHTable.dat"
with open(file, 'r') as f_handle:
self.curly_H_tab = np.loadtxt(f_handle)
file = output_file_tail + "_XiTable.dat"
with open(file, 'r') as f_handle:
self.xi_tab = np.loadtxt(f_handle)
self.diff_response_interp = np.array([unif.interp1d(self.vmin_linspace, dr)
for dr in self.diff_response_tab])
self.response_interp = unif.interp1d(self.vmin_linspace, self.response_tab)
self.curly_H_interp = np.array([unif.interp1d(self.vmin_linspace, h)
for h in self.curly_H_tab])
if plot:
self.PlotTable(self.diff_response_interp, dimension=1)
self.PlotTable(self.response_interp, dimension=0)
self.PlotTable(self.curly_H_interp, dimension=1, title='Curly H')
return
def VminIntegratedResponseTable(self, vmin_list):
return np.array([[integrate.quad(self.diff_response_interp[i],
vmin_list[a], vmin_list[a + 1],
epsrel=PRECISSION, epsabs=0)[0]
for a in range(vmin_list.size - 1)]
for i in range(self.ERecoilList.size)])
def IntegratedResponseTable(self, vmin_list):
return np.array([integrate.quad(self.response_interp,
vmin_list[a], vmin_list[a + 1],
epsrel=PRECISSION, epsabs=0)[0]
for a in range(vmin_list.size - 1)])
def _MinusLogLikelihood(self, vars_list, vminStar=None, logetaStar=None,
vminStar_index=None):
""" Compute -log(L)
Input:
vars_list: ndarray
List of variables [vmin_1, ..., vmin_No, log(eta_1), ..., log(eta_No)]
vminStar, logetaStar: float, optional
Values of fixed vmin^* and log(eta)^*.
Returns:
-log(L): float
"""
if vminStar is None:
vmin_list_w0 = vars_list[: vars_list.size/2]
logeta_list = vars_list[vars_list.size/2:]
else:
vmin_list_w0 = np.insert(vars_list[: vars_list.size/2],
vminStar_index, vminStar)
logeta_list = np.insert(vars_list[vars_list.size/2:],
vminStar_index, logetaStar)
vmin_list_w0 = np.insert(vmin_list_w0, 0, 0)
vmin_resp_integr = self.VminIntegratedResponseTable(vmin_list_w0)
resp_integr = self.IntegratedResponseTable(vmin_list_w0)
mu_i = self.Exposure * np.dot(vmin_resp_integr, 10**logeta_list)
Nsignal = self.Exposure * np.dot(10**logeta_list, resp_integr)
if vminStar is None:
self.gamma_i = (self.mu_BKG_i + mu_i) / self.Exposure
# counts/kg/keVee/days
result = self.NBKG + Nsignal - np.log(self.mu_BKG_i + mu_i).sum()
if np.any(self.mu_BKG_i + mu_i < 0):
raise ValueError
return result
def MinusLogLikelihood(self, vars_list, constr_func=None, vminStar=None,
logetaStar=None, vminStar_index=None):
""" Computes -log(L) and tests whether constraints are satisfied.
Input:
vars_list: ndarray
List of variables [vmin_1, ..., vmin_No, log(eta_1), ..., log(eta_No)].
constr_func: callable, optional
Ffunction of vars_list giving an array of values each corresponding to
a constraint. If the values are > 0 the constraints are satisfied.
vminStar, logetaStar: float, optional
Values of fixed vmin^* and log(eta)^*.
vminStar_index: int, optional
Index corresponding to the position of vminStar in the array of vmin
steps.
Returns:
-log(L) if all constraints are valid, and the result of an artificial
function that grows with the invalid constraints if not all constraints
are valid.
"""
constraints = constr_func(vars_list)
constr_not_valid = constraints < 0
if DEBUG_FULL:
print("*** vars_list =", repr(vars_list))
if DEBUG_FULL:
print("vminStar =", vminStar)
print("logetaStar =", logetaStar)
print("constraints =", repr(constraints))
print("constr_not_valid =", repr(constr_not_valid))
try:
return self._MinusLogLikelihood(vars_list, vminStar=vminStar,
logetaStar=logetaStar,
vminStar_index=vminStar_index)
except:
if np.any(constr_not_valid):
constr_list = constraints[constr_not_valid]
if DEBUG_FULL:
print("Constraints not valid!!")
print("constr sum =", -constr_list.sum())
return min(max(-constr_list.sum(), 0.001) * 1e6, 1e6)
else:
print("Error!!")
raise
def OptimalLikelihood(self, output_file_tail, logeta_guess):
""" Finds the best-fit piecewise constant eta function corresponding to the
minimum MinusLogLikelihood, and prints the results to file (value of the minimum
MinusLogLikelihood and the corresponding values of vmin, logeta steps.
Input:
output_file_tail: string
Tag to be added to the file name.
logeta_guess: float
Guess for the value of log(eta) in the minimization procedure.
"""
self.ImportResponseTables(output_file_tail, plot=False)
vars_guess = np.append(self.vmin_sorted_list,
logeta_guess * np.ones(self.vmin_sorted_list.size))
print("vars_guess =", vars_guess)
vmin_max = self.vmin_linspace[-1]
def constr_func(x, vmin_max=vmin_max):
""" 0 - 8: bounds: 3 * (x.size/2) constraints = 9 for x.size/2 = 3
9 - 12: sorted array: 2 * (x.size/2 - 1) constraints = 4 for x.size/2 = 3
"""
constraints = np.concatenate([x[:x.size/2], vmin_max - x[:x.size/2],
-x[x.size/2:],
np.diff(x[:x.size/2]), np.diff(-x[x.size/2:])])
is_not_close = np.logical_not(
np.isclose(constraints, np.zeros_like(constraints), atol=1e-5))
is_not_close[:3 * (x.size/2)] = T
constr = np.where(is_not_close, constraints, np.abs(constraints))
if DEBUG:
print("***constr =", repr(constr))
print("tf =", repr(constr < 0))
return constr
constr = ({'type': 'ineq', 'fun': constr_func})
np.random.seed(0)
if USE_BASINHOPPING:
minimizer_kwargs = {"constraints": constr, "args": (constr_func,)}
optimum_log_likelihood = basinhopping(self.MinusLogLikelihood, vars_guess,
minimizer_kwargs=minimizer_kwargs,
niter=30, stepsize=0.1)
else:
optimum_log_likelihood = minimize(self.MinusLogLikelihood, vars_guess,
args=(constr_func,), constraints=constr)
print(optimum_log_likelihood)
print("MinusLogLikelihood =", self._MinusLogLikelihood(optimum_log_likelihood.x))
print("vars_guess =", repr(vars_guess))
file = output_file_tail + "_GloballyOptimalLikelihood.dat"
print(file)
np.savetxt(file, np.append([optimum_log_likelihood.fun],
optimum_log_likelihood.x))
os.system("say 'Finished finding optimum'")
return
def ImportOptimalLikelihood(self, output_file_tail, plot=False):
""" Import the minumum -log(L) and the locations of the steps in the best-fit
logeta function.
Input:
output_file_tail: string
Tag to be added to the file name.
plot: bool, optional
Whether to plot response tables.
"""
self.ImportResponseTables(output_file_tail, plot=False)
file = output_file_tail + "_GloballyOptimalLikelihood.dat"
with open(file, 'r') as f_handle:
optimal_result = np.loadtxt(f_handle)
self.optimal_logL = optimal_result[0]
self.optimal_vmin = optimal_result[1: optimal_result.size/2 + 1]
self.optimal_logeta = optimal_result[optimal_result.size/2 + 1:]
print("optimal result =", optimal_result)
if plot:
self._MinusLogLikelihood(optimal_result[1:]) # to get self.gamma_i
self.xi_interp = unif.interp1d(self.vmin_linspace, self.xi_tab)
self.h_sum_tab = np.sum([self.curly_H_tab[i] / self.gamma_i[i]
for i in range(self.optimal_vmin.size)], axis=0)
self.q_tab = 2 * (self.xi_tab - self.h_sum_tab)
self.h_sum_interp = unif.interp1d(self.vmin_linspace, self.h_sum_tab)
self.q_interp = unif.interp1d(self.vmin_linspace, self.q_tab)
file = output_file_tail + "_HSumTable.dat"
print(file)
np.savetxt(file, self.h_sum_tab)
file = output_file_tail + "_QTable.dat"
print(file)
np.savetxt(file, self.q_tab)
self.PlotTable(self.xi_interp, dimension=0, plot_show=False)
self.PlotTable(self.h_sum_interp, dimension=0,
xlim=[0, 2000], ylim=[-2e24, 2e24],
title='Xi, H_sum', plot_close=False)
self.PlotTable(self.q_interp, dimension=0,
xlim=[0, 2000], ylim=[-2e24, 2e24],
title='q', show_zero_axis=True)
return
    def _PlotStepFunction(self, vmin_list, logeta_list,
                          xlim_percentage=(0., 1.1), ylim_percentage=(1.01, 0.99),
                          mark=None, color=None, linewidth=1,
                          plot_close=True, plot_show=True):
        """ Plots a step-like function, given the location of the steps.

        Input:
            vmin_list, logeta_list: ndarray
                Locations (vmin) and heights (logeta) of the steps.
            xlim_percentage, ylim_percentage: tuple, optional
                Fractional padding factors for the axis limits (the x version
                is currently unused; see the commented-out plt.xlim below).
            mark: str, optional
                Matplotlib marker style drawn at the step corners, if given.
            color: str, optional
                Line (and marker) color; matplotlib default when None.
            linewidth: int, optional
                Step-line width.
            plot_close, plot_show: bool, optional
                Whether to call plt.close() before and plt.show() after.
        """
        if plot_close:
            plt.close()
        print(vmin_list)
        print(logeta_list)
        # Extend the step data: prepend vmin = 0 at the first logeta level,
        # and append a drop to logeta = -80 just past the last step.
        x = np.append(np.insert(vmin_list, 0, 0), vmin_list[-1] + 0.1)
        y = np.append(np.insert(logeta_list, 0, logeta_list[0]), -80)
        if color is not None:
            plt.step(x, y, color=color, linewidth=linewidth)
            if mark is not None:
                plt.plot(x, y, mark, color=color)
        else:
            plt.step(x, y, linewidth=linewidth)
            if mark is not None:
                plt.plot(x, y, mark)
        # plt.xlim([vmin_list[0] * xlim_percentage[0], vmin_list[-1] * xlim_percentage[1]])
        plt.xlim([0, 1000])
        plt.ylim([max(logeta_list[-1] * ylim_percentage[0], -60),
                  max(logeta_list[0] * ylim_percentage[1], -35)])
        if plot_show:
            plt.show()
        return
def PlotOptimum(self, xlim_percentage=(0., 1.1), ylim_percentage=(1.01, 0.99),
color='red', linewidth=1,
plot_close=True, plot_show=True):
""" Plots the best-fit eta(vmin) step function.
"""
self._PlotStepFunction(self.optimal_vmin, self.optimal_logeta,
xlim_percentage=xlim_percentage,
ylim_percentage=ylim_percentage,
color=color, linewidth=linewidth,
plot_close=plot_close, plot_show=plot_show)
return
    def PlotConstrainedOptimum(self, vminStar, logetaStar, vminStar_index,
                               xlim_percentage=(0., 1.1), ylim_percentage=(1.01, 0.99),
                               plot_close=True, plot_show=True):
        """ Plots the eta(vmin) function given the location of vminStar and logetaStar.

        Draws the unconstrained best fit first, then overlays the constrained
        optimum with the starred point inserted at vminStar_index, and marks
        (vminStar, logetaStar) with a star.
        """
        self._PlotStepFunction(self.optimal_vmin, self.optimal_logeta,
                               plot_close=plot_close, plot_show=False)
        # Constrained solution with the fixed (vminStar, logetaStar) step
        # re-inserted at its index.
        x = np.insert(self.constr_optimal_vmin, vminStar_index, vminStar)
        y = np.insert(self.constr_optimal_logeta, vminStar_index, logetaStar)
        self._PlotStepFunction(x, y,
                               xlim_percentage=xlim_percentage,
                               ylim_percentage=ylim_percentage,
                               plot_close=False, plot_show=False, mark='x', color='k')
        plt.plot(vminStar, logetaStar, '*')
        if plot_show:
            plt.show()
        return
def _ConstrainedOptimalLikelihood(self, vminStar, logetaStar, vminStar_index):
""" Finds the constrained minimum MinusLogLikelihood for given vminStar,
logetaStar and vminStar_index.
Input:
vminStar, logetaStar: float
Location of the constrained step.
vminStar_index: int
Index of vminStar in the list of vmin steps of the constrained optimum
logeta function.
Returns:
constr_optimal_logl: float
The constrained minimum MinusLogLikelihood
"""
if DEBUG:
print("~~~~~ vminStar_index =", vminStar_index)
vmin_guess_left = np.array([self.optimal_vmin[ind]
if self.optimal_vmin[ind] < vminStar
else vminStar * (1 - 0.001*(vminStar_index - ind))
for ind in range(vminStar_index)])
vmin_guess_right = np.array([self.optimal_vmin[ind]
if self.optimal_vmin[ind] > vminStar
else vminStar * (1 + 0.001*(ind - vminStar_index - 1))
for ind in range(vminStar_index, self.optimal_vmin.size)])
vmin_guess = np.append(vmin_guess_left, vmin_guess_right)
logeta_guess = self.optimal_logeta
logeta_guess_left = np.maximum(logeta_guess[:vminStar_index],
np.ones(vminStar_index)*logetaStar)
logeta_guess_right = np.minimum(logeta_guess[vminStar_index:],
np.ones(logeta_guess.size - vminStar_index) *
logetaStar)
logeta_guess = np.append(logeta_guess_left, logeta_guess_right)
vars_guess = np.append(vmin_guess, logeta_guess)
constr_func = ConstraintsFunction(vminStar, logetaStar, vminStar_index)
constr = ({'type': 'ineq', 'fun': constr_func})
args = (constr_func, vminStar, logetaStar, vminStar_index)
sol_not_found = True
attempts = 3
np.random.seed(1)
random_variation = 1e-5
if USE_BASINHOPPING:
class TakeStep(object):
def __init__(self, stepsize=0.1):
pass
self.stepsize = stepsize
def __call__(self, x):
x[:x.size/2] += np.random.uniform(-5. * self.stepsize,
5. * self.stepsize,
x[x.size/2:].shape)
x[x.size/2:] += np.random.uniform(-self.stepsize,
self.stepsize, x[x.size/2:].shape)
return x
take_step = TakeStep()
class AdaptiveKwargs(object):
def __init__(self, kwargs, random_variation=random_variation):
self.kwargs = kwargs
self.random_variation = random_variation
def __call__(self):
new_kwargs = {}
random_factor_vminStar = \
(1 + self.random_variation * np.random.uniform(-1, 1))
random_factor_logetaStar = \
(1 + self.random_variation * np.random.uniform(-1, 1))
constr_func_args = (self.kwargs['args'][1] * random_factor_vminStar,
self.kwargs['args'][2] * random_factor_logetaStar,
self.kwargs['args'][3])
constr_func = ConstraintsFunction(*constr_func_args)
new_kwargs['args'] = (constr_func,) + constr_func_args
new_kwargs['constraints'] = ({'type': 'ineq', 'fun': constr_func})
if 'method' in self.kwargs:
new_kwargs['method'] = self.kwargs['method']
return new_kwargs
minimizer_kwargs = {"constraints": constr, "args": args, "method": self.method}
if ADAPT_KWARGS:
adapt_kwargs = AdaptiveKwargs(minimizer_kwargs, random_variation)
else:
adapt_kwargs = None
while sol_not_found and attempts > 0:
try:
if USE_BASINHOPPING:
constr_optimum_log_likelihood = \
basinhopping(self.MinusLogLikelihood, vars_guess,
minimizer_kwargs=minimizer_kwargs, niter=5,
take_step=take_step, adapt_kwargs=adapt_kwargs,
stepsize=0.2)
else:
constr_optimum_log_likelihood = \
minimize(self.MinusLogLikelihood, vars_guess,
args=args, constraints=constr, method=self.method)
constraints = constr_func(constr_optimum_log_likelihood.x)
is_not_close = np.logical_not(np.isclose(constraints,
np.zeros_like(constraints)))
constr_not_valid = np.logical_and(constraints < 0, is_not_close)
sol_not_found = np.any(constr_not_valid)
except ValueError:
sol_not_found = True
pass
attempts -= 1
args = (constr_func,
vminStar * (1 + random_variation * np.random.uniform(-1, 1)),
logetaStar * (1 + random_variation * np.random.uniform(-1, 1)),
vminStar_index)
if USE_BASINHOPPING:
minimizer_kwargs = {"constraints": constr, "args": args}
if DEBUG and sol_not_found:
print(attempts, "attempts left! ####################################" +
"################################################################")
print("sol_not_found =", sol_not_found)
if sol_not_found:
if DEBUG:
print("ValueError: sol not found")
raise ValueError
if DEBUG:
print(constr_optimum_log_likelihood)
print("kwargs =", constr_optimum_log_likelihood.minimizer.kwargs)
print("args =", constr_optimum_log_likelihood.minimizer.kwargs['args'])
print("optimum_logL =", self.optimal_logL)
print("constraints=", repr(constraints))
print("constr_not_valid =", repr(constr_not_valid))
print("vars_guess =", repr(vars_guess))
print("optimum_logL =", self.optimal_logL)
print("vminStar_index =", vminStar_index)
return constr_optimum_log_likelihood
def ConstrainedOptimalLikelihood(self, vminStar, logetaStar, plot=False):
""" Finds the constrained minimum MinusLogLikelihood for given vminStar,
logetaStar. Finds the minimum for all vminStar_index, and picks the best one.
Input:
vminStar, logetaStar: float
Location of constrained step.
plot: bool, optional
Whether to plot the constrained piecewice-constant logeta function.
Returns:
constr_optimal_logl: float
The constrained minimum MinusLogLikelihood
"""
vminStar_index = 0
while vminStar_index < self.optimal_vmin.size and \
vminStar > self.optimal_vmin[vminStar_index]:
vminStar_index += 1
try:
constr_optimum_log_likelihood = \
self._ConstrainedOptimalLikelihood(vminStar, logetaStar, vminStar_index)
except ValueError:
optim_logL = 10**6
pass
else:
optim_logL = constr_optimum_log_likelihood.fun
original_optimum = constr_optimum_log_likelihood
vminStar_index_original = vminStar_index
index = vminStar_index
while ALLOW_MOVE and index > 0:
try:
index -= 1
new_optimum = \
self._ConstrainedOptimalLikelihood(vminStar, logetaStar, index)
except ValueError:
pass
else:
if new_optimum.fun < optim_logL:
os.system("say Moved left")
print("Moved left, index is now", index)
print("############################################################" +
"############################################################")
vminStar_index = index
constr_optimum_log_likelihood = new_optimum
optim_logL = constr_optimum_log_likelihood.fun
index = vminStar_index_original
while ALLOW_MOVE and index < self.optimal_vmin.size:
try:
index += 1
new_optimum = self._ConstrainedOptimalLikelihood(vminStar, logetaStar,
index)
except ValueError:
pass
else:
if new_optimum.fun < optim_logL:
os.system("say Moved right")
print("Moved right, index is now", index)
print("############################################################" +
"############################################################")
vminStar_index = index
constr_optimum_log_likelihood = new_optimum
optim_logL = constr_optimum_log_likelihood.fun
if optim_logL == 10**6:
raise ValueError
self.constr_optimal_logl = constr_optimum_log_likelihood.fun
vars_result = constr_optimum_log_likelihood.x
self.constr_optimal_vmin = vars_result[: vars_result.size/2]
self.constr_optimal_logeta = vars_result[vars_result.size/2:]
if plot:
print("vminStar =", vminStar)
print("logetaStar =", logetaStar)
print("vminStar_index =", vminStar_index)
try:
print("original:", original_optimum)
except:
print("Original failed.")
pass
try:
print("new:", constr_optimum_log_likelihood)
print(constr_optimum_log_likelihood.minimizer.kwargs['args'])
except:
print("All attepts failed.")
pass
try:
vminStar_rand = constr_optimum_log_likelihood.minimizer.kwargs['args'][1]
logetaStar_rand = constr_optimum_log_likelihood.minimizer.kwargs['args'][2]
constr_func = ConstraintsFunction(vminStar_rand, logetaStar_rand,
vminStar_index)
constraints = constr_func(constr_optimum_log_likelihood.x)
is_not_close = np.logical_not(np.isclose(constraints,
np.zeros_like(constraints)))
constr_not_valid = np.logical_and(constraints < 0, is_not_close)
sol_not_found = np.any(constr_not_valid)
print("random vminStar =", vminStar_rand)
print("random logetaStar =", logetaStar_rand)
print("x =", constr_optimum_log_likelihood.x)
print("constraints =", constraints)
print("is_not_close =", is_not_close)
print("constr_not_valid =", constr_not_valid)
print("sol_not_found =", sol_not_found)
except:
print("Error")
pass
os.system("say 'Finished plot'")
self.PlotConstrainedOptimum(vminStar_rand, logetaStar_rand, vminStar_index,
xlim_percentage=(0., 1.1),
ylim_percentage=(1.2, 0.8))
return self.constr_optimal_logl
def VminSamplingList(self, output_file_tail, vmin_min, vmin_max, vmin_num_steps,
steepness_vmin=1.5, steepness_vmin_center=2.5, plot=False):
""" Finds a non-linear way to sample the vmin range, such that more points are
sampled near the location of the steps of the best-fit logeta function, and
fewer in between. This is done by building a function of vmin that is steeper
near the steps and flatter elsewhere, and the steeper this function the more
samplings are done in this region.
Input:
output_file_tail: string
Tag to be added to the file name.
vmin_min, vmin_max: float
Range in vmin where the sampling should be made.
vmin_num_steps: int
Number of samples in vmin (approximate, the final number of steps is
not exact, due to taking floor() in some places.
steepness_vmin: float, optional
Parameter related to the steepness of this function to the left of the
leftmost step and to the right of the rightmost step.
steepness_vmin_center: float, optional
Similar parameter, but for the steepness in between the leftmost step
and the rightmost step.
plot: bool, optional
Whether to plot intermediate results such as the sampling function.
"""
self.ImportOptimalLikelihood(output_file_tail)
xmin = vmin_min
xmax = vmin_max
# TODO! This +4 is to compensate for a loss of ~4 points (not always 4 though),
# and it's due to taking floor later on.
# Find a better way to deal with this.
x_num_steps = vmin_num_steps # + 4
s = steepness_vmin
sc = steepness_vmin_center
x_lin = np.linspace(xmin, xmax, 1000)
x0_list = self.optimal_vmin
numx0 = x0_list.size
print("x0 =", x0_list)
def UnitStep(x): return (np.sign(x) + 1) / 2
def g1(x, x0, s0, xmin=xmin):
return np.log10(UnitStep(x - x0) +
UnitStep(x0 - x) *
(x0 - xmin) / (x + 10**s0 * (-x + x0) - xmin))
def g2(x, x0, s0, xmax=xmax):
return np.log10(UnitStep(x0 - x) +
UnitStep(x - x0) *
(x + 10**s0 * (-x + x0) - xmax) / (x0 - xmax))
def g(x, x0, s1, s2): return g1(x, x0, s1) + g2(x, x0, s2)
s_list = np.array([[s, sc]] + [[sc, sc]] * (numx0 - 2) + [[sc, s]])
def g_total(x, sign=1, x0=x0_list, s_list=s_list):
return np.array([sign * g(x, x0_list[i], s_list[i, 0], s_list[i, 1])
for i in range(x0_list.size)]).prod(axis=0)
g_lin = g_total(x_lin)
xT_guess = (x0_list[:-1] + x0_list[1:]) / 2
bounds = np.array([(x0_list[i], x0_list[i + 1])
for i in range(x0_list.size - 1)])
x_turns_max = np.array([minimize(g_total, np.array(xT_guess[i]),
args=(-1,), bounds=[bounds[i]]).x
for i in range(0, xT_guess.size, 2)])
x_turns_min = np.array([minimize(g_total, np.array(xT_guess[i]),
bounds=[bounds[i]]).x
for i in range(1, xT_guess.size, 2)])
x_turns = np.sort(np.append(x_turns_max, x_turns_min))
x_turns = np.append(np.insert(x_turns, 0, xmin), [xmax])
y_turns = g_total(x_turns)
print("x_turns =", x_turns)
print("y_turns =", y_turns)
def g_inverse(y, x1, x2):
return brentq(lambda x: g_total(x) - y, x1, x2)
def g_inverse_list(y_list, x1, x2):
return np.array([g_inverse(y, x1, x2) for y in y_list])
y_diff = np.diff(y_turns)
y_diff_sum = np.abs(y_diff).sum()
print("y_diff =", y_diff)
num_steps = np.array([max(1, np.floor(x_num_steps * np.abs(yd)/y_diff_sum))
for yd in y_diff])
print("num_steps =", num_steps)
y_list = np.array([np.linspace(y_turns[i], y_turns[i+1], num_steps[i])
for i in range(num_steps.size)])
x_list = np.array([g_inverse_list(y_list[i], x_turns[i], x_turns[i+1])
for i in range(y_list.size)])
x_list = np.concatenate(x_list)
y_list = np.concatenate(y_list)
x_list = x_list[np.array([x_list[i] != x_list[i+1]
for i in range(x_list.size - 1)] + [True])]
y_list = y_list[np.array([y_list[i] != y_list[i+1]
for i in range(y_list.size - 1)] + [True])]
self.vmin_sampling_list = x_list
if plot:
plt.close()
plt.plot(x_lin, g_lin)
plt.plot(x_turns, y_turns, 'o')
plt.plot(x_list, y_list, '*')
plt.xlim([xmin, xmax])
plt.ylim([min(-s * sc**(numx0 - 1), np.min(y_turns)),
max(s * sc**(numx0 - 1), np.max(y_turns))])
plt.show()
return
def OptimumStepFunction(self, vmin):
""" Best-fit logeta as a function of vmin for the optimal log(L).
Input:
vmin: float
Value of vmin for which to evaluate logeta.
Returns:
logeta: float
log(eta(vmin)) for the best-fit piecewise constant function.
"""
index = 0
while index < self.optimal_vmin.size and vmin > self.optimal_vmin[index]:
index += 1
if index == self.optimal_vmin.size:
return self.optimal_logeta[-1]*10
return self.optimal_logeta[index]
def VminLogetaSamplingTable(self, output_file_tail, logeta_percent_minus,
logeta_percent_plus, logeta_num_steps,
linear_sampling=True, steepness_logeta=1, plot=False):
""" Finds a non-linear way to sample both the vmin and logeta range, such that
more points are sampled near the location of the steps of the best-fit logeta
function, and fewer in between. This uses the sampling in vmin done by
VminSamplingList, and computes a non-linear sampling in logeta in a similar way
(by building a function of logeta that is steeper near the steps and flatter
elsewhere, and the steeper this function the more samplings are done in this
region).
Input:
output_file_tail: string
Tag to be added to the file name.
logeta_percent_minus, logeta_percent_plus: float
Range in logeta where the sampling should be made, given as percentage
in the negative and positive direction of the best-fit logeta.
logeta_num_steps: int
Number of samples in logeta.
steepness_logeta: float, optional
Parameter related to the steepness of this sampling function in logeta.
plot: bool, optional
Whether to plot intermediate results such as the sampling function.
"""
print(self.optimal_vmin)
print(self.optimal_logeta)
logeta_num_steps_minus = logeta_num_steps * \
logeta_percent_minus / (logeta_percent_minus + logeta_percent_plus)
logeta_num_steps_plus = logeta_num_steps * \
logeta_percent_plus / (logeta_percent_minus + logeta_percent_plus)
s = steepness_logeta
def f(x, xm, i, s0=s):
return (xm - x) / (10**s0 - 1) * 10**i + (10**s0 * x - xm) / (10**s0 - 1)
self.vmin_logeta_sampling_table = []
vmin_last_step = self.optimal_vmin[-1]
if linear_sampling:
for vmin in self.vmin_sampling_list:
logeta_opt = self.OptimumStepFunction(min(vmin, vmin_last_step))
if vmin < self.optimal_vmin[0]:
logeta_min = logeta_opt * (1 + 0.6 * logeta_percent_minus)
logeta_max = logeta_opt * (1 - logeta_percent_plus)
else:
if vmin < 600:
logeta_min = logeta_opt * (1 + logeta_percent_minus)
else:
logeta_min = logeta_opt * (1 + 0.6 * logeta_percent_minus)
logeta_max = logeta_opt * (1 - 0.5 * logeta_percent_plus)
logeta_list = [[vmin, logeta]
for logeta in np.linspace(logeta_min, logeta_max,
logeta_num_steps)]
self.vmin_logeta_sampling_table += [logeta_list]
else:
for vmin in self.vmin_sampling_list:
logeta_opt = self.OptimumStepFunction(min(vmin, vmin_last_step))
logeta_min = logeta_opt * (1 + logeta_percent_minus)
logeta_max = logeta_opt * (1 - logeta_percent_plus)
logeta_list_minus = [[vmin, f(logeta_opt, logeta_min, i)]
for i in np.linspace(s, 0, logeta_num_steps_minus)]
logeta_list_plus = [[vmin, f(logeta_opt, logeta_max, i)]
for i in np.linspace(s / logeta_num_steps_plus, s,
logeta_num_steps_plus)]
self.vmin_logeta_sampling_table += [logeta_list_minus + logeta_list_plus]
self.vmin_logeta_sampling_table = np.array(self.vmin_logeta_sampling_table)
if plot:
self.PlotSamplingTable(plot_close=True)
return
def PlotSamplingTable(self, plot_close=False, plot_show=True, plot_optimum=True):
""" Plots the sampling points in the vmin-logeta plane.
"""
if plot_close:
plt.close()
print("sampling_size =", self.vmin_logeta_sampling_table.shape)
for tab in self.vmin_logeta_sampling_table:
plt.plot(tab[:, 0], tab[:, 1], 'o')
if plot_optimum:
self.PlotOptimum(xlim_percentage=(0.9, 1.1), ylim_percentage=(1.2, 0.8),
plot_close=False, plot_show=plot_show)
elif plot_show:
plt.show()
return
def GetLikelihoodTable(self, index, output_file_tail, logeta_index_range, extra_tail):
""" Prints to file lists of the form [logetaStar_ij, logL_ij] needed for
1D interpolation, where i is the index corresponding to vminStar_i and j is
the index for each logetaStar. Each file corresponds to a different index i.
Here only one file is written for a specific vminStar.
Input:
index: int
Index of vminStar.
output_file_tail: string
Tag to be added to the file name.
logeta_index_range: tuple
A touple (index0, index1) between which logetaStar will be considered.
If this is None, then the whole list of logetaStar is used.
extra_tail: string
Additional tail to be added to filenames.
"""
print('index =', index)
print('output_file_tail =', output_file_tail)
vminStar = self.vmin_logeta_sampling_table[index, 0, 0]
logetaStar_list = self.vmin_logeta_sampling_table[index, :, 1]
plot = False
if logeta_index_range is not None:
logetaStar_list = \
logetaStar_list[logeta_index_range[0]: logeta_index_range[1]]
plot = True
print("vminStar =", vminStar)
table = np.empty((0, 2))
for logetaStar in logetaStar_list:
try:
constr_opt = self.ConstrainedOptimalLikelihood(vminStar, logetaStar,
plot=plot)
except:
print("error")
os.system("say Error")
pass
else:
print("index =", index, "; vminStar =", vminStar,
"; logetaStar =", logetaStar, "; constr_opt =", constr_opt)
table = np.append(table, [[logetaStar, constr_opt]], axis=0)
# table = np.append(table, [logetaStar])
print("vminStar =", vminStar, "; table =", table)
if True:
temp_file = output_file_tail + "_" + str(index) + \
"_LogetaStarLogLikelihoodList" + extra_tail + ".dat"
print(temp_file)
np.savetxt(temp_file, table)
return
def LogLikelihoodList(self, output_file_tail, extra_tail="", processes=None,
vmin_index_list=None, logeta_index_range=None):
""" Loops thorugh the list of all vminStar and calls GetLikelihoodTable,
which will print the likelihood tables to files.
Input:
output_file_tail: string
Tag to be added to the file name.
extra_tail: string, optional
Additional tail to be added to filenames.
processes: int, optional
Number of processes for parallel programming.
vmin_index_list: ndarray, optional
List of indices in vminStar_list for which we calculate the optimal
likelihood. If not given, the whole list of vminStars is used.
logeta_index_range: tuple, optional
Atuple (index0, index1) between which logetaStar will be considered.
If not given, then the whole list of logetaStar is used.
"""
if vmin_index_list is None:
vmin_index_list = range(0, self.vmin_logeta_sampling_table.shape[0])
else:
try:
len(vmin_index_list)
except TypeError:
vmin_index_list = range(vmin_index_list,
self.vmin_logeta_sampling_table.shape[0])
print("vmin_index_list =", vmin_index_list)
print("logeta_index_range =", logeta_index_range)
kwargs = ({'index': index,
'output_file_tail': output_file_tail,
'logeta_index_range': logeta_index_range,
'extra_tail': extra_tail}
for index in vmin_index_list)
par.parmap(self.GetLikelihoodTable, kwargs, processes)
return
    def _logL_interp(vars_list, constraints):
        # Penalized objective for the confidence-band scan: if any constraint
        # (all entries but the last) is violated, return a large positive
        # penalty proportional to the total violation; otherwise evaluate the
        # interpolated log-likelihood.
        # NOTE(review): this class-level copy has no `self` parameter and
        # refers to a free name `logL_interp` that only exists as a local
        # inside ConfidenceBand, so it looks like dead leftover code shadowed
        # by the closure of the same name defined there -- confirm before
        # removing.
        constr_not_valid = constraints(vars_list)[:-1] < 0
        if np.any(constr_not_valid):
            constr_list = constraints(vars_list)[constr_not_valid]
            return -constr_list.sum() * 10**2
        return logL_interp(vars_list)
    def ConfidenceBand(self, output_file_tail, delta_logL, interpolation_order,
                       extra_tail="", multiplot=True):
        """ Compute the confidence band.
        Input:
            output_file_tail: string
                Tag to be added to the file name.
            delta_logL: float
                Target difference between the constrained minimum and the
                unconstrained global minimum of MinusLogLikelihood.
            interpolation_order: int
                interpolation order for the interpolated constrained minimum of
                MinusLogLikelihood as a function of logeta, for a fixed vmin.
            extra_tail: string, optional
                Additional tail to be added to filenames.
            multiplot: bool, optional
                Whether to plot log(L) as a function of logeta for each vmin, and the
                horizontal line corresponding to a given delta_logL.
        """
        print("self.vmin_sampling_list =", self.vmin_sampling_list)
        # Band edges collected as [vmin, logeta] pairs, one per sampled vmin
        # for which a crossing is found.
        self.vmin_logeta_band_low = []
        self.vmin_logeta_band_up = []
        vmin_last_step = self.optimal_vmin[-1]
        if multiplot:
            plt.close()
        for index in range(self.vmin_sampling_list.size):
            print("index =", index)
            print("vmin =", self.vmin_sampling_list[index])
            # Best-fit logeta at this vmin (clamped at the last step location).
            logeta_optim = self.OptimumStepFunction(min(self.vmin_sampling_list[index],
                                                        vmin_last_step))
            file = output_file_tail + "_" + str(index) + \
                "_LogetaStarLogLikelihoodList" + extra_tail + ".dat"
            # NOTE(review): bare except -- any failure (missing or corrupt
            # table file) silently skips this vmin; consider narrowing.
            try:
                with open(file, 'r') as f_handle:
                    table = np.loadtxt(f_handle)
            except:
                continue
            x = table[:, 0]  # this is logeta
            y = table[:, 1]  # this is logL
            # Cubic interpolation of the constrained log(L) vs logeta at this
            # fixed vmin.
            logL_interp = interpolate.interp1d(x, y, kind='cubic')
            def _logL_interp(vars_list, constraints):
                # Penalize constraint violations so the minimizer stays inside
                # the tabulated logeta range; otherwise evaluate the interpolant.
                constr_not_valid = constraints(vars_list)[:-1] < 0
                if np.any(constr_not_valid):
                    constr_list = constraints(vars_list)[constr_not_valid]
                    return -constr_list.sum() * 1e2
                return logL_interp(vars_list)
            print(self.optimal_logL - delta_logL)
            print(np.array([table[0, 0]]), " ", table[-1, 0])
            print(logeta_optim)
            # Inequality constraints keeping logeta within the tabulated range.
            def constr_func(logeta, logeta_min=np.array([table[0, 0]]),
                            logeta_max=np.array([table[-1, 0]])):
                return np.concatenate([logeta - logeta_min, logeta_max - logeta])
            constr = ({'type': 'ineq', 'fun': constr_func})
            # Location (in logeta) of the minimum of the interpolated curve,
            # used to split the root search into a left and a right branch.
            try:
                logeta_minimLogL = minimize(_logL_interp, np.array([logeta_optim]),
                                            args=(constr_func,), constraints=constr).x[0]
            except ValueError:
                print("ValueError at logeta_minimLogL")
                logeta_minimLogL = logeta_optim
                pass
            print("logeta_minimLogL =", logeta_minimLogL)
            print("x =", x)
            print("y =", y)
            if multiplot:
                plt.close()
                plt.plot(x, y, 'o-')
                plt.plot(x, (self.optimal_logL + 1) * np.ones_like(y))
                plt.plot(x, (self.optimal_logL + 2.7) * np.ones_like(y))
                plt.title("index =" + str(index) + ", v_min =" +
                          str(self.vmin_sampling_list[index]) + "km/s")
                plt.xlim(x[0], x[-1])
                plt.ylim(-5, 20)
                plt.show()
            # NOTE(review): F/T appear to be module-level aliases for
            # False/True (cf. plot_close=F used elsewhere in this file) --
            # confirm.
            error = F
            # Lower band edge: root of logL(logeta) = optimal_logL + delta_logL
            # to the left of the interpolated minimum.
            # NOTE(review): `logeta_minimLogL < self.optimal_logL + delta_logL`
            # compares a logeta position against a log-likelihood threshold;
            # possibly logL_interp(logeta_minimLogL) was intended -- confirm.
            try:
                if y[0] > self.optimal_logL + delta_logL and \
                        logeta_minimLogL < self.optimal_logL + delta_logL:
                    sol = brentq(lambda logeta: logL_interp(logeta) - self.optimal_logL -
                                 delta_logL,
                                 table[0, 0], logeta_minimLogL)
                    self.vmin_logeta_band_low += \
                        [[self.vmin_sampling_list[index], sol]]
            except ValueError:
                print("ValueError: Error in calculating vmin_logeta_band_low")
                error = T
            # Upper band edge: same crossing, to the right of the minimum.
            try:
                if y[-1] > self.optimal_logL + delta_logL and \
                        logeta_minimLogL < self.optimal_logL + delta_logL:
                    sol = brentq(lambda logeta: logL_interp(logeta) - self.optimal_logL -
                                 delta_logL,
                                 logeta_minimLogL, table[-1, 0])
                    self.vmin_logeta_band_up += \
                        [[self.vmin_sampling_list[index], sol]]
            except ValueError:
                print("ValueError: Error in calculating vmin_logeta_band_hi")
                error = T
            # Diagnostic plot for vmin values where a crossing search failed.
            if error:
                plt.close()
                plt.plot(x, (self.optimal_logL + 1) * np.ones_like(y))
                plt.plot(x, (self.optimal_logL + 2.7) * np.ones_like(y))
                plt.title("index =" + str(index) + "; v_min =" +
                          str(self.vmin_sampling_list[index]) + "km/s")
                plt.xlim(x[0], x[-1])
                plt.ylim([-5, 20])
                plt.plot(x, y, 'o-', color="r")
                plt.plot(logeta_optim, logL_interp(logeta_optim), '*')
                plt.plot(logeta_optim, self.optimal_logL, '*')
                print("ValueError")
                plt.show()
                # raise
                pass
        if multiplot:
            plt.show()
        self.vmin_logeta_band_low = np.array(self.vmin_logeta_band_low)
        self.vmin_logeta_band_up = np.array(self.vmin_logeta_band_up)
        print("lower band: ", self.vmin_logeta_band_low)
        print("upper band: ", self.vmin_logeta_band_up)
        self.PlotConfidenceBand()
        # Persist both band edges, tagged by the (rounded) delta_logL target.
        delta_logL = round(delta_logL, 1)
        file = output_file_tail + "_FoxBand_low_deltalogL_" + str(delta_logL) + ".dat"
        print(file)
        np.savetxt(file, self.vmin_logeta_band_low)
        file = output_file_tail + "_FoxBand_up_deltalogL_" + str(delta_logL) + ".dat"
        print(file)
        np.savetxt(file, self.vmin_logeta_band_up)
        return
def PlotConfidenceBand(self):
""" Plot the confidence band and the best-fit function.
"""
plt.close()
try:
plt.plot(self.vmin_logeta_band_low[:, 0], self.vmin_logeta_band_low[:, 1], 'o-')
except IndexError:
pass
try:
plt.plot(self.vmin_logeta_band_up[:, 0], self.vmin_logeta_band_up[:, 1], 'o-')
except IndexError:
pass
self.PlotOptimum(ylim_percentage=(1.2, 0.8), plot_close=F, plot_show=T)
def ImportConfidenceBand(self, output_file_tail, delta_logL, extra_tail=""):
""" Import the confidence band from file.
Input:
output_file_tail: string
Tag to be added to the file name.
delta_logL: float
Target difference between the constrained minimum and the
unconstrained global minimum of MinusLogLikelihood.
extra_tail: string, optional
Additional tail to be added to filenames.
"""
delta_logL = round(delta_logL, 1)
file = output_file_tail + "_FoxBand_low_deltalogL_" + str(delta_logL) + \
extra_tail + ".dat"
print(file)
with open(file, 'r') as f_handle:
self.vmin_logeta_band_low = np.loadtxt(f_handle)
file = output_file_tail + "_FoxBand_up_deltalogL_" + str(delta_logL) + \
extra_tail + ".dat"
with open(file, 'r') as f_handle:
self.vmin_logeta_band_up = np.loadtxt(f_handle)
return
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.