CombinedText stringlengths 4 3.42M |
|---|
# -*- coding: utf-8 -*-
"""
Defines well headers.
:copyright: 2016 Agile Geoscience
:license: Apache 2.0
"""
import csv
from .fields import las_fields
from . import utils
class Header(object):
    """
    The well metadata or header information.

    Not the same as an LAS header, but we might get info from there.
    """
    def __init__(self, params):
        """
        Generic initializer: store each truthy key/value pair from
        ``params`` as an attribute. Falsy keys or values are skipped so
        empty LAS fields do not become empty attributes.

        Args:
            params (dict): Mapping of header field names to values.
        """
        for k, v in params.items():
            if k and v:
                setattr(self, k, v)

    def __repr__(self):
        # Delegate to the attribute dict so the repr shows all fields.
        return self.__dict__.__repr__()

    @classmethod
    def from_lasio(cls, l, remap=None, funcs=None):
        """
        Assumes we're starting with a lasio object, l.

        Args:
            l (lasio): A lasio instance.
            remap (dict): Optional. A dict of 'old': 'new' LAS field names.
            funcs (dict): Optional. A dict of 'las field': function() for
                implementing a transform before loading. Can be a lambda.

        Returns:
            Header. The header instance.
        """
        params = {}
        for field, (sect, code) in las_fields['header'].items():
            params[field] = utils.lasio_get(l,
                                            sect,
                                            code,
                                            remap=remap,
                                            funcs=funcs)
        return cls(params)

    @classmethod
    def from_csv(cls, csv_file):
        """
        Build a Header from the first row of a CSV whose column names
        are the header field names.

        Args:
            csv_file (file-like): An open CSV file or text stream.

        Returns:
            Header. The header instance.

        Raises:
            NotImplementedError: If the CSV cannot be parsed into a row
                of parameters (kept for backward compatibility with the
                previous always-raising implementation).
        """
        try:
            # DictReader yields one dict per data row; the header data is
            # the first row. The old code handed the reader object itself
            # to cls(), which always failed and hit the except branch.
            params = next(iter(csv.DictReader(csv_file)))
        except Exception:
            # Narrowed from a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt.
            raise NotImplementedError
        return cls(params)
sorting out missing data
# -*- coding: utf-8 -*-
"""
Defines well headers.
:copyright: 2016 Agile Geoscience
:license: Apache 2.0
"""
import csv
from .fields import las_fields
from . import utils
class Header(object):
    """
    The well metadata or header information.

    Not the same as an LAS header, but we might get info from there.
    """
    def __init__(self, params):
        """
        Generic initializer: set safe defaults, then store each truthy
        key/value pair from ``params`` as an attribute.

        Args:
            params (dict): Mapping of header field names to values.
        """
        # Plain assignment instead of setattr with a literal name.
        # Defaults prevent AttributeError downstream (e.g. when plotting).
        self.name = ''
        self.uwi = ''
        for k, v in params.items():
            if k and v:
                setattr(self, k, v)

    def __repr__(self):
        # Delegate to the attribute dict so the repr shows all fields.
        return self.__dict__.__repr__()

    @classmethod
    def from_lasio(cls, l, remap=None, funcs=None):
        """
        Assumes we're starting with a lasio object, l.

        Args:
            l (lasio): A lasio instance.
            remap (dict): Optional. A dict of 'old': 'new' LAS field names.
            funcs (dict): Optional. A dict of 'las field': function() for
                implementing a transform before loading. Can be a lambda.

        Returns:
            Header. The header instance.
        """
        params = {}
        for field, (sect, code) in las_fields['header'].items():
            params[field] = utils.lasio_get(l,
                                            sect,
                                            code,
                                            remap=remap,
                                            funcs=funcs)
        return cls(params)

    @classmethod
    def from_csv(cls, csv_file):
        """
        Build a Header from the first row of a CSV whose column names
        are the header field names.

        Args:
            csv_file (file-like): An open CSV file or text stream.

        Returns:
            Header. The header instance.

        Raises:
            NotImplementedError: If the CSV cannot be parsed into a row
                of parameters (kept for backward compatibility with the
                previous always-raising implementation).
        """
        try:
            # DictReader yields one dict per data row; the header data is
            # the first row. The old code handed the reader object itself
            # to cls(), which always failed and hit the except branch.
            params = next(iter(csv.DictReader(csv_file)))
        except Exception:
            # Narrowed from a bare ``except:``, which also swallowed
            # SystemExit and KeyboardInterrupt.
            raise NotImplementedError
        return cls(params)
|
from django.contrib.auth.models import Permission, Group
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.db import models
import uuid
from . import settings as app_settings
class InviteItem(models.Model):
    '''This is the model used to generate the invitation forms.'''
    first_name = models.CharField(max_length=150)
    last_name = models.CharField(max_length=150)
    email = models.EmailField(max_length=150)
    username = models.CharField(max_length=150)
    # Optional personalised message to include in the invitation email.
    greeting = models.TextField(blank=True)
    # Permissions/groups the invited user should receive on acceptance.
    permissions = models.ManyToManyField(Permission, blank=True)
    groups = models.ManyToManyField(Group, blank=True)
    is_super_user = models.BooleanField(default=False)

    def __unicode__(self):
        # Python 2 string representation: "First Last".
        return self.first_name + ' ' + self.last_name
class Invitation(models.Model):
    """A pending membership invitation, identified by its activation code."""
    def make_uuid():
        # No ``self``: Django calls the ``default`` callable with no
        # arguments, so this is effectively a static factory.
        return str(uuid.uuid4())
    activation_code = models.CharField(
        max_length=36,
        default=make_uuid,
        editable=False,
        unique=True,
        help_text="unique id, generated on email submission",
    )
    first_name = models.CharField(
        max_length=36,
    )
    last_name = models.CharField(
        max_length=36,
    )
    username = models.CharField(
        max_length=36,
    )
    email = models.EmailField(
        max_length=41,
        help_text="the potential member's email address",
    )
    # Optional free-text message appended to the invitation email.
    custom_msg = models.TextField(
        blank=True,
    )
    # NOTE(review): auto_now updates on *every* save, not only creation —
    # presumably auto_now_add was intended; confirm before changing.
    date_invited = models.DateField(
        auto_now=True,
        help_text="the day on which the superuser invited the potential member",
    )
    # Permissions/groups to grant once the invitation is accepted.
    permissions = models.ManyToManyField(Permission)
    groups = models.ManyToManyField(Group)
    is_super_user = models.BooleanField(default=False)

    class Meta:
        ordering = ["date_invited"]

    def __unicode__(self):
        # Python 2 string representation: "Last, First: date".
        return "%s, %s: %s" % (
            self.last_name,
            self.first_name,
            self.date_invited)

    def send(self):
        """Sends an invitation email to ``self.email``."""
        subject = 'You have been invited to join the %s' % (
            app_settings.get_service_name())
        # Render the plain-text body; the activation code in the context
        # is what lets the recipient accept the invitation.
        message = render_to_string(
            'invite/invitation_email.txt',
            {
                'domain': Site.objects.get_current().domain,
                'service_name': app_settings.get_service_name(),
                'activation_code': self.activation_code,
                'custom_msg': self.custom_msg,
                'permissions': self.permissions.all()
            }
        )
        send_mail(
            subject,
            message,
            app_settings.INVITE_DEFAULT_FROM_EMAIL,
            [self.email])
class PasswordResetInvitation(Invitation):
    """Password-reset flow reusing the Invitation fields.

    NOTE(review): subclassing the concrete Invitation model gives Django
    multi-table inheritance (an extra joined table) — verify intended.
    """
    def send(self):
        """Sends an invitation email to ``self.email``."""
        subject = 'Password Reset: %s' % (app_settings.get_service_name())
        # The activation_code doubles as the reset code in the template.
        message = render_to_string(
            'invite/reset_email.txt',
            {
                'first_name': self.first_name,
                'username': self.username,
                'domain': Site.objects.get_current().domain,
                'reset_code': self.activation_code,
            }
        )
        send_mail(
            subject,
            message,
            app_settings.INVITE_DEFAULT_FROM_EMAIL,
            [self.email])

    def send_confirm(self):
        """Sends an confirmation email to ``self.email``."""
        subject = 'Password Changed: %s' % (app_settings.get_service_name())
        message = render_to_string(
            'invite/reset_confirm_email.txt',
            {
                'first_name': self.first_name,
                'username': self.username,
                'domain': Site.objects.get_current().domain,
                'reset_code': self.activation_code,
            }
        )
        send_mail(
            subject,
            message,
            app_settings.INVITE_DEFAULT_FROM_EMAIL,
            [self.email])
Create the AbstractInvitation class.
Now extend this class in Invitation and PasswordResetInvitation to
eliminate the multi-table inheritance.
from django.contrib.auth.models import Permission, Group
from django.template.loader import render_to_string
from django.contrib.sites.models import Site
from django.core.mail import send_mail
from django.db import models
import uuid
from . import settings as app_settings
class InviteItem(models.Model):
    '''This is the model used to generate the invitation forms.'''
    first_name = models.CharField(max_length=150)
    last_name = models.CharField(max_length=150)
    email = models.EmailField(max_length=150)
    username = models.CharField(max_length=150)
    # Optional personalised message to include in the invitation email.
    greeting = models.TextField(blank=True)
    # Permissions/groups the invited user should receive on acceptance.
    permissions = models.ManyToManyField(Permission, blank=True)
    groups = models.ManyToManyField(Group, blank=True)
    is_super_user = models.BooleanField(default=False)

    def __unicode__(self):
        # Python 2 string representation: "First Last".
        return self.first_name + ' ' + self.last_name
class AbstractInvitation(models.Model):
    """Shared fields for invitation-style models.

    Abstract base (see Meta) so subclasses each get their own single
    table instead of multi-table inheritance.
    """
    def make_uuid():
        # No ``self``: Django calls the ``default`` callable with no
        # arguments, so this is effectively a static factory.
        return str(uuid.uuid4())
    activation_code = models.CharField(
        max_length=36,
        default=make_uuid,
        editable=False,
        unique=True,
        help_text="unique id, generated on email submission",
    )
    first_name = models.CharField(
        max_length=36,
    )
    last_name = models.CharField(
        max_length=36,
    )
    username = models.CharField(max_length=36)
    email = models.EmailField(
        max_length=41,
        help_text="the potential member's email address",
    )
    # Optional free-text message appended to the invitation email.
    custom_msg = models.TextField(blank=True)
    # NOTE(review): auto_now updates on *every* save, not only creation —
    # presumably auto_now_add was intended; confirm before changing.
    date_invited = models.DateField(
        auto_now=True,
        help_text="the day on which the superuser invited the potential member",
    )
    # Permissions/groups to grant once the invitation is accepted.
    permissions = models.ManyToManyField(Permission)
    groups = models.ManyToManyField(Group)
    is_super_user = models.BooleanField(default=False)

    def __unicode__(self):
        # Python 2 string representation: "Last, First: date".
        return "%s, %s: %s" % (
            self.last_name,
            self.first_name,
            self.date_invited)

    class Meta:
        # abstract: no table for this class; fields are copied into
        # each concrete subclass.
        abstract = True
        ordering = ["date_invited"]
class Invitation(AbstractInvitation):
    """Concrete invitation sent to a prospective member."""
    def send(self):
        """Sends an invitation email to ``self.email``."""
        subject = 'You have been invited to join the %s' % (
            app_settings.get_service_name())
        # Render the plain-text body; the activation code in the context
        # is what lets the recipient accept the invitation.
        message = render_to_string(
            'invite/invitation_email.txt',
            {
                'domain': Site.objects.get_current().domain,
                'service_name': app_settings.get_service_name(),
                'activation_code': self.activation_code,
                'custom_msg': self.custom_msg,
                'permissions': self.permissions.all()
            }
        )
        send_mail(subject, message, app_settings.INVITE_DEFAULT_FROM_EMAIL, [self.email])
class PasswordResetInvitation(AbstractInvitation):
    """Password-reset flow; now a sibling of Invitation (own table)."""
    def send(self):
        """Sends an invitation email to ``self.email``."""
        subject = 'Password Reset: %s' % (app_settings.get_service_name())
        # The activation_code doubles as the reset code in the template.
        message = render_to_string(
            'invite/reset_email.txt',
            {
                'first_name': self.first_name,
                'username': self.username,
                'domain': Site.objects.get_current().domain,
                'reset_code': self.activation_code,
            }
        )
        send_mail(subject, message, app_settings.INVITE_DEFAULT_FROM_EMAIL, [self.email])

    def send_confirm(self):
        """Sends an confirmation email to ``self.email``."""
        subject = 'Password Changed: %s' % (app_settings.get_service_name())
        message = render_to_string(
            'invite/reset_confirm_email.txt',
            {
                'first_name': self.first_name,
                'username': self.username,
                'domain': Site.objects.get_current().domain,
                'reset_code': self.activation_code,
            }
        )
        send_mail(subject, message, app_settings.INVITE_DEFAULT_FROM_EMAIL, [self.email])
|
# -*- coding: utf-8 -*-
#
# image_registration documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 3 08:57:59 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# Optional heavy dependencies: on Read the Docs these may be absent;
# any missing ones are mocked out at the bottom of this file.
try:
    import numpy
except ImportError:
    pass
try:
    import scipy
except ImportError:
    pass
try:
    import image_registration
except ImportError:
    pass
try:
    import numpydoc
    # Single-argument print() is valid in both Python 2 and 3; the old
    # ``print x, y`` statement form is a SyntaxError under Python 3.
    print("numpydoc path is " + numpydoc.__file__)
except ImportError:
    print("Failed to import numpydoc")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, rootpath)
#import numpydoc
#sys.path.insert(0, os.path.split(numpydoc.__file__)[0])
sys.path.insert(0, rootpath+"/doc/sphinxext/")
sys.path.append(os.path.abspath('sphinxext'))
sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig',
'sphinx.ext.viewcode','numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'image_registration'
copyright = u'2012, Adam Ginsburg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'image_registrationdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'image_registration.tex', u'image\\_registration Documentation',
u'Adam Ginsburg', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'image_registration', u'image_registration Documentation',
[u'Adam Ginsburg'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'image_registration', u'image_registration Documentation',
u'Adam Ginsburg', 'image_registration', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
todo_include_todos = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# read the docs mocks
import sys

class Mock(object):
    """Stand-in for modules unavailable on Read the Docs.

    Attribute access yields another Mock, except capitalized names,
    which yield a bare type so class references keep working.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Sphinx probes these when treating a Mock as a module object.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # Capitalized attributes look like classes: hand back a real
        # type so subclassing / isinstance in documented code works.
        if name[0] == name[0].upper():
            return type(name, (), {})
        return Mock()

# Modules to replace with mocks so autodoc can import the package.
MOCK_MODULES = ['numpy', 'scipy', 'pyfits', 'astropy', 'pytest', 'astropy.wcs',
    'astropy.io', 'astropy.io.fits', 'scipy.ndimage', 'pywcs']
sys.modules.update((name, Mock()) for name in MOCK_MODULES)
fixed conf to mock out matplotlib for rtfd
# -*- coding: utf-8 -*-
#
# image_registration documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 3 08:57:59 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os

# Optional heavy dependencies: on Read the Docs these may be absent;
# any missing ones are mocked out at the bottom of this file.
try:
    import numpy
except ImportError:
    pass
try:
    import scipy
except ImportError:
    pass
try:
    import image_registration
except ImportError:
    pass
try:
    import numpydoc
    # Single-argument print() is valid in both Python 2 and 3; the old
    # ``print x, y`` statement form is a SyntaxError under Python 3.
    print("numpydoc path is " + numpydoc.__file__)
except ImportError:
    print("Failed to import numpydoc")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
rootpath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, rootpath)
#import numpydoc
#sys.path.insert(0, os.path.split(numpydoc.__file__)[0])
sys.path.insert(0, rootpath+"/doc/sphinxext/")
sys.path.append(os.path.abspath('sphinxext'))
sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage',
'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig',
'sphinx.ext.viewcode','numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'image_registration'
copyright = u'2012, Adam Ginsburg'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'image_registrationdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'image_registration.tex', u'image\\_registration Documentation',
u'Adam Ginsburg', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'image_registration', u'image_registration Documentation',
[u'Adam Ginsburg'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'image_registration', u'image_registration Documentation',
u'Adam Ginsburg', 'image_registration', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
todo_include_todos = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# read the docs mocks
import sys

class Mock(object):
    """Stand-in for modules unavailable on Read the Docs.

    Attribute access yields another Mock, except capitalized names,
    which yield a bare type so class references keep working.
    """

    def __init__(self, *args, **kwargs):
        pass

    def __call__(self, *args, **kwargs):
        return Mock()

    @classmethod
    def __getattr__(cls, name):
        # Sphinx probes these when treating a Mock as a module object.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # Capitalized attributes look like classes: hand back a real
        # type so subclassing / isinstance in documented code works.
        if name[0] == name[0].upper():
            return type(name, (), {})
        return Mock()

# Modules to replace with mocks so autodoc can import the package
# (matplotlib included so plotting imports don't break the RTD build).
MOCK_MODULES = ['numpy', 'scipy', 'pyfits', 'astropy', 'pytest', 'astropy.wcs',
    'astropy.io', 'astropy.io.fits', 'scipy.ndimage', 'pywcs', 'matplotlib']
sys.modules.update((name, Mock()) for name in MOCK_MODULES)
|
# -*- coding: utf-8 -*-
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from oscar.core.loading import get_model
Country = get_model('address', 'Country')
class Command(BaseCommand):
    """Populate the Country table from pycountry's ISO 3166 data."""
    help = "Populates the list of countries with data from pycountry."

    # TODO: Allow setting locale to fetch country names in right locale
    # https://code.djangoproject.com/ticket/6376
    option_list = BaseCommand.option_list + (
        make_option(
            '--no-shipping',
            # store_false: passing the flag clears is_shipping.
            action='store_false',
            dest='is_shipping',
            default=True,
            help="Don't mark countries for shipping"),
    )

    def handle(self, *args, **options):
        """Create one Country row per pycountry entry in a single bulk insert.

        Raises:
            CommandError: If pycountry is not installed, or countries
                already exist (updating is not supported).
        """
        try:
            import pycountry
        except ImportError:
            raise CommandError(
                "You are missing the pycountry library. Install it with "
                "'pip install pycountry'")
        if Country.objects.exists():
            # Trailing space added: adjacent string literals concatenate,
            # so without it the message read "This commandcurrently ...".
            raise CommandError(
                "You already have countries in your database. This command "
                "currently does not support updating existing countries.")
        countries = [
            Country(
                iso_3166_1_a2=country.alpha2,
                iso_3166_1_a3=country.alpha3,
                iso_3166_1_numeric=country.numeric,
                printable_name=country.name,
                # official_name is absent for some countries; default to ''.
                name=getattr(country, 'official_name', ''),
                is_shipping_country=options['is_shipping'])
            for country in pycountry.countries]
        Country.objects.bulk_create(countries)
        self.stdout.write("Successfully added %s countries." % len(countries))
Correct spacing in command help text
# -*- coding: utf-8 -*-
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from oscar.core.loading import get_model
Country = get_model('address', 'Country')
class Command(BaseCommand):
    """Populate the Country table from pycountry's ISO 3166 data."""
    help = "Populates the list of countries with data from pycountry."

    # TODO: Allow setting locale to fetch country names in right locale
    # https://code.djangoproject.com/ticket/6376
    option_list = BaseCommand.option_list + (
        make_option(
            '--no-shipping',
            # store_false: passing the flag clears is_shipping.
            action='store_false',
            dest='is_shipping',
            default=True,
            help="Don't mark countries for shipping"),
    )

    def handle(self, *args, **options):
        """Create one Country row per pycountry entry in a single bulk insert.

        Raises:
            CommandError: If pycountry is not installed, or countries
                already exist (updating is not supported).
        """
        try:
            import pycountry
        except ImportError:
            raise CommandError(
                "You are missing the pycountry library. Install it with "
                "'pip install pycountry'")
        if Country.objects.exists():
            raise CommandError(
                "You already have countries in your database. This command "
                "currently does not support updating existing countries.")
        countries = [
            Country(
                iso_3166_1_a2=country.alpha2,
                iso_3166_1_a3=country.alpha3,
                iso_3166_1_numeric=country.numeric,
                printable_name=country.name,
                # official_name is absent for some countries; default to ''.
                name=getattr(country, 'official_name', ''),
                is_shipping_country=options['is_shipping'])
            for country in pycountry.countries]
        # Single INSERT for all rows.
        Country.objects.bulk_create(countries)
        self.stdout.write("Successfully added %s countries." % len(countries))
|
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.cluster import KMeans
from sklearn.cluster.k_means_ import (
_init_centroids,
_labels_inertia,
_tolerance,
_validate_center_shape,
)
from sklearn.utils import (
check_array,
check_random_state,
as_float_array,
)
from sklearn.cluster import _k_means
from sklearn.preprocessing import normalize
from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils.extmath import row_norms, squared_norm
def _spherical_kmeans_single_lloyd(X, n_clusters, max_iter=300,
                                   init='k-means++', verbose=False,
                                   x_squared_norms=None,
                                   random_state=None, tol=1e-4,
                                   precompute_distances=True):
    '''
    One run of Lloyd's algorithm for spherical k-means.

    Modified from sklearn.cluster.k_means_.k_means_single_lloyd: the only
    algorithmic change is that cluster centers are l2-normalized after
    each M-step, keeping them on the unit hypersphere.

    Returns a 4-tuple: (best_labels, best_inertia, best_centers,
    number_of_iterations_run).

    NOTE(review): assumes max_iter >= 1 — with max_iter == 0 the loop
    never runs and ``i`` / ``center_shift_total`` would be unbound.
    '''
    random_state = check_random_state(random_state)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignment (E-step)
        # TODO: _labels_inertia should be done with cosine distance
        # since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized
        # this doesn't really matter, except for reported inertia.
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means (M-step); sparse and dense inputs use
        # different sklearn C helpers.
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        # l2-normalize centers (this is the main contribution here)
        centers = normalize(centers)
        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # Track the best state seen so far (inertia can fluctuate).
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Convergence test on the squared movement of all centers.
        center_shift_total = squared_norm(centers_old - centers)
        if center_shift_total <= tol:
            if verbose:
                print("Converged at iteration %d: "
                      "center shift %e within tolerance %e"
                      % (i, center_shift_total, tol))
            break
    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = \
            _labels_inertia(X, x_squared_norms, best_centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
    # i + 1 == number of iterations actually performed.
    return best_labels, best_inertia, best_centers, i + 1
def spherical_k_means(X, n_clusters, init='k-means++', n_init=10,
                      max_iter=300, verbose=False, tol=1e-4, random_state=None,
                      copy_x=True, n_jobs=1, algorithm="auto", return_n_iter=False):
    """Modified from sklearn.cluster.k_means_.k_means.

    Driver for spherical k-means: runs the Lloyd solver ``n_init`` times
    (serially or in parallel depending on ``n_jobs``) and keeps the run
    with the lowest inertia.  Returns ``(centers, labels, inertia)`` or
    ``(centers, labels, inertia, n_iter)`` when ``return_n_iter`` is True.

    NOTE(review): ``algorithm`` is accepted for signature parity with
    sklearn's ``k_means`` but is not used anywhere in this function.
    """
    # Validate restart / iteration counts before doing any work.
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)
    if max_iter <= 0:
        raise ValueError('Number of iterations should be a positive number,'
                         ' got %d instead' % max_iter)
    # Start from +inf so the first completed run always becomes the best.
    # (The later `best_inertia is None` test is therefore vestigial.)
    best_inertia = np.infty
    X = as_float_array(X, copy=copy_x)
    # Turn the relative tolerance into an absolute one based on X's variance.
    tol = _tolerance(X, tol)
    # If explicit initial centers were passed, validate them and force a
    # single run (re-running with identical centers would be pointless).
    if hasattr(init, '__array__'):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)
        if n_init != 1:
            warnings.warn(
                'Explicit initial center position passed: '
                'performing only one init in k-means instead of n_init=%d'
                % n_init, RuntimeWarning, stacklevel=2)
            n_init = 1
    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        # The same RandomState flows through every run, so each restart
        # consumes fresh random draws.
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = _spherical_kmeans_single_lloyd(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                tol=tol, x_squared_norms=x_squared_norms,
                random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_spherical_kmeans_single_lloyd)(X, n_clusters,
                                                    max_iter=max_iter, init=init,
                                                    verbose=verbose, tol=tol,
                                                    x_squared_norms=x_squared_norms,
                                                    # Change seed to ensure variety
                                                    random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]
    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
class SphericalKMeans(KMeans):
    """Spherical K-Means clustering.

    A variant of sklearn.cluster.KMeans in which the cluster centers are
    L2-normalized (projected back onto the unit sphere) after every Lloyd
    update, so centroids stay comparable with unit-normalized samples.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of centroids
        to generate.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init : int, default: 10
        Number of times the k-means algorithm will be run with different
        centroid seeds; the best output (lowest inertia) is kept.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++'.  An ndarray of
        shape (n_clusters, n_features) gives explicit initial centers.
    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence.
    n_jobs : int
        Number of jobs used to run the n_init restarts in parallel.
        -1 uses all CPUs; 1 disables parallelism; below -1 leaves
        (n_cpus + 1 + n_jobs) CPUs idle.
    random_state : integer or numpy.RandomState, optional
        Seed or generator used to initialize the centers.
    verbose : int, default 0
        Verbosity mode.
    copy_x : boolean, default True
        If True, the input data is never modified; if False it may be
        centered in place and restored afterwards, possibly introducing
        small numerical differences.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers.
    labels_ :
        Labels of each point.
    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    """

    def __init__(self, n_clusters=8, init='k-means++', n_init=10,
                 max_iter=300, tol=1e-4, n_jobs=1,
                 verbose=0, random_state=None, copy_x=True):
        # sklearn convention: store constructor arguments verbatim so
        # get_params()/set_params() keep working.
        self.n_clusters = n_clusters
        self.init = init
        self.n_init = n_init
        self.max_iter = max_iter
        self.tol = tol
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Compute spherical k-means clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        y : ignored, present for estimator-API compatibility.
        """
        rng = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        # TODO: add check that all data is unit-normalized
        result = spherical_k_means(
            X,
            n_clusters=self.n_clusters,
            init=self.init,
            n_init=self.n_init,
            max_iter=self.max_iter,
            verbose=self.verbose,
            tol=self.tol,
            random_state=rng,
            copy_x=self.copy_x,
            n_jobs=self.n_jobs,
            return_n_iter=True)
        (self.cluster_centers_, self.labels_,
         self.inertia_, self.n_iter_) = result
        return self
Change comment
import warnings
import numpy as np
import scipy.sparse as sp
from sklearn.cluster import KMeans
from sklearn.cluster.k_means_ import (
_init_centroids,
_labels_inertia,
_tolerance,
_validate_center_shape,
)
from sklearn.utils import (
check_array,
check_random_state,
as_float_array,
)
from sklearn.cluster import _k_means
from sklearn.preprocessing import normalize
from sklearn.externals.joblib import Parallel, delayed
from sklearn.utils.extmath import row_norms, squared_norm
def _spherical_kmeans_single_lloyd(X, n_clusters, max_iter=300,
                                   init='k-means++', verbose=False,
                                   x_squared_norms=None,
                                   random_state=None, tol=1e-4,
                                   precompute_distances=True):
    '''
    Modified from sklearn.cluster.k_means_.k_means_single_lloyd.

    One full Lloyd run for spherical k-means: alternate the standard
    E-step (label assignment) and M-step (cluster means), then
    L2-normalize the centers after each M-step so they stay on the unit
    sphere.  Returns ``(labels, inertia, centers, n_iterations)``.
    '''
    random_state = check_random_state(random_state)
    best_labels, best_inertia, best_centers = None, None, None
    # init
    centers = _init_centroids(X, n_clusters, init, random_state=random_state,
                              x_squared_norms=x_squared_norms)
    if verbose:
        print("Initialization complete")
    # Allocate memory to store the distances for each sample to its
    # closer center for reallocation in case of ties
    distances = np.zeros(shape=(X.shape[0],), dtype=X.dtype)
    # iterations
    for i in range(max_iter):
        centers_old = centers.copy()
        # labels assignment (E-step)
        # TODO: _labels_inertia should be done with cosine distance
        # since ||a - b|| = 2(1 - cos(a,b)) when a,b are unit normalized
        # this doesn't really matter, except for the reported inertia value.
        labels, inertia = \
            _labels_inertia(X, x_squared_norms, centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
        # computation of the means (M-step)
        if sp.issparse(X):
            centers = _k_means._centers_sparse(X, labels, n_clusters,
                                               distances)
        else:
            centers = _k_means._centers_dense(X, labels, n_clusters, distances)
        # l2-normalize centers (this is the main contribution here)
        centers = normalize(centers)
        if verbose:
            print("Iteration %2d, inertia %.3f" % (i, inertia))
        # Track the best assignment seen so far (inertia need not decrease
        # monotonically because of the normalization step above).
        if best_inertia is None or inertia < best_inertia:
            best_labels = labels.copy()
            best_centers = centers.copy()
            best_inertia = inertia
        # Convergence check: how far did the centers move this iteration?
        center_shift_total = squared_norm(centers_old - centers)
        if center_shift_total <= tol:
            if verbose:
                print("Converged at iteration %d: "
                      "center shift %e within tolerance %e"
                      % (i, center_shift_total, tol))
            break
    if center_shift_total > 0:
        # rerun E-step in case of non-convergence so that predicted labels
        # match cluster centers
        best_labels, best_inertia = \
            _labels_inertia(X, x_squared_norms, best_centers,
                            precompute_distances=precompute_distances,
                            distances=distances)
    return best_labels, best_inertia, best_centers, i + 1
def spherical_k_means(X, n_clusters, init='k-means++', n_init=10,
                      max_iter=300, verbose=False, tol=1e-4, random_state=None,
                      copy_x=True, n_jobs=1, algorithm="auto", return_n_iter=False):
    """Modified from sklearn.cluster.k_means_.k_means.

    Driver for spherical k-means: runs the Lloyd solver ``n_init`` times
    (serially or in parallel depending on ``n_jobs``) and keeps the run
    with the lowest inertia.  Returns ``(centers, labels, inertia)`` or
    ``(centers, labels, inertia, n_iter)`` when ``return_n_iter`` is True.

    NOTE(review): ``algorithm`` is accepted for signature parity with
    sklearn's ``k_means`` but is not used anywhere in this function.
    """
    # Validate restart / iteration counts before doing any work.
    if n_init <= 0:
        raise ValueError("Invalid number of initializations."
                         " n_init=%d must be bigger than zero." % n_init)
    random_state = check_random_state(random_state)
    if max_iter <= 0:
        raise ValueError('Number of iterations should be a positive number,'
                         ' got %d instead' % max_iter)
    # Start from +inf so the first completed run always becomes the best.
    # (The later `best_inertia is None` test is therefore vestigial.)
    best_inertia = np.infty
    X = as_float_array(X, copy=copy_x)
    # Turn the relative tolerance into an absolute one based on X's variance.
    tol = _tolerance(X, tol)
    # If explicit initial centers were passed, validate them and force a
    # single run (re-running with identical centers would be pointless).
    if hasattr(init, '__array__'):
        init = check_array(init, dtype=X.dtype.type, copy=True)
        _validate_center_shape(X, n_clusters, init)
        if n_init != 1:
            warnings.warn(
                'Explicit initial center position passed: '
                'performing only one init in k-means instead of n_init=%d'
                % n_init, RuntimeWarning, stacklevel=2)
            n_init = 1
    # precompute squared norms of data points
    x_squared_norms = row_norms(X, squared=True)
    if n_jobs == 1:
        # For a single thread, less memory is needed if we just store one set
        # of the best results (as opposed to one set per run per thread).
        # The same RandomState flows through every run, so each restart
        # consumes fresh random draws.
        for it in range(n_init):
            # run a k-means once
            labels, inertia, centers, n_iter_ = _spherical_kmeans_single_lloyd(
                X, n_clusters, max_iter=max_iter, init=init, verbose=verbose,
                tol=tol, x_squared_norms=x_squared_norms,
                random_state=random_state)
            # determine if these results are the best so far
            if best_inertia is None or inertia < best_inertia:
                best_labels = labels.copy()
                best_centers = centers.copy()
                best_inertia = inertia
                best_n_iter = n_iter_
    else:
        # parallelisation of k-means runs
        seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
        results = Parallel(n_jobs=n_jobs, verbose=0)(
            delayed(_spherical_kmeans_single_lloyd)(X, n_clusters,
                                                    max_iter=max_iter, init=init,
                                                    verbose=verbose, tol=tol,
                                                    x_squared_norms=x_squared_norms,
                                                    # Change seed to ensure variety
                                                    random_state=seed)
            for seed in seeds)
        # Get results with the lowest inertia
        labels, inertia, centers, n_iters = zip(*results)
        best = np.argmin(inertia)
        best_labels = labels[best]
        best_inertia = inertia[best]
        best_centers = centers[best]
        best_n_iter = n_iters[best]
    if return_n_iter:
        return best_centers, best_labels, best_inertia, best_n_iter
    else:
        return best_centers, best_labels, best_inertia
class SphericalKMeans(KMeans):
    """Spherical K-Means clustering.

    A variant of sklearn.cluster.KMeans in which the cluster centers are
    L2-normalized (projected back onto the unit sphere) after every Lloyd
    update, so centroids stay comparable with unit-normalized samples.

    Parameters
    ----------
    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of centroids
        to generate.
    max_iter : int, default: 300
        Maximum number of iterations of the k-means algorithm for a
        single run.
    n_init : int, default: 10
        Number of times the k-means algorithm will be run with different
        centroid seeds; the best output (lowest inertia) is kept.
    init : {'k-means++', 'random' or an ndarray}
        Method for initialization, defaults to 'k-means++'.  An ndarray of
        shape (n_clusters, n_features) gives explicit initial centers.
    tol : float, default: 1e-4
        Relative tolerance with regards to inertia to declare convergence.
    n_jobs : int
        Number of jobs used to run the n_init restarts in parallel.
        -1 uses all CPUs; 1 disables parallelism; below -1 leaves
        (n_cpus + 1 + n_jobs) CPUs idle.
    random_state : integer or numpy.RandomState, optional
        Seed or generator used to initialize the centers.
    verbose : int, default 0
        Verbosity mode.
    copy_x : boolean, default True
        If True, the input data is never modified; if False it may be
        centered in place and restored afterwards, possibly introducing
        small numerical differences.

    Attributes
    ----------
    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers.
    labels_ :
        Labels of each point.
    inertia_ : float
        Sum of distances of samples to their closest cluster center.
    """

    def __init__(self, n_clusters=8, init='k-means++', n_init=10,
                 max_iter=300, tol=1e-4, n_jobs=1,
                 verbose=0, random_state=None, copy_x=True):
        # sklearn convention: store constructor arguments verbatim so
        # get_params()/set_params() keep working.
        self.n_clusters = n_clusters
        self.init = init
        self.n_init = n_init
        self.max_iter = max_iter
        self.tol = tol
        self.verbose = verbose
        self.random_state = random_state
        self.copy_x = copy_x
        self.n_jobs = n_jobs

    def fit(self, X, y=None):
        """Compute spherical k-means clustering.

        Parameters
        ----------
        X : array-like or sparse matrix, shape=(n_samples, n_features)
        y : ignored, present for estimator-API compatibility.
        """
        rng = check_random_state(self.random_state)
        X = self._check_fit_data(X)
        # TODO: add check that all data is unit-normalized
        result = spherical_k_means(
            X,
            n_clusters=self.n_clusters,
            init=self.init,
            n_init=self.n_init,
            max_iter=self.max_iter,
            verbose=self.verbose,
            tol=self.tol,
            random_state=rng,
            copy_x=self.copy_x,
            n_jobs=self.n_jobs,
            return_n_iter=True)
        (self.cluster_centers_, self.labels_,
         self.inertia_, self.n_iter_) = result
        return self
|
# File: ExecutionProcessRP.py ; This file is part of Twister.
# Copyright (C) 2012 , Luxoft
# Authors:
# Andrei Costachi <acostachi@luxoft.com>
# Andrei Toma <atoma@luxoft.com>
# Cristian Constantin <crconstantin@luxoft.com>
# Daniel Cioata <dcioata@luxoft.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import shutil
import time
import xmlrpclib
from subprocess import Popen
from zipfile import ZipFile
import win32com.client
from win32com.client import Dispatch
# -------------------------------------------------------
outDir = os.getcwd()  # Working dir where test files are downloaded/unpacked.
globEpName = 'EP-1001'  # Identity of this Execution Process on the Central Engine.
proxy = xmlrpclib.ServerProxy('http://11.126.32.9:8000/') # Tsc Server
#proxy = xmlrpclib.ServerProxy('http://11.126.32.12:8000/') # Dan Ubuntu
#proxy = xmlrpclib.ServerProxy('http://11.126.32.14:8000/') # Cro Windows
#proxy = xmlrpclib.ServerProxy('http://127.0.0.1:8000/') # VirtualBox VM
# -------------------------------------------------------
# Best-effort connectivity check at startup; a failure is only reported,
# the main loop at the bottom of the file keeps retrying anyway.
try:
    print 'Central Engine Status:', proxy.getExecStatus(globEpName)
    print 'You can start the test from user interface!\n'
except: print 'Cannot connect to Central Engine!'
#
def RUN(tList):
for tcName in tList:
timer_i = time.time()
STATUS = proxy.getExecStatus(globEpName)
if STATUS == 'stopped': # On stop, DIE!
print 'EP::Windows: STOP! Exiting.\n'
return
elif STATUS == 'paused': # On pause, freeze cycle and wait for Resume or Stop
print('EP::Windows: Paused!... Press RESUME to continue, or STOP to exit test suite...')
while 1:
time.sleep(0.5)
STATUS = proxy.getExecStatus(globEpName)
# On resume, stop waiting
if STATUS == 'running' or STATUS == 'resume':
break
# On stop...
elif STATUS == 'stopped': # DIE!
print 'EP::Windows: STOP! Exiting!...\n'
return
print 'EP::Windows: File: %s ...' % tcName
file_ext = os.path.splitext(tcName)[1].lower()
outFile = os.path.split(tcName)[1] # Exec file
# Ignores non-sikuli/ selenium/ testcomplete files
if file_ext != '.zip' and file_ext != '.py' and file_ext != '.testcomplete':
print 'EP::Windows: ... file ignored.\n'
proxy.setFileStatus(globEpName, tcName, 4) # Send status SKIPPED
continue
else:
proxy.setFileStatus(globEpName, tcName, 1) # Send status WORKING
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
if file_ext == '.zip':
with open(outDir + os.sep + outFile, "wb") as handle:
handle.write(proxy.getTestFile(globEpName, tcName).data)
with ZipFile(outDir + os.sep + outFile, 'r') as handle:
handle.extractall(outDir)
#
# Sikuli file and folder
toExecute = outDir + os.sep + os.path.splitext(outFile)[0] + '.skl'
toDelete = outDir + os.sep + os.path.splitext(outFile)[0] + '.sikuli'
if not os.path.exists(toExecute) and not os.path.exists(toDelete):
print 'EP::Sikuli: Cannot find sikuli file and folder!'
print(toExecute)
print(toDelete)
#
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
elif file_ext == '.testcomplete':
with open(outDir + os.sep + outFile, "wb") as handle:
handle.write(proxy.getTestFile(globEpName, tcName).data)
with ZipFile(outDir + os.sep + outFile, 'r') as handle:
handle.extractall(outDir) # This is a FOLDER !
#
# Testcomplete files
toExecute = outDir + os.sep + os.path.splitext(outFile)[0] + os.sep + os.path.splitext(outFile)[0] + '.pjs'
toDelete = outDir + os.sep + os.path.splitext(outFile)[0]
if not os.path.exists(toExecute) and not os.path.exists(toDelete):
print 'EP::Testcomplete: Cannot find testcomplete files!'
print(toExecute)
print(toDelete)
#
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
elif file_ext == '.py':
outPython = outDir + os.sep + outFile
with open(outPython, "wb") as handle:
print 'EP::Selenium: Writing selenium file `%s`.' % outPython
handle.write(proxy.getTestFile(globEpName, tcName).data)
proxy.logMessage('logRunning', 'EP::Windows: Executing file `%s`...\n' % toExecute)
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
if file_ext == '.zip':
tcr_proc = Popen('"C:\Program Files\Sikuli X\Sikuli-ide.bat" -r "%s"' % toExecute, shell=True)
ret = tcr_proc.wait()
#
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
elif file_ext == '.testcomplete':
try:
TestCompleteObject = Dispatch('TestComplete.TestCompleteApplication')
TestCompleteObject.Visible = 1
IntegrationObject = TestCompleteObject.Integration
except:
print('EP::TestComplete: Cannot create COM Object!')
TestCompleteObject = None
IntegrationObject = None
print('Debug: COM object:', TestCompleteObject)
print('Debug: COM integr:', IntegrationObject)
if TestCompleteObject:
IntegrationObject.OpenProjectSuite(toExecute)
if not IntegrationObject.IsProjectSuiteOpened():
print('EP::TestComplete: The project suite was not opened!')
TestCompleteObject.Quit()
TestCompleteObject = None
IntegrationObject = None
exit(1)
if TestCompleteObject:
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Project name must be loaded from some config file
IntegrationObject.RunProject('Project5')
while IntegrationObject.IsRunning():
pass
ret = IntegrationObject.GetLastResultDescription().Status
print('EP::TestComplete: Test status:', ret)
TestCompleteObject.Quit()
TestCompleteObject = None
IntegrationObject = None
os.system('taskkill /F /IM testcomplete.exe /T')
#
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
elif file_ext == '.py':
tcr_proc = Popen('"C:\Python27\python.exe" -B "%s"' % outPython, shell=True)
ret = tcr_proc.wait()
proxy.logMessage('logRunning',
'EP::Windows: Finished execution of file `%s`, return code is `%i`.\n\n' % (toExecute, ret))
timer_f = time.time() - timer_i
if ret:
proxy.setFileStatus(globEpName, tcName, 3, timer_f) # Status FAIL
else:
proxy.setFileStatus(globEpName, tcName, 2, timer_f) # Status PASS
# Cleanup !
if file_ext == '.zip':
try: os.remove(outDir + os.sep + outFile)
except: print 'EP::Sikuli: Cannot cleanup %s!\n' % (outDir + os.sep + outFile)
try: os.remove(toExecute)
except: print 'EP::Sikuli: Cannot cleanup %s!\n' % toExecute
try: shutil.rmtree(path=toDelete, ignore_errors=True)
except: print 'EP::Sikuli: Cannot cleanup %s!\n' % toDelete
#
elif file_ext == '.testcomplete':
try: os.remove(outDir + os.sep + outFile)
except: print 'EP::Testcomplete: Cannot cleanup %s!\n' % (outDir + os.sep + outFile)
try: os.remove(toExecute)
except: print 'EP::Testcomplete: Cannot cleanup %s!\n' % toExecute
try: shutil.rmtree(path=toDelete, ignore_errors=True)
except: print 'EP::Testcomplete: Cannot cleanup %s!\n' % toDelete
#
elif file_ext == '.py':
try: os.remove(outDir + os.sep + outFile)
except: print 'EP::Python: Cannot cleanup %s!\n' % (outDir + os.sep + outFile)
#
#
errMsg = True  # True while the CE link is (believed) up; gates the warning prints.
# Run forever
while 1:
    try:
        # Try to get status from CE!
        STATUS = proxy.getExecStatus(globEpName)
        if not errMsg:
            # We were previously disconnected; announce the recovery once.
            print('EP warning: Central Engine is running. Reconnected successfully.')
            errMsg = True
    except:
        STATUS = False
        if errMsg:
            # Announce the outage once, then retry quietly.
            print('EP warning: Central Engine is down. Trying to reconnect...')
            errMsg = False
        # Wait and retry...
        time.sleep(2)
        continue
    if STATUS == 'running':
        print('EP debug: Starting the runner!!!')
        tList = proxy.getEpFiles(globEpName)
        RUN(tList)
        proxy.setExecStatus(globEpName, 0) # Set EpId status STOP
    time.sleep(1)
Fixed Windows EP. I think. :)
# File: ExecutionProcessRP.py ; This file is part of Twister.
# Copyright (C) 2012 , Luxoft
# Authors:
# Andrei Costachi <acostachi@luxoft.com>
# Andrei Toma <atoma@luxoft.com>
# Cristian Constantin <crconstantin@luxoft.com>
# Daniel Cioata <dcioata@luxoft.com>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import time
import shutil
import time
import xmlrpclib
from subprocess import Popen
from zipfile import ZipFile
import win32com.client
from win32com.client import Dispatch
# -------------------------------------------------------
outDir = os.getcwd()  # Working dir where test files are downloaded/unpacked.
globEpName = 'EP-1001'  # Identity of this Execution Process on the Central Engine.
proxy = xmlrpclib.ServerProxy('http://11.126.32.9:8000/') # Tsc Server
#proxy = xmlrpclib.ServerProxy('http://11.126.32.12:8000/') # Dan Ubuntu
#proxy = xmlrpclib.ServerProxy('http://11.126.32.14:8000/') # Cro Windows
#proxy = xmlrpclib.ServerProxy('http://127.0.0.1:8000/') # VirtualBox VM
# -------------------------------------------------------
# Best-effort connectivity check at startup; a failure is only reported,
# the main loop at the bottom of the file keeps retrying anyway.
try:
    print 'Central Engine Status:', proxy.getExecStatus(globEpName)
    print 'You can start the test from user interface!\n'
except: print 'Cannot connect to Central Engine!'
#
def RUN(tList):
for i in range(len(tList)):
tcId = tList[i]
tcName = proxy.getFileVariable(tcId, 'file')
timer_i = time.time()
STATUS = proxy.getExecStatus(globEpName)
if STATUS == 'stopped': # On stop, DIE!
print 'EP::Windows: STOP! Exiting.\n'
return
elif STATUS == 'paused': # On pause, freeze cycle and wait for Resume or Stop
print('EP::Windows: Paused!... Press RESUME to continue, or STOP to exit test suite...')
while 1:
time.sleep(0.5)
STATUS = proxy.getExecStatus(globEpName)
# On resume, stop waiting
if STATUS == 'running' or STATUS == 'resume':
break
# On stop...
elif STATUS == 'stopped': # DIE!
print 'EP::Windows: STOP! Exiting!...\n'
return
print 'EP::Windows: File: %s ...' % tcName
file_ext = os.path.splitext(tcName)[1].lower()
outFile = os.path.split(tcName)[1] # Exec file
# Ignores non-sikuli/ selenium/ testcomplete files
if file_ext != '.zip' and file_ext != '.py' and file_ext != '.testcomplete':
print 'EP::Windows: ... file ignored.\n'
proxy.setFileStatus(globEpName, tcId, 4) # Send status SKIPPED
continue
else:
proxy.setFileStatus(globEpName, tcId, 1) # Send status WORKING
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
if file_ext == '.zip':
with open(outDir + os.sep + outFile, "wb") as handle:
handle.write(proxy.getTestFile(globEpName, tcId).data)
with ZipFile(outDir + os.sep + outFile, 'r') as handle:
handle.extractall(outDir)
#
# Sikuli file and folder
toExecute = outDir + os.sep + os.path.splitext(outFile)[0] + '.skl'
toDelete = outDir + os.sep + os.path.splitext(outFile)[0] + '.sikuli'
if not os.path.exists(toExecute) and not os.path.exists(toDelete):
print 'EP::Sikuli: Cannot find sikuli file and folder!'
print(toExecute)
print(toDelete)
#
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
elif file_ext == '.testcomplete':
with open(outDir + os.sep + outFile, "wb") as handle:
handle.write(proxy.getTestFile(globEpName, tcId).data)
with ZipFile(outDir + os.sep + outFile, 'r') as handle:
handle.extractall(outDir) # This is a FOLDER !
#
# Testcomplete files
toExecute = outDir + os.sep + os.path.splitext(outFile)[0] + os.sep + os.path.splitext(outFile)[0] + '.pjs'
toDelete = outDir + os.sep + os.path.splitext(outFile)[0]
if not os.path.exists(toExecute) and not os.path.exists(toDelete):
print 'EP::Testcomplete: Cannot find testcomplete files!'
print(toExecute)
print(toDelete)
#
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
elif file_ext == '.py':
outPython = outDir + os.sep + outFile
with open(outPython, "wb") as handle:
print 'EP::Selenium: Writing selenium file `%s`.' % outPython
handle.write(proxy.getTestFile(globEpName, tcId).data)
proxy.logMessage('logRunning', 'EP::Windows: Executing file `%s`...\n' % toExecute)
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
if file_ext == '.zip':
tcr_proc = Popen('"C:\Program Files\Sikuli X\Sikuli-ide.bat" -r "%s"' % toExecute, shell=True)
ret = tcr_proc.wait()
#
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
elif file_ext == '.testcomplete':
try:
TestCompleteObject = Dispatch('TestComplete.TestCompleteApplication')
TestCompleteObject.Visible = 1
IntegrationObject = TestCompleteObject.Integration
except:
print('EP::TestComplete: Cannot create COM Object!')
TestCompleteObject = None
IntegrationObject = None
print('Debug: COM object:', TestCompleteObject)
print('Debug: COM integr:', IntegrationObject)
if TestCompleteObject:
IntegrationObject.OpenProjectSuite(toExecute)
if not IntegrationObject.IsProjectSuiteOpened():
print('EP::TestComplete: The project suite was not opened!')
TestCompleteObject.Quit()
TestCompleteObject = None
IntegrationObject = None
exit(1)
if TestCompleteObject:
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# Project name must be loaded from some config file
IntegrationObject.RunProject('Project5')
while IntegrationObject.IsRunning():
pass
ret = IntegrationObject.GetLastResultDescription().Status
print('EP::TestComplete: Test status:', ret)
TestCompleteObject.Quit()
TestCompleteObject = None
IntegrationObject = None
os.system('taskkill /F /IM testcomplete.exe /T')
#
# ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- -----
#
elif file_ext == '.py':
tcr_proc = Popen('"C:\Python27\python.exe" -B "%s"' % outPython, shell=True)
ret = tcr_proc.wait()
proxy.logMessage('logRunning',
'EP::Windows: Finished execution of file `%s`, return code is `%i`.\n\n' % (toExecute, ret))
timer_f = time.time() - timer_i
if ret:
proxy.setFileStatus(globEpName, tcId, 3, timer_f) # Status FAIL
else:
proxy.setFileStatus(globEpName, tcId, 2, timer_f) # Status PASS
# Cleanup !
if file_ext == '.zip':
try: os.remove(outDir + os.sep + outFile)
except: print 'EP::Sikuli: Cannot cleanup %s!\n' % (outDir + os.sep + outFile)
try: os.remove(toExecute)
except: print 'EP::Sikuli: Cannot cleanup %s!\n' % toExecute
try: shutil.rmtree(path=toDelete, ignore_errors=True)
except: print 'EP::Sikuli: Cannot cleanup %s!\n' % toDelete
#
elif file_ext == '.testcomplete':
try: os.remove(outDir + os.sep + outFile)
except: print 'EP::Testcomplete: Cannot cleanup %s!\n' % (outDir + os.sep + outFile)
try: os.remove(toExecute)
except: print 'EP::Testcomplete: Cannot cleanup %s!\n' % toExecute
try: shutil.rmtree(path=toDelete, ignore_errors=True)
except: print 'EP::Testcomplete: Cannot cleanup %s!\n' % toDelete
#
elif file_ext == '.py':
try: os.remove(outDir + os.sep + outFile)
except: print 'EP::Python: Cannot cleanup %s!\n' % (outDir + os.sep + outFile)
#
#
errMsg = True  # True while the CE link is (believed) up; gates the warning prints.
# Run forever
while 1:
    try:
        # Try to get status from CE!
        STATUS = proxy.getExecStatus(globEpName)
        if not errMsg:
            # We were previously disconnected; announce the recovery once.
            print('EP warning: Central Engine is running. Reconnected successfully.')
            errMsg = True
    except:
        STATUS = False
        if errMsg:
            # Announce the outage once, then retry quietly.
            print('EP warning: Central Engine is down. Trying to reconnect...')
            errMsg = False
        # Wait and retry...
        time.sleep(2)
        continue
    if STATUS == 'running':
        print('EP debug: Starting the runner!!!')
        tList = proxy.getEpFiles(globEpName)
        RUN(tList)
        proxy.setExecStatus(globEpName, 0) # Set EpId status STOP
    time.sleep(2)
|
import copy
import inspect
import json
import os
from os.path import join, splitext, expanduser
try:
from .vendor import six
if six.PY3:
from .vendor import yaml3 as yaml
else:
from .vendor import yaml2 as yaml
except ImportError:
# Use system modules
import six
if six.PY3:
import yaml3 as yaml
else:
import yaml2 as yaml
# Version-specific implementation of load_source(): read a Python file and
# hand back its namespace as a plain dict (empty dict if the file is absent).
if six.PY3:
    try:
        from importlib.machinery import SourceFileLoader
    except ImportError: # PyPy3
        from importlib._bootstrap import _SourceFileLoader as SourceFileLoader
    def load_source(name, path):
        """Execute the module at ``path`` and return vars() of it as a dict.

        ``name`` is unused; the module is always loaded under the name 'mod'.
        """
        if not os.path.exists(path):
            return {}
        # NOTE(review): load_module() is deprecated in favour of
        # exec_module(); kept here for compatibility with old importlib.
        return vars(SourceFileLoader('mod', path).load_module())
else:
    import imp
    def load_source(name, path):
        """Python 2 variant of load_source, built on the ``imp`` module."""
        if not os.path.exists(path):
            return {}
        return vars(imp.load_source('mod', path))
from .env import Environment
from .exceptions import UnknownFileType
from .util import debug
from .platform import WINDOWS
class DataProxy(object):
    """
    Helper class implementing nested dict+attr access for `.Config`.
    Specifically, is used both for `.Config` itself, and to wrap any other
    dicts assigned as config values (recursively).
    .. warning::
        All methods (of this object or in subclasses) must take care to
        initialize new attributes via ``object.__setattr__``, or they'll run
        into recursion errors!
    """
    # Attributes which get proxied through to inner merged-dict config obj.
    _proxies = tuple("""
        clear
        get
        has_key
        items
        iteritems
        iterkeys
        itervalues
        keys
        popitem
        setdefault
        update
        values
    """.split()) + tuple("__{0}__".format(x) for x in """
        cmp
        contains
        iter
        sizeof
    """.split())
    @classmethod
    def from_data(cls, data, root=None, keypath=None):
        """
        Alternate constructor for 'baby' DataProxies used as sub-dict values.
        Allows creating standalone DataProxy objects while also letting
        subclasses like `.Config` define their own ``__init__``s without
        muddling the two.
        :param dict data:
            This particular DataProxy's personal data. Required, it's the Data
            being Proxied.
        :param root:
            Optional handle on a root DataProxy/Config which needs notification
            on data updates.
        :param tuple keypath:
            Optional tuple describing the path of keys leading to this
            DataProxy's location inside the ``root`` structure. Required if
            ``root`` was given (and vice versa.)
        """
        obj = cls()
        # Use object.__setattr__ to dodge our own __setattr__, which would
        # otherwise treat these as config-key writes (see class warning).
        object.__setattr__(obj, '_config', data)
        object.__setattr__(obj, '_root', root)
        if keypath is None:
            keypath = tuple()
        object.__setattr__(obj, '_keypath', keypath)
        return obj
    def __getattr__(self, key):
        """
        Attribute access falls through to config-key lookup.

        Failing that, dict-protocol method names listed in ``_proxies`` are
        forwarded to the underlying ``_config`` dict; anything else raises an
        `AttributeError` listing valid keys and attributes.
        """
        # NOTE: due to default Python attribute-lookup semantics, "real"
        # attributes will always be yielded on attribute access and this method
        # is skipped. That behavior is good for us (it's more intuitive than
        # having a config key accidentally shadow a real attribute or method).
        try:
            return self._get(key)
        except KeyError:
            # Proxy most special vars to config for dict procotol.
            if key in self._proxies:
                return getattr(self._config, key)
            # Otherwise, raise useful AttributeError to follow getattr proto.
            err = "No attribute or config key found for {0!r}".format(key)
            attrs = [x for x in dir(self.__class__) if not x.startswith('_')]
            err += "\n\nValid keys: {0!r}".format(
                sorted(list(self._config.keys()))
            )
            err += "\n\nValid real attributes: {0!r}".format(attrs)
            raise AttributeError(err)
    def __setattr__(self, key, value):
        """
        Attribute writes become config-key writes unless a real attribute
        with the same name already exists.
        """
        # Turn attribute-sets into config updates anytime we don't have a real
        # attribute with the given name/key.
        has_real_attr = key in (x[0] for x in inspect.getmembers(self))
        if not has_real_attr:
            # Make sure to trigger our own __setitem__ instead of going direct
            # to our internal dict/cache
            self[key] = value
        else:
            super(DataProxy, self).__setattr__(key, value)
    def __iter__(self):
        # For some reason Python is ignoring our __hasattr__ when determining
        # whether we support __iter__. BOO
        return iter(self._config)
    def __eq__(self, other):
        """Compare equal to other DataProxies (by data) or to plain dicts."""
        # NOTE: Can't proxy __eq__ because the RHS will always be an obj of the
        # current class, not the proxied-to class, and that causes
        # NotImplemented.
        # Try comparing to other objects like ourselves, falling back to a not
        # very comparable value (None) so comparison fails.
        other_val = getattr(other, '_config', None)
        # But we can compare to vanilla dicts just fine, since our _config is
        # itself just a dict.
        if isinstance(other, dict):
            other_val = other
        return self._config == other_val
    # Make unhashable, because our entire raison d'etre is to be somewhat
    # mutable. Subclasses with mutable attributes may override this.
    # NOTE: this is mostly a concession to Python 2, v3 does it automatically.
    __hash__ = None
    def __len__(self):
        return len(self._config)
    def is_leaf(self):
        """
        Whether this proxy is a sub-dict proxy, i.e. was built via `from_data`
        with a ``_root`` handle attached.
        """
        return hasattr(self, '_root')
    def is_root(self):
        """
        Whether this proxy is a root object, detected by the presence of a
        ``_modify`` method (as on `.Config`).
        """
        # TODO: also callable()? meh
        return hasattr(self, '_modify')
    def pop(self, *args):
        """
        Standard ``dict.pop`` semantics, plus deletion bookkeeping so the
        removal survives future config re-merges.
        """
        # Must test this up front before (possibly) mutating self._config
        key_existed = args and args[0] in self._config
        # We always have a _config (whether it's a real dict or a cache of
        # merged levels) so we can fall back to it for all the corner case
        # handling re: args (arity, handling a default, raising KeyError, etc)
        ret = self._config.pop(*args)
        # If it looks like no popping occurred (key wasn't there), presumably
        # user gave default, so we can short-circuit return here - no need to
        # track a deletion that did not happen.
        if not key_existed:
            return ret
        # Here, we can assume at least the 1st posarg (key) existed.
        key = args[0]
        if self.is_leaf():
            # Bookkeeping, via our root
            self._root._remove(self._keypath, key)
        elif self.is_root():
            # Bookkeeping, via ourselves
            self._remove(tuple(), key)
        # In all cases, return the popped value.
        return ret
    def __delitem__(self, key):
        """Delete ``key`` from our data, recording the deletion when possible."""
        key_existed = key in self
        del self._config[key]
        # TODO: bet this can be tightened further by just ensuring
        # self._keypath defaults to empty tuple; then can simply do (self._root
        # if self.is_leaf() else self)._remove(self._keypath, key)
        # TODO: and then further, can just define ._get_root()
        # TODO: and in fact we could presumably just define _remove()...? which
        # is presently just on Config? that gets us back to "This needs more
        # class reorg" territory tbh
        if self.is_leaf():
            self._root._remove(self._keypath, key)
        elif self.is_root():
            self._remove(tuple(), key)
    def __setitem__(self, key, value):
        """
        Dict-style write; routes the update to the root object's modification
        tracking when one is available.
        """
        # If we appear to be a non-root DataProxy, modify our _config so that
        # anybody keeping a reference to us sees the update, and also tell our
        # root object so it can track our modifications centrally (& trigger
        # cache updating, etc)
        if getattr(self, '_root', None):
            self._config[key] = value
            self._root._modify(self._keypath, key, value)
        else:
            # If we've got no _root, but we have a 'modify', we're probably a
            # root/Config ourselves; so just call modify with an empty
            # keypath. (We do _not_ want to touch _config here as it would be
            # the config cache.)
            if hasattr(self, '_modify') and callable(self._modify):
                self._modify(tuple(), key, value)
            # If we've got no _root and no _modify(), we're some other rooty
            # proxying object that isn't a Config, such as a Context. So we
            # just update _config and assume it'll do the needful.
            # TODO: this is getting very hairy which is a sign the object
            # responsibilities need changing...sigh
            else:
                self._config[key] = value
    def __getitem__(self, key):
        """Dict-style read; see `_get` for the wrapping behavior."""
        return self._get(key)
    def _get(self, key):
        """
        Core lookup: fetch ``key`` from ``_config``, wrapping dict values in
        sub-``DataProxy`` objects that know their root and keypath.
        """
        # Short-circuit if pickling/copying mechanisms are asking if we've got
        # __setstate__ etc; they'll ask this w/o calling our __init__ first, so
        # we'd be in a RecursionError-causing catch-22 otherwise.
        if key in (
            '__setstate__',
        ):
            raise AttributeError(key)
        # At this point we should be able to assume a self._config...
        value = self._config[key]
        if isinstance(value, dict):
            # New object's keypath is simply the key, prepended with our own
            # keypath if we've got one.
            keypath = (key,)
            if hasattr(self, '_keypath'):
                keypath = self._keypath + keypath
            # If we have no _root, we must be the root, so it's us. Otherwise,
            # pass along our handle on the root.
            root = getattr(self, '_root', self)
            value = DataProxy.from_data(
                data=value,
                root=root,
                keypath=keypath,
            )
        return value
    def _set(self, **kwargs):
        """
        Convenience workaround of default 'attrs are config keys' behavior.
        Good for initializing new attributes; is a bit less verbose than
        slapping ``object.__setattr__()`` everywhere.
        """
        for key, value in six.iteritems(kwargs):
            object.__setattr__(self, key, value)
    def __str__(self):
        return "<{0}: {1}>".format(self.__class__.__name__, self._config)
    def __unicode__(self):
        return unicode(self.__str__())  # noqa
    def __repr__(self):
        # TODO: something more useful? Not an easy object to reconstruct from a
        # stringrep.
        return self.__str__()
    def __contains__(self, key):
        return key in self._config
class Config(DataProxy):
    """
    Invoke's primary configuration handling class.
    See :doc:`/concepts/configuration` for details on the configuration system
    this class implements, including the :ref:`configuration hierarchy
    <config-hierarchy>`. The rest of this class' documentation assumes
    familiarity with that document.
    **Access**
    Configuration values may be accessed and/or updated using dict syntax::
        config['foo']
    or attribute syntax::
        config.foo
    Nesting works the same way - dict config values are turned into objects
    which honor both the dictionary protocol and the attribute-access method::
        config['foo']['bar']
        config.foo.bar
    **A note about attribute access and methods**
    This class implements the entire dictionary protocol: methods such as
    ``keys``, ``values``, ``items``, ``pop`` and so forth should all function
    as they do on regular dicts. It also implements new config-specific methods
    such as `.load_files`, `.load_collection` and `.clone`.
    .. warning::
        Accordingly, this means that if you have configuration options sharing
        names with these methods, you **must** use dictionary syntax (e.g.
        ``myconfig['keys']``) to access the configuration data.
    **Lifecycle**
    At initialization time, `.Config`:
    - creates per-level data structures
    - stores levels supplied to `__init__`, such as defaults or overrides, as
      well as the various config file paths/prefixes
    - loads system, user and project level config files, if found
    At this point, `.Config` is fully usable, but in most real-world use cases,
    the CLI machinery (or library users) do additional work on a per-task
    basis:
    - the result of CLI argument parsing is applied to the overrides level
    - a runtime config file is loaded, if its flag was supplied
    - the base config is cloned (so tasks don't inadvertently affect one
      another)
    - per-collection data is loaded (only possible now that we have a task in
      hand)
    - shell environment data is loaded (must be done at end of process due to
      using the rest of the config as a guide for interpreting env var names)
    Any modifications made directly to the `.Config` itself (usually, after it
    has been handed to the task or other end-user code) end up stored in their
    own (topmost) config level, making it easy to debug final values.
    Finally, any *deletions* made to the `.Config` (e.g. applications of
    dict-style mutators like ``pop``, ``clear`` etc) are tracked in their own
    structure, allowing the overall object to honor such method calls despite
    the source data itself not changing.
    """
    @staticmethod
    def global_defaults():
        """
        Return the core default settings for Invoke.
        Generally only for use by `.Config` internals. For descriptions of
        these values, see :ref:`default-values`.
        Subclasses may choose to override this method, calling
        ``Config.global_defaults`` and applying `.merge_dicts` to the result,
        to add to or modify these values.
        """
        return {
            # TODO: we document 'debug' but it's not truly implemented outside
            # of env var and CLI flag. If we honor it, we have to go around and
            # figure out at what points we might want to call
            # `util.enable_logging`:
            # - just using it as a fallback default for arg parsing isn't much
            # use, as at that point the config holds nothing but defaults & CLI
            # flag values
            # - doing it at file load time might be somewhat useful, though
            # where this happens may be subject to change soon
            # - doing it at env var load time seems a bit silly given the
            # existing support for at-startup testing for INVOKE_DEBUG
            # 'debug': False,
            'run': {
                'warn': False,
                'hide': None,
                'shell': '/bin/bash',
                'pty': False,
                'fallback': True,
                'env': {},
                'replace_env': False,
                'echo': False,
                'encoding': None,
                'out_stream': None,
                'err_stream': None,
                'in_stream': None,
                'watchers': [],
                'echo_stdin': None,
            },
            'sudo': {
                'prompt': "[sudo] password: ",
                'password': None,
            },
            'tasks': {
                'dedupe': True,
            },
        }
    def __init__(
        self,
        defaults=None,
        overrides=None,
        system_prefix=None,
        user_prefix=None,
        project_home=None,
        env_prefix=None,
        runtime_path=None,
        defer_post_init=False,
    ):
        """
        Creates a new config object.
        :param dict defaults:
            A dict containing default (lowest level) config data. Default:
            `global_defaults`.
        :param dict overrides:
            A dict containing override-level config data. Default: ``{}``.
        :param str system_prefix:
            Path & partial filename for the global config file location. Should
            include everything but the dot & file extension.
            Default: ``/etc/invoke`` (e.g. ``/etc/invoke.yaml`` or
            ``/etc/invoke.json``).
        :param str user_prefix:
            Like ``system_prefix`` but for the per-user config file.
            Default: ``~/.invoke`` (e.g. ``~/.invoke.yaml``).
        :param str project_home:
            Optional directory path location of the currently loaded
            `.Collection` (as loaded by `.Loader`). When non-empty, will
            trigger seeking of per-project config files in this location +
            ``invoke.(yaml|json|py)``.
        :param str env_prefix:
            Environment variable seek prefix; optional, defaults to ``None``.
            When not ``None``, only environment variables beginning with this
            value will be loaded. If it is set, the keys will have the prefix
            stripped out before processing, so e.g. ``env_prefix='INVOKE_'``
            means users must set ``INVOKE_MYSETTING`` in the shell to affect
            the ``"mysetting"`` setting.
        :param str runtime_path:
            Optional file path to a runtime configuration file.
            Used to fill the penultimate slot in the config hierarchy. Should
            be a full file path to an existing file, not a directory path, or a
            prefix.
        :param bool defer_post_init:
            Whether to defer certain steps at the end of `__init__`.
            Specifically, the `post_init` method is normally called
            automatically, and performs initial actions like loading config
            files. Advanced users may wish to call that method manually after
            manipulating the object; to do so, specify
            ``defer_post_init=True``.
            Default: ``False``.
        """
        # NOTE: all state is initialized via DataProxy._set (which goes
        # through object.__setattr__) so these writes don't get mistaken for
        # config-key updates.
        # Technically an implementation detail - do not expose in public API.
        # Stores merged configs and is accessed via DataProxy.
        self._set(_config={})
        # Config file suffixes to search, in preference order.
        self._set(_file_suffixes=('yaml', 'json', 'py'))
        # Default configuration values, typically a copy of `global_defaults`.
        if defaults is None:
            defaults = copy_dict(self.global_defaults())
        self._set(_defaults=defaults)
        # Collection-driven config data, gathered from the collection tree
        # containing the currently executing task.
        self._set(_collection={})
        # Path prefix searched for the system config file.
        # NOTE: There is no default system prefix on Windows.
        if system_prefix is None and not WINDOWS:
            system_prefix = '/etc/invoke'
        self._set(_system_prefix=system_prefix)
        # Path to loaded system config file, if any.
        self._set(_system_path=None)
        # Whether the system config file has been loaded or not (or ``None`` if
        # no loading has been attempted yet.)
        self._set(_system_found=None)
        # Data loaded from the system config file.
        self._set(_system={})
        # Path prefix searched for per-user config files.
        if user_prefix is None:
            user_prefix = '~/.invoke'
        self._set(_user_prefix=user_prefix)
        # Path to loaded user config file, if any.
        self._set(_user_path=None)
        # Whether the user config file has been loaded or not (or ``None`` if
        # no loading has been attempted yet.)
        self._set(_user_found=None)
        # Data loaded from the per-user config file.
        self._set(_user={})
        # Parent directory of the current root tasks file, if applicable.
        self._set(_project_home=project_home)
        # And a normalized prefix version not really publicly exposed
        project_prefix = None
        if self._project_home is not None:
            project_prefix = join(project_home, 'invoke')
        self._set(_project_prefix=project_prefix)
        # Path to loaded per-project config file, if any.
        self._set(_project_path=None)
        # Whether the project config file has been loaded or not (or ``None``
        # if no loading has been attempted yet.)
        self._set(_project_found=None)
        # Data loaded from the per-project config file.
        self._set(_project={})
        # Environment variable name prefix
        if env_prefix is None:
            env_prefix = ''
        self._set(_env_prefix=env_prefix)
        # Config data loaded from the shell environment.
        self._set(_env={})
        # Path to the user-specified runtime config file.
        self._set(_runtime_path=runtime_path)
        # Data loaded from the runtime config file.
        self._set(_runtime={})
        # Whether the runtime config file has been loaded or not (or ``None``
        # if no loading has been attempted yet.)
        self._set(_runtime_found=None)
        # Overrides - highest normal config level. Typically filled in from
        # command-line flags.
        if overrides is None:
            overrides = {}
        self._set(_overrides=overrides)
        # Absolute highest level: user modifications.
        self._set(_modifications={})
        # And its sibling: user deletions. (stored as a flat dict of keypath
        # keys and dummy values, for constant-time membership testing/removal
        # w/ no messy recursion. TODO: maybe redo _everything_ that way? in
        # _modifications and other levels, the values would of course be
        # valuable and not just None)
        self._set(_deletions={})
        if not defer_post_init:
            self.post_init()
    def post_init(self):
        """
        Call setup steps that can occur immediately after `__init__`.
        May need to be manually called if `__init__` was told
        ``defer_post_init=True``.
        :returns: ``None``.
        """
        self.load_files()
        # TODO: just use a decorator for merging probably? shrug
        self.merge()
    def _modify(self, keypath, key, value):
        """
        Update our user-modifications config level with new data.
        :param tuple keypath:
            The key path identifying the sub-dict being updated. May be an
            empty tuple if the update is occurring at the topmost level.
        :param str key:
            The actual key receiving an update.
        :param value:
            The value being written.
        """
        # First, ensure we wipe the keypath from _deletions, in case it was
        # previously deleted-at-runtime....
        self._deletions.pop(keypath + (key,), None)
        # Now we can add it to the modifications structure.
        data = self._modifications
        keypath = list(keypath)
        while keypath:
            subkey = keypath.pop(0)
            # TODO: could use defaultdict here, but...meh?
            if subkey not in data:
                # TODO: generify this and the subsequent 3 lines...
                data[subkey] = {}
            data = data[subkey]
        data[key] = value
        self.merge()
    def _remove(self, keypath, key):
        """
        Like `._modify`, but for removal.
        """
        # TODO: may be sane to push this step up to callers?
        full_path = keypath + (key,)
        self._deletions[full_path] = None
        self.merge()
    def load_shell_env(self):
        """
        Load values from the shell environment.
        `.load_shell_env` is intended for execution late in a `.Config`
        object's lifecycle, once all other sources (such as a runtime config
        file or per-collection configurations) have been loaded. Loading from
        the shell is not terrifically expensive, but must be done at a specific
        point in time to ensure the "only known config keys are loaded from the
        env" behavior works correctly.
        See :ref:`env-vars` for details on this design decision and other info
        re: how environment variables are scanned and loaded.
        """
        # Force merge of existing data to ensure we have an up to date picture
        debug("Running pre-merge for shell env loading...")
        self.merge()
        debug("Done with pre-merge.")
        loader = Environment(config=self._config, prefix=self._env_prefix)
        self._set(_env=loader.load())
        debug("Loaded shell environment, triggering final merge")
        self.merge()
    def load_collection(self, data):
        """
        Update collection-driven config data.
        `.load_collection` is intended for use by the core task execution
        machinery, which is responsible for obtaining collection-driven data.
        See :ref:`collection-configuration` for details.
        """
        debug("Loading collection configuration")
        self._set(_collection=data)
        self.merge()
    def clone(self, into=None):
        """
        Return a copy of this configuration object.
        The new object will be identical in terms of configured sources and any
        loaded (or user-manipulated) data, but will be a distinct object with
        as little shared mutable state as possible.
        Specifically, all `dict` values within the config are recursively
        recreated, with non-dict leaf values subjected to `copy.copy` (note:
        *not* `copy.deepcopy`, as this can cause issues with various objects
        such as compiled regexen or threading locks, often found buried deep
        within rich aggregates like API or DB clients).
        The only remaining config values that may end up shared between a
        config and its clone are thus those 'rich' objects that do not
        `copy.copy` cleanly, or compound non-dict objects (such as lists or
        tuples).
        :param into:
            A `.Config` subclass that the new clone should be "upgraded" to.
            Used by client libraries which have their own `.Config` subclasses
            that e.g. define additional defaults; cloning "into" one of these
            subclasses ensures that any new keys/subtrees are added gracefully,
            without overwriting anything that may have been pre-defined.
            Default: ``None`` (just clone into another regular `.Config`).
        :returns:
            A `.Config`, or an instance of the class given to ``into``.
        :raises:
            ``TypeError``, if ``into`` is given a value and that value is not a
            `.Config` subclass.
        """
        # Sanity check for 'into'
        if into is not None and not issubclass(into, self.__class__):
            err = "'into' must be a subclass of {0}!"
            raise TypeError(err.format(self.__class__.__name__))
        # Construct new object
        klass = self.__class__ if into is None else into
        # Also allow arbitrary constructor kwargs, for subclasses where passing
        # (some) data in at init time is desired (vs post-init copying)
        # TODO: probably want to pivot the whole class this way eventually...?
        # No longer recall exactly why we went with the 'fresh init + attribute
        # setting' approach originally...tho there's clearly some impedance
        # mismatch going on between "I want stuff to happen in my config's
        # instantiation" and "I want cloning to not trigger certain things like
        # external data source loading".
        # NOTE: this will include defer_post_init, see end of method
        new = klass(**self._clone_init_kwargs(into=into))
        # Copy/merge/etc all 'private' data sources and attributes
        for name in """
            collection
            system_prefix
            system_path
            system_found
            system
            user_prefix
            user_path
            user_found
            user
            project_home
            project_prefix
            project_path
            project_found
            project
            env_prefix
            env
            runtime_path
            runtime_found
            runtime
            overrides
            modifications
        """.split():
            name = "_{0}".format(name)
            my_data = getattr(self, name)
            # Non-dict data gets carried over straight (via a copy())
            # NOTE: presumably someone could really screw up and change these
            # values' types, but at that point it's on them...
            if not isinstance(my_data, dict):
                setattr(new, name, copy.copy(my_data))
            # Dict data gets merged (which also involves a copy.copy
            # eventually)
            else:
                merge_dicts(getattr(new, name), my_data)
        # And merge the central config too (cannot just call .merge() on the
        # new clone, since the source config may have received custom
        # alterations by user code.)
        merge_dicts(new._config, self._config)
        # Finally, call new.post_init() since it's fully merged up. This way,
        # stuff called in post_init() will have access to the final version of
        # the data.
        new.post_init()
        return new
    def _clone_init_kwargs(self, into=None):
        """
        Supply kwargs suitable for initializing a new clone of this object.
        Note that most of the `.clone` process involves copying data between
        two instances instead of passing init kwargs; however, sometimes you
        really do want init kwargs, which is why this method exists.
        :param into: The value of ``into`` as passed to the calling `.clone`.
        :returns: A `dict`.
        """
        # NOTE: must pass in defaults fresh or otherwise global_defaults() gets
        # used instead. Except when 'into' is in play, in which case we truly
        # want the union of the two.
        new_defaults = copy_dict(self._defaults)
        if into is not None:
            merge_dicts(new_defaults, into.global_defaults())
        # The kwargs.
        return dict(
            defaults=new_defaults,
            # TODO: consider making this 'hardcoded' on the calling end (ie
            # inside clone()) to make sure nobody accidentally nukes it via
            # subclassing?
            defer_post_init=True,
        )
    def load_files(self):
        """
        Load any unloaded/un-searched-for config file sources.
        Specifically, any file sources whose ``_found`` values are ``None``
        will be sought and loaded if found; if their ``_found`` value is non
        ``None`` (e.g. ``True`` or ``False``) they will be skipped. Typically
        this means this method is idempotent and becomes a no-op after the
        first run.
        """
        self._load_file(prefix='system')
        self._load_file(prefix='user')
        self._load_file(prefix='project')
        self._load_file(prefix='runtime', absolute=True)
    def _load_file(self, prefix, absolute=False):
        """
        Seek & load a single config file for the level named ``prefix``.
        Results are recorded on the ``_<prefix>``, ``_<prefix>_path`` and
        ``_<prefix>_found`` attributes; a non-``None`` ``_<prefix>_found``
        short-circuits subsequent calls.
        :param str prefix:
            Level name, e.g. ``'system'`` or ``'runtime'``.
        :param bool absolute:
            When ``True``, ``_<prefix>_path`` is treated as a complete file
            path; otherwise ``_<prefix>_prefix`` + each suffix in
            ``_file_suffixes`` is tried in order.
        """
        # Setup
        found = "_{0}_found".format(prefix)
        path = "_{0}_path".format(prefix)
        data = "_{0}".format(prefix)
        # Short-circuit if loading appears to have occurred already
        if getattr(self, found) is not None:
            return
        # Moar setup
        if absolute:
            absolute_path = getattr(self, path)
            # None -> expected absolute path but none set, short circuit
            if absolute_path is None:
                return
            paths = [absolute_path]
        else:
            path_prefix = getattr(self, "_{0}_prefix".format(prefix))
            # Short circuit if loading seems unnecessary (eg for project config
            # files when not running out of a project)
            if path_prefix is None:
                return
            paths = [
                '.'.join((path_prefix, x))
                for x in self._file_suffixes
            ]
        # Poke 'em
        for filepath in paths:
            # Normalize
            filepath = expanduser(filepath)
            try:
                try:
                    # Dispatch to _load_yaml/_load_json/_load_py based on the
                    # file extension.
                    type_ = splitext(filepath)[1].lstrip('.')
                    loader = getattr(self, "_load_{0}".format(type_))
                except AttributeError as e:
                    msg = "Config files of type {0!r} (from file {1!r}) are not supported! Please use one of: {2!r}" # noqa
                    raise UnknownFileType(msg.format(
                        type_, filepath, self._file_suffixes))
                # Store data, the path it was found at, and fact that it was
                # found
                setattr(self, data, loader(filepath))
                setattr(self, path, filepath)
                setattr(self, found, True)
                break
            # Typically means 'no such file', so just note & skip past.
            except IOError as e:
                # TODO: is there a better / x-platform way to detect this?
                if "No such file" in e.strerror:
                    err = "Didn't see any {0}, skipping."
                    debug(err.format(filepath))
                else:
                    raise
        # Still None -> no suffixed paths were found, record this fact
        if getattr(self, path) is None:
            setattr(self, found, False)
    def merge(self):
        """
        Merge all config sources, in order.
        """
        debug("Merging config sources in order onto new empty _config...")
        self._config = {}
        debug("Defaults: {0!r}".format(self._defaults))
        merge_dicts(self._config, self._defaults)
        debug("Collection-driven: {0!r}".format(self._collection))
        merge_dicts(self._config, self._collection)
        self._merge_file('system', "System-wide")
        self._merge_file('user', "Per-user")
        self._merge_file('project', "Per-project")
        debug("Environment variable config: {0!r}".format(self._env))
        merge_dicts(self._config, self._env)
        self._merge_file('runtime', "Runtime")
        debug("Overrides: {0!r}".format(self._overrides))
        merge_dicts(self._config, self._overrides)
        debug("Modifications: {0!r}".format(self._modifications))
        merge_dicts(self._config, self._modifications)
        debug("Deletions: {0!r}".format(self._deletions))
        # Deletions always apply last so they trump every other level.
        for keypath in self._deletions.keys():
            excise(self._config, keypath)
    def _merge_file(self, name, desc):
        """
        Merge an already-loaded file level into ``_config``, with debug output.
        :param str name: Level name, e.g. ``'system'``.
        :param str desc: Human-readable description used in debug messages.
        """
        # Setup
        desc += " config file" # yup
        found = getattr(self, "_{0}_found".format(name))
        path = getattr(self, "_{0}_path".format(name))
        data = getattr(self, "_{0}".format(name))
        # None -> no loading occurred yet
        if found is None:
            debug("{0} has not been loaded yet, skipping".format(desc))
        # True -> hooray
        elif found:
            debug("{0} ({1}): {2!r}".format(desc, path, data))
            merge_dicts(self._config, data)
        # False -> did try, did not succeed
        else:
            # TODO: how to preserve what was tried for each case but only for
            # the negative? Just a branch here based on 'name'?
            debug("{0} not found, skipping".format(desc))
    @property
    def paths(self):
        """
        An iterable of all successfully loaded config file paths.
        No specific order.
        """
        paths = []
        for prefix in "system user project runtime".split():
            value = getattr(self, "_{0}_path".format(prefix))
            if value is not None:
                paths.append(value)
            return paths
    def _load_yaml(self, path):
        """Load and return YAML data from the file at ``path``."""
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects from a crafted config file -- confirm
        # whether safe_load would suffice for these config sources.
        with open(path) as fd:
            return yaml.load(fd)
    def _load_json(self, path):
        """Load and return JSON data from the file at ``path``."""
        with open(path) as fd:
            return json.load(fd)
    def _load_py(self, path):
        """
        Load a Python config file at ``path``, returning its module-level
        names (minus dunders) as a dict.
        """
        data = {}
        for key, value in six.iteritems(load_source('mod', path)):
            if key.startswith('__'):
                continue
            data[key] = value
        return data
class AmbiguousMergeError(ValueError):
    """
    Raised by `merge_dicts` when a dict value on one side of a merge collides
    with a non-dict value on the other; such conflicts have no sane
    resolution.
    """
    pass
def merge_dicts(base, updates):
    """
    Recursively fold dict ``updates`` into dict ``base``, mutating ``base``.
    * When both sides hold a dict under the same key, the merge recurses.
    * When exactly one side holds a dict under a key (e.g. inputs of
      ``{'foo': 5}`` and ``{'foo': {'bar': 5}}``), the conflict is
      irreconcilable and an exception is raised.
    * Non-dict leaf values pass through `copy.copy` so mutable state does not
      bleed between the two inputs.
    .. note::
        Think of this as a lightweight `copy.deepcopy` that adds protection
        against dict/non-dict type mismatches while sidestepping deepcopy's
        trouble with certain object types.
    :returns:
        ``base`` itself, mostly as a convenience for wrappers like
        `copy_dict`.
    """
    for key, incoming in updates.items():
        if key not in base:
            # Brand-new key: rebuild dicts from scratch (so nothing points
            # back into ``updates``), shallow-copy everything else.
            if isinstance(incoming, dict):
                base[key] = copy_dict(incoming)
            else:
                base[key] = copy.copy(incoming)
            continue
        # Key collision: behavior depends on whether each side is a dict.
        existing = base[key]
        incoming_is_dict = isinstance(incoming, dict)
        existing_is_dict = isinstance(existing, dict)
        if incoming_is_dict and existing_is_dict:
            merge_dicts(existing, incoming)
        elif incoming_is_dict or existing_is_dict:
            # Exactly one side is a dict: no sensible resolution exists.
            raise _merge_error(existing, incoming)
        else:
            base[key] = copy.copy(incoming)
    return base
def _merge_error(orig, new_):
    """Build the `AmbiguousMergeError` describing a failed merge of two values."""
    detail = "Can't cleanly merge {0} with {1}".format(
        _format_mismatch(orig), _format_mismatch(new_)
    )
    return AmbiguousMergeError(detail)
def _format_mismatch(x):
return "{0} ({1!r})".format(type(x), x)
def copy_dict(source):
    """
    Return a new dict duplicating ``source`` with minimal shared state.
    Thin wrapper around `merge_dicts` applied to an empty base dict; see that
    function's documentation for the exact copy semantics.
    """
    fresh = {}
    return merge_dicts(fresh, source)
def excise(dict_, keypath):
    """
    Delete the entry addressed by ``keypath`` from nested dict ``dict_``.
    ``keypath`` is a sequence of keys; all but the last select successively
    deeper sub-dicts, and the final key is removed from the innermost one.
    """
    leaf = keypath[-1]
    node = dict_
    # Walk down to the dict that directly owns the leaf key.
    for step in keypath[:-1]:
        node = node[step]
    del node[leaf]
Comment
import copy
import inspect
import json
import os
from os.path import join, splitext, expanduser
try:
from .vendor import six
if six.PY3:
from .vendor import yaml3 as yaml
else:
from .vendor import yaml2 as yaml
except ImportError:
# Use system modules
import six
if six.PY3:
import yaml3 as yaml
else:
import yaml2 as yaml
if six.PY3:
    try:
        from importlib.machinery import SourceFileLoader
    except ImportError: # PyPy3
        from importlib._bootstrap import _SourceFileLoader as SourceFileLoader
    def load_source(name, path):
        """
        Import the Python file at ``path`` and return its namespace as a dict.
        A nonexistent ``path`` yields an empty dict rather than an error.
        """
        if not os.path.exists(path):
            return {}
        return vars(SourceFileLoader('mod', path).load_module())
else:
    import imp
    def load_source(name, path):
        """
        Python 2 variant of `load_source`, via the ``imp`` module.
        A nonexistent ``path`` yields an empty dict rather than an error.
        """
        if not os.path.exists(path):
            return {}
        return vars(imp.load_source('mod', path))
from .env import Environment
from .exceptions import UnknownFileType
from .util import debug
from .platform import WINDOWS
class DataProxy(object):
"""
Helper class implementing nested dict+attr access for `.Config`.
Specifically, is used both for `.Config` itself, and to wrap any other
dicts assigned as config values (recursively).
.. warning::
All methods (of this object or in subclasses) must take care to
initialize new attributes via ``object.__setattr__``, or they'll run
into recursion errors!
"""
# Attributes which get proxied through to inner merged-dict config obj.
_proxies = tuple("""
clear
get
has_key
items
iteritems
iterkeys
itervalues
keys
popitem
setdefault
update
values
""".split()) + tuple("__{0}__".format(x) for x in """
cmp
contains
iter
sizeof
""".split())
@classmethod
def from_data(cls, data, root=None, keypath=None):
"""
Alternate constructor for 'baby' DataProxies used as sub-dict values.
Allows creating standalone DataProxy objects while also letting
subclasses like `.Config` define their own ``__init__``s without
muddling the two.
:param dict data:
This particular DataProxy's personal data. Required, it's the Data
being Proxied.
:param root:
Optional handle on a root DataProxy/Config which needs notification
on data updates.
:param tuple keypath:
Optional tuple describing the path of keys leading to this
DataProxy's location inside the ``root`` structure. Required if
``root`` was given (and vice versa.)
"""
obj = cls()
object.__setattr__(obj, '_config', data)
object.__setattr__(obj, '_root', root)
if keypath is None:
keypath = tuple()
object.__setattr__(obj, '_keypath', keypath)
return obj
def __getattr__(self, key):
# NOTE: due to default Python attribute-lookup semantics, "real"
# attributes will always be yielded on attribute access and this method
# is skipped. That behavior is good for us (it's more intuitive than
# having a config key accidentally shadow a real attribute or method).
try:
return self._get(key)
except KeyError:
# Proxy most special vars to config for dict procotol.
if key in self._proxies:
return getattr(self._config, key)
# Otherwise, raise useful AttributeError to follow getattr proto.
err = "No attribute or config key found for {0!r}".format(key)
attrs = [x for x in dir(self.__class__) if not x.startswith('_')]
err += "\n\nValid keys: {0!r}".format(
sorted(list(self._config.keys()))
)
err += "\n\nValid real attributes: {0!r}".format(attrs)
raise AttributeError(err)
def __setattr__(self, key, value):
# Turn attribute-sets into config updates anytime we don't have a real
# attribute with the given name/key.
has_real_attr = key in (x[0] for x in inspect.getmembers(self))
if not has_real_attr:
# Make sure to trigger our own __setitem__ instead of going direct
# to our internal dict/cache
self[key] = value
else:
super(DataProxy, self).__setattr__(key, value)
def __iter__(self):
# For some reason Python is ignoring our __hasattr__ when determining
# whether we support __iter__. BOO
return iter(self._config)
def __eq__(self, other):
# NOTE: Can't proxy __eq__ because the RHS will always be an obj of the
# current class, not the proxied-to class, and that causes
# NotImplemented.
# Try comparing to other objects like ourselves, falling back to a not
# very comparable value (None) so comparison fails.
other_val = getattr(other, '_config', None)
# But we can compare to vanilla dicts just fine, since our _config is
# itself just a dict.
if isinstance(other, dict):
other_val = other
return self._config == other_val
# Make unhashable, because our entire raison d'etre is to be somewhat
# mutable. Subclasses with mutable attributes may override this.
# NOTE: this is mostly a concession to Python 2, v3 does it automatically.
__hash__ = None
def __len__(self):
return len(self._config)
def is_leaf(self):
return hasattr(self, '_root')
def is_root(self):
# TODO: also callable()? meh
return hasattr(self, '_modify')
def pop(self, *args):
# Must test this up front before (possibly) mutating self._config
key_existed = args and args[0] in self._config
# We always have a _config (whether it's a real dict or a cache of
# merged levels) so we can fall back to it for all the corner case
# handling re: args (arity, handling a default, raising KeyError, etc)
ret = self._config.pop(*args)
# If it looks like no popping occurred (key wasn't there), presumably
# user gave default, so we can short-circuit return here - no need to
# track a deletion that did not happen.
if not key_existed:
return ret
# Here, we can assume at least the 1st posarg (key) existed.
key = args[0]
if self.is_leaf():
# Bookkeeping, via our root
self._root._remove(self._keypath, key)
elif self.is_root():
# Bookkeeping, via ourselves
self._remove(tuple(), key)
# In all cases, return the popped value.
return ret
def __delitem__(self, key):
key_existed = key in self
del self._config[key]
# TODO: bet this can be tightened further by just ensuring
# self._keypath defaults to empty tuple; then can simply do (self._root
# if self.is_leaf() else self)._remove(self._keypath, key)
# TODO: and then further, can just define ._get_root()
# TODO: and in fact we could presumably just define _remove()...? which
# is presently just on Config? that gets us back to "This needs more
# class reorg" territory tbh
if self.is_leaf():
self._root._remove(self._keypath, key)
elif self.is_root():
self._remove(tuple(), key)
    def __setitem__(self, key, value):
        """
        Dict-style assignment, routed through modification bookkeeping.
        Behavior depends on which kind of proxy we are: leaf (has ``_root``),
        root Config (has ``_modify``), or some other rooty proxy.
        """
        # If we appear to be a non-root DataProxy, modify our _config so that
        # anybody keeping a reference to us sees the update, and also tell our
        # root object so it can track our modifications centrally (& trigger
        # cache updating, etc)
        if getattr(self, '_root', None):
            self._config[key] = value
            self._root._modify(self._keypath, key, value)
        else:
            # If we've got no _root, but we have a 'modify', we're probably a
            # root/Config ourselves; so just call modify with an empty
            # keypath. (We do _not_ want to touch _config here as it would be
            # the config cache.)
            if hasattr(self, '_modify') and callable(self._modify):
                self._modify(tuple(), key, value)
            # If we've got no _root and no _modify(), we're some other rooty
            # proxying object that isn't a Config, such as a Context. So we
            # just update _config and assume it'll do the needful.
            # TODO: this is getting very hairy which is a sign the object
            # responsibilities need changing...sigh
            else:
                self._config[key] = value
def __getitem__(self, key):
return self._get(key)
    def _get(self, key):
        """
        Core lookup: fetch ``key`` from the wrapped config dict.
        Dict values are wrapped in a new (leaf) DataProxy carrying our root
        handle and an extended keypath, so nested writes get tracked.
        :raises KeyError: if ``key`` is absent from ``self._config``.
        """
        # Short-circuit if pickling/copying mechanisms are asking if we've got
        # __setstate__ etc; they'll ask this w/o calling our __init__ first, so
        # we'd be in a RecursionError-causing catch-22 otherwise.
        if key in (
            '__setstate__',
        ):
            raise AttributeError(key)
        # At this point we should be able to assume a self._config...
        value = self._config[key]
        if isinstance(value, dict):
            # New object's keypath is simply the key, prepended with our own
            # keypath if we've got one.
            keypath = (key,)
            if hasattr(self, '_keypath'):
                keypath = self._keypath + keypath
            # If we have no _root, we must be the root, so it's us. Otherwise,
            # pass along our handle on the root.
            root = getattr(self, '_root', self)
            value = DataProxy.from_data(
                data=value,
                root=root,
                keypath=keypath,
            )
        return value
def _set(self, **kwargs):
"""
Convenience workaround of default 'attrs are config keys' behavior.
Good for initializing new attributes; is a bit less verbose than
slapping ``object.__setattr__()`` everywhere.
"""
for key, value in six.iteritems(kwargs):
object.__setattr__(self, key, value)
def __str__(self):
return "<{0}: {1}>".format(self.__class__.__name__, self._config)
    def __unicode__(self):
        # Python 2 text-type hook; never called under Python 3 (where the
        # builtin ``unicode`` does not exist, hence the noqa).
        return unicode(self.__str__())  # noqa
def __repr__(self):
# TODO: something more useful? Not an easy object to reconstruct from a
# stringrep.
return self.__str__()
def __contains__(self, key):
return key in self._config
class Config(DataProxy):
    """
    Invoke's primary configuration handling class.
    See :doc:`/concepts/configuration` for details on the configuration system
    this class implements, including the :ref:`configuration hierarchy
    <config-hierarchy>`. The rest of this class' documentation assumes
    familiarity with that document.
    **Access**
    Configuration values may be accessed and/or updated using dict syntax::
        config['foo']
    or attribute syntax::
        config.foo
    Nesting works the same way - dict config values are turned into objects
    which honor both the dictionary protocol and the attribute-access method::
        config['foo']['bar']
        config.foo.bar
    **A note about attribute access and methods**
    This class implements the entire dictionary protocol: methods such as
    ``keys``, ``values``, ``items``, ``pop`` and so forth should all function
    as they do on regular dicts. It also implements new config-specific methods
    such as `.load_files`, `.load_collection` and `.clone`.
    .. warning::
        Accordingly, this means that if you have configuration options sharing
        names with these methods, you **must** use dictionary syntax (e.g.
        ``myconfig['keys']``) to access the configuration data.
    **Lifecycle**
    At initialization time, `.Config`:
    - creates per-level data structures
    - stores levels supplied to `__init__`, such as defaults or overrides, as
      well as the various config file paths/prefixes
    - loads system, user and project level config files, if found
    At this point, `.Config` is fully usable, but in most real-world use cases,
    the CLI machinery (or library users) do additional work on a per-task
    basis:
    - the result of CLI argument parsing is applied to the overrides level
    - a runtime config file is loaded, if its flag was supplied
    - the base config is cloned (so tasks don't inadvertently affect one
      another)
    - per-collection data is loaded (only possible now that we have a task in
      hand)
    - shell environment data is loaded (must be done at end of process due to
      using the rest of the config as a guide for interpreting env var names)
    Any modifications made directly to the `.Config` itself (usually, after it
    has been handed to the task or other end-user code) end up stored in their
    own (topmost) config level, making it easy to debug final values.
    Finally, any *deletions* made to the `.Config` (e.g. applications of
    dict-style mutators like ``pop``, ``clear`` etc) are tracked in their own
    structure, allowing the overall object to honor such method calls despite
    the source data itself not changing.
    """
    @staticmethod
    def global_defaults():
        """
        Return the core default settings for Invoke.
        Generally only for use by `.Config` internals. For descriptions of
        these values, see :ref:`default-values`.
        Subclasses may choose to override this method, calling
        ``Config.global_defaults`` and applying `.merge_dicts` to the result,
        to add to or modify these values.
        """
        return {
            # TODO: we document 'debug' but it's not truly implemented outside
            # of env var and CLI flag. If we honor it, we have to go around and
            # figure out at what points we might want to call
            # `util.enable_logging`:
            # - just using it as a fallback default for arg parsing isn't much
            # use, as at that point the config holds nothing but defaults & CLI
            # flag values
            # - doing it at file load time might be somewhat useful, though
            # where this happens may be subject to change soon
            # - doing it at env var load time seems a bit silly given the
            # existing support for at-startup testing for INVOKE_DEBUG
            # 'debug': False,
            'run': {
                'warn': False,
                'hide': None,
                'shell': '/bin/bash',
                'pty': False,
                'fallback': True,
                'env': {},
                'replace_env': False,
                'echo': False,
                'encoding': None,
                'out_stream': None,
                'err_stream': None,
                'in_stream': None,
                'watchers': [],
                'echo_stdin': None,
            },
            'sudo': {
                'prompt': "[sudo] password: ",
                'password': None,
            },
            'tasks': {
                'dedupe': True,
            },
        }
    def __init__(
        self,
        defaults=None,
        overrides=None,
        system_prefix=None,
        user_prefix=None,
        project_home=None,
        env_prefix=None,
        runtime_path=None,
        defer_post_init=False,
    ):
        """
        Creates a new config object.
        :param dict defaults:
            A dict containing default (lowest level) config data. Default:
            `global_defaults`.
        :param dict overrides:
            A dict containing override-level config data. Default: ``{}``.
        :param str system_prefix:
            Path & partial filename for the global config file location. Should
            include everything but the dot & file extension.
            Default: ``/etc/invoke`` (e.g. ``/etc/invoke.yaml`` or
            ``/etc/invoke.json``).
        :param str user_prefix:
            Like ``system_prefix`` but for the per-user config file.
            Default: ``~/.invoke`` (e.g. ``~/.invoke.yaml``).
        :param str project_home:
            Optional directory path location of the currently loaded
            `.Collection` (as loaded by `.Loader`). When non-empty, will
            trigger seeking of per-project config files in this location +
            ``invoke.(yaml|json|py)``.
        :param str env_prefix:
            Environment variable seek prefix; optional, defaults to ``None``.
            When not ``None``, only environment variables beginning with this
            value will be loaded. If it is set, the keys will have the prefix
            stripped out before processing, so e.g. ``env_prefix='INVOKE_'``
            means users must set ``INVOKE_MYSETTING`` in the shell to affect
            the ``"mysetting"`` setting.
        :param str runtime_path:
            Optional file path to a runtime configuration file.
            Used to fill the penultimate slot in the config hierarchy. Should
            be a full file path to an existing file, not a directory path, or a
            prefix.
        :param bool defer_post_init:
            Whether to defer certain steps at the end of `__init__`.
            Specifically, the `post_init` method is normally called
            automatically, and performs initial actions like loading config
            files. Advanced users may wish to call that method manually after
            manipulating the object; to do so, specify
            ``defer_post_init=True``.
            Default: ``False``.
        """
        # NOTE: all attributes are created via _set() (object.__setattr__)
        # because our inherited __setattr__ would otherwise treat them as
        # config-key writes.
        # Technically an implementation detail - do not expose in public API.
        # Stores merged configs and is accessed via DataProxy.
        self._set(_config={})
        # Config file suffixes to search, in preference order.
        self._set(_file_suffixes=('yaml', 'json', 'py'))
        # Default configuration values, typically a copy of `global_defaults`.
        if defaults is None:
            defaults = copy_dict(self.global_defaults())
        self._set(_defaults=defaults)
        # Collection-driven config data, gathered from the collection tree
        # containing the currently executing task.
        self._set(_collection={})
        # Path prefix searched for the system config file.
        # NOTE: There is no default system prefix on Windows.
        if system_prefix is None and not WINDOWS:
            system_prefix = '/etc/invoke'
        self._set(_system_prefix=system_prefix)
        # Path to loaded system config file, if any.
        self._set(_system_path=None)
        # Whether the system config file has been loaded or not (or ``None`` if
        # no loading has been attempted yet.)
        self._set(_system_found=None)
        # Data loaded from the system config file.
        self._set(_system={})
        # Path prefix searched for per-user config files.
        if user_prefix is None:
            user_prefix = '~/.invoke'
        self._set(_user_prefix=user_prefix)
        # Path to loaded user config file, if any.
        self._set(_user_path=None)
        # Whether the user config file has been loaded or not (or ``None`` if
        # no loading has been attempted yet.)
        self._set(_user_found=None)
        # Data loaded from the per-user config file.
        self._set(_user={})
        # Parent directory of the current root tasks file, if applicable.
        self._set(_project_home=project_home)
        # And a normalized prefix version not really publicly exposed
        project_prefix = None
        if self._project_home is not None:
            project_prefix = join(project_home, 'invoke')
        self._set(_project_prefix=project_prefix)
        # Path to loaded per-project config file, if any.
        self._set(_project_path=None)
        # Whether the project config file has been loaded or not (or ``None``
        # if no loading has been attempted yet.)
        self._set(_project_found=None)
        # Data loaded from the per-project config file.
        self._set(_project={})
        # Environment variable name prefix
        if env_prefix is None:
            env_prefix = ''
        self._set(_env_prefix=env_prefix)
        # Config data loaded from the shell environment.
        self._set(_env={})
        # Path to the user-specified runtime config file.
        self._set(_runtime_path=runtime_path)
        # Data loaded from the runtime config file.
        self._set(_runtime={})
        # Whether the runtime config file has been loaded or not (or ``None``
        # if no loading has been attempted yet.)
        self._set(_runtime_found=None)
        # Overrides - highest normal config level. Typically filled in from
        # command-line flags.
        if overrides is None:
            overrides = {}
        self._set(_overrides=overrides)
        # Absolute highest level: user modifications.
        self._set(_modifications={})
        # And its sibling: user deletions. (stored as a flat dict of keypath
        # keys and dummy values, for constant-time membership testing/removal
        # w/ no messy recursion. TODO: maybe redo _everything_ that way? in
        # _modifications and other levels, the values would of course be
        # valuable and not just None)
        self._set(_deletions={})
        if not defer_post_init:
            self.post_init()
    def post_init(self):
        """
        Call setup steps that can occur immediately after `__init__`.
        May need to be manually called if `__init__` was told
        ``defer_post_init=True``.
        :returns: ``None``.
        """
        self.load_files()
        # TODO: just use a decorator for merging probably? shrug
        self.merge()
    def _modify(self, keypath, key, value):
        """
        Update our user-modifications config level with new data.
        :param tuple keypath:
            The key path identifying the sub-dict being updated. May be an
            empty tuple if the update is occurring at the topmost level.
        :param str key:
            The actual key receiving an update.
        :param value:
            The value being written.
        """
        # First, ensure we wipe the keypath from _deletions, in case it was
        # previously deleted-at-runtime....
        self._deletions.pop(keypath + (key,), None)
        # Now we can add it to the modifications structure.
        data = self._modifications
        keypath = list(keypath)
        while keypath:
            subkey = keypath.pop(0)
            # TODO: could use defaultdict here, but...meh?
            if subkey not in data:
                # TODO: generify this and the subsequent 3 lines...
                data[subkey] = {}
            data = data[subkey]
        data[key] = value
        self.merge()
    def _remove(self, keypath, key):
        """
        Like `._modify`, but for removal.
        """
        # NOTE: because deletions are processed in merge() last, we do not need
        # to remove things from _modifications on removal; but we *do* do the
        # inverse - remove from _deletions on modification.
        # TODO: may be sane to push this step up to callers?
        full_path = keypath + (key,)
        self._deletions[full_path] = None
        self.merge()
    def load_shell_env(self):
        """
        Load values from the shell environment.
        `.load_shell_env` is intended for execution late in a `.Config`
        object's lifecycle, once all other sources (such as a runtime config
        file or per-collection configurations) have been loaded. Loading from
        the shell is not terrifically expensive, but must be done at a specific
        point in time to ensure the "only known config keys are loaded from the
        env" behavior works correctly.
        See :ref:`env-vars` for details on this design decision and other info
        re: how environment variables are scanned and loaded.
        """
        # Force merge of existing data to ensure we have an up to date picture
        debug("Running pre-merge for shell env loading...")
        self.merge()
        debug("Done with pre-merge.")
        loader = Environment(config=self._config, prefix=self._env_prefix)
        self._set(_env=loader.load())
        debug("Loaded shell environment, triggering final merge")
        self.merge()
    def load_collection(self, data):
        """
        Update collection-driven config data.
        `.load_collection` is intended for use by the core task execution
        machinery, which is responsible for obtaining collection-driven data.
        See :ref:`collection-configuration` for details.
        """
        debug("Loading collection configuration")
        self._set(_collection=data)
        self.merge()
    def clone(self, into=None):
        """
        Return a copy of this configuration object.
        The new object will be identical in terms of configured sources and any
        loaded (or user-manipulated) data, but will be a distinct object with
        as little shared mutable state as possible.
        Specifically, all `dict` values within the config are recursively
        recreated, with non-dict leaf values subjected to `copy.copy` (note:
        *not* `copy.deepcopy`, as this can cause issues with various objects
        such as compiled regexen or threading locks, often found buried deep
        within rich aggregates like API or DB clients).
        The only remaining config values that may end up shared between a
        config and its clone are thus those 'rich' objects that do not
        `copy.copy` cleanly, or compound non-dict objects (such as lists or
        tuples).
        :param into:
            A `.Config` subclass that the new clone should be "upgraded" to.
            Used by client libraries which have their own `.Config` subclasses
            that e.g. define additional defaults; cloning "into" one of these
            subclasses ensures that any new keys/subtrees are added gracefully,
            without overwriting anything that may have been pre-defined.
            Default: ``None`` (just clone into another regular `.Config`).
        :returns:
            A `.Config`, or an instance of the class given to ``into``.
        :raises:
            ``TypeError``, if ``into`` is given a value and that value is not a
            `.Config` subclass.
        """
        # Sanity check for 'into'
        if into is not None and not issubclass(into, self.__class__):
            err = "'into' must be a subclass of {0}!"
            raise TypeError(err.format(self.__class__.__name__))
        # Construct new object
        klass = self.__class__ if into is None else into
        # Also allow arbitrary constructor kwargs, for subclasses where passing
        # (some) data in at init time is desired (vs post-init copying)
        # TODO: probably want to pivot the whole class this way eventually...?
        # No longer recall exactly why we went with the 'fresh init + attribute
        # setting' approach originally...tho there's clearly some impedance
        # mismatch going on between "I want stuff to happen in my config's
        # instantiation" and "I want cloning to not trigger certain things like
        # external data source loading".
        # NOTE: this will include defer_post_init, see end of method
        new = klass(**self._clone_init_kwargs(into=into))
        # Copy/merge/etc all 'private' data sources and attributes
        for name in """
            collection
            system_prefix
            system_path
            system_found
            system
            user_prefix
            user_path
            user_found
            user
            project_home
            project_prefix
            project_path
            project_found
            project
            env_prefix
            env
            runtime_path
            runtime_found
            runtime
            overrides
            modifications
        """.split():
            name = "_{0}".format(name)
            my_data = getattr(self, name)
            # Non-dict data gets carried over straight (via a copy())
            # NOTE: presumably someone could really screw up and change these
            # values' types, but at that point it's on them...
            if not isinstance(my_data, dict):
                setattr(new, name, copy.copy(my_data))
            # Dict data gets merged (which also involves a copy.copy
            # eventually)
            else:
                merge_dicts(getattr(new, name), my_data)
        # And merge the central config too (cannot just call .merge() on the
        # new clone, since the source config may have received custom
        # alterations by user code.)
        merge_dicts(new._config, self._config)
        # Finally, call new.post_init() since it's fully merged up. This way,
        # stuff called in post_init() will have access to the final version of
        # the data.
        new.post_init()
        return new
    def _clone_init_kwargs(self, into=None):
        """
        Supply kwargs suitable for initializing a new clone of this object.
        Note that most of the `.clone` process involves copying data between
        two instances instead of passing init kwargs; however, sometimes you
        really do want init kwargs, which is why this method exists.
        :param into: The value of ``into`` as passed to the calling `.clone`.
        :returns: A `dict`.
        """
        # NOTE: must pass in defaults fresh or otherwise global_defaults() gets
        # used instead. Except when 'into' is in play, in which case we truly
        # want the union of the two.
        new_defaults = copy_dict(self._defaults)
        if into is not None:
            merge_dicts(new_defaults, into.global_defaults())
        # The kwargs.
        return dict(
            defaults=new_defaults,
            # TODO: consider making this 'hardcoded' on the calling end (ie
            # inside clone()) to make sure nobody accidentally nukes it via
            # subclassing?
            defer_post_init=True,
        )
    def load_files(self):
        """
        Load any unloaded/un-searched-for config file sources.
        Specifically, any file sources whose ``_found`` values are ``None``
        will be sought and loaded if found; if their ``_found`` value is non
        ``None`` (e.g. ``True`` or ``False``) they will be skipped. Typically
        this means this method is idempotent and becomes a no-op after the
        first run.
        """
        self._load_file(prefix='system')
        self._load_file(prefix='user')
        self._load_file(prefix='project')
        self._load_file(prefix='runtime', absolute=True)
    def _load_file(self, prefix, absolute=False):
        """
        Seek and load one config file source named by ``prefix``.
        :param str prefix:
            One of 'system'/'user'/'project'/'runtime'; selects which
            ``_<prefix>_found``/``_<prefix>_path``/``_<prefix>`` attribute
            triplet is read & written.
        :param bool absolute:
            When ``True``, ``_<prefix>_path`` is treated as a full file path
            instead of deriving candidates from a prefix + suffix list.
        """
        # Setup
        found = "_{0}_found".format(prefix)
        path = "_{0}_path".format(prefix)
        data = "_{0}".format(prefix)
        # Short-circuit if loading appears to have occurred already
        if getattr(self, found) is not None:
            return
        # Moar setup
        if absolute:
            absolute_path = getattr(self, path)
            # None -> expected absolute path but none set, short circuit
            if absolute_path is None:
                return
            paths = [absolute_path]
        else:
            path_prefix = getattr(self, "_{0}_prefix".format(prefix))
            # Short circuit if loading seems unnecessary (eg for project config
            # files when not running out of a project)
            if path_prefix is None:
                return
            # Candidate paths follow _file_suffixes' preference order.
            paths = [
                '.'.join((path_prefix, x))
                for x in self._file_suffixes
            ]
        # Poke 'em
        for filepath in paths:
            # Normalize
            filepath = expanduser(filepath)
            try:
                try:
                    type_ = splitext(filepath)[1].lstrip('.')
                    loader = getattr(self, "_load_{0}".format(type_))
                except AttributeError as e:
                    msg = "Config files of type {0!r} (from file {1!r}) are not supported! Please use one of: {2!r}" # noqa
                    raise UnknownFileType(msg.format(
                        type_, filepath, self._file_suffixes))
                # Store data, the path it was found at, and fact that it was
                # found
                setattr(self, data, loader(filepath))
                setattr(self, path, filepath)
                setattr(self, found, True)
                break
            # Typically means 'no such file', so just note & skip past.
            except IOError as e:
                # TODO: is there a better / x-platform way to detect this?
                if "No such file" in e.strerror:
                    err = "Didn't see any {0}, skipping."
                    debug(err.format(filepath))
                else:
                    raise
        # Still None -> no suffixed paths were found, record this fact
        if getattr(self, path) is None:
            setattr(self, found, False)
    def merge(self):
        """
        Merge all config sources, in order.
        """
        # NOTE: merge order defines precedence - later merges override earlier
        # ones; deletions are always applied last.
        debug("Merging config sources in order onto new empty _config...")
        self._config = {}
        debug("Defaults: {0!r}".format(self._defaults))
        merge_dicts(self._config, self._defaults)
        debug("Collection-driven: {0!r}".format(self._collection))
        merge_dicts(self._config, self._collection)
        self._merge_file('system', "System-wide")
        self._merge_file('user', "Per-user")
        self._merge_file('project', "Per-project")
        debug("Environment variable config: {0!r}".format(self._env))
        merge_dicts(self._config, self._env)
        self._merge_file('runtime', "Runtime")
        debug("Overrides: {0!r}".format(self._overrides))
        merge_dicts(self._config, self._overrides)
        debug("Modifications: {0!r}".format(self._modifications))
        merge_dicts(self._config, self._modifications)
        debug("Deletions: {0!r}".format(self._deletions))
        for keypath in self._deletions.keys():
            excise(self._config, keypath)
    def _merge_file(self, name, desc):
        """
        Merge one file-based source (by ``name``) into ``_config``, with
        debug logging described via ``desc``.
        """
        # Setup
        desc += " config file" # yup
        found = getattr(self, "_{0}_found".format(name))
        path = getattr(self, "_{0}_path".format(name))
        data = getattr(self, "_{0}".format(name))
        # None -> no loading occurred yet
        if found is None:
            debug("{0} has not been loaded yet, skipping".format(desc))
        # True -> hooray
        elif found:
            debug("{0} ({1}): {2!r}".format(desc, path, data))
            merge_dicts(self._config, data)
        # False -> did try, did not succeed
        else:
            # TODO: how to preserve what was tried for each case but only for
            # the negative? Just a branch here based on 'name'?
            debug("{0} not found, skipping".format(desc))
    @property
    def paths(self):
        """
        An iterable of all successfully loaded config file paths.
        No specific order.
        """
        paths = []
        for prefix in "system user project runtime".split():
            value = getattr(self, "_{0}_path".format(prefix))
            if value is not None:
                paths.append(value)
        return paths
    def _load_yaml(self, path):
        # NOTE(review): yaml.load() without an explicit Loader uses the
        # full (unsafe) loader, which can construct arbitrary Python objects
        # from tagged YAML; consider yaml.safe_load() if config files may
        # come from untrusted sources.
        with open(path) as fd:
            return yaml.load(fd)
    def _load_json(self, path):
        # Parse a JSON config file into a plain dict.
        with open(path) as fd:
            return json.load(fd)
    def _load_py(self, path):
        # Treat a Python file's top-level (non-dunder) names as config data.
        data = {}
        # NOTE(review): this assumes load_source() yields a dict-like mapping
        # of the module namespace; if it returns a module object, vars() would
        # be needed before six.iteritems - confirm against its definition.
        for key, value in six.iteritems(load_source('mod', path)):
            if key.startswith('__'):
                continue
            data[key] = value
        return data
class AmbiguousMergeError(ValueError):
    """
    Raised by `merge_dicts` when a key holds a dict in one input but a
    non-dict in the other, making the merge irreconcilable.
    """
    pass
def merge_dicts(base, updates):
    """
    Recursively merge dict ``updates`` into dict ``base``, mutating ``base``.
    * Keys which are dicts on both sides are merged recursively.
    * Keys which are a dict in one input and *not* a dict in the other (e.g.
      inputs ``{'foo': 5}`` and ``{'foo': {'bar': 5}}``) are irreconcilable
      and generate an exception.
    * Non-dict leaf values pass through `copy.copy` to avoid state bleed.
    .. note::
        This is effectively a lightweight `copy.deepcopy` which offers
        protection from mismatched types (dict vs non-dict) and avoids some
        core deepcopy problems (such as how it explodes on certain object
        types).
    :returns:
        The value of ``base``, which is mostly useful for wrapper functions
        like `copy_dict`.
    """
    # TODO: for chrissakes just make it return instead of mutating?
    for key, new_value in updates.items():
        if key not in base:
            # Brand-new key: rebuild dict values from scratch (so nothing in
            # ``base`` is a reference into ``updates`` - avoids nasty
            # state-bleed bugs); shallow-copy everything else.
            if isinstance(new_value, dict):
                base[key] = copy_dict(new_value)
            else:
                base[key] = copy.copy(new_value)
            continue
        # Collision: merge when both sides are dicts, error when only one is.
        old_value = base[key]
        old_is_dict = isinstance(old_value, dict)
        new_is_dict = isinstance(new_value, dict)
        if old_is_dict and new_is_dict:
            merge_dicts(old_value, new_value)
        elif old_is_dict or new_is_dict:
            raise _merge_error(old_value, new_value)
        else:
            base[key] = copy.copy(new_value)
    return base
def _merge_error(orig, new_):
    """
    Build (not raise) the `AmbiguousMergeError` describing a merge conflict.
    """
    message = "Can't cleanly merge {0} with {1}".format(
        _format_mismatch(orig), _format_mismatch(new_)
    )
    return AmbiguousMergeError(message)
def _format_mismatch(x):
    # Render a value as "<type> (repr)" for merge-conflict error messages.
    return "{0} ({1!r})".format(type(x), x)
def copy_dict(source):
    """
    Return a fresh copy of ``source`` with as little shared state as possible.
    Implemented as a `merge_dicts` onto an empty base dict; see that
    function's documentation for details on copying behavior.
    """
    fresh = {}
    merge_dicts(fresh, source)
    return fresh
def excise(dict_, keypath):
    """
    Remove the key pointed at by ``keypath`` from nested dict ``dict_``.
    Tolerates missing path components: if any intermediate key (or the leaf
    itself) is absent - e.g. because an enclosing subtree was already excised
    by an earlier deletion - this is a silent no-op instead of a ``KeyError``.
    This keeps repeated deletion replays (as done every ``Config.merge``) from
    crashing when recorded deletion keypaths overlap.
    :param dict dict_: The nested dict to mutate in place.
    :param tuple keypath:
        Sequence of keys; all but the last select sub-dicts, the last names
        the entry to remove.
    :returns: ``None``.
    """
    data = dict_
    path = list(keypath)
    leaf_key = path.pop()
    while path:
        step = path.pop(0)
        # Bail out quietly if an intermediate container is gone (or isn't a
        # dict at all).
        if not isinstance(data, dict) or step not in data:
            return
        data = data[step]
    if isinstance(data, dict):
        # pop() with a default makes removal of the leaf idempotent.
        data.pop(leaf_key, None)
|
# -*- coding: utf-8 -*-
#
# GPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 18 15:30:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
#Mocking uninstalled modules: https://read-the-docs.readthedocs.org/en/latest/faq.html
#class Mock(object):
#__all__ = []
#def __init__(self, *args, **kwargs):
#for key, value in kwargs.iteritems():
#setattr(self, key, value)
#def __call__(self, *args, **kwargs):
#return Mock()
#__add__ = __mul__ = __getitem__ = __setitem__ = \
#__delitem__ = __sub__ = __floordiv__ = __mod__ = __divmod__ = \
#__pow__ = __lshift__ = __rshift__ = __and__ = __xor__ = __or__ = \
#__rmul__ = __rsub__ = __rfloordiv__ = __rmod__ = __rdivmod__ = \
#__rpow__ = __rlshift__ = __rrshift__ = __rand__ = __rxor__ = __ror__ = \
#__imul__ = __isub__ = __ifloordiv__ = __imod__ = __idivmod__ = \
#__ipow__ = __ilshift__ = __irshift__ = __iand__ = __ixor__ = __ior__ = \
#__neg__ = __pos__ = __abs__ = __invert__ = __call__
#def __getattr__(self, name):
#if name in ('__file__', '__path__'):
#return '/dev/null'
#if name == 'sqrt':
#return math.sqrt
#elif name[0] != '_' and name[0] == name[0].upper():
#return type(name, (), {})
#else:
#return Mock(**vars(self))
#def __lt__(self, *args, **kwargs):
#return True
#__nonzero__ = __le__ = __eq__ = __ne__ = __gt__ = __ge__ = __contains__ = \
#__lt__
#def __repr__(self):
## Use _mock_repr to fake the __repr__ call
#res = getattr(self, "_mock_repr")
#return res if isinstance(res, str) else "Mock"
#def __hash__(self):
#return 1
#__len__ = __int__ = __long__ = __index__ = __hash__
#def __oct__(self):
#return '01'
#def __hex__(self):
#return '0x1'
#def __float__(self):
#return 0.1
#def __complex__(self):
#return 1j
#MOCK_MODULES = [
#'pylab', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'pyfits',
#'scipy.constants.constants', 'matplotlib.cm',
#'matplotlib.image', 'matplotlib.colors', 'sunpy.cm',
#'pandas', 'pandas.io', 'pandas.io.parsers',
#'suds', 'matplotlib.ticker', 'matplotlib.colorbar',
#'matplotlib.dates', 'scipy.optimize', 'scipy.ndimage',
#'matplotlib.figure', 'scipy.ndimage.interpolation', 'bs4']
#for mod_name in MOCK_MODULES:
#sys.modules[mod_name] = Mock()
#sys.modules['numpy'] = Mock(pi=math.pi, G=6.67364e-11,
#ndarray=type('ndarray', (), {}),
#dtype=lambda _: Mock(_mock_repr='np.dtype(\'float32\')'))
#sys.modules['scipy.constants'] = Mock(pi=math.pi, G=6.67364e-11)
##############################################################################
##
## Mock out imports with C dependencies because ReadTheDocs can't build them.
class Mock(object):
    """
    Minimal stand-in object used to fake out modules with C dependencies
    which ReadTheDocs cannot build/import.
    Instances are callable (returning further mocks); attribute access yields
    '/dev/null' for ``__file__``/``__path__``, a freshly minted dummy class
    for capitalized names (so ``isinstance``/subclassing keep working), and
    another `Mock` for everything else.
    """
    def __init__(self, *args, **kwargs):
        pass
    def __call__(self, *args, **kwargs):
        return Mock()
    @classmethod
    def __getattr__(cls, name):
        # Module-ish dunders need real-looking string values.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # Capitalized names are presumed to be classes: manufacture one.
        if name[0] == name[0].upper():
            stub = type(name, (), {})
            stub.__module__ = __name__
            return stub
        # Anything else: just keep mocking all the way down.
        return Mock()
#sys.path.append("../GPy")
#import mock
# Replace heavyweight/uninstallable dependencies with Mock instances in
# sys.modules so autodoc can import GPy modules on builders (e.g.
# ReadTheDocs) lacking the real packages.
MOCK_MODULES = ['pylab', 'matplotlib', 'sympy', 'sympy.utilities', 'sympy.utilities.codegen', 'sympy.core.cache', 'sympy.core', 'sympy.parsing', 'sympy.parsing.sympy_parser']#'matplotlib', 'matplotlib.color', 'matplotlib.pyplot', 'pylab' ]
for mod_name in MOCK_MODULES:
    sys.modules[mod_name] = Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('..'))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
# Make the locally bundled Sphinx extensions (ipython_console_highlighting,
# inheritance_diagram, numpydoc helpers) importable.
sys.path.append(os.path.abspath('sphinxext'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
#'matplotlib.sphinxext.mathmpl',
#'matplotlib.sphinxext.only_directives',
'matplotlib.sphinxext.plot_directive',
#'matplotlib.sphinxext.ipython_directive',
'sphinx.ext.doctest',
'ipython_console_highlighting',
'inheritance_diagram',
'numpydoc']
# ----------------------- READTHEDOCS ------------------
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
sys.path.append("../GPy")
os.system("pwd")
os.system("sphinx-apidoc -f -o . ../GPy")
#os.system("cd ..")
#os.system("cd ./docs")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GPy'
copyright = u'2013, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): version/release are left blank, so the built docs carry no
# version string — confirm this is intentional.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# This is to revert to the default theme on readthedocs
html_style = '/default.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'GPy.tex', u'GPy Documentation',
   u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'gpy', u'GPy Documentation',
     [u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'GPy', u'GPy Documentation',
   u'Author', 'GPy', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'GPy'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2013, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
#############################################################################
#
# Include constructors in all the docs
# Got this method from:
# http://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method
#def skip(app, what, name, obj, skip, options):
#if name == "__init__":
#return False
#return skip
#def setup(app):
#app.connect("autodoc-skip-member", skip)
removed some mocks
# -*- coding: utf-8 -*-
#
# GPy documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 18 15:30:28 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
#Mocking uninstalled modules: https://read-the-docs.readthedocs.org/en/latest/faq.html
#class Mock(object):
#__all__ = []
#def __init__(self, *args, **kwargs):
#for key, value in kwargs.iteritems():
#setattr(self, key, value)
#def __call__(self, *args, **kwargs):
#return Mock()
#__add__ = __mul__ = __getitem__ = __setitem__ = \
#__delitem__ = __sub__ = __floordiv__ = __mod__ = __divmod__ = \
#__pow__ = __lshift__ = __rshift__ = __and__ = __xor__ = __or__ = \
#__rmul__ = __rsub__ = __rfloordiv__ = __rmod__ = __rdivmod__ = \
#__rpow__ = __rlshift__ = __rrshift__ = __rand__ = __rxor__ = __ror__ = \
#__imul__ = __isub__ = __ifloordiv__ = __imod__ = __idivmod__ = \
#__ipow__ = __ilshift__ = __irshift__ = __iand__ = __ixor__ = __ior__ = \
#__neg__ = __pos__ = __abs__ = __invert__ = __call__
#def __getattr__(self, name):
#if name in ('__file__', '__path__'):
#return '/dev/null'
#if name == 'sqrt':
#return math.sqrt
#elif name[0] != '_' and name[0] == name[0].upper():
#return type(name, (), {})
#else:
#return Mock(**vars(self))
#def __lt__(self, *args, **kwargs):
#return True
#__nonzero__ = __le__ = __eq__ = __ne__ = __gt__ = __ge__ = __contains__ = \
#__lt__
#def __repr__(self):
## Use _mock_repr to fake the __repr__ call
#res = getattr(self, "_mock_repr")
#return res if isinstance(res, str) else "Mock"
#def __hash__(self):
#return 1
#__len__ = __int__ = __long__ = __index__ = __hash__
#def __oct__(self):
#return '01'
#def __hex__(self):
#return '0x1'
#def __float__(self):
#return 0.1
#def __complex__(self):
#return 1j
#MOCK_MODULES = [
#'pylab', 'scipy', 'matplotlib', 'matplotlib.pyplot', 'pyfits',
#'scipy.constants.constants', 'matplotlib.cm',
#'matplotlib.image', 'matplotlib.colors', 'sunpy.cm',
#'pandas', 'pandas.io', 'pandas.io.parsers',
#'suds', 'matplotlib.ticker', 'matplotlib.colorbar',
#'matplotlib.dates', 'scipy.optimize', 'scipy.ndimage',
#'matplotlib.figure', 'scipy.ndimage.interpolation', 'bs4']
#for mod_name in MOCK_MODULES:
#sys.modules[mod_name] = Mock()
#sys.modules['numpy'] = Mock(pi=math.pi, G=6.67364e-11,
#ndarray=type('ndarray', (), {}),
#dtype=lambda _: Mock(_mock_repr='np.dtype(\'float32\')'))
#sys.modules['scipy.constants'] = Mock(pi=math.pi, G=6.67364e-11)
##############################################################################
##
## Mock out imports with C dependencies because ReadTheDocs can't build them.
class Mock(object):
    """Stand-in for modules with C dependencies that ReadTheDocs cannot build.

    Instances absorb any construction or call, and attribute access falls
    through ``__getattr__``: module-introspection dunders resolve to
    '/dev/null', names whose first character uppercases to itself (i.e.
    class-style names) yield throwaway classes, and everything else yields
    another Mock.
    """
    def __init__(self, *args, **kwargs):
        # Accept and ignore any constructor signature.
        pass
    def __call__(self, *args, **kwargs):
        # Calling a mock yields a fresh mock.
        return Mock()
    @classmethod
    def __getattr__(cls, name):
        # Sphinx/autodoc sometimes inspects these module attributes.
        if name in ('__file__', '__path__'):
            return '/dev/null'
        # Class-style names (first char uppercases to itself, which also
        # matches non-letters) become throwaway types so isinstance checks
        # and subclassing keep working.
        if name[0] == name[0].upper():
            fake_class = type(name, (), {})
            fake_class.__module__ = __name__
            return fake_class
        return Mock()
#sys.path.append("../GPy")
#import mock
# Only pylab still needs a stub; the matplotlib/sympy mocks were removed so
# the real matplotlib Sphinx extensions can load.
MOCK_MODULES = ['pylab']#, 'matplotlib', 'sympy', 'sympy.utilities', 'sympy.utilities.codegen', 'sympy.core.cache', 'sympy.core', 'sympy.parsing', 'sympy.parsing.sympy_parser']#'matplotlib', 'matplotlib.color', 'matplotlib.pyplot', 'pylab' ]
sys.modules.update((module_name, Mock()) for module_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('..'))
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('sphinxext'))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# The matplotlib sphinxext extensions need a real matplotlib install, which is
# why matplotlib is not in MOCK_MODULES above.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode',
              'matplotlib.sphinxext.mathmpl',
              'matplotlib.sphinxext.only_directives',
              'matplotlib.sphinxext.plot_directive',
              'matplotlib.sphinxext.ipython_directive',
              'sphinx.ext.doctest',
              'ipython_console_highlighting',
              'inheritance_diagram',
              'numpydoc']
# ----------------------- READTHEDOCS ------------------
# On ReadTheDocs, put the package on sys.path and regenerate the API .rst
# files with sphinx-apidoc at build time.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
    sys.path.append("../GPy")
    os.system("pwd")
    os.system("sphinx-apidoc -f -o . ../GPy")
#os.system("cd ..")
#os.system("cd ./docs")
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'GPy'
copyright = u'2013, Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE(review): version/release are left blank, so the built docs carry no
# version string — confirm this is intentional.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# This is to revert to the default theme on readthedocs
html_style = '/default.css'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'GPydoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'GPy.tex', u'GPy Documentation',
   u'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'gpy', u'GPy Documentation',
     [u'Author'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'GPy', u'GPy Documentation',
   u'Author', 'GPy', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'GPy'
epub_author = u'Author'
epub_publisher = u'Author'
epub_copyright = u'2013, Author'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
#############################################################################
#
# Include constructors in all the docs
# Got this method from:
# http://stackoverflow.com/questions/5599254/how-to-use-sphinxs-autodoc-to-document-a-classs-init-self-method
#def skip(app, what, name, obj, skip, options):
#if name == "__init__":
#return False
#return skip
#def setup(app):
#app.connect("autodoc-skip-member", skip)
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
str_or_none,
urlencode_postdata,
)
class RoosterTeethIE(InfoExtractor):
    """Extractor for roosterteeth.com episode pages (and channel subdomains).

    Some episodes are restricted to FIRST members; _login() authenticates
    against the site's HTML login form using credentials supplied via
    --username/--password or a .netrc entry for 'roosterteeth'.
    """
    _VALID_URL = r'https?://(?:.+?\.)?roosterteeth\.com/episode/(?P<id>[^/?#&]+)'
    _LOGIN_URL = 'https://roosterteeth.com/login'
    _NETRC_MACHINE = 'roosterteeth'
    _TESTS = [{
        'url': 'http://roosterteeth.com/episode/million-dollars-but-season-2-million-dollars-but-the-game-announcement',
        'md5': 'e2bd7764732d785ef797700a2489f212',
        'info_dict': {
            'id': '9156',
            'display_id': 'million-dollars-but-season-2-million-dollars-but-the-game-announcement',
            'ext': 'mp4',
            'title': 'Million Dollars, But... The Game Announcement',
            'description': 'md5:168a54b40e228e79f4ddb141e89fe4f5',
            'thumbnail': r're:^https?://.*\.png$',
            'series': 'Million Dollars, But...',
            'episode': 'Million Dollars, But... The Game Announcement',
        },
    }, {
        'url': 'http://achievementhunter.roosterteeth.com/episode/off-topic-the-achievement-hunter-podcast-2016-i-didn-t-think-it-would-pass-31',
        'only_matching': True,
    }, {
        'url': 'http://funhaus.roosterteeth.com/episode/funhaus-shorts-2016-austin-sucks-funhaus-shorts',
        'only_matching': True,
    }, {
        'url': 'http://screwattack.roosterteeth.com/episode/death-battle-season-3-mewtwo-vs-shadow',
        'only_matching': True,
    }, {
        'url': 'http://theknow.roosterteeth.com/episode/the-know-game-news-season-1-boring-steam-sales-are-better',
        'only_matching': True,
    }, {
        # only available for FIRST members
        'url': 'http://roosterteeth.com/episode/rt-docs-the-world-s-greatest-head-massage-the-world-s-greatest-head-massage-an-asmr-journey-part-one',
        'only_matching': True,
    }]
    def _login(self):
        """Log in via the site's HTML form; no-op when no username is set.

        Raises ExtractorError when the post-login page does not contain a
        logout link or "Sign Out" marker.
        """
        username, password = self._get_login_info()
        if username is None:
            return
        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='Unable to download login page')
        # Carry over hidden form fields (e.g. CSRF tokens) from the login page.
        login_form = self._hidden_inputs(login_page)
        login_form.update({
            'username': username,
            'password': password,
        })
        login_request = self._download_webpage(
            self._LOGIN_URL, None,
            note='Logging in',
            data=urlencode_postdata(login_form),
            headers={
                'Referer': self._LOGIN_URL,
            })
        # Success is detected by a logout link or a "Sign Out" control.
        if not any(re.search(p, login_request) for p in (
                r'href=["\']https?://(?:www\.)?roosterteeth\.com/logout"',
                r'>Sign Out<')):
            # Surface the site's own error message when one is shown.
            error = self._html_search_regex(
                r'(?s)<div[^>]+class=(["\']).*?\balert-danger\b.*?\1[^>]*>(?:\s*<button[^>]*>.*?</button>)?(?P<error>.+?)</div>',
                login_request, 'alert', default=None, group='error')
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')
    def _real_initialize(self):
        # Runs before extraction: log in when credentials are available.
        self._login()
    def _real_extract(self, url):
        """Return an info dict for the episode identified by the URL slug."""
        display_id = self._match_id(url)
        api_episode_url = 'https://svod-be.roosterteeth.com/api/v1/episodes/%s' % display_id
        try:
            m3u8_url = self._download_json(
                api_episode_url + '/videos', display_id,
                'Downloading video JSON metadata')['data'][0]['attributes']['url']
        except ExtractorError as e:
            # An HTTP 403 whose JSON body has "access": false means the
            # episode is FIRST-members-only.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                if self._parse_json(e.cause.read().decode(), display_id).get('access') is False:
                    self.raise_login_required(
                        '%s is only available for FIRST members' % display_id)
            raise
        formats = self._extract_m3u8_formats(
            m3u8_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls')
        self._sort_formats(formats)
        episode = self._download_json(
            api_episode_url, display_id,
            'Downloading episode JSON metadata')['data'][0]
        attributes = episode['attributes']
        title = attributes.get('title') or attributes['display_title']
        video_id = compat_str(episode['id'])
        thumbnails = []
        # Collect one thumbnail entry per available size variant.
        for image in episode.get('included', {}).get('images', []):
            if image.get('type') == 'episode_image':
                img_attributes = image.get('attributes') or {}
                for k in ('thumb', 'small', 'medium', 'large'):
                    img_url = img_attributes.get(k)
                    if img_url:
                        thumbnails.append({
                            'id': k,
                            'url': img_url,
                        })
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': attributes.get('description') or attributes.get('caption'),
            'thumbnails': thumbnails,
            'series': attributes.get('show_title'),
            'season_number': int_or_none(attributes.get('season_number')),
            'season_id': attributes.get('season_id'),
            'episode': title,
            'episode_number': int_or_none(attributes.get('number')),
            'episode_id': str_or_none(episode.get('uuid')),
            'formats': formats,
            'channel_id': attributes.get('channel_id'),
            'duration': int_or_none(attributes.get('length')),
        }
[roosterteeth] add support for watch URLs
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
str_or_none,
urlencode_postdata,
)
class RoosterTeethIE(InfoExtractor):
    """Extractor for roosterteeth.com /episode/ and /watch/ pages.

    Some episodes are restricted to FIRST members; _login() authenticates
    against the site's HTML login form using credentials supplied via
    --username/--password or a .netrc entry for 'roosterteeth'.
    """
    _VALID_URL = r'https?://(?:.+?\.)?roosterteeth\.com/(?:episode|watch)/(?P<id>[^/?#&]+)'
    _LOGIN_URL = 'https://roosterteeth.com/login'
    _NETRC_MACHINE = 'roosterteeth'
    _TESTS = [{
        'url': 'http://roosterteeth.com/episode/million-dollars-but-season-2-million-dollars-but-the-game-announcement',
        'md5': 'e2bd7764732d785ef797700a2489f212',
        'info_dict': {
            'id': '9156',
            'display_id': 'million-dollars-but-season-2-million-dollars-but-the-game-announcement',
            'ext': 'mp4',
            'title': 'Million Dollars, But... The Game Announcement',
            'description': 'md5:168a54b40e228e79f4ddb141e89fe4f5',
            'thumbnail': r're:^https?://.*\.png$',
            'series': 'Million Dollars, But...',
            'episode': 'Million Dollars, But... The Game Announcement',
        },
    }, {
        'url': 'http://achievementhunter.roosterteeth.com/episode/off-topic-the-achievement-hunter-podcast-2016-i-didn-t-think-it-would-pass-31',
        'only_matching': True,
    }, {
        'url': 'http://funhaus.roosterteeth.com/episode/funhaus-shorts-2016-austin-sucks-funhaus-shorts',
        'only_matching': True,
    }, {
        'url': 'http://screwattack.roosterteeth.com/episode/death-battle-season-3-mewtwo-vs-shadow',
        'only_matching': True,
    }, {
        'url': 'http://theknow.roosterteeth.com/episode/the-know-game-news-season-1-boring-steam-sales-are-better',
        'only_matching': True,
    }, {
        # only available for FIRST members
        'url': 'http://roosterteeth.com/episode/rt-docs-the-world-s-greatest-head-massage-the-world-s-greatest-head-massage-an-asmr-journey-part-one',
        'only_matching': True,
    }, {
        'url': 'https://roosterteeth.com/watch/million-dollars-but-season-2-million-dollars-but-the-game-announcement',
        'only_matching': True,
    }]
    def _login(self):
        """Log in via the site's HTML form; no-op when no username is set.

        Raises ExtractorError when the post-login page does not contain a
        logout link or "Sign Out" marker.
        """
        username, password = self._get_login_info()
        if username is None:
            return
        login_page = self._download_webpage(
            self._LOGIN_URL, None,
            note='Downloading login page',
            errnote='Unable to download login page')
        # Carry over hidden form fields (e.g. CSRF tokens) from the login page.
        login_form = self._hidden_inputs(login_page)
        login_form.update({
            'username': username,
            'password': password,
        })
        login_request = self._download_webpage(
            self._LOGIN_URL, None,
            note='Logging in',
            data=urlencode_postdata(login_form),
            headers={
                'Referer': self._LOGIN_URL,
            })
        # Success is detected by a logout link or a "Sign Out" control.
        if not any(re.search(p, login_request) for p in (
                r'href=["\']https?://(?:www\.)?roosterteeth\.com/logout"',
                r'>Sign Out<')):
            # Surface the site's own error message when one is shown.
            error = self._html_search_regex(
                r'(?s)<div[^>]+class=(["\']).*?\balert-danger\b.*?\1[^>]*>(?:\s*<button[^>]*>.*?</button>)?(?P<error>.+?)</div>',
                login_request, 'alert', default=None, group='error')
            if error:
                raise ExtractorError('Unable to login: %s' % error, expected=True)
            raise ExtractorError('Unable to log in')
    def _real_initialize(self):
        # Runs before extraction: log in when credentials are available.
        self._login()
    def _real_extract(self, url):
        """Return an info dict for the episode identified by the URL slug."""
        display_id = self._match_id(url)
        api_episode_url = 'https://svod-be.roosterteeth.com/api/v1/episodes/%s' % display_id
        try:
            m3u8_url = self._download_json(
                api_episode_url + '/videos', display_id,
                'Downloading video JSON metadata')['data'][0]['attributes']['url']
        except ExtractorError as e:
            # An HTTP 403 whose JSON body has "access": false means the
            # episode is FIRST-members-only.
            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
                if self._parse_json(e.cause.read().decode(), display_id).get('access') is False:
                    self.raise_login_required(
                        '%s is only available for FIRST members' % display_id)
            raise
        formats = self._extract_m3u8_formats(
            m3u8_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls')
        self._sort_formats(formats)
        episode = self._download_json(
            api_episode_url, display_id,
            'Downloading episode JSON metadata')['data'][0]
        attributes = episode['attributes']
        title = attributes.get('title') or attributes['display_title']
        video_id = compat_str(episode['id'])
        thumbnails = []
        # Collect one thumbnail entry per available size variant.
        for image in episode.get('included', {}).get('images', []):
            if image.get('type') == 'episode_image':
                img_attributes = image.get('attributes') or {}
                for k in ('thumb', 'small', 'medium', 'large'):
                    img_url = img_attributes.get(k)
                    if img_url:
                        thumbnails.append({
                            'id': k,
                            'url': img_url,
                        })
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'description': attributes.get('description') or attributes.get('caption'),
            'thumbnails': thumbnails,
            'series': attributes.get('show_title'),
            'season_number': int_or_none(attributes.get('season_number')),
            'season_id': attributes.get('season_id'),
            'episode': title,
            'episode_number': int_or_none(attributes.get('number')),
            'episode_id': str_or_none(episode.get('uuid')),
            'formats': formats,
            'channel_id': attributes.get('channel_id'),
            'duration': int_or_none(attributes.get('length')),
        }
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals, division, absolute_import
import os, sys, unittest, json, tempfile, subprocess, re
import pexpect
import pipes
from dxpy_testutil import DXTestCase, check_output
import dxpy_testutil as testutil
import dxpy
from dxpy.scripts import dx_build_app
from dxpy.utils.completer import InstanceTypesCompleter
def run(command, **kwargs):
    """Echo *command*, execute it through the shell, and return its output.

    Extra keyword arguments are forwarded to ``check_output``.
    """
    print("$ %s" % (command,))
    result = check_output(command, shell=True, **kwargs)
    print(result)
    return result
# Template languages that the dx-app-wizard tests below are exercised in.
supported_languages = ['Python', 'bash']
def run_dx_app_wizard():
    """Drive ``dx-app-wizard`` interactively with pexpect; return the app dir.

    Deliberately feeds invalid answers (non-ASCII names/classes, an
    over-limit timeout, an unknown language and instance type) before the
    valid ones, to exercise the wizard's validation and re-prompting.
    Runs in a fresh temp directory and always restores the caller's
    working directory.
    """
    old_cwd = os.getcwd()
    # Non-ASCII prefix exercises unicode path handling.
    tempdir = tempfile.mkdtemp(prefix='Программа')
    os.chdir(tempdir)
    try:
        wizard = pexpect.spawn("dx-app-wizard --template parallelized")
        wizard.logfile = sys.stdout
        wizard.setwinsize(20, 90)
        wizard.expect("App Name:")
        wizard.sendline("Имя")  # invalid: app names must match an ASCII pattern
        wizard.expect("The name of your app must match")
        wizard.expect("App Name:")
        wizard.sendline("MyTestApp")
        wizard.expect("Title")
        wizard.sendline("Заголовок")
        wizard.expect("Summary")
        wizard.sendline("Конспект")
        wizard.expect("Version")
        wizard.sendline("1.2.3")
        wizard.expect("1st input name")
        wizard.sendline("in1")
        wizard.expect("Label")
        wizard.sendline("Метка")
        wizard.expect("Choose a class")
        wizard.sendline("int")
        wizard.expect("optional parameter")
        wizard.sendline("n")
        wizard.expect("2nd input name")
        wizard.sendline()  # empty answer: no more inputs
        wizard.expect("1st output name")
        wizard.sendline("out1")
        wizard.expect("Label")
        wizard.sendline()
        wizard.expect("Choose a class")
        wizard.sendline("целое")  # invalid: unrecognized class name
        wizard.expect("Not a recognized class")
        wizard.sendline("int")
        wizard.expect("2nd output name")
        wizard.sendline()
        wizard.expect("Timeout policy")
        wizard.sendline("31d")  # invalid: exceeds the 30-day maximum
        wizard.expect("Error: max allowed timeout is 30 days")
        wizard.sendline("ЄЯTЪЦGЇCЄкЇ")  # invalid: not <int><single-letter suffix>
        wizard.expect("Error: enter an int with a single-letter suffix")
        wizard.expect("Timeout policy")
        wizard.sendline("24h")
        wizard.expect("Programming language")
        wizard.sendline("АЛГОЛ")  # invalid: unsupported language
        wizard.expect("Error: unrecognized response")
        wizard.sendline("Python")
        wizard.expect("Will this app need access to the Internet?")
        wizard.sendline("y")
        wizard.expect("Will this app need access to the parent project?")
        wizard.sendline("y")
        wizard.expect("Choose an instance type for your app")
        wizard.sendline("t1.микро")  # invalid instance type
        wizard.expect("Error: unrecognized response, expected one of")
        wizard.expect("Choose an instance type for your app")
        wizard.sendline()  # empty answer: accept the default instance type
        wizard.expect("App directory created")
        wizard.close()
        appdir = os.path.join(tempdir, "MyTestApp")
        return appdir
    finally:
        os.chdir(old_cwd)
def create_app_dir():
    """Run the app wizard, then patch the generated template in place.

    Rewrites the placeholder bodies so the parallelized skeleton actually
    computes ``sum(i ** 2 for i in range(in1))`` and wires the subjob
    output through to ``out1``. Returns the app directory path.
    """
    appdir = run_dx_app_wizard()
    src_path = os.path.join(appdir, "src", "MyTestApp.py")
    with open(src_path) as fh:
        lines = [l.rstrip() for l in fh]
    with open(src_path, "w") as fh:
        for line in lines:
            if line == '    return { "answer": "placeholder value" }':
                line = '    return { "answer": sum(process_outputs) }'
            elif line == '    return { "output": "placeholder value" }':
                line = '    return { "output": input1 ** 2 }'
            elif line == '    for i in range(10):':
                line = '    for i in range(in1):'
            elif line == '        subjob_input = { "input1": True }':
                line = '        subjob_input = { "input1": i }'
            elif line == '    output["out1"] = out1':
                # Fetch the reduce-phase result before it is assigned below.
                fh.write('    out1 = postprocess_job.get_output_ref("answer")\n')
            fh.write(line + "\n")
    return appdir
def create_app_dir_with_dxapp_json(dxapp_json, language):
    """Generate an app dir from *dxapp_json* via ``dx-app-wizard --json-file``.

    Every remaining wizard prompt is answered with its default (bare
    ``sendline``). Always restores the caller's working directory.

    :param dxapp_json: dict serialized as the app's dxapp.json
    :param language: template language passed to ``--language``
    :returns: path of the generated app directory
    """
    old_cwd = os.getcwd()
    tempdir = tempfile.mkdtemp()
    os.chdir(tempdir)
    try:
        with open('dxapp.json', 'w') as fd:
            json.dump(dxapp_json, fd)
        wizard = pexpect.spawn("dx-app-wizard --json-file dxapp.json --language " + language)
        wizard.setwinsize(20, 90)
        wizard.logfile = sys.stdout
        wizard.expect("App Name")
        wizard.sendline()
        wizard.expect("Version")
        wizard.sendline()
        wizard.expect("Timeout policy")
        wizard.sendline()
        wizard.expect("Will this app need access to the Internet?")
        wizard.sendline()
        wizard.expect("Will this app need access to the parent project?")
        wizard.sendline()
        wizard.expect("Choose an instance type for your app")
        wizard.sendline()
        wizard.expect("App directory created")
        wizard.close()
        appdir = os.path.join(tempdir, dxapp_json['name'])
        return appdir
    finally:
        os.chdir(old_cwd)
class TestDXAppWizardAndRunAppLocally(DXTestCase):
    """End-to-end tests of dx-app-wizard output and dx-run-app-locally."""

    def test_invalid_arguments(self):
        """An unknown/abbreviated --template value must be rejected."""
        with self.assertRaises(testutil.DXCalledProcessError):
            check_output(['dx-app-wizard', '--template=par'])

    def test_dx_app_wizard(self):
        """The wizard's answers must be reflected in the generated dxapp.json."""
        appdir = run_dx_app_wizard()
        # Use a context manager so the file handle is not leaked.
        with open(os.path.join(appdir, 'dxapp.json')) as manifest:
            dxapp_json = json.load(manifest)
        self.assertEqual(dxapp_json['runSpec']['systemRequirements']['*']['instanceType'],
                         InstanceTypesCompleter.default_instance_type.Name)
        self.assertEqual(dxapp_json['runSpec']['distribution'], 'Ubuntu')
        self.assertEqual(dxapp_json['runSpec']['release'], '14.04')
        self.assertEqual(dxapp_json['timeoutPolicy']['*']['hours'], 24)

    def test_dx_run_app_locally_interactively(self):
        """Run the generated app locally, confirming the prompt interactively."""
        appdir = create_app_dir()
        local_run = pexpect.spawn("dx-run-app-locally {} -iin1=8".format(appdir))
        local_run.expect("Confirm")
        local_run.sendline()
        local_run.expect("App finished successfully")
        # sum(i ** 2 for i in range(8)) == 140
        local_run.expect("Final output: out1 = 140")
        local_run.close()

    def test_dx_run_app_locally_noninteractively(self):
        """Same app, run without a confirmation prompt."""
        appdir = create_app_dir()
        output = check_output(['dx-run-app-locally', appdir, '-iin1=8'])
        print(output)
        self.assertIn("App finished successfully", output)
        self.assertIn("Final output: out1 = 140", output)

    @unittest.skipUnless(testutil.TEST_RUN_JOBS,
                         'skipping test that would run jobs')
    def test_dx_run_app_locally_and_compare_results(self):
        """A real platform job must produce the same output as the local run."""
        appdir = create_app_dir()
        print("Setting current project to", self.project)
        dxpy.WORKSPACE_ID = self.project
        dxpy.PROJECT_CONTEXT_ID = self.project
        applet_id = dx_build_app.build_and_upload_locally(appdir,
                                                          mode='applet',
                                                          overwrite=True,
                                                          dx_toolkit_autodep=False,
                                                          return_object_dump=True)['id']
        remote_job = dxpy.DXApplet(applet_id).run({"in1": 8})
        print("Waiting for", remote_job, "to complete")
        remote_job.wait_on_done()
        result = remote_job.describe()
        self.assertEqual(result["output"]["out1"], 140)

    def test_file_download(self):
        '''
        This test assumes a well-formed input spec and tests that the
        templates created automatically download the files only if
        they are available and does something sensible otherwise.
        '''
        print("Setting current project to", self.project)
        dxpy.WORKSPACE_ID = self.project
        dxpy.PROJECT_CONTEXT_ID = self.project
        # Make a couple files for testing
        dxfile = dxpy.upload_string("foo", name="afile")
        dxpy.upload_string("foobar", name="otherfile")
        dxapp_json = {
            "name": "files",
            "title": "files",
            "summary": "files",
            "dxapi": "1.0.0",
            "version": "0.0.1",
            "categories": [],
            "inputSpec": [
                {
                    "name": "required_file",
                    "class": "file",
                    "optional": False
                },
                {
                    "name": "optional_file",
                    "class": "file",
                    "optional": True
                },
                {
                    "name": "default_file",
                    "class": "file",
                    "optional": True,
                    "default": {"$dnanexus_link": dxfile.get_id()}
                },
                {
                    "name": "required_file_array",
                    "class": "array:file",
                    "optional": False
                },
                {
                    "name": "optional_file_array",
                    "class": "array:file",
                    "optional": True
                }
            ],
            "outputSpec": []
        }
        for lang in supported_languages:
            appdir = create_app_dir_with_dxapp_json(dxapp_json, lang)
            # Test with bare-minimum of inputs
            output = subprocess.check_output(['dx-run-app-locally', appdir, '-irequired_file=afile',
                                              '-irequired_file_array=afile'])
            print(output)
            self.assertIn("App finished successfully", output)
            self.assertIn("Local job workspaces can be found in:", output)
            local_workdir = output.split("Local job workspaces can be found in:")[1].strip()
            file_list = os.listdir(os.path.join(local_workdir, 'localjob-0'))
            self.assertIn("required_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'required_file')), 3)
            # No value was given, so the optional file must NOT be downloaded.
            self.assertNotIn("optional_file", file_list)
            self.assertIn("default_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'default_file')), 3)
            # Test with giving an input to everything
            output = subprocess.check_output(['dx-run-app-locally', appdir,
                                              '-irequired_file=afile',
                                              '-ioptional_file=afile',
                                              '-idefault_file=otherfile',
                                              '-irequired_file_array=afile',
                                              '-ioptional_file_array=afile'])
            print(output)
            self.assertIn("App finished successfully", output)
            self.assertIn("Local job workspaces can be found in:", output)
            local_workdir = output.split("Local job workspaces can be found in:")[1].strip()
            file_list = os.listdir(os.path.join(local_workdir, 'localjob-0'))
            self.assertIn("required_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'required_file')), 3)
            self.assertIn("optional_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'optional_file')), 3)
            # The explicit "otherfile" (6 bytes) must override the 3-byte default.
            self.assertIn("default_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'default_file')), 6)
            concatenated_file_list = ",".join(file_list)
            # Different languages have different naming conventions
            # right now, so just look for the array variable name
            self.assertIn("required_file_array", concatenated_file_list)
            self.assertIn("optional_file_array", concatenated_file_list)

    def test_var_initialization(self):
        '''
        This test assumes a well-formed input spec and mostly just
        tests that everything compiles and the variable initialization
        code does not throw any errors.
        '''
        print("Setting current project to", self.project)
        dxpy.WORKSPACE_ID = self.project
        dxpy.PROJECT_CONTEXT_ID = self.project
        # Make some data objects for input (referenced by name on the
        # command line below; the return values are not needed).
        dxpy.api.applet_new({"project": dxpy.WORKSPACE_ID,
                             "name": "anapplet",
                             "dxapi": "1.0.0",
                             "runSpec": {"code": "", "interpreter": "bash"}})['id']
        dxpy.upload_string("foo", name="afile")
        dxgtable = dxpy.new_dxgtable(columns=[{"name": "int_col", "type": "int"}], name="agtable")
        dxgtable.add_rows([[3], [0]])
        dxgtable.close(block=True)
        dxrecord = dxpy.new_dxrecord(name="arecord")
        dxrecord.close()
        dxapp_json = {
            "name": "all_vars",
            "title": "all_vars",
            "summary": "all_vars",
            "dxapi": "1.0.0",
            "version": "0.0.1",
            "categories": [],
            "inputSpec": [],
            "outputSpec": []
        }
        classes = ['applet', 'record', 'file', 'gtable',
                   'boolean', 'int', 'float', 'string', 'hash',
                   'array:applet', 'array:record', 'array:file', 'array:gtable',
                   'array:boolean', 'array:int', 'array:float', 'array:string']
        for classname in classes:
            dxapp_json['inputSpec'].append({"name": "required_" + classname.replace(":", "_"),
                                            "class": classname,
                                            "optional": False})
            # Note: marking outputs as optional so that empty arrays
            # will be acceptable; keeping names the same (as required)
            # in order to allow pass-through from input variables
            dxapp_json['outputSpec'].append({"name": "required_" + classname.replace(":", "_"),
                                             "class": classname,
                                             "optional": True})
            dxapp_json['inputSpec'].append({"name": "optional_" + classname.replace(":", "_"),
                                            "class": classname,
                                            "optional": True})
        cmdline_args = ['-irequired_applet=anapplet',
                        '-irequired_array_applet=anapplet',
                        '-irequired_record=arecord',
                        '-irequired_array_record=arecord',
                        '-irequired_file=afile',
                        '-irequired_array_file=afile',
                        '-irequired_gtable=agtable',
                        '-irequired_array_gtable=agtable',
                        '-irequired_boolean=true',
                        '-irequired_array_boolean=true',
                        '-irequired_array_boolean=false',
                        '-irequired_int=32',
                        '-irequired_array_int=42',
                        '-irequired_float=3.4',
                        '-irequired_array_float=.42',
                        '-irequired_string=foo',
                        '-irequired_array_string=bar',
                        '-irequired_hash={"foo":"bar"}']
        for lang in supported_languages:
            appdir = create_app_dir_with_dxapp_json(dxapp_json, lang)
            # Test with bare-minimum of inputs
            output = subprocess.check_output(['dx-run-app-locally', appdir] + cmdline_args)
            print(output)
            # Verify array is printed total 3 times once in each input, logs, and final output
            self.assertEqual(len(re.findall(r"required_array_boolean = \[ true, false ]", output)), 3)
            self.assertIn("App finished successfully", output)
            # See PTFM-13697 for CentOS 5 details
            if testutil.TEST_RUN_JOBS and not testutil.host_is_centos_5():
                # Now actually make it an applet and run it
                applet_name = dxapp_json['name'] + '-' + lang
                subprocess.check_output(['dx', 'build', appdir, '--destination', applet_name])
                subprocess.check_output(['dx', 'run', applet_name, '-y', '--wait'] + cmdline_args)

    @unittest.skipUnless(testutil.TEST_ENV, 'skipping test that would clobber your local environment')
    def test_dx_run_app_locally_without_auth(self):
        """Without credentials or a project context, the run must fail cleanly."""
        temp_file_path = tempfile.mkdtemp()
        app_spec = {
            "name": "test_run_locally_without_auth",
            "dxapi": "1.0.0",
            "runSpec": {"file": "code.py", "interpreter": "python2.7"},
            "inputSpec": [{"name": "foo", "class": "file"}],
            "outputSpec": [],
            "version": "1.0.0"
        }
        app_dir_path = os.path.join(temp_file_path, app_spec['name'])
        os.mkdir(app_dir_path)
        with open(os.path.join(app_dir_path, 'dxapp.json'), 'w') as manifest:
            manifest.write(json.dumps(app_spec))
        with open(os.path.join(app_dir_path, 'code.py'), 'w') as code_file:
            code_file.write('')
        with testutil.without_auth(), testutil.without_project_context():
            with self.assertSubprocessFailure(stderr_regexp="logged in", exit_code=3):
                run("dx-run-app-locally " + pipes.quote(app_dir_path) + " -ifoo=nothing")

    def test_dx_run_app_locally_invalid_interpreter(self):
        """An unsupported runSpec interpreter must be reported as an error."""
        temp_file_path = tempfile.mkdtemp()
        app_spec = {
            "name": "test_run_locally_invalid_interpreter",
            "dxapi": "1.0.0",
            "runSpec": {"file": "code.py", "interpreter": "python"},
            "inputSpec": [],
            "outputSpec": [],
            "version": "1.0.0"
        }
        app_dir_path = os.path.join(temp_file_path, app_spec['name'])
        os.mkdir(app_dir_path)
        with open(os.path.join(app_dir_path, 'dxapp.json'), 'w') as manifest:
            manifest.write(json.dumps(app_spec))
        with open(os.path.join(app_dir_path, 'code.py'), 'w') as code_file:
            code_file.write('')
        with self.assertSubprocessFailure(stderr_regexp="Unknown interpreter python", exit_code=3):
            run("dx-run-app-locally " + pipes.quote(app_dir_path))
# NOTE(review): stray module-level string; it describes the class that
# follows and would be better placed as that class's docstring.
'''
test the upload/download helpers by running them locally
'''
class TestDXBashHelpers(DXTestCase):
    """Tests for the bash upload/download helpers, exercised locally
    through ``dx-run-app-locally`` against apps under ``file_load/``."""
    def run_test_app_locally(self, app_name, arg_list):
        '''
        :param app_name: name of app to run
        :param arg_list: list of command line arguments given to an app
        Runs an app locally, with a given set of command line arguments
        '''
        # Test apps live next to this file, in the "file_load" directory.
        path = os.path.join(os.path.dirname(__file__), "file_load")
        args = ['dx-run-app-locally', os.path.join(path, app_name)]
        args.extend(arg_list)
        check_output(args)
    def test_vars(self):
        """Tests bash variable generation """
        # Make a couple files for testing
        dxpy.upload_string("1234", name="A.txt", wait_on_close=True)
        # Covers scalar and array flavors of every primitive class, plus a hash.
        self.run_test_app_locally('vars', ['-iseq1=A.txt', '-iseq2=A.txt', '-igenes=A.txt', '-igenes=A.txt',
                                           '-ii=5', '-ix=4.2', '-ib=true', '-is=hello',
                                           '-iil=6', '-iil=7', '-iil=8',
                                           '-ixl=3.3', '-ixl=4.4', '-ixl=5.0',
                                           '-ibl=true', '-ibl=false', '-ibl=true',
                                           '-isl=hello', '-isl=world', '-isl=next',
                                           '-imisc={"hello": "world", "foo": true}'])
    def test_prefix_patterns(self):
        """ Tests that the bash prefix variable works correctly, and
        respects patterns.
        """
        buf = "1234"
        # Filenames chosen to hit each pattern rule in the test app's spec.
        filenames = ["A.bar", "A.json.dot.bar", "A.vcf.pam", "A.foo.bar", "fooxxx.bam", "A.bar.gz", "x13year23.sam"]
        for fname in filenames:
            dxpy.upload_string(buf, name=fname, wait_on_close=True)
        self.run_test_app_locally('prefix_patterns', ['-iseq1=A.bar',
                                                      '-iseq2=A.json.dot.bar',
                                                      '-igene=A.vcf.pam',
                                                      '-imap=A.foo.bar',
                                                      '-imap2=fooxxx.bam',
                                                      '-imap3=A.bar',
                                                      '-imap4=A.bar.gz',
                                                      '-imulti=x13year23.sam'])
    def test_deepdirs(self):
        # Exercises nested output directory handling; the app needs no inputs.
        self.run_test_app_locally('deepdirs', [])
    def test_basic(self):
        '''Tests upload/download helpers
        '''
        # Make a couple files for testing
        dxpy.upload_string("1234", wait_on_close=True, name="A.txt")
        # this invocation should fail with a CLI exception
        # (B.txt does not exist yet, so input resolution must fail)
        with self.assertRaises(testutil.DXCalledProcessError):
            self.run_test_app_locally('basic', ['-iseq1=A.txt', '-iseq2=B.txt'])
        dxpy.upload_string("ABCD", wait_on_close=True, name="B.txt")
        # these should succeed
        self.run_test_app_locally('basic', ['-iseq1=A.txt', '-iseq2=B.txt',
                                            '-iref=A.txt', '-iref=B.txt',
                                            "-ivalue=5", '-iages=1'])
        self.run_test_app_locally('basic', ['-iseq1=A.txt', '-iseq2=B.txt', '-ibar=A.txt',
                                            '-iref=A.txt', '-iref=B.txt',
                                            "-ivalue=5", '-iages=1'])
        self.run_test_app_locally('basic', ['-iseq1=A.txt', '-iseq2=B.txt',
                                            '-iref=A.txt', '-iref=B.txt', "-ivalue=5",
                                            '-iages=1', '-iages=11', '-iages=33'])
        # check the except flags
        self.run_test_app_locally('basic_except', ['-iseq1=A.txt', '-iseq2=B.txt',
                                                   '-iref=A.txt', '-iref=B.txt', "-ivalue=5",
                                                   '-iages=1', '-iages=11', '-iages=33'])
    def test_sub_jobs(self):
        ''' Tests a bash script that generates sub-jobs '''
        dxpy.upload_string("1234", wait_on_close=True, name="A.txt")
        dxpy.upload_string("ABCD", wait_on_close=True, name="B.txt")
        self.run_test_app_locally('with-subjobs', ["-ifiles=A.txt", "-ifiles=B.txt"])
    def test_parseq(self):
        ''' Tests the parallel/sequential variations '''
        dxpy.upload_string("1234", wait_on_close=True, name="A.txt")
        dxpy.upload_string("ABCD", wait_on_close=True, name="B.txt")
        self.run_test_app_locally('parseq', ["-iseq1=A.txt", "-iseq2=B.txt", "-iref=A.txt", "-iref=B.txt"])
    def test_file_optional(self):
        ''' Tests that file optional input arguments are handled correctly '''
        self.run_test_app_locally('file_optional', ["-icreate_seq3=true"])
# Allow invoking this test module directly, outside of a test harness.
if __name__ == '__main__':
    unittest.main()
Stop testing GTables as an input class in run-app-locally tests
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function, unicode_literals, division, absolute_import
import os, sys, unittest, json, tempfile, subprocess, re
import pexpect
import pipes
from dxpy_testutil import DXTestCase, check_output
import dxpy_testutil as testutil
import dxpy
from dxpy.scripts import dx_build_app
from dxpy.utils.completer import InstanceTypesCompleter
def run(command, **kwargs):
    """Echo *command*, execute it through the shell, and return its output.

    Extra keyword arguments are forwarded to ``check_output``.
    """
    print("$ %s" % (command,))
    result = check_output(command, shell=True, **kwargs)
    print(result)
    return result
# Template languages that the dx-app-wizard tests below are exercised in.
supported_languages = ['Python', 'bash']
def run_dx_app_wizard():
    """Drive ``dx-app-wizard`` interactively with pexpect; return the app dir.

    Deliberately feeds invalid answers (non-ASCII names/classes, an
    over-limit timeout, an unknown language and instance type) before the
    valid ones, to exercise the wizard's validation and re-prompting.
    Runs in a fresh temp directory and always restores the caller's
    working directory.
    """
    old_cwd = os.getcwd()
    # Non-ASCII prefix exercises unicode path handling.
    tempdir = tempfile.mkdtemp(prefix='Программа')
    os.chdir(tempdir)
    try:
        wizard = pexpect.spawn("dx-app-wizard --template parallelized")
        wizard.logfile = sys.stdout
        wizard.setwinsize(20, 90)
        wizard.expect("App Name:")
        wizard.sendline("Имя")  # invalid: app names must match an ASCII pattern
        wizard.expect("The name of your app must match")
        wizard.expect("App Name:")
        wizard.sendline("MyTestApp")
        wizard.expect("Title")
        wizard.sendline("Заголовок")
        wizard.expect("Summary")
        wizard.sendline("Конспект")
        wizard.expect("Version")
        wizard.sendline("1.2.3")
        wizard.expect("1st input name")
        wizard.sendline("in1")
        wizard.expect("Label")
        wizard.sendline("Метка")
        wizard.expect("Choose a class")
        wizard.sendline("int")
        wizard.expect("optional parameter")
        wizard.sendline("n")
        wizard.expect("2nd input name")
        wizard.sendline()  # empty answer: no more inputs
        wizard.expect("1st output name")
        wizard.sendline("out1")
        wizard.expect("Label")
        wizard.sendline()
        wizard.expect("Choose a class")
        wizard.sendline("целое")  # invalid: unrecognized class name
        wizard.expect("Not a recognized class")
        wizard.sendline("int")
        wizard.expect("2nd output name")
        wizard.sendline()
        wizard.expect("Timeout policy")
        wizard.sendline("31d")  # invalid: exceeds the 30-day maximum
        wizard.expect("Error: max allowed timeout is 30 days")
        wizard.sendline("ЄЯTЪЦGЇCЄкЇ")  # invalid: not <int><single-letter suffix>
        wizard.expect("Error: enter an int with a single-letter suffix")
        wizard.expect("Timeout policy")
        wizard.sendline("24h")
        wizard.expect("Programming language")
        wizard.sendline("АЛГОЛ")  # invalid: unsupported language
        wizard.expect("Error: unrecognized response")
        wizard.sendline("Python")
        wizard.expect("Will this app need access to the Internet?")
        wizard.sendline("y")
        wizard.expect("Will this app need access to the parent project?")
        wizard.sendline("y")
        wizard.expect("Choose an instance type for your app")
        wizard.sendline("t1.микро")  # invalid instance type
        wizard.expect("Error: unrecognized response, expected one of")
        wizard.expect("Choose an instance type for your app")
        wizard.sendline()  # empty answer: accept the default instance type
        wizard.expect("App directory created")
        wizard.close()
        appdir = os.path.join(tempdir, "MyTestApp")
        return appdir
    finally:
        os.chdir(old_cwd)
def create_app_dir():
    """Run the app wizard, then patch the generated template in place.

    Rewrites the placeholder bodies so the parallelized skeleton actually
    computes ``sum(i ** 2 for i in range(in1))`` and wires the subjob
    output through to ``out1``. Returns the app directory path.
    """
    appdir = run_dx_app_wizard()
    src_path = os.path.join(appdir, "src", "MyTestApp.py")
    with open(src_path) as fh:
        lines = [l.rstrip() for l in fh]
    with open(src_path, "w") as fh:
        for line in lines:
            if line == '    return { "answer": "placeholder value" }':
                line = '    return { "answer": sum(process_outputs) }'
            elif line == '    return { "output": "placeholder value" }':
                line = '    return { "output": input1 ** 2 }'
            elif line == '    for i in range(10):':
                line = '    for i in range(in1):'
            elif line == '        subjob_input = { "input1": True }':
                line = '        subjob_input = { "input1": i }'
            elif line == '    output["out1"] = out1':
                # Fetch the reduce-phase result before it is assigned below.
                fh.write('    out1 = postprocess_job.get_output_ref("answer")\n')
            fh.write(line + "\n")
    return appdir
def create_app_dir_with_dxapp_json(dxapp_json, language):
    """Generate an app dir from *dxapp_json* via ``dx-app-wizard --json-file``.

    Every remaining wizard prompt is answered with its default (bare
    ``sendline``). Always restores the caller's working directory.

    :param dxapp_json: dict serialized as the app's dxapp.json
    :param language: template language passed to ``--language``
    :returns: path of the generated app directory
    """
    old_cwd = os.getcwd()
    tempdir = tempfile.mkdtemp()
    os.chdir(tempdir)
    try:
        with open('dxapp.json', 'w') as fd:
            json.dump(dxapp_json, fd)
        wizard = pexpect.spawn("dx-app-wizard --json-file dxapp.json --language " + language)
        wizard.setwinsize(20, 90)
        wizard.logfile = sys.stdout
        wizard.expect("App Name")
        wizard.sendline()
        wizard.expect("Version")
        wizard.sendline()
        wizard.expect("Timeout policy")
        wizard.sendline()
        wizard.expect("Will this app need access to the Internet?")
        wizard.sendline()
        wizard.expect("Will this app need access to the parent project?")
        wizard.sendline()
        wizard.expect("Choose an instance type for your app")
        wizard.sendline()
        wizard.expect("App directory created")
        wizard.close()
        appdir = os.path.join(tempdir, dxapp_json['name'])
        return appdir
    finally:
        os.chdir(old_cwd)
class TestDXAppWizardAndRunAppLocally(DXTestCase):
    """End-to-end tests of dx-app-wizard output and dx-run-app-locally."""

    def test_invalid_arguments(self):
        """An unknown/abbreviated --template value must be rejected."""
        with self.assertRaises(testutil.DXCalledProcessError):
            check_output(['dx-app-wizard', '--template=par'])

    def test_dx_app_wizard(self):
        """The wizard's answers must be reflected in the generated dxapp.json."""
        appdir = run_dx_app_wizard()
        # Use a context manager so the file handle is not leaked.
        with open(os.path.join(appdir, 'dxapp.json')) as manifest:
            dxapp_json = json.load(manifest)
        self.assertEqual(dxapp_json['runSpec']['systemRequirements']['*']['instanceType'],
                         InstanceTypesCompleter.default_instance_type.Name)
        self.assertEqual(dxapp_json['runSpec']['distribution'], 'Ubuntu')
        self.assertEqual(dxapp_json['runSpec']['release'], '14.04')
        self.assertEqual(dxapp_json['timeoutPolicy']['*']['hours'], 24)

    def test_dx_run_app_locally_interactively(self):
        """Run the generated app locally, confirming the prompt interactively."""
        appdir = create_app_dir()
        local_run = pexpect.spawn("dx-run-app-locally {} -iin1=8".format(appdir))
        local_run.expect("Confirm")
        local_run.sendline()
        local_run.expect("App finished successfully")
        # sum(i ** 2 for i in range(8)) == 140
        local_run.expect("Final output: out1 = 140")
        local_run.close()

    def test_dx_run_app_locally_noninteractively(self):
        """Same app, run without a confirmation prompt."""
        appdir = create_app_dir()
        output = check_output(['dx-run-app-locally', appdir, '-iin1=8'])
        print(output)
        self.assertIn("App finished successfully", output)
        self.assertIn("Final output: out1 = 140", output)

    @unittest.skipUnless(testutil.TEST_RUN_JOBS,
                         'skipping test that would run jobs')
    def test_dx_run_app_locally_and_compare_results(self):
        """A real platform job must produce the same output as the local run."""
        appdir = create_app_dir()
        print("Setting current project to", self.project)
        dxpy.WORKSPACE_ID = self.project
        dxpy.PROJECT_CONTEXT_ID = self.project
        applet_id = dx_build_app.build_and_upload_locally(appdir,
                                                          mode='applet',
                                                          overwrite=True,
                                                          dx_toolkit_autodep=False,
                                                          return_object_dump=True)['id']
        remote_job = dxpy.DXApplet(applet_id).run({"in1": 8})
        print("Waiting for", remote_job, "to complete")
        remote_job.wait_on_done()
        result = remote_job.describe()
        self.assertEqual(result["output"]["out1"], 140)

    def test_file_download(self):
        '''
        This test assumes a well-formed input spec and tests that the
        templates created automatically download the files only if
        they are available and does something sensible otherwise.
        '''
        print("Setting current project to", self.project)
        dxpy.WORKSPACE_ID = self.project
        dxpy.PROJECT_CONTEXT_ID = self.project
        # Make a couple files for testing
        dxfile = dxpy.upload_string("foo", name="afile")
        dxpy.upload_string("foobar", name="otherfile")
        dxapp_json = {
            "name": "files",
            "title": "files",
            "summary": "files",
            "dxapi": "1.0.0",
            "version": "0.0.1",
            "categories": [],
            "inputSpec": [
                {
                    "name": "required_file",
                    "class": "file",
                    "optional": False
                },
                {
                    "name": "optional_file",
                    "class": "file",
                    "optional": True
                },
                {
                    "name": "default_file",
                    "class": "file",
                    "optional": True,
                    "default": {"$dnanexus_link": dxfile.get_id()}
                },
                {
                    "name": "required_file_array",
                    "class": "array:file",
                    "optional": False
                },
                {
                    "name": "optional_file_array",
                    "class": "array:file",
                    "optional": True
                }
            ],
            "outputSpec": []
        }
        for lang in supported_languages:
            appdir = create_app_dir_with_dxapp_json(dxapp_json, lang)
            # Test with bare-minimum of inputs
            output = subprocess.check_output(['dx-run-app-locally', appdir, '-irequired_file=afile',
                                              '-irequired_file_array=afile'])
            print(output)
            self.assertIn("App finished successfully", output)
            self.assertIn("Local job workspaces can be found in:", output)
            local_workdir = output.split("Local job workspaces can be found in:")[1].strip()
            file_list = os.listdir(os.path.join(local_workdir, 'localjob-0'))
            self.assertIn("required_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'required_file')), 3)
            # No value was given, so the optional file must NOT be downloaded.
            self.assertNotIn("optional_file", file_list)
            self.assertIn("default_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'default_file')), 3)
            # Test with giving an input to everything
            output = subprocess.check_output(['dx-run-app-locally', appdir,
                                              '-irequired_file=afile',
                                              '-ioptional_file=afile',
                                              '-idefault_file=otherfile',
                                              '-irequired_file_array=afile',
                                              '-ioptional_file_array=afile'])
            print(output)
            self.assertIn("App finished successfully", output)
            self.assertIn("Local job workspaces can be found in:", output)
            local_workdir = output.split("Local job workspaces can be found in:")[1].strip()
            file_list = os.listdir(os.path.join(local_workdir, 'localjob-0'))
            self.assertIn("required_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'required_file')), 3)
            self.assertIn("optional_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'optional_file')), 3)
            # The explicit "otherfile" (6 bytes) must override the 3-byte default.
            self.assertIn("default_file", file_list)
            self.assertEqual(os.path.getsize(os.path.join(local_workdir, 'localjob-0', 'default_file')), 6)
            concatenated_file_list = ",".join(file_list)
            # Different languages have different naming conventions
            # right now, so just look for the array variable name
            self.assertIn("required_file_array", concatenated_file_list)
            self.assertIn("optional_file_array", concatenated_file_list)

    def test_var_initialization(self):
        '''
        This test assumes a well-formed input spec and mostly just
        tests that everything compiles and the variable initialization
        code does not throw any errors.
        '''
        print("Setting current project to", self.project)
        dxpy.WORKSPACE_ID = self.project
        dxpy.PROJECT_CONTEXT_ID = self.project
        # Make some data objects for input (referenced by name on the
        # command line below; the return values are not needed).
        dxpy.api.applet_new({"project": dxpy.WORKSPACE_ID,
                             "name": "anapplet",
                             "dxapi": "1.0.0",
                             "runSpec": {"code": "", "interpreter": "bash"}})['id']
        dxpy.upload_string("foo", name="afile")
        dxrecord = dxpy.new_dxrecord(name="arecord")
        dxrecord.close()
        dxapp_json = {
            "name": "all_vars",
            "title": "all_vars",
            "summary": "all_vars",
            "dxapi": "1.0.0",
            "version": "0.0.1",
            "categories": [],
            "inputSpec": [],
            "outputSpec": []
        }
        classes = ['applet', 'record', 'file',
                   'boolean', 'int', 'float', 'string', 'hash',
                   'array:applet', 'array:record', 'array:file',
                   'array:boolean', 'array:int', 'array:float', 'array:string']
        for classname in classes:
            dxapp_json['inputSpec'].append({"name": "required_" + classname.replace(":", "_"),
                                            "class": classname,
                                            "optional": False})
            # Note: marking outputs as optional so that empty arrays
            # will be acceptable; keeping names the same (as required)
            # in order to allow pass-through from input variables
            dxapp_json['outputSpec'].append({"name": "required_" + classname.replace(":", "_"),
                                             "class": classname,
                                             "optional": True})
            dxapp_json['inputSpec'].append({"name": "optional_" + classname.replace(":", "_"),
                                            "class": classname,
                                            "optional": True})
        cmdline_args = ['-irequired_applet=anapplet',
                        '-irequired_array_applet=anapplet',
                        '-irequired_record=arecord',
                        '-irequired_array_record=arecord',
                        '-irequired_file=afile',
                        '-irequired_array_file=afile',
                        '-irequired_boolean=true',
                        '-irequired_array_boolean=true',
                        '-irequired_array_boolean=false',
                        '-irequired_int=32',
                        '-irequired_array_int=42',
                        '-irequired_float=3.4',
                        '-irequired_array_float=.42',
                        '-irequired_string=foo',
                        '-irequired_array_string=bar',
                        '-irequired_hash={"foo":"bar"}']
        for lang in supported_languages:
            appdir = create_app_dir_with_dxapp_json(dxapp_json, lang)
            # Test with bare-minimum of inputs
            output = subprocess.check_output(['dx-run-app-locally', appdir] + cmdline_args)
            print(output)
            # Verify array is printed total 3 times once in each input, logs, and final output
            self.assertEqual(len(re.findall(r"required_array_boolean = \[ true, false ]", output)), 3)
            self.assertIn("App finished successfully", output)
            # See PTFM-13697 for CentOS 5 details
            if testutil.TEST_RUN_JOBS and not testutil.host_is_centos_5():
                # Now actually make it an applet and run it
                applet_name = dxapp_json['name'] + '-' + lang
                subprocess.check_output(['dx', 'build', appdir, '--destination', applet_name])
                subprocess.check_output(['dx', 'run', applet_name, '-y', '--wait'] + cmdline_args)

    @unittest.skipUnless(testutil.TEST_ENV, 'skipping test that would clobber your local environment')
    def test_dx_run_app_locally_without_auth(self):
        """Without credentials or a project context, the run must fail cleanly."""
        temp_file_path = tempfile.mkdtemp()
        app_spec = {
            "name": "test_run_locally_without_auth",
            "dxapi": "1.0.0",
            "runSpec": {"file": "code.py", "interpreter": "python2.7"},
            "inputSpec": [{"name": "foo", "class": "file"}],
            "outputSpec": [],
            "version": "1.0.0"
        }
        app_dir_path = os.path.join(temp_file_path, app_spec['name'])
        os.mkdir(app_dir_path)
        with open(os.path.join(app_dir_path, 'dxapp.json'), 'w') as manifest:
            manifest.write(json.dumps(app_spec))
        with open(os.path.join(app_dir_path, 'code.py'), 'w') as code_file:
            code_file.write('')
        with testutil.without_auth(), testutil.without_project_context():
            with self.assertSubprocessFailure(stderr_regexp="logged in", exit_code=3):
                run("dx-run-app-locally " + pipes.quote(app_dir_path) + " -ifoo=nothing")

    def test_dx_run_app_locally_invalid_interpreter(self):
        """An unsupported runSpec interpreter must be reported as an error."""
        temp_file_path = tempfile.mkdtemp()
        app_spec = {
            "name": "test_run_locally_invalid_interpreter",
            "dxapi": "1.0.0",
            "runSpec": {"file": "code.py", "interpreter": "python"},
            "inputSpec": [],
            "outputSpec": [],
            "version": "1.0.0"
        }
        app_dir_path = os.path.join(temp_file_path, app_spec['name'])
        os.mkdir(app_dir_path)
        with open(os.path.join(app_dir_path, 'dxapp.json'), 'w') as manifest:
            manifest.write(json.dumps(app_spec))
        with open(os.path.join(app_dir_path, 'code.py'), 'w') as code_file:
            code_file.write('')
        with self.assertSubprocessFailure(stderr_regexp="Unknown interpreter python", exit_code=3):
            run("dx-run-app-locally " + pipes.quote(app_dir_path))
# NOTE(review): stray module-level string; it describes the class that
# follows and would be better placed as that class's docstring.
'''
test the upload/download helpers by running them locally
'''
class TestDXBashHelpers(DXTestCase):
    """Tests for the bash upload/download helper apps.

    Each test uploads small fixture files and then runs one of the sample
    apps under the file_load/ directory locally via dx-run-app-locally.
    """
    def run_test_app_locally(self, app_name, arg_list):
        '''
        :param app_name: name of app to run
        :param arg_list: list of command line arguments given to an app
        Runs an app locally, with a given set of command line arguments
        '''
        # The sample apps live in the file_load directory next to this module.
        path = os.path.join(os.path.dirname(__file__), "file_load")
        args = ['dx-run-app-locally', os.path.join(path, app_name)]
        args.extend(arg_list)
        # check_output raises on non-zero exit status, failing the test.
        check_output(args)
    def test_vars(self):
        """Tests bash variable generation """
        # Make a couple files for testing
        dxpy.upload_string("1234", name="A.txt", wait_on_close=True)
        # Exercise every input class: file, file array, int, float, boolean,
        # string, arrays of each scalar type, and an arbitrary JSON hash.
        self.run_test_app_locally('vars', ['-iseq1=A.txt', '-iseq2=A.txt', '-igenes=A.txt', '-igenes=A.txt',
                                           '-ii=5', '-ix=4.2', '-ib=true', '-is=hello',
                                           '-iil=6', '-iil=7', '-iil=8',
                                           '-ixl=3.3', '-ixl=4.4', '-ixl=5.0',
                                           '-ibl=true', '-ibl=false', '-ibl=true',
                                           '-isl=hello', '-isl=world', '-isl=next',
                                           '-imisc={"hello": "world", "foo": true}'])
    def test_prefix_patterns(self):
        """ Tests that the bash prefix variable works correctly, and
        respects patterns.
        """
        buf = "1234"
        # A spread of filenames chosen to hit different dxapp.json patterns.
        filenames = ["A.bar", "A.json.dot.bar", "A.vcf.pam", "A.foo.bar", "fooxxx.bam", "A.bar.gz", "x13year23.sam"]
        for fname in filenames:
            dxpy.upload_string(buf, name=fname, wait_on_close=True)
        self.run_test_app_locally('prefix_patterns', ['-iseq1=A.bar',
                                                      '-iseq2=A.json.dot.bar',
                                                      '-igene=A.vcf.pam',
                                                      '-imap=A.foo.bar',
                                                      '-imap2=fooxxx.bam',
                                                      '-imap3=A.bar',
                                                      '-imap4=A.bar.gz',
                                                      '-imulti=x13year23.sam'])
    def test_deepdirs(self):
        # Exercises nested output directory handling; the app needs no inputs.
        self.run_test_app_locally('deepdirs', [])
    def test_basic(self):
        '''Tests upload/download helpers
        '''
        # Make a couple files for testing
        dxpy.upload_string("1234", wait_on_close=True, name="A.txt")
        # this invocation should fail with a CLI exception (B.txt is missing)
        with self.assertRaises(testutil.DXCalledProcessError):
            self.run_test_app_locally('basic', ['-iseq1=A.txt', '-iseq2=B.txt'])
        dxpy.upload_string("ABCD", wait_on_close=True, name="B.txt")
        # these should succeed
        self.run_test_app_locally('basic', ['-iseq1=A.txt', '-iseq2=B.txt',
                                            '-iref=A.txt', '-iref=B.txt',
                                            "-ivalue=5", '-iages=1'])
        self.run_test_app_locally('basic', ['-iseq1=A.txt', '-iseq2=B.txt', '-ibar=A.txt',
                                            '-iref=A.txt', '-iref=B.txt',
                                            "-ivalue=5", '-iages=1'])
        self.run_test_app_locally('basic', ['-iseq1=A.txt', '-iseq2=B.txt',
                                            '-iref=A.txt', '-iref=B.txt', "-ivalue=5",
                                            '-iages=1', '-iages=11', '-iages=33'])
        # check the except flags
        self.run_test_app_locally('basic_except', ['-iseq1=A.txt', '-iseq2=B.txt',
                                                   '-iref=A.txt', '-iref=B.txt', "-ivalue=5",
                                                   '-iages=1', '-iages=11', '-iages=33'])
    def test_sub_jobs(self):
        ''' Tests a bash script that generates sub-jobs '''
        dxpy.upload_string("1234", wait_on_close=True, name="A.txt")
        dxpy.upload_string("ABCD", wait_on_close=True, name="B.txt")
        self.run_test_app_locally('with-subjobs', ["-ifiles=A.txt", "-ifiles=B.txt"])
    def test_parseq(self):
        ''' Tests the parallel/sequential variations '''
        dxpy.upload_string("1234", wait_on_close=True, name="A.txt")
        dxpy.upload_string("ABCD", wait_on_close=True, name="B.txt")
        self.run_test_app_locally('parseq', ["-iseq1=A.txt", "-iseq2=B.txt", "-iref=A.txt", "-iref=B.txt"])
    def test_file_optional(self):
        ''' Tests that file optional input arguments are handled correctly '''
        self.run_test_app_locally('file_optional', ["-icreate_seq3=true"])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
Add shuffling to classification
|
import bpy
import itertools
import os
from .edm.types import *
from .edm.mathtypes import Matrix, vector_to_edm, matrix_to_edm, Vector
from .edm.basewriter import BaseWriter
def write_file(filename, options=None):
  """Export the current blender scene to an .edm file.

  Args:
    filename: Path of the .edm file to write.
    options: Optional dict of export options (e.g. "apply_modifiers",
      "apply_transform", "convert_axis"), forwarded to mesh generation.

  Writes the file as a side effect; returns nothing.
  """
  # Avoid the mutable-default-argument pitfall; treat None as "no options"
  if options is None:
    options = {}
  # Get a list of all mesh objects to be exported as renderables
  renderables = [x for x in bpy.context.scene.objects if x.type == "MESH" and x.edm.is_renderable]
  materials, materialMap = _create_material_map(renderables)
  # Now, build each RenderNode object, with its parents
  renderNodes = []
  rootNode = Node()
  transformNodes = [rootNode]
  for obj in renderables:
    node = RenderNodeWriter(obj)
    node.material = materialMap[obj.material_slots[0].material.name]
    # Calculate the parents for this node's animation
    parents = node.calculate_parents()
    for parent in parents:
      parent.index = len(transformNodes)
      transformNodes.append(parent)
    # We now have the information to properly enmesh the object
    node.calculate_mesh(options)
    # And, prepare references for writing
    node.convert_references_to_index()
    renderNodes.append(node)
  # Materials, render nodes and parents done - build the root node
  root = RootNodeWriter()
  root.set_bounding_box_from(renderables)
  root.materials = materials
  # And finally the wrapper that owns everything to be serialised
  file = EDMFile()
  file.root = root
  file.nodes = transformNodes
  file.renderNodes = renderNodes
  writer = BaseWriter(filename)
  try:
    file.write(writer)
  finally:
    # Ensure the output file handle is released even if writing fails
    writer.close()
def _create_material_map(blender_objects):
  """Creates a list, and indexed material map from a list of blender objects.
  The map will connect material names to the edm-Material instance.
  In addition, each Material instance will know its own .index"""
  all_Materials = [obj.material_slots[0].material for obj in blender_objects]
  materialMap = {m.name: create_material(m) for m in all_Materials}
  # Index each unique material exactly once, in order of first appearance.
  # The previous version appended one entry per *object*, so a material
  # shared between objects was duplicated in the list and its .index was
  # overwritten to point at the last duplicate.
  materials = []
  seen = set()
  for bMat in all_Materials:
    if bMat.name in seen:
      continue
    seen.add(bMat.name)
    mat = materialMap[bMat.name]
    mat.index = len(materials)
    materials.append(mat)
  return materials, materialMap
def build_parent_nodes(obj):
  """Inspects an object's actions to build a parent transform node.

  Possibly returns a chain of nodes, as in cases of position/visibility
  these must be handled by separate nodes. The returned nodes (or none)
  must then be parented onto whatever parent nodes the object's parents
  possess. If no nodes are returned, then the object should have its
  local transformation applied."""
  # Collect all actions for this object (argument == -1 marks "not exported")
  if not obj.animation_data:
    return []
  actions = set()
  if obj.animation_data.action and obj.animation_data.action.argument != -1:
    actions.add(obj.animation_data.action)
  for track in obj.animation_data.nla_tracks:
    for strip in track.strips:
      if strip.action.argument != -1:
        actions.add(strip.action)
  # Verify each action handles a separate argument, otherwise - who knows -
  # if this becomes a problem we may need to merge actions (ouch)
  arguments = set()
  for action in actions:
    if action.argument in arguments:
      raise RuntimeError("More than one action on an object share arguments. Not sure how to deal with this")
    arguments.add(action.argument)
  if not actions:
    return []
  # No multiple animations for now - get simple right first
  assert len(actions) <= 1, "Do not support multiple actions on object export at this time"
  action = next(iter(actions))
  nodes = []
  # All keyframe types we know how to handle
  ALL_KNOWN = {"location", "rotation_quaternion", "scale", "hide_render"}
  # Keyframe types that are handled by ArgAnimationNode
  AAN_KNOWN = {"location", "rotation_quaternion", "scale"}
  data_categories = set(x.data_path for x in action.fcurves)
  if not data_categories <= ALL_KNOWN:
    print("WARNING: Action has animated keys ({}) that ioEDM can not translate yet!".format(data_categories-ALL_KNOWN))
  # do we need to make an ArgAnimationNode? Use the named constant instead
  # of repeating the literal set (they had drifted apart visually)
  if data_categories & AAN_KNOWN:
    print("Creating ArgAnimationNode")
    nodes.append(create_arganimation_node(obj, [action]))
  return nodes
def create_arganimation_node(object, actions):
  """Build an ArgAnimationNode from an object's animation actions.

  Args:
    object: The blender object being animated.
    actions: List of actions; only a single action is supported for now.

  Returns:
    The populated ArgAnimationNode.

  Raises:
    NotImplementedError: For rotation/scale keyframes, which are not yet
      understood well enough to export.
  """
  # For now, let's assume single-action
  node = ArgAnimationNode()
  assert len(actions) == 1
  for action in actions:
    curves = set(x.data_path for x in action.fcurves)
    posCurves = [x for x in action.fcurves if x.data_path == "location"]
    argument = action.argument
    if "location" in curves:
      # Compute the keyframe times once (the original evaluated this twice)
      times = get_all_keyframe_times(posCurves)
      # What we should scale to - take the maximum keyframe value as '1.0'
      # (fall back to 100.0 if every keyframe sits at time zero)
      scale = 1.0 / (max(abs(x) for x in times) or 100.0)
      # Build up the key data for everything
      posKeys = []
      for time in times:
        position = get_fcurve_position(posCurves, time)
        posKeys.append(PositionKey(frame=time*scale, value=position))
      node.posData.append((argument, posKeys))
    if "rotation_quaternion" in curves:
      raise NotImplementedError()
    if "scale" in curves:
      raise NotImplementedError("Curves not totally understood yet")
  # Now we've processed everything
  return node
def get_all_keyframe_times(fcurves):
  """Get all fixed-point times in a collection of keyframes"""
  # Gather the frame number (co[0]) of every keyframe on every curve,
  # de-duplicated, and hand them back in ascending order.
  collected = {key.co[0] for curve in fcurves for key in curve.keyframe_points}
  return sorted(collected)
def get_fcurve_quaternion(fcurves, frame):
  """Retrieve an evaluated quaternion for a single action at a single frame"""
  # Pick out the four w/x/y/z channels for the quaternion
  quat_curves = [curve for curve in fcurves if curve.data_path == "rotation_quaternion"]
  # Really, quaternion rotation without all channels is stupid
  assert len(quat_curves) == 4, "Incomplete quaternion rotation channels in action"
  # Evaluate each channel, in array_index order, at the requested frame
  values = []
  for idx in range(4):
    channel = [curve for curve in quat_curves if curve.array_index == idx][0]
    values.append(channel.evaluate(frame))
  return Quaternion(values)
def get_fcurve_position(fcurves, frame):
  """Retrieve an evaluated fcurve for position"""
  # Pull out the location channels, one per axis
  loc_curves = [curve for curve in fcurves if curve.data_path == "location"]
  coords = []
  for axis in range(3):
    matches = [curve for curve in loc_curves if curve.array_index == axis]
    # Evaluate the channel when the axis is animated, otherwise it is zero
    coords.append(matches[0].evaluate(frame) if matches else 0)
  return Vector(coords)
def calculate_edm_world_bounds(objects):
  """Calculates, in EDM-space, the bounding box of all objects"""
  lower = [1e38, 1e38, 1e38]
  upper = [-1e38, -1e38, -1e38]
  for obj in objects:
    # Transform every bounding-box corner of this object into EDM space
    corners = [vector_to_edm(obj.matrix_world * Vector(corner)) for corner in obj.bound_box]
    for axis in range(3):
      axis_values = [corner[axis] for corner in corners]
      lower[axis] = min(axis_values + [lower[axis]])
      upper[axis] = max(axis_values + [upper[axis]])
  return Vector(lower), Vector(upper)
def create_texture(source):
  """Build an edm Texture from a blender texture slot.

  Args:
    source: A blender texture slot; its image filename names the texture
      and its use_map_* flags select the texture channel.

  Returns:
    A Texture with the channel index, stripped name and identity matrix.

  Raises:
    ValueError: If the slot maps none of diffuse/normal/specular.
  """
  # Get the texture name stripped of ALL extensions. split(".") is safe for
  # names with no dot; find(".") returned -1 in that case and silently
  # chopped off the final character.
  texName = os.path.basename(source.texture.image.filepath).split(".")[0]
  # Work out the channel for this texture
  if source.use_map_color_diffuse:
    index = 0
  elif source.use_map_normal:
    index = 1
  elif source.use_map_specular:
    index = 2
  else:
    # Previously this fell through and crashed below with a NameError
    raise ValueError("Texture slot maps no recognised channel (diffuse/normal/specular)")
  # For now, assume identity transformation until we understand
  matrix = Matrix()
  return Texture(index=index, name=texName, matrix=matrix)
def create_material(source):
  """Convert a blender material into an edm Material.

  Args:
    source: The blender material to convert.

  Returns:
    A populated Material (uniforms, shadow flags, vertex format, textures).
  """
  mat = Material()
  mat.blending = int(source.edm_blending)
  mat.material_name = source.edm_material
  mat.name = source.name
  mat.uniforms = {
    "specPower": float(source.specular_hardness), # Important this is a float
    "specFactor": source.specular_intensity,
    "diffuseValue": source.diffuse_intensity,
    "reflectionValue": 0.0, # Always in uniforms, so keep here for compatibility
  }
  # No idea what this corresponds to yet:
  #   "diffuseShift": Vector((0,0)),
  if source.raytrace_mirror.use:
    mat.uniforms["reflectionValue"] = source.raytrace_mirror.reflect_factor
    mat.uniforms["reflectionBlurring"] = 1.0-source.raytrace_mirror.gloss_factor
  # NOTE: "recieve" is the (misspelt) attribute name the edm types expose
  mat.shadows.recieve = source.use_shadows
  mat.shadows.cast = source.use_cast_shadows
  mat.shadows.cast_only = source.use_cast_shadows_only
  mat.vertex_format = VertexFormat({
    "position": 4,
    "normal": 3,
    "tex0": 2
  })
  mat.texture_coordinates_channels = [0] + [-1]*11
  # Find the textures for each of the layers
  # Find diffuse - this will sometimes also include a translucency map.
  # (The bare try/except + pdb.set_trace() debugging scaffold that wrapped
  # this lookup has been removed; empty slots are filtered out explicitly.)
  diffuseTex = [x for x in source.texture_slots if x is not None and x.use_map_color_diffuse]
  # normalTex = [x for x in source.texture_slots if x.use_map_normal]
  # specularTex = [x for x in source.texture_slots if x.use_map_specular]
  assert len(diffuseTex) == 1, "Expected exactly one diffuse texture slot"
  mat.textures.append(create_texture(diffuseTex[0]))
  return mat
def create_mesh_data(source, material, options={}):
  """Takes an object and converts it to a mesh suitable for writing

  :param source:   The blender object to convert
  :param material: The material for the object (currently unused here;
                   kept for interface symmetry with callers)
  :param options:  Export options: "apply_modifiers", "apply_transform",
                   "convert_axis"
  :returns: (vertices, indices): each vertex is a flat tuple of
            position + [0] padding + normal + uv; indices are triangles
  """
  # Always remesh, because we will want to apply transformations
  mesh = source.to_mesh(bpy.context.scene,
    apply_modifiers=options.get("apply_modifiers", False),
    settings="RENDER", calc_tessface=True)
  # Apply the local transform. IF there are no parents, then this should
  # be identical to the world transform anyway
  if options.get("apply_transform", True):
    mesh.transform(source.matrix_local)
  # Should be more complicated for multiple layers, but will do for now
  uv_tex = mesh.tessface_uv_textures.active.data
  newVertices = []
  newIndexValues = []
  # Loop over every face, and the UV data for that face
  for face, uvFace in zip(mesh.tessfaces, uv_tex):
    # What are the new index values going to be?
    newFaceIndex = [len(newVertices)+x for x in range(len(face.vertices))]
    # Build the new vertex data
    for i, vtxIndex in enumerate(face.vertices):
      if options.get("convert_axis", True):
        position = vector_to_edm(mesh.vertices[vtxIndex].co)
        normal = vector_to_edm(mesh.vertices[vtxIndex].normal)
      else:
        position = mesh.vertices[vtxIndex].co
        normal = mesh.vertices[vtxIndex].normal
      # The V coordinate is negated here - presumably a blender/EDM
      # convention difference; TODO confirm
      uv = [uvFace.uv[i][0], -uvFace.uv[i][1]]
      newVertices.append(tuple(itertools.chain(position, [0], normal, uv)))
    # We either have triangles or quads. Split into triangles, based on the
    # vertex index subindex in face.vertices
    if len(face.vertices) == 3:
      triangles = ((0, 1, 2),)
    else:
      triangles = ((0, 1, 2),(2, 3, 0))
    # Write each vertex of each triangle
    for tri in triangles:
      for i in tri:
        newIndexValues.append(newFaceIndex[i])
  # Cleanup: drop the temporary tessellated mesh created by to_mesh
  bpy.data.meshes.remove(mesh)
  return newVertices, newIndexValues
class RenderNodeWriter(RenderNode):
  """A RenderNode that additionally knows how to build its mesh data and
  animation parents from the blender object it was created from."""
  def __init__(self, obj):
    super(RenderNodeWriter, self).__init__(name=obj.name)
    # Keep the originating blender object for later mesh extraction
    self.source = obj
  def calculate_parents(self):
    """Calculate parent objects, assign, and then return them"""
    parents = build_parent_nodes(self.source)
    if parents:
      self.parent = parents[-1]
    return parents
  def calculate_mesh(self, options):
    """Generate vertex/index data for the source object, adjusting the
    export options to account for any animation parent."""
    assert self.material
    assert self.source
    # Work on a copy so the caller's options dict is never mutated
    opt = dict(options)
    # If we have any kind of parent (OTHER than an ArgVisibilityNode), then
    # we don't want to apply transformations
    opt["apply_transform"] = self.parent is None or isinstance(self.parent, ArgVisibilityNode)
    # ArgAnimationNode-based parents don't have axis-shifted data
    opt["convert_axis"] = not isinstance(self.parent, ArgAnimationNode)
    # BUGFIX: pass the adjusted copy (opt), not the caller's original
    # options, so the transform/axis adjustments actually take effect
    self.vertexData, self.indexData = create_mesh_data(self.source, self.material, opt)
  def convert_references_to_index(self):
    """Convert all stored references into their index equivalent"""
    self.material = self.material.index
    if not self.parent:
      self.parent = 0
    else:
      self.parent = self.parent.index
class RootNodeWriter(RootNode):
  """A RootNode that can derive its bounding box from a set of objects."""
  def __init__(self, *args, **kwargs):
    super(RootNodeWriter, self).__init__(*args, **kwargs)
  def set_bounding_box_from(self, objectList):
    """Compute and store the EDM-space bounds of the given objects."""
    self.boundingBoxMin, self.boundingBoxMax = calculate_edm_world_bounds(objectList)
Add base rotation and scale transforms to animation
import bpy
import itertools
import os
from .edm.types import *
from .edm.mathtypes import Matrix, vector_to_edm, matrix_to_edm, Vector, MatrixScale
from .edm.basewriter import BaseWriter
def write_file(filename, options=None):
  """Export the current blender scene to an .edm file.

  Args:
    filename: Path of the .edm file to write.
    options: Optional dict of export options (e.g. "apply_modifiers",
      "apply_transform", "convert_axis"), forwarded to mesh generation.

  Writes the file as a side effect; returns nothing.
  """
  # Avoid the mutable-default-argument pitfall; treat None as "no options"
  if options is None:
    options = {}
  # Get a list of all mesh objects to be exported as renderables
  renderables = [x for x in bpy.context.scene.objects if x.type == "MESH" and x.edm.is_renderable]
  materials, materialMap = _create_material_map(renderables)
  # Now, build each RenderNode object, with its parents
  renderNodes = []
  rootNode = Node()
  transformNodes = [rootNode]
  for obj in renderables:
    node = RenderNodeWriter(obj)
    node.material = materialMap[obj.material_slots[0].material.name]
    # Calculate the parents for this node's animation
    parents = node.calculate_parents()
    for parent in parents:
      parent.index = len(transformNodes)
      transformNodes.append(parent)
    # We now have the information to properly enmesh the object
    node.calculate_mesh(options)
    # And, prepare references for writing
    node.convert_references_to_index()
    renderNodes.append(node)
  # Materials, render nodes and parents done - build the root node
  root = RootNodeWriter()
  root.set_bounding_box_from(renderables)
  root.materials = materials
  # And finally the wrapper that owns everything to be serialised
  file = EDMFile()
  file.root = root
  file.nodes = transformNodes
  file.renderNodes = renderNodes
  writer = BaseWriter(filename)
  try:
    file.write(writer)
  finally:
    # Ensure the output file handle is released even if writing fails
    writer.close()
def _create_material_map(blender_objects):
  """Creates a list, and indexed material map from a list of blender objects.
  The map will connect material names to the edm-Material instance.
  In addition, each Material instance will know its own .index"""
  all_Materials = [obj.material_slots[0].material for obj in blender_objects]
  materialMap = {m.name: create_material(m) for m in all_Materials}
  # Index each unique material exactly once, in order of first appearance.
  # The previous version appended one entry per *object*, so a material
  # shared between objects was duplicated in the list and its .index was
  # overwritten to point at the last duplicate.
  materials = []
  seen = set()
  for bMat in all_Materials:
    if bMat.name in seen:
      continue
    seen.add(bMat.name)
    mat = materialMap[bMat.name]
    mat.index = len(materials)
    materials.append(mat)
  return materials, materialMap
def build_parent_nodes(obj):
  """Inspects an object's actions to build a parent transform node.

  Possibly returns a chain of nodes, as in cases of position/visibility
  these must be handled by separate nodes. The returned nodes (or none)
  must then be parented onto whatever parent nodes the object's parents
  possess. If no nodes are returned, then the object should have its
  local transformation applied."""
  # Collect all actions for this object (argument == -1 marks "not exported")
  if not obj.animation_data:
    return []
  actions = set()
  if obj.animation_data.action and obj.animation_data.action.argument != -1:
    actions.add(obj.animation_data.action)
  for track in obj.animation_data.nla_tracks:
    for strip in track.strips:
      if strip.action.argument != -1:
        actions.add(strip.action)
  # Verify each action handles a separate argument, otherwise - who knows -
  # if this becomes a problem we may need to merge actions (ouch)
  arguments = set()
  for action in actions:
    if action.argument in arguments:
      raise RuntimeError("More than one action on an object share arguments. Not sure how to deal with this")
    arguments.add(action.argument)
  if not actions:
    return []
  # No multiple animations for now - get simple right first
  assert len(actions) <= 1, "Do not support multiple actions on object export at this time"
  action = next(iter(actions))
  nodes = []
  # All keyframe types we know how to handle
  ALL_KNOWN = {"location", "rotation_quaternion", "scale", "hide_render"}
  # Keyframe types that are handled by ArgAnimationNode
  AAN_KNOWN = {"location", "rotation_quaternion", "scale"}
  data_categories = set(x.data_path for x in action.fcurves)
  if not data_categories <= ALL_KNOWN:
    print("WARNING: Action has animated keys ({}) that ioEDM can not translate yet!".format(data_categories-ALL_KNOWN))
  # do we need to make an ArgAnimationNode? Use the named constant instead
  # of repeating the literal set (they had drifted apart visually)
  if data_categories & AAN_KNOWN:
    print("Creating ArgAnimationNode")
    nodes.append(create_arganimation_node(obj, [action]))
  return nodes
def create_arganimation_node(object, actions):
  """Build an ArgAnimationNode for an object from its animation actions.

  :param object: The blender object whose base transform seeds the node.
  :param actions: List of actions; only a single action is supported.
  :returns: The populated ArgAnimationNode.
  :raises NotImplementedError: for rotation/scale keyframes, which are
      not yet handled.
  """
  # For now, let's assume single-action
  node = ArgAnimationNode()
  assert len(actions) == 1
  for action in actions:
    curves = set(x.data_path for x in action.fcurves)
    # NOTE(review): rotCurves is collected but not yet used below
    rotCurves = [x for x in action.fcurves if x.data_path == "rotation_quaternion"]
    posCurves = [x for x in action.fcurves if x.data_path == "location"]
    argument = action.argument
    # Build the base transforms.
    # The base matrix needs to include final scale, because the other scale
    # is applied before transformation
    node.base.matrix = matrix_to_edm(Matrix())
    # Base position: location channels evaluated at frame 0, with the
    # object's own location as the fallback for unanimated axes
    node.base.position = get_fcurve_position(posCurves, 0.0, object.location)
    node.base.scale = object.scale
    # Get the base rotation... however we can. Although we need only directly
    # support quaternion animation, it's convenient to allow non-quat base
    if not object.rotation_mode == "QUATERNION":
      node.base.quat_1 = object.matrix_local.decompose()[1]
    else:
      node.base.quat_1 = object.rotation_quaternion
    if "location" in curves:
      # Build up a set of keys
      posKeys = []
      # What we should scale to - take the maximum keyframe value as '1.0'
      scale = 1.0 / (max(abs(x) for x in get_all_keyframe_times(posCurves)) or 100.0)
      # Build up the key data for everything
      for time in get_all_keyframe_times(posCurves):
        # Position keys are stored relative to the base position
        position = get_fcurve_position(posCurves, time) - node.base.position
        key = PositionKey(frame=time*scale, value=position)
        posKeys.append(key)
      node.posData.append((argument, posKeys))
    if "rotation_quaternion" in curves:
      raise NotImplementedError()
    if "scale" in curves:
      raise NotImplementedError("Curves not totally understood yet")
  # Now we've processed everything
  return node
def get_all_keyframe_times(fcurves):
  """Get all fixed-point times in a collection of keyframes"""
  # Gather the frame number (co[0]) of every keyframe on every curve,
  # de-duplicated, and hand them back in ascending order.
  collected = {key.co[0] for curve in fcurves for key in curve.keyframe_points}
  return sorted(collected)
def get_fcurve_quaternion(fcurves, frame):
  """Retrieve an evaluated quaternion for a single action at a single frame"""
  # Pick out the four w/x/y/z channels for the quaternion
  quat_curves = [curve for curve in fcurves if curve.data_path == "rotation_quaternion"]
  # Really, quaternion rotation without all channels is stupid
  assert len(quat_curves) == 4, "Incomplete quaternion rotation channels in action"
  # Evaluate each channel, in array_index order, at the requested frame
  values = []
  for idx in range(4):
    channel = [curve for curve in quat_curves if curve.array_index == idx][0]
    values.append(channel.evaluate(frame))
  return Quaternion(values)
def get_fcurve_position(fcurves, frame, basis=None):
  """Retrieve an evaluated fcurve for position.

  :param fcurves: Iterable of fcurves; only "location" channels are used.
  :param frame:   The frame (time) at which to evaluate animated channels.
  :param basis:   Optional 3-sequence of per-axis fallback values for axes
                  that have no animation channel; axes default to 0 when
                  no basis is given.
  :returns: Vector of the three evaluated (or fallback) axis values.
  """
  loc_curves = [x for x in fcurves if x.data_path == "location"]
  values = []
  for axis in range(3):
    matches = [x for x in loc_curves if x.array_index == axis]
    if matches:
      values.append(matches[0].evaluate(frame))
    elif basis is not None:
      # BUGFIX: the previous lambda referenced an undefined name ("bases",
      # a typo for "basis") and late-bound the loop index, so this fallback
      # raised NameError / used the wrong axis whenever it was reached
      values.append(basis[axis])
    else:
      values.append(0)
  return Vector(values)
def calculate_edm_world_bounds(objects):
  """Calculates, in EDM-space, the bounding box of all objects"""
  lower = [1e38, 1e38, 1e38]
  upper = [-1e38, -1e38, -1e38]
  for obj in objects:
    # Transform every bounding-box corner of this object into EDM space
    corners = [vector_to_edm(obj.matrix_world * Vector(corner)) for corner in obj.bound_box]
    for axis in range(3):
      axis_values = [corner[axis] for corner in corners]
      lower[axis] = min(axis_values + [lower[axis]])
      upper[axis] = max(axis_values + [upper[axis]])
  return Vector(lower), Vector(upper)
def create_texture(source):
  """Build an edm Texture from a blender texture slot.

  Args:
    source: A blender texture slot; its image filename names the texture
      and its use_map_* flags select the texture channel.

  Returns:
    A Texture with the channel index, stripped name and identity matrix.

  Raises:
    ValueError: If the slot maps none of diffuse/normal/specular.
  """
  # Get the texture name stripped of ALL extensions. split(".") is safe for
  # names with no dot; find(".") returned -1 in that case and silently
  # chopped off the final character.
  texName = os.path.basename(source.texture.image.filepath).split(".")[0]
  # Work out the channel for this texture
  if source.use_map_color_diffuse:
    index = 0
  elif source.use_map_normal:
    index = 1
  elif source.use_map_specular:
    index = 2
  else:
    # Previously this fell through and crashed below with a NameError
    raise ValueError("Texture slot maps no recognised channel (diffuse/normal/specular)")
  # For now, assume identity transformation until we understand
  matrix = Matrix()
  return Texture(index=index, name=texName, matrix=matrix)
def create_material(source):
  """Convert a blender material into an edm Material.

  Args:
    source: The blender material to convert.

  Returns:
    A populated Material (uniforms, shadow flags, vertex format, textures).
  """
  mat = Material()
  mat.blending = int(source.edm_blending)
  mat.material_name = source.edm_material
  mat.name = source.name
  mat.uniforms = {
    "specPower": float(source.specular_hardness), # Important this is a float
    "specFactor": source.specular_intensity,
    "diffuseValue": source.diffuse_intensity,
    "reflectionValue": 0.0, # Always in uniforms, so keep here for compatibility
  }
  # No idea what this corresponds to yet:
  #   "diffuseShift": Vector((0,0)),
  if source.raytrace_mirror.use:
    mat.uniforms["reflectionValue"] = source.raytrace_mirror.reflect_factor
    mat.uniforms["reflectionBlurring"] = 1.0-source.raytrace_mirror.gloss_factor
  # NOTE: "recieve" is the (misspelt) attribute name the edm types expose
  mat.shadows.recieve = source.use_shadows
  mat.shadows.cast = source.use_cast_shadows
  mat.shadows.cast_only = source.use_cast_shadows_only
  mat.vertex_format = VertexFormat({
    "position": 4,
    "normal": 3,
    "tex0": 2
  })
  mat.texture_coordinates_channels = [0] + [-1]*11
  # Find the textures for each of the layers
  # Find diffuse - this will sometimes also include a translucency map.
  # (The bare try/except + pdb.set_trace() debugging scaffold that wrapped
  # this lookup has been removed; empty slots are filtered out explicitly.)
  diffuseTex = [x for x in source.texture_slots if x is not None and x.use_map_color_diffuse]
  # normalTex = [x for x in source.texture_slots if x.use_map_normal]
  # specularTex = [x for x in source.texture_slots if x.use_map_specular]
  assert len(diffuseTex) == 1, "Expected exactly one diffuse texture slot"
  mat.textures.append(create_texture(diffuseTex[0]))
  return mat
def create_mesh_data(source, material, options={}):
  """Takes an object and converts it to a mesh suitable for writing

  :param source:   The blender object to convert
  :param material: The material for the object (currently unused here;
                   kept for interface symmetry with callers)
  :param options:  Export options: "apply_modifiers", "apply_transform",
                   "convert_axis"
  :returns: (vertices, indices): each vertex is a flat tuple of
            position + [0] padding + normal + uv; indices are triangles
  """
  # Always remesh, because we will want to apply transformations
  mesh = source.to_mesh(bpy.context.scene,
    apply_modifiers=options.get("apply_modifiers", False),
    settings="RENDER", calc_tessface=True)
  # Apply the local transform. IF there are no parents, then this should
  # be identical to the world transform anyway
  if options.get("apply_transform", True):
    mesh.transform(source.matrix_local)
  # Should be more complicated for multiple layers, but will do for now
  uv_tex = mesh.tessface_uv_textures.active.data
  newVertices = []
  newIndexValues = []
  # Loop over every face, and the UV data for that face
  for face, uvFace in zip(mesh.tessfaces, uv_tex):
    # What are the new index values going to be?
    newFaceIndex = [len(newVertices)+x for x in range(len(face.vertices))]
    # Build the new vertex data
    for i, vtxIndex in enumerate(face.vertices):
      if options.get("convert_axis", True):
        position = vector_to_edm(mesh.vertices[vtxIndex].co)
        normal = vector_to_edm(mesh.vertices[vtxIndex].normal)
      else:
        position = mesh.vertices[vtxIndex].co
        normal = mesh.vertices[vtxIndex].normal
      # The V coordinate is negated here - presumably a blender/EDM
      # convention difference; TODO confirm
      uv = [uvFace.uv[i][0], -uvFace.uv[i][1]]
      newVertices.append(tuple(itertools.chain(position, [0], normal, uv)))
    # We either have triangles or quads. Split into triangles, based on the
    # vertex index subindex in face.vertices
    if len(face.vertices) == 3:
      triangles = ((0, 1, 2),)
    else:
      triangles = ((0, 1, 2),(2, 3, 0))
    # Write each vertex of each triangle
    for tri in triangles:
      for i in tri:
        newIndexValues.append(newFaceIndex[i])
  # Cleanup: drop the temporary tessellated mesh created by to_mesh
  bpy.data.meshes.remove(mesh)
  return newVertices, newIndexValues
class RenderNodeWriter(RenderNode):
  """A RenderNode that additionally knows how to build its mesh data and
  animation parents from the blender object it was created from."""
  def __init__(self, obj):
    super(RenderNodeWriter, self).__init__(name=obj.name)
    # Keep the originating blender object for later mesh extraction
    self.source = obj
  def calculate_parents(self):
    """Calculate parent objects, assign, and then return them"""
    parents = build_parent_nodes(self.source)
    if parents:
      self.parent = parents[-1]
    return parents
  def calculate_mesh(self, options):
    """Generate vertex/index data for the source object, adjusting the
    export options to account for any animation parent."""
    assert self.material
    assert self.source
    # Work on a copy so the caller's options dict is never mutated
    opt = dict(options)
    # If we have any kind of parent (OTHER than an ArgVisibilityNode), then
    # we don't want to apply transformations. Use "is None" (PEP 8) rather
    # than "== None".
    opt["apply_transform"] = self.parent is None or isinstance(self.parent, ArgVisibilityNode)
    # ArgAnimationNode-based parents don't have axis-shifted data
    opt["convert_axis"] = not isinstance(self.parent, ArgAnimationNode)
    self.vertexData, self.indexData = create_mesh_data(self.source, self.material, opt)
  def convert_references_to_index(self):
    """Convert all stored references into their index equivalent"""
    self.material = self.material.index
    if not self.parent:
      self.parent = 0
    else:
      self.parent = self.parent.index
class RootNodeWriter(RootNode):
  """A RootNode that can derive its bounding box from a set of objects."""
  def __init__(self, *args, **kwargs):
    super(RootNodeWriter, self).__init__(*args, **kwargs)
  def set_bounding_box_from(self, objectList):
    """Compute and store the EDM-space bounds of the given objects."""
    self.boundingBoxMin, self.boundingBoxMax = calculate_edm_world_bounds(objectList)
|
# -*- coding: utf-8 -*-
#
# giddy documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 6 15:54:22 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys, os
import sphinx_bootstrap_theme
sys.path.insert(0, os.path.abspath('../../'))
import giddy
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [#'sphinx_gallery.gen_gallery',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinxcontrib.bibtex',
'sphinx.ext.mathjax',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'numpydoc',
#'sphinx.ext.napoleon',
'matplotlib.sphinxext.plot_directive',
'nbsphinx']
# sphinx_gallery_conf = {
# # path to your examples scripts
# 'examples_dirs': '../examples',
# # path where to save gallery generated examples
# 'gallery_dirs': 'auto_examples',
# 'backreferences_dir': False,
# }
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'giddy'
copyright = '2018-, pysal developers'
author = 'pysal developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version.
version = giddy.__version__
release = giddy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'tests/*', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_title = "%s v%s Manual" % (project, version)
# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
#html_logo = "_static/images/CGS_logo.jpg"
#html_logo = "_static/images/CGS_logo_green.png"
#html_logo = "_static/images/pysal_logo_small.jpg"
html_favicon = "_static/images/pysal_favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "giddy",
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
#'navbar_pagenav': True,
#'navbar_pagenav': False,
# No sidebar
'nosidebar': True,
# Tab name for the current pages TOC. (Default: "Page")
#'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
#'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': 'footer',
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo", "yeti", "flatly".
'bootswatch_theme': "yeti",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
'navbar_links': [
("Installation", "installation"),
("Tutorial", "tutorial"),
("API", "api"),
("References", "references"),
],
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'giddydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'giddy.tex', u'giddy Documentation',
u'pysal developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'giddy', u'giddy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'giddy', u'giddy Documentation',
author, 'giddy', 'One line description of project.',
'Miscellaneous'),
]
# -----------------------------------------------------------------------------
# Napoleon configuration
# -----------------------------------------------------------------------------
# numpydoc_show_class_members = True
# numpydoc_class_members_toctree = False
#
# napoleon_use_ivar = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
# Generate the API documentation when building
autosummary_generate = True
# avoid showing members twice
numpydoc_show_class_members = False
numpydoc_use_plots = True
# automatically document class members
autodoc_default_options = {
'members': True,
'undoc-members': True
}
# display the source code for Plot directive
plot_include_source = True
def setup(app):
app.add_stylesheet("pysal-styles.css")
# Configuration for intersphinx
intersphinx_mapping = {"python": ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'libpysal': ('https://pysal.org/libpysal/', None),
'mapclassify': ('https://pysal.org/mapclassify/', None),
'esda': ('https://esda.readthedocs.io/', None),
'matplotlib':("https://matplotlib.org/", None)
}
# This is processed by Jinja2 and inserted before each notebook
nbsphinx_prolog = r"""
{% set docname = env.doc2path(env.docname, base='doc') %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
This page was generated from `{{ docname }}`__.
Interactive online version:
:raw-html:`<a href="https://mybinder.org/v2/gh/pysal/giddy/master?filepath={{ docname }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:text-bottom"></a>`
__ https://github.com/pysal/giddy/{{ docname }}
.. raw:: latex
\nbsphinxstartnotebook{\scriptsize\noindent\strut
\textcolor{gray}{The following section was generated from
\sphinxcode{\sphinxupquote{\strut {{ docname | escape_latex }}}} \dotfill}}
"""
# This is processed by Jinja2 and inserted after each notebook
nbsphinx_epilog = r"""
.. raw:: latex
\nbsphinxstopnotebook{\scriptsize\noindent\strut
\textcolor{gray}{\dotfill\ \sphinxcode{\sphinxupquote{\strut
{{ env.doc2path(env.docname, base='doc') | escape_latex }}}} ends here.}}
"""
# List of arguments to be passed to the kernel that executes the notebooks:
nbsphinx_execute_arguments = [
"--InlineBackend.figure_formats={'svg', 'pdf'}",
"--InlineBackend.rc={'figure.dpi': 96}",
]
mathjax_config = {
'TeX': {'equationNumbers': {'autoNumber': 'AMS', 'useLabelIds': True}},
}
Fix for #129: link each notebook to its full repository path (tree/master/doc/) in the generated "This page was generated from" note.
# -*- coding: utf-8 -*-
#
# giddy documentation build configuration file, created by
# sphinx-quickstart on Wed Jun 6 15:54:22 2018.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import sys, os
import sphinx_bootstrap_theme
sys.path.insert(0, os.path.abspath('../../'))
import giddy
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [#'sphinx_gallery.gen_gallery',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.viewcode',
'sphinxcontrib.bibtex',
'sphinx.ext.mathjax',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'numpydoc',
#'sphinx.ext.napoleon',
'matplotlib.sphinxext.plot_directive',
'nbsphinx']
# sphinx_gallery_conf = {
# # path to your examples scripts
# 'examples_dirs': '../examples',
# # path where to save gallery generated examples
# 'gallery_dirs': 'auto_examples',
# 'backreferences_dir': False,
# }
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'giddy'
copyright = '2018-, pysal developers'
author = 'pysal developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The full version.
version = giddy.__version__
release = giddy.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'tests/*', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
html_theme = 'bootstrap'
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
html_title = "%s v%s Manual" % (project, version)
# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24).
# Path should be relative to the ``_static`` files directory.
#html_logo = "_static/images/CGS_logo.jpg"
#html_logo = "_static/images/CGS_logo_green.png"
#html_logo = "_static/images/pysal_logo_small.jpg"
html_favicon = "_static/images/pysal_favicon.ico"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "giddy",
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
#'navbar_pagenav': True,
#'navbar_pagenav': False,
# No sidebar
'nosidebar': True,
# Tab name for the current pages TOC. (Default: "Page")
#'navbar_pagenav_name': "Page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 2,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
#'navbar_class': "navbar navbar-inverse",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': 'footer',
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing (default) or the name of a valid theme
# such as "amelia" or "cosmo", "yeti", "flatly".
'bootswatch_theme': "yeti",
# Choose Bootstrap version.
# Values: "3" (default) or "2" (in quotes)
'bootstrap_version': "3",
'navbar_links': [
("Installation", "installation"),
("Tutorial", "tutorial"),
("API", "api"),
("References", "references"),
],
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# html_sidebars = {'sidebar': ['localtoc.html', 'sourcelink.html', 'searchbox.html']}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'giddydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'giddy.tex', u'giddy Documentation',
u'pysal developers', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'giddy', u'giddy Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'giddy', u'giddy Documentation',
author, 'giddy', 'One line description of project.',
'Miscellaneous'),
]
# -----------------------------------------------------------------------------
# Napoleon configuration
# -----------------------------------------------------------------------------
# numpydoc_show_class_members = True
# numpydoc_class_members_toctree = False
#
# napoleon_use_ivar = True
# -----------------------------------------------------------------------------
# Autosummary
# -----------------------------------------------------------------------------
# Generate the API documentation when building
autosummary_generate = True
# avoid showing members twice
numpydoc_show_class_members = False
numpydoc_use_plots = True
# automatically document class members
autodoc_default_options = {
'members': True,
'undoc-members': True
}
# display the source code for Plot directive
plot_include_source = True
def setup(app):
app.add_stylesheet("pysal-styles.css")
# Configuration for intersphinx
intersphinx_mapping = {"python": ('https://docs.python.org/3', None),
'numpy': ('https://docs.scipy.org/doc/numpy', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'libpysal': ('https://pysal.org/libpysal/', None),
'mapclassify': ('https://pysal.org/mapclassify/', None),
'esda': ('https://esda.readthedocs.io/', None),
'matplotlib':("https://matplotlib.org/", None)
}
# This is processed by Jinja2 and inserted before each notebook
nbsphinx_prolog = r"""
{% set docname = env.doc2path(env.docname, base='doc') %}
{% set fullpath = env.doc2path(env.docname, base='tree/master/doc/') %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
This page was generated from `{{ docname }}`__.
Interactive online version:
:raw-html:`<a href="https://mybinder.org/v2/gh/pysal/giddy/master?filepath={{ docname }}"><img alt="Binder badge" src="https://mybinder.org/badge_logo.svg" style="vertical-align:text-bottom"></a>`
__ https://github.com/pysal/giddy/{{ fullpath }}
.. raw:: latex
\nbsphinxstartnotebook{\scriptsize\noindent\strut
\textcolor{gray}{The following section was generated from
\sphinxcode{\sphinxupquote{\strut {{ docname | escape_latex }}}} \dotfill}}
"""
# This is processed by Jinja2 and inserted after each notebook
nbsphinx_epilog = r"""
.. raw:: latex
\nbsphinxstopnotebook{\scriptsize\noindent\strut
\textcolor{gray}{\dotfill\ \sphinxcode{\sphinxupquote{\strut
{{ env.doc2path(env.docname, base='doc') | escape_latex }}}} ends here.}}
"""
# List of arguments to be passed to the kernel that executes the notebooks:
nbsphinx_execute_arguments = [
"--InlineBackend.figure_formats={'svg', 'pdf'}",
"--InlineBackend.rc={'figure.dpi': 96}",
]
mathjax_config = {
'TeX': {'equationNumbers': {'autoNumber': 'AMS', 'useLabelIds': True}},
}
|
'''
This script creates carbon in belowground, deadwood, litter, and soil pools at the time of tree cover loss for loss pixels.
It also calculates total carbon for loss pixels.
For belowground carbon (as with aboveground carbon), the pools are carbon 2000 + carbon gain until loss year.
For deadwood, litter, and soil, the pools are based on carbon 2000.
Total carbon is thus a mixture of stocks in 2000 and in the year of tree cover loss.
NOTE: Because there are so many input files, this script needs a machine with extra disk space.
Thus, create a spot machine with extra disk space: spotutil new r4.16xlarge dgibbs_wri --disk_size 1024 (this is the maximum value).
'''
import create_BGC_deadwood_litter_soil_totalC_in_emis_year
from multiprocessing.pool import Pool
from functools import partial
import subprocess
import os
import pandas as pd
import sys
sys.path.append('../')
import constants_and_names as cn
import universal_util as uu
pd.options.mode.chained_assignment = None
tile_list = uu.tile_list(cn.AGC_emis_year_dir)
# tile_list = ['00N_110E'] # test tiles
# tile_list = ['80N_020E', '00N_020E', '30N_080W', '00N_110E'] # test tiles
print tile_list
print "There are {} tiles to process".format(str(len(tile_list)))
# For downloading all tiles in the input folders.
input_files = [
cn.AGC_emis_year_dir,
cn.WHRC_biomass_2000_unmasked_dir,
cn.mangrove_biomass_2000_dir,
cn.cont_eco_dir,
cn.bor_tem_trop_processed_dir,
cn.precip_processed_dir,
cn.soil_C_full_extent_2000_dir,
cn.elevation_processed_dir
]
# for input in input_files:
# uu.s3_folder_download('{}'.format(input), '.')
# # For copying individual tiles to spot machine for testing.
# for tile in tile_list:
#
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.AGC_emis_year_dir, tile,
# cn.pattern_AGC_emis_year), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.cont_eco_dir, tile,
# cn.pattern_cont_eco_processed), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.bor_tem_trop_processed_dir, tile,
# cn.pattern_bor_tem_trop_processed), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.precip_processed_dir, tile,
# cn.pattern_precip), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.soil_C_full_extent_2000_dir, tile,
# cn.pattern_soil_C_full_extent_2000), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.elevation_processed_dir, tile,
# cn.pattern_elevation), '.')
# try:
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.WHRC_biomass_2000_unmasked_dir, tile,
# cn.pattern_WHRC_biomass_2000_unmasked), '.')
# except:
# print "No WHRC biomass in", tile
# try:
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.mangrove_biomass_2000_dir, tile, cn.pattern_mangrove_biomass_2000), '.')
# except:
# print "No mangrove biomass in", tile
# Table with IPCC Wetland Supplement Table 4.4 default mangrove gain rates
cmd = ['aws', 's3', 'cp', os.path.join(cn.gain_spreadsheet_dir, cn.gain_spreadsheet), '.']
subprocess.check_call(cmd)
# Imports the table with the ecozone-continent codes and the carbon gain rates
gain_table = pd.read_excel("{}".format(cn.gain_spreadsheet),
sheet_name = "mangrove gain, for model")
# Removes rows with duplicate codes (N. and S. America for the same ecozone)
gain_table_simplified = gain_table.drop_duplicates(subset='gainEcoCon', keep='first')
mang_BGB_AGB_ratio = create_BGC_deadwood_litter_soil_totalC_in_emis_year.mangrove_pool_ratio_dict(gain_table_simplified,
cn.below_to_above_trop_dry_mang,
cn.below_to_above_trop_wet_mang,
cn.below_to_above_subtrop_mang)
mang_deadwood_AGB_ratio = create_BGC_deadwood_litter_soil_totalC_in_emis_year.mangrove_pool_ratio_dict(gain_table_simplified,
cn.deadwood_to_above_trop_dry_mang,
cn.deadwood_to_above_trop_wet_mang,
cn.deadwood_to_above_subtrop_mang)
mang_litter_AGB_ratio = create_BGC_deadwood_litter_soil_totalC_in_emis_year.mangrove_pool_ratio_dict(gain_table_simplified,
cn.litter_to_above_trop_dry_mang,
cn.litter_to_above_trop_wet_mang,
cn.litter_to_above_subtrop_mang)
print "Creating carbon pools..."
# # 18 processors used between 300 and 400 GB memory, so it was okay on a r4.16xlarge spot machine
# num_of_processes = 18
# pool = Pool(num_of_processes)
# pool.map(partial(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_BGC, mang_BGB_AGB_ratio=mang_BGB_AGB_ratio), tile_list)
# pool.close()
# pool.join()
#
# uu.upload_final_set(cn.BGC_emis_year_dir, cn.pattern_BGC_emis_year)
# cmd = ['rm *{}*.tif'.format(cn.pattern_BGC_emis_year)]
# subprocess.check_call(cmd)
num_of_processes = 20
pool = Pool(num_of_processes)
pool.map(partial(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_deadwood, mang_deadwood_AGB_ratio=mang_deadwood_AGB_ratio), tile_list)
pool.close()
pool.join()
uu.upload_final_set(cn.deadwood_emis_year_2000_dir, cn.pattern_deadwood_emis_year_2000)
# cmd = ['rm *{}*.tif'.format(cn.pattern_deadwood_emis_year_2000)]
# subprocess.check_call(cmd)
num_of_processes = 16
pool = Pool(num_of_processes)
pool.map(partial(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_litter, mang_litter_AGB_ratio=mang_litter_AGB_ratio), tile_list)
pool.close()
pool.join()
uu.upload_final_set(cn.litter_emis_year_2000_dir, cn.pattern_litter_emis_year_2000)
# cmd = ['rm *{}*.tif'.format(cn.pattern_litter_emis_year_2000)]
# subprocess.check_call(cmd)
num_of_processes = 16
pool = Pool(num_of_processes)
# create_soil takes only the tile argument, so there are no keyword arguments
# to bind and partial() was redundant -- pass the worker function directly.
pool.map(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_soil, tile_list)
pool.close()
pool.join()
uu.upload_final_set(cn.soil_C_emis_year_2000_dir, cn.pattern_soil_C_emis_year_2000)
# cmd = ['rm *{}*.tif'.format(cn.pattern_soil_C_emis_year_2000)]
# subprocess.check_call(cmd)
num_of_processes = 40
pool = Pool(num_of_processes)
# create_total_C takes only the tile argument, so there are no keyword
# arguments to bind and partial() was redundant -- pass the function directly.
pool.map(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_total_C, tile_list)
pool.close()
pool.join()
uu.upload_final_set(cn.total_C_emis_year_dir, cn.pattern_total_C_emis_year)
# cmd = ['rm *{}*.tif'.format(cn.pattern_total_C_emis_year)]
# subprocess.check_call(cmd)
# # For single processor use
# for tile in tile_list:
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_BGC(tile, mang_BGB_AGB_ratio)
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_deadwood(tile, mang_deadwood_AGB_ratio)
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_litter(tile, mang_litter_AGB_ratio)
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_soil(tile)
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_total_C(tile)
#
# uu.upload_final_set(cn.BGC_emis_year_dir, cn.pattern_BGC_emis_year)
# uu.upload_final_set(cn.deadwood_emis_year_2000_dir, cn.pattern_deadwood_emis_year_2000)
# uu.upload_final_set(cn.litter_emis_year_2000_dir, cn.pattern_litter_emis_year_2000)
# uu.upload_final_set(cn.soil_C_emis_year_2000_dir, cn.pattern_soil_C_emis_year_2000)
# uu.upload_final_set(cn.total_C_emis_year_dir, cn.pattern_total_C_emis_year)
Continuing with carbon pool generation: raise the deadwood and litter pool runs to 20 processes each.
'''
This script creates carbon in belowground, deadwood, litter, and soil pools at the time of tree cover loss for loss pixels.
It also calculates total carbon for loss pixels.
For belowground carbon (as with aboveground carbon), the pools are carbon 2000 + carbon gain until loss year.
For deadwood, litter, and soil, the pools are based on carbon 2000.
Total carbon is thus a mixture of stocks in 2000 and in the year of tree cover loss.
NOTE: Because there are so many input files, this script needs a machine with extra disk space.
Thus, create a spot machine with extra disk space: spotutil new r4.16xlarge dgibbs_wri --disk_size 1024 (this is the maximum value).
'''
import create_BGC_deadwood_litter_soil_totalC_in_emis_year
from multiprocessing.pool import Pool
from functools import partial
import subprocess
import os
import pandas as pd
import sys
sys.path.append('../')
import constants_and_names as cn
import universal_util as uu
pd.options.mode.chained_assignment = None
tile_list = uu.tile_list(cn.AGC_emis_year_dir)
# tile_list = ['00N_110E'] # test tiles
# tile_list = ['80N_020E', '00N_020E', '30N_080W', '00N_110E'] # test tiles
print tile_list
print "There are {} tiles to process".format(str(len(tile_list)))
# For downloading all tiles in the input folders.
input_files = [
cn.AGC_emis_year_dir,
cn.WHRC_biomass_2000_unmasked_dir,
cn.mangrove_biomass_2000_dir,
cn.cont_eco_dir,
cn.bor_tem_trop_processed_dir,
cn.precip_processed_dir,
cn.soil_C_full_extent_2000_dir,
cn.elevation_processed_dir
]
# for input in input_files:
# uu.s3_folder_download('{}'.format(input), '.')
# # For copying individual tiles to spot machine for testing.
# for tile in tile_list:
#
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.AGC_emis_year_dir, tile,
# cn.pattern_AGC_emis_year), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.cont_eco_dir, tile,
# cn.pattern_cont_eco_processed), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.bor_tem_trop_processed_dir, tile,
# cn.pattern_bor_tem_trop_processed), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.precip_processed_dir, tile,
# cn.pattern_precip), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.soil_C_full_extent_2000_dir, tile,
# cn.pattern_soil_C_full_extent_2000), '.')
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.elevation_processed_dir, tile,
# cn.pattern_elevation), '.')
# try:
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.WHRC_biomass_2000_unmasked_dir, tile,
# cn.pattern_WHRC_biomass_2000_unmasked), '.')
# except:
# print "No WHRC biomass in", tile
# try:
# uu.s3_file_download('{0}{1}_{2}.tif'.format(cn.mangrove_biomass_2000_dir, tile, cn.pattern_mangrove_biomass_2000), '.')
# except:
# print "No mangrove biomass in", tile
# Table with IPCC Wetland Supplement Table 4.4 default mangrove gain rates
cmd = ['aws', 's3', 'cp', os.path.join(cn.gain_spreadsheet_dir, cn.gain_spreadsheet), '.']
subprocess.check_call(cmd)
# Imports the table with the ecozone-continent codes and the carbon gain rates
gain_table = pd.read_excel("{}".format(cn.gain_spreadsheet),
sheet_name = "mangrove gain, for model")
# Removes rows with duplicate codes (N. and S. America for the same ecozone)
gain_table_simplified = gain_table.drop_duplicates(subset='gainEcoCon', keep='first')
mang_BGB_AGB_ratio = create_BGC_deadwood_litter_soil_totalC_in_emis_year.mangrove_pool_ratio_dict(gain_table_simplified,
cn.below_to_above_trop_dry_mang,
cn.below_to_above_trop_wet_mang,
cn.below_to_above_subtrop_mang)
mang_deadwood_AGB_ratio = create_BGC_deadwood_litter_soil_totalC_in_emis_year.mangrove_pool_ratio_dict(gain_table_simplified,
cn.deadwood_to_above_trop_dry_mang,
cn.deadwood_to_above_trop_wet_mang,
cn.deadwood_to_above_subtrop_mang)
mang_litter_AGB_ratio = create_BGC_deadwood_litter_soil_totalC_in_emis_year.mangrove_pool_ratio_dict(gain_table_simplified,
cn.litter_to_above_trop_dry_mang,
cn.litter_to_above_trop_wet_mang,
cn.litter_to_above_subtrop_mang)
print "Creating carbon pools..."
# # 18 processors used between 300 and 400 GB memory, so it was okay on a r4.16xlarge spot machine
# num_of_processes = 18
# pool = Pool(num_of_processes)
# pool.map(partial(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_BGC, mang_BGB_AGB_ratio=mang_BGB_AGB_ratio), tile_list)
# pool.close()
# pool.join()
#
# uu.upload_final_set(cn.BGC_emis_year_dir, cn.pattern_BGC_emis_year)
# cmd = ['rm *{}*.tif'.format(cn.pattern_BGC_emis_year)]
# subprocess.check_call(cmd)
num_of_processes = 20
pool = Pool(num_of_processes)
pool.map(partial(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_deadwood, mang_deadwood_AGB_ratio=mang_deadwood_AGB_ratio), tile_list)
pool.close()
pool.join()
uu.upload_final_set(cn.deadwood_emis_year_2000_dir, cn.pattern_deadwood_emis_year_2000)
# cmd = ['rm *{}*.tif'.format(cn.pattern_deadwood_emis_year_2000)]
# subprocess.check_call(cmd)
num_of_processes = 20
pool = Pool(num_of_processes)
pool.map(partial(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_litter, mang_litter_AGB_ratio=mang_litter_AGB_ratio), tile_list)
pool.close()
pool.join()
uu.upload_final_set(cn.litter_emis_year_2000_dir, cn.pattern_litter_emis_year_2000)
# cmd = ['rm *{}*.tif'.format(cn.pattern_litter_emis_year_2000)]
# subprocess.check_call(cmd)
num_of_processes = 16
pool = Pool(num_of_processes)
# create_soil takes only the tile argument, so there are no keyword arguments
# to bind and partial() was redundant -- pass the worker function directly.
pool.map(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_soil, tile_list)
pool.close()
pool.join()
uu.upload_final_set(cn.soil_C_emis_year_2000_dir, cn.pattern_soil_C_emis_year_2000)
# cmd = ['rm *{}*.tif'.format(cn.pattern_soil_C_emis_year_2000)]
# subprocess.check_call(cmd)
num_of_processes = 40
pool = Pool(num_of_processes)
# create_total_C takes only the tile argument, so there are no keyword
# arguments to bind and partial() was redundant -- pass the function directly.
pool.map(create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_total_C, tile_list)
pool.close()
pool.join()
uu.upload_final_set(cn.total_C_emis_year_dir, cn.pattern_total_C_emis_year)
# cmd = ['rm *{}*.tif'.format(cn.pattern_total_C_emis_year)]
# subprocess.check_call(cmd)
# # For single processor use
# for tile in tile_list:
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_BGC(tile, mang_BGB_AGB_ratio)
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_deadwood(tile, mang_deadwood_AGB_ratio)
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_litter(tile, mang_litter_AGB_ratio)
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_soil(tile)
# create_BGC_deadwood_litter_soil_totalC_in_emis_year.create_total_C(tile)
#
# uu.upload_final_set(cn.BGC_emis_year_dir, cn.pattern_BGC_emis_year)
# uu.upload_final_set(cn.deadwood_emis_year_2000_dir, cn.pattern_deadwood_emis_year_2000)
# uu.upload_final_set(cn.litter_emis_year_2000_dir, cn.pattern_litter_emis_year_2000)
# uu.upload_final_set(cn.soil_C_emis_year_2000_dir, cn.pattern_soil_C_emis_year_2000)
# uu.upload_final_set(cn.total_C_emis_year_dir, cn.pattern_total_C_emis_year)
|
import json
import requests
from inflection import underscore
from uritemplate import expand
from zc_common.jwt_auth.utils import service_jwt_payload_handler, jwt_encode_handler
from zc_common.settings import zc_settings
# Requests that can be made to another service
GET = 'get'
POST = 'post'
PUT = 'put'
PATCH = 'patch'
class UnsupportedHTTPMethodException(Exception):
    """Raised when make_service_request() is given an HTTP method outside GET/POST/PUT/PATCH."""
    pass
class RouteNotFoundException(Exception):
    """Raised when the gateway route table has no entry for a resource_type."""
    pass
class ServiceRequestException(Exception):
    """An exception commonly thrown when an HTTP request to another service endpoint fails."""
    message = None
    response = None

    def __init__(self, message, response=None):
        # Bug fix: this was spelled ``__init`` (a plain method, never called),
        # so ``message``/``response`` were never stored on instances.  Also
        # call Exception.__init__ so str(exc) and exc.args carry the message.
        super(ServiceRequestException, self).__init__(message)
        self.message = message
        self.response = response
class RemoteResourceException(Exception):
    """Raised when a remote service response cannot be parsed into a resource."""
    pass
class RemoteResourceWrapper(object):
    """Wraps a raw JSON:API resource dict, exposing its fields as attributes.

    ``id`` and ``type`` are copied verbatim; each attribute and relationship
    key is snake_cased via ``underscore`` before being set on the instance.
    Relationship values are wrapped recursively (lists become
    RemoteResourceListWrapper instances).
    """

    def __init__(self, data):
        self.data = data
        self.create_properties_from_data()

    def create_properties_from_data(self):
        """Mirror id/type, attributes, and relationships onto the instance."""
        resource = self.data
        for field in ('id', 'type'):
            if field in resource:
                setattr(self, field, resource.get(field))
        for name, value in resource.get('attributes', {}).items():
            setattr(self, underscore(name), value)
        for name, rel in resource.get('relationships', {}).items():
            linked = rel['data']
            if isinstance(linked, list):
                wrapped = RemoteResourceListWrapper(linked)
            else:
                wrapped = RemoteResourceWrapper(linked)
            setattr(self, underscore(name), wrapped)
class RemoteResourceListWrapper(list):
    """A list whose elements wrap the raw resource dicts in ``seq``."""

    def __init__(self, seq):
        super(RemoteResourceListWrapper, self).__init__()
        self.data = seq
        self.add_items_from_data()

    def add_items_from_data(self):
        # Bug fix: the original used map() purely for its side effect.  Under
        # Python 3 map() is lazy and never consumed, so the list would
        # silently stay empty.  A plain loop works on both Python 2 and 3.
        for item in self.data:
            self.append(RemoteResourceWrapper(item))
def get_route_from_fk(resource_type, pk=None):
    """
    Gets a fully qualified URL for a given resource_type, pk.

    Args:
        resource_type: the JSON:API resource type to look up in the gateway's
            route table.
        pk: a single primary key, a list/set of keys (which produces a
            ``filter[id__in]`` query against the collection route), or None.

    Returns: the route's domain joined with the expanded URI-template path.

    Raises:
        RouteNotFoundException: if no gateway route declares this resource_type.
    """
    # NOTE(review): the route table is fetched from the gateway on every call
    # -- there is no caching here.
    routes = requests.get(zc_settings.GATEWAY_ROOT_PATH).json()
    # NOTE(review): iterkeys() is Python 2 only; confirm before porting to py3.
    for route in routes.iterkeys():
        if 'resource_type' in routes[route] and routes[route]['resource_type'] == resource_type:
            if isinstance(pk, (list, set)):
                # Bulk lookup: expand the template with no id, then filter by ids.
                expanded = '{}?filter[id__in]={}'.format(expand(route, {}), ','.join([str(x) for x in pk]))
            else:
                expanded = expand(route, {'id': pk})
            return '{0}{1}'.format(routes[route]['domain'], expanded)
    raise RouteNotFoundException('No route for resource_type: "{0}"'.format(resource_type))
def make_service_request(service_name, endpoint, method=GET, data=None):
    """
    Issue a JWT token-authenticated HTTP request to another service.

    Args:
        service_name: name of the service making this request. e.g. mp-users
        endpoint: the url to use
        method: HTTP method; must be one of the module-level constants
            (GET, POST, PUT, PATCH)
        data: optional request payload, sent as the JSON body

    Returns: the ``requests`` response object.

    Raises:
        UnsupportedHTTPMethodException: if ``method`` is not supported.
        ServiceRequestException: if the response status is 4xx or 5xx.
    """
    token = jwt_encode_handler(service_jwt_payload_handler(service_name))
    headers = {
        'Authorization': 'JWT {}'.format(token),
        'Content-Type': 'application/vnd.api+json',
    }
    if method not in (GET, POST, PUT, PATCH):
        raise UnsupportedHTTPMethodException(
            "Method {0} is not supported. service_name: {1}, endpoint: {2}".format(method, service_name, endpoint))
    response = getattr(requests, method)(endpoint, headers=headers, json=data)
    if 400 <= response.status_code < 600:
        http_error_msg = '{0} Error: {1} for {2}. Content: {3}'.format(
            response.status_code, response.reason, response.url, response.text)
        raise ServiceRequestException(http_error_msg, response)
    return response
def wrap_resource_from_response(response):
    """Deserialize a JSON:API response and wrap its primary ``data`` member.

    Returns a RemoteResourceListWrapper for list payloads, otherwise a
    RemoteResourceWrapper.  Raises RemoteResourceException when the payload
    has no ``data`` key.
    """
    payload = json.loads(response.text)
    try:
        resource_data = payload['data']
    except KeyError:
        msg = 'Error retrieving resource. Url: {0}. Content: {1}'.format(response.request.url, response.content)
        raise RemoteResourceException(msg)
    wrapper_cls = RemoteResourceListWrapper if isinstance(resource_data, list) else RemoteResourceWrapper
    return wrapper_cls(resource_data)
def get_remote_resource(service_name, resource_type, pk):
    """A shortcut function to make a GET request to a remote service.

    Resolves the resource URL through the gateway route table, performs a
    JWT-authenticated GET, and wraps the JSON:API payload.

    Args:
        service_name: name of the calling service (used to mint the JWT).
        resource_type: JSON:API resource type to resolve via the gateway.
        pk: primary key (or list/set of keys) of the remote resource.

    Returns: a RemoteResourceWrapper or RemoteResourceListWrapper.
    """
    url = get_route_from_fk(resource_type, pk)
    response = make_service_request(service_name, url)
    wrapped_resource = wrap_resource_from_response(response)
    return wrapped_resource
def get_remote_resource_from_url(service_name, url):
    """GET ``url`` on behalf of ``service_name`` and wrap the JSON-API payload."""
    return wrap_resource_from_response(make_service_request(service_name, url))
Fix typo in ServiceRequestException
import json
import requests
from inflection import underscore
from uritemplate import expand
from zc_common.jwt_auth.utils import service_jwt_payload_handler, jwt_encode_handler
from zc_common.settings import zc_settings
# Requests that can be made to another service
GET = 'get'
POST = 'post'
PUT = 'put'
PATCH = 'patch'
class UnsupportedHTTPMethodException(Exception):
    """Raised when a request uses a method outside GET/POST/PUT/PATCH."""
    pass
class RouteNotFoundException(Exception):
    """Raised when no gateway route matches the requested resource_type."""
    pass
class ServiceRequestException(Exception):
    """An exception commonly thrown when an HTTP request to another service endpoint fails."""
    # Class-level defaults; overwritten per instance in __init__.
    message = None
    response = None

    def __init__(self, message, response):
        # Keep the raw response so callers can inspect status code / body.
        self.message = message
        self.response = response
class RemoteResourceException(Exception):
    """Raised when a remote response lacks the expected JSON-API 'data' member."""
    pass
class RemoteResourceWrapper(object):
    """Wraps one JSON-API resource object, exposing its fields as attributes."""

    def __init__(self, data):
        # ``data`` is the raw resource dict: 'id'/'type'/'attributes'/'relationships'.
        self.data = data
        self.create_properties_from_data()

    def create_properties_from_data(self):
        """Flatten the JSON-API structure onto this instance as snake_case attributes."""
        if 'id' in self.data:
            setattr(self, 'id', self.data.get('id'))
        if 'type' in self.data:
            setattr(self, 'type', self.data.get('type'))
        if 'attributes' in self.data:
            attributes = self.data['attributes']
            for key in attributes.keys():
                # inflection.underscore converts camelCase keys to snake_case.
                setattr(self, underscore(key), attributes[key])
        if 'relationships' in self.data:
            relationships = self.data['relationships']
            for key in relationships.keys():
                # To-many relationships carry a list, to-one a single dict.
                # NOTE(review): a to-one relationship whose 'data' is None
                # would crash here -- confirm upstream never sends null.
                if isinstance(relationships[key]['data'], list):
                    setattr(self, underscore(key), RemoteResourceListWrapper(relationships[key]['data']))
                else:
                    setattr(self, underscore(key), RemoteResourceWrapper(relationships[key]['data']))
class RemoteResourceListWrapper(list):
    """Wraps a JSON-API resource collection as a list of RemoteResourceWrapper."""

    def __init__(self, seq):
        super(RemoteResourceListWrapper, self).__init__()
        self.data = seq
        self.add_items_from_data()

    def add_items_from_data(self):
        # Explicit loop instead of ``map``: on Python 3 ``map`` is lazy, so
        # the previous ``map(lambda x: self.append(...), self.data)`` never
        # executed and the list silently stayed empty.
        for item in self.data:
            self.append(RemoteResourceWrapper(item))
def get_route_from_fk(resource_type, pk=None):
    """Gets a fully qualified URL for a given resource_type, pk.

    Args:
        resource_type: JSON-API resource type to look up in the gateway routes.
        pk: single id, or a list/set of ids (expanded to an id__in filter).

    Raises:
        RouteNotFoundException: when no gateway route matches resource_type.
    """
    routes = requests.get(zc_settings.GATEWAY_ROOT_PATH).json()
    # Iterate the dict directly: dict.iterkeys() is Python-2-only, while
    # plain iteration over a dict yields keys identically on 2 and 3.
    for route in routes:
        if 'resource_type' in routes[route] and routes[route]['resource_type'] == resource_type:
            if isinstance(pk, (list, set)):
                # A collection of pks becomes a JSON-API id__in filter query.
                expanded = '{}?filter[id__in]={}'.format(
                    expand(route, {}), ','.join([str(x) for x in pk]))
            else:
                expanded = expand(route, {'id': pk})
            return '{0}{1}'.format(routes[route]['domain'], expanded)
    raise RouteNotFoundException('No route for resource_type: "{0}"'.format(resource_type))
def make_service_request(service_name, endpoint, method=GET, data=None):
    """
    Makes a JWT token-authenticated service request to the URL provided.
    Args:
        service_name: name of the service making this request. e.g. mp-users
        endpoint: the url to use
        method: HTTP method. supported methods are defined at this module's global variables
        data: request payload in case we are making a POST request
    Returns: text content of the response
    """
    # Sign a JWT identifying the calling service.
    jwt_token = jwt_encode_handler(service_jwt_payload_handler(service_name))
    headers = {'Authorization': 'JWT {}'.format(jwt_token), 'Content-Type': 'application/vnd.api+json'}
    if method not in [GET, POST, PUT, PATCH]:
        raise UnsupportedHTTPMethodException(
            "Method {0} is not supported. service_name: {1}, endpoint: {2}".format(method, service_name, endpoint))
    # Dispatch to requests.get/post/put/patch by constant name.
    response = getattr(requests, method)(endpoint, headers=headers, json=data)
    if 400 <= response.status_code < 600:
        # Covers both client (4xx) and server (5xx) errors.
        http_error_msg = '{0} Error: {1} for {2}. Content: {3}'.format(
            response.status_code, response.reason, response.url, response.text)
        raise ServiceRequestException(http_error_msg, response)
    return response
def wrap_resource_from_response(response):
    """Parse a JSON-API response and wrap its 'data' member.

    Raises:
        RemoteResourceException: if the body has no 'data' key.
    """
    json_response = json.loads(response.text)
    if 'data' not in json_response:
        msg = 'Error retrieving resource. Url: {0}. Content: {1}'.format(response.request.url, response.content)
        raise RemoteResourceException(msg)
    resource_data = json_response['data']
    # A list payload is a collection; a dict payload is a single resource.
    if isinstance(resource_data, list):
        return RemoteResourceListWrapper(resource_data)
    return RemoteResourceWrapper(resource_data)
def get_remote_resource(service_name, resource_type, pk):
    """A shortcut function to make a GET request to a remote service."""
    # Resolve the gateway route for this resource type, then fetch and wrap.
    url = get_route_from_fk(resource_type, pk)
    response = make_service_request(service_name, url)
    wrapped_resource = wrap_resource_from_response(response)
    return wrapped_resource
def get_remote_resource_from_url(service_name, url):
    """GET an already-known URL on behalf of ``service_name`` and wrap the result."""
    response = make_service_request(service_name, url)
    wrapped_resource = wrap_resource_from_response(response)
    return wrapped_resource
|
#! /usr/bin/python
'''
clean unit names in fdsn stationxml file
see https://github.com/iris-edu/StationXML-Validator/wiki/Unit-name-overview-for-IRIS-StationXML-validator
'''
import checkNRL as checkNRL
import sisxmlparser2_2_py3 as sisxmlparser
import argparse
import datetime
import os
import re
import sys
VERBOSE=False
KNOWN_UNITS = [ "meter", "m", "m/s", "m/s**2",
"centimeter", "cm", "cm/s", "cm/s**2",
"millimeter", "mm", "mm/s", "mm/s**2", "mm/hour",
"micrometer", "um", "um/s", "um/s**2",
"nanometer", "nm", "nm/s", "nm/s**2",
"second", "s", "millisecond", "ms",
"microsecond", "us", "nanosecond", "ns",
"minute", "min",
"hour",
"radian", "rad", "microradian", "urad",
"nanoradian", "nrad",
"rad/s", "rad/s**2",
"degree", "deg",
"kelvin", "K",
"celsius", "degC",
"candela", "cd",
"pascal", "Pa", "kilopascal", "kPa",
"hectopascal", "hPa",
"bar", "bars", "millibar", "mbar",
"ampere", "amperes", "A", "milliamp", "mA",
"volt", "V", "millivolt", "mV",
"microvolt", "uV",
"ohm",
"hertz", "Hz",
"newton", "N",
"joule", "J",
"tesla", "T", "nanotesla", "nT",
"strain", "m/m", "m**3/m**3", "cm/cm", "mm/mm", "um/um", "nm/nm",
"microstrain",
"watt", "W", "milliwatt", "mW",
"V/m",
"W/m**2",
"gap",
"reboot",
"byte","bit",
"bit/s",
"percent","%",
"count","counts",
"number",
"unitless",
"unknown" ]
#UNITS_WITH_CAPS = set([ "K","Pa","kPa","hPa","A","mA","V","mV","uV","Hz","N","J","T","nT","W","mW","V/m","W/m**2", "degC"])
def hasCap(s):
    """Return True when ``s`` contains at least one character changed by lower()."""
    return s.lower() != s
UNITS_WITH_CAPS = set(filter(hasCap, KNOWN_UNITS))
UNITS_WITH_CAPS_AS_UPPER = {x.upper() : x for x in UNITS_WITH_CAPS}
KNOWN_UNIT_SET = set(KNOWN_UNITS)
def setVerbose(b):
    """Set the module-level VERBOSE flag.

    The previous version assigned a function-local name, so the
    module-level flag never changed; ``global`` makes it take effect.
    """
    global VERBOSE
    VERBOSE = b
def initArgParser():
    """Build the CLI parser and parse sys.argv.

    Note: despite the name, this returns the parsed arguments
    (argparse.Namespace), not the parser itself.
    """
    parser = argparse.ArgumentParser(description='Clean Unit Names in StationXML or ExtendedStationXML.')
    parser.add_argument('-s', '--stationxml', required=True, help="input FDSN StationXML file, often retrieved from http://service.iris.edu/fdsnws/station/1/")
    # Defaults to stdout so the cleaned XML can be piped.
    parser.add_argument('-o', '--outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
    parser.add_argument('-v', '--verbose', action='store_true', help="verbose output")
    return parser.parse_args()
def cleanUnitName(inUnitName, changes):
    """Normalize one unit name toward the IRIS StationXML unit conventions.

    Args:
        inUnitName: unit name as found in the file.
        changes: dict accumulating 'numChanges' plus old->new mappings.

    Returns:
        The cleaned unit name (may equal the input).
    """
    if inUnitName != inUnitName.upper():
        # some lower case, so keep as is
        unit = inUnitName
    elif inUnitName in UNITS_WITH_CAPS_AS_UPPER:
        # all upper, but matches something we have with both upper and lower
        unit = UNITS_WITH_CAPS_AS_UPPER[inUnitName]
    else:
        # make it lower and hope
        unit = inUnitName.lower()
    outUnitName = unit
    # Table-driven manual fixes: replaces the previous if-chain (volts,
    # degc) and additionally covers 'sec' and bare 'c', which previously
    # slipped through as unknown units.
    manualFix = {
        'volts': 'V',
        'degc': 'degC',
        'sec': 's',
        'c': 'degC',
    }
    if unit.lower() in manualFix:
        outUnitName = manualFix[unit.lower()]
    if inUnitName != outUnitName:
        changes['numChanges']+=1
        changes[inUnitName] = outUnitName
        if VERBOSE: print("change %s to %s"%(inUnitName, outUnitName))
    if (not outUnitName in KNOWN_UNIT_SET):
        print("WARNING: unknown unit: %s"%(inUnitName,))
    return outUnitName
def cleanUnit(inUnit, changes):
    # Rewrite the unit's Name in place, recording any change in ``changes``.
    inUnit.Name = cleanUnitName(inUnit.Name, changes)
def cleanBaseFilter(filter, changes):
    """Clean the input/output units common to every filter type."""
    cleanUnit(filter.InputUnits, changes)
    cleanUnit(filter.OutputUnits, changes)
    # (status, message) tuple kept for interface symmetry; callers ignore it.
    return True, "ok"
def cleanPolesZeros(pz, changes):
    # PolesZeros only needs the shared base-filter unit cleanup.
    cleanBaseFilter(pz, changes)
    return True, "ok"
def cleanCoefficients(coef, changes):
    # Coefficients only needs the shared base-filter unit cleanup.
    cleanBaseFilter(coef, changes)
    return True, "ok"
def cleanFIR(fir, changes):
    # FIR only needs the shared base-filter unit cleanup.
    cleanBaseFilter(fir, changes)
    return True, "ok"
def cleanPolynomial(polynomial, changes):
    # Polynomial only needs the shared base-filter unit cleanup.
    cleanBaseFilter(polynomial, changes)
    return True, "ok"
def cleanDecimation(decimation, changes):
    # Decimation elements carry no unit names; nothing to clean.
    return True, "ok"
def cleanGain(gain, changes):
    # Units are optional on gain elements, so guard each with hasattr.
    if hasattr(gain, 'InputUnits'):
        cleanUnit(gain.InputUnits, changes)
    if hasattr(gain, 'OutputUnits'):
        cleanUnit(gain.OutputUnits, changes)
def cleanStage(stage, changes):
    """Clean units on whichever filter variant this stage holds, plus decimation/gain."""
    # The filter variants are mutually exclusive, hence the if/elif chain.
    if hasattr(stage, 'PolesZeros'):
        cleanPolesZeros(stage.PolesZeros, changes)
    elif hasattr(stage, 'Coefficients'):
        cleanCoefficients(stage.Coefficients, changes)
    elif hasattr(stage, 'FIR'):
        cleanFIR(stage.FIR, changes)
    elif hasattr(stage, 'Polynomial'):
        cleanPolynomial(stage.Polynomial, changes)
    # Decimation and gain can coexist with any filter variant.
    if hasattr(stage, 'Decimation'):
        cleanDecimation(stage.Decimation, changes)
    if hasattr(stage, 'StageGain'):
        cleanGain(stage.StageGain, changes)
def cleanResponse(resp, changes):
    """Clean units across a channel response: sensitivity, sub-response, stages."""
    if hasattr(resp, 'InstrumentSensitivity'):
        cleanUnit(resp.InstrumentSensitivity.InputUnits, changes)
        cleanUnit(resp.InstrumentSensitivity.OutputUnits, changes)
    if hasattr(resp, 'SubResponse') and hasattr(resp.SubResponse, 'ResponseDetail'):
        cleanStage(resp.SubResponse.ResponseDetail, changes)
    if hasattr(resp, 'Stage'):
        stage = getattr(resp, 'Stage', [])
        for i in range(0, len(stage)):
            cleanStage(resp.Stage[i], changes)
    return True, "ok"
def cleanUnitNames(staxml):
    """Walk the whole StationXML tree cleaning every unit name.

    Args:
        staxml: parsed StationXML root object.

    Returns:
        changes dict: 'numChanges' count plus old->new name mappings.
    """
    changes = { 'numChanges': 0 }
    for n in staxml.Network:
        for s in n.Station:
            for c in s.Channel:
                chanCode = checkNRL.getChanCodeId(n, s, c)
                if VERBOSE: print("clean chanCode %s "%(chanCode, ))
                if hasattr(c, 'SignalUnits'):
                    cleanUnit(c.SignalUnits, changes)
                if hasattr(c, 'CalibrationUnits'):
                    cleanUnit(c.CalibrationUnits, changes)
                # Return value is always (True, "ok"); previously bound to an
                # unused local.
                cleanResponse(c.Response, changes)
    if hasattr(staxml, 'HardwareResponse'):
        if hasattr(staxml.HardwareResponse, 'ResponseDictGroup'):
            respDict = getattr(staxml.HardwareResponse.ResponseDictGroup, 'ResponseDict', [])
            # Direct iteration with distinct loop variables: the original
            # indexed both loops with ``i``, shadowing the outer index.
            for respEntry in respDict:
                cleanStage(respEntry, changes)
                if hasattr(respEntry, 'FilterSequence'):
                    filterStage = getattr(respEntry.FilterSequence, 'FilterStage', [])
                    for fStage in filterStage:
                        cleanStage(fStage, changes)
    return changes
def usage():
    # Minimal help text; argparse in initArgParser provides the full usage.
    print("python cleanUnitNames <staxml>")
def main():
    """Entry point: parse args, clean unit names, export the modified XML."""
    # ``global`` so -v actually enables verbose output in the helper
    # functions; the original bound VERBOSE as a local, so the module-level
    # flag read by cleanUnitName/cleanUnitNames never changed.
    global VERBOSE
    VERBOSE = False
    parseArgs = initArgParser()
    if parseArgs.verbose:
        VERBOSE = True
    for k, v in vars(parseArgs).items():
        print("    Args: %s %s"%(k, v))
    if parseArgs.stationxml:
        if not os.path.exists(parseArgs.stationxml):
            # Typo fixed in the user-facing message: "fine" -> "find".
            print("ERROR: can't find stationxml file %s"%(parseArgs.stationxml,))
            return
        staxml = sisxmlparser.parse(parseArgs.stationxml)
        print("Clean unit names")
        changes = cleanUnitNames(staxml)
        print("ok (%d changes)"%(changes['numChanges'],))
        if VERBOSE:
            for k, v in changes.items():
                if k != 'numChanges':
                    print("    %s => %s"%(k, v))
        staxml.exportxml(parseArgs.outfile, 'FDSNStationXML', 'fsx', 0)
if __name__ == '__main__':
main()
Manually clean some units: volts, degC, sec, C
#! /usr/bin/python
'''
clean unit names in fdsn stationxml file
see https://github.com/iris-edu/StationXML-Validator/wiki/Unit-name-overview-for-IRIS-StationXML-validator
'''
import checkNRL as checkNRL
import sisxmlparser2_2_py3 as sisxmlparser
import argparse
import datetime
import os
import re
import sys
VERBOSE=False
KNOWN_UNITS = [ "meter", "m", "m/s", "m/s**2",
"centimeter", "cm", "cm/s", "cm/s**2",
"millimeter", "mm", "mm/s", "mm/s**2", "mm/hour",
"micrometer", "um", "um/s", "um/s**2",
"nanometer", "nm", "nm/s", "nm/s**2",
"second", "s", "millisecond", "ms",
"microsecond", "us", "nanosecond", "ns",
"minute", "min",
"hour",
"radian", "rad", "microradian", "urad",
"nanoradian", "nrad",
"rad/s", "rad/s**2",
"degree", "deg",
"kelvin", "K",
"celsius", "degC",
"candela", "cd",
"pascal", "Pa", "kilopascal", "kPa",
"hectopascal", "hPa",
"bar", "bars", "millibar", "mbar",
"ampere", "amperes", "A", "milliamp", "mA",
"volt", "V", "millivolt", "mV",
"microvolt", "uV",
"ohm",
"hertz", "Hz",
"newton", "N",
"joule", "J",
"tesla", "T", "nanotesla", "nT",
"strain", "m/m", "m**3/m**3", "cm/cm", "mm/mm", "um/um", "nm/nm",
"microstrain",
"watt", "W", "milliwatt", "mW",
"V/m",
"W/m**2",
"gap",
"reboot",
"byte","bit",
"bit/s",
"percent","%",
"count","counts",
"number",
"unitless",
"unknown" ]
manualFix = {
"volts":"V",
"degc":"degC",
"sec":"s",
"c":"degC"
}
#UNITS_WITH_CAPS = set([ "K","Pa","kPa","hPa","A","mA","V","mV","uV","Hz","N","J","T","nT","W","mW","V/m","W/m**2", "degC"])
def hasCap(s): return s != s.lower()
UNITS_WITH_CAPS = set(filter(hasCap, KNOWN_UNITS))
UNITS_WITH_CAPS_AS_UPPER = {x.upper() : x for x in UNITS_WITH_CAPS}
KNOWN_UNIT_SET = set(KNOWN_UNITS)
def setVerbose(b):
    """Set the module-level VERBOSE flag.

    The previous version assigned a function-local name, so the
    module-level flag never changed; ``global`` makes it take effect.
    """
    global VERBOSE
    VERBOSE = b
def initArgParser():
parser = argparse.ArgumentParser(description='Clean Unit Names in StationXML or ExtendedStationXML.')
parser.add_argument('-s', '--stationxml', required=True, help="input FDSN StationXML file, often retrieved from http://service.iris.edu/fdsnws/station/1/")
parser.add_argument('-o', '--outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)
parser.add_argument('-v', '--verbose', action='store_true', help="verbose output")
return parser.parse_args()
def cleanUnitName(inUnitName, changes):
    """Normalize one unit name; record any change in ``changes``.

    Returns the cleaned name (may equal the input).
    """
    if inUnitName != inUnitName.upper():
        # some lower case, so keep as is
        unit = inUnitName
    elif inUnitName in UNITS_WITH_CAPS_AS_UPPER:
        # all upper, but matches something we have with both upper and lower
        unit = UNITS_WITH_CAPS_AS_UPPER[inUnitName]
    else:
        # make it lower and hope
        unit = inUnitName.lower()
    outUnitName = unit
    # not sure if I really want to fix these...
    # Hand-curated replacements (volts->V, degc->degC, sec->s, c->degC).
    if outUnitName in manualFix:
        outUnitName = manualFix[outUnitName]
    if inUnitName != outUnitName:
        changes['numChanges']+=1
        changes[inUnitName] = outUnitName
        if VERBOSE: print("change %s to %s"%(inUnitName, outUnitName))
    if (not outUnitName in KNOWN_UNIT_SET):
        print("WARNING: unknown unit: %s"%(inUnitName,))
    return outUnitName
def cleanUnit(inUnit, changes):
inUnit.Name = cleanUnitName(inUnit.Name, changes)
def cleanBaseFilter(filter, changes):
cleanUnit(filter.InputUnits, changes)
cleanUnit(filter.OutputUnits, changes)
return True, "ok"
def cleanPolesZeros(pz, changes):
cleanBaseFilter(pz, changes)
return True, "ok"
def cleanCoefficients(coef, changes):
cleanBaseFilter(coef, changes)
return True, "ok"
def cleanFIR(fir, changes):
cleanBaseFilter(fir, changes)
return True, "ok"
def cleanPolynomial(polynomial, changes):
cleanBaseFilter(polynomial, changes)
return True, "ok"
def cleanDecimation(decimation, changes):
return True, "ok"
def cleanGain(gain, changes):
if hasattr(gain, 'InputUnits'):
cleanUnit(gain.InputUnits, changes)
if hasattr(gain, 'OutputUnits'):
cleanUnit(gain.OutputUnits, changes)
def cleanStage(stage, changes):
if hasattr(stage, 'PolesZeros'):
cleanPolesZeros(stage.PolesZeros, changes)
elif hasattr(stage, 'Coefficients'):
cleanCoefficients(stage.Coefficients, changes)
elif hasattr(stage, 'FIR'):
cleanFIR(stage.FIR, changes)
elif hasattr(stage, 'Polynomial'):
cleanPolynomial(stage.Polynomial, changes)
if hasattr(stage, 'Decimation'):
cleanDecimation(stage.Decimation, changes)
if hasattr(stage, 'StageGain'):
cleanGain(stage.StageGain, changes)
def cleanResponse(resp, changes):
if hasattr(resp, 'InstrumentSensitivity'):
cleanUnit(resp.InstrumentSensitivity.InputUnits, changes)
cleanUnit(resp.InstrumentSensitivity.OutputUnits, changes)
if hasattr(resp, 'SubResponse') and hasattr(resp.SubResponse, 'ResponseDetail'):
cleanStage(resp.SubResponse.ResponseDetail, changes)
if hasattr(resp, 'Stage'):
stage = getattr(resp, 'Stage', [])
for i in range(0, len(stage)):
cleanStage(resp.Stage[i], changes)
return True, "ok"
def cleanUnitNames(staxml):
changes = { 'numChanges': 0 }
for n in staxml.Network:
for s in n.Station:
for c in s.Channel:
chanCode = checkNRL.getChanCodeId(n, s, c)
if VERBOSE: print("clean chanCode %s "%(chanCode, ))
if hasattr(c, 'SignalUnits'):
cleanUnit(c.SignalUnits, changes)
if hasattr(c, 'CalibrationUnits'):
cleanUnit(c.CalibrationUnits, changes)
result = cleanResponse(c.Response, changes)
if hasattr(staxml, 'HardwareResponse'):
if hasattr(staxml.HardwareResponse, 'ResponseDictGroup'):
respDict = getattr(staxml.HardwareResponse.ResponseDictGroup, 'ResponseDict', [])
for i in range(0, len(respDict)):
cleanStage(respDict[i], changes)
if hasattr(respDict[i], 'FilterSequence'):
filterStage = getattr(respDict[i].FilterSequence, 'FilterStage', [])
for i in range(0, len(filterStage)):
cleanStage(filterStage[i], changes)
return changes
def usage():
print("python cleanUnitNames <staxml>")
def main():
    """Entry point: parse args, clean unit names, export the modified XML."""
    # ``global`` so -v actually enables verbose output in the helper
    # functions; the original bound VERBOSE as a local, so the module-level
    # flag read by cleanUnitName/cleanUnitNames never changed.
    global VERBOSE
    VERBOSE = False
    parseArgs = initArgParser()
    if parseArgs.verbose:
        VERBOSE = True
    for k, v in vars(parseArgs).items():
        print("    Args: %s %s"%(k, v))
    if parseArgs.stationxml:
        if not os.path.exists(parseArgs.stationxml):
            # Typo fixed in the user-facing message: "fine" -> "find".
            print("ERROR: can't find stationxml file %s"%(parseArgs.stationxml,))
            return
        staxml = sisxmlparser.parse(parseArgs.stationxml)
        print("Clean unit names")
        changes = cleanUnitNames(staxml)
        print("ok (%d changes)"%(changes['numChanges'],))
        if VERBOSE:
            for k, v in changes.items():
                if k != 'numChanges':
                    print("    %s => %s"%(k, v))
        staxml.exportxml(parseArgs.outfile, 'FDSNStationXML', 'fsx', 0)
if __name__ == '__main__':
main()
|
# -*- coding: utf-8 -*-
import os
import sys
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), "_ext")))
# package data
about = {}
with open("../tmuxp/__about__.py") as fp:
exec(fp.read(), about)
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'aafig',
'releases',
'alabaster',
]
releases_unstable_prehistory = True
releases_document_name = "history"
releases_issue_uri = "https://github.com/tony/tmuxp/issues/%s"
releases_release_uri = "https://github.com/tony/tmuxp/tree/%s"
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = about['__title__']
copyright = about['__copyright__']
# Short X.Y version: keep the first two version *components*. The previous
# expression applied [:2] to the rejoined string, truncating e.g. '1.13.0'
# to '1.' (first two characters).
version = '.'.join(about['__version__'].split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = about['__version__']
exclude_patterns = ['_build']
pygments_style = 'sphinx'
import alabaster
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_sidebars = {
    '**': [
        'about.html',
        'star.html',
        'navigation.html',
        'relations.html',
        'more.html',
        'searchbox.html',
    ]
}
# NOTE(review): this reassignment clobbers the alabaster.get_path() value
# set above, leaving only '_themes' on the theme search path -- confirm
# that is intended (the alabaster extension may still register its theme).
html_theme_path = ['_themes']
html_static_path = ['_static']
htmlhelp_basename = '%sdoc' % about['__title__']
latex_documents = [
('index', '{0}.tex'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], 'manual'),
]
man_pages = [
('index', about['__package_name__'],
'{0} Documentation'.format(about['__title__']),
about['__author__'], 1),
]
texinfo_documents = [
('index', '{0}'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], about['__package_name__'],
about['__description__'], 'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'libtmux': ('https://libtmux.readthedocs.io/', None)
}
# aafig format, try to get working with pdf
aafig_format = dict(latex='pdf', html='gif')
aafig_default_options = dict(
scale=.75,
aspect=0.5,
proportional=True,
)
Add new libtmux intersphinx mapping
# -*- coding: utf-8 -*-
import os
import sys
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
sys.path.insert(0, project_root)
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), "_ext")))
# package data
about = {}
with open("../tmuxp/__about__.py") as fp:
exec(fp.read(), about)
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'aafig',
'releases',
'alabaster',
]
releases_unstable_prehistory = True
releases_document_name = "history"
releases_issue_uri = "https://github.com/tony/tmuxp/issues/%s"
releases_release_uri = "https://github.com/tony/tmuxp/tree/%s"
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = about['__title__']
copyright = about['__copyright__']
# Short X.Y version: keep the first two version *components*. The previous
# expression applied [:2] to the rejoined string, truncating e.g. '1.13.0'
# to '1.' (first two characters).
version = '.'.join(about['__version__'].split('.')[:2])
# The full version, including alpha/beta/rc tags.
release = about['__version__']
exclude_patterns = ['_build']
pygments_style = 'sphinx'
import alabaster
html_theme_path = [alabaster.get_path()]
html_theme = 'alabaster'
html_sidebars = {
'**': [
'about.html',
'star.html',
'navigation.html',
'relations.html',
'more.html',
'searchbox.html',
]
}
html_theme_path = ['_themes']
html_static_path = ['_static']
htmlhelp_basename = '%sdoc' % about['__title__']
latex_documents = [
('index', '{0}.tex'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], 'manual'),
]
man_pages = [
('index', about['__package_name__'],
'{0} Documentation'.format(about['__title__']),
about['__author__'], 1),
]
texinfo_documents = [
('index', '{0}'.format(about['__package_name__']),
'{0} Documentation'.format(about['__title__']),
about['__author__'], about['__package_name__'],
about['__description__'], 'Miscellaneous'),
]
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'libtmux': ('https://libtmux.git-pull.com/en/latest/', None)
}
# aafig format, try to get working with pdf
aafig_format = dict(latex='pdf', html='gif')
aafig_default_options = dict(
scale=.75,
aspect=0.5,
proportional=True,
)
|
# -*- coding: utf-8 -*-
# Sphinx configuration for karma-loud docs; utf-8 declared because the
# copyright string below contains a non-ASCII en dash.
project = 'karma-loud'
copyright = u'2014–2015, Ruslan Sagitov'
version = '1.0'
release = '1.0.4'
master_doc = 'index'
Use utf-8
# -*- coding: utf-8 -*-
project = 'karma-loud'
copyright = u'2014–2015, Ruslan Sagitov'
version = '1.0'
release = '1.0.4'
master_doc = 'index'
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PySME documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 16 11:08:04 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Need some mock modules since readthedocs doesn't have python-dev installed, so
# it can't install numpy
from mock import Mock as MagicMock
class Mock(MagicMock):
    """Read-the-Docs build stub: any attribute access yields another Mock.

    Lets autodoc import modules whose real dependencies (numpy/scipy, the
    pysme extension modules listed in MOCK_MODULES) are absent on the
    docs build host.
    """
    @classmethod
    def __getattr__(cls, name):
        return Mock()
MOCK_MODULES = ['numpy', 'scipy', 'scipy.integrate', 'scipy.special', 'pysme',
'pysme.system_builder', 'pysme.gellmann', 'pysme.gramschmidt',
'pysme.integrate', 'pysme.sde', 'pysme.grid_conv']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src/pysme'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode',
'sphinxcontrib.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySME'
copyright = '2014, Jonathan Gross'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. This sets up a different theme for local and RTD
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PySMEdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r'\usepackage{amssymb}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PySME.tex', 'PySME Documentation',
'Jonathan Gross', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysme', 'PySME Documentation',
['Jonathan Gross'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PySME', 'PySME Documentation',
'Jonathan Gross', 'PySME', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
Explicitly declare python as the default domain
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PySME documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 16 11:08:04 2014.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# Need some mock modules since readthedocs doesn't have python-dev installed, so
# it can't install numpy
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ['numpy', 'scipy', 'scipy.integrate', 'scipy.special', 'pysme',
'pysme.system_builder', 'pysme.gellmann', 'pysme.gramschmidt',
'pysme.integrate', 'pysme.sde', 'pysme.grid_conv']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../src/pysme'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode',
'sphinxcontrib.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'PySME'
copyright = '2014, Jonathan Gross'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. This selects the theme for local and RTD builds.
# Detect whether this build is running on the Read the Docs servers.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
# Local and RTD builds currently use the same stock theme.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'PySMEdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': r'\usepackage{amssymb}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'PySME.tex', 'PySME Documentation',
'Jonathan Gross', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pysme', 'PySME Documentation',
['Jonathan Gross'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'PySME', 'PySME Documentation',
'Jonathan Gross', 'PySME', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Set python as the primary domain.
primary_domain = 'py'
|
# -*- coding: utf-8 -*-
#
# Disco documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 6 17:23:01 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('../bin'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Disco'
copyright = '2008-2012, Nokia Corporation and the Disco Project'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '%DISCO_VERSION%'
# The full version, including alpha/beta/rc tags.
release = '%DISCO_RELEASE%'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'sphinxdoc.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Discodoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Disco.tex', 'Disco Documentation', 'Disco Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
def setup(app):
import re
repr_re = re.compile(r'<([\w\s]+) at 0x\w+>')
def process_signature(app, what, name, obj, opts, signature, return_annotation):
if signature:
signature = repr_re.sub(r'\1', signature)
return signature, return_annotation
def skip_member(app, what, name, obj, skip, options):
if skip:
return True
if what != 'module' and type(obj) is type:
return True
app.connect('autodoc-process-signature', process_signature)
app.connect('autodoc-skip-member', skip_member)
docs: use intersphinx to link between projects
# -*- coding: utf-8 -*-
#
# Disco documentation build configuration file, created by
# sphinx-quickstart on Sun Jul 6 17:23:01 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath('../bin'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.intersphinx']
intersphinx_mapping = {'discodb': ('http://discoproject.org/doc/discodb', None),
'discodex': ('http://discoproject.org/doc/discodex', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Disco'
copyright = '2008-2012, Nokia Corporation and the Disco Project'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '%DISCO_VERSION%'
# The full version, including alpha/beta/rc tags.
release = '%DISCO_RELEASE%'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
#exclude_dirs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'sphinxdoc.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
html_logo = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Discodoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Disco.tex', 'Disco Documentation', 'Disco Project', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
def setup(app):
import re
repr_re = re.compile(r'<([\w\s]+) at 0x\w+>')
def process_signature(app, what, name, obj, opts, signature, return_annotation):
if signature:
signature = repr_re.sub(r'\1', signature)
return signature, return_annotation
def skip_member(app, what, name, obj, skip, options):
if skip:
return True
if what != 'module' and type(obj) is type:
return True
app.connect('autodoc-process-signature', process_signature)
app.connect('autodoc-skip-member', skip_member)
|
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Sun 21 Jun 21:03:10 2015 BRT
import os
import sys
import glob
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
# The viewcode extension appeared only on Sphinx >= 1.0.0
import sphinx
# NOTE(review): this is a lexicographic string comparison, not a numeric
# version compare; it separates "0.x" from "1.x" but would mis-order a
# major version of 10 or greater — confirm before reusing this pattern.
if sphinx.__version__ >= "1.0":
    extensions.append('sphinx.ext.viewcode')
# Always includes todos
todo_include_todos = True
# If we are on OSX, the 'dvipng' path may be different
dvipng_osx = '/opt/local/libexec/texlive/binaries/dvipng'
if os.path.exists(dvipng_osx): pngmath_dvipng = dvipng_osx
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rrpack'
import time
copyright = u'%s, Idiap Research Institute' % time.strftime('%Y')
# Grab the setup entry
distribution = pkg_resources.require('rrpack')[0]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = distribution.version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['links.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx.__version__ >= "1.0":
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = 'rrpack'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ''
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'rrpack_doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'rrpack.tex', u'Reproducible Research Example in Python',
u'Biometrics Group, Idiap Research Institute', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = ''
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# Included after all input documents
rst_epilog = ''
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rrpack', u'Reproducible Research Package in Python', [u'Idiap Research Institute'], 1)
]
# Default processing flags for sphinx
autoclass_content = 'both'
autodoc_member_order = 'bysource'
autodoc_default_flags = [
'members',
'undoc-members',
'inherited-members',
'show-inheritance',
]
def setup(app):
pass
Fix documentation
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Andre Anjos <andre.anjos@idiap.ch>
# Sun 21 Jun 21:03:10 2015 BRT
import os
import sys
import glob
import pkg_resources
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
# The viewcode extension appeared only on Sphinx >= 1.0.0
import sphinx
# NOTE(review): this is a lexicographic string comparison, not a numeric
# version compare; it separates "0.x" from "1.x" but would mis-order a
# major version of 10 or greater — confirm before reusing this pattern.
if sphinx.__version__ >= "1.0":
    extensions.append('sphinx.ext.viewcode')
# Always includes todos
todo_include_todos = True
# If we are on OSX, the 'dvipng' path may be different
dvipng_osx = '/opt/local/libexec/texlive/binaries/dvipng'
if os.path.exists(dvipng_osx): pngmath_dvipng = dvipng_osx
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'rrbob'
import time
copyright = u'%s, Idiap Research Institute' % time.strftime('%Y')
# Grab the setup entry
distribution = pkg_resources.require('rrbob')[0]
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = distribution.version
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['links.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if sphinx.__version__ >= "1.0":
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = 'rrbob'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = ''
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
# Base name for files produced by the HTML help builder.
htmlhelp_basename = 'rrbob_doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
# NOTE(review): latex_paper_size/latex_font_size are legacy option names;
# modern Sphinx reads latex_elements['papersize'/'pointsize'] instead --
# confirm the Sphinx version this project pins before migrating.
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'rrbob.tex', u'Reproducible Research Example in Python/Bob',
u'Biometrics Group, Idiap Research Institute', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# Empty string: no logo on the LaTeX title page.
latex_logo = ''
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# Included after all input documents
# reST snippet appended to every source file before rendering (empty: no-op).
rst_epilog = ''
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'rrbob', u'Reproducible Research Package in Python/Bob', [u'Idiap Research Institute'], 1)
]
# Default processing flags for sphinx
# 'both': concatenate the class docstring and the __init__ docstring.
autoclass_content = 'both'
# Document members in the order they appear in the source file.
autodoc_member_order = 'bysource'
# NOTE(review): autodoc_default_flags is deprecated since Sphinx 1.8 in
# favour of the autodoc_default_options dict -- confirm the pinned Sphinx
# version before migrating.
autodoc_default_flags = [
'members',
'undoc-members',
'inherited-members',
'show-inheritance',
]
def setup(app):
pass
# | -- extraction artifact: separator between concatenated conf.py files
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# BCIM documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 22 18:49:05 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# For Julia
import juliadoc
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Enabled Sphinx extensions: autodoc plus the juliadoc extension modules
# providing Julia-language support and the `jlhelp` builder.
extensions = [
'sphinx.ext.autodoc',
'juliadoc.julia',
'juliadoc.jlhelp'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'BCIM'
copyright = '2015, Dan Kolbman'
author = 'Dan Kolbman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
# NOTE(review): version/release are hard-coded twice; keep them in sync when
# bumping (or derive release from the package itself).
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): language = None is the historical quickstart default; Sphinx
# >= 5 warns on None and expects a string such as 'en' -- confirm the pinned
# Sphinx version.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# Syntax-highlighting style for code blocks.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# NOTE(review): no html_theme is set in this revision, so Sphinx falls back
# to its default theme even though the juliadoc theme directory is added to
# the search path below -- presumably unintentional; verify.
#html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
## Julia
html_theme_path = [juliadoc.get_theme_dir()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
## Julia
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# Use the sidebar layout shipped with juliadoc rather than the Sphinx default.
html_sidebars = juliadoc.default_sidebars()
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# Base name for files produced by the HTML help builder.
htmlhelp_basename = 'BCIMdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# Consistency: reference the module-level `author` (same value) instead of
# repeating the 'Dan Kolbman' literal, matching man_pages/texinfo_documents.
latex_documents = [
(master_doc, 'BCIM.tex', 'BCIM Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per manual page: (source start file, name, description, authors,
# manual section).
man_pages = [
(master_doc, 'bcim', 'BCIM Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): the description is still the quickstart placeholder
# ('One line description of project.') -- fill in a real summary.
texinfo_documents = [
(master_doc, 'BCIM', 'BCIM Documentation',
author, 'BCIM', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# VCS commit message (extraction artifact): "Update conf for sphinx"
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# BCIM documentation build configuration file, created by
# sphinx-quickstart on Sun Mar 22 18:49:05 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# For Julia
import juliadoc
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Enabled Sphinx extensions: autodoc plus the juliadoc extension modules
# providing Julia-language support and the `jlhelp` builder.
extensions = [
'sphinx.ext.autodoc',
'juliadoc.julia',
'juliadoc.jlhelp'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'BCIM'
copyright = '2015, Dan Kolbman'
author = 'Dan Kolbman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
# NOTE(review): version/release are hard-coded twice; keep them in sync when
# bumping (or derive release from the package itself).
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): language = None is the historical quickstart default; Sphinx
# >= 5 warns on None and expects a string such as 'en' -- confirm the pinned
# Sphinx version.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# Syntax-highlighting style for code blocks.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# NOTE(review): juliadoc's bundled theme is conventionally named 'julia';
# confirm a theme called 'julita' actually exists under
# juliadoc.get_theme_dir(), otherwise the HTML build will fail.
html_theme = 'julita'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
## Julia
html_theme_path = [juliadoc.get_theme_dir()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
## Julia
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# Use the sidebar layout shipped with juliadoc rather than the Sphinx default.
html_sidebars = juliadoc.default_sidebars()
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# Base name for files produced by the HTML help builder.
htmlhelp_basename = 'BCIMdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# Consistency: reference the module-level `author` (same value) instead of
# repeating the 'Dan Kolbman' literal, matching man_pages/texinfo_documents.
latex_documents = [
(master_doc, 'BCIM.tex', 'BCIM Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per manual page: (source start file, name, description, authors,
# manual section).
man_pages = [
(master_doc, 'bcim', 'BCIM Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): the description is still the quickstart placeholder
# ('One line description of project.') -- fill in a real summary.
texinfo_documents = [
(master_doc, 'BCIM', 'BCIM Documentation',
author, 'BCIM', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
# -*- coding: utf-8 -*-
#
# pyglmnet documentation build configuration file, created by
# sphinx-quickstart on Mon May 9 19:01:11 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
import pyglmnet
import sphinx_gallery
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Enabled Sphinx extensions: autodoc/autosummary for API docs, numpydoc for
# NumPy-style docstrings, and sphinx-gallery for the examples gallery.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'numpydoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx_gallery.gen_gallery'
]
# generate autosummary even if no references
autosummary_generate = True
# Avoid numpydoc/autosummary duplicating class member listings.
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyglmnet'
copyright = u'2016-2019, Pavan Ramkumar'
author = u'Pavan Ramkumar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Both version and release track the installed package, so there is a single
# source of truth for the version string.
version = pyglmnet.__version__
# The full version, including alpha/beta/rc tags.
release = pyglmnet.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): language = None is the historical quickstart default; Sphinx
# >= 5 warns on None and expects a string such as 'en' -- confirm the pinned
# Sphinx version.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
# Syntax-highlighting style for code blocks.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# Requires the sphinx_rtd_theme package (or a copy under _themes/ below).
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes/", ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
# Base name for files produced by the HTML help builder.
htmlhelp_basename = 'pyglmnetdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
# Consistency: reference the module-level `author` (same value) instead of
# repeating the u'Pavan Ramkumar' literal, matching man_pages/texinfo_documents.
latex_documents = [
(master_doc, 'pyglmnet.tex', u'pyglmnet Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
# One entry per manual page: (source start file, name, description, authors,
# manual section).
man_pages = [
(master_doc, 'pyglmnet', u'pyglmnet Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# NOTE(review): the description is still the quickstart placeholder
# ('One line description of project.') -- fill in a real summary.
texinfo_documents = [
(master_doc, 'pyglmnet', u'pyglmnet Documentation',
author, 'pyglmnet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# sphinx-gallery configuration: build the example gallery from ../examples
# (relative to this conf.py) into auto_examples/ inside the docs source tree.
sphinx_gallery_conf = {
'examples_dirs': '../examples',
'gallery_dirs': 'auto_examples'
}
# VCS commit message (extraction artifact): "WIP on conf"
# -*- coding: utf-8 -*-
#
# pyglmnet documentation build configuration file, created by
# sphinx-quickstart on Mon May 9 19:01:11 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import sphinx
import pyglmnet
import sphinx_gallery
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# Enabled Sphinx extensions: autodoc/autosummary for API docs, numpydoc for
# NumPy-style docstrings, intersphinx for cross-project links, and
# sphinx-gallery for the examples gallery.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'numpydoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx_gallery.gen_gallery'
]
# generate autosummary even if no references
autosummary_generate = True
# Avoid numpydoc/autosummary duplicating class member listings.
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyglmnet'
copyright = u'2016-2019, Pavan Ramkumar'
author = u'Pavan Ramkumar'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Both version and release track the installed package, so there is a single
# source of truth for the version string.
version = pyglmnet.__version__
# The full version, including alpha/beta/rc tags.
release = pyglmnet.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
# NOTE(review): language = None is the historical quickstart default; Sphinx
# >= 5 warns on None and expects a string such as 'en' -- confirm the pinned
# Sphinx version.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["_themes/", ]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyglmnetdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pyglmnet.tex', u'pyglmnet Documentation',
u'Pavan Ramkumar', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyglmnet', u'pyglmnet Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pyglmnet', u'pyglmnet Documentation',
author, 'pyglmnet', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'numpy': ('https://numpy.org/devdocs', None),
'scipy': ('https://scipy.github.io/devdocs', None),
'matplotlib': ('https://matplotlib.org', None)
}
sphinx_gallery_conf = {
'examples_dirs': '../examples',
'gallery_dirs': 'auto_examples',
'reference_url': dict(pyglmnet=None)
}
|
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import warnings
import re
from datetime import datetime
from sklearn.externals._packaging.version import parse
from pathlib import Path
from io import StringIO
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath("sphinxext"))
from github_link import make_linkcode_resolve
import sphinx_gallery
from sphinx_gallery.sorting import ExampleTitleSortKey
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "numpydoc",
    "sphinx.ext.linkcode",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.imgconverter",
    "sphinx_gallery.gen_gallery",
    "sphinx_issues",
    "add_toctree_functions",
    "sphinx-prompt",
    "sphinxext.opengraph",
    "doi_role",
    "allow_nan_estimators",
    "matplotlib.sphinxext.plot_directive",
]
# Produce `plot::` directives for examples that contain `import matplotlib` or
# `from matplotlib import`.
numpydoc_use_plots = True
# Options for the `::plot` directive:
# https://matplotlib.org/stable/api/sphinxext_plot_directive_api.html
plot_formats = ["png"]
plot_include_source = True
plot_html_show_formats = False
plot_html_show_source_link = False
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get("NO_MATHJAX"):
    extensions.append("sphinx.ext.imgmath")
    imgmath_image_format = "svg"
    # Empty path tells the theme no MathJax script needs to be loaded.
    mathjax_path = ""
else:
    extensions.append("sphinx.ext.mathjax")
    mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"
autodoc_default_options = {"members": True, "inherited-members": True}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates"]
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8'
# The main toctree document.
root_doc = "contents"
# General information about the project.
project = "scikit-learn"
copyright = f"2007 - {datetime.now().year}, scikit-learn developers (BSD License)"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version: the first two components of the installed
# scikit-learn's base version (pre/post/dev suffixes stripped).
import sklearn
parsed_version = parse(sklearn.__version__)
version = ".".join(parsed_version.base_version.split(".")[:2])
# The full version, including alpha/beta/rc tags.
# Removes post from release name
if parsed_version.is_postrelease:
    release = parsed_version.base_version
else:
    release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "templates", "includes", "themes"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "literal"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "scikit-learn-modern"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    "google_analytics": True,
    "mathjax_path": mathjax_path,
    # Dev builds link to the in-tree contributing docs instead of the live site.
    "link_to_live_contributing_page": not parsed_version.is_devrelease,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "scikit-learn"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "logos/scikit-learn-logo-small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "logos/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["images"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "index.html"}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = "scikit-learndoc"
# If true, the reST sources are included in the HTML build as _sources/name.
html_copy_source = True
# Adds variables into templates
html_context = {}
# finds latest release highlights and places it into HTML context for
# index.html
release_highlights_dir = Path("..") / "examples" / "release_highlights"
# Finds the highlight with the latest version number.
# NOTE(review): relies on lexicographic sort of the file names — assumes the
# newest release always sorts last.
latest_highlights = sorted(release_highlights_dir.glob("plot_release_highlights_*.py"))[
    -1
]
latest_highlights = latest_highlights.with_suffix("").name
html_context[
    "release_highlights"
] = f"auto_examples/release_highlights/{latest_highlights}"
# get version from highlight name assuming highlights have the form
# plot_release_highlights_0_22_0
highlight_version = ".".join(latest_highlights.split("_")[-3:-1])
html_context["release_highlights_version"] = highlight_version
# redirects dictionary maps from old links to new links
redirects = {
    "documentation": "index",
    "auto_examples/feature_selection/plot_permutation_test_for_classification": (
        "auto_examples/model_selection/plot_permutation_tests_for_classification"
    ),
    "modules/model_persistence": "model_persistence",
    "auto_examples/linear_model/plot_bayesian_ridge": (
        "auto_examples/linear_model/plot_ard"
    ),
}
html_context["redirects"] = redirects
# Each old URL gets a stub page (rendered from redirects.html) pointing at
# its replacement.
for old_link in redirects:
    html_additional_pages[old_link] = "redirects.html"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    "preamble": r"""
        \usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
        \usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
        \let\oldhref\href
        \renewcommand{\href}[2]{\oldhref{#1}{\hbox{#2}}}
        """
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    (
        "contents",
        "user_guide.tex",
        "scikit-learn user guide",
        "scikit-learn developers",
        "manual",
    ),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
# intersphinx configuration
intersphinx_mapping = {
    "python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
    "numpy": ("https://numpy.org/doc/stable", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/", None),
    "matplotlib": ("https://matplotlib.org/", None),
    "pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
    "joblib": ("https://joblib.readthedocs.io/en/latest/", None),
    "seaborn": ("https://seaborn.pydata.org/", None),
}
# Validate the release string and derive the branch Binder notebooks run
# against: "main" for dev builds, the "X.Y.X" maintenance branch otherwise.
v = parse(release)
if v.release is None:
    raise ValueError(
        "Ill-formed version: {!r}. Version should follow PEP440".format(version)
    )
if v.is_devrelease:
    binder_branch = "main"
else:
    major, minor = v.release[:2]
    binder_branch = "{}.{}.X".format(major, minor)
class SubSectionTitleOrder:
    """Sort example gallery subsections by the title in their README.txt.

    Assumes every subsection has a README.txt whose title is adorned with
    dashes ('---'), and forces the release-highlights subsection to the top.
    """

    def __init__(self, src_dir):
        # Root of the examples tree; subsection paths are resolved against it.
        self.src_dir = src_dir
        # Captures a title line of word characters/spaces sitting directly
        # above a '-' underline.
        self.regex = re.compile(r"^([\w ]+)\n-", re.MULTILINE)

    def __repr__(self):
        return "<%s>" % (self.__class__.__name__,)

    def __call__(self, directory):
        subsection = os.path.normpath(os.path.join(self.src_dir, directory))
        # "0" sorts before any alphabetic title, keeping Release Highlights first.
        if os.path.basename(subsection) == "release_highlights":
            return "0"
        try:
            with open(os.path.join(subsection, "README.txt"), "r") as readme:
                match = self.regex.search(readme.read())
        except FileNotFoundError:
            # No README: fall back to sorting by the directory name itself.
            return directory
        return match.group(1) if match is not None else directory
class SKExampleTitleSortKey(ExampleTitleSortKey):
    """Order examples by title, except release highlights, which sort by version."""

    def __call__(self, filename):
        # Default ordering: the example's title, as computed by the base class.
        title = super().__call__(filename)
        prefix = "plot_release_highlights_"
        if filename.startswith(prefix):
            # Highlight scripts are named plot_release_highlights_<maj>_<min>_<patch>;
            # negate the <maj>.<min> number so the newest release sorts first.
            version_parts = filename[len(prefix):].split("_")[:2]
            return -float(".".join(version_parts))
        return title
sphinx_gallery_conf = {
    "doc_module": "sklearn",
    "backreferences_dir": os.path.join("modules", "generated"),
    "show_memory": False,
    "reference_url": {"sklearn": None},
    "examples_dirs": ["../examples"],
    "gallery_dirs": ["auto_examples"],
    "subsection_order": SubSectionTitleOrder("../examples"),
    "within_subsection_order": SKExampleTitleSortKey,
    "binder": {
        "org": "scikit-learn",
        "repo": "scikit-learn",
        "binderhub_url": "https://mybinder.org",
        "branch": binder_branch,
        "dependencies": "./binder/requirements.txt",
        "use_jupyter_lab": True,
    },
    # avoid generating too many cross links
    "inspect_global_variables": False,
    "remove_config_comments": True,
    # NOTE: deliberately a string, not a bool — it is rewritten to "False"
    # for the linkcheck builder by disable_plot_gallery_for_linkcheck.
    "plot_gallery": "True",
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: image file name of the plot to turn into a carousel thumbnail
# value: maximum width (px) of the generated thumbnail (see make_carousel_thumbs)
carousel_thumbs = {"sphx_glr_plot_classifier_comparison_001.png": 600}
# enable experimental module so that experimental estimators can be
# discovered properly by sphinx
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.experimental import enable_halving_search_cv # noqa
def make_carousel_thumbs(app, exception):
    """Produce the resized front-page carousel images after a successful build."""
    if exception is not None:
        # A failed build has nothing to post-process.
        return
    print("Preparing carousel images")
    image_dir = os.path.join(app.builder.outdir, "_images")
    for plot_name, max_width in carousel_thumbs.items():
        source_image = os.path.join(image_dir, plot_name)
        if not os.path.exists(source_image):
            continue
        # foo.png -> foo_carousel.png, scaled to max_width x 190.
        thumb_path = os.path.join(image_dir, plot_name[:-4] + "_carousel.png")
        sphinx_gallery.gen_rst.scale_image(source_image, thumb_path, max_width, 190)
def filter_search_index(app, exception):
    """Strip dunder-method entries from the generated HTML search index."""
    if exception is not None:
        return
    # searchindex.js only exists when generating html
    if app.builder.name != "html":
        return
    print("Removing methods from search index")
    index_path = os.path.join(app.builder.outdir, "searchindex.js")
    with open(index_path, "r") as handle:
        contents = handle.read()
    # Drop every {...} entry group rooted at __init__ or __call__.
    for pattern in (r"{__init__.+?}", r"{__call__.+?}"):
        contents = re.sub(pattern, "{}", contents)
    with open(index_path, "w") as handle:
        handle.write(contents)
def generate_min_dependency_table(app):
    """Write min_dependency_table.rst: a reST table of minimum dependency versions."""
    from sklearn._min_dependencies import dependent_packages

    # Column widths: widest entry in each column plus 4 spaces of padding.
    package_header_len = max(len(package) for package in dependent_packages) + 4
    version_header_len = len("Minimum Version") + 4
    tags_header_len = max(len(tags) for _, tags in dependent_packages.values()) + 4

    # reST "simple table" rule: one '=' run per column, space-separated.
    rule = " ".join(
        ["=" * package_header_len, "=" * version_header_len, "=" * tags_header_len]
    )

    lines = [rule]
    lines.append(
        f"{'Dependency':<{package_header_len}} "
        f"{'Minimum Version':<{version_header_len}} "
        f"Purpose"
    )
    lines.append(rule)
    for package, (version, tags) in dependent_packages.items():
        lines.append(
            f"{package:<{package_header_len}} {version:<{version_header_len}} {tags}"
        )
    lines.append(rule)

    with (Path(".") / "min_dependency_table.rst").open("w") as f:
        f.write("\n".join(lines) + "\n")
def generate_min_dependency_substitutions(app):
    """Write min_dependency_substitutions.rst: one reST substitution per dependency."""
    from sklearn._min_dependencies import dependent_packages

    # e.g. ".. |NumpyMinVersion| replace:: 1.17.3"
    substitutions = [
        f".. |{package.capitalize()}MinVersion| replace:: {version}"
        for package, (version, _) in dependent_packages.items()
    ]
    with (Path(".") / "min_dependency_substitutions.rst").open("w") as f:
        for line in substitutions:
            f.write(line + "\n")
# Config for the sphinx_issues extension: repository whose issues/PRs the
# :issue:/:pr: roles link to. We use the issues path for PRs too, since the
# issues URL will forward to the pull request.
issues_github_path = "scikit-learn/scikit-learn"
def disable_plot_gallery_for_linkcheck(app):
    """Skip running the example gallery when only checking links."""
    if app.builder.name != "linkcheck":
        return
    # sphinx-gallery reads this setting in its own builder-inited hook, which
    # runs after this one (see setup(), which connects us at low priority).
    sphinx_gallery_conf["plot_gallery"] = "False"
def setup(app):
    """Register Sphinx event hooks for the scikit-learn docs build."""
    # Connect with a small priority (default is 500, and sphinx-gallery also
    # listens to builder-inited) so the gallery can be disabled for linkcheck
    # before sphinx-gallery reads its configuration.
    app.connect("builder-inited", disable_plot_gallery_for_linkcheck, priority=50)
    for on_init in (generate_min_dependency_table,
                    generate_min_dependency_substitutions):
        app.connect("builder-inited", on_init)
    # Post-processing steps that run once the build has finished.
    for on_finish in (make_carousel_thumbs, filter_search_index):
        app.connect("build-finished", on_finish)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve(
    "sklearn",
    "https://github.com/scikit-learn/"
    "scikit-learn/blob/{revision}/"
    "{package}/{path}#L{lineno}",
)
# Silence the matplotlib "agg backend" warning emitted while examples run
# headless during the docs build.
warnings.filterwarnings(
    "ignore",
    category=UserWarning,
    message=(
        "Matplotlib is currently using agg, which is a"
        " non-GUI backend, so cannot show the figure."
    ),
)
# Maps functions whose name collides with a class name when case is ignored
# to a distinct autosummary page filename (needed on case-insensitive
# filesystems, e.g. sklearn.cluster.dbscan vs sklearn.cluster.DBSCAN).
autosummary_filename_map = {
    "sklearn.cluster.dbscan": "dbscan-function",
    "sklearn.covariance.oas": "oas-function",
    "sklearn.decomposition.fastica": "fastica-function",
}
# Config for sphinxext.opengraph
# Canonical site URL used in OpenGraph metadata. Fixed to include the ".org"
# TLD — the previous value "https://scikit-learn/stable/" is not a valid host
# (compare ogp_image below, which already uses the full domain).
ogp_site_url = "https://scikit-learn.org/stable/"
ogp_image = "https://scikit-learn.org/stable/_static/scikit-learn-logo-small.png"
ogp_use_first_image = True
ogp_site_name = "scikit-learn"
# Config for linkcheck that checks the documentation for broken links
# ignore all links in 'whats_new' to avoid doing many github requests and
# hitting the github rate threshold that makes linkcheck take a lot of time
linkcheck_exclude_documents = [r"whats_new/.*"]
# default timeout to make some sites links fail faster
linkcheck_timeout = 10
# Allow redirects from doi.org (DOI links always redirect to the publisher).
linkcheck_allowed_redirects = {r"https://doi.org/.+": r".*"}
linkcheck_ignore = [
    # ignore links to local html files e.g. in image directive :target: field
    r"^..?/",
    # ignore links to specific pdf pages because linkcheck does not handle them
    # ('utf-8' codec can't decode byte error)
    r"http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=.*",
    "https://www.fordfoundation.org/media/2976/"
    "roads-and-bridges-the-unseen-labor-behind-our-digital-infrastructure.pdf#page=.*",
    # links falsely flagged as broken
    "https://www.researchgate.net/publication/"
    "233096619_A_Dendrite_Method_for_Cluster_Analysis",
    "https://www.researchgate.net/publication/"
    "221114584_Random_Fourier_Approximations_"
    "for_Skewed_Multiplicative_Histogram_Kernels",
    "https://doi.org/10.13140/RG.2.2.35280.02565",
    "https://www.microsoft.com/en-us/research/uploads/prod/2006/01/"
    "Bishop-Pattern-Recognition-and-Machine-Learning-2006.pdf",
    # Broken links from testimonials
    "http://www.bestofmedia.com",
    "http://www.data-publica.com/",
    "https://livelovely.com",
    "https://www.mars.com/global",
    "https://www.yhat.com",
    # Ignore some dynamically created anchors. See
    # https://github.com/sphinx-doc/sphinx/issues/9016 for more details about
    # the github example
    r"https://github.com/conda-forge/miniforge#miniforge",
    r"https://stackoverflow.com/questions/5836335/"
    "consistently-create-same-random-numpy-array/5837352#comment6712034_5837352",
]
# Use a browser-like user agent to avoid some "403 Client Error: Forbidden for
# url" errors. This is taken from the variable navigator.userAgent inside a
# browser console.
user_agent = (
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:100.0) Gecko/20100101 Firefox/100.0"
)
# Use Github token from environment variable to avoid Github rate limits when
# checking Github links
github_token = os.getenv("GITHUB_TOKEN")
if github_token is None:
    linkcheck_request_headers = {}
else:
    # Authenticate only requests that go to github.com.
    linkcheck_request_headers = {
        "https://github.com/": {"Authorization": f"token {github_token}"},
    }
DOC Added jstor link to linkcheck_ignore (#23764)
# scikit-learn documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 8 09:13:42 2010.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import warnings
import re
from datetime import datetime
from sklearn.externals._packaging.version import parse
from pathlib import Path
from io import StringIO
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.insert(0, os.path.abspath("sphinxext"))
from github_link import make_linkcode_resolve
import sphinx_gallery
from sphinx_gallery.sorting import ExampleTitleSortKey
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"numpydoc",
"sphinx.ext.linkcode",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.imgconverter",
"sphinx_gallery.gen_gallery",
"sphinx_issues",
"add_toctree_functions",
"sphinx-prompt",
"sphinxext.opengraph",
"doi_role",
"allow_nan_estimators",
"matplotlib.sphinxext.plot_directive",
]
# Produce `plot::` directives for examples that contain `import matplotlib` or
# `from matplotlib import`.
numpydoc_use_plots = True
# Options for the `::plot` directive:
# https://matplotlib.org/stable/api/sphinxext_plot_directive_api.html
plot_formats = ["png"]
plot_include_source = True
plot_html_show_formats = False
plot_html_show_source_link = False
# this is needed for some reason...
# see https://github.com/numpy/numpydoc/issues/69
numpydoc_class_members_toctree = False
# For maths, use mathjax by default and svg if NO_MATHJAX env variable is set
# (useful for viewing the doc offline)
if os.environ.get("NO_MATHJAX"):
extensions.append("sphinx.ext.imgmath")
imgmath_image_format = "svg"
mathjax_path = ""
else:
extensions.append("sphinx.ext.mathjax")
mathjax_path = "https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-chtml.js"
autodoc_default_options = {"members": True, "inherited-members": True}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["templates"]
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8'
# The main toctree document.
root_doc = "contents"
# General information about the project.
project = "scikit-learn"
copyright = f"2007 - {datetime.now().year}, scikit-learn developers (BSD License)"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import sklearn
parsed_version = parse(sklearn.__version__)
version = ".".join(parsed_version.base_version.split(".")[:2])
# The full version, including alpha/beta/rc tags.
# Removes post from release name
if parsed_version.is_postrelease:
release = parsed_version.base_version
else:
release = sklearn.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "templates", "includes", "themes"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = "literal"
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = "scikit-learn-modern"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"google_analytics": True,
"mathjax_path": mathjax_path,
"link_to_live_contributing_page": not parsed_version.is_devrelease,
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ["themes"]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "scikit-learn"
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "logos/scikit-learn-logo-small.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "logos/favicon.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["images"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "index.html"}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = "scikit-learndoc"
# If true, the reST sources are included in the HTML build as _sources/name.
html_copy_source = True
# Adds variables into templates
html_context = {}
# finds latest release highlights and places it into HTML context for
# index.html
release_highlights_dir = Path("..") / "examples" / "release_highlights"
# Finds the highlight with the latest version number
latest_highlights = sorted(release_highlights_dir.glob("plot_release_highlights_*.py"))[
-1
]
latest_highlights = latest_highlights.with_suffix("").name
html_context[
"release_highlights"
] = f"auto_examples/release_highlights/{latest_highlights}"
# get version from highlight name assuming highlights have the form
# plot_release_highlights_0_22_0
highlight_version = ".".join(latest_highlights.split("_")[-3:-1])
html_context["release_highlights_version"] = highlight_version
# redirects dictionary maps from old links to new links
redirects = {
"documentation": "index",
"auto_examples/feature_selection/plot_permutation_test_for_classification": (
"auto_examples/model_selection/plot_permutation_tests_for_classification"
),
"modules/model_persistence": "model_persistence",
"auto_examples/linear_model/plot_bayesian_ridge": (
"auto_examples/linear_model/plot_ard"
),
}
html_context["redirects"] = redirects
for old_link in redirects:
html_additional_pages[old_link] = "redirects.html"
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
"preamble": r"""
\usepackage{amsmath}\usepackage{amsfonts}\usepackage{bm}
\usepackage{morefloats}\usepackage{enumitem} \setlistdepth{10}
\let\oldhref\href
\renewcommand{\href}[2]{\oldhref{#1}{\hbox{#2}}}
"""
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
(
"contents",
"user_guide.tex",
"scikit-learn user guide",
"scikit-learn developers",
"manual",
),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "logos/scikit-learn-logo.png"
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = False
trim_doctests_flags = True
# intersphinx configuration
intersphinx_mapping = {
"python": ("https://docs.python.org/{.major}".format(sys.version_info), None),
"numpy": ("https://numpy.org/doc/stable", None),
"scipy": ("https://docs.scipy.org/doc/scipy/", None),
"matplotlib": ("https://matplotlib.org/", None),
"pandas": ("https://pandas.pydata.org/pandas-docs/stable/", None),
"joblib": ("https://joblib.readthedocs.io/en/latest/", None),
"seaborn": ("https://seaborn.pydata.org/", None),
}
# Pick the scikit-learn git branch that Binder notebooks should execute
# against: "main" for dev builds, the "<major>.<minor>.X" maintenance
# branch for released docs.
v = parse(release)
if v.release is None:
    raise ValueError(
        "Ill-formed version: {!r}. Version should follow PEP440".format(version)
    )
if v.is_devrelease:
    binder_branch = "main"
else:
    major, minor = v.release[:2]
    binder_branch = "{}.{}.X".format(major, minor)
class SubSectionTitleOrder:
    """Sort key for gallery subsections, based on each README.txt title.

    Assumes every subsection directory contains a README.txt whose title is
    underlined with dashes ('---'); the title string is used as the sort key.
    """

    def __init__(self, src_dir):
        # Root directory holding the gallery subsection folders.
        self.src_dir = src_dir
        # Captures a dash-underlined title at the start of a line.
        self.regex = re.compile(r"^([\w ]+)\n-", re.MULTILINE)

    def __repr__(self):
        return "<%s>" % (self.__class__.__name__,)

    def __call__(self, directory):
        """Return the sort key for one subsection *directory*."""
        section_path = os.path.normpath(os.path.join(self.src_dir, directory))
        # Release highlights always sort to the top.
        if os.path.basename(section_path) == "release_highlights":
            return "0"
        readme_path = os.path.join(section_path, "README.txt")
        try:
            with open(readme_path, "r") as readme_file:
                text = readme_file.read()
        except FileNotFoundError:
            # No README: fall back to the directory name itself.
            return directory
        match = self.regex.search(text)
        return match.group(1) if match is not None else directory
class SKExampleTitleSortKey(ExampleTitleSortKey):
    """Sort key that puts release-highlight examples newest-first."""

    def __call__(self, filename):
        title = super().__call__(filename)
        prefix = "plot_release_highlights_"
        if not filename.startswith(prefix):
            # Ordinary examples keep the default title-based ordering.
            return title
        # e.g. "plot_release_highlights_0_22_0.py" -> "0.22"
        version_part = filename[len(prefix):].split("_")[:2]
        # Negate so the newest version sorts first.
        return -float(".".join(version_part))
sphinx_gallery_conf = {
"doc_module": "sklearn",
"backreferences_dir": os.path.join("modules", "generated"),
"show_memory": False,
"reference_url": {"sklearn": None},
"examples_dirs": ["../examples"],
"gallery_dirs": ["auto_examples"],
"subsection_order": SubSectionTitleOrder("../examples"),
"within_subsection_order": SKExampleTitleSortKey,
"binder": {
"org": "scikit-learn",
"repo": "scikit-learn",
"binderhub_url": "https://mybinder.org",
"branch": binder_branch,
"dependencies": "./binder/requirements.txt",
"use_jupyter_lab": True,
},
# avoid generating too many cross links
"inspect_global_variables": False,
"remove_config_comments": True,
"plot_gallery": "True",
}
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {"sphx_glr_plot_classifier_comparison_001.png": 600}
# enable experimental module so that experimental estimators can be
# discovered properly by sphinx
from sklearn.experimental import enable_iterative_imputer # noqa
from sklearn.experimental import enable_halving_search_cv # noqa
def make_carousel_thumbs(app, exception):
    """Resize the home-page carousel images once the build has succeeded."""
    if exception is not None:
        # The build failed; nothing to post-process.
        return
    print("Preparing carousel images")
    out_images = os.path.join(app.builder.outdir, "_images")
    for source_name, width in carousel_thumbs.items():
        source_path = os.path.join(out_images, source_name)
        if not os.path.exists(source_path):
            continue
        thumb_path = os.path.join(out_images, source_name[:-4] + "_carousel.png")
        sphinx_gallery.gen_rst.scale_image(source_path, thumb_path, width, 190)
def filter_search_index(app, exception):
    """Strip ``__init__``/``__call__`` entries from the HTML search index."""
    if exception is not None:
        return
    # searchindex only exist when generating html
    if app.builder.name != "html":
        return
    print("Removing methods from search index")
    index_file = os.path.join(app.builder.outdir, "searchindex.js")
    with open(index_file, "r") as fh:
        content = fh.read()
    for pattern in (r"{__init__.+?}", r"{__call__.+?}"):
        content = re.sub(pattern, "{}", content)
    with open(index_file, "w") as fh:
        fh.write(content)
def generate_min_dependency_table(app):
    """Write min_dependency_table.rst, an RST grid of minimum versions."""
    from sklearn._min_dependencies import dependent_packages

    # Pad each column so the widest entry still has 4 spaces of slack.
    col_package = max(len(name) for name in dependent_packages) + 4
    col_version = len("Minimum Version") + 4
    col_tags = max(len(tags) for _, tags in dependent_packages.values()) + 4

    # RST "simple table" rule line, shared by header and footer.
    rule = " ".join(["=" * col_package, "=" * col_version, "=" * col_tags])

    buffer = StringIO()
    buffer.write(rule + "\n")
    dep_title = "Dependency"
    ver_title = "Minimum Version"
    tag_title = "Purpose"
    buffer.write(f"{dep_title:<{col_package}} {ver_title:<{col_version}} {tag_title}\n")
    buffer.write(rule + "\n")
    for name, (min_version, tags) in dependent_packages.items():
        buffer.write(f"{name:<{col_package}} {min_version:<{col_version}} {tags}\n")
    buffer.write(rule + "\n")

    # Written to the doc build's working directory for .. include:: use.
    with (Path(".") / "min_dependency_table.rst").open("w") as f:
        f.write(buffer.getvalue())
def generate_min_dependency_substitutions(app):
    """Write the |XyzMinVersion| substitution definitions for the docs."""
    from sklearn._min_dependencies import dependent_packages

    lines = []
    for name, (min_version, _) in dependent_packages.items():
        # e.g. ".. |NumpyMinVersion| replace:: 1.17.3"
        lines.append(f".. |{name.capitalize()}MinVersion| replace:: {min_version}\n")
    with (Path(".") / "min_dependency_substitutions.rst").open("w") as f:
        f.write("".join(lines))
# Config for sphinx_issues
# we use the issues path for PRs since the issues URL will forward
issues_github_path = "scikit-learn/scikit-learn"
def disable_plot_gallery_for_linkcheck(app):
    """Skip running the example gallery when only checking links."""
    if app.builder.name != "linkcheck":
        return
    sphinx_gallery_conf["plot_gallery"] = "False"
def setup(app):
    """Sphinx extension hook: register project-specific build callbacks."""
    # do not run the examples when using linkcheck by using a small priority
    # (default priority is 500 and sphinx-gallery using builder-inited event too)
    app.connect("builder-inited", disable_plot_gallery_for_linkcheck, priority=50)
    # Regenerate the dependency table / substitution files before each build.
    app.connect("builder-inited", generate_min_dependency_table)
    app.connect("builder-inited", generate_min_dependency_substitutions)
    # Post-build steps: resize carousel images and prune the search index.
    app.connect("build-finished", make_carousel_thumbs)
    app.connect("build-finished", filter_search_index)
# The following is used by sphinx.ext.linkcode to provide links to github
linkcode_resolve = make_linkcode_resolve(
"sklearn",
"https://github.com/scikit-learn/"
"scikit-learn/blob/{revision}/"
"{package}/{path}#L{lineno}",
)
warnings.filterwarnings(
"ignore",
category=UserWarning,
message=(
"Matplotlib is currently using agg, which is a"
" non-GUI backend, so cannot show the figure."
),
)
# maps functions with a class name that is indistinguishable when case is
# ignore to another filename
autosummary_filename_map = {
"sklearn.cluster.dbscan": "dbscan-function",
"sklearn.covariance.oas": "oas-function",
"sklearn.decomposition.fastica": "fastica-function",
}
# Config for sphinxext.opengraph
# Fix: the site URL was missing the ".org" TLD ("https://scikit-learn/stable/"),
# which produced an invalid og:url on every page; align it with ogp_image below.
ogp_site_url = "https://scikit-learn.org/stable/"
ogp_image = "https://scikit-learn.org/stable/_static/scikit-learn-logo-small.png"
ogp_use_first_image = True
ogp_site_name = "scikit-learn"
# Config for linkcheck that checks the documentation for broken links
# ignore all links in 'whats_new' to avoid doing many github requests and
# hitting the github rate threshold that makes linkcheck take a lot of time
linkcheck_exclude_documents = [r"whats_new/.*"]
# default timeout to make some sites links fail faster
linkcheck_timeout = 10
# Allow redirects from doi.org
linkcheck_allowed_redirects = {r"https://doi.org/.+": r".*"}
linkcheck_ignore = [
# ignore links to local html files e.g. in image directive :target: field
r"^..?/",
# ignore links to specific pdf pages because linkcheck does not handle them
# ('utf-8' codec can't decode byte error)
r"http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=.*",
"https://www.fordfoundation.org/media/2976/"
"roads-and-bridges-the-unseen-labor-behind-our-digital-infrastructure.pdf#page=.*",
# links falsely flagged as broken
"https://www.researchgate.net/publication/"
"233096619_A_Dendrite_Method_for_Cluster_Analysis",
"https://www.researchgate.net/publication/"
"221114584_Random_Fourier_Approximations_"
"for_Skewed_Multiplicative_Histogram_Kernels",
"https://doi.org/10.13140/RG.2.2.35280.02565",
"https://www.microsoft.com/en-us/research/uploads/prod/2006/01/"
"Bishop-Pattern-Recognition-and-Machine-Learning-2006.pdf",
"https://www.jstor.org/stable/2984099",
# Broken links from testimonials
"http://www.bestofmedia.com",
"http://www.data-publica.com/",
"https://livelovely.com",
"https://www.mars.com/global",
"https://www.yhat.com",
# Ignore some dynamically created anchors. See
# https://github.com/sphinx-doc/sphinx/issues/9016 for more details about
# the github example
r"https://github.com/conda-forge/miniforge#miniforge",
r"https://stackoverflow.com/questions/5836335/"
"consistently-create-same-random-numpy-array/5837352#comment6712034_5837352",
]
# Use a browser-like user agent to avoid some "403 Client Error: Forbidden for
# url" errors. This is taken from the variable navigator.userAgent inside a
# browser console.
user_agent = (
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:100.0) Gecko/20100101 Firefox/100.0"
)
# Use Github token from environment variable to avoid Github rate limits when
# checking Github links
github_token = os.getenv("GITHUB_TOKEN")
if github_token is None:
linkcheck_request_headers = {}
else:
linkcheck_request_headers = {
"https://github.com/": {"Authorization": f"token {github_token}"},
}
|
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Mycroft documentation build configuration file
#
import sys
import re
import os
import sphinx_rtd_theme
from sphinx.ext.autodoc import (
ClassLevelDocumenter, InstanceAttributeDocumenter)
def iad_add_directive_header(self, sig):
    # Delegate straight to ClassLevelDocumenter, bypassing
    # InstanceAttributeDocumenter's own header logic.
    # NOTE(review): presumably to keep autodoc from rendering a value or
    # annotation in the attribute header — confirm against the Sphinx
    # version in use.
    ClassLevelDocumenter.add_directive_header(self, sig)
# Monkeypatch the documenter so every instance attribute uses the plain header.
InstanceAttributeDocumenter.add_directive_header = iad_add_directive_header
sys.path.insert(0, os.path.abspath('../'))
# General Configuration
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon'
]
req_path = os.path.join(os.path.dirname(os.path.dirname(
os.path.realpath(__file__))), 'requirements', 'requirements.txt')
# To easily run sphinx without additional installation autodoc_mock_imports
# sets modules to mock.
# Step 1: Pull module names to mock from requirements
# Assuming package name is the same as the module name
with open(req_path) as f:
    # Normalize requirement names into importable module names (lowercase,
    # '-' -> '_').  The regex matches at the start of every line, so lines
    # that do not begin with a letter (comments, pins, blank lines) yield
    # empty strings; drop them so autodoc does not try to mock a module
    # with an empty name.
    autodoc_mock_imports = [
        name
        for name in (
            match.strip()
            for match in re.findall(
                r'^\s*[a-zA-Z_]*',
                f.read().lower().replace('-', '_'),
                flags=re.MULTILINE,
            )
        )
        if name
    ]
# Step 2: Add custom names
# Not all module names match the package name (as stated in requirements.txt)
# this adds the modules whose names don't match the package name.
autodoc_mock_imports = list(autodoc_mock_imports) + [
'adapt',
'alsaaudio',
'dateutil',
'past',
'serial',
'websocket',
'speech_recognition',
'yaml'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General Info
project = 'Mycroft'
copyright = '2017, Mycroft AI Inc.'
author = 'Mycroft AI Inc.'
version = '0.1.0'
release = '0.1.0' # Includes alpha/beta/rc tags.
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Syntax Highlighting
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
'navigation_depth': 4,
}
html_static_path = []
htmlhelp_basename = 'Mycroftdoc'
# Options for LaTeX output
latex_elements = {}
latex_documents = [
(master_doc, 'Mycroft.tex', 'Mycroft Documentation',
'Matthew Scholefield', 'manual'),
]
# Options for manual page output
man_pages = [
(master_doc, 'mycroft', 'Mycroft Documentation',
[author], 1)
]
# Options for Texinfo output
texinfo_documents = [
(master_doc, 'Mycroft', 'Mycroft Documentation',
author, 'Mycroft', 'Mycroft Artificial Intelligence Platform.',
'Miscellaneous'),
]
# Options for Napoleon
napoleon_google_docstring = True
napoleon_numpy_docstring = False
Handle mycroft-message-bus during doc generation
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Mycroft documentation build configuration file
#
import sys
import re
import os
import sphinx_rtd_theme
from sphinx.ext.autodoc import (
ClassLevelDocumenter, InstanceAttributeDocumenter)
def iad_add_directive_header(self, sig):
ClassLevelDocumenter.add_directive_header(self, sig)
InstanceAttributeDocumenter.add_directive_header = iad_add_directive_header
sys.path.insert(0, os.path.abspath('../'))
# General Configuration
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
'sphinx.ext.napoleon'
]
req_path = os.path.join(os.path.dirname(os.path.dirname(
os.path.realpath(__file__))), 'requirements', 'requirements.txt')
# To easily run sphinx without additional installation autodoc_mock_imports
# sets modules to mock.
# Step 1: Pull module names to mock from requirements
# Assuming package name is the same as the module name
with open(req_path) as f:
    # Normalize requirement names into importable module names (lowercase,
    # '-' -> '_').  The regex matches at the start of every line, so lines
    # that do not begin with a letter (comments, pins, blank lines) yield
    # empty strings; drop them so autodoc does not try to mock a module
    # with an empty name.
    autodoc_mock_imports = [
        name
        for name in (
            match.strip()
            for match in re.findall(
                r'^\s*[a-zA-Z_]*',
                f.read().lower().replace('-', '_'),
                flags=re.MULTILINE,
            )
        )
        if name
    ]
# Step 2: Add custom names
# Not all module names match the package name (as stated in requirements.txt)
# this adds the modules whose names don't match the package name.
autodoc_mock_imports = list(autodoc_mock_imports) + [
'adapt',
'alsaaudio',
'dateutil',
'past',
'serial',
'websocket',
'speech_recognition',
'yaml',
'mycroft_bus_client'
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
# General Info
project = 'Mycroft'
copyright = '2017, Mycroft AI Inc.'
author = 'Mycroft AI Inc.'
version = '0.1.0'
release = '0.1.0' # Includes alpha/beta/rc tags.
language = None
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# Syntax Highlighting
pygments_style = 'sphinx'
todo_include_todos = False
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_theme_options = {
'navigation_depth': 4,
}
html_static_path = []
htmlhelp_basename = 'Mycroftdoc'
# Options for LaTeX output
latex_elements = {}
latex_documents = [
(master_doc, 'Mycroft.tex', 'Mycroft Documentation',
'Matthew Scholefield', 'manual'),
]
# Options for manual page output
man_pages = [
(master_doc, 'mycroft', 'Mycroft Documentation',
[author], 1)
]
# Options for Texinfo output
texinfo_documents = [
(master_doc, 'Mycroft', 'Mycroft Documentation',
author, 'Mycroft', 'Mycroft Artificial Intelligence Platform.',
'Miscellaneous'),
]
# Options for Napoleon
napoleon_google_docstring = True
napoleon_numpy_docstring = False
|
# -*- coding: utf-8 -*-
import os.path
import sys
from sphinx import version_info as sphinx_version_info
# Ensure we can import "mongoc" and "taglist" extension modules.
sys.path.append(os.path.dirname(__file__))
extensions = [
'mongoc',
'taglist',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
]
# General information about the project.
project = 'MongoDB C Driver'
copyright = '2017, MongoDB, Inc'
author = 'MongoDB, Inc'
googleanalytics_id = 'UA-92642455-1'
version_path = os.path.join(os.path.dirname(__file__), '..', 'VERSION_CURRENT')
version = open(version_path).read().strip()
release_path = os.path.join(os.path.dirname(__file__), '..', 'VERSION_RELEASED')
release = open(release_path).read().strip()
release_major, release_minor, release_patch = release.split('.')
release_download = 'https://github.com/mongodb/mongo-c-driver/releases/download/{0}/mongo-c-driver-{0}.tar.gz'.format(release)
rst_prolog = """
.. |release_major| replace:: %(release_major)s
.. |release_minor| replace:: %(release_minor)s
.. |release_patch| replace:: %(release_patch)s
.. |release_download| replace:: https://github.com/mongodb/mongo-c-driver/releases/download/%(release)s/mongo-c-driver-%(release)s.tar.gz
""" % locals()
# The extension requires the "base" to contain '%s' exactly once, but we never intend to use it though
extlinks = {'release': (release_download+'%s', '')}
language = 'en'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'
# don't fetch libbson's inventory from mongoc.org during build - Debian and
# Fedora package builds must work offline - maintain a recent copy here
intersphinx_mapping = {
'bson': ('http://mongoc.org/libbson/current', 'libbson-objects.inv'),
}
# -- Options for HTML output ----------------------------------------------
html_theme_path = ['.']
html_theme = 'mongoc-theme'
html_title = html_shorttitle = 'MongoDB C Driver %s' % version
# html_favicon = None
if sphinx_version_info >= (1, 6):
smart_quotes = False
else:
html_use_smartypants = False
html_sidebars = {
'**': ['globaltoc.html'],
'errors': [], # Make more room for the big table.
'mongoc_uri_t': [], # Make more room for the big table.
}
html_show_sourcelink = False
# Note: http://www.sphinx-doc.org/en/1.5.1/config.html#confval-html_copy_source
# This will degrade the Javascript quicksearch if we ever use it.
html_copy_source = False
# -- Options for manual page output ---------------------------------------
# HACK: Just trick Sphinx's ManualPageBuilder into thinking there are pages
# configured - we'll do it dynamically in process_nodes.
man_pages = [True]
# If true, show URL addresses after external links.
#
# man_show_urls = False
from docutils.nodes import title
# To publish HTML docs at GitHub Pages, create .nojekyll file. In Sphinx 1.4 we
# could use the githubpages extension, but old Ubuntu still has Sphinx 1.3.
def create_nojekyll(app, env):
    """Drop a .nojekyll marker so GitHub Pages serves '_'-prefixed dirs."""
    if app.builder.format != 'html':
        return
    marker = os.path.join(app.builder.outdir, '.nojekyll')
    # Only the file's existence matters; the content is arbitrary.
    with open(marker, 'wt') as fh:
        fh.write('foo')
def add_ga_javascript(app, pagename, templatename, context, doctree):
    # Inject the Google Analytics bootstrap snippet into every page's
    # <head> metatags; '%s' is filled with the tracker id configured above.
    context['metatags'] = context.get('metatags', '') + """<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', '%s', 'auto');
ga('send', 'pageview');
</script>""" % googleanalytics_id
def add_canonical_link(app, pagename, templatename, context, doctree):
    """Append a rel=canonical tag pointing at the mongoc.org copy of the page."""
    tag = ('<link rel="canonical"'
           ' href="http://mongoc.org/libbson/current/%s.html"/>' % pagename)
    context['metatags'] = context.get('metatags', '') + tag
def setup(app):
    # Sphinx extension entry point: wire up the event handlers defined in
    # this file.
    app.connect('doctree-read', process_nodes)
    app.connect('env-updated', create_nojekyll)
    app.connect('html-page-context', add_ga_javascript)
    app.connect('html-page-context', add_canonical_link)
def process_nodes(app, doctree):
    # Dynamically register a man page for every document that declares a
    # ":man_page: <name>" metadata field, first discarding the placeholder
    # that tricked ManualPageBuilder into running (see man_pages above).
    if man_pages == [True]:
        man_pages.pop()
    env = app.env
    metadata = env.metadata[env.docname]
    # A page like installing.rst sets its name with ":man_page: mongoc_installing"
    page_name = metadata.get('man_page')
    if not page_name:
        print('Not creating man page for %s' % env.docname)
        return
    # Use the document's first title node as the man page description.
    page_title = find_node(doctree, title)
    # Section 3 ("library functions") man page, attributed to `author`.
    man_pages.append((env.docname, page_name, page_title.astext(), [author], 3))
def find_node(doctree, klass):
    """Return the first node of type *klass* in *doctree* (IndexError if none)."""
    hits = doctree.traverse(lambda node: isinstance(node, klass))
    if hits:
        return hits[0]
    raise IndexError("No %s in %s" % (klass, doctree))
update google analytics in docs
# -*- coding: utf-8 -*-
import os.path
import sys
from sphinx import version_info as sphinx_version_info
# Ensure we can import "mongoc" and "taglist" extension modules.
sys.path.append(os.path.dirname(__file__))
extensions = [
'mongoc',
'taglist',
'sphinx.ext.intersphinx',
'sphinx.ext.extlinks',
]
# General information about the project.
project = 'MongoDB C Driver'
copyright = '2017, MongoDB, Inc'
author = 'MongoDB, Inc'
version_path = os.path.join(os.path.dirname(__file__), '..', 'VERSION_CURRENT')
version = open(version_path).read().strip()
release_path = os.path.join(os.path.dirname(__file__), '..', 'VERSION_RELEASED')
release = open(release_path).read().strip()
release_major, release_minor, release_patch = release.split('.')
release_download = 'https://github.com/mongodb/mongo-c-driver/releases/download/{0}/mongo-c-driver-{0}.tar.gz'.format(release)
rst_prolog = """
.. |release_major| replace:: %(release_major)s
.. |release_minor| replace:: %(release_minor)s
.. |release_patch| replace:: %(release_patch)s
.. |release_download| replace:: https://github.com/mongodb/mongo-c-driver/releases/download/%(release)s/mongo-c-driver-%(release)s.tar.gz
""" % locals()
# The extension requires the "base" to contain '%s' exactly once, but we never intend to use it though
extlinks = {'release': (release_download+'%s', '')}
language = 'en'
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
master_doc = 'index'
# don't fetch libbson's inventory from mongoc.org during build - Debian and
# Fedora package builds must work offline - maintain a recent copy here
intersphinx_mapping = {
'bson': ('http://mongoc.org/libbson/current', 'libbson-objects.inv'),
}
# -- Options for HTML output ----------------------------------------------
html_theme_path = ['.']
html_theme = 'mongoc-theme'
html_title = html_shorttitle = 'MongoDB C Driver %s' % version
# html_favicon = None
if sphinx_version_info >= (1, 6):
smart_quotes = False
else:
html_use_smartypants = False
html_sidebars = {
'**': ['globaltoc.html'],
'errors': [], # Make more room for the big table.
'mongoc_uri_t': [], # Make more room for the big table.
}
html_show_sourcelink = False
# Note: http://www.sphinx-doc.org/en/1.5.1/config.html#confval-html_copy_source
# This will degrade the Javascript quicksearch if we ever use it.
html_copy_source = False
# -- Options for manual page output ---------------------------------------
# HACK: Just trick Sphinx's ManualPageBuilder into thinking there are pages
# configured - we'll do it dynamically in process_nodes.
man_pages = [True]
# If true, show URL addresses after external links.
#
# man_show_urls = False
from docutils.nodes import title
# To publish HTML docs at GitHub Pages, create .nojekyll file. In Sphinx 1.4 we
# could use the githubpages extension, but old Ubuntu still has Sphinx 1.3.
def create_nojekyll(app, env):
    # GitHub Pages runs Jekyll by default, which hides directories starting
    # with '_' (e.g. _static); an existing .nojekyll file disables that.
    # Only the file's existence matters, not its content.
    if app.builder.format == 'html':
        path = os.path.join(app.builder.outdir, '.nojekyll')
        with open(path, 'wt') as f:
            f.write('foo')
def add_ga_javascript(app, pagename, templatename, context, doctree):
    # Inject the Google Tag Manager bootstrap snippet (container GTM-JQHP,
    # hardcoded below) into every page's <head> metatags.
    context['metatags'] = context.get('metatags', '') + """<script>
(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push(
{'gtm.start': new Date().getTime(),event:'gtm.js'}
);var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'//www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-JQHP');
</script>"""
def add_canonical_link(app, pagename, templatename, context, doctree):
    """Append a rel=canonical tag pointing at the mongoc.org copy of the page."""
    tag = ('<link rel="canonical"'
           ' href="http://mongoc.org/libbson/current/%s.html"/>' % pagename)
    context['metatags'] = context.get('metatags', '') + tag
def setup(app):
    # Sphinx extension entry point: wire up the event handlers defined in
    # this file.
    app.connect('doctree-read', process_nodes)
    app.connect('env-updated', create_nojekyll)
    app.connect('html-page-context', add_ga_javascript)
    app.connect('html-page-context', add_canonical_link)
def process_nodes(app, doctree):
    # Dynamically register a man page for every document that declares a
    # ":man_page: <name>" metadata field, first discarding the placeholder
    # that tricked ManualPageBuilder into running (see man_pages above).
    if man_pages == [True]:
        man_pages.pop()
    env = app.env
    metadata = env.metadata[env.docname]
    # A page like installing.rst sets its name with ":man_page: mongoc_installing"
    page_name = metadata.get('man_page')
    if not page_name:
        print('Not creating man page for %s' % env.docname)
        return
    # Use the document's first title node as the man page description.
    page_title = find_node(doctree, title)
    # Section 3 ("library functions") man page, attributed to `author`.
    man_pages.append((env.docname, page_name, page_title.astext(), [author], 3))
def find_node(doctree, klass):
    """Return the first node of type *klass* in *doctree* (IndexError if none)."""
    hits = doctree.traverse(lambda node: isinstance(node, klass))
    if hits:
        return hits[0]
    raise IndexError("No %s in %s" % (klass, doctree))
|
import settings
from commands import _time, joke, youtube, giveaways
#Help vars
class fakeJokeModule():
    # Stand-in "command module" for the help command itself, so it can be
    # looked up in moduleLookup just like the real command modules, which
    # each expose `call` and `helpText`.
    #base call
    call = "help"
    #Help text
    helpText = "It's infinite"
# Maps each command's base call string (e.g. "time") to the module that
# implements it; fakeJokeModule makes the "help" command itself resolvable.
moduleLookup = {
    _time.call : _time,
    giveaways.call : giveaways,
    joke.call : joke,
    youtube.call : youtube,
    fakeJokeModule.call : fakeJokeModule}
def getHelp(commandString):
    #takes a string (EG: time) and returns the help for that command
    #BUGFIX: an unknown command used to raise KeyError and crash the bot;
    #fall back to the generic error/usage message instead.
    try:
        return moduleLookup[commandString].helpText
    except KeyError:
        return commandError(fakeJokeModule)
def commandError(command):
    #Build the "unknown command" reply, pointing the user at `<op>help <cmd>`.
    template = "I don't understand what you're saying\nuse `{}help {}` for more info"
    return template.format(settings.operator, command.call)
!help can now handle fake modules
import settings
from commands import _time, joke, youtube, giveaways
#Help vars
class fakeHelpModule():
    # Stand-in command "module" so that the `help` command itself resolves
    # through the command table like a real command module.
    #base call
    call = "help"
    #Help text
    helpText = "`!help [Command]`"
# Maps each command's base call string (e.g. "time") to the module that
# implements it; fakeHelpModule makes the "help" command itself resolvable.
moduleLookup = {
    _time.call : _time,
    giveaways.call : giveaways,
    joke.call : joke,
    youtube.call : youtube,
    fakeHelpModule.call : fakeHelpModule}
def getHelp(commandString):
    #Look up the requested command (EG: time) and return its help text.
    module = moduleLookup.get(commandString)
    if module is None:
        #Unknown command: reply with the generic error/usage message.
        return commandError(fakeHelpModule)
    return module.helpText
def commandError(command):
    #Build the "unknown command" reply, pointing the user at `<op>help <cmd>`.
    template = "I don't understand what you're saying\nuse `{}help {}` for more info"
    return template.format(settings.operator, command.call)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import pkg_resources
import re
import subprocess
import sys
import abc
from junitparser import JUnitXml, Failure, Error
import glob
from setuptools_scm import get_version
DOXYGEN_WARNING_REGEX = r"(?:((?:[/.]|[A-Za-z]).+?):(-?\d+):\s*([Ww]arning|[Ee]rror)|<.+>:-?\d+(?::\s*([Ww]arning|[Ee]rror))?): (.+(?:(?!\s*(?:[Nn]otice|[Ww]arning|[Ee]rror): )[^/<\n][^:\n][^/\n].+)*)|\s*([Nn]otice|[Ww]arning|[Ee]rror): (.+)\n?"
doxy_pattern = re.compile(DOXYGEN_WARNING_REGEX)
SPHINX_WARNING_REGEX = r"(.+?:(?:\d+|None)):\s*(DEBUG|INFO|WARNING|ERROR|SEVERE):\s*(.+)\n?"
sphinx_pattern = re.compile(SPHINX_WARNING_REGEX)
__version__ = get_version()
class WarningsChecker(object):
    ''' Base class for warning checkers: counts warnings in parsed content
    and validates the count against configured minimum/maximum limits. '''
    name = 'checker'

    def __init__(self, verbose=False):
        ''' Constructor

        Args:
            verbose (bool): Enable/disable verbose logging
        '''
        self.verbose = verbose
        self.reset()

    def reset(self):
        ''' Reset function (resets min, max and counter values) '''
        self.count = 0
        self.warn_min = 0
        self.warn_max = 0

    @abc.abstractmethod
    def check(self, content):
        '''
        Function for counting the number of warnings in a specific text

        Args:
            content (str): The content to parse
        '''
        return

    def set_maximum(self, maximum):
        ''' Setter function for the maximum amount of warnings

        Args:
            maximum (int): maximum amount of warnings allowed

        Raises:
            ValueError: Invalid argument (min limit higher than max limit)
        '''
        if self.warn_min == 0:
            self.warn_max = maximum
        elif self.warn_min > maximum:
            # BUGFIX: error message said "mininum".
            raise ValueError("Invalid argument: minimum limit ({min}) is higher than maximum limit ({max}). Cannot enter {value}".format(min=self.warn_min, max=self.warn_max, value=maximum))
        else:
            self.warn_max = maximum

    def get_maximum(self):
        ''' Getter function for the maximum amount of warnings

        Returns:
            int: Maximum amount of warnings
        '''
        return self.warn_max

    def set_minimum(self, minimum):
        ''' Setter function for the minimum amount of warnings

        Args:
            minimum (int): minimum amount of warnings allowed

        Raises:
            ValueError: Invalid argument (min limit higher than max limit)
        '''
        if minimum > self.warn_max:
            # BUGFIX: error message said "mininum".
            raise ValueError("Invalid argument: minimum limit ({min}) is higher than maximum limit ({max}). Cannot enter {value}".format(min=self.warn_min, max=self.warn_max, value=minimum))
        else:
            self.warn_min = minimum

    def get_minimum(self):
        ''' Getter function for the minimum amount of warnings

        Returns:
            int: Minimum amount of warnings
        '''
        return self.warn_min

    def return_count(self):
        ''' Getter function for the amount of warnings found

        Returns:
            int: Number of warnings found
        '''
        print("{count} {name} warnings found".format(count=self.count, name=self.name))
        return self.count

    def return_check_limits(self):
        ''' Function for checking whether the warning count is within the configured limits

        Returns:
            int: 0 if the amount of warnings is within limits. the count of warnings otherwise
        '''
        if self.count > self.warn_max:
            print("Number of warnings ({count}) is higher than the maximum limit ({max}). Returning error code 1.".format(count=self.count, max=self.warn_max))
            return self.count
        elif self.count < self.warn_min:
            print("Number of warnings ({count}) is lower than the minimum limit ({min}). Returning error code 1.".format(count=self.count, min=self.warn_min))
            return self.count
        else:
            print("Number of warnings ({count}) is between limits {min} and {max}. Well done.".format(count=self.count, min=self.warn_min, max=self.warn_max))
            return 0
class RegexChecker(WarningsChecker):
    ''' Checker that counts warnings by matching a regular expression. '''
    name = 'regex'
    pattern = None  # subclasses supply their compiled regex

    def __init__(self, verbose=False):
        ''' Constructor

        Args:
            verbose (bool): Enable/disable verbose logging
        '''
        super(RegexChecker, self).__init__(verbose=verbose)

    def check(self, content):
        '''
        Count every match of this checker's regex inside the given text.

        Args:
            content (str): The content to parse
        '''
        for match in re.finditer(self.pattern, content):
            self.count += 1
            if self.verbose:
                print(match.group(0).strip())
class SphinxChecker(RegexChecker):
    # Regex checker preconfigured for Sphinx build output.
    name = 'sphinx'
    pattern = sphinx_pattern
class DoxyChecker(RegexChecker):
    # Regex checker preconfigured for Doxygen build output.
    name = 'doxygen'
    pattern = doxy_pattern
class JUnitChecker(WarningsChecker):
    ''' Checker that counts failed or errored test cases in JUnit XML. '''
    name = 'junit'

    def __init__(self, verbose=False):
        ''' Constructor

        Args:
            verbose (bool): Enable/disable verbose logging
        '''
        super(JUnitChecker, self).__init__(verbose=verbose)

    def check(self, content):
        '''
        Count the failed and errored JUnit test cases in the given text.

        Args:
            content (str): The content to parse (JUnit XML report)
        '''
        result = JUnitXml.fromstring(content.encode('utf-8'))
        if self.verbose:
            # Report every failing/erroring case as "classname.testname".
            for suite in result:
                for testcase in suite:
                    if isinstance(testcase.result, (Failure, Error)):
                        print('{0}.{1}'.format(testcase.classname, testcase.name))
        # update_statistics recomputes the errors/failures totals from cases.
        result.update_statistics()
        self.count += result.errors + result.failures
class WarningsPlugin:
    ''' Front-end that fans work out to the activated warning checkers. '''

    def __init__(self, sphinx = False, doxygen = False, junit = False, verbose = False):
        '''
        Function for initializing the parsers

        Args:
            sphinx (bool, optional): enable sphinx parser
            doxygen (bool, optional): enable doxygen parser
            junit (bool, optional): enable junit parser
            verbose (bool, optional): enable verbose logging
        '''
        self.checkerList = {}
        self.verbose = verbose
        # Activate one checker per enabled flag.
        for enabled, checker_class in ((sphinx, SphinxChecker),
                                       (doxygen, DoxyChecker),
                                       (junit, JUnitChecker)):
            if enabled:
                self.activate_checker(checker_class(self.verbose))
        self.warn_min = 0
        self.warn_max = 0
        self.count = 0

    def activate_checker(self, checker):
        '''
        Activate additional checkers after initialization

        Args:
            checker (WarningsChecker): checker object
        '''
        checker.reset()
        self.checkerList[checker.name] = checker

    def get_checker(self, name):
        ''' Get checker by name

        Args:
            name (str): checker name
        Return:
            checker object (WarningsChecker)
        '''
        return self.checkerList[name]

    def check(self, content):
        '''
        Pass the given text to every activated checker for counting.

        Args:
            content (str): The text to parse
        '''
        if not self.checkerList:
            print("No checkers activated. Please use activate_checker function")
            return
        for checker in self.checkerList.values():
            checker.check(content)

    def set_maximum(self, maximum):
        ''' Setter function for the maximum amount of warnings

        Args:
            maximum (int): maximum amount of warnings allowed
        '''
        for checker in self.checkerList.values():
            checker.set_maximum(maximum)

    def set_minimum(self, minimum):
        ''' Setter function for the minimum amount of warnings

        Args:
            minimum (int): minimum amount of warnings allowed
        '''
        for checker in self.checkerList.values():
            checker.set_minimum(minimum)

    def return_count(self, name = None):
        ''' Getter function for the amount of found warnings

        If the name parameter is set, this function will return the amount of
        warnings found by that checker. If not, the function will return the sum
        of the warnings found by all registered checkers.

        Args:
            name (str): Name of the checker to query (all checkers when None)
        Returns:
            int: Amount of found warnings
        '''
        if name is None:
            self.count = sum(checker.return_count() for checker in self.checkerList.values())
        else:
            self.count = self.checkerList[name].return_count()
        return self.count

    def return_check_limits(self, name = None):
        ''' Function for determining the return value of the script

        If the name parameter is set, this function will check (and return) the
        return value of that checker. If not, this function checks whether the
        warnings for each registered checker are within the configured limits.

        Args:
            name (str): Name of the checker to check (all checkers when None)
        Return:
            int: 0 if the amount of warnings is within limits, the count otherwise
        '''
        if name is not None:
            return self.checkerList[name].return_check_limits()
        # First checker outside its limits decides the exit value.
        for checker in self.checkerList.values():
            retval = checker.return_check_limits()
            if retval != 0:
                return retval
        return 0
def warnings_wrapper(args):
    ''' Parse command line arguments and run the warnings plugin.

    Args:
        args (list): Command line arguments (without the program name).

    Returns:
        int: 0 if the warning counts are within limits, the offending count otherwise.
    '''
    import errno  # local import: os.errno was removed from Python (3.6+)
    parser = argparse.ArgumentParser(prog='mlx-warnings')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('-d', '--doxygen', dest='doxygen', action='store_true')
    group.add_argument('-s', '--sphinx', dest='sphinx', action='store_true')
    group.add_argument('-j', '--junit', dest='junit', action='store_true')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
    parser.add_argument('-c', '--command', dest='command', action='store_true', help='Treat program arguments as command to execute to obtain data')
    parser.add_argument('-m', '--maxwarnings', type=int, required=False, default=0,
                        help='Maximum amount of warnings accepted')
    parser.add_argument('--minwarnings', type=int, required=False, default=0,
                        help='Minimum amount of warnings accepted')
    parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=pkg_resources.require('mlx.warnings')[0].version))
    parser.add_argument('logfile', nargs='+', help='Logfile (or command) that might contain warnings')
    args = parser.parse_args(args)
    warnings = WarningsPlugin(sphinx=args.sphinx, doxygen=args.doxygen, junit=args.junit, verbose=args.verbose)
    warnings.set_maximum(args.maxwarnings)
    warnings.set_minimum(args.minwarnings)
    if args.command:
        try:
            proc = subprocess.Popen(args.logfile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
            out, err = proc.communicate()
            # Check stdout
            if out:
                try:
                    warnings.check(out.decode(encoding="utf-8"))
                    print(out.decode(encoding="utf-8"))
                except AttributeError:
                    # Output was already a str; check/print it unchanged.
                    warnings.check(out)
                    print(out)
            # Check stderr
            if err:
                try:
                    warnings.check(err.decode(encoding="utf-8"))
                    print(err.decode(encoding="utf-8"), file=sys.stderr)
                except AttributeError:
                    warnings.check(err)
                    print(err, file=sys.stderr)
        except OSError as e:
            # BUGFIX: was `e.errno == os.errno.ENOENT`; os.errno does not
            # exist on modern Python and raised AttributeError here.
            if e.errno == errno.ENOENT:
                print("It seems like program " + str(args.logfile) + " is not installed.")
            else:
                raise
    else:
        # args.logfile doesn't necessarily contain wildcards, but just to be safe, we
        # assume it does, and try to expand them.
        # This mechanism is put in place to allow wildcards to be passed on even when
        # executing the script on windows (in that case there is no shell expansion of wildcards)
        # so that the script can be used in the exact same way even when moving from one
        # OS to another.
        for file_wildcard in args.logfile:
            for logfile in glob.glob(file_wildcard):
                with open(logfile, 'r') as loghandle:
                    warnings.check(loghandle.read())
    warnings.return_count()
    return warnings.return_check_limits()
def main():
    # Script entry point: exit with the warnings check result (0 on success,
    # the offending warning count otherwise).
    sys.exit(warnings_wrapper(sys.argv[1:]))


if __name__ == '__main__':
    main()
Minor refactoring to keep codeclimate happy
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import pkg_resources
import re
import subprocess
import sys
import abc
from junitparser import JUnitXml, Failure, Error
import glob
from setuptools_scm import get_version
DOXYGEN_WARNING_REGEX = r"(?:((?:[/.]|[A-Za-z]).+?):(-?\d+):\s*([Ww]arning|[Ee]rror)|<.+>:-?\d+(?::\s*([Ww]arning|[Ee]rror))?): (.+(?:(?!\s*(?:[Nn]otice|[Ww]arning|[Ee]rror): )[^/<\n][^:\n][^/\n].+)*)|\s*([Nn]otice|[Ww]arning|[Ee]rror): (.+)\n?"
doxy_pattern = re.compile(DOXYGEN_WARNING_REGEX)
SPHINX_WARNING_REGEX = r"(.+?:(?:\d+|None)):\s*(DEBUG|INFO|WARNING|ERROR|SEVERE):\s*(.+)\n?"
sphinx_pattern = re.compile(SPHINX_WARNING_REGEX)
__version__ = get_version()
class WarningsChecker(object):
    ''' Base class for warning checkers: counts warnings in parsed content
    and validates the count against configured minimum/maximum limits. '''
    name = 'checker'

    def __init__(self, verbose=False):
        ''' Constructor

        Args:
            verbose (bool): Enable/disable verbose logging
        '''
        self.verbose = verbose
        self.reset()

    def reset(self):
        ''' Reset function (resets min, max and counter values) '''
        self.count = 0
        self.warn_min = 0
        self.warn_max = 0

    @abc.abstractmethod
    def check(self, content):
        '''
        Function for counting the number of warnings in a specific text

        Args:
            content (str): The content to parse
        '''
        return

    def set_maximum(self, maximum):
        ''' Setter function for the maximum amount of warnings

        Args:
            maximum (int): maximum amount of warnings allowed

        Raises:
            ValueError: Invalid argument (min limit higher than max limit)
        '''
        if self.warn_min == 0:
            self.warn_max = maximum
        elif self.warn_min > maximum:
            # BUGFIX: error message said "mininum".
            raise ValueError("Invalid argument: minimum limit ({min}) is higher than maximum limit ({max}). Cannot enter {value}".format(min=self.warn_min, max=self.warn_max, value=maximum))
        else:
            self.warn_max = maximum

    def get_maximum(self):
        ''' Getter function for the maximum amount of warnings

        Returns:
            int: Maximum amount of warnings
        '''
        return self.warn_max

    def set_minimum(self, minimum):
        ''' Setter function for the minimum amount of warnings

        Args:
            minimum (int): minimum amount of warnings allowed

        Raises:
            ValueError: Invalid argument (min limit higher than max limit)
        '''
        if minimum > self.warn_max:
            # BUGFIX: error message said "mininum".
            raise ValueError("Invalid argument: minimum limit ({min}) is higher than maximum limit ({max}). Cannot enter {value}".format(min=self.warn_min, max=self.warn_max, value=minimum))
        else:
            self.warn_min = minimum

    def get_minimum(self):
        ''' Getter function for the minimum amount of warnings

        Returns:
            int: Minimum amount of warnings
        '''
        return self.warn_min

    def return_count(self):
        ''' Getter function for the amount of warnings found

        Returns:
            int: Number of warnings found
        '''
        print("{count} {name} warnings found".format(count=self.count, name=self.name))
        return self.count

    def return_check_limits(self):
        ''' Function for checking whether the warning count is within the configured limits

        Returns:
            int: 0 if the amount of warnings is within limits. the count of warnings otherwise
        '''
        if self.count > self.warn_max:
            print("Number of warnings ({count}) is higher than the maximum limit ({max}). Returning error code 1.".format(count=self.count, max=self.warn_max))
            return self.count
        elif self.count < self.warn_min:
            print("Number of warnings ({count}) is lower than the minimum limit ({min}). Returning error code 1.".format(count=self.count, min=self.warn_min))
            return self.count
        else:
            print("Number of warnings ({count}) is between limits {min} and {max}. Well done.".format(count=self.count, min=self.warn_min, max=self.warn_max))
            return 0
class RegexChecker(WarningsChecker):
    ''' Checker that counts warnings by matching a regular expression. '''
    name = 'regex'
    pattern = None  # subclasses supply their compiled regex

    def __init__(self, verbose=False):
        ''' Constructor

        Args:
            verbose (bool): Enable/disable verbose logging
        '''
        super(RegexChecker, self).__init__(verbose=verbose)

    def check(self, content):
        '''
        Count every match of this checker's regex inside the given text.

        Args:
            content (str): The content to parse
        '''
        for match in re.finditer(self.pattern, content):
            self.count += 1
            if self.verbose:
                print(match.group(0).strip())
class SphinxChecker(RegexChecker):
    # Regex checker preconfigured for Sphinx build output.
    name = 'sphinx'
    pattern = sphinx_pattern
class DoxyChecker(RegexChecker):
    # Regex checker preconfigured for Doxygen build output.
    name = 'doxygen'
    pattern = doxy_pattern
class JUnitChecker(WarningsChecker):
    ''' Checker that counts failed or errored test cases in JUnit XML. '''
    name = 'junit'

    def __init__(self, verbose=False):
        ''' Constructor

        Args:
            verbose (bool): Enable/disable verbose logging
        '''
        super(JUnitChecker, self).__init__(verbose=verbose)

    def check(self, content):
        '''
        Count the failed and errored JUnit test cases in the given text.

        Args:
            content (str): The content to parse (JUnit XML report)
        '''
        result = JUnitXml.fromstring(content.encode('utf-8'))
        if self.verbose:
            # Report every failing/erroring case as "classname.testname".
            for suite in result:
                for testcase in suite:
                    if isinstance(testcase.result, (Failure, Error)):
                        print('{0}.{1}'.format(testcase.classname, testcase.name))
        # update_statistics recomputes the errors/failures totals from cases.
        result.update_statistics()
        self.count += result.errors + result.failures
class WarningsPlugin:
    ''' Front-end that fans work out to the activated warning checkers. '''

    def __init__(self, sphinx = False, doxygen = False, junit = False, verbose = False):
        '''
        Function for initializing the parsers

        Args:
            sphinx (bool, optional): enable sphinx parser
            doxygen (bool, optional): enable doxygen parser
            junit (bool, optional): enable junit parser
            verbose (bool, optional): enable verbose logging
        '''
        self.checkerList = {}
        self.verbose = verbose
        # Activate one checker per enabled flag.
        for enabled, checker_class in ((sphinx, SphinxChecker),
                                       (doxygen, DoxyChecker),
                                       (junit, JUnitChecker)):
            if enabled:
                self.activate_checker(checker_class(self.verbose))
        self.warn_min = 0
        self.warn_max = 0
        self.count = 0

    def activate_checker(self, checker):
        '''
        Activate additional checkers after initialization

        Args:
            checker (WarningsChecker): checker object
        '''
        checker.reset()
        self.checkerList[checker.name] = checker

    def get_checker(self, name):
        ''' Get checker by name

        Args:
            name (str): checker name
        Return:
            checker object (WarningsChecker)
        '''
        return self.checkerList[name]

    def check(self, content):
        '''
        Pass the given text to every activated checker for counting.

        Args:
            content (str): The text to parse
        '''
        if not self.checkerList:
            print("No checkers activated. Please use activate_checker function")
            return
        for checker in self.checkerList.values():
            checker.check(content)

    def set_maximum(self, maximum):
        ''' Setter function for the maximum amount of warnings

        Args:
            maximum (int): maximum amount of warnings allowed
        '''
        for checker in self.checkerList.values():
            checker.set_maximum(maximum)

    def set_minimum(self, minimum):
        ''' Setter function for the minimum amount of warnings

        Args:
            minimum (int): minimum amount of warnings allowed
        '''
        for checker in self.checkerList.values():
            checker.set_minimum(minimum)

    def return_count(self, name = None):
        ''' Getter function for the amount of found warnings

        If the name parameter is set, this function will return the amount of
        warnings found by that checker. If not, the function will return the sum
        of the warnings found by all registered checkers.

        Args:
            name (str): Name of the checker to query (all checkers when None)
        Returns:
            int: Amount of found warnings
        '''
        if name is None:
            self.count = sum(checker.return_count() for checker in self.checkerList.values())
        else:
            self.count = self.checkerList[name].return_count()
        return self.count

    def return_check_limits(self, name = None):
        ''' Function for determining the return value of the script

        If the name parameter is set, this function will check (and return) the
        return value of that checker. If not, this function checks whether the
        warnings for each registered checker are within the configured limits.

        Args:
            name (str): Name of the checker to check (all checkers when None)
        Return:
            int: 0 if the amount of warnings is within limits, the count otherwise
        '''
        if name is not None:
            return self.checkerList[name].return_check_limits()
        # First checker outside its limits decides the exit value.
        for checker in self.checkerList.values():
            retval = checker.return_check_limits()
            if retval != 0:
                return retval
        return 0
def warnings_wrapper(args):
    ''' Parse the command line and delegate the actual work to warnings_exec.

    Args:
        args (list): Command line arguments (without the program name).
    Returns:
        int: 0 if the warning counts are within limits, the offending count otherwise.
    '''
    parser = argparse.ArgumentParser(prog='mlx-warnings')
    # Exactly one checker must be selected.
    checker_group = parser.add_mutually_exclusive_group(required=True)
    checker_group.add_argument('-d', '--doxygen', dest='doxygen', action='store_true')
    checker_group.add_argument('-s', '--sphinx', dest='sphinx', action='store_true')
    checker_group.add_argument('-j', '--junit', dest='junit', action='store_true')
    parser.add_argument('-v', '--verbose', dest='verbose', action='store_true')
    parser.add_argument('-c', '--command', dest='command', action='store_true',
                        help='Treat program arguments as command to execute to obtain data')
    parser.add_argument('-m', '--maxwarnings', type=int, required=False, default=0,
                        help='Maximum amount of warnings accepted')
    parser.add_argument('--minwarnings', type=int, required=False, default=0,
                        help='Minimum amount of warnings accepted')
    parser.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=pkg_resources.require('mlx.warnings')[0].version))
    parser.add_argument('logfile', nargs='+', help='Logfile (or command) that might contain warnings')
    parsed = parser.parse_args(args)
    return warnings_exec(parsed)
def warnings_exec(args):
    ''' Run the warnings plugin with parsed command line arguments.

    Args:
        args (argparse.Namespace): arguments parsed by warnings_wrapper.
    Returns:
        int: 0 if the warning counts are within limits, the offending count otherwise.
    '''
    import errno  # local import: os.errno was removed from Python (3.6+)
    warnings = WarningsPlugin(sphinx=args.sphinx, doxygen=args.doxygen, junit=args.junit, verbose=args.verbose)
    warnings.set_maximum(args.maxwarnings)
    warnings.set_minimum(args.minwarnings)
    if args.command:
        try:
            proc = subprocess.Popen(args.logfile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
            out, err = proc.communicate()
            # Check stdout
            if out:
                try:
                    warnings.check(out.decode(encoding="utf-8"))
                    print(out.decode(encoding="utf-8"))
                except AttributeError:
                    # Output was already a str; check/print it unchanged.
                    warnings.check(out)
                    print(out)
            # Check stderr
            if err:
                try:
                    warnings.check(err.decode(encoding="utf-8"))
                    print(err.decode(encoding="utf-8"), file=sys.stderr)
                except AttributeError:
                    warnings.check(err)
                    print(err, file=sys.stderr)
        except OSError as e:
            # BUGFIX: was `e.errno == os.errno.ENOENT`; os.errno does not
            # exist on modern Python and raised AttributeError here.
            if e.errno == errno.ENOENT:
                print("It seems like program " + str(args.logfile) + " is not installed.")
            else:
                raise
    else:
        # args.logfile doesn't necessarily contain wildcards, but just to be safe, we
        # assume it does, and try to expand them.
        # This mechanism is put in place to allow wildcards to be passed on even when
        # executing the script on windows (in that case there is no shell expansion of wildcards)
        # so that the script can be used in the exact same way even when moving from one
        # OS to another.
        for file_wildcard in args.logfile:
            for logfile in glob.glob(file_wildcard):
                with open(logfile, 'r') as loghandle:
                    warnings.check(loghandle.read())
    warnings.return_count()
    return warnings.return_check_limits()
def main():
    # Script entry point: exit with the warnings check result (0 on success,
    # the offending warning count otherwise).
    sys.exit(warnings_wrapper(sys.argv[1:]))


if __name__ == '__main__':
    main()
|
import numpy as np
import matplotlib.pyplot as plt
"""
Function to plot the linear regression picture of behavial loss aversion
and neural loss aversion(-betagain-betaloss) at a single coordinate of standard brain
"""
def plot_neur_beh(x, y, z, betagains, betalosses, neural):
    """
    Plot the linear regression of behavioral loss aversion against neural
    loss aversion (-betagain - betaloss) for a single voxel of the
    standard brain.

    Parameters:
    -----------
    x: the x coordinate of the voxel in standard brain
    y: the y coordinate of the voxel in standard brain
    z: the z coordinate of the voxel in standard brain
    betagains: beta gain estimates from OLS regression of a single voxel,
        4-d array (91*109*91*16)
    betalosses: beta loss estimates from OLS regression of a single voxel,
        4-d array (91*109*91*16)
    neural: behavioral loss aversion, one value per subject
        (despite the parameter name; it is plotted on the y axis)

    Returns:
    --------
    A linear regression plot: x axis is neural loss aversion, y axis is
    behavioral loss aversion, titled with the voxel coordinates and
    annotated with the regression's r and p values.
    """
    from scipy import stats  # local import: this module does not import stats at top level

    X = -betagains[x, y, z, :] - betalosses[x, y, z, :]
    # BUGFIX: Y was previously undefined (NameError); the behavioral loss
    # aversion values are supplied through the `neural` parameter.
    Y = neural
    slope, intercept, r_value, p_value, std_err = stats.linregress(X, Y)
    plt.scatter(X, Y)
    X_plot = np.linspace(-75, 130, 100)
    plt.plot(X_plot, X_plot * slope + intercept)
    plt.xlabel(r'Neural loss aversion ($-\beta loss-\beta{gain}$)', fontsize=12)
    plt.ylabel(r'Behavoiral aversion (In$\lambda$)', fontsize=12)
    plt.text(-80, 2.2, r'r = %.4f' % r_value + ', p = %.4f' % p_value, fontsize=12)
    plt.title('(' + str(x) + ', ' + str(y) + ', ' + str(z) + ')', fontsize=18)
update imports
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
"""
Function to plot the linear regression picture of behavial loss aversion
and neural loss aversion(-betagain-betaloss) at a single coordinate of standard brain
"""
def plot_neur_beh(x, y, z, betagains, betalosses, neural):
    """
    Plot the linear regression of behavioral loss aversion against neural
    loss aversion (-betagain - betaloss) for a single voxel of the
    standard brain.

    Parameters:
    -----------
    x: the x coordinate of the voxel in standard brain
    y: the y coordinate of the voxel in standard brain
    z: the z coordinate of the voxel in standard brain
    betagains: beta gain estimates from OLS regression of a single voxel,
        4-d array (91*109*91*16)
    betalosses: beta loss estimates from OLS regression of a single voxel,
        4-d array (91*109*91*16)
    neural: behavioral loss aversion, one value per subject
        (despite the parameter name; it is plotted on the y axis)

    Returns:
    --------
    A linear regression plot: x axis is neural loss aversion, y axis is
    behavioral loss aversion, titled with the voxel coordinates and
    annotated with the regression's r and p values.
    """
    X = -betagains[x, y, z, :] - betalosses[x, y, z, :]
    # BUGFIX: Y was previously undefined (NameError); the behavioral loss
    # aversion values are supplied through the `neural` parameter.
    Y = neural
    slope, intercept, r_value, p_value, std_err = stats.linregress(X, Y)
    plt.scatter(X, Y)
    X_plot = np.linspace(-75, 130, 100)
    plt.plot(X_plot, X_plot * slope + intercept)
    plt.xlabel(r'Neural loss aversion ($-\beta loss-\beta{gain}$)', fontsize=12)
    plt.ylabel(r'Behavoiral aversion (In$\lambda$)', fontsize=12)
    plt.text(-80, 2.2, r'r = %.4f' % r_value + ', p = %.4f' % p_value, fontsize=12)
    plt.title('(' + str(x) + ', ' + str(y) + ', ' + str(z) + ')', fontsize=18)
|
# Storage Management Providers
#
# Copyright (C) 2013-2014 Red Hat, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
# Authors: Jan Safranek <jsafrane@redhat.com>
#
"""
LVM management functions.
"""
from lmi.scripts.common.errors import LmiFailed
from lmi.scripts.common import get_logger
LOG = get_logger(__name__)
from lmi.scripts.storage import common
def get_lvs(ns, vgs=None):
    """
    Retrieve list of all logical volumes allocated from given volume groups.

    If no volume groups are provided, all logical volumes on the system
    are returned.

    :type vgs: list of LMIInstance/LMI_VGStoragePool or list of strings
    :param vgs: Volume Groups to examine.
    :rtype: list of LMIInstance/LMI_LVStorageExtent.
    """
    if not vgs:
        # No vgs supplied: list every LV that is not thinly provisioned.
        for extent in ns.LMI_LVStorageExtent.instances():
            if not extent.ThinlyProvisioned:
                yield extent
        return
    for group in vgs:
        group = common.str2vg(ns, group)
        LOG().debug("Getting LVs on %s", group.ElementName)
        for extent in get_vg_lvs(ns, group):
            yield extent
def get_tlvs(ns, tps=None):
    """
    Retrieve list of all thin logical volumes allocated from given thin pools.

    If no thin pools are provided, all thin LVs on the system are returned.

    :type tps: list of LMIInstance/LMI_VGStoragePool or list of strings
    :param tps: Thin Pools to examine.
    :rtype: list of LMIInstance/LMI_LVStorageExtent.
    """
    if tps:
        for tp in tps:
            # str2vg also resolves thin pools (they are a kind of pool).
            tp = common.str2vg(ns, tp)
            for tlv in get_vg_lvs(ns, tp):
                yield tlv
    else:
        # No pools supplied: every thinly provisioned extent qualifies.
        for tlv in ns.LMI_LVStorageExtent.instances():
            if tlv.ThinlyProvisioned:
                yield tlv
def create_lv(ns, vg, name, size):
    """
    Create new Logical Volume on given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to allocate the volume from.
    :type name: string
    :param name: Name of the logical volume.
    :type size: int
    :param size: Size of the logical volume in bytes.
    :rtype: LMIInstance/LMI_LVStorageExtent
    """
    pool = common.str2vg(ns, vg)
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, outparams, err) = service.SyncCreateOrModifyLV(
            ElementName=name,
            Size=size,
            InPool=pool)
    if ret == 0:
        return outparams['TheElement']
    # Prefer the provider's error text; fall back to the symbolic name of
    # the non-zero return code.
    if err:
        raise LmiFailed("Cannot create the logical volume: %s." % err)
    values = service.CreateOrModifyLV.CreateOrModifyLVValues
    raise LmiFailed("Cannot create the logical volume: %s."
            % (values.value_name(ret),))
def create_tlv(ns, tp, name, size):
    """
    Create new Thin Logical Volume on given Thin Pool.

    :type tp: LMIInstance/LMI_VGStoragePool or string
    :param tp: Thin Pool to allocate the volume from.
    :type name: string
    :param name: Name of the thin logical volume.
    :type size: int
    :param size: Virtual size of the thin logical volume in bytes.
    :rtype: LMIInstance/LMI_LVStorageExtent
    """
    tp = common.str2vg(ns, tp)
    args = {'ElementName':name,
            'ThinPool':tp,
            'Size':size}
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, outparams, err) = service.SyncCreateOrModifyThinLV(**args)
    if ret != 0:
        # Prefer the provider's error text; fall back to the raw return code.
        raise LmiFailed("Cannot create thin LV: %s." % (err if err else ret))
    return outparams['TheElement']
def delete_lv(ns, lv):
    """
    Destroy given Logical Volume.

    :type lv: LMIInstance/LMI_LVStorageExtent or string
    :param lv: Logical Volume to destroy.
    """
    extent = common.str2device(ns, lv)
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, _outparams, err) = service.SyncDeleteLV(TheElement=extent)
    if ret == 0:
        return
    # Prefer the provider's error text; fall back to the symbolic name of
    # the non-zero return code.
    if err:
        raise LmiFailed("Cannot delete the LV: %s." % err)
    raise LmiFailed("Cannot delete the LV: %s."
            % (service.DeleteLV.DeleteLVValues.value_name(ret),))
def get_vgs(ns):
    """
    Retrieve list of all volume groups on the system.

    :rtype: list of LMIInstance/LMI_VGStoragePool
    """
    LOG().debug("get_vgs: Loading list of all volume groups.")
    for pool in ns.LMI_VGStoragePool.instances():
        # Thin pools also appear as LMI_VGStoragePool instances; the
        # SpaceLimitDetermination property marks them, so skip those.
        if not pool.SpaceLimitDetermination:
            yield pool
def create_vg(ns, devices, name, extent_size=None):
    """
    Create new Volume Group from given devices.

    :type devices: list of LMIInstance/CIM_StorageExtent or list of strings
    :param devices: Devices to add to the Volume Group.
    :type name: string
    :param name: Name of the Volume Group.
    :type extent_size: int
    :param extent_size: Extent size in bytes.
    :rtype: LMIInstance/LMI_VGStoragePool
    """
    devs = [common.str2device(ns, device) for device in devices]
    args = { 'InExtents': devs,
            'ElementName': name}
    goal = None
    try:
        if extent_size:
            # create (and use) VGStorageSetting
            caps = ns.LMI_VGStorageCapabilities.first_instance()
            (ret, outparams, err) = caps.CreateVGStorageSetting(
                    InExtents=devs)
            if ret != 0:
                # Prefer the provider's error text; fall back to the
                # symbolic name of the non-zero return code.
                if err:
                    raise LmiFailed("Cannot create setting for the volume " \
                            "group: %s." % err)
                vals = caps.CreateVGStorageCapabilities.CreateVGStorageSettingValues if False else caps.CreateVGStorageSetting.CreateVGStorageSettingValues
                raise LmiFailed("Cannot create setting for the volume group:" \
                        " %s." % (vals.value_name(ret),))
            goal = outparams['Setting']
            # Fetch a modifiable copy of the setting, adjust its extent
            # size, and push the change back to the provider.
            goal = goal.to_instance()
            goal.ExtentSize = extent_size
            (ret, outparams, err) = goal.push()
            if ret != 0:
                if err:
                    raise LmiFailed("Cannot modify setting for the volume " \
                            "group: %s." % err)
                raise LmiFailed("Cannot modify setting for the volume group:" \
                        " %d." % ret)
            args['Goal'] = goal
        service = ns.LMI_StorageConfigurationService.first_instance()
        (ret, outparams, err) = service.SyncCreateOrModifyVG(**args)
        if ret != 0:
            values = service.CreateOrModifyVG.CreateOrModifyVGValues
            raise LmiFailed("Cannot create the volume group: %s."
                    % (values.value_name(ret),))
    finally:
        # The setting object is no longer needed once the VG exists (or
        # creation failed); always clean it up on the provider side.
        if goal:
            goal.delete()
    return outparams['Pool']
def create_tp(ns, name, vg, size):
    """
    Create new Thin Pool on given Volume Group.

    :type name: string
    :param name: Name of the thin pool.
    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to allocate the pool from.
    :type size: int
    :param size: Size of the thin pool in bytes.
    :rtype: LMIInstance/LMI_VGStoragePool
    """
    vg = common.str2vg(ns, vg)
    args = {'InPool':vg,
            'ElementName':name,
            'Size':size}
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, outparams, err) = service.SyncCreateOrModifyThinPool(**args)
    if ret != 0:
        # Prefer the provider's error text; fall back to the raw return code.
        raise LmiFailed("Cannot create thin pool: %s." % (err if err else ret))
    return outparams['Pool']
def delete_vg(ns, vg):
    """
    Destroy given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to delete.
    """
    pool = common.str2vg(ns, vg)
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, _outparams, err) = service.SyncDeleteVG(Pool=pool)
    if ret == 0:
        return
    # Prefer the provider's error text, fall back to the symbolic name
    # of the return code.
    if err:
        raise LmiFailed("Cannot delete the VG: %s." % err)
    raise LmiFailed("Cannot delete the VG: %s."
            % (service.DeleteVG.DeleteVGValues.value_name(ret),))
def get_vg_lvs(ns, vg):
    """
    Return list of Logical Volumes on given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to examine.
    :rtype: list of LMIInstance/LMI_LVStorageExtent
    """
    pool = common.str2vg(ns, vg)
    return pool.associators(
            AssocClass="LMI_LVAllocatedFromStoragePool")
def get_lv_vg(ns, lv):
    """
    Return Volume Group of given Logical Volume.

    :type lv: LMIInstance/LMI_LVStorageExtent or string
    :param lv: Logical Volume to examine.
    :rtype: LMIInstance/LMI_VGStoragePool
    """
    volume = common.str2device(ns, lv)
    return volume.first_associator(
            AssocClass="LMI_LVAllocatedFromStoragePool")
def get_vg_pvs(ns, vg):
    """
    Return Physical Volumes of given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to examine.
    :rtype: list of LMIInstance/CIM_StorageExtent
    """
    pool = common.str2vg(ns, vg)
    return pool.associators(
            AssocClass="LMI_VGAssociatedComponentExtent")
def get_vg_tps(ns, vg):
    """
    Return Thin Pools of given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to examine.
    :rtype: list of LMIInstance/CIM_StoragePool
    """
    pool = common.str2vg(ns, vg)
    return pool.associators(
            AssocClass="LMI_VGAllocatedFromStoragePool")
def get_tps(ns):
    """
    Retrieve list of all thin pools on the system.

    :rtype: list of LMIInstance/LMI_VGStoragePool
    """
    # Fixed: the debug message used to say "get_vgs".
    LOG().debug("get_tps: Loading list of all thin pools.")
    for vg in ns.LMI_VGStoragePool.instances():
        # Pools with SpaceLimitDetermination set are thin pools.
        if vg.SpaceLimitDetermination:
            yield vg
def get_tp_vgs(ns, tp):
    """
    Return Volume Groups of given Thin Pool.

    Alias for get_vg_tps.

    :type tp: LMIInstance/LMI_VGStoragePool or string
    :param tp: Thin Pool to examine.
    :rtype: list of LMIInstance/CIM_StoragePool
    """
    return get_vg_tps(ns, tp)
storage: dirty checks for thin provisioning support
Resolves trac tickets 276 and 277
# Storage Management Providers
#
# Copyright (C) 2013-2014 Red Hat, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of the FreeBSD Project.
#
# Authors: Jan Safranek <jsafrane@redhat.com>
#
"""
LVM management functions.
"""
from lmi.scripts.common.errors import LmiFailed
from lmi.scripts.common import get_logger
LOG = get_logger(__name__)
from lmi.scripts.storage import common
def get_lvs(ns, vgs=None):
    """
    Retrieve list of all logical volumes allocated from given volume groups.

    If no volume groups are provided, all logical volumes on the system
    are returned.

    :type vgs: list of LMIInstance/LMI_VGStoragePool or list of strings
    :param vgs: Volume Groups to examine.
    :rtype: list of LMIInstance/LMI_LVStorageExtent.
    """
    if vgs:
        for vg in vgs:
            vg = common.str2vg(ns, vg)
            LOG().debug("Getting LVs on %s", vg.ElementName)
            for lv in get_vg_lvs(ns, vg):
                yield lv
    else:
        # No vgs supplied, list all LVs.
        for lv in ns.LMI_LVStorageExtent.instances():
            # XXX workaround for https://fedorahosted.org/openlmi/ticket/277:
            # older providers do not expose ThinlyProvisioned at all; treat
            # such extents as ordinary (non-thin) LVs.
            # (Simplified: the old `elif supports_thin and ...` re-tested a
            # condition that was already known to be True.)
            supports_thin = 'ThinlyProvisioned' in lv.properties()
            if not supports_thin or not lv.ThinlyProvisioned:
                yield lv
def get_tlvs(ns, tps=None):
    """
    Retrieve list of thin logical volumes allocated from given thin pools.

    If no thin pools are provided, all thin LVs on the system are returned.

    :type tps: list of LMIInstance/LMI_VGStoragePool or list of strings
    :param tps: Thin Pools to examine.
    :rtype: list of LMIInstance/LMI_LVStorageExtent.
    """
    if tps:
        for pool in tps:
            pool = common.str2vg(ns, pool)
            for volume in get_vg_lvs(ns, pool):
                yield volume
    else:
        # No pools supplied: filter all LVs down to the thin ones.
        for volume in ns.LMI_LVStorageExtent.instances():
            if volume.ThinlyProvisioned:
                yield volume
def create_lv(ns, vg, name, size):
    """
    Create new Logical Volume on given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to allocate the volume from.
    :type name: string
    :param name: Name of the logical volume.
    :type size: int
    :param size: Size of the logical volume in bytes.
    :rtype: LMIInstance/LMI_LVStorageExtent
    """
    pool = common.str2vg(ns, vg)
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, outparams, err) = service.SyncCreateOrModifyLV(
            ElementName=name, Size=size, InPool=pool)
    if ret == 0:
        return outparams['TheElement']
    # Prefer the provider's error text, fall back to the symbolic name
    # of the return code.
    if err:
        raise LmiFailed("Cannot create the logical volume: %s." % err)
    values = service.CreateOrModifyLV.CreateOrModifyLVValues
    raise LmiFailed("Cannot create the logical volume: %s."
            % (values.value_name(ret),))
def create_tlv(ns, tp, name, size):
    """
    Create new Thin Logical Volume in given Thin Pool.

    :type tp: LMIInstance/LMI_VGStoragePool or string
    :param tp: Thin Pool to allocate the volume from.
    :type name: string
    :param name: Name of the thin logical volume.
    :type size: int
    :param size: Virtual size of the volume in bytes.
    :rtype: LMIInstance/LMI_LVStorageExtent
    """
    pool = common.str2vg(ns, tp)
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, outparams, err) = service.SyncCreateOrModifyThinLV(
            ElementName=name, ThinPool=pool, Size=size)
    if ret != 0:
        raise LmiFailed("Cannot create thin LV: %s." % (err if err else ret))
    return outparams['TheElement']
def delete_lv(ns, lv):
    """
    Destroy given Logical Volume.

    :type lv: LMIInstance/LMI_LVStorageExtent or string
    :param lv: Logical Volume to destroy.
    """
    volume = common.str2device(ns, lv)
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, _outparams, err) = service.SyncDeleteLV(TheElement=volume)
    if ret == 0:
        return
    # Prefer the provider's error text, fall back to the symbolic name
    # of the return code.
    if err:
        raise LmiFailed("Cannot delete the LV: %s." % err)
    raise LmiFailed("Cannot delete the LV: %s."
            % (service.DeleteLV.DeleteLVValues.value_name(ret),))
def get_vgs(ns):
    """
    Retrieve list of all volume groups on the system.

    Thin pools are excluded.

    :rtype: list of LMIInstance/LMI_VGStoragePool
    """
    LOG().debug("get_vgs: Loading list of all volume groups.")
    for pool in ns.LMI_VGStoragePool.instances():
        # Pools with SpaceLimitDetermination set are thin pools; skip them.
        if not pool.SpaceLimitDetermination:
            yield pool
def create_vg(ns, devices, name, extent_size=None):
    """
    Create new Volume Group from given devices.

    :type devices: list of LMIInstance/CIM_StorageExtent or list of strings
    :param devices: Devices to add to the Volume Group.
    :type name: string
    :param name: Name of the Volume Group.
    :type extent_size: int
    :param extent_size: Extent size in bytes.
    :rtype: LMIInstance/LMI_VGStoragePool
    """
    devs = [common.str2device(ns, device) for device in devices]
    args = { 'InExtents': devs,
             'ElementName': name}
    goal = None
    try:
        if extent_size:
            # create (and use) VGStorageSetting
            caps = ns.LMI_VGStorageCapabilities.first_instance()
            (ret, outparams, err) = caps.CreateVGStorageSetting(
                    InExtents=devs)
            if ret != 0:
                if err:
                    raise LmiFailed("Cannot create setting for the volume " \
                            "group: %s." % err)
                vals = caps.CreateVGStorageSetting.CreateVGStorageSettingValues
                raise LmiFailed("Cannot create setting for the volume group:" \
                        " %s." % (vals.value_name(ret),))
            # Pull the created setting and adjust its extent size before
            # pushing it back to the provider.
            goal = outparams['Setting']
            goal = goal.to_instance()
            goal.ExtentSize = extent_size
            (ret, outparams, err) = goal.push()
            if ret != 0:
                if err:
                    raise LmiFailed("Cannot modify setting for the volume " \
                            "group: %s." % err)
                raise LmiFailed("Cannot modify setting for the volume group:" \
                        " %d." % ret)
            args['Goal'] = goal
        service = ns.LMI_StorageConfigurationService.first_instance()
        (ret, outparams, err) = service.SyncCreateOrModifyVG(**args)
        if ret != 0:
            values = service.CreateOrModifyVG.CreateOrModifyVGValues
            raise LmiFailed("Cannot create the volume group: %s."
                    % (values.value_name(ret),))
    finally:
        # The temporary setting is not needed once the VG exists (or the
        # creation failed); remove it from the provider.
        if goal:
            goal.delete()
    return outparams['Pool']
def create_tp(ns, name, vg, size):
    """
    Create new Thin Pool in given Volume Group.

    :type name: string
    :param name: Name of the new thin pool.
    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to allocate the pool from.
    :type size: int
    :param size: Size of the thin pool in bytes.
    :rtype: LMIInstance/LMI_VGStoragePool
    """
    pool = common.str2vg(ns, vg)
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, outparams, err) = service.SyncCreateOrModifyThinPool(
            InPool=pool, ElementName=name, Size=size)
    if ret != 0:
        raise LmiFailed("Cannot create thin pool: %s." % (err if err else ret))
    return outparams['Pool']
def delete_vg(ns, vg):
    """
    Destroy given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to delete.
    """
    pool = common.str2vg(ns, vg)
    service = ns.LMI_StorageConfigurationService.first_instance()
    (ret, _outparams, err) = service.SyncDeleteVG(Pool=pool)
    if ret == 0:
        return
    # Prefer the provider's error text, fall back to the symbolic name
    # of the return code.
    if err:
        raise LmiFailed("Cannot delete the VG: %s." % err)
    raise LmiFailed("Cannot delete the VG: %s."
            % (service.DeleteVG.DeleteVGValues.value_name(ret),))
def get_vg_lvs(ns, vg):
    """
    Return list of Logical Volumes on given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to examine.
    :rtype: list of LMIInstance/LMI_LVStorageExtent
    """
    pool = common.str2vg(ns, vg)
    return pool.associators(
            AssocClass="LMI_LVAllocatedFromStoragePool")
def get_lv_vg(ns, lv):
    """
    Return Volume Group of given Logical Volume.

    :type lv: LMIInstance/LMI_LVStorageExtent or string
    :param lv: Logical Volume to examine.
    :rtype: LMIInstance/LMI_VGStoragePool
    """
    volume = common.str2device(ns, lv)
    return volume.first_associator(
            AssocClass="LMI_LVAllocatedFromStoragePool")
def get_vg_pvs(ns, vg):
    """
    Return Physical Volumes of given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to examine.
    :rtype: list of LMIInstance/CIM_StorageExtent
    """
    pool = common.str2vg(ns, vg)
    return pool.associators(
            AssocClass="LMI_VGAssociatedComponentExtent")
def get_vg_tps(ns, vg):
    """
    Return Thin Pools of given Volume Group.

    :type vg: LMIInstance/LMI_VGStoragePool or string
    :param vg: Volume Group to examine.
    :rtype: list of LMIInstance/CIM_StoragePool
    """
    # XXX workaround for https://fedorahosted.org/openlmi/ticket/276:
    # older providers do not register the association class at all.
    assoc_class = "LMI_VGAllocatedFromStoragePool"
    if assoc_class not in ns.classes():
        return []
    pool = common.str2vg(ns, vg)
    return pool.associators(AssocClass=assoc_class)
def get_tps(ns):
    """
    Retrieve list of all thin pools on the system.

    :rtype: list of LMIInstance/LMI_VGStoragePool
    """
    # Fixed: the debug message used to say "get_vgs".
    LOG().debug("get_tps: Loading list of all thin pools.")
    for vg in ns.LMI_VGStoragePool.instances():
        # Pools with SpaceLimitDetermination set are thin pools.
        if vg.SpaceLimitDetermination:
            yield vg
def get_tp_vgs(ns, tp):
    """
    Return Volume Groups of given Thin Pool.

    Alias for get_vg_tps.

    :type tp: LMIInstance/LMI_VGStoragePool or string
    :param tp: Thin Pool to examine.
    :rtype: list of LMIInstance/CIM_StoragePool
    """
    return get_vg_tps(ns, tp)
|
from django.db import models
from django.contrib.auth.models import User
from vote.schulze import schulze
from vote.accounts.models import GitHubToken
from .validators import validate_ballot
from os import urandom
from hashlib import sha1
from datetime import datetime
import requests
import logging
import json
logger = logging.getLogger(__name__)
id_help = """You need to be an owner of the SIG-Game organization. Go
to https://github.com/organizations/siggame/teams and choose the team
you want to grant permission to. From the URL, grab the id number for
the team. The URL should look like this:
https://github.com/organizations/siggame/teams/:id
"""
class VoteManager(models.Manager):
    """Queryset helpers slicing votes by their open/closed window."""

    def closed_now(self):
        """Votes whose closing time has already passed."""
        now = datetime.now()
        return self.filter(closes__lte=now)

    def upcoming(self):
        """Votes that have not opened yet."""
        now = datetime.now()
        return self.filter(opens__gt=now)

    def open_now(self):
        """Votes currently accepting ballots."""
        now = datetime.now()
        return self.filter(opens__lte=now, closes__gt=now)

    def open_to_user(self, user):
        """Votes that are open and user needs to vote."""
        now = datetime.now()
        return self.exclude(already_voted=user).filter(
            opens__lte=now, closes__gt=now)

    def closed_to_user(self, user):
        """Votes that are open, but this user already voted."""
        now = datetime.now()
        return self.filter(already_voted=user).filter(
            opens__lte=now, closes__gt=now)
class Vote(models.Model):
    """An election open to a GitHub team between ``opens`` and ``closes``."""

    class Meta:
        permissions = (
            ("can_process_ballots", "Can process ballots"),
        )

    objects = VoteManager()

    name = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    # GitHub team whose members may vote; None skips the GitHub check.
    team_id = models.PositiveIntegerField(help_text=id_help,
                                          null=True)
    created = models.DateTimeField(auto_now_add=True)
    opens = models.DateTimeField()
    closes = models.DateTimeField()
    template = models.TextField(blank=True,
                                validators=[validate_ballot])
    method = models.TextField(blank=True)
    # Serialized result; empty until process_ballots() runs.
    result = models.TextField(blank=True)
    # NOTE(review): null=True has no effect on ManyToManyField -- confirm
    # before removing it, since that would produce a migration.
    already_voted = models.ManyToManyField(User, null=True,
                                           blank=True)

    def __unicode__(self):
        return self.name

    def needs_processed(self):
        """Return True if no result has been computed yet."""
        # Returns True if self.result is None or ''
        return not bool(self.result)

    def can_user_vote(self, user):
        """Return True if *user* may cast a ballot in this vote.

        A user may vote when they have not voted yet, the vote is
        currently open, and GitHub confirms team membership.  The check
        passes when the user has no GitHub token or no team is set.
        """
        if self.already_voted.filter(pk=user.pk).exists():
            return False
        now = datetime.now()
        if self.opens > now or self.closes < now:
            return False
        try:
            endpoint = "https://api.github.com/teams/%d/members" % self.team_id
            params = {'access_token': user.githubtoken.token}
            # Fixed typo in the log message: "reponse" -> "response".
            response = requests.get(endpoint, params=params)
            logger.info("Checking access... GitHub response: HTTP %s",
                        response.status_code)
            if response.status_code != 200:
                return False
        except GitHubToken.DoesNotExist:
            # user.githubtoken raised: the user never linked GitHub.
            logger.info("User %s doesn't have GitHub token", user.username)
        except TypeError:
            # "%d" % None raised: no team_id configured for this vote.
            logger.info("GitHub team not set for %s", str(self))
        return True

    def process_ballots(self):
        """Run the Schulze method over all ballots and store the result."""
        ballot_data = [json.loads(x.data) for x in self.ballot_set.all()]
        if ballot_data != []:
            self.result = schulze(ballot_data)
            self.save()
        return self.result
class Ballot(models.Model):
    """A single ballot, keyed by a random identifier."""

    identifier = models.CharField(max_length=40, primary_key=True)
    vote = models.ForeignKey(Vote)
    data = models.TextField()

    def __unicode__(self):
        return self.data

    def save(self, *args, **kwargs):
        """Assign a random identifier on first save, then persist."""
        if not self.identifier:
            # 40-hex-char primary key derived from 100 random bytes.
            self.identifier = sha1(urandom(100)).hexdigest()
        return super(Ballot, self).save(*args, **kwargs)
Broke up can_user_vote
We need to check permissions for viewing results...
from django.db import models
from django.contrib.auth.models import User
from vote.schulze import schulze
from vote.accounts.models import GitHubToken
from .validators import validate_ballot
from os import urandom
from hashlib import sha1
from datetime import datetime
import requests
import logging
import json
logger = logging.getLogger(__name__)
id_help = """You need to be an owner of the SIG-Game organization. Go
to https://github.com/organizations/siggame/teams and choose the team
you want to grant permission to. From the URL, grab the id number for
the team. The URL should look like this:
https://github.com/organizations/siggame/teams/:id
"""
class VoteManager(models.Manager):
    """Queryset helpers slicing votes by their open/closed window."""

    def closed_now(self):
        """Votes whose closing time has already passed."""
        now = datetime.now()
        return self.filter(closes__lte=now)

    def upcoming(self):
        """Votes that have not opened yet."""
        now = datetime.now()
        return self.filter(opens__gt=now)

    def open_now(self):
        """Votes currently accepting ballots."""
        now = datetime.now()
        return self.filter(opens__lte=now, closes__gt=now)

    def open_to_user(self, user):
        """Votes that are open and user needs to vote."""
        now = datetime.now()
        return self.exclude(already_voted=user).filter(
            opens__lte=now, closes__gt=now)

    def closed_to_user(self, user):
        """Votes that are open, but this user already voted."""
        now = datetime.now()
        return self.filter(already_voted=user).filter(
            opens__lte=now, closes__gt=now)
class Vote(models.Model):
    """An election open to a GitHub team between ``opens`` and ``closes``."""

    class Meta:
        permissions = (
            ("can_process_ballots", "Can process ballots"),
        )

    objects = VoteManager()

    name = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    # GitHub team whose members may vote; None skips the GitHub check.
    team_id = models.PositiveIntegerField(help_text=id_help,
                                          null=True)
    created = models.DateTimeField(auto_now_add=True)
    opens = models.DateTimeField()
    closes = models.DateTimeField()
    template = models.TextField(blank=True,
                                validators=[validate_ballot])
    method = models.TextField(blank=True)
    # Serialized result; empty until process_ballots() runs.
    result = models.TextField(blank=True)
    # NOTE(review): null=True has no effect on ManyToManyField -- confirm
    # before removing it, since that would produce a migration.
    already_voted = models.ManyToManyField(User, null=True,
                                           blank=True)

    def __unicode__(self):
        return self.name

    def needs_processed(self):
        """Return True if no result has been computed yet."""
        # Returns True if self.result is None or ''
        return not bool(self.result)

    def can_user_view(self, user):
        """Return True if GitHub confirms *user* belongs to this vote's team.

        The check passes when the user has no GitHub token or when no
        team is configured for this vote.
        """
        try:
            endpoint = "https://api.github.com/teams/%d/members" % self.team_id
            params = {'access_token': user.githubtoken.token}
            # Fixed typo in the log message: "reponse" -> "response".
            response = requests.get(endpoint, params=params)
            logger.info("Checking access... GitHub response: HTTP %s",
                        response.status_code)
            if response.status_code != 200:
                return False
        except GitHubToken.DoesNotExist:
            # user.githubtoken raised: the user never linked GitHub.
            logger.info("User %s doesn't have GitHub token", user.username)
        except TypeError:
            # "%d" % None raised: no team_id configured for this vote.
            logger.info("GitHub team not set for %s", str(self))
        return True

    def can_user_vote(self, user):
        """Return True if *user* may still cast a ballot in this vote."""
        if self.already_voted.filter(pk=user.pk).exists():
            return False
        now = datetime.now()
        if self.opens > now or self.closes < now:
            return False
        return self.can_user_view(user)

    def process_ballots(self):
        """Run the Schulze method over all ballots and store the result."""
        ballot_data = [json.loads(x.data) for x in self.ballot_set.all()]
        if ballot_data != []:
            self.result = schulze(ballot_data)
            self.save()
        return self.result
class Ballot(models.Model):
    """A single ballot, keyed by a random identifier."""

    identifier = models.CharField(max_length=40, primary_key=True)
    vote = models.ForeignKey(Vote)
    data = models.TextField()

    def __unicode__(self):
        return self.data

    def save(self, *args, **kwargs):
        """Assign a random identifier on first save, then persist."""
        if not self.identifier:
            # 40-hex-char primary key derived from 100 random bytes.
            self.identifier = sha1(urandom(100)).hexdigest()
        return super(Ballot, self).save(*args, **kwargs)
|
from werkzeug.exceptions import BadRequest
def validate_issue(issue):
    """Validate an issue dict, raising BadRequest on the first problem.

    Expected shape::

        {
            "id": "538471046cd78c831e93e17a",
            "title": "Issue title",
            "phrase": "Issue phrase",
            "motions": [
                {"motion": {"id": "abc123"}, "weights": {"yes": 10}},
            ]
        }
    """
    for required in ('phrase', 'title'):
        if not issue.get(required):
            raise BadRequest('Issue must have a non-empty "%s"' % required)
    if 'motions' not in issue:
        raise BadRequest('Issue must have a "motions" key')
    validate_issue_motions(issue['motions'])
def validate_issue_motions(motions):
    """Validate and normalize the "motions" list of an issue in place.

    Each entry must look like::

        {"motion": {"id": "abc123", ...}, "weights": {"yes": 2, "no": -1}}

    The nested "motion" object is stripped down to just its id.
    Raises BadRequest on the first invalid entry.
    """
    for m in motions:
        # Every entry needs a "motion" object carrying a non-empty "id".
        # (Idiom fix: use "not in" instead of "not ... in".)
        if 'motion' not in m or not m['motion'].get('id'):
            raise BadRequest('motion must have a "motion" object with a non-empty "id"')
        motion_id = m['motion']['id']
        # clean out everything except the id
        m['motion'] = {"id": motion_id}
        # Weights must be a non-empty mapping, e.g. {"yes": 2, "no": -1}.
        if 'weights' not in m or not m['weights']:
            raise BadRequest('motion %s must have a "weights" object with at least one key' % motion_id)
Include motion ids
from werkzeug.exceptions import BadRequest
def validate_issue(issue):
    """Validate an issue dict, raising BadRequest on the first problem.

    Expected shape::

        {
            "id": "538471046cd78c831e93e17a",
            "title": "Issue title",
            "phrase": "Issue phrase",
            "motions": [
                {"motion": {"motion_id": "abc123"}, "weights": {"yes": 10}},
            ]
        }
    """
    for required in ('phrase', 'title'):
        if not issue.get(required):
            raise BadRequest('Issue must have a non-empty "%s"' % required)
    if 'motions' not in issue:
        raise BadRequest('Issue must have a "motions" key')
    validate_issue_motions(issue['motions'])
def validate_issue_motions(motions):
    """Validate and normalize the "motions" list of an issue in place.

    Each entry must look like::

        {"motion": {"motion_id": "abc123", ...}, "weights": {"yes": 2}}

    The nested "motion" object is stripped down to just its motion_id,
    which is also copied to the entry's top level.
    Raises BadRequest on the first invalid entry.
    """
    for m in motions:
        # Every entry needs a "motion" object with a non-empty "motion_id".
        # (Idiom fix: use "not in" instead of "not ... in".)
        if 'motion' not in m or not m['motion'].get('motion_id'):
            raise BadRequest('motion must have a "motion" object with a non-empty "motion_id"')
        motion_id = m['motion']['motion_id']
        # clean out everything except the id
        m['motion'] = {"motion_id": motion_id}
        m['motion_id'] = motion_id
        # Weights must be a non-empty mapping, e.g. {"yes": 2, "no": -1}.
        if 'weights' not in m or not m['weights']:
            raise BadRequest('motion %s must have a "weights" object with at least one key' % motion_id)
|
# -*- coding:utf-8 -*-
from Products.CMFPlone import interfaces as Plone
from Products.CMFQuickInstallerTool import interfaces as QuickInstaller
from zope.interface import implementer
@implementer(Plone.INonInstallable)
class HiddenProfiles(object):
    """Utility hiding the package's base profile from the add-on UI."""

    def getNonInstallableProfiles(self):
        """Do not show on Plone's list of installable profiles."""
        return ['bda.plone.cart:install-base']
@implementer(QuickInstaller.INonInstallable)
class HiddenProducts(object):
    """Utility hiding the package's base profile from QuickInstaller."""

    def getNonInstallableProducts(self):
        """Do not show on QuickInstaller's list of installable products."""
        return ['bda.plone.cart:install-base']
minor formatting
# -*- coding:utf-8 -*-
from Products.CMFPlone import interfaces as Plone
from Products.CMFQuickInstallerTool import interfaces as QuickInstaller
from zope.interface import implementer
@implementer(Plone.INonInstallable)
class HiddenProfiles(object):
    """Utility hiding the package's base profile from the add-on UI."""

    def getNonInstallableProfiles(self):
        """Do not show on Plone's list of installable profiles.
        """
        return ['bda.plone.cart:install-base']
@implementer(QuickInstaller.INonInstallable)
class HiddenProducts(object):
    """Utility hiding the package's base profile from QuickInstaller."""

    def getNonInstallableProducts(self):
        """Do not show on QuickInstaller's list of installable products.
        """
        return ['bda.plone.cart:install-base']
|
# -*- coding: utf-8 -*-
from Acquisition import aq_parent
from Products.CMFPlone.interfaces import IPloneSiteRoot
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bda.plone.discount.interfaces import IDiscountSettingsEnabled
from bda.plone.orders.common import get_vendors_for
from bda.plone.orders.interfaces import IBuyable
from bda.plone.orders.interfaces import IVendor
from bda.plone.shop import message_factory as _
from operator import attrgetter
from plone.app.portlets.portlets import base
from plone.folder.interfaces import IFolder
from plone.portlets.interfaces import IPortletDataProvider
from zope.component import adapter
from zope.component import getAdapters
from zope.component.interfaces import ISite
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implementer
from zope.security import checkPermission
import plone.api
VIEW_OWN_ORDERS_PERMISSION = 'bda.plone.orders.ViewOwnOrders'
VIEW_ORDERS_PERMISSION = 'bda.plone.orders.ViewOrders'
EXPORT_ORDERS_PERMISSION = 'bda.plone.orders.ExportOrders'
MANAGE_TEAMPLETS_PERMISSION = 'bda.plone.orders.ManageTemplates'
MANAGE_DISCOUNT_PERMISSION = 'bda.plone.discount.ManageDiscount'
MANAGE_SHOP_PERMISSION = 'cmf.ManagePortal'
class IShopPortletLink(Interface):
    """Adapter interface for providing links displayed in shop portlet.

    Register an adapter for a context type to contribute a link; only
    links whose ``display`` attribute is truthy get rendered.
    """
    display = Attribute(u"Flag whether to display this link")
    url = Attribute(u"Link URL")
    title = Attribute(u"Link title")
    order = Attribute(u"Link order in listing")
    cssclass = Attribute(u"Css class for the link")
@implementer(IShopPortletLink)
@adapter(Interface)
class ShopPortletLink(object):
    """Abstract shop portlet link.

    ``display`` becomes True as soon as the current user holds any one
    of ``view_permissions`` on ``context``; subclasses fill in ``url``,
    ``title``, ``order`` and ``cssclass``.
    """

    # Immutable tuple default avoids the shared-mutable-default pitfall.
    def __init__(self, context, view_permissions=(VIEW_ORDERS_PERMISSION,)):
        self.context = context
        # Show the link if any one of the given permissions is granted.
        self.display = any(
            checkPermission(permission, context)
            for permission in view_permissions)
        self.url = self.title = None
        self.order = 0
        self.cssclass = None
class ShopPortletMyOrdersLink(ShopPortletLink):
    """Link to the current user's own orders."""

    def __init__(self, context):
        # XXX: buy items permission is meant to control whether a user can buy
        #      a specific item. Change check to whether a user is customer
        #      somewhere in the portal, which is semantically the correct way.
        super(ShopPortletMyOrdersLink, self).__init__(
            context, view_permissions=[VIEW_OWN_ORDERS_PERMISSION])
        portal = plone.api.portal.get()
        self.url = '%s/@@myorders' % portal.absolute_url()
        self.title = _('my_orders', default=u'My Orders')
        self.order = 20
        self.cssclass = 'myorders'
class ShopPortletOrdersLink(ShopPortletLink):
    """Link to the orders listing of the nearest site or vendor context."""

    def __init__(self, context):
        permissions = [VIEW_ORDERS_PERMISSION]
        super(ShopPortletOrdersLink, self).__init__(
            context, view_permissions=permissions)
        # check if authenticated user is vendor
        if self.display and not get_vendors_for():
            self.display = False

        # Find the nearest context, where this functionality can be bound to.
        def _find_context(ctx):
            # Recurse up the acquisition chain until a site or vendor is hit.
            return ctx\
                if ISite.providedBy(ctx) or IVendor.providedBy(ctx)\
                else _find_context(aq_parent(ctx))
        context = _find_context(context)
        # The title reflects the scope the listing got bound to.
        if IPloneSiteRoot.providedBy(context):
            self.title = _(
                'orders_global',
                default=u'Orders (global)'
            )
        elif ISite.providedBy(context):
            self.title = _(
                'orders_site',
                default=u'Orders (site-wide)'
            )
        elif IVendor.providedBy(context):
            self.title = _(
                'orders_vendor',
                default=u'Orders (vendor specific)'
            )
        self.url = '%s/@@orders' % context.absolute_url()
        self.order = 10
        self.cssclass = 'orders'
class ShopPortletOrdersInContextLink(ShopPortletLink):
    """Link to the orders listing bound to the current context."""

    def __init__(self, context):
        super(ShopPortletOrdersInContextLink, self).__init__(
            context, view_permissions=[VIEW_ORDERS_PERMISSION])
        # Only vendors get to see context-bound order listings.
        if self.display and not get_vendors_for():
            self.display = False
        # Climb to the parent unless the context itself can carry orders.
        contextual = IBuyable.providedBy(context) \
            or IFolder.providedBy(context) \
            or IPloneSiteRoot.providedBy(context)
        if not contextual:
            context = context.aq_inner.aq_parent
        self.url = '%s/@@orders' % context.absolute_url()
        self.title = _('orders_in_context', default=u'Orders in Context')
        self.order = 11
        self.cssclass = 'orders'
class ShopPortletBookingsLink(ShopPortletLink):
    """Link to the bookings listing of the nearest site or vendor context."""

    def __init__(self, context):
        permissions = [VIEW_ORDERS_PERMISSION]
        super(ShopPortletBookingsLink, self).__init__(
            context, view_permissions=permissions)
        # check if authenticated user is vendor
        if self.display and not get_vendors_for():
            self.display = False

        # Find the nearest context, where this functionality can be bound to.
        def _find_context(ctx):
            # Recurse up the acquisition chain until a site or vendor is hit.
            return ctx\
                if ISite.providedBy(ctx) or IVendor.providedBy(ctx)\
                else _find_context(aq_parent(ctx))
        context = _find_context(context)
        # The title reflects the scope the listing got bound to.
        if IPloneSiteRoot.providedBy(context):
            self.title = _(
                'bookings_global',
                default=u'Bookings (global)'
            )
        elif ISite.providedBy(context):
            self.title = _(
                'bookings_site',
                default=u'Bookings (site-wide)'
            )
        elif IVendor.providedBy(context):
            self.title = _(
                'bookings_vendor',
                default=u'Bookings (vendor specific)'
            )
        self.url = '%s/@@bookings' % context.absolute_url()
        self.order = 21
        self.cssclass = 'bookings'
class ShopPortletBookingsInContextLink(ShopPortletLink):
    """Link to the bookings listing bound to the current context."""

    def __init__(self, context):
        super(ShopPortletBookingsInContextLink, self).__init__(
            context, view_permissions=[VIEW_ORDERS_PERMISSION])
        # Only vendors get to see context-bound booking listings.
        if self.display and not get_vendors_for():
            self.display = False
        # Climb to the parent unless the context itself can carry bookings.
        contextual = IBuyable.providedBy(context) \
            or IFolder.providedBy(context) \
            or IPloneSiteRoot.providedBy(context)
        if not contextual:
            context = context.aq_inner.aq_parent
        self.url = '%s/@@bookings' % context.absolute_url()
        self.title = _('bookings_in_context', default=u'Bookings in Context')
        self.order = 22
        self.cssclass = 'bookings'
class ShopPortletContactsLink(ShopPortletLink):
    """Link to the site-wide contacts listing for vendors."""

    def __init__(self, context):
        permissions = [VIEW_ORDERS_PERMISSION]
        super(ShopPortletContactsLink, self).__init__(
            context, view_permissions=permissions)
        # check if authenticated user is vendor
        if self.display and not get_vendors_for():
            self.display = False
        site = plone.api.portal.get()
        self.url = '%s/@@contacts' % site.absolute_url()
        self.title = _('contacts', default=u'Contacts')
        self.order = 23
        # NOTE(review): reuses the 'bookings' css class -- looks like a
        # copy/paste leftover; confirm whether 'contacts' was intended.
        self.cssclass = 'bookings'
class ShopPortletExportOrdersLink(ShopPortletLink):
    """Link to the site-wide order export."""

    def __init__(self, context):
        super(ShopPortletExportOrdersLink, self).__init__(
            context, view_permissions=[EXPORT_ORDERS_PERMISSION])
        portal = plone.api.portal.get()
        self.url = '%s/@@exportorders' % portal.absolute_url()
        self.title = _('exportorders', default=u'Export Orders')
        self.order = 30
        self.cssclass = 'export_orders'
class ShopPortletExportOrdersItemLink(ShopPortletLink):
    """Link exporting orders for the current (non-root) item."""

    def __init__(self, context):
        super(ShopPortletExportOrdersItemLink, self).__init__(
            context, view_permissions=[EXPORT_ORDERS_PERMISSION])
        # A contextual export makes no sense on the portal root.
        if IPloneSiteRoot.providedBy(context):
            self.display = False
            return
        self.url = '%s/@@exportorders_contextual' % self.context.absolute_url()
        self.title = _(
            'exportorders_item', default=u'Export Orders on this Item')
        self.order = 40
        self.cssclass = 'export_orders_item'
class ShopPortletMailTemplatesLink(ShopPortletLink):
    """Link to notification templates of the nearest site/vendor context."""

    def __init__(self, context):
        permissions = [MANAGE_TEAMPLETS_PERMISSION]
        super(ShopPortletMailTemplatesLink, self).__init__(
            context, view_permissions=permissions)
        # ``display`` is set by the superclass from the permission check;
        # the former unconditional ``self.display = True`` override leaked
        # this link to users without the manage-templates permission.

        # Find the nearest context, where this functionality can be bound to.
        def _find_context(ctx):
            return ctx\
                if ISite.providedBy(ctx) or IVendor.providedBy(ctx)\
                else _find_context(aq_parent(ctx))
        context = _find_context(context)
        if IPloneSiteRoot.providedBy(context):
            self.title = _(
                'mailtemplates_global',
                default=u'Notification Templates (global)'
            )
        elif ISite.providedBy(context):
            self.title = _(
                'mailtemplates_site',
                default=u'Notification Templates (site-wide)'
            )
        elif IVendor.providedBy(context):
            self.title = _(
                'mailtemplates_vendor',
                default=u'Notification Templates (vendor specific)'
            )
        self.url = '%s/@@mailtemplates' % context.absolute_url()
        self.order = 50
        self.cssclass = 'mailtemplates'
class ShopPortletCartDiscountLink(ShopPortletLink):
    """Link to cart-wide discount settings, shown on site objects only."""

    def __init__(self, context):
        super(ShopPortletCartDiscountLink, self).__init__(
            context, view_permissions=[MANAGE_DISCOUNT_PERMISSION])
        # Cart discounts are only managed on site objects.
        self.display = self.display and ISite.providedBy(context)
        self.url = '%s/@@cart_discount' % context.absolute_url()
        self.title = _('cart_discount', default=u'Cart Discount')
        self.order = 60
        self.cssclass = 'cart_discount'
class ShopPortletCartItemDiscountLink(ShopPortletLink):
    """Link to per-item discount settings."""

    def __init__(self, context):
        super(ShopPortletCartItemDiscountLink, self).__init__(
            context, view_permissions=[MANAGE_DISCOUNT_PERMISSION])
        # Item discounts are managed on sites or explicitly enabled contexts.
        self.display = self.display and (
            ISite.providedBy(context)
            or IDiscountSettingsEnabled.providedBy(context))
        self.url = '%s/@@item_discount' % context.absolute_url()
        self.title = _('item_discount', default=u'Item Discount')
        self.order = 70
        self.cssclass = 'item_discount'
class ShopPortletControlpanelLink(ShopPortletLink):
    """Link to the shop control panel of the nearest site."""

    def __init__(self, context):
        permissions = [MANAGE_SHOP_PERMISSION]
        super(ShopPortletControlpanelLink, self).__init__(
            context, view_permissions=permissions)
        # ``display`` is set by the superclass from the permission check;
        # the former unconditional ``self.display = True`` override leaked
        # this link to users without the manage-portal permission.

        # Find the nearest context, where this functionality can be bound to.
        def _find_context(ctx):
            return ctx\
                if ISite.providedBy(ctx)\
                else _find_context(aq_parent(ctx))
        context = _find_context(context)
        if IPloneSiteRoot.providedBy(context):
            self.title = _(
                'shop_controlpanel_global',
                default=u'Shop Controlpanel (global)'
            )
        elif ISite.providedBy(context):
            self.title = _(
                'shop_controlpanel_site',
                default=u'Shop Controlpanel (site-wide)'
            )
        self.url = '%s/@@shop_controlpanel' % context.absolute_url()
        # NOTE(review): shares order 50 with the mail templates link --
        # confirm the intended relative ordering.
        self.order = 50
        self.cssclass = 'controlpanel'
class IShopAdminPortlet(IPortletDataProvider):
    """A portlet rendering shop portlet links.

    Marker schema: the portlet takes no configuration.
    """
@implementer(IShopAdminPortlet)
class ShopAdminAssignment(base.Assignment):
    """Portlet assignment; ``title`` shows up in the portlet management UI."""

    title = _(u'shop_portlet', default=u'Shop Portlet')
class ShopAdminRenderer(base.Renderer):
    """Render all displayable IShopPortletLink adapters, sorted by order."""

    render = ViewPageTemplateFile('admin.pt')

    @property
    def available(self):
        # Hide the whole portlet when no link is displayable.
        return bool(self.links())

    def links(self):
        """Return displayable links sorted by their ``order`` attribute."""
        candidates = (
            link for _name, link
            in getAdapters((self.context,), IShopPortletLink))
        return sorted(
            (link for link in candidates if link.display),
            key=attrgetter('order'))
class ShopAdminAddForm(base.NullAddForm):
    """Add form creating the (configuration-free) portlet assignment."""

    def create(self):
        return ShopAdminAssignment()
display is set by superclass to protect links
# -*- coding: utf-8 -*-
from Acquisition import aq_parent
from Products.CMFPlone.interfaces import IPloneSiteRoot
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bda.plone.discount.interfaces import IDiscountSettingsEnabled
from bda.plone.orders.common import get_vendors_for
from bda.plone.orders.interfaces import IBuyable
from bda.plone.orders.interfaces import IVendor
from bda.plone.shop import message_factory as _
from operator import attrgetter
from plone.app.portlets.portlets import base
from plone.folder.interfaces import IFolder
from plone.portlets.interfaces import IPortletDataProvider
from zope.component import adapter
from zope.component import getAdapters
from zope.component.interfaces import ISite
from zope.interface import Attribute
from zope.interface import Interface
from zope.interface import implementer
from zope.security import checkPermission
import plone.api
# Permission identifiers checked via zope.security.checkPermission in the
# ShopPortletLink adapters below.
VIEW_OWN_ORDERS_PERMISSION = 'bda.plone.orders.ViewOwnOrders'
VIEW_ORDERS_PERMISSION = 'bda.plone.orders.ViewOrders'
EXPORT_ORDERS_PERMISSION = 'bda.plone.orders.ExportOrders'
# NOTE(review): "TEAMPLETS" is presumably a typo for "TEMPLATES"; the name is
# kept as-is because external code may import it.
MANAGE_TEAMPLETS_PERMISSION = 'bda.plone.orders.ManageTemplates'
MANAGE_DISCOUNT_PERMISSION = 'bda.plone.discount.ManageDiscount'
MANAGE_SHOP_PERMISSION = 'cmf.ManagePortal'
class IShopPortletLink(Interface):
    """Adapter interface for providing links displayed in shop portlet.

    Named adapters for this interface are collected by
    ``ShopAdminRenderer.links`` via ``getAdapters``.
    """
    display = Attribute(u"Flag whether to display this link")
    url = Attribute(u"Link URL")
    title = Attribute(u"Link title")
    order = Attribute(u"Link order in listing")
    cssclass = Attribute(u"Css class for the link")
@implementer(IShopPortletLink)
@adapter(Interface)
class ShopPortletLink(object):
    """Abstract shop portlet link.

    Computes ``display`` from the given permissions; subclasses set ``url``,
    ``title``, ``order`` and ``cssclass`` after calling this initializer.
    """

    def __init__(self, context, view_permissions=(VIEW_ORDERS_PERMISSION,)):
        """Initialize link state for ``context``.

        :param context: Context the link adapter was looked up for.
        :param view_permissions: Iterable of permission ids. The link is
            displayed if the current user holds at least one of them.
            (Tuple default instead of the original list to avoid the
            shared-mutable-default-argument pitfall; callers may still pass
            any iterable.)
        """
        self.context = context
        # Display as soon as any one of the permissions is granted;
        # any() short-circuits like the original loop-and-break did.
        self.display = any(
            checkPermission(permission, context)
            for permission in view_permissions
        )
        self.url = self.title = None
        self.order = 0
        self.cssclass = None
class ShopPortletMyOrdersLink(ShopPortletLink):
    """Link to the authenticated user's own orders listing."""

    def __init__(self, context):
        # XXX: the "buy items" permission is meant to control whether a user
        #      can buy a specific item. The semantically correct check would
        #      be whether the user is a customer somewhere in the portal.
        super(ShopPortletMyOrdersLink, self).__init__(
            context, view_permissions=[VIEW_OWN_ORDERS_PERMISSION])
        portal = plone.api.portal.get()
        self.url = '%s/@@myorders' % portal.absolute_url()
        self.title = _('my_orders', default=u'My Orders')
        self.order = 20
        self.cssclass = 'myorders'
class ShopPortletOrdersLink(ShopPortletLink):
    """Link to the orders listing, bound to the nearest site or vendor."""

    def __init__(self, context):
        super(ShopPortletOrdersLink, self).__init__(
            context, view_permissions=[VIEW_ORDERS_PERMISSION])
        # Only vendors get to see the orders listing.
        if self.display and not get_vendors_for():
            self.display = False
        # Walk up the acquisition chain to the nearest context this
        # functionality can be bound to (a site or a vendor).
        while not (ISite.providedBy(context) or IVendor.providedBy(context)):
            context = aq_parent(context)
        if IPloneSiteRoot.providedBy(context):
            self.title = _(
                'orders_global',
                default=u'Orders (global)'
            )
        elif ISite.providedBy(context):
            self.title = _(
                'orders_site',
                default=u'Orders (site-wide)'
            )
        elif IVendor.providedBy(context):
            self.title = _(
                'orders_vendor',
                default=u'Orders (vendor specific)'
            )
        self.url = '%s/@@orders' % context.absolute_url()
        self.order = 10
        self.cssclass = 'orders'
class ShopPortletOrdersInContextLink(ShopPortletLink):
    """Link to the orders listing scoped to the current context."""

    def __init__(self, context):
        super(ShopPortletOrdersInContextLink, self).__init__(
            context, view_permissions=[VIEW_ORDERS_PERMISSION])
        # Only vendors get to see the orders listing.
        if self.display and not get_vendors_for():
            self.display = False
        # Contexts that are neither buyable, folderish nor the portal root
        # delegate to their parent.
        suitable = (IBuyable.providedBy(context)
                    or IFolder.providedBy(context)
                    or IPloneSiteRoot.providedBy(context))
        if not suitable:
            context = context.aq_inner.aq_parent
        self.url = '%s/@@orders' % context.absolute_url()
        self.title = _('orders_in_context', default=u'Orders in Context')
        self.order = 11
        self.cssclass = 'orders'
class ShopPortletBookingsLink(ShopPortletLink):
    """Link to the bookings listing, bound to the nearest site or vendor."""

    def __init__(self, context):
        super(ShopPortletBookingsLink, self).__init__(
            context, view_permissions=[VIEW_ORDERS_PERMISSION])
        # Only vendors get to see the bookings listing.
        if self.display and not get_vendors_for():
            self.display = False
        # Walk up the acquisition chain to the nearest site or vendor.
        while not (ISite.providedBy(context) or IVendor.providedBy(context)):
            context = aq_parent(context)
        if IPloneSiteRoot.providedBy(context):
            self.title = _(
                'bookings_global',
                default=u'Bookings (global)'
            )
        elif ISite.providedBy(context):
            self.title = _(
                'bookings_site',
                default=u'Bookings (site-wide)'
            )
        elif IVendor.providedBy(context):
            self.title = _(
                'bookings_vendor',
                default=u'Bookings (vendor specific)'
            )
        self.url = '%s/@@bookings' % context.absolute_url()
        self.order = 21
        self.cssclass = 'bookings'
class ShopPortletBookingsInContextLink(ShopPortletLink):
    """Link to the bookings listing scoped to the current context."""

    def __init__(self, context):
        super(ShopPortletBookingsInContextLink, self).__init__(
            context, view_permissions=[VIEW_ORDERS_PERMISSION])
        # Only vendors get to see the bookings listing.
        if self.display and not get_vendors_for():
            self.display = False
        # Contexts that are neither buyable, folderish nor the portal root
        # delegate to their parent.
        suitable = (IBuyable.providedBy(context)
                    or IFolder.providedBy(context)
                    or IPloneSiteRoot.providedBy(context))
        if not suitable:
            context = context.aq_inner.aq_parent
        self.url = '%s/@@bookings' % context.absolute_url()
        self.title = _('bookings_in_context', default=u'Bookings in Context')
        self.order = 22
        self.cssclass = 'bookings'
class ShopPortletContactsLink(ShopPortletLink):
    """Link to the portal-wide contacts listing (vendors only)."""

    def __init__(self, context):
        permissions = [VIEW_ORDERS_PERMISSION]
        super(ShopPortletContactsLink, self).__init__(
            context, view_permissions=permissions)
        # check if authenticated user is vendor
        if self.display and not get_vendors_for():
            self.display = False
        site = plone.api.portal.get()
        self.url = '%s/@@contacts' % site.absolute_url()
        self.title = _('contacts', default=u'Contacts')
        self.order = 23
        # NOTE(review): cssclass 'bookings' looks like a copy/paste leftover
        # from the bookings links -- presumably should be 'contacts'. Kept
        # unchanged because stylesheets may target it; TODO confirm.
        self.cssclass = 'bookings'
class ShopPortletExportOrdersLink(ShopPortletLink):
    """Link to the portal-wide order export view."""

    def __init__(self, context):
        super(ShopPortletExportOrdersLink, self).__init__(
            context, view_permissions=[EXPORT_ORDERS_PERMISSION])
        portal = plone.api.portal.get()
        self.url = '%s/@@exportorders' % portal.absolute_url()
        self.title = _('exportorders', default=u'Export Orders')
        self.order = 30
        self.cssclass = 'export_orders'
class ShopPortletExportOrdersItemLink(ShopPortletLink):
    """Link to the contextual order export view on a single item."""

    def __init__(self, context):
        super(ShopPortletExportOrdersItemLink, self).__init__(
            context, view_permissions=[EXPORT_ORDERS_PERMISSION])
        # There is no contextual export on the portal root itself.
        if IPloneSiteRoot.providedBy(context):
            self.display = False
            return
        self.url = '%s/@@exportorders_contextual' % self.context.absolute_url()
        self.title = _(
            'exportorders_item', default=u'Export Orders on this Item')
        self.order = 40
        self.cssclass = 'export_orders_item'
class ShopPortletMailTemplatesLink(ShopPortletLink):
    """Link to the notification template management view."""

    def __init__(self, context):
        super(ShopPortletMailTemplatesLink, self).__init__(
            context, view_permissions=[MANAGE_TEAMPLETS_PERMISSION])
        # Walk up the acquisition chain to the nearest site or vendor.
        while not (ISite.providedBy(context) or IVendor.providedBy(context)):
            context = aq_parent(context)
        if IPloneSiteRoot.providedBy(context):
            self.title = _(
                'mailtemplates_global',
                default=u'Notification Templates (global)'
            )
        elif ISite.providedBy(context):
            self.title = _(
                'mailtemplates_site',
                default=u'Notification Templates (site-wide)'
            )
        elif IVendor.providedBy(context):
            self.title = _(
                'mailtemplates_vendor',
                default=u'Notification Templates (vendor specific)'
            )
        self.url = '%s/@@mailtemplates' % context.absolute_url()
        self.order = 50
        self.cssclass = 'mailtemplates'
class ShopPortletCartDiscountLink(ShopPortletLink):
    """Link to the cart-wide discount settings."""

    def __init__(self, context):
        super(ShopPortletCartDiscountLink, self).__init__(
            context, view_permissions=[MANAGE_DISCOUNT_PERMISSION])
        # Cart discounts can only be managed on site objects.
        if self.display:
            self.display = ISite.providedBy(context)
        self.url = '%s/@@cart_discount' % context.absolute_url()
        self.title = _('cart_discount', default=u'Cart Discount')
        self.order = 60
        self.cssclass = 'cart_discount'
class ShopPortletCartItemDiscountLink(ShopPortletLink):
    """Link to the per-item discount settings."""

    def __init__(self, context):
        super(ShopPortletCartItemDiscountLink, self).__init__(
            context, view_permissions=[MANAGE_DISCOUNT_PERMISSION])
        # Item discounts are manageable on sites and on contexts explicitly
        # marked with IDiscountSettingsEnabled.
        if self.display:
            self.display = (ISite.providedBy(context)
                            or IDiscountSettingsEnabled.providedBy(context))
        self.url = '%s/@@item_discount' % context.absolute_url()
        self.title = _('item_discount', default=u'Item Discount')
        self.order = 70
        self.cssclass = 'item_discount'
class ShopPortletControlpanelLink(ShopPortletLink):
    """Link to the shop control panel on the nearest site."""

    def __init__(self, context):
        super(ShopPortletControlpanelLink, self).__init__(
            context, view_permissions=[MANAGE_SHOP_PERMISSION])
        # Walk up the acquisition chain to the nearest site.
        while not ISite.providedBy(context):
            context = aq_parent(context)
        if IPloneSiteRoot.providedBy(context):
            self.title = _(
                'shop_controlpanel_global',
                default=u'Shop Controlpanel (global)'
            )
        elif ISite.providedBy(context):
            self.title = _(
                'shop_controlpanel_site',
                default=u'Shop Controlpanel (site-wide)'
            )
        self.url = '%s/@@shop_controlpanel' % context.absolute_url()
        self.order = 50
        self.cssclass = 'controlpanel'
class IShopAdminPortlet(IPortletDataProvider):
    """A portlet rendering shop portlet links.

    Marker schema only; the portlet exposes no configurable fields.
    """
@implementer(IShopAdminPortlet)
class ShopAdminAssignment(base.Assignment):
    # ``title`` is what shows up in the portlet management UI.
    title = _(u'shop_portlet', default=u'Shop Portlet')
class ShopAdminRenderer(base.Renderer):
    """Render the shop admin portlet from all registered link adapters."""

    render = ViewPageTemplateFile('admin.pt')

    @property
    def available(self):
        # Hide the portlet entirely when no link is displayable.
        return bool(self.links())

    def links(self):
        """Return displayable ``IShopPortletLink`` adapters sorted by order."""
        adapters = getAdapters((self.context,), IShopPortletLink)
        displayable = [link for _name, link in adapters if link.display]
        return sorted(displayable, key=attrgetter('order'))
class ShopAdminAddForm(base.NullAddForm):
    """Add form; there is nothing to configure, so just create the assignment."""

    def create(self):
        assignment = ShopAdminAssignment()
        return assignment
|
# Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Define quantities that vary over the simulation.
A `Variant` object represents a scalar function of the time step. Some
**Operations** accept `Variant` values for certain parameters, such as the
``kT`` parameter to `NVT`.
"""
from hoomd import _hoomd
class Variant(_hoomd.Variant):
    """Variant base class.

    Variants define values as a function of the simulation time step. Use one of
    the built in types or define your own custom function:

    .. code:: python

        class CustomVariant(hoomd.variant.Variant):
            def __init__(self):
                hoomd.variant.Variant.__init__(self)

            def __call__(self, timestep):
                return (float(timestep)**(1 / 2))

    .. py:method:: __call__(timestep)

        Evaluate the function.

        :param timestep: The time step.
        :type timestep: int
        :return: The value of the function at the given time step.
        :rtype: float
    """
    @property
    def min(self):
        """The minimum value of this variant."""
        return self._min()

    @property
    def max(self):
        """The maximum value of this variant."""
        return self._max()

    def __getstate__(self):
        """Return the variant's picklable state (its ``__dict__``)."""
        return self.__dict__

    def __setstate__(self, state):
        """Restore pickled state; re-init the C++ base before rebinding."""
        _hoomd.Variant.__init__(self)
        self.__dict__ = state
class Constant(_hoomd.VariantConstant, Variant):
    """A constant value.

    Args:
        value (float): The value.

    `Constant` returns *value* at all time steps.

    Attributes:
        value (float): The value.
    """
    def __init__(self, value):
        # Initialize both bases explicitly; the C++ base stores ``value``.
        Variant.__init__(self)
        _hoomd.VariantConstant.__init__(self, value)
class Ramp(_hoomd.VariantRamp, Variant):
    """A linear ramp.

    Args:
        A (float): The start value.
        B (float): The end value.
        t_start (int): The start time step.
        t_ramp (int): The length of the ramp.

    `Ramp` holds the value *A* until time *t_start*. Then it ramps linearly from
    *A* to *B* over *t_ramp* steps and holds the value *B*.

    .. image:: variant-ramp.svg

    Attributes:
        A (float): The start value.
        B (float): The end value.
        t_start (int): The start time step.
        t_ramp (int): The length of the ramp.
    """
    def __init__(self, A, B, t_start, t_ramp):
        # Initialize both bases explicitly; the C++ base stores the parameters.
        Variant.__init__(self)
        _hoomd.VariantRamp.__init__(self, A, B, t_start, t_ramp)
class Cycle(_hoomd.VariantCycle, Variant):
    """A cycle of linear ramps.

    Args:
        A (float): The first value.
        B (float): The second value.
        t_start (int): The start time step.
        t_A (int): The hold time at the first value.
        t_AB (int): The time spent ramping from A to B.
        t_B (int): The hold time at the second value.
        t_BA (int): The time spent ramping from B to A.

    :py:class:`Cycle` holds the value *A* until time *t_start*. It continues
    holding that value until *t_start + t_A*. Then it ramps linearly from *A* to
    *B* over *t_AB* steps and holds the value *B* for *t_B* steps. After this,
    it ramps back from *B* to *A* over *t_BA* steps and repeats the cycle
    starting with *t_A*. :py:class:`Cycle` repeats this cycle indefinitely.

    .. image:: variant-cycle.svg

    Attributes:
        A (float): The first value.
        B (float): The second value.
        t_start (int): The start time step.
        t_A (int): The holding time at A.
        t_AB (int): The time spent ramping from A to B.
        t_B (int): The holding time at B.
        t_BA (int): The time spent ramping from B to A.
    """
    def __init__(self, A, B, t_start, t_A, t_AB, t_B, t_BA):
        # Initialize both bases explicitly; the C++ base stores the parameters.
        Variant.__init__(self)
        _hoomd.VariantCycle.__init__(self, A, B, t_start, t_A, t_AB, t_B, t_BA)
class Power(_hoomd.VariantPower, Variant):
    """An approach from initial to final value of x ^ (power).

    Args:
        A (float): The start value.
        B (float): The end value.
        power (float): The power of the approach to ``B``.
        t_start (int): The start time step.
        t_ramp (int): The length of the ramp.

    :py:class:`Power` holds the value *A* until time *t_start*. Then it
    progresses at :math:`x^{power}` from *A* to *B* over *t_ramp* steps and
    holds the value *B* after that.

    .. code-block:: python

        p = Power(2, 8, 1 / 10, 10, 20)

    .. image:: variant-power.svg

    Attributes:
        A (float): The start value.
        B (float): The end value.
        power (float): The power of the approach to ``B``.
        t_start (int): The start time step.
        t_ramp (int): The length of the ramp.
    """
    def __init__(self, A, B, power, t_start, t_ramp):
        # Initialize both bases explicitly; the C++ base stores the parameters.
        Variant.__init__(self)
        _hoomd.VariantPower.__init__(self, A, B, power, t_start, t_ramp)
Add docs for hoomd.variant.Variant methods
# Copyright (c) 2009-2019 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""Define quantities that vary over the simulation.
A `Variant` object represents a scalar function of the time step. Some
**Operations** accept `Variant` values for certain parameters, such as the
``kT`` parameter to `NVT`.
"""
from hoomd import _hoomd
class Variant(_hoomd.Variant):
    """Variant base class.

    Variants define values as a function of the simulation time step. Use one of
    the built in types or define your own custom function:

    .. code:: python

        class CustomVariant(hoomd.variant.Variant):
            def __init__(self):
                hoomd.variant.Variant.__init__(self)

            def __call__(self, timestep):
                return (float(timestep)**(1 / 2))

    .. py:method:: __call__(timestep)

        Evaluate the function.

        :param timestep: The time step.
        :type timestep: int
        :return: The value of the function at the given time step.
        :rtype: float
    """
    @property
    def min(self):
        """The minimum value of this variant."""
        return self._min()

    @property
    def max(self):
        """The maximum value of this variant."""
        return self._max()

    def __getstate__(self):
        """Get the variant's ``__dict__`` attribute."""
        return self.__dict__

    def __setstate__(self, state):
        """Restore the state of the variant."""
        # Re-initialize the C++ base before restoring the Python-side dict.
        _hoomd.Variant.__init__(self)
        self.__dict__ = state
class Constant(_hoomd.VariantConstant, Variant):
    """A constant value.

    Args:
        value (float): The value.

    `Constant` returns *value* at all time steps.

    Attributes:
        value (float): The value.
    """
    def __init__(self, value):
        # Initialize both bases explicitly; the C++ base stores ``value``.
        Variant.__init__(self)
        _hoomd.VariantConstant.__init__(self, value)
class Ramp(_hoomd.VariantRamp, Variant):
    """A linear ramp.

    Args:
        A (float): The start value.
        B (float): The end value.
        t_start (int): The start time step.
        t_ramp (int): The length of the ramp.

    `Ramp` holds the value *A* until time *t_start*. Then it ramps linearly from
    *A* to *B* over *t_ramp* steps and holds the value *B*.

    .. image:: variant-ramp.svg

    Attributes:
        A (float): The start value.
        B (float): The end value.
        t_start (int): The start time step.
        t_ramp (int): The length of the ramp.
    """
    def __init__(self, A, B, t_start, t_ramp):
        # Initialize both bases explicitly; the C++ base stores the parameters.
        Variant.__init__(self)
        _hoomd.VariantRamp.__init__(self, A, B, t_start, t_ramp)
class Cycle(_hoomd.VariantCycle, Variant):
    """A cycle of linear ramps.

    Args:
        A (float): The first value.
        B (float): The second value.
        t_start (int): The start time step.
        t_A (int): The hold time at the first value.
        t_AB (int): The time spent ramping from A to B.
        t_B (int): The hold time at the second value.
        t_BA (int): The time spent ramping from B to A.

    :py:class:`Cycle` holds the value *A* until time *t_start*. It continues
    holding that value until *t_start + t_A*. Then it ramps linearly from *A* to
    *B* over *t_AB* steps and holds the value *B* for *t_B* steps. After this,
    it ramps back from *B* to *A* over *t_BA* steps and repeats the cycle
    starting with *t_A*. :py:class:`Cycle` repeats this cycle indefinitely.

    .. image:: variant-cycle.svg

    Attributes:
        A (float): The first value.
        B (float): The second value.
        t_start (int): The start time step.
        t_A (int): The holding time at A.
        t_AB (int): The time spent ramping from A to B.
        t_B (int): The holding time at B.
        t_BA (int): The time spent ramping from B to A.
    """
    def __init__(self, A, B, t_start, t_A, t_AB, t_B, t_BA):
        # Initialize both bases explicitly; the C++ base stores the parameters.
        Variant.__init__(self)
        _hoomd.VariantCycle.__init__(self, A, B, t_start, t_A, t_AB, t_B, t_BA)
class Power(_hoomd.VariantPower, Variant):
    """An approach from initial to final value of x ^ (power).

    Args:
        A (float): The start value.
        B (float): The end value.
        power (float): The power of the approach to ``B``.
        t_start (int): The start time step.
        t_ramp (int): The length of the ramp.

    :py:class:`Power` holds the value *A* until time *t_start*. Then it
    progresses at :math:`x^{power}` from *A* to *B* over *t_ramp* steps and
    holds the value *B* after that.

    .. code-block:: python

        p = Power(2, 8, 1 / 10, 10, 20)

    .. image:: variant-power.svg

    Attributes:
        A (float): The start value.
        B (float): The end value.
        power (float): The power of the approach to ``B``.
        t_start (int): The start time step.
        t_ramp (int): The length of the ramp.
    """
    def __init__(self, A, B, power, t_start, t_ramp):
        # Initialize both bases explicitly; the C++ base stores the parameters.
        Variant.__init__(self)
        _hoomd.VariantPower.__init__(self, A, B, power, t_start, t_ramp)
|
"""The ArcGIS Server REST API, short for Representational State Transfer,
provides a simple, open Web interface to services hosted by ArcGIS Server.
All resources and operations exposed by the REST API are accessible through
a hierarchy of endpoints or Uniform Resource Locators (URLs) for each GIS
service published with ArcGIS Server."""
import cgi
import json
import mimetypes
import os
import urllib
import urllib2
import urlparse
import uuid
import geometry
import gptypes
import utils
#: User agent to report when making requests
# NOTE: reports a legacy browser token ("Mozilla/4.0") with "arcrest" in the
# comment field.
USER_AGENT = "Mozilla/4.0 (arcrest)"
# Note that nearly every class below derives from this RestURL class.
# The reasoning is that every object has an underlying URL resource on
# the REST server. Some are static or near-static, such as a folder or a
# service's definition, but some URLs are volatile and represent the
# application of an action, such as Buffering a set of points using the
# geometry service. This class attempts to provide some baseline functionality
# required by the set of operations one performs using the ArcGIS REST API,
# such as making sure the format is always set to json, parsing the json,
# keeping the result in memory as needed, and returning instances of objects
# represented by relative URLs.
class RestURL(object):
    """Represents a top-level, base REST-style URL.

    Base for nearly every class in this module: each object wraps one URL on
    the REST server, lazily fetches its contents, optionally caches them, and
    parses JSON unless ``__has_json__`` is False. (Python 2 code: ``urllib2``,
    ``urlparse``, ``basestring``, ``cgi.parse_qs``.)
    """
    __cache_request__ = False # Fetch every time or just once?
    __urldata__ = Ellipsis # What actually gets HTTP GETten
    __json_struct__ = Ellipsis # Cache for json.loads(self.__urldata__)
    __has_json__ = True # Parse the data as a json struct? Set to
                        # false for binary data, html, etc.
    __token__ = None # For token-based auth
    __lazy_fetch__ = True # Fetch when constructed, or later on?
    __parent_type__ = None # For automatically generated parent URLs
    __post__ = False # Move query string to POST
    _parent = None

    def __init__(self, url, file_data=None):
        """Create a resource for *url* (string or urlsplit-style list).

        ``file_data``, if given, is a dict of name/filehandle pairs and forces
        a multipart upload with cached results.
        """
        # Expects a urlparse.urlsplitted list as the url, but accepts a
        # string because that is easier/makes more sense everywhere.
        if isinstance(url, basestring):
            url = urlparse.urlsplit(url)
        # Ellipsis is used instead of None for the case where no data
        # is returned from the server due to an error condition -- we
        # need to differentiate between 'NULL' and 'UNDEFINED'
        self.__urldata__ = Ellipsis
        # Pull out query, whatever it may be
        urllist = list(url)
        query_dict = {}
        # parse_qs returns a dict, but every value is a list (it assumes
        # that keys can be set multiple times like ?a=1&a=2 -- this flexibility
        # is probably useful somewhere, but not here). Pull out the first
        # element of every list so when we convert back to a query string
        # it doesn't enclose all values in []
        for k, v in cgi.parse_qs(urllist[3]).iteritems():
            query_dict[k] = v[0]
            # Remember any token passed in the query string for reuse.
            if k.lower() == 'token':
                self.__token__ = v[0]
        # Set the f= flag to json (so we can interface with it)
        if self.__has_json__ is True:
            query_dict['f'] = 'json'
        if self.__token__ is not None:
            query_dict['token'] = self.__token__
        # Hack our modified query string back into URL components
        urllist[3] = urllib.urlencode(query_dict)
        self._url = urllist
        # Finally, set any file data parameters' data to local store.
        # file_data is expected to be a dictionary of name/filehandle
        # pairs if defined. And if there are any files, fetching will
        # automatically become a forced multipart upload. Also, force
        # keeping the results around; uploading data multiple times
        # is probably NEVER what anyone wants to do and file handles
        # can be exhausted.
        self._file_data = file_data
        if file_data:
            self.__cache_request__ = True
        # Nonlazy: force a fetch
        if self.__lazy_fetch__ is False and self.__cache_request__ is True:
            self._contents

    def __repr__(self):
        # Truncate very long URLs for readable reprs.
        url = self.url
        if len(url) > 100:
            url = url[:97] + "..."
        return "<%s(%r)>" % (self.__class__.__name__, url)

    def _get_subfolder(self, foldername, returntype, params={}, file_data={}):
        """Return an object of the requested type with the path relative
        to the current object's URL. Optionally, query parameters
        may be set."""
        # NOTE(review): the {} defaults are shared mutable defaults, but both
        # are only read here, never mutated, so this is safe as written.
        newurl = urlparse.urljoin(self.url, urllib.quote(foldername), False)
        # Add the key-value pairs sent in params to query string if they
        # are so defined.
        query_dict = {}
        url_tuple = urlparse.urlsplit(newurl)
        urllist = list(url_tuple)
        if params:
            # As above, pull out first element from parse_qs' values
            query_dict = dict((k, v[0]) for k, v in
                              cgi.parse_qs(urllist[3]).iteritems())
            for key, val in params.iteritems():
                # Lowercase bool string
                if isinstance(val, bool):
                    query_dict[key] = str(val).lower()
                # Special case: convert an envelope to .bbox in the bb
                # parameter
                elif isinstance(val, geometry.Envelope):
                    query_dict[key] = val.bbox
                # Another special case: strings can't be quoted/escaped at the
                # top level
                elif isinstance(val, gptypes.GPString):
                    query_dict[key] = val.value
                # Just use the wkid of SpatialReferences
                elif isinstance(val, geometry.SpatialReference):
                    query_dict[key] = val.wkid
                # If it's a list, make it a comma-separated string
                elif isinstance(val, (list, tuple)):
                    val = ",".join([str(v.id)
                                    if isinstance(v, Layer)
                                    else str(v) for v in val])
                # Ignore null values, and coerce string values (hopefully
                # everything sent in to a query has a sane __str__)
                elif val is not None:
                    query_dict[key] = str(val)
            if self.__token__ is not None:
                query_dict['token'] = self.__token__
            # Replace URL query component with newly altered component
            urllist[3] = urllib.urlencode(query_dict)
            newurl = urllist
        # Instantiate new RestURL or subclass
        rt = returntype(newurl, file_data)
        # Remind the resource where it came from
        rt.parent = self
        return rt

    def _clear_cache(self):
        # Drop both the raw and parsed caches so the next access refetches.
        self.__json_struct__ = Ellipsis
        self.__urldata__ = Ellipsis

    @property
    def url(self):
        """The URL as a string of the resource."""
        urlparts = self._url
        if self.__post__:
            urlparts = list(urlparts)
            urlparts[3] = '' # Clear out query string on POST
        return urlparse.urlunsplit(urlparts)

    @property
    def query(self):
        # Raw query-string component of the URL.
        return self._url[3]

    @property
    def _contents(self):
        """The raw contents of the URL as fetched, this is done lazily.
        For non-lazy fetching this is accessed in the object constructor."""
        if self.__urldata__ is Ellipsis or self.__cache_request__ is False:
            if self._file_data:
                # Special-case: do a multipart upload if there's file data
                self.__post__ = True
                boundary = "-"*12+str(uuid.uuid4())+"$"
                multipart_data = ''
                for k, v in cgi.parse_qs(self.query).iteritems():
                    if not isinstance(v, list):
                        v = [v]
                    for val in v:
                        multipart_data += boundary + "\r\n"
                        multipart_data += ('Content-Disposition: form-data; '
                                           'name="%s"\r\n\r\n' % k)
                        multipart_data += val + "\r\n"
                for k, v in self._file_data.iteritems():
                    fn = os.path.basename(getattr(v, 'name', 'file'))
                    # NOTE(review): mimetypes.guess_type returns a 2-tuple
                    # (type, encoding) which is always truthy, so the
                    # octet-stream fallback after `or` is dead code and ct
                    # can end up None -- TODO fix.
                    ct = (mimetypes.guess_type(fn)
                          or ("application/octet-stream",))[0]
                    multipart_data += boundary + "\r\n"
                    multipart_data += ('Content-Disposition: form-data; '
                                       'name="%s"; filename="%s"\r\n'
                                       'Content-Type:%s\r\n\r\n' %
                                       (k, fn, ct))
                    multipart_data += v.read() + "\r\n"
                multipart_data += boundary + "--\r\n\r\n"
                request = urllib2.Request(self.url, multipart_data,
                                          {'User-Agent' : USER_AGENT,
                                           'Content-Type':
                                               'multipart/form-data; '
                                               'boundary='+boundary[2:],
                                           'Content-Length':
                                               str(
                                                   len(
                                                       multipart_data))})
            else:
                # Plain GET, or POST of the query string if __post__ is set.
                request = urllib2.Request(self.url, self.query
                                          if self.__post__
                                          else None,
                                          {'User-Agent' : USER_AGENT})
            handle = urllib2.urlopen(request)
            # Handle the special case of a redirect (only follow once) --
            # Note that only the first 3 components (protocol, hostname, path)
            # are altered as component 4 is the query string, which can get
            # clobbered by the server.
            fetched_url = list(urlparse.urlsplit(handle.url)[:3])
            if fetched_url != list(self._url[:3]):
                self._url[:3] = fetched_url
                # Recurse once with the redirected URL.
                return self._contents
            # No redirect, proceed as usual.
            self.__urldata__ = handle.read()
        data = self.__urldata__
        # Non-caching resources drop the raw data immediately after use.
        if self.__cache_request__ is False:
            self.__urldata__ = Ellipsis
        return data

    @property
    def _json_struct(self):
        """The json data structure in the URL contents, it will cache this
        if it makes sense so it doesn't parse over and over."""
        if self.__has_json__:
            if self.__cache_request__:
                if self.__json_struct__ is Ellipsis:
                    self.__json_struct__ = json.loads(self._contents)
                return self.__json_struct__
            else:
                return json.loads(self._contents)
        else:
            # Return an empty dict for things so they don't have to special
            # case against a None value or anything
            return {}

    # NOTE: @apply is a Python 2 builtin -- it calls parent() immediately so
    # the class attribute becomes the returned property object.
    @apply
    def parent():
        def get_(self):
            "Get this object's parent"
            if self._parent:
                return self._parent
            # auto-compute parent if needed
            elif getattr(self, '__parent_type__', None):
                return self._get_subfolder('..' if self._url[2].endswith('/')
                                           else '.', self.__parent_type__)
            else:
                raise AttributeError("%r has no parent attribute" % type(self))
        def set_(self, val):
            self._parent = val
        return property(get_, set_)
# On top of a URL, the ArcGIS Server folder structure lists subfolders
# and services.
class Folder(RestURL):
    """Represents a folder path on an ArcGIS REST server.

    Exposes subfolders, clusters and services both as list properties and as
    attribute/item access (``folder.MyService`` or ``folder['MyService']``).
    """
    __cache_request__ = True
    # Conversion table from type string to class instance.
    _service_type_mapping = {}

    @classmethod
    def _register_service_type(cls, subclass):
        """Registers subclass handlers of various service-type-specific service
        implementations. Look for classes decorated with
        @Folder._register_service_type for hints on how this works."""
        if hasattr(subclass, '__service_type__'):
            cls._service_type_mapping[subclass.__service_type__] = subclass
            if subclass.__service_type__:
                # Make e.g. svc.MapServer return the service itself, so typed
                # attribute access works uniformly.
                setattr(subclass,
                        subclass.__service_type__,
                        property(lambda x: x))
        return subclass

    @property
    def __members__(self):
        # Names visible for dir()-style introspection.
        return sorted(self.foldernames +
                      list(self.servicenames) +
                      self.clusternames)

    @property
    def foldernames(self):
        "Returns a list of folder names available from this folder."
        return [folder.strip('/').split('/')[-1] for folder
                in self._json_struct.get('folders', [])]

    @property
    def folders(self):
        "Returns a list of Folder objects available in this folder."
        return [self._get_subfolder(fn+'/', Folder) for fn in self.foldernames]

    @property
    def clusternames(self):
        "Returns a list of cluster names available from this folder."
        return [cluster.strip('/').split('/')[-1] for cluster
                in self._json_struct.get('clusters', [])]

    @property
    def clusters(self):
        "Returns a list of Folder objects available in this folder."
        return [self._get_subfolder(fn+'/', Folder) for fn in self.clusternames]

    @property
    def servicenames(self):
        "Give the list of services available in this folder."
        return set([service['name'].rstrip('/').split('/')[-1]
                    for service in self._json_struct.get('services', [])])

    @property
    def services(self):
        "Returns a list of Service objects available in this folder"
        return [self._get_subfolder("%s/%s/" %
                (s['name'].rstrip('/').split('/')[-1], s['type']),
                self._service_type_mapping.get(s['type'], Service)) for s
                in self._json_struct.get('services', [])]

    @property
    def url(self):
        """The URL as a string of the resource."""
        # NOTE(review): this property mutates self._url to append a trailing
        # slash as a side effect of reading it.
        if not self._url[2].endswith('/'):
            self._url[2] += '/'
        return RestURL.url.__get__(self)

    def __getattr__(self, attr):
        # Attribute access falls through to item lookup (folders/services).
        return self[attr]

    def __getitem__(self, attr):
        # If it's a folder, easy:
        if attr in self.foldernames:
            return self._get_subfolder(attr, Folder)
        elif attr in self.clusternames:
            return self._get_subfolder(attr, Folder)
        services = [x.copy() for x in self._json_struct['services']]
        # Strip out relative paths
        for service in services:
            service['name'] = service['name'].rstrip('/').split('/')[-1]
        # Handle the case of Folder_Name being potentially of Service_Type
        # format
        if '_' in attr: # May have a Name_Type service here
            al = attr.rstrip('/').split('/')[-1].split('_')
            servicetype = al.pop()
            untyped_attr = '_'.join(al)
            matchingservices = [svc for svc in services
                                if svc['name'] == untyped_attr
                                and svc['type'] == servicetype]
            if len(matchingservices) == 1:
                return self._get_subfolder("%s/%s/" %
                    (untyped_attr, servicetype),
                    self._service_type_mapping.get(servicetype, Service))
        # Then match by service name
        matchingservices = [svc for svc in services if svc['name'] == attr]
        # Found more than one match, there is ambiguity so return an
        # object holding .ServiceType attributes representing each service.
        if len(matchingservices) > 1:
            # Return an instance with accessors for overlapping services
            class AmbiguousService(object):
                """This service name has multiple service types."""
            ambiguous = AmbiguousService()
            for svc in matchingservices:
                attr, servicetype = svc['name'], svc['type']
                service = self._get_subfolder("%s/%s/" % (attr, servicetype),
                    self._service_type_mapping.get(servicetype, Service))
                setattr(ambiguous, servicetype, service)
            return ambiguous
        # Just one match, can return itself.
        elif len(matchingservices) == 1:
            servicetype = matchingservices[0]['type']
            return self._get_subfolder("%s/%s/" % (attr, servicetype),
                self._service_type_mapping.get(servicetype, Service))
        raise AttributeError("No service or folder named %r found" % attr)
# A catalog root functions the same as a folder, so treat Catalog as just a
# special case of Folder
class Catalog(Folder):
    """The catalog resource is the root node and initial entry point into an
    ArcGIS Server host. This resource represents a catalog of folders and
    services published on the host."""
    _pwdmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    _handler = urllib2.HTTPBasicAuthHandler(_pwdmgr)
    # NOTE(review): the string below is a discarded expression, not a
    # docstring -- only the statement directly after a def/class header is a
    # docstring. Kept for the information it carries.
    """Class-level password manager -- if a Catalog is constructed with a
    username/password pair for HTTP auth it will be handled by this."""
    _opener = urllib2.build_opener(_handler)
    # NOTE(review): installs the opener process-wide at import time, affecting
    # every urllib2 request in the interpreter.
    urllib2.install_opener(_opener)

    def __init__(self, url, username=None, password=None, token=None):
        """Open the catalog at *url*, optionally with HTTP basic auth
        credentials or a pre-acquired token."""
        if username is not None and password is not None:
            self.__class__._pwdmgr.add_password(None, url, username, password)
        url_ = list(urlparse.urlsplit(url))
        if not url_[2].endswith('/'):
            url_[2] += "/"
        if token is not None:
            self.__token__ = token
        super(Catalog, self).__init__(url_)
        # Basically a Folder, but do some really, really rudimentary sanity
        # checking (look for folders/services, make sure format is JSON) so we
        # can verify this URL behaves like a Folder -- catch errors early
        # before any other manipulations go on.
        # NOTE: asserts are stripped under python -O; these are sanity checks
        # only, not validation.
        assert 'folders' in self._json_struct, "No folders in catalog root"
        assert 'services' in self._json_struct, "No services in catalog root"

    @property
    def currentVersion(self):
        # Server version; pre-10 servers omit the key, hence the 9.3 default.
        return self._json_struct.get('currentVersion', 9.3)
# Definitions for classes calling/manipulating services
class Service(RestURL):
    """Abstract base for an ArcGIS REST service; concrete service classes
       derive from this and set __service_type__."""
    __cache_request__ = True
    __service_type__ = None
    __parent_type__ = Folder
    def __init__(self, url, file_data=None):
        # Accept either a URL string or a pre-split urlsplit-style
        # sequence; either way, normalize to a trailing-slash path.
        if isinstance(url, (tuple, list)):
            parts = url
        else:
            parts = list(urlparse.urlsplit(url))
        if not parts[2].endswith('/'):
            parts[2] += "/"
        super(Service, self).__init__(parts, file_data)
    @property
    def serviceDescription(self):
        """Short description of the service, or None when the service (or
           its service type) does not provide one."""
        return self._json_struct.get('serviceDescription', None)
    def __repr__(self):
        desc = self.serviceDescription
        suffix = " - %r" % desc if desc else ''
        return "<%s%s (%r)>" % (self.__service_type__, suffix, self.url)
class ServerError(Exception):
    """Raised when the server returns an error response for a request."""
class Result(RestURL):
    """Abstract base representing the outcome of an operation performed
       against a REST service."""
    __cache_request__ = True   # A result URL is immutable; fetch it once.
    __lazy_fetch__ = False     # Retrieve eagerly on construction.
class BinaryResult(Result):
    """Class representing the result of an operation performed on a service
    with some sort of opaque binary data, such as a PNG or KMZ. Contrast to
    a JsonResult, which has an immediately accessible data structure."""
    __has_json__ = False
    @property
    def data(self):
        """Return the raw data from this request."""
        return self._contents
    def save(self, outfile):
        """Save the binary data to a file path or a file-like object.

        If ``outfile`` is a path we open the file ourselves and (fix for
        the original's handle leak) close it when done; a caller-supplied
        file object is written to but left open for the caller to manage."""
        if isinstance(outfile, basestring):
            with open(outfile, 'wb') as handle:
                handle.write(self._contents)
        else:
            outfile.write(self._contents)
class JsonResult(Result):
    """Specialization of Result for operations that return a JSON payload.

       Inspects the payload on construction and raises ServerError when the
       server reports a failure (either an 'error' object or a 'status' of
       'error')."""
    __has_json__ = True
    def __init__(self, url, file_data=None):
        super(JsonResult, self).__init__(url, file_data)
        js = self._json_struct
        if 'error' in js:
            # Fold the optional detail strings into the message.
            details = ", ".join(js['error'].get('details', []))
            if details:
                details = " -- " + details
            raise ServerError("ERROR %r: %r%s <%s>" %
                              (js['error']['code'],
                               js['error']['message'] or 'Unspecified',
                               details,
                               self.url))
        elif "status" in js:
            if js['status'] == "error":
                msgs = js.get('messages',
                              [js.get('message', 'Unspecified Error')])
                raise ServerError(''.join(msgs))
class JsonPostResult(JsonResult):
    """JSON result for a REST call whose parameters are all carried in the
       payload of a POST request instead of the URL query string of a
       GET."""
    __post__ = True
class Layer(RestURL):
    """Common base class for map and network layers."""
    __cache_request__ = True   # Layer metadata is static; request once.
    __lazy_fetch__ = False     # Fetch immediately rather than on demand.
# Service implementations -- mostly simple conversion wrappers for the
# functionality handled up above, wrapper types for results, etc.
class AttachmentData(BinaryResult):
    """Binary payload of a single attachment associated with a layer
       feature; fetched lazily because it may be large."""
    __lazy_fetch__ = True
class AttachmentInfos(JsonResult):
    """The attachment infos resource returns information about attachments
    associated with a feature. This resource is available only if the layer
    has advertised that it has attachments. A layer has attachments if its
    hasAttachments property is true."""
    @property
    def attachments(self):
        """Return a list of attachment-info dicts, each augmented with an
        'attachment' key holding the AttachmentData subresource.

        Fixes two defects in the original: the typo ``self_get_subfolder``
        (a NameError at runtime) and the missing return statement (the
        property always evaluated to None)."""
        results = []
        for attachment in self._json_struct['attachmentInfos']:
            attachment_dict = attachment.copy()
            attachment_dict['attachment'] = \
                self._get_subfolder("%i/" % attachment_dict['id'],
                                    AttachmentData)
            results.append(attachment_dict)
        return results
class MapLayer(Layer):
    """The layer resource represents a single layer or standalone table in a
       map of a map service published by ArcGIS Server. It provides basic
       information about the layer such as its name, type, parent and
       sub-layers, fields, min and max scales, extent, and copyright text."""
    def QueryLayer(self, text=None, Geometry=None, inSR=None,
                   spatialRel='esriSpatialRelIntersects', where=None,
                   outFields=None, returnGeometry=None, outSR=None,
                   objectIds=None, time=None, maxAllowableOffset=None,
                   returnIdsOnly=None):
        """The query operation is performed on a layer resource. The result
           of this operation is a resultset resource. This resource provides
           information about query results including the values for the
           fields requested by the user. If you request geometry
           information, the geometry of each result is also returned in the
           resultset.

           B{Spatial Relation Options:}
             - esriSpatialRelIntersects
             - esriSpatialRelContains
             - esriSpatialRelCrosses
             - esriSpatialRelEnvelopeIntersects
             - esriSpatialRelIndexIntersects
             - esriSpatialRelOverlaps
             - esriSpatialRelTouches
             - esriSpatialRelWithin"""
        geo_json = None
        geometry_type = None
        if Geometry is not None:
            # BUG FIX: the original sent the global ``geometry`` *module*
            # as the query's geometry parameter instead of the Geometry
            # argument. Serialize the argument the same way
            # MapService.Identify does, including its geometry type.
            geometry_type = Geometry.__geometry_type__
            geo_json = json.dumps(Geometry._json_struct_without_sr)
            if not inSR:
                inSR = Geometry.spatialReference
        out = self._get_subfolder("./query", JsonResult, {
                                    'text': text,
                                    'geometry': geo_json,
                                    'geometryType': geometry_type,
                                    'inSR': inSR,
                                    'spatialRel': spatialRel,
                                    'where': where,
                                    'outFields': outFields,
                                    'returnGeometry': returnGeometry,
                                    'outSR': outSR,
                                    'objectIds': objectIds,
                                    'time': utils.pythonvaluetotime(time),
                                    'maxAllowableOffset': maxAllowableOffset,
                                    'returnIdsOnly': returnIdsOnly})
        return gptypes.GPFeatureRecordSetLayer.fromJson(out._json_struct)
    @property
    def id(self):
        """Numeric id of this layer within its map service."""
        return self._json_struct['id']
    @property
    def name(self):
        """Layer name."""
        return self._json_struct['name']
    @property
    def type(self):
        """Layer type (e.g. Feature Layer, Table)."""
        return self._json_struct['type']
    @property
    def geometryType(self):
        """esriGeometry* type of this layer's features."""
        return self._json_struct['geometryType']
    @property
    def copyrightText(self):
        """Copyright text for this layer."""
        return self._json_struct['copyrightText']
    @property
    def parentLayer(self):
        """This layer's parent layer as a MapLayer resource."""
        return self._get_subfolder("../%s/" %
                                   self._json_struct['parentLayer']['id'],
                                   MapLayer)
    @property
    def subLayers(self):
        """This layer's sub-layers as MapLayer resources.

        BUG FIX: the original resolved each entry with its *parent's* id,
        so every sub-layer pointed back at this layer; use each sub-layer's
        own id instead."""
        return [self._get_subfolder("../%s/" % layer['id'], MapLayer)
                for layer in self._json_struct['subLayers']]
    @property
    def minScale(self):
        """Minimum visible scale."""
        return self._json_struct['minScale']
    @property
    def maxScale(self):
        """Maximum visible scale."""
        return self._json_struct['maxScale']
    @property
    def extent(self):
        """Layer extent as a geometry object."""
        return geometry.fromJson(self._json_struct['extent'])
    @property
    def displayField(self):
        """Name of the layer's primary display field."""
        return self._json_struct['displayField']
    @property
    def fields(self):
        """List of field-definition dicts."""
        return self._json_struct['fields']
    @property
    def types(self):
        """Feature types, or [] when the layer defines none."""
        return self._json_struct.get('types', [])
    @property
    def relationships(self):
        """Relationships, or [] when the layer defines none."""
        return self._json_struct.get('relationships', [])
    @property
    def timeInfo(self):
        """Return the time info for this layer (timeExtent converted to
           Python values), or None when the layer is not time-aware."""
        time_info = self._json_struct.get('timeInfo', {})
        if not time_info:
            return None
        time_info = time_info.copy()
        if 'timeExtent' in time_info:
            time_info['timeExtent'] = utils.timetopythonvalue(
                time_info['timeExtent'])
        return time_info
    @property
    def hasAttachments(self):
        """True when this layer advertises feature attachments."""
        return self._json_struct.get('hasAttachments', False)
    @property
    def attachments(self):
        """Attachment info records for this layer, or [] if unsupported."""
        if not self.hasAttachments:
            return []
        return self._get_subfolder("attachments/", AttachmentInfos).attachments
class MapTile(BinaryResult):
    """A single cached tile image fetched from a map service."""
class ExportMapResult(JsonResult):
    """Outcome of an Export Map operation on a Map Service: exposes the
       generated image's URL, dimensions, extent and scale, plus the image
       bytes themselves."""
    @property
    def href(self):
        """URL of the exported map image."""
        return self._json_struct['href']
    @property
    def width(self):
        """Image width in pixels."""
        return self._json_struct['width']
    @property
    def height(self):
        """Image height in pixels."""
        return self._json_struct['height']
    @property
    def extent(self):
        """Geographic extent covered by the exported image."""
        return geometry.fromJson(self._json_struct['extent'])
    @property
    def scale(self):
        """Map scale of the exported image."""
        return self._json_struct['scale']
    @property
    def data(self):
        """Image bytes, downloaded from .href on first access and cached."""
        if not hasattr(self, '_data'):
            self._data = urllib2.urlopen(self.href).read()
        return self._data
    def save(self, outfile):
        """Save the image data to a file or file-like object"""
        if isinstance(outfile, basestring):
            outfile = open(outfile, 'wb')
        assert hasattr(outfile, 'write') and callable(outfile.write), \
            "Expect a file or file-like object with a .write() method"
        outfile.write(self.data)
class IdentifyOrFindResult(JsonResult):
    """Represents the result of a Find or Identify operation performed on a
       Map Service."""
    @property
    def results(self):
        """One geometry per hit, with the hit's attributes attached, all
        wrapped in a GPFeatureRecordSetLayer."""
        def resiter():
            for result in self._json_struct['results']:
                if 'geometry' in result:
                    geom = geometry.fromJson(result['geometry'])
                else:
                    geom = geometry.NullGeometry()
                # Robustness fix: tolerate a missing or null 'attributes'
                # entry instead of failing on the item assignment below.
                geom.attributes = result.get('attributes') or {}
                for key in ('displayFieldName', 'value',
                            'layerId', 'layerName'):
                    geom.attributes[key] = result[key]
                yield geom
        return gptypes.GPFeatureRecordSetLayer(list(resiter()),
                                               self.parent.spatialReference)
class ExportKMLResult(BinaryResult):
    """Binary (KMZ) payload produced by an Export KML operation on a Map
       Service."""
@Folder._register_service_type
class MapService(Service):
    """Map services offer access to map and layer content. Map services can
       either be cached or dynamic. A map service that fulfills requests with
       pre-created tiles from a cache instead of dynamically rendering part of
       the map is called a cached map service. A dynamic map service requires
       the server to render the map each time a request comes in. Map services
       using a tile cache can significantly improve performance while
       delivering maps, while dynamic map services offer more flexibility."""
    __service_type__ = "MapServer"
    def ExportMap(self, bbox, size=None, dpi=None, imageSR=None, bboxSR=None,
                  format=None, layerDefs=None, layers=None, transparent=False,
                  time=None):
        """The export operation is performed on a map service resource. The
           result of this operation is a map image resource. This resource
           provides information about the exported map image such as its URL,
           its width and height, extent and scale."""
        return self._get_subfolder('export/', ExportMapResult,
                                   {'bbox': bbox,
                                    'size': size,
                                    'dpi': dpi,
                                    'imageSR': imageSR,
                                    'bboxSR': bboxSR,
                                    'format': format,
                                    'layerDefs': layerDefs,
                                    'layers': layers,
                                    'transparent': transparent,
                                    'time': utils.pythonvaluetotime(time)})
    def Identify(self, Geometry, sr=None, layers=None, tolerance=1,
                 mapExtent=None, imageDisplay=None, returnGeometry=True):
        """The identify operation is performed on a map service resource. The
           result of this operation is an identify results resource. Each
           identified result includes its name, layer ID, layer name, geometry
           and geometry type, and other attributes of that result as
           name-value pairs."""
        assert hasattr(Geometry, '__geometry_type__'), "Invalid geometry"
        gt = Geometry.__geometry_type__
        if sr is None:
            # Default to the input geometry's spatial reference.
            sr = Geometry.spatialReference.wkid
        geo_json = json.dumps(Geometry._json_struct_without_sr)
        return self._get_subfolder('identify/', IdentifyOrFindResult,
                                   {'geometry': geo_json,
                                    'geometryType': gt,
                                    'sr': sr,
                                    'layers': layers,
                                    'tolerance': tolerance,
                                    'mapExtent': mapExtent,
                                    'imageDisplay': imageDisplay,
                                    'returnGeometry': returnGeometry})
    def Find(self, searchText, contains=True, searchFields=None, sr=None,
             layers=None, returnGeometry=True):
        """The find operation is performed on a map service resource. The
           result of this operation is a find results resource. Each result
           includes its value, feature ID, field name, layer ID, layer name,
           geometry, geometry type, and attributes in the form of name-value
           pairs."""
        return self._get_subfolder('find/', IdentifyOrFindResult,
                                   {'searchText': searchText,
                                    'contains': contains,
                                    'searchFields': searchFields,
                                    'sr': sr,
                                    'layers': layers,
                                    'returnGeometry': returnGeometry})
    def GenerateKML(self, docName, layers, layerOptions='composite'):
        """The generateKml operation is performed on a map service resource.
           The result of this operation is a KML document wrapped in a KMZ
           file. The document contains a network link to the KML Service
           endpoint with properties and parameters you specify.

           B{Layer Options:}
             - composite: (default) All layers as a single composite image.
               Layers cannot be turned on and off in the client.
             - separateImage: Each layer as a separate image.
             - nonComposite: Vector layers as vectors and raster layers as
               images."""
        # BUG FIX: the original referenced the undefined name
        # GenerateKMLResult; the KMZ result wrapper defined in this module
        # is ExportKMLResult.
        return self._get_subfolder('generateKml/', ExportKMLResult,
                                   {'docName': docName,
                                    'layers': layers,
                                    'layerOptions': layerOptions})
    def tile(self, row, col, zoomlevel):
        """For cached maps, this resource represents a single cached tile for
           the map. The image bytes for the tile at the specified level, row
           and column are directly streamed to the client. If the tile is not
           found, an HTTP status code of 404 (Not found) is returned."""
        # BUG FIX: the ArcGIS REST tile endpoint is
        # tile/{level}/{row}/{column}; the original interpolated
        # (row, col, level) and could never address the requested tile.
        return self._get_subfolder("tile/%s/%s/%s/" % (zoomlevel, row, col),
                                   MapTile)
    @property
    def mapName(self):
        """This map's name"""
        return self._json_struct['mapName']
    @property
    def description(self):
        """This map's description"""
        return self._json_struct['description']
    @property
    def copyrightText(self):
        """This map's copyright text"""
        return self._json_struct['copyrightText']
    @property
    def spatialReference(self):
        """This map's Spatial Reference"""
        return geometry.fromJson(
            self._json_struct['spatialReference'])
    @property
    def initialExtent(self):
        """This map's initial extent"""
        return geometry.fromJson(
            self._json_struct['initialExtent'])
    @property
    def fullExtent(self):
        """This map's full extent"""
        return geometry.fromJson(
            self._json_struct['fullExtent'])
    @property
    def layernames(self):
        """Return a list of the names of this map's layers"""
        return [layer['name'] for layer in self._json_struct['layers']]
    @property
    def layers(self):
        """Return a list of this map's layer objects"""
        return [self._get_subfolder("%s/" % layer['id'], MapLayer)
                for layer in self._json_struct['layers']]
    @property
    def tablenames(self):
        """Return a list of the names of this map's tables"""
        return [table['name'] for table in self._json_struct.get('tables', [])]
    @property
    def tables(self):
        """Return a list of this map's table objects"""
        return [self._get_subfolder("%s/" % table['id'], MapLayer)
                for table in self._json_struct.get('tables', [])]
    @property
    def timeInfo(self):
        """Return the time info for this Map Service (timeExtent converted
           to Python values), or None if the service is not time-aware."""
        time_info = self._json_struct.get('timeInfo', {})
        if not time_info:
            return None
        time_info = time_info.copy()
        if 'timeExtent' in time_info:
            time_info['timeExtent'] = utils.timetopythonvalue(
                time_info['timeExtent'])
        return time_info
    @property
    def supportedImageFormatTypes(self):
        """Return a list of supported image formats for this Map Service"""
        return [x.strip()
                for x in
                self._json_struct['supportedImageFormatTypes'].split(',')]
class FindAddressCandidatesResult(JsonResult):
    """Result of a geocode operation: the .candidates property is a list
       of candidate-address dicts whose ['location'] entry has been
       converted to a geometry.Point."""
    @property
    def candidates(self):
        """List of candidate addresses (as dictionaries) from a geocode
           operation."""
        converted = []
        for candidate in self._json_struct['candidates']:
            # Copy so the raw JSON struct is left untouched, then turn the
            # raw point struct into a geometry object.
            entry = candidate.copy()
            entry['location'] = geometry.fromJson(entry['location'])
            converted.append(entry)
        return converted
class ReverseGeocodeResult(JsonResult):
    """Result of a reverse geocode: .address is a dict of the candidate
       address's fields and .location is the geometry.Point of the actual
       address location. Address fields are also reachable by indexing or
       attribute access."""
    @property
    def address(self):
        """Dict of the candidate address's fields."""
        return self._json_struct['address']
    @property
    def location(self):
        """Actual location of the address as a geometry.Point."""
        return geometry.fromJson(self._json_struct['location'])
    def __getitem__(self, attr):
        return self._json_struct['address'][attr]
    def __getattr__(self, attr):
        # Translate missing address fields into AttributeError so the
        # attribute protocol (hasattr, getattr defaults) works normally.
        try:
            return self[attr]
        except KeyError as e:
            raise AttributeError(str(e))
@Folder._register_service_type
class GeocodeService(Service):
    """Geocoding is the process of assigning a location, usually in the form
       of coordinate values (points), to an address by comparing the
       descriptive location elements in the address to those present in the
       reference material. Addresses come in many forms, ranging from the
       common address format of a house number followed by the street name
       and succeeding information to other location descriptions such as
       postal zone or census tract. An address includes any type of
       information that distinguishes a place."""
    __service_type__ = "GeocodeServer"
    def FindAddressCandidates(self, outFields=None, outSR=None, **fields):
        """The findAddressCandidates operation is performed on a geocode
           service resource. The result of this operation is a resource
           representing the list of address candidates. This resource
           provides information about candidates including the address,
           location, and score."""
        # Fix: avoid the mutable default argument ([]) of the original;
        # None keeps the same behavior without sharing state across calls.
        if outFields is None:
            outFields = []
        # Validate that every address field the service marks required was
        # actually supplied.
        required_unset_fields = []
        for field in self._json_struct['addressFields']:
            if field['required'] and field['name'] not in fields:
                required_unset_fields.append(field['name'])
        if required_unset_fields:
            raise ValueError("Required field%s not set for Geocode: %s" %
                             ('' if len(required_unset_fields) == 1
                                 else 's', ', '.join(required_unset_fields)))
        query = fields.copy()
        query['outFields'] = outFields
        if outSR:
            # Accept either a SpatialReference object or a raw wkid.
            query['outSR'] = (outSR.wkid
                              if isinstance(outSR, geometry.SpatialReference)
                              else outSR)
        return self._get_subfolder('findAddressCandidates/',
                                   FindAddressCandidatesResult, query)
    def ReverseGeocode(self, location, distance, outSR=None):
        """The reverseGeocode operation is performed on a geocode service
           resource. The result of this operation is a reverse geocoded
           address resource. This resource provides information about all the
           address fields pertaining to the reverse geocoded address as well
           as its exact location."""
        if outSR:
            # Accept either a SpatialReference object or a raw wkid.
            outSR = (outSR.wkid
                     if isinstance(outSR, geometry.SpatialReference)
                     else outSR)
        return self._get_subfolder('reverseGeocode/', ReverseGeocodeResult,
                                   {'location': location,
                                    'distance': distance,
                                    'outSR': outSR})
class GPMessage(object):
    """A message generated during the execution of a geoprocessing task:
       processing start, parameter values in use, task progress, warnings
       and errors. Composed of a message type and a description."""
    # Known esriJobMessageType* codes (kept private, for reference).
    __message_types = set(["esriJobMessageTypeInformative",
                           "esriJobMessageTypeWarning",
                           "esriJobMessageTypeError",
                           "esriJobMessageTypeEmpty",
                           "esriJobMessageTypeAbort"])
    def __init__(self, description, type=None):
        """Accept (description, type) as two arguments, as a dict with
           'description'/'type' keys, or as a two-item sequence."""
        if isinstance(description, dict):
            type = description.get('type')
            description = description.get('description')
        elif isinstance(description, (tuple, list)):
            description, type = description[0], description[1]
        self.description = description
        self.type = type
    def __repr__(self):
        return "<% 11s: %r>" % (self.type[len('esriJobMessageType'):],
                                self.description)
@Folder._register_service_type
class GPService(Service):
    """Geoprocessing is a fundamental part of enterprise GIS operations,
       providing the data analysis, management and conversion tools all GIS
       users need.

       A geoprocessing service represents a collection of published tools
       that perform tasks necessary for manipulating and analyzing
       geographic information across a wide range of disciplines. Each tool
       performs one or more operations, accepts input (feature sets,
       tables, property values), executes operations using that input, and
       generates output for presentation or further processing. Tools can
       be executed synchronously or asynchronously."""
    __service_type__ = "GPServer"
    @property
    def tasknames(self):
        """Names of the tasks this service publishes."""
        return list(self._json_struct['tasks'])
    @property
    def tasks(self):
        """GPTask resources, one per published task."""
        return [self._get_subfolder(name + '/', GPTask)
                for name in self.tasknames]
    @property
    def executionType(self):
        """Returns the execution type of this task."""
        return self._json_struct['executionType']
    @property
    def synchronous(self):
        """Returns a boolean indicating whether this tasks runs synchronously
           (True) or asynchronously (False)."""
        mode = self._json_struct['executionType']
        if mode == 'esriExecutionTypeSynchronous':
            return True
        elif mode == 'esriExecutionTypeAsynchronous':
            return False
        raise ValueError("Unknown synchronous value: %r" % mode)
    def __getitem__(self, attr):
        # Dict-style lookup of a task by its name.
        if attr in self.tasknames:
            return self._get_subfolder(attr + '/', GPTask)
        raise KeyError("No task named %r found" % attr)
    def __getattr__(self, attr):
        # Attribute access falls back to task lookup first, then to the
        # generic Service attribute handling.
        try:
            return self[attr]
        except KeyError:
            return Service.__getattr__(self, attr)
class GPJobStatus(RestURL):
    """This class represents the current/pending status of an asynchronous
       GP Task. Please refer to the GPJob class for more information."""
    # Status can change between polls, so responses are not cached by
    # default; caching is switched on once the job reaches a terminal state
    # (see the _json_struct property below).
    __cache_request__ = False
    # Memoized {paramName: value} dict, built on first access to .results.
    _results = None
    # All the job status codes we are aware of (from Java API)
    job_statuses = set([
        'esriJobCancelled',
        'esriJobCancelling',
        'esriJobDeleted',
        'esriJobDeleting',
        'esriJobExecuting',
        'esriJobFailed',
        'esriJobNew',
        'esriJobSubmitted',
        'esriJobSucceeded',
        'esriJobTimedOut',
        'esriJobWaiting'])
    # If this is the status, self.running = True
    _still_running = set([
        'esriJobCancelling',
        'esriJobDeleting',
        'esriJobExecuting',
        'esriJobNew',
        'esriJobSubmitted',
        'esriJobWaiting'])
    # If this is the status, then throw an error
    _error_status = set([
        'esriJobCancelled',
        'esriJobDeleted',
        'esriJobFailed',
        'esriJobTimedOut',
        ])
    @property
    def _json_struct(self):
        # Fetch the status JSON via the base class; once the job is no
        # longer in a running state its status is final, so enable caching
        # and pin the parsed struct to avoid further network round-trips.
        js = RestURL._json_struct.__get__(self)
        if js['jobStatus'] not in self._still_running:
            self.__cache_request__ = True
            self.__json_struct__ = js
        return js
    @property
    def jobId(self):
        # Server-assigned unique identifier of this job.
        return self._json_struct['jobId']
    @property
    def jobStatus(self):
        # One of the esriJob* codes listed in job_statuses.
        return self._json_struct['jobStatus']
    @property
    def running(self):
        # True while the job is in any non-terminal state.
        return self._json_struct['jobStatus'] in self._still_running
    @property
    def results(self):
        # Lazily build a {paramName: converted value} dict from the job's
        # result references. Asserts the job has finished and raises
        # ServerError if it ended in a cancelled/deleted/failed/timed-out
        # state.
        assert (not self.running), "Task is still executing."
        if self.jobStatus in self._error_status:
            raise ServerError("Error: job status %r" % self.jobStatus)
        if self._results is None:
            def item_iterator():
                for resref in self._json_struct['results'].itervalues():
                    # Each result entry points to its own sub-resource
                    # holding the parameter name and raw value.
                    rel = self._get_subfolder(resref['paramUrl'], RestURL)
                    result = rel._json_struct
                    #self.parent.parent.parameters
                    #datatype = gptypes.GPBaseType._gp_type_mapping.get(
                    # result['dataType'],None)
                    datatype = None
                    conversion = None
                    # Look up the declared datatype on the owning GPTask
                    # (self.parent.parent); fall back to str when the
                    # parameter is not declared.
                    for param in self.parent.parent.parameters:
                        if param['name'] == result['paramName']:
                            datatype = param['datatype']
                    if datatype is None:
                        conversion = str
                    else:
                        conversion = datatype.fromJson
                    dt = result['paramName']
                    val = conversion(result['value'])
                    yield (dt, val)
            self._results = dict(item_iterator())
        return self._results
    @property
    def messages(self):
        "Return a list of messages returned from the server."
        return map(GPMessage, self._json_struct['messages'])
    def __getitem__(self, key):
        # Dict-style access to a single result value.
        return self.__class__.results.__get__(self)[key]
    def __getattr__(self, attr):
        # Attribute access falls through to result values.
        return self.__class__.results.__get__(self)[attr]
class GPJob(JsonResult):
    """The GP job resource represents a job submitted using the submit job
       operation: its job ID, status and messages and, once the job has
       successfully completed, its result and input parameters. All the
       interesting accessors delegate to an internal GPJobStatus."""
    _jobstatus = None
    def __init__(self, url):
        super(GPJob, self).__init__(url)
        # The poll-able status resource lives under ../jobs/<jobId>/.
        self._jobstatus = self._get_subfolder(
            '../jobs/%s/' % self._json_struct['jobId'], GPJobStatus)
    @property
    def jobId(self):
        "Return the unique ID the server assigned this task"
        return self._jobstatus.jobId
    @property
    def jobStatus(self):
        "Current esriJob* status code for this job."
        return self._jobstatus.jobStatus
    @property
    def running(self):
        "A boolean (True: job completion pending; False: no longer executing)"
        return self._jobstatus.running
    @property
    def results(self):
        "Returns a dict of outputs from the GPTask execution."
        return self._jobstatus.results
    @property
    def messages(self):
        "Return a list of messages returned from the server."
        return self._jobstatus.messages
    def __getitem__(self, key):
        return self._jobstatus.results[key]
    def __getattr__(self, attr):
        return self._jobstatus.results[attr]
class GPExecutionResult(JsonResult):
    """The GPExecutionResult object represents the output of running a
       synchronous GPTask."""
    _results = None  # memoized {paramName: value} mapping
    @property
    def messages(self):
        "Return a list of messages returned from the server."
        return map(GPMessage, self._json_struct['messages'])
    @property
    def results(self):
        "Returns a dict of outputs from the GPTask execution."
        if self._results is None:
            converted = {}
            for result in self._json_struct['results']:
                # Find the declared datatype for this output on the owning
                # task; unknown parameters fall back to str conversion.
                datatype = None
                for param in self.parent.parameters:
                    if param['name'] == result['paramName']:
                        datatype = param['datatype']
                conversion = str if datatype is None else datatype.fromJson
                converted[result['paramName']] = conversion(result['value'])
            self._results = converted
        return self._results
    @property
    def running(self):
        "For method compatibility with GPJob, always return false"
        return False
    def __getitem__(self, key):
        # results is a normal class property, so this cannot recurse
        # through __getattr__.
        return self.results[key]
    def __getattr__(self, attr):
        return self.__class__.results.__get__(self)[attr]
class GPTask(RestURL):
    """A single task in a GP service published using the ArcGIS Server.
       Exposes the task's metadata (name, display name, category, help URL
       and its input/output parameter definitions) and lets callers run it
       synchronously (Execute) or asynchronously (SubmitJob)."""
    __parent_type__ = GPService
    __cache_request__ = True
    def __init__(self, url):
        # Relative URL resolution requires a trailing slash on the path.
        if isinstance(url, basestring):
            url = list(urlparse.urlsplit(url))
        if not url[2].endswith('/'):
            url[2] += '/'
        super(GPTask, self).__init__(url)
    def __expandparamstodict(self, params, kw):
        # Pair positional values with the declared parameter order, merge
        # in keyword values (rejecting duplicates), then coerce each value
        # to its declared GP datatype's JSON structure.
        declared = self.parameters
        values = dict(zip((p['name'] for p in declared), params))
        for name, value in kw.items():
            if name in values:
                raise KeyError("Multiple definitions of parameter %r" % name)
            values[name] = value
        for info in declared:
            pname = info['name']
            if pname in values:
                val = values[pname]
                if not isinstance(val, info['datatype']):
                    values[pname] = info['datatype'](val)._json_struct
        return values
    def Execute(self, *params, **kw):
        """Synchronously execute the specified GP task. Parameters are passed
           in either in order or as keywords."""
        fp = self.__expandparamstodict(params, kw)
        return self._get_subfolder('execute/', GPExecutionResult, fp)
    def SubmitJob(self, *params, **kw):
        """Asynchronously execute the specified GP task. This will return a
           Geoprocessing Job object. Parameters are passed in either in order
           or as keywords."""
        fp = self.__expandparamstodict(params, kw)
        return self._get_subfolder('submitJob/', GPJob, fp)._jobstatus
    def __call__(self, *params, **kw):
        """Execute the task if it is synchronous, otherwise submit a job.
           Both result objects expose a C{.running} property (always False
           for a synchronous execution result), so callers can treat the
           two modes uniformly:

           >>> result = task(Param_1, Param_2, Param_3, ...)
           >>> while result.running:
           ...     time.sleep(0.125)
           >>> print result.Output1
        """
        if self.synchronous:
            return self.Execute(*params, **kw)
        return self.SubmitJob(*params, **kw)
    @property
    def name(self):
        """Task name ('' when the server omits it)."""
        return self._json_struct.get('name', '')
    @property
    def displayName(self):
        """Human-readable task name."""
        return self._json_struct['displayName']
    @property
    def category(self):
        """Task category."""
        return self._json_struct['category']
    @property
    def helpUrl(self):
        """URL of the task's help document."""
        return self._json_struct['helpUrl']
    @property
    def parameters(self):
        # Augment each raw parameter dict with a 'datatype' entry holding
        # the matching GP type class resolved through gptypes.
        parameters = self._json_struct['parameters']
        for parameter in parameters:
            dt = parameter['dataType']
            parameter['datatype'] = \
                gptypes.GPBaseType._get_type_by_name(
                    dt)._from_json_def(parameter)
        return parameters
    @property
    def executionType(self):
        """Returns the execution type of this task."""
        return self.parent.executionType
    @property
    def synchronous(self):
        """Returns a boolean indicating whether this tasks runs synchronously
           (True) or asynchronously (False)."""
        return self.parent.synchronous
class GeometryResult(JsonResult):
    """Represents the output of a Project, Simplify or Buffer operation
       performed by an ArcGIS REST API Geometry service."""
    @property
    def geometries(self):
        """Returned geometries, converted to geometry objects."""
        converted = []
        for geo in self._json_struct['geometries']:
            converted.append(geometry.fromJson(geo))
        return converted
class LengthsResult(JsonResult):
    """Represents the output of a Lengths operation performed by an ArcGIS
       REST API Geometry service."""
    @property
    def lengths(self):
        """Lengths as floats, one per input polyline.

        Fix: the original called map() with a single generator argument,
        which raises TypeError (map needs a function plus iterable(s));
        a list comprehension expresses the intent directly."""
        return [float(length) for length in self._json_struct['lengths']]
class AreasAndLengthsResult(LengthsResult):
    """Represents the output of a AreasAndLengths operation performed by an
       ArcGIS REST API Geometry service."""
    @property
    def areas(self):
        """Areas as floats, one per input polygon.

        Fix: the original called map() with a single generator argument,
        which raises TypeError; use a list comprehension instead."""
        return [float(area) for area in self._json_struct['areas']]
class LabelPointsResult(JsonResult):
    """Represents the output of a Label Points operation performed by an
       ArcGIS REST API Geometry service."""
    @property
    def labelPoints(self):
        """Label points for the provided polygon(s)."""
        points = []
        for geo in self._json_struct['labelPoints']:
            points.append(geometry.fromJson(geo))
        return points
@Folder._register_service_type
class GeometryService(Service):
"""A geometry service contains utility methods, which provide access to
sophisticated and frequently used geometric operations. An ArcGIS Server
Web site can only expose one geometry service with the static name
"Geometry." Note that geometry input and output, where required, are
always packaged as an array."""
__service_type__ = "GeometryServer"
def Project(self, geometries, inSR=None, outSR=None):
"""The project operation is performed on a geometry service resource.
The result of this operation is an array of projected geometries.
This resource projects an array of input geometries from an input
spatial reference to an output spatial reference."""
if isinstance(geometries, geometry.Geometry):
geometries = [geometries]
if inSR is None:
inSR = geometries[0].spatialReference.wkid
assert outSR, "Cannot project to an empty output projection."
geometry_types = set([x.__geometry_type__ for x in geometries])
assert len(geometry_types) == 1, "Too many geometry types"
geo_json = json.dumps({'geometryType': list(geometry_types)[0],
'geometries': [geo._json_struct_without_sr
for geo in geometries]
})
return self._get_subfolder('project', GeometryResult,
{'geometries': geo_json,
'inSR': inSR,
'outSR': outSR
})
def Simplify(self, geometries, sr=None):
"""The simplify operation is performed on a geometry service resource.
Simplify permanently alters the input geometry so that the geometry
becomes topologically consistent. This resource applies the ArcGIS
simplify operation to each geometry in the input array. For more
information, see ITopologicalOperator.Simplify Method and
IPolyline.SimplifyNetwork Method."""
if isinstance(geometries, geometry.Geometry):
geometries = [geometries]
if sr is None:
sr = geometries[0].spatialReference.wkid
geometry_types = set([x.__geometry_type__ for x in geometries])
assert len(geometry_types) == 1, "Too many geometry types"
geo_json = json.dumps({'geometryType': list(geometry_types)[0],
'geometries': [geo._json_struct_without_sr
for geo in geometries]
})
return self._get_subfolder('simplify', GeometryResult,
{'geometries': geo_json,
'sr': sr
})
def Buffer(self, geometries, distances, unit=None, unionResults=False,
inSR=None, outSR=None, bufferSR=None):
"""The buffer operation is performed on a geometry service resource.
The result of this operation is buffer polygons at the specified
distances for the input geometry array. An option is available to
union buffers at each distance."""
if isinstance(geometries, geometry.Geometry):
geometries = [geometries]
if isinstance(distances, (list, tuple)):
distances=",".join(str(distance) for distance in distances)
geometry_types = set([x.__geometry_type__ for x in geometries])
assert len(geometry_types) == 1, "Too many geometry types"
geo_json = json.dumps({'geometryType': list(geometry_types)[0],
'geometries': [geo._json_struct_without_sr
for geo in geometries]
})
if inSR is None:
inSR = geometries[0].spatialReference.wkid
if outSR is None:
outSR = geometries[0].spatialReference.wkid
if bufferSR is None:
bufferSR = geometries[0].spatialReference.wkid
return self._get_subfolder('buffer', GeometryResult,
{'geometries': geo_json,
'distances': distances,
'unit': unit,
'unionResults': unionResults,
'inSR': inSR,
'outSR': outSR,
'bufferSR': bufferSR
})
def AreasAndLengths(self, polygons, sr=None, lengthUnit=None,
areaUnit=None):
"""The areasAndLengths operation is performed on a geometry service
resource. This operation calculates areas and perimeter lengths for
each polygon specified in the input array."""
if isinstance(polygons, geometry.Geometry):
polygons = [polygons]
assert all(isinstance(polygon, geometry.Polygon)
for polygon in polygons), "Must use polygons"
if sr is None:
sr = polygons[0].spatialReference.wkid
geo_json = json.dumps([polygon._json_struct_without_sr
for polygon in polygons])
return self._get_subfolder('areasAndLengths', AreasAndLengthsResult,
{'polygons': geo_json,
'sr': sr,
'lengthUnit': lengthUnit,
'areaUnit': areaUnit
})
def Lengths(self, polylines, sr=None, lengthUnit=None, geodesic=None):
"""The lengths operation is performed on a geometry service resource.
This operation calculates the lengths of each polyline specified in
the input array"""
if isinstance(polylines, geometry.Geometry):
polylines = [polylines]
assert all(isinstance(polyline, geometry.Polyline)
for polyline in polylines), "Must use polylines"
if sr is None:
sr = polylines[0].spatialReference.wkid
geo_json = json.dumps([polyline._json_struct_without_sr
for polyline in polylines])
if geodesic is not None:
geodesic = bool(geodesic)
return self._get_subfolder('lengths', LengthsResult,
{'polylines': geo_json,
'sr': sr,
'lengthUnit': lengthUnit,
'geodesic': geodesic
})
def LabelPoints(self, polygons, sr):
"""The labelPoints operation is performed on a geometry service
resource. This operation calculates an interior point for each
polygon specified in the input array. These interior points can be
used by clients for labeling the polygons."""
if isinstance(polygons, geometry.Geometry):
polygons = [polygons]
assert all(isinstance(polygon, geometry.Polygon)
for polygon in polygons), "Must use polygons"
if sr is None:
sr = polygons[0].spatialReference.wkid
geo_json = json.dumps([polygon._json_struct_without_sr
for polygon in polygons])
return self._get_subfolder('labelPoints', LabelPointsResult,
{'polygons': geo_json,
'sr': sr
})
def ConvexHull(self, geometries=None, sr=None):
"""The convexHull operation is performed on a geometry service
resource. It returns the convex hull of the input geometry. The
input geometry can be a point, multipoint, polyline or polygon. The
hull is typically a polygon but can also be a polyline or point in
degenerate cases."""
if isinstance(geometries, geometry.Geometry):
geometries = [geometries]
geometry_types = set([x.__geometry_type__ for x in geometries])
assert len(geometry_types) == 1, "Too many geometry types"
geo_json = json.dumps({'geometryType': list(geometry_types)[0],
'geometries': [geo._json_struct_without_sr
for geo in geometries]
})
if sr is None:
sr = geometries[0].spatialReference.wkid
return self._get_subfolder('convexHull', GeometryResult,
{'geometries': geo_json, 'sr': sr})
def Densify(self, geometries=None, sr=None, maxSegmentLength=None,
geodesic=None, lengthUnit=None):
"""The densify operation is performed on a geometry service resource.
This operation densifies geometries by plotting points between
existing vertices."""
if isinstance(geometries, geometry.Geometry):
geometries = [geometries]
geometry_types = set([x.__geometry_type__ for x in geometries])
assert len(geometry_types) == 1, "Too many geometry types"
geo_json = json.dumps({'geometryType': list(geometry_types)[0],
'geometries': [geo._json_struct_without_sr
for geo in geometries]
})
if sr is None:
sr = geometries[0].spatialReference.wkid
return self._get_subfolder('convexHull', GeometryResult,
{'geometries': geo_json,
'sr': sr,
'maxSegmentLength': maxSegmentLength,
'geodesic': geodesic,
'lengthUnit': lengthUnit
})
def Distance(self, geometry1=None, geometry2=None, sr=None,
distanceUnit=None, geodesic=None):
"""The distance operation is performed on a geometry service resource.
It reports the planar (projected space) / geodesic shortest distance
between A and B. sr is a projected coordinate system. Distance is
reported in the linear units specified by units or the units of sr
if units is null."""
if not sr:
sr = (geometry1.spatialReference.wkid or
geometry2.spatialReference.wkid)
geo_json_1 = json.dumps({'geometryType': geometry1.__geometry_type__,
'geometry': geometry1._json_struct})
geo_json_2 = json.dumps({'geometryType': geometry2.__geometry_type__,
'geometry': geometry2._json_struct})
folder = self._get_subfolder('distance', JsonResult,
{'geometry1': geo_json_1,
'geometry2': geo_json_2,
'sr': sr,
'distanceUnit': distanceUnit,
'geodesic': geodesic,
})
return folder._json_struct['distance']
    def Generalize(self, geometries=None, sr=None, maxDeviation=None,
                   deviationUnit=None):
        """The generalize operation is performed on a geometry service
        resource. It returns generalized (Douglas-Peucker) versions of the
        input geometries.

        geometries: a Geometry or sequence of geometries, all of one type.
        sr: spatial reference wkid; defaults to the first geometry's.
        maxDeviation: maximum allowed deviation from the original geometry.
        deviationUnit: unit in which maxDeviation is expressed."""
        if isinstance(geometries, geometry.Geometry):
            geometries = [geometries]
        # All input geometries must share a single geometry type.
        geometry_types = set([x.__geometry_type__ for x in geometries])
        assert len(geometry_types) == 1, "Too many geometry types"
        geo_json = json.dumps({'geometryType': list(geometry_types)[0],
                               'geometries': [geo._json_struct_without_sr
                                              for geo in geometries]
                              })
        if sr is None:
            sr = geometries[0].spatialReference.wkid
        return self._get_subfolder('generalize', GeometryResult,
                                   {'geometries': geo_json,
                                    'sr': sr,
                                    'maxDeviation': maxDeviation,
                                    'deviationUnit': deviationUnit
                                   })
def Offset(self, geometries=None, sr=None, offsetDistance=None,
offsetUnit=None, offsetHow=None, bevelRatio=None):
"""The offset operation is performed on a geometry service resource.
Offset constructs the offset of the given input geometries. If the
offset parameter is positive the constructed offset will be on the
right side of the geometry. (Left side offsets are constructed with
negative parameters.) Tracing the geometry from it's first vertex to
the last will give you a direction along the geometry. It is to the
right and left perspective of this direction that the positive and
negative parameters will dictate where the offset is contructed. In
these terms it is simple to infer where the offset of even
horizontal geometries will be constructed. The offsetHow parameter
determines how outer corners between segments are handled. Rounded
offset rounds the corner between extended offsets. Bevelled offset
squares off the corner after a given ratio distance. Mitered offset
attempts to allow extended offsets to naturally intersect, but if
that intersection occurs too far from the corner, the corner is
eventually bevelled off at a fixed distance."""
if isinstance(geometries, geometry.Geometry):
geometries = [geometries]
geometry_types = set([x.__geometry_type__ for x in geometries])
assert len(geometry_types) == 1, "Too many geometry types"
geo_json = json.dumps({'geometryType': list(geometry_types)[0],
'geometries': [geo._json_struct_without_sr
for geo in geometries]
})
if sr is None:
sr = geometries[0].spatialReference.wkid
return self._get_subfolder('offset', GeometryResult,
{'geometries': geo_json,
'sr': sr,
'offsetDistance': offsetUnit,
'offsetUnit': offsetHow,
'bevelRatio': bevelRatio
})
def TrimExtend(self, polylines=None, trimExtendTo=None, sr=None,
extendHow=None):
"""The trimExtend operation is performed on a geometry service
resource. This operation trims / extends each polyline specified
in the input array, using the user specified guide polylines. When
trimming features, the part to the left of the oriented cutting line
is preserved in the output and the other part is discarded. An empty
polyline is added to the output array if the corresponding input
polyline is neither cut nor extended."""
if isinstance(polylines, geometry.Geometry):
polylines = [polylines]
assert all(isinstance(polyline, geometry.Polyline)
for polyline in polylines), "Must use polylines"
if sr is None:
sr = polylines[0].spatialReference.wkid
geo_json = json.dumps([polyline._json_struct_without_sr
for polyline in polylines])
return self._get_subfolder('trimExtend', GeometryResult,
{'polylines': geo_json,
'trimExtendTo': trimExtendTo,
'extendHow': extendHow,
'sr': sr
})
def AutoComplete(self, polygons=None, polylines=None, sr=None):
"""The Auto Complete operation is performed on a geometry service
resource. The AutoComplete operation simplifies the process of
constructing new polygons that are adjacent to other polygons. It
constructs polygons that fill in the gaps between existing polygons
and a set of polylines."""
raise NotImplementedError()
def Cut(self, cutter=None, target=None, sr=None):
"""The cut operation is performed on a geometry service resource. This
operation splits the input polyline or polygon where it crosses a
cutting polyline"""
raise NotImplementedError()
def Difference(self, geometries=None, geometry=None, sr=None):
"""The difference operation is performed on a geometry service
resource. This operation constructs the set-theoretic difference
between an array of geometries and another geometry."""
raise NotImplementedError()
def Intersect(self, geometries=None, geometry=None, sr=None):
"""The intersect operation is performed on a geometry service
resource. This operation constructs the set-theoretic intersection
between an array of geometries and another geometry"""
raise NotImplementedError()
def Reshape(self, target=None, reshaper=None, sr=None):
"""The reshape operation is performed on a geometry service resource.
It reshapes a polyline or a part of a polygon using a reshaping
line."""
raise NotImplementedError()
def Union(self, geometries=None, sr=None):
"""The union operation is performed on a geometry service resource.
This operation constructs the set-theoretic union of the geometries
in the input array. All inputs must be of the same type."""
raise NotImplementedError()
class ExportImageResult(JsonResult):
    """Represents the output of an Image Service exportImage call."""
    @property
    def href(self):
        # URL at which the exported image can be fetched.
        return self._json_struct['href']
    @property
    def width(self):
        return self._json_struct['width']
    @property
    def height(self):
        return self._json_struct['height']
    @property
    def extent(self):
        return geometry.fromJson(self._json_struct['extent'])
    def save(self, outfile):
        """Save the image data to a file or file-like object.

           outfile: a filename or an open file-like object. BUG FIX: when a
           filename is given, the file opened here is now closed (it was
           previously leaked); a caller-supplied file object is left open."""
        opened_here = False
        if isinstance(outfile, basestring):
            outfile = open(outfile, 'wb')
            opened_here = True
        try:
            outfile.write(urllib2.urlopen(self.href).read())
        finally:
            if opened_here:
                outfile.close()
@Folder._register_service_type
class ImageService(Service):
    """An image service provides read-only access to a mosaicked collection of
    images or a raster data set."""
    __service_type__ = "ImageServer"
    def ExportImage(self, bbox=None, size=None, imageSR=None, bboxSR=None,
                    format=None, pixelType=None, noData=None,
                    interpolation=None, compressionQuality=None, bandIds=None,
                    mosaicProperties=None, viewpointProperties=None,
                    mosaicRule=None, renderingRule=None):
        """Export a map image from this image service. Returns an
           ExportImageResult describing the exported image (its URL, width,
           height, extent and scale)."""
        # All arguments pass straight through as query parameters.
        params = {'bbox': bbox,
                  'size': size,
                  'imageSR': imageSR,
                  'bboxSR': bboxSR,
                  'format': format,
                  'pixelType': pixelType,
                  'noData': noData,
                  'interpolation': interpolation,
                  'compressionQuality': compressionQuality,
                  'bandIds': bandIds,
                  'mosaicProperties': mosaicProperties,
                  'viewpointProperties': viewpointProperties,
                  'mosaicRule': mosaicRule,
                  'renderingRule': renderingRule}
        return self._get_subfolder('exportImage/', ExportImageResult, params)
@Folder._register_service_type
class NetworkService(Service):
    """The network service resource represents a network analysis service
       published with ArcGIS Server. The resource provides information about
       the service such as the service description and the various network
       layers (route, closest facility and service area layers) contained in
       the network analysis service."""
    __service_type__ = "NAServer"
    @property
    def routeLayers(self):
        # RouteNetworkLayer objects for this service's route layers.
        return [self._get_subfolder("%s/" % layer, RouteNetworkLayer)
                for layer in self._json_struct['routeLayers']]
    @property
    def serviceAreaLayers(self):
        # NetworkLayer objects for this service's service area layers.
        return [self._get_subfolder("%s/" % layer, NetworkLayer)
                for layer in self._json_struct['serviceAreaLayers']]
    @property
    def closestFacilityLayers(self):
        # NetworkLayer objects for this service's closest facility layers.
        return [self._get_subfolder("%s/" % layer, NetworkLayer)
                for layer in self._json_struct['closestFacilityLayers']]
    def __getitem__(self, attr):
        """Look up a network layer of any type by its name."""
        layer_names = set(self._json_struct['routeLayers'] +
                          self._json_struct['serviceAreaLayers'] +
                          self._json_struct['closestFacilityLayers'])
        if attr in layer_names:
            # BUG FIX: the subfolder was created but never returned, so every
            # lookup fell through to the KeyError below (which also made
            # __getattr__ useless).
            return self._get_subfolder("%s/" % attr, NetworkLayer)
        raise KeyError("No attribute %r found" % attr)
    def __getattr__(self, attr):
        """Allow attribute-style access to layers by name."""
        try:
            return self[attr]
        except KeyError as e:
            raise AttributeError(str(e))
class DirectionResult(object):
    """A single directions entry from a network Solve operation's output."""
    def __init__(self, direction):
        # Keep the raw JSON structure for this entry; all properties
        # read from it.
        self._json_struct = direction
    @property
    def routeId(self):
        """Identifier of the route these directions belong to."""
        return self._json_struct["routeId"]
    @property
    def routeName(self):
        """Name of the route these directions belong to."""
        return self._json_struct["routeName"]
    @property
    def summary(self):
        """Summary structure for this directions entry."""
        return self._json_struct["summary"]
    @property
    def features(self):
        """The direction features as a GPFeatureRecordSetLayer."""
        return gptypes.GPFeatureRecordSetLayer.fromJson(self._json_struct)
class NetworkSolveResult(JsonResult):
    """Represents the output of a solve operation performed on a Route
       Network layer."""
    @property
    def directions(self):
        """One DirectionResult per directions entry in the result."""
        return [DirectionResult(d) for d in self._json_struct['directions']]
    @property
    def routes(self):
        """Route features as a GPFeatureRecordSetLayer."""
        routes_json = self._json_struct['routes']
        return gptypes.GPFeatureRecordSetLayer.fromJson(routes_json)
    @property
    def stops(self):
        """Stop features as a GPFeatureRecordSetLayer."""
        stops_json = self._json_struct['stops']
        return gptypes.GPFeatureRecordSetLayer.fromJson(stops_json)
    @property
    def barriers(self):
        """Barrier features as a GPFeatureRecordSetLayer."""
        barriers_json = self._json_struct['barriers']
        return gptypes.GPFeatureRecordSetLayer.fromJson(barriers_json)
    @property
    def messages(self):
        """Messages emitted by the solve operation."""
        return self._json_struct['messages']
class NetworkLayer(Layer):
    """The network layer resource represents a single network layer in a
       network analysis service published by ArcGIS Server. It provides
       basic information about the network layer such as its name, type,
       and network classes. Additionally, depending on the layer type, it
       provides different pieces of information as detailed in the
       examples."""
    __parent_type__ = NetworkService
    # Thin read-only accessors over the layer's JSON description; each
    # raises KeyError if the server response lacks the field.
    @property
    def layerName(self):
        return self._json_struct['layerName']
    @property
    def layerType(self):
        return self._json_struct['layerType']
    @property
    def impedance(self):
        return self._json_struct['impedance']
    @property
    def useStartTime(self):
        return self._json_struct['useStartTime']
    @property
    def useTimeWindows(self):
        return self._json_struct['useTimeWindows']
    @property
    def preserveFirstStop(self):
        return self._json_struct['preserveFirstStop']
    @property
    def preserveLastStop(self):
        return self._json_struct['preserveLastStop']
    @property
    def restrictUTurns(self):
        return self._json_struct['restrictUTurns']
    @property
    def outputLineType(self):
        return self._json_struct['outputLineType']
    @property
    def useHierarchy(self):
        return self._json_struct['useHierarchy']
    @property
    def ignoreInvalidLocations(self):
        return self._json_struct['ignoreInvalidLocations']
    @property
    def restrictions(self):
        return self._json_struct['restrictions']
    @property
    def distanceUnits(self):
        return self._json_struct['distanceUnits']
    @property
    def useTimeAttribute(self):
        return self._json_struct['useTimeAttribute']
    @property
    def networkClasses(self):
        return self._json_struct['networkClasses']
    def SolveClosestFacility(self, facilities=None,
                                   incidents=None,
                                   barriers=None,
                                   polylineBarriers=None,
                                   polygonBarriers=None,
                                   attributeParameterValues=None,
                                   returnDirections=None,
                                   directionsLanguage=None,
                                   directionsStyleName=None,
                                   directionsLengthUnits=None,
                                   directionsTimeAttributeName=None,
                                   returnCFRoutes=None,
                                   returnFacilities=None,
                                   returnIncidents=None,
                                   returnBarriers=None,
                                   returnPolylineBarriers=None,
                                   returnPolygonBarriers=None,
                                   facilityReturnType=None,
                                   outputLines=None,
                                   defaultCutoff=None,
                                   defaultTargetFacilityCount=None,
                                   travelDirection=None,
                                   outSR=None,
                                   impedanceAttributeName=None,
                                   restrictionAttributeNames=None,
                                   restrictUTurns=None,
                                   useHierarchy=None,
                                   outputGeometryPrecision=None,
                                   outputGeometryPrecisionUnits=None):
        """The solve operation is performed on a network layer resource of type
           closest facility."""
        # Not yet implemented; the signature mirrors the REST API parameters.
        raise NotImplementedError()
    def SolveServiceArea(self, facilities=None,
                               barriers=None,
                               polylineBarriers=None,
                               polygonBarriers=None,
                               attributeParameterValues=None,
                               defaultBreaks=None,
                               excludeSourcesFromPolygons=None,
                               mergeSimilarPolygonRanges=None,
                               outputLines=None,
                               outputPolygons=None,
                               overlapLines=None,
                               overlapPolygons=None,
                               splitLinesAtBreaks=None,
                               splitPolygonsAtBreaks=None,
                               travelDirection=None,
                               trimOuterPolygon=None,
                               trimPolygonDistance=None,
                               trimPolygonDistanceUnits=None,
                               accumulateAttributeNames=None,
                               impedanceAttributeName=None,
                               restrictionAttributeNames=None,
                               restrictUTurns=None,
                               outputGeometryPrecision=None,
                               outputGeometryPrecisionUnits=None):
        """The solve operation is performed on a network layer resource of type
           service area (layerType is esriNAServerServiceArea)."""
        # Not yet implemented; the signature mirrors the REST API parameters.
        raise NotImplementedError()
class RouteNetworkLayer(NetworkLayer):
    """Represents a Route Network Layer (layerType esriNAServerRouteLayer);
       the only network layer type on which Solve is supported."""
    def Solve(self, stops=None, barriers=None, returnDirections=None,
              returnRoutes=None, returnStops=None, returnBarriers=None,
              outSR=None, ignoreInvalidLocations=None, outputLines=None,
              findBestSequence=None, preserveFirstStop=None,
              preserveLastStop=None, useTimeWindows=None, startTime=None,
              accumulateAttributeNames=None, impedanceAttributeName=None,
              restrictionAttributeNames=None, restrictUTurns=None,
              useHierarchy=None, directionsLanguage=None,
              outputGeometryPrecision=None, directionsLengthUnits=None,
              directionsTimeAttributeName=None, attributeParameterValues=None,
              polylineBarriers=None, polygonBarriers=None):
        """The solve operation is performed on a network layer resource.
           At 9.3.1, the solve operation is supported only on the route layer.
           Or specifically, on a network layer whose layerType is
           esriNAServerRouteLayer.

           You can provide arguments to the solve route operation as query
           parameters defined in the parameters table below.
        """
        def ptlist_as_semilist(lst):
            # Convert a Point, or a sequence of point-like coordinate pairs,
            # into the REST API's semicolon-delimited "x,y;x,y" form; any
            # other value (e.g. an already-formatted string) passes through.
            if isinstance(lst, geometry.Point):
                lst = [lst]
            if isinstance(lst, (list, tuple)):
                return ";".join(','.join(str(x) for x in pt) for pt in lst)
            return lst
        # Guard: Solve is only defined for route layers.
        if self.layerType != "esriNAServerRouteLayer":
            raise TypeError("Layer is of type %s; Solve is not available."
                            % self.layerType)
        return self._get_subfolder('solve/', NetworkSolveResult,
                                   {'stops': ptlist_as_semilist(stops),
                                    'barriers': ptlist_as_semilist(barriers),
                                    'returnDirections': returnDirections,
                                    'returnRoutes': returnRoutes,
                                    'returnStops': returnStops,
                                    'returnBarriers': returnBarriers,
                                    'outSR': outSR,
                                    'ignoreInvalidLocations': ignoreInvalidLocations,
                                    'outputLines': outputLines,
                                    'findBestSequence': findBestSequence,
                                    'preserveFirstStop': preserveFirstStop,
                                    'preserveLastStop': preserveLastStop,
                                    'useTimeWindows': useTimeWindows,
                                    'startTime': startTime,
                                    'accumulateAttributeNames': accumulateAttributeNames,
                                    'impedanceAttributeName': impedanceAttributeName,
                                    'restrictionAttributeNames': restrictionAttributeNames,
                                    'restrictUTurns': restrictUTurns,
                                    'useHierarchy': useHierarchy,
                                    'directionsLanguage': directionsLanguage,
                                    'outputGeometryPrecision': outputGeometryPrecision,
                                    'directionsLengthUnits': directionsLengthUnits,
                                    'directionsTimeAttributeName':
                                        directionsTimeAttributeName,
                                    'attributeParameterValues': attributeParameterValues,
                                    'polylineBarriers': polylineBarriers,
                                    'polygonBarriers': polygonBarriers})
class GeoDataVersion(RestURL):
    """The geodata version resource represents a single version in a geodata
       service published using ArcGIS Server. It provides basic information
       about the version such as its description, created and modified times,
       access type, as well as parent, children and ancestor versions."""
    # Thin read-only accessors over the version's JSON description; each
    # raises KeyError if the server response lacks the field.
    @property
    def name(self):
        return self._json_struct['name']
    @property
    def description(self):
        return self._json_struct['description']
    @property
    def created(self):
        return self._json_struct['created']
    @property
    def modified(self):
        return self._json_struct['modified']
    @property
    def access(self):
        return self._json_struct['access']
    @property
    def parentVersion(self):
        return self._json_struct['parentVersion']
    @property
    def childVersions(self):
        return self._json_struct['childVersions']
    @property
    def ancestorVersions(self):
        return self._json_struct['ancestorVersions']
class GeoDataReplica(RestURL):
    """The geodata replica resource represents a single replica in a geodata
       service published using ArcGIS Server. It provides basic information
       about the replica such as its id, replica version, creation date, GUID,
       role, access type, and reconcile policy."""
    # Thin read-only accessors over the replica's JSON description; each
    # raises KeyError if the server response lacks the field.
    @property
    def name(self):
        return self._json_struct['name']
    @property
    def id(self):
        return self._json_struct['id']
    @property
    def replicaVersion(self):
        return self._json_struct['replicaVersion']
    @property
    def guid(self):
        return self._json_struct['guid']
    @property
    def role(self):
        return self._json_struct['role']
    @property
    def accessType(self):
        return self._json_struct['accessType']
    @property
    def myGenerationNumber(self):
        return self._json_struct['myGenerationNumber']
    @property
    def sibGenerationNumber(self):
        return self._json_struct['sibGenerationNumber']
    @property
    def sibMyGenerationNumber(self):
        return self._json_struct['sibMyGenerationNumber']
    @property
    def replicaState(self):
        return self._json_struct['replicaState']
    @property
    def sibConnectionString(self):
        return self._json_struct['sibConnectionString']
    @property
    def modelType(self):
        return self._json_struct['modelType']
    @property
    def singleGeneration(self):
        return self._json_struct['singleGeneration']
    @property
    def spatialRelation(self):
        return self._json_struct['spatialRelation']
    @property
    def queryGeometryType(self):
        return self._json_struct['queryGeometryType']
    @property
    def queryGeometry(self):
        # The replica's query geometry, parsed into a geometry object.
        return geometry.fromJson(self._json_struct['queryGeometry'])
    @property
    def transferRelatedObjects(self):
        return self._json_struct['transferRelatedObjects']
    @property
    def reconcilePolicy(self):
        return self._json_struct['reconcilePolicy']
@Folder._register_service_type
class GeoDataService(Service):
    """The geodata service resource represents a geodata service that you have
       published with ArcGIS Server. The resource provides basic information
       associated with the geodata service such as the service description,
       its workspace type, default working version, versions, and replicas."""
    __service_type__ = "GeoDataServer"
    @property
    def workspaceType(self):
        return self._json_struct['workspaceType']
    @property
    def defaultWorkingVersionName(self):
        # Name only; see defaultWorkingVersion for the resource itself.
        return self._json_struct['defaultWorkingVersion']
    @property
    def defaultWorkingVersion(self):
        """The default working version as a GeoDataVersion resource."""
        return self._get_subfolder("versions/%s/" %
                                       self.defaultWorkingVersionName,
                                   GeoDataVersion)
    @property
    def versionNames(self):
        return self._json_struct['versions']
    @property
    def versions(self):
        """A GeoDataVersion resource for each version of this service."""
        return [self._get_subfolder("versions/%s/" % version, GeoDataVersion)
                for version in self.versionNames]
    @property
    def replicaNames(self):
        return self._json_struct['replicas']
    @property
    def replicas(self):
        """A GeoDataReplica resource for each replica of this service.

           BUG FIX: the URL was previously built from `version` (a stale
           name copied from the versions property) instead of the loop
           variable, so every replica URL was wrong or raised NameError."""
        return [self._get_subfolder("replicas/%s/" % replica, GeoDataReplica)
                for replica in self.replicaNames]
class GlobeLayer(Layer):
    """The globe layer resource represents a single layer in a globe service
       published by ArcGIS Server. It provides basic information about the
       layer such as its ID, name, type, parent and sub-layers, fields, extent,
       data type, sampling mode, and extrusion type."""
    # Thin read-only accessors over the layer's JSON description; each
    # raises KeyError if the server response lacks the field.
    @property
    def id(self):
        return self._json_struct['id']
    @property
    def name(self):
        return self._json_struct['name']
    @property
    def type(self):
        return self._json_struct['type']
    @property
    def description(self):
        return self._json_struct['description']
    @property
    def extent(self):
        # The layer's extent, parsed into a geometry object.
        return geometry.fromJson(self._json_struct['extent'])
    @property
    def dataType(self):
        return self._json_struct['dataType']
    @property
    def maxDistance(self):
        return self._json_struct['maxDistance']
    @property
    def minDistance(self):
        return self._json_struct['minDistance']
    @property
    def samplingMode(self):
        return self._json_struct['samplingMode']
    @property
    def baseID(self):
        return self._json_struct['baseID']
    @property
    def baseOption(self):
        return self._json_struct['baseOption']
    @property
    def extrusionType(self):
        return self._json_struct['extrusionType']
    @property
    def extrusionExpression(self):
        return self._json_struct['extrusionExpression']
    @property
    def cullMode(self):
        return self._json_struct['cullMode']
    @property
    def copyrightText(self):
        return self._json_struct['copyrightText']
    @property
    def displayField(self):
        return self._json_struct['displayField']
    @property
    def fields(self):
        return self._json_struct['fields']
    @property
    def parentLayer(self):
        # Layers are siblings under the service root, hence the ../ URLs.
        return self._get_subfolder("../%s/" %
                                       self._json_struct['parentLayer']['id'],
                                   GlobeLayer)
    @property
    def subLayers(self):
        return [self._get_subfolder("../%s/" % layer['id'], GlobeLayer)
                for layer in self._json_struct['subLayers']]
@Folder._register_service_type
class GlobeService(Service):
    """The globe service resource represents a globe service published with
       ArcGIS Server. The resource provides information about the service such
       as the service description and the various layers contained in the
       published globe document."""
    __service_type__ = "GlobeServer"
    @property
    def layernames(self):
        """Names of this globe service's layers."""
        return [lyr['name'] for lyr in self._json_struct['layers']]
    @property
    def layers(self):
        """GlobeLayer objects for each of this globe service's layers."""
        return [self._get_subfolder("./%s/" % lyr['id'], GlobeLayer)
                for lyr in self._json_struct['layers']]
# NOTE(review): _register_service_type is used elsewhere in this file only on
# Service subclasses that define __service_type__; FeatureLayerFeature is a
# plain resource with neither, so this decorator looks misplaced -- confirm
# whether it belongs on a Service class instead before removing it.
@Folder._register_service_type
class FeatureLayerFeature(object):
    """The feature resource represents a single feature in a layer in a feature
    service."""
    @property
    def geometry(self):
        """The feature's geometry with its attributes attached, or a
        NullGeometry (attributes only) when the feature has no geometry."""
        if 'geometry' in self._json_struct['feature']:
            geom = geometry.fromJson(
                        self._json_struct['feature'].get('geometry',
                                                         None),
                        self._json_struct['feature'].get('attributes',
                                                         {}))
        else:
            geom = geometry.NullGeometry()
        # Attributes are attached in both branches.
        geom.attributes = self._json_struct['feature'].get('attributes',
                                                           {})
        return geom
    @property
    def attributes(self):
        """The feature's attribute dictionary ({} when absent)."""
        return self._json_struct['feature'].get('attributes',
                                                {})
    @property
    def attachments(self):
        """The attachment infos resource for this feature."""
        return self._get_subfolder("./attachments/", AttachmentInfos)
    def AddAttachment(self, attachment=None):
        """This operation adds an attachment to the associated feature (POST
           only). The add attachment operation is performed on a feature
           service feature resource."""
        return self._get_subfolder("./addAttachment", JsonPostResult,
                                   {'attachment': attachment})
    def UpdateAttachment(self, attachmentId=None, attachment=None):
        """This operation updates an attachment associated with a feature
           (POST only). The update attachment operation is performed on a
           feature service feature resource."""
        return self._get_subfolder("./updateAttachment", JsonPostResult,
                                   {'attachment': attachment,
                                    'attachmentId': attachmentId})
    def DeleteAttachments(self, attachmentIds=None):
        """This operation deletes attachments associated with a feature (POST
           only). The delete attachments operation is performed on a feature
           service feature resource."""
        return self._get_subfolder("./deleteAttachments", JsonPostResult,
                                   {'attachmentIds': attachmentIds})
class FeatureLayer(MapLayer):
    """The layer resource represents a single editable feature layer or non
       spatial table in a feature service."""
    def __getitem__(self, index):
        """Get a feature by featureId"""
        return self._get_subfolder(str(index), FeatureLayerFeature)
    def Feature(self, featureId):
        """Return a feature from this FeatureService by its ID"""
        return self[featureId]
    def QueryRelatedRecords(self, objectIds=None, relationshipId=None,
                            outFields=None, definitionExpression=None,
                            returnGeometry=None, outSR=None):
        """The query operation is performed on a feature service layer
           resource. The result of this operation are featuresets grouped by
           source layer / table object IDs. Each featureset contains Feature
           objects including the values for the fields requested by the user.
           For related layers, if you request geometry information, the
           geometry of each feature is also returned in the featureset. For
           related tables, the featureset does not include geometries.
           Returns the raw JSON structure of the response."""
        out = self._get_subfolder("./queryRelatedRecords", JsonResult,
                                  {'objectIds': objectIds,
                                   'relationshipId': relationshipId,
                                   'outFields': outFields,
                                   'definitionExpression':
                                       definitionExpression,
                                   'returnGeometry': returnGeometry,
                                   'outSR': outSR})
        return out._json_struct
    def AddFeatures(self, features):
        """This operation adds features to the associated feature layer or
           table (POST only). The result is an array of edit results, each
           identifying a single feature and indicating whether the edit
           succeeded (with an error code and description when it did not)."""
        # NOTE(review): the feature structs are joined without enclosing
        # brackets -- confirm the server accepts this form.
        fd = {'features': ",".join(json.dumps(
                                       feature._json_struct_for_featureset)
                                   for feature in features)}
        return self._get_subfolder("./addFeatures", JsonPostResult, fd)
    def UpdateFeatures(self, features):
        """This operation updates features in the associated feature layer or
           table (POST only). The result is an array of edit results, each
           identifying a single feature and indicating whether the edit
           succeeded (with an error code and description when it did not)."""
        fd = {'features': ",".join(json.dumps(
                                       feature._json_struct_for_featureset)
                                   for feature in features)}
        return self._get_subfolder("./updateFeatures", JsonPostResult, fd)
    def DeleteFeatures(self, objectIds=None, where=None, geometry=None,
                       inSR=None, spatialRel=None):
        """This operation deletes features in a feature layer or table (POST
           only). Features to delete may be selected by objectIds, a where
           clause, and/or a spatial filter (geometry + spatialRel). The
           result is an array of edit results, each identifying a single
           feature and indicating whether the edit succeeded.

           BUG FIX: this method previously referenced the undefined names
           `sr`, `Geometry` and `geometryType` and crashed whenever it was
           called; it also required a geometry even though the REST API
           allows deletion by objectIds or where clause alone."""
        geometry_type = None
        geo_json = None
        if geometry is not None:
            geometry_type = geometry.__geometry_type__
            if inSR is None:
                inSR = geometry.spatialReference.wkid
            geo_json = json.dumps(geometry._json_struct_without_sr)
        return self._get_subfolder("./deleteFeatures", JsonPostResult,
                                   {'objectIds': objectIds,
                                    'where': where,
                                    'geometry': geo_json,
                                    'geometryType': geometry_type,
                                    'inSR': inSR,
                                    'spatialRel': spatialRel})
    def ApplyEdits(self, adds=None, updates=None, deletes=None):
        """This operation adds, updates and deletes features to the associated
           feature layer or table in a single call (POST only). The result of
           this operation are 3 arrays of edit results (for adds, updates and
           deletes respectively). Each edit result identifies a single
           feature and indicates if the edit were successful or not. If not,
           it also includes an error code and an error description."""
        add_str, update_str = None, None
        if adds:
            add_str = ",".join(json.dumps(
                                   feature._json_struct_for_featureset)
                               for feature in adds)
        if updates:
            update_str = ",".join(json.dumps(
                                      feature._json_struct_for_featureset)
                                  for feature in updates)
        # deletes is passed through unmodified (object id list).
        return self._get_subfolder("./applyEdits", JsonPostResult,
                                   {'adds': add_str,
                                    'updates': update_str,
                                    'deletes': deletes})
@Folder._register_service_type
class FeatureService(Service):
    """A feature service allows clients to query and edit features. Features
       include geometry, attributes and symbology and are organized into
       layers and sub types within a layer."""
    __service_type__ = "FeatureServer"
    @property
    def layernames(self):
        """Names of this service's layers."""
        return [lyr['name'] for lyr in self._json_struct['layers']]
    @property
    def layers(self):
        """FeatureLayer objects for each layer in this service."""
        return [self._get_subfolder("%s/" % lyr['id'], FeatureLayer)
                for lyr in self._json_struct['layers']]
    @property
    def tablenames(self):
        """Names of this service's non-spatial tables."""
        return [tbl['name']
                for tbl in self._json_struct.get('tables', [])]
    @property
    def tables(self):
        """FeatureLayer objects for each non-spatial table in this service."""
        return [self._get_subfolder("%s/" % tbl['id'], FeatureLayer)
                for tbl in self._json_struct.get('tables', [])]
# GP tasks work again
"""The ArcGIS Server REST API, short for Representational State Transfer,
provides a simple, open Web interface to services hosted by ArcGIS Server.
All resources and operations exposed by the REST API are accessible through
a hierarchy of endpoints or Uniform Resource Locators (URLs) for each GIS
service published with ArcGIS Server."""
import cgi
import json
import mimetypes
import os
import urllib
import urllib2
import urlparse
import uuid
import geometry
import gptypes
import utils
#: User agent to report when making requests
USER_AGENT = "Mozilla/4.0 (arcrest)"
# Note that nearly every class below derives from this RestURL class.
# The reasoning is that every object has an underlying URL resource on
# the REST server. Some are static or near-static, such as a folder or a
# service's definition, but some URLs are volatile and represent the
# application of an action, such as Buffering a set of points using the
# geometry service. This class attempts to provide some baseline functionality
# required by the set of operations one performs using the ArcGIS REST API,
# such as making sure the format is always set to json, parsing the json,
# keeping the result in memory as needed, and returning instances of objects
# represented by relative URLs.
class RestURL(object):
    """Represents a top-level, base REST-style URL.

    Nearly every class in this module derives from RestURL: each object
    wraps a URL resource on the REST server. This base class makes sure
    the f= format flag is always set to json, parses the json, keeps the
    fetched result in memory when requested, and builds instances of
    objects addressed by relative URLs."""
    __cache_request__ = False     # Fetch every time or just once?
    __urldata__ = Ellipsis        # What actually gets HTTP GETten
    __json_struct__ = Ellipsis    # Cache for json.loads(self.__urldata__)
    __has_json__ = True           # Parse the data as a json struct? Set to
                                  # false for binary data, html, etc.
    __token__ = None              # For token-based auth
    __lazy_fetch__ = True         # Fetch when constructed, or later on?
    __parent_type__ = None        # For automatically generated parent URLs
    __post__ = False              # Move query string to POST
    _parent = None

    def __init__(self, url, file_data=None):
        """Construct from a URL string or a urlparse.urlsplit-style list.

        file_data, when given, is a dict of name/filehandle pairs; its
        presence forces a multipart POST upload and response caching."""
        # Expects a urlparse.urlsplitted list as the url, but accepts a
        # string because that is easier/makes more sense everywhere.
        if isinstance(url, basestring):
            url = urlparse.urlsplit(url)
        # Ellipsis is used instead of None for the case where no data
        # is returned from the server due to an error condition -- we
        # need to differentiate between 'NULL' and 'UNDEFINED'
        self.__urldata__ = Ellipsis
        # Pull out query, whatever it may be
        urllist = list(url)
        query_dict = {}
        # parse_qs returns a dict, but every value is a list (it assumes
        # that keys can be set multiple times like ?a=1&a=2 -- this
        # flexibility is probably useful somewhere, but not here). Pull out
        # the first element of every list so when we convert back to a
        # query string it doesn't enclose all values in []
        for k, v in cgi.parse_qs(urllist[3]).iteritems():
            query_dict[k] = v[0]
            if k.lower() == 'token':
                self.__token__ = v[0]
        # Set the f= flag to json (so we can interface with it)
        if self.__has_json__ is True:
            query_dict['f'] = 'json'
        if self.__token__ is not None:
            query_dict['token'] = self.__token__
        # Hack our modified query string back into URL components
        urllist[3] = urllib.urlencode(query_dict)
        self._url = urllist
        # Finally, set any file data parameters' data to local store.
        # file_data is expected to be a dictionary of name/filehandle
        # pairs if defined. And if there are any files, fetching will
        # automatically become a forced multipart upload. Also, force
        # keeping the results around; uploading data multiple times
        # is probably NEVER what anyone wants to do and file handles
        # can be exhausted.
        self._file_data = file_data
        if file_data:
            self.__cache_request__ = True
        # Nonlazy: force a fetch
        if self.__lazy_fetch__ is False and self.__cache_request__ is True:
            self._contents

    def __repr__(self):
        url = self.url
        if len(url) > 100:
            url = url[:97] + "..."
        return "<%s(%r)>" % (self.__class__.__name__, url)

    def _get_subfolder(self, foldername, returntype, params={}, file_data={}):
        """Return an object of the requested type with the path relative
        to the current object's URL. Optionally, query parameters
        may be set."""
        newurl = urlparse.urljoin(self.url, urllib.quote(foldername), False)
        # Add the key-value pairs sent in params to query string if they
        # are so defined.
        query_dict = {}
        url_tuple = urlparse.urlsplit(newurl)
        urllist = list(url_tuple)
        if params:
            # As above, pull out first element from parse_qs' values
            query_dict = dict((k, v[0]) for k, v in
                              cgi.parse_qs(urllist[3]).iteritems())
            for key, val in params.iteritems():
                # Lowercase bool string
                if isinstance(val, bool):
                    query_dict[key] = str(val).lower()
                # Special case: convert an envelope to .bbox in the bb
                # parameter
                elif isinstance(val, geometry.Envelope):
                    query_dict[key] = val.bbox
                # Another special case: strings can't be quoted/escaped at
                # the top level
                elif isinstance(val, gptypes.GPString):
                    query_dict[key] = val.value
                # Just use the wkid of SpatialReferences
                elif isinstance(val, geometry.SpatialReference):
                    query_dict[key] = val.wkid
                # If it's a list, make it a comma-separated string
                elif isinstance(val, (list, tuple)):
                    # FIX: the joined string was previously computed but
                    # never stored, silently dropping list parameters
                    # (layers=, objectIds=, outFields=...) from the query.
                    query_dict[key] = ",".join([str(v.id)
                                                if isinstance(v, Layer)
                                                else str(v) for v in val])
                # Ignore null values, and coerce string values (hopefully
                # everything sent in to a query has a sane __str__)
                elif val is not None:
                    query_dict[key] = str(val)
        if self.__token__ is not None:
            query_dict['token'] = self.__token__
        # Replace URL query component with newly altered component
        urllist[3] = urllib.urlencode(query_dict)
        newurl = urllist
        # Instantiate new RestURL or subclass
        rt = returntype(newurl, file_data)
        # Remind the resource where it came from
        rt.parent = self
        return rt

    def _clear_cache(self):
        """Forget both the cached raw response and its parsed json."""
        self.__json_struct__ = Ellipsis
        self.__urldata__ = Ellipsis

    @property
    def url(self):
        """The URL as a string of the resource."""
        urlparts = self._url
        if self.__post__:
            urlparts = list(urlparts)
            urlparts[3] = ''  # Clear out query string on POST
        return urlparse.urlunsplit(urlparts)

    @property
    def query(self):
        """The query-string component of this resource's URL."""
        return self._url[3]

    @property
    def _contents(self):
        """The raw contents of the URL as fetched, this is done lazily.
        For non-lazy fetching this is accessed in the object constructor."""
        if self.__urldata__ is Ellipsis or self.__cache_request__ is False:
            if self._file_data:
                # Special-case: do a multipart upload if there's file data
                self.__post__ = True
                boundary = "-"*12+str(uuid.uuid4())+"$"
                multipart_data = ''
                for k, v in cgi.parse_qs(self.query).iteritems():
                    if not isinstance(v, list):
                        v = [v]
                    for val in v:
                        multipart_data += boundary + "\r\n"
                        multipart_data += ('Content-Disposition: form-data; '
                                           'name="%s"\r\n\r\n' % k)
                        multipart_data += val + "\r\n"
                for k, v in self._file_data.iteritems():
                    fn = os.path.basename(getattr(v, 'name', 'file'))
                    # FIX: guess_type returns a (type, encoding) tuple,
                    # which is truthy even when the type is unknown, so
                    # the original fallback never applied and ct could be
                    # None. Take element 0 first, then fall back.
                    ct = (mimetypes.guess_type(fn)[0]
                          or "application/octet-stream")
                    multipart_data += boundary + "\r\n"
                    multipart_data += ('Content-Disposition: form-data; '
                                       'name="%s"; filename="%s"\r\n'
                                       'Content-Type:%s\r\n\r\n' %
                                       (k, fn, ct))
                    multipart_data += v.read() + "\r\n"
                # Trailing boundary: full string is "--" + declared
                # boundary, so this forms the closing "--boundary--".
                multipart_data += boundary + "--\r\n\r\n"
                request = urllib2.Request(self.url, multipart_data,
                                          {'User-Agent': USER_AGENT,
                                           'Content-Type':
                                               'multipart/form-data; '
                                               'boundary='+boundary[2:],
                                           'Content-Length':
                                               str(len(multipart_data))})
            else:
                request = urllib2.Request(self.url, self.query
                                                    if self.__post__
                                                    else None,
                                          {'User-Agent': USER_AGENT})
            handle = urllib2.urlopen(request)
            # Handle the special case of a redirect (only follow once) --
            # Note that only the first 3 components (protocol, hostname,
            # path) are altered as component 4 is the query string, which
            # can get clobbered by the server.
            fetched_url = list(urlparse.urlsplit(handle.url)[:3])
            if fetched_url != list(self._url[:3]):
                self._url[:3] = fetched_url
                return self._contents
            # No redirect, proceed as usual.
            self.__urldata__ = handle.read()
        data = self.__urldata__
        if self.__cache_request__ is False:
            self.__urldata__ = Ellipsis
        return data

    @property
    def _json_struct(self):
        """The json data structure in the URL contents, it will cache this
        if it makes sense so it doesn't parse over and over."""
        if self.__has_json__:
            if self.__cache_request__:
                if self.__json_struct__ is Ellipsis:
                    self.__json_struct__ = json.loads(self._contents)
                return self.__json_struct__
            else:
                return json.loads(self._contents)
        else:
            # Return an empty dict for things so they don't have to special
            # case against a None value or anything
            return {}

    # parent is a read/write property: explicitly assigned by
    # _get_subfolder, or auto-derived from __parent_type__ (Python 2
    # 'apply' trick builds the property from local get/set functions).
    @apply
    def parent():
        def get_(self):
            "Get this object's parent"
            if self._parent:
                return self._parent
            # auto-compute parent if needed
            elif getattr(self, '__parent_type__', None):
                return self._get_subfolder('..' if self._url[2].endswith('/')
                                           else '.', self.__parent_type__)
            else:
                raise AttributeError("%r has no parent attribute" %
                                     type(self))
        def set_(self, val):
            self._parent = val
        return property(get_, set_)
# On top of a URL, the ArcGIS Server folder structure lists subfolders
# and services.
class Folder(RestURL):
    """Represents a folder path on an ArcGIS REST server.

    On top of a URL, the ArcGIS Server folder structure lists subfolders
    and services; this class exposes both, with attribute- and item-style
    lookup of children."""
    # A folder listing is near-static; fetch once and cache it.
    __cache_request__ = True
    # Conversion table from type string to class instance.
    _service_type_mapping = {}

    @classmethod
    def _register_service_type(cls, subclass):
        """Registers subclass handlers of various service-type-specific
        service implementations. Look for classes decorated with
        @Folder._register_service_type for hints on how this works."""
        if hasattr(subclass, '__service_type__'):
            cls._service_type_mapping[subclass.__service_type__] = subclass
            if subclass.__service_type__:
                # Also expose e.g. service.MapServer as a self-returning
                # property so unambiguous services can be accessed the
                # same way as members of an AmbiguousService.
                setattr(subclass,
                        subclass.__service_type__,
                        property(lambda x: x))
        return subclass

    @property
    def __members__(self):
        # Supports introspection (dir()) over everything reachable here.
        return sorted(self.foldernames +
                      list(self.servicenames) +
                      self.clusternames)

    @property
    def foldernames(self):
        "Returns a list of folder names available from this folder."
        return [folder.strip('/').split('/')[-1] for folder
                in self._json_struct.get('folders', [])]

    @property
    def folders(self):
        "Returns a list of Folder objects available in this folder."
        return [self._get_subfolder(fn+'/', Folder) for fn in self.foldernames]

    @property
    def clusternames(self):
        "Returns a list of cluster names available from this folder."
        return [cluster.strip('/').split('/')[-1] for cluster
                in self._json_struct.get('clusters', [])]

    @property
    def clusters(self):
        "Returns a list of Folder objects available in this folder."
        return [self._get_subfolder(fn+'/', Folder) for fn in self.clusternames]

    @property
    def servicenames(self):
        "Give the list of services available in this folder."
        return set([service['name'].rstrip('/').split('/')[-1]
                    for service in self._json_struct.get('services', [])])

    @property
    def services(self):
        "Returns a list of Service objects available in this folder"
        # Each service is instantiated as its registered type-specific
        # class, falling back to the generic Service.
        return [self._get_subfolder("%s/%s/" %
                (s['name'].rstrip('/').split('/')[-1], s['type']),
                self._service_type_mapping.get(s['type'], Service)) for s
                in self._json_struct.get('services', [])]

    @property
    def url(self):
        """The URL as a string of the resource."""
        # Folder URLs must end in a slash for relative joins to resolve.
        if not self._url[2].endswith('/'):
            self._url[2] += '/'
        return RestURL.url.__get__(self)

    def __getattr__(self, attr):
        # Attribute access falls through to item lookup so that
        # folder.ServiceName works like folder['ServiceName'].
        return self[attr]

    def __getitem__(self, attr):
        """Look up a child by name.

        Resolution order matters: folder name, then cluster name, then a
        'Name_Type' qualified service, then a bare service name (which
        may be ambiguous across service types -- in that case an object
        with one attribute per service type is returned)."""
        # If it's a folder, easy:
        if attr in self.foldernames:
            return self._get_subfolder(attr, Folder)
        elif attr in self.clusternames:
            return self._get_subfolder(attr, Folder)
        services = [x.copy() for x in self._json_struct['services']]
        # Strip out relative paths
        for service in services:
            service['name'] = service['name'].rstrip('/').split('/')[-1]
        # Handle the case of Folder_Name being potentially of Service_Type
        # format
        if '_' in attr:  # May have a Name_Type service here
            al = attr.rstrip('/').split('/')[-1].split('_')
            servicetype = al.pop()
            untyped_attr = '_'.join(al)
            matchingservices = [svc for svc in services
                                if svc['name'] == untyped_attr
                                and svc['type'] == servicetype]
            if len(matchingservices) == 1:
                return self._get_subfolder("%s/%s/" %
                                           (untyped_attr, servicetype),
                                           self._service_type_mapping.get(
                                               servicetype, Service))
        # Then match by service name
        matchingservices = [svc for svc in services if svc['name'] == attr]
        # Found more than one match, there is ambiguity so return an
        # object holding .ServiceType attributes representing each service.
        if len(matchingservices) > 1:
            # Return an instance with accessors for overlapping services
            class AmbiguousService(object):
                """This service name has multiple service types."""
            ambiguous = AmbiguousService()
            for svc in matchingservices:
                attr, servicetype = svc['name'], svc['type']
                service = self._get_subfolder("%s/%s/" % (attr, servicetype),
                                              self._service_type_mapping.get(
                                                  servicetype, Service))
                setattr(ambiguous, servicetype, service)
            return ambiguous
        # Just one match, can return itself.
        elif len(matchingservices) == 1:
            servicetype = matchingservices[0]['type']
            return self._get_subfolder("%s/%s/" % (attr, servicetype),
                                       self._service_type_mapping.get(
                                           servicetype, Service))
        raise AttributeError("No service or folder named %r found" % attr)
# A catalog root functions the same as a folder, so treat Catalog as just a
# special case of Folder
class Catalog(Folder):
    """The catalog resource is the root node and initial entry point into an
    ArcGIS Server host. This resource represents a catalog of folders and
    services published on the host."""
    # NOTE: these are class-level (shared across all Catalog instances),
    # and build_opener/install_opener run once at class-creation time,
    # installing a process-global opener.
    _pwdmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
    _handler = urllib2.HTTPBasicAuthHandler(_pwdmgr)
    """Class-level password manager -- if a Catalog is constructed with a
    username/password pair for HTTP auth it will be handled by this."""
    _opener = urllib2.build_opener(_handler)
    urllib2.install_opener(_opener)

    def __init__(self, url, username=None, password=None, token=None):
        """Open the server root URL. username/password register HTTP
        Basic auth credentials for this url; token enables ArcGIS
        token-based auth."""
        if username is not None and password is not None:
            self.__class__._pwdmgr.add_password(None, url, username, password)
        url_ = list(urlparse.urlsplit(url))
        if not url_[2].endswith('/'):
            url_[2] += "/"
        if token is not None:
            self.__token__ = token
        super(Catalog, self).__init__(url_)
        # Basically a Folder, but do some really, really rudimentary sanity
        # checking (look for folders/services, make sure format is JSON) so
        # we can verify this URL behaves like a Folder -- catch errors early
        # before any other manipulations go on.
        assert 'folders' in self._json_struct, "No folders in catalog root"
        assert 'services' in self._json_struct, "No services in catalog root"

    @property
    def currentVersion(self):
        # Falls back to 9.3 when the server reports no currentVersion --
        # presumably an older server; TODO confirm against target hosts.
        return self._json_struct.get('currentVersion', 9.3)
# Definitions for classes calling/manipulating services
class Service(RestURL):
    """Represents an ArcGIS REST service. This is an abstract base --
    concrete service types derive from this."""
    __cache_request__ = True
    __service_type__ = None
    __parent_type__ = Folder

    def __init__(self, url, file_data=None):
        """Accept a URL string or a pre-split component list; normalize
        the path component to end with a slash."""
        if isinstance(url, (tuple, list)):
            parts = url
        else:
            parts = list(urlparse.urlsplit(url))
        if not parts[2].endswith('/'):
            parts[2] += "/"
        super(Service, self).__init__(parts, file_data)

    @property
    def serviceDescription(self):
        """Short description of the service; None when the service or
        service type provides no description."""
        return self._json_struct.get('serviceDescription', None)

    def __repr__(self):
        description = self.serviceDescription
        suffix = (" - %r" % description) if description else ''
        return "<%s%s (%r)>" % (self.__service_type__, suffix, self.url)
class ServerError(Exception):
    """Raised when the REST server responds with an error payload."""
class Result(RestURL):
    """Abstract base for the outcome of an operation performed against a
    REST service."""
    # A result is a snapshot: fetch it immediately and keep the single
    # response around.
    __lazy_fetch__ = False
    __cache_request__ = True
class BinaryResult(Result):
    """Class representing the result of an operation performed on a service
    with some sort of opaque binary data, such as a PNG or KMZ. Contrast to
    a JsonResult, which has an immediately accessible data structure."""
    __has_json__ = False

    @property
    def data(self):
        """Return the raw data from this request"""
        return self._contents

    def save(self, outfile):
        """Save the binary data to a file path or a file-like object.

        FIX: when given a path, close the handle we open instead of
        relying on garbage collection to flush and release it."""
        if isinstance(outfile, basestring):
            with open(outfile, 'wb') as handle:
                handle.write(self._contents)
        else:
            outfile.write(self._contents)
class JsonResult(Result):
    """Result specialization for operations that return a json payload.
    Raises ServerError at construction time when the payload reports a
    failure."""
    __has_json__ = True

    def __init__(self, url, file_data=None):
        super(JsonResult, self).__init__(url, file_data)
        struct = self._json_struct
        if 'error' in struct:
            error = struct['error']
            detailstring = ", ".join(error.get('details', []))
            if detailstring:
                detailstring = " -- " + detailstring
            raise ServerError("ERROR %r: %r%s <%s>" %
                              (error['code'],
                               error['message'] or 'Unspecified',
                               detailstring,
                               self.url))
        elif "status" in struct and struct['status'] == "error":
            raise ServerError(''.join(
                struct.get('messages',
                           [struct.get('message', 'Unspecified Error')])))
class JsonPostResult(JsonResult):
    """A REST call whose parameters travel in the body of a POST request
    instead of the query string of a GET."""
    __post__ = True
class Layer(RestURL):
    """Base class for map and network layers."""
    __lazy_fetch__ = False      # Force-fetch immediately
    __cache_request__ = True    # Only request the URL once
# Service implementations -- mostly simple conversion wrappers for the
# functionality handled up above, wrapper types for results, etc.
class AttachmentData(BinaryResult):
    """The binary attachment payload associated with a layer's feature."""
    __lazy_fetch__ = True    # Attachments can be large; fetch on demand.
class AttachmentInfos(JsonResult):
    """The attachment infos resource returns information about attachments
    associated with a feature. This resource is available only if the layer
    has advertised that it has attachments. A layer has attachments if its
    hasAttachments property is true."""

    @property
    def attachments(self):
        """Return a list of attachment info dicts; each dict gains an
        'attachment' key holding an AttachmentData resource for the
        binary payload.

        FIXES: the original referenced the undefined name
        self_get_subfolder (missing dot) and never returned the
        collection it built."""
        results = []
        for attachment in self._json_struct['attachmentInfos']:
            attachment_dict = attachment.copy()
            attachment_dict['attachment'] = self._get_subfolder(
                "%i/" % attachment_dict['id'], AttachmentData)
            results.append(attachment_dict)
        return results
class MapLayer(Layer):
    """The layer resource represents a single layer or standalone table in a
    map of a map service published by ArcGIS Server. It provides basic
    information about the layer such as its name, type, parent and
    sub-layers, fields, min and max scales, extent, and copyright text."""

    def QueryLayer(self, text=None, Geometry=None, inSR=None,
                   spatialRel='esriSpatialRelIntersects', where=None,
                   outFields=None, returnGeometry=None, outSR=None,
                   objectIds=None, time=None, maxAllowableOffset=None,
                   returnIdsOnly=None):
        """The query operation is performed on a layer resource. The result
        of this operation is a resultset resource. This resource provides
        information about query results including the values for the fields
        requested by the user. If you request geometry information, the
        geometry of each result is also returned in the resultset.
        B{Spatial Relation Options:}
            - esriSpatialRelIntersects
            - esriSpatialRelContains
            - esriSpatialRelCrosses
            - esriSpatialRelEnvelopeIntersects
            - esriSpatialRelIndexIntersects
            - esriSpatialRelOverlaps
            - esriSpatialRelTouches
            - esriSpatialRelWithin"""
        # FIX: the original sent the geometry *module* as the 'geometry'
        # parameter. Serialize the Geometry argument the same way
        # MapService.Identify does, and pass its geometry type along.
        geo_json, gt = None, None
        if Geometry is not None:
            gt = getattr(Geometry, '__geometry_type__', None)
            geo_json = json.dumps(Geometry._json_struct_without_sr)
        if not inSR:
            if Geometry:
                # SpatialReference instances are reduced to their wkid by
                # _get_subfolder.
                inSR = Geometry.spatialReference
        out = self._get_subfolder("./query", JsonResult, {
                                        'text': text,
                                        'geometry': geo_json,
                                        'geometryType': gt,
                                        'inSR': inSR,
                                        'spatialRel': spatialRel,
                                        'where': where,
                                        'outFields': outFields,
                                        'returnGeometry': returnGeometry,
                                        'outSR': outSR,
                                        'objectIds': objectIds,
                                        'time':
                                            utils.pythonvaluetotime(time),
                                        'maxAllowableOffset':
                                            maxAllowableOffset,
                                        'returnIdsOnly': returnIdsOnly
                                  })
        return gptypes.GPFeatureRecordSetLayer.fromJson(out._json_struct)

    @property
    def id(self):
        """Numeric id of this layer within its map service."""
        return self._json_struct['id']

    @property
    def name(self):
        """This layer's name."""
        return self._json_struct['name']

    @property
    def type(self):
        """This layer's type string."""
        return self._json_struct['type']

    @property
    def geometryType(self):
        """esriGeometry* type string of this layer's features."""
        return self._json_struct['geometryType']

    @property
    def copyrightText(self):
        """This layer's copyright text."""
        return self._json_struct['copyrightText']

    @property
    def parentLayer(self):
        """The parent MapLayer of this layer."""
        return self._get_subfolder("../%s/" %
                                   self._json_struct['parentLayer']['id'],
                                   MapLayer)

    @property
    def subLayers(self):
        """The child MapLayers of this layer.

        FIX: sub-layer entries carry their own 'id' key; the original
        indexed ['parentLayer']['id'], which raised KeyError."""
        return [self._get_subfolder("../%s/" % layer['id'], MapLayer)
                for layer in self._json_struct['subLayers']]

    @property
    def minScale(self):
        """Minimum display scale for this layer."""
        return self._json_struct['minScale']

    @property
    def maxScale(self):
        """Maximum display scale for this layer."""
        return self._json_struct['maxScale']

    @property
    def extent(self):
        """This layer's extent as a geometry object."""
        return geometry.fromJson(self._json_struct['extent'])

    @property
    def displayField(self):
        """Name of this layer's display field."""
        return self._json_struct['displayField']

    @property
    def fields(self):
        """List of this layer's field definitions."""
        return self._json_struct['fields']

    @property
    def types(self):
        """List of this layer's sub types ([] when absent)."""
        return self._json_struct.get('types', [])

    @property
    def relationships(self):
        """List of this layer's relationships ([] when absent)."""
        return self._json_struct.get('relationships', [])

    @property
    def timeInfo(self):
        """Return the time info for this layer, with timeExtent converted
        to python values; None when the layer is not time-aware."""
        time_info = self._json_struct.get('timeInfo', {})
        if not time_info:
            return None
        time_info = time_info.copy()
        if 'timeExtent' in time_info:
            time_info['timeExtent'] = utils.timetopythonvalue(
                time_info['timeExtent'])
        return time_info

    @property
    def hasAttachments(self):
        """True when this layer advertises feature attachments."""
        return self._json_struct.get('hasAttachments', False)

    @property
    def attachments(self):
        """Attachment info dicts for this layer; [] when the layer does
        not support attachments."""
        if not self.hasAttachments:
            return []
        return self._get_subfolder("attachments/", AttachmentInfos).attachments
class MapTile(BinaryResult):
    """A single cached tile image streamed from a map service."""
class ExportMapResult(JsonResult):
    """Represents the result of an Export Map operation performed on a Map
    Service."""

    @property
    def href(self):
        """URL of the exported map image."""
        return self._json_struct['href']

    @property
    def width(self):
        """Width in pixels of the exported image."""
        return self._json_struct['width']

    @property
    def height(self):
        """Height in pixels of the exported image."""
        return self._json_struct['height']

    @property
    def extent(self):
        """Extent of the exported image as a geometry object."""
        return geometry.fromJson(self._json_struct['extent'])

    @property
    def scale(self):
        """Scale of the exported image."""
        return self._json_struct['scale']

    @property
    def data(self):
        """The image bytes, fetched from href on first access and cached
        on the instance."""
        if not hasattr(self, '_data'):
            self._data = urllib2.urlopen(self.href).read()
        return self._data

    def save(self, outfile):
        """Save the image data to a file path or file-like object.

        FIX: when given a path, close the handle we open instead of
        leaking it until garbage collection."""
        if isinstance(outfile, basestring):
            with open(outfile, 'wb') as handle:
                handle.write(self.data)
            return
        assert hasattr(outfile, 'write') and callable(outfile.write), \
            "Expect a file or file-like object with a .write() method"
        outfile.write(self.data)
class IdentifyOrFindResult(JsonResult):
    """Represents the result of a Find or Identify operation performed on a
    Map Service."""

    @property
    def results(self):
        """Return a GPFeatureRecordSetLayer of one geometry per result
        row; each geometry's .attributes dict carries the row's
        attributes plus its displayFieldName, value, layerId and
        layerName."""
        def resiter():
            for result in self._json_struct['results']:
                if 'geometry' in result:
                    geom = geometry.fromJson(result['geometry'])
                else:
                    geom = geometry.NullGeometry()
                # FIX (robustness): a result without attributes previously
                # left geom.attributes as None, so the assignments below
                # raised TypeError.
                geom.attributes = result.get('attributes') or {}
                for key in ('displayFieldName', 'value',
                            'layerId', 'layerName'):
                    geom.attributes[key] = result[key]
                yield geom
        return gptypes.GPFeatureRecordSetLayer(list(resiter()),
                                               self.parent.spatialReference)
class ExportKMLResult(BinaryResult):
    """Holds the binary KML/KMZ payload produced by a Map Service's
    KML export operation."""
@Folder._register_service_type
class MapService(Service):
    """Map services offer access to map and layer content. Map services can
    either be cached or dynamic. A map service that fulfills requests with
    pre-created tiles from a cache instead of dynamically rendering part of
    the map is called a cached map service. A dynamic map service requires
    the server to render the map each time a request comes in. Map services
    using a tile cache can significantly improve performance while
    delivering maps, while dynamic map services offer more flexibility."""
    __service_type__ = "MapServer"

    def ExportMap(self, bbox, size=None, dpi=None, imageSR=None, bboxSR=None,
                  format=None, layerDefs=None, layers=None, transparent=False,
                  time=None):
        """The export operation is performed on a map service resource. The
        result of this operation is a map image resource. This resource
        provides information about the exported map image such as its URL,
        its width and height, extent and scale."""
        return self._get_subfolder('export/', ExportMapResult,
                                   {'bbox': bbox,
                                    'size': size,
                                    'dpi': dpi,
                                    'imageSR': imageSR,
                                    'bboxSR': bboxSR,
                                    'format': format,
                                    'layerDefs': layerDefs,
                                    'layers': layers,
                                    'transparent': transparent,
                                    'time':
                                        utils.pythonvaluetotime(time)
                                   })

    def Identify(self, Geometry, sr=None, layers=None, tolerance=1,
                 mapExtent=None, imageDisplay=None, returnGeometry=True):
        """The identify operation is performed on a map service resource. The
        result of this operation is an identify results resource. Each
        identified result includes its name, layer ID, layer name, geometry
        and geometry type, and other attributes of that result as name-value
        pairs."""
        assert hasattr(Geometry, '__geometry_type__'), "Invalid geometry"
        gt = Geometry.__geometry_type__
        if sr is None:
            sr = Geometry.spatialReference.wkid
        geo_json = json.dumps(Geometry._json_struct_without_sr)
        return self._get_subfolder('identify/', IdentifyOrFindResult,
                                   {'geometry': geo_json,
                                    'geometryType': gt,
                                    'sr': sr,
                                    'layers': layers,
                                    'tolerance': tolerance,
                                    'mapExtent': mapExtent,
                                    'imageDisplay': imageDisplay,
                                    'returnGeometry': returnGeometry})

    def Find(self, searchText, contains=True, searchFields=None, sr=None,
             layers=None, returnGeometry=True):
        """The find operation is performed on a map service resource. The
        result of this operation is a find results resource. Each result
        includes its value, feature ID, field name, layer ID, layer name,
        geometry, geometry type, and attributes in the form of name-value
        pairs."""
        return self._get_subfolder('find/', IdentifyOrFindResult,
                                   {'searchText': searchText,
                                    'contains': contains,
                                    'searchFields': searchFields,
                                    'sr': sr,
                                    'layers': layers,
                                    'returnGeometry': returnGeometry})

    def GenerateKML(self, docName, layers, layerOptions='composite'):
        """The generateKml operation is performed on a map service resource.
        The result of this operation is a KML document wrapped in a KMZ
        file. The document contains a network link to the KML Service
        endpoint with properties and parameters you specify.
        B{Layer Options:}
            - composite: (default) All layers as a single composite image.
              Layers cannot be turned on and off in the client.
            - separateImage: Each layer as a separate image.
            - nonComposite: Vector layers as vectors and raster layers as
              images.

        FIX: the original referenced the undefined name GenerateKMLResult;
        the KMZ payload is represented by ExportKMLResult."""
        return self._get_subfolder('generateKml/', ExportKMLResult,
                                   {'docName': docName,
                                    'layers': layers,
                                    'layerOptions': layerOptions})

    def tile(self, row, col, zoomlevel):
        """For cached maps, this resource represents a single cached tile for
        the map. The image bytes for the tile at the specified level, row
        and column are directly streamed to the client. If the tile is not
        found, an HTTP status code of 404 (Not found) is returned.

        FIX: the REST endpoint path is tile/<level>/<row>/<column>; the
        original interpolated (row, col, zoomlevel) in call order and so
        requested the wrong tile."""
        return self._get_subfolder("tile/%s/%s/%s/" % (zoomlevel, row, col),
                                   MapTile)

    @property
    def mapName(self):
        """This map's name"""
        return self._json_struct['mapName']

    @property
    def description(self):
        """This map's description"""
        return self._json_struct['description']

    @property
    def copyrightText(self):
        """This map's copyright text"""
        return self._json_struct['copyrightText']

    @property
    def spatialReference(self):
        """This map's Spatial Reference"""
        return geometry.fromJson(
            self._json_struct['spatialReference'])

    @property
    def initialExtent(self):
        """This map's initial extent"""
        return geometry.fromJson(
            self._json_struct['initialExtent'])

    @property
    def fullExtent(self):
        """This map's full extent"""
        return geometry.fromJson(
            self._json_struct['fullExtent'])

    @property
    def layernames(self):
        """Return a list of the names of this map's layers"""
        return [layer['name'] for layer in self._json_struct['layers']]

    @property
    def layers(self):
        """Return a list of this map's layer objects"""
        return [self._get_subfolder("%s/" % layer['id'], MapLayer)
                for layer in self._json_struct['layers']]

    @property
    def tablenames(self):
        """Return a list of the names of this map's tables"""
        return [table['name'] for table in self._json_struct.get('tables', [])]

    @property
    def tables(self):
        """Return a list of this map's table objects"""
        return [self._get_subfolder("%s/" % table['id'], MapLayer)
                for table in self._json_struct.get('tables', [])]

    @property
    def timeInfo(self):
        """Return the time info for this Map Service"""
        time_info = self._json_struct.get('timeInfo', {})
        if not time_info:
            return None
        time_info = time_info.copy()
        if 'timeExtent' in time_info:
            time_info['timeExtent'] = utils.timetopythonvalue(
                time_info['timeExtent'])
        return time_info

    @property
    def supportedImageFormatTypes(self):
        """Return a list of supported image formats for this Map Service"""
        return [x.strip()
                for x in
                self._json_struct['supportedImageFormatTypes'].split(',')]
class FindAddressCandidatesResult(JsonResult):
    """Result of a geocode operation. The .candidates property is a list
    of candidate-address dicts; the ['location'] entry of each has been
    converted to a geometry.Point."""

    @property
    def candidates(self):
        """A list of candidate addresses (as dictionaries) from a geocode
        operation"""
        converted = []
        for candidate in self._json_struct['candidates']:
            # Copy so the cached json struct is left untouched, then
            # promote the raw point struct to a geometry object.
            entry = candidate.copy()
            entry['location'] = geometry.fromJson(entry['location'])
            converted.append(entry)
        return converted
class ReverseGeocodeResult(JsonResult):
    """Result of a reverse geocode operation: .address is a dict of the
    matched address's fields and .location is the geometry.Point where it
    sits. Individual address fields are also reachable via item or
    attribute access."""

    @property
    def address(self):
        return self._json_struct['address']

    @property
    def location(self):
        return geometry.fromJson(self._json_struct['location'])

    def __getitem__(self, attr):
        return self.address[attr]

    def __getattr__(self, attr):
        # Fall back to address-field lookup for unknown attributes.
        try:
            return self[attr]
        except KeyError as e:
            raise AttributeError(str(e))
@Folder._register_service_type
class GeocodeService(Service):
    """Geocoding is the process of assigning a location, usually in the form
    of coordinate values (points), to an address by comparing the
    descriptive location elements in the address to those present in the
    reference material. Addresses come in many forms, ranging from the
    common address format of a house number followed by the street name and
    succeeding information to other location descriptions such as postal
    zone or census tract. An address includes any type of information that
    distinguishes a place."""
    __service_type__ = "GeocodeServer"

    def FindAddressCandidates(self, outFields=(), outSR=None, **fields):
        """The findAddressCandidates operation is performed on a geocode
        service resource. The result of this operation is a resource
        representing the list of address candidates. This resource
        provides information about candidates including the address,
        location, and score.

        Raises ValueError when any address field the service marks as
        required is missing from **fields. (FIX: the outFields default
        was a mutable []; an empty tuple serializes identically and
        avoids the shared-mutable-default pitfall.)"""
        required_unset_fields = []
        for field in self._json_struct['addressFields']:
            if field['required'] and field['name'] not in fields:
                required_unset_fields.append(field['name'])
        if required_unset_fields:
            raise ValueError("Required field%s not set for Geocode: %s" %
                             ('' if len(required_unset_fields) == 1
                              else 's', ', '.join(required_unset_fields)))
        query = fields.copy()
        query['outFields'] = outFields
        if outSR:
            query['outSR'] = (outSR.wkid
                              if isinstance(outSR, geometry.SpatialReference)
                              else outSR)
        return self._get_subfolder('findAddressCandidates/',
                                   FindAddressCandidatesResult, query)

    def ReverseGeocode(self, location, distance, outSR=None):
        """The reverseGeocode operation is performed on a geocode service
        resource. The result of this operation is a reverse geocoded address
        resource. This resource provides information about all the address
        fields pertaining to the reverse geocoded address as well as its
        exact location."""
        if outSR:
            outSR = (outSR.wkid
                     if isinstance(outSR, geometry.SpatialReference)
                     else outSR)
        return self._get_subfolder('reverseGeocode/', ReverseGeocodeResult,
                                   {'location': location,
                                    'distance': distance,
                                    'outSR': outSR})
class GPMessage(object):
    """A message generated during the execution of a geoprocessing task:
    a type (one of the esriJobMessageType* constants) paired with a
    free-form description such as processing start time, parameter
    values, progress, warnings or errors."""
    __message_types = set(["esriJobMessageTypeInformative",
                           "esriJobMessageTypeWarning",
                           "esriJobMessageTypeError",
                           "esriJobMessageTypeEmpty",
                           "esriJobMessageTypeAbort"])

    def __init__(self, description, type=None):
        """Accept either (description, type) as two arguments, a json
        dict with 'description'/'type' keys, or a two-element sequence."""
        if isinstance(description, dict):
            type = description.get('type')
            description = description.get('description')
        elif isinstance(description, (tuple, list)):
            description, type = description[0], description[1]
        self.description = description
        self.type = type

    def __repr__(self):
        # Strip the shared "esriJobMessageType" prefix for readability.
        short_type = self.type[len('esriJobMessageType'):]
        return "<% 11s: %r>" % (short_type, self.description)
@Folder._register_service_type
class GPService(Service):
    """Geoprocessing is a fundamental part of enterprise GIS operations.
    Geoprocessing provides the data analysis, data management, and data
    conversion tools necessary for all GIS users.

    A geoprocessing service represents a collection of published tools that
    perform tasks necessary for manipulating and analyzing geographic
    information across a wide range of disciplines. Each tool performs one
    or more operations, such as projecting a data set from one map
    projection to another, adding fields to a table, or creating buffer
    zones around features. A tool accepts input (such as feature sets,
    tables, and property values), executes operations using the input data,
    and generates output for presentation in a map or further processing by
    the client. Tools can be executed synchronously (in sequence) or
    asynchronously."""
    __service_type__ = "GPServer"
    @property
    def tasknames(self):
        """Names of every task published by this service."""
        return list(self._json_struct['tasks'])
    @property
    def tasks(self):
        """A GPTask resource for every task published by this service."""
        return [self._get_subfolder("%s/" % name, GPTask)
                for name in self.tasknames]
    @property
    def executionType(self):
        """Returns the execution type of this task."""
        return self._json_struct['executionType']
    @property
    def synchronous(self):
        """Returns a boolean indicating whether this tasks runs synchronously
        (True) or asynchronously (False)."""
        sv = self._json_struct['executionType']
        if sv == 'esriExecutionTypeSynchronous':
            return True
        if sv == 'esriExecutionTypeAsynchronous':
            return False
        raise ValueError("Unknown synchronous value: %r" % sv)
    def __getitem__(self, attr):
        # Dict-style task lookup by name.
        if attr in self.tasknames:
            return self._get_subfolder("%s/" % attr, GPTask)
        raise KeyError("No task named %r found" % attr)
    def __getattr__(self, attr):
        # Attribute-style task lookup, falling back to Service attributes.
        try:
            return self[attr]
        except KeyError:
            return Service.__getattr__(self, attr)
class GPJobStatus(RestURL):
    """This class represents the current/pending status of an asynchronous
    GP Task. Please refer to the GPJob class for more information."""
    # Poll the server on every access while the job is pending; _json_struct
    # below flips this to True once the job reaches a terminal state.
    __cache_request__ = False
    # Lazily-built {result param name: converted value} cache.
    _results = None
    # All the job status codes we are aware of (from Java API)
    job_statuses = set([
        'esriJobCancelled',
        'esriJobCancelling',
        'esriJobDeleted',
        'esriJobDeleting',
        'esriJobExecuting',
        'esriJobFailed',
        'esriJobNew',
        'esriJobSubmitted',
        'esriJobSucceeded',
        'esriJobTimedOut',
        'esriJobWaiting'])
    # If this is the status, self.running = True
    _still_running = set([
        'esriJobCancelling',
        'esriJobDeleting',
        'esriJobExecuting',
        'esriJobNew',
        'esriJobSubmitted',
        'esriJobWaiting'])
    # If this is the status, then throw an error
    _error_status = set([
        'esriJobCancelled',
        'esriJobDeleted',
        'esriJobFailed',
        'esriJobTimedOut',
        ])
    @property
    def _json_struct(self):
        # Fetch the job JSON via the base property; once the job is no
        # longer running, pin the response so later accesses do not
        # re-hit the server.
        js = RestURL._json_struct.__get__(self)
        if js['jobStatus'] not in self._still_running:
            self.__cache_request__ = True
            self.__json_struct__ = js
        return js
    @property
    def jobId(self):
        """Server-assigned unique identifier for this job."""
        return self._json_struct['jobId']
    @property
    def jobStatus(self):
        """Raw esriJob* status string for this job."""
        return self._json_struct['jobStatus']
    @property
    def running(self):
        """True while the job is still pending or executing on the server."""
        return self._json_struct['jobStatus'] in self._still_running
    @property
    def results(self):
        """Converted result parameters as a {name: value} dict.

        Raises AssertionError if the job is still running, and ServerError
        if the job ended in a cancelled/deleted/failed/timed-out state."""
        assert (not self.running), "Task is still executing."
        if self.jobStatus in self._error_status:
            raise ServerError("Error: job status %r" % self.jobStatus)
        if self._results is None:
            def item_iterator():
                # Fetch each result parameter resource and convert its raw
                # value using the datatype declared on the owning task
                # (self.parent.parent); fall back to str if no parameter
                # definition matches.
                for resref in self._json_struct['results'].itervalues():
                    rel = self._get_subfolder(resref['paramUrl'], RestURL)
                    result = rel._json_struct
                    datatype = None
                    conversion = None
                    for param in self.parent.parent.parameters:
                        if param['name'] == result['paramName']:
                            datatype = param['datatype']
                    if datatype is None:
                        conversion = str
                    else:
                        conversion = datatype.fromJson
                    dt = result['paramName']
                    val = conversion(result['value'])
                    yield (dt, val)
            self._results = dict(item_iterator())
        return self._results
    @property
    def messages(self):
        "Return a list of messages returned from the server."
        return map(GPMessage, self._json_struct['messages'])
    def __getitem__(self, key):
        # Dict-style access to a single result parameter.
        return self.__class__.results.__get__(self)[key]
    def __getattr__(self, attr):
        # Attribute-style access to a single result parameter.
        return self.__class__.results.__get__(self)[attr]
class GPJob(JsonResult):
    """The GP job resource represents a job submitted using the submit job
    operation. It provides basic information about the job such as the job
    ID, status and messages. Additionally, if the job has successfully
    completed, it provides information about the result parameters as well
    as input parameters."""
    # Underlying GPJobStatus resource; every accessor below delegates to it.
    _jobstatus = None
    def __init__(self, url):
        super(GPJob, self).__init__(url)
        job_id = self._json_struct['jobId']
        self._jobstatus = self._get_subfolder('../jobs/%s/' % job_id,
                                              GPJobStatus)
    @property
    def jobId(self):
        "Return the unique ID the server assigned this task"
        return self._jobstatus.jobId
    @property
    def jobStatus(self):
        "Raw esriJob* status string for this job."
        return self._jobstatus.jobStatus
    @property
    def running(self):
        "A boolean (True: job completion pending; False: no longer executing)"
        return self._jobstatus.running
    @property
    def results(self):
        "Returns a dict of outputs from the GPTask execution."
        return self._jobstatus.results
    @property
    def messages(self):
        "Return a list of messages returned from the server."
        return self._jobstatus.messages
    def __getitem__(self, key):
        # Dict-style access to a single result parameter.
        return self._jobstatus.results[key]
    def __getattr__(self, attr):
        # Attribute-style access to a single result parameter.
        return self._jobstatus.results[attr]
class GPExecutionResult(JsonResult):
    """The GPExecutionResult object represents the output of running a
    synchronous GPTask."""
    # Cache for the converted results dict, built on first access.
    _results = None
    @property
    def messages(self):
        "Return a list of messages returned from the server."
        return map(GPMessage, self._json_struct['messages'])
    @property
    def results(self):
        "Returns a dict of outputs from the GPTask execution."
        if self._results is None:
            results = self._json_struct['results']
            def result_iterator():
                # Yield (name, value) pairs, converting each raw JSON value
                # with the datatype declared by the owning task's parameter
                # definition, or str when no definition matches.
                for result in results:
                    datatype = None
                    for param in self.parent.parameters:
                        if param['name'] == result['paramName']:
                            datatype = param['datatype']
                    if datatype is None:
                        conversion = str
                    else:
                        conversion = datatype.fromJson
                    yield (result['paramName'], conversion(result['value']))
            # dict() consumes the generator directly; the original wrapped
            # it in a redundant `res for res in ...` generator expression.
            self._results = dict(result_iterator())
        return self._results
    @property
    def running(self):
        "For method compatibility with GPJob, always return false"
        return False
    def __getitem__(self, key):
        return self.__class__.results.__get__(self)[key]
    def __getattr__(self, attr):
        return self.__class__.results.__get__(self)[attr]
class GPTask(RestURL):
    """The GP task resource represents a single task in a GP service published
    using the ArcGIS Server. It provides basic information about the task
    including its name and display name. It also provides detailed
    information about the various input and output parameters exposed by the
    task"""
    __parent_type__ = GPService
    __cache_request__ = True
    def __init__(self, url, file_data=None):
        # Force a trailing slash on the task URL so relative subfolders
        # ('execute/', 'submitJob/') resolve correctly.
        if isinstance(url, basestring):
            url = list(urlparse.urlsplit(url))
            if not url[2].endswith('/'):
                url[2] += '/'
        super(GPTask, self).__init__(url, file_data)
    def __expandparamstodict(self, params, kw):
        """Merge positional and keyword task arguments into one dict keyed
        by the task's declared parameter names, coercing each value to the
        JSON structure of its declared GP datatype.

        Raises KeyError when a parameter is supplied both positionally and
        by keyword."""
        self_parameters = self.parameters
        parametervalues = dict(zip((p['name'] for p in self_parameters),
                                   params))
        # Distinct loop name: the original wrote `for kw, kwval in
        # kw.iteritems()`, shadowing the dict being iterated.
        for kwname, kwval in kw.iteritems():
            if kwname in parametervalues:
                raise KeyError("Multiple definitions of parameter %r"
                               % kwname)
            parametervalues[kwname] = kwval
        for param_to_convert in self_parameters:
            if param_to_convert['name'] in parametervalues:
                val = parametervalues[param_to_convert['name']]
                if not isinstance(val, param_to_convert['datatype']):
                    parametervalues[param_to_convert['name']] = \
                        param_to_convert['datatype'](val)._json_struct
        return parametervalues
    def Execute(self, *params, **kw):
        """Synchronously execute the specified GP task. Parameters are passed
        in either in order or as keywords."""
        fp = self.__expandparamstodict(params, kw)
        return self._get_subfolder('execute/', GPExecutionResult, fp)
    def SubmitJob(self, *params, **kw):
        """Asynchronously execute the specified GP task. This will return a
        Geoprocessing Job object. Parameters are passed in either in order
        or as keywords."""
        fp = self.__expandparamstodict(params, kw)
        return self._get_subfolder('submitJob/', GPJob, fp)._jobstatus
    def __call__(self, *params, **kw):
        """Either execute the task, if it is synchronous, or submit it as a
        job, if it is asynchronous. Note that the GPJob and GPExecutionResult
        objects both have the C{.running} property that will return True
        while the job is running in the case of a job, and always return
        False with the case of the execution result. This can be used to
        treat both types of execution as the same in your code; with the
        idiom

        >>> result = task(Param_1, Param_2, Param_3, ...)
        >>> while result.running:
        ...     time.sleep(0.125)
        >>> print result.Output1
        """
        if self.synchronous:
            return self.Execute(*params, **kw)
        else:
            return self.SubmitJob(*params, **kw)
    @property
    def name(self):
        """Task name (empty string when the server omits it)."""
        return self._json_struct.get('name', '')
    @property
    def displayName(self):
        """Human-readable task name."""
        return self._json_struct['displayName']
    @property
    def category(self):
        """Category the task is published under."""
        return self._json_struct['category']
    @property
    def helpUrl(self):
        """URL of the task's help document."""
        return self._json_struct['helpUrl']
    @property
    def parameters(self):
        """Raw parameter definitions, each augmented with a 'datatype' key
        holding the concrete GP type class for that parameter."""
        parameters = self._json_struct['parameters']
        for parameter in parameters:
            dt = parameter['dataType']
            parameter['datatype'] = \
                gptypes.GPBaseType._get_type_by_name(
                    dt)._from_json_def(parameter)
        return parameters
    @property
    def executionType(self):
        """Returns the execution type of this task."""
        return self.parent.executionType
    @property
    def synchronous(self):
        """Returns a boolean indicating whether this tasks runs synchronously
        (True) or asynchronously (False)."""
        return self.parent.synchronous
class GeometryResult(JsonResult):
    """Represents the output of a Project, Simplify or Buffer operation
    performed by an ArcGIS REST API Geometry service."""
    @property
    def geometries(self):
        """Parsed geometry objects, one per JSON geometry returned."""
        raw = self._json_struct['geometries']
        return [geometry.fromJson(item) for item in raw]
class LengthsResult(JsonResult):
    """Represents the output of a Lengths operation performed by an ArcGIS
    REST API Geometry service."""
    @property
    def lengths(self):
        """List of lengths as floats, one per input polyline."""
        # The original `map(float(length) for length in ...)` called map()
        # with a single generator argument, which is a TypeError; a list
        # comprehension performs the intended conversion.
        return [float(length) for length in self._json_struct['lengths']]
class AreasAndLengthsResult(LengthsResult):
    """Represents the output of a AreasAndLengths operation performed by an
    ArcGIS REST API Geometry service."""
    @property
    def areas(self):
        """List of areas as floats, one per input polygon."""
        # The original `map(float(area) for area in ...)` called map() with
        # a single generator argument, which is a TypeError; a list
        # comprehension performs the intended conversion.
        return [float(area) for area in self._json_struct['areas']]
class LabelPointsResult(JsonResult):
    """Represents the output of a Label Points operation
    performed by an ArcGIS REST API Geometry service."""
    @property
    def labelPoints(self):
        """Label points for the provided polygon(s)."""
        raw = self._json_struct['labelPoints']
        return [geometry.fromJson(item) for item in raw]
@Folder._register_service_type
class GeometryService(Service):
    """A geometry service contains utility methods, which provide access to
    sophisticated and frequently used geometric operations. An ArcGIS Server
    Web site can only expose one geometry service with the static name
    "Geometry." Note that geometry input and output, where required, are
    always packaged as an array."""
    __service_type__ = "GeometryServer"
    # ---- internal request-packing helpers (shared by the operations) ----
    @staticmethod
    def _as_list(geometries):
        """Wrap a bare Geometry in a one-element list; pass lists through."""
        if isinstance(geometries, geometry.Geometry):
            return [geometries]
        return geometries
    @classmethod
    def _pack_geometries(cls, geometries):
        """Return (geometries-as-list, JSON payload) for a homogeneous
        geometry array, in the format the REST geometry operations expect.

        Raises AssertionError if the array mixes geometry types."""
        geometries = cls._as_list(geometries)
        geometry_types = set([x.__geometry_type__ for x in geometries])
        assert len(geometry_types) == 1, "Too many geometry types"
        geo_json = json.dumps({'geometryType': list(geometry_types)[0],
                               'geometries': [geo._json_struct_without_sr
                                              for geo in geometries]
                               })
        return geometries, geo_json
    @classmethod
    def _pack_checked(cls, geoms, required_type, message):
        """Return (geoms-as-list, JSON array payload), asserting every item
        is an instance of required_type (e.g. Polygon or Polyline)."""
        geoms = cls._as_list(geoms)
        assert all(isinstance(g, required_type) for g in geoms), message
        return geoms, json.dumps([g._json_struct_without_sr for g in geoms])
    def Project(self, geometries, inSR=None, outSR=None):
        """The project operation is performed on a geometry service resource.
        The result of this operation is an array of projected geometries.
        This resource projects an array of input geometries from an input
        spatial reference to an output spatial reference.

        inSR defaults to the first geometry's spatial reference; outSR is
        required."""
        geometries, geo_json = self._pack_geometries(geometries)
        if inSR is None:
            inSR = geometries[0].spatialReference.wkid
        assert outSR, "Cannot project to an empty output projection."
        return self._get_subfolder('project', GeometryResult,
                                   {'geometries': geo_json,
                                    'inSR': inSR,
                                    'outSR': outSR
                                   })
    def Simplify(self, geometries, sr=None):
        """The simplify operation is performed on a geometry service resource.
        Simplify permanently alters the input geometry so that the geometry
        becomes topologically consistent. This resource applies the ArcGIS
        simplify operation to each geometry in the input array. For more
        information, see ITopologicalOperator.Simplify Method and
        IPolyline.SimplifyNetwork Method."""
        geometries, geo_json = self._pack_geometries(geometries)
        if sr is None:
            sr = geometries[0].spatialReference.wkid
        return self._get_subfolder('simplify', GeometryResult,
                                   {'geometries': geo_json,
                                    'sr': sr
                                   })
    def Buffer(self, geometries, distances, unit=None, unionResults=False,
               inSR=None, outSR=None, bufferSR=None):
        """The buffer operation is performed on a geometry service resource.
        The result of this operation is buffer polygons at the specified
        distances for the input geometry array. An option is available to
        union buffers at each distance."""
        geometries, geo_json = self._pack_geometries(geometries)
        if isinstance(distances, (list, tuple)):
            # The REST endpoint takes distances as a comma-separated string.
            distances = ",".join(str(distance) for distance in distances)
        # Spatial references not given default to the first geometry's.
        default_wkid = geometries[0].spatialReference.wkid
        if inSR is None:
            inSR = default_wkid
        if outSR is None:
            outSR = default_wkid
        if bufferSR is None:
            bufferSR = default_wkid
        return self._get_subfolder('buffer', GeometryResult,
                                   {'geometries': geo_json,
                                    'distances': distances,
                                    'unit': unit,
                                    'unionResults': unionResults,
                                    'inSR': inSR,
                                    'outSR': outSR,
                                    'bufferSR': bufferSR
                                   })
    def AreasAndLengths(self, polygons, sr=None, lengthUnit=None,
                        areaUnit=None):
        """The areasAndLengths operation is performed on a geometry service
        resource. This operation calculates areas and perimeter lengths for
        each polygon specified in the input array."""
        polygons, geo_json = self._pack_checked(polygons, geometry.Polygon,
                                                "Must use polygons")
        if sr is None:
            sr = polygons[0].spatialReference.wkid
        return self._get_subfolder('areasAndLengths', AreasAndLengthsResult,
                                   {'polygons': geo_json,
                                    'sr': sr,
                                    'lengthUnit': lengthUnit,
                                    'areaUnit': areaUnit
                                   })
    def Lengths(self, polylines, sr=None, lengthUnit=None, geodesic=None):
        """The lengths operation is performed on a geometry service resource.
        This operation calculates the lengths of each polyline specified in
        the input array"""
        polylines, geo_json = self._pack_checked(polylines, geometry.Polyline,
                                                 "Must use polylines")
        if sr is None:
            sr = polylines[0].spatialReference.wkid
        if geodesic is not None:
            geodesic = bool(geodesic)
        return self._get_subfolder('lengths', LengthsResult,
                                   {'polylines': geo_json,
                                    'sr': sr,
                                    'lengthUnit': lengthUnit,
                                    'geodesic': geodesic
                                   })
    def LabelPoints(self, polygons, sr=None):
        """The labelPoints operation is performed on a geometry service
        resource. This operation calculates an interior point for each
        polygon specified in the input array. These interior points can be
        used by clients for labeling the polygons.

        sr is now optional (the body always handled None, but the original
        signature required it); it defaults to the first polygon's spatial
        reference."""
        polygons, geo_json = self._pack_checked(polygons, geometry.Polygon,
                                                "Must use polygons")
        if sr is None:
            sr = polygons[0].spatialReference.wkid
        return self._get_subfolder('labelPoints', LabelPointsResult,
                                   {'polygons': geo_json,
                                    'sr': sr
                                   })
    def ConvexHull(self, geometries=None, sr=None):
        """The convexHull operation is performed on a geometry service
        resource. It returns the convex hull of the input geometry. The
        input geometry can be a point, multipoint, polyline or polygon. The
        hull is typically a polygon but can also be a polyline or point in
        degenerate cases."""
        geometries, geo_json = self._pack_geometries(geometries)
        if sr is None:
            sr = geometries[0].spatialReference.wkid
        return self._get_subfolder('convexHull', GeometryResult,
                                   {'geometries': geo_json, 'sr': sr})
    def Densify(self, geometries=None, sr=None, maxSegmentLength=None,
                geodesic=None, lengthUnit=None):
        """The densify operation is performed on a geometry service resource.
        This operation densifies geometries by plotting points between
        existing vertices."""
        geometries, geo_json = self._pack_geometries(geometries)
        if sr is None:
            sr = geometries[0].spatialReference.wkid
        # BUG FIX: this request previously posted to 'convexHull'.
        return self._get_subfolder('densify', GeometryResult,
                                   {'geometries': geo_json,
                                    'sr': sr,
                                    'maxSegmentLength': maxSegmentLength,
                                    'geodesic': geodesic,
                                    'lengthUnit': lengthUnit
                                   })
    def Distance(self, geometry1=None, geometry2=None, sr=None,
                 distanceUnit=None, geodesic=None):
        """The distance operation is performed on a geometry service resource.
        It reports the planar (projected space) / geodesic shortest distance
        between A and B. sr is a projected coordinate system. Distance is
        reported in the linear units specified by units or the units of sr
        if units is null."""
        if not sr:
            sr = (geometry1.spatialReference.wkid or
                  geometry2.spatialReference.wkid)
        geo_json_1 = json.dumps({'geometryType': geometry1.__geometry_type__,
                                 'geometry': geometry1._json_struct})
        geo_json_2 = json.dumps({'geometryType': geometry2.__geometry_type__,
                                 'geometry': geometry2._json_struct})
        folder = self._get_subfolder('distance', JsonResult,
                                     {'geometry1': geo_json_1,
                                      'geometry2': geo_json_2,
                                      'sr': sr,
                                      'distanceUnit': distanceUnit,
                                      'geodesic': geodesic,
                                     })
        return folder._json_struct['distance']
    def Generalize(self, geometries=None, sr=None, maxDeviation=None,
                   deviationUnit=None):
        """The generalize operation is performed on a geometry service
        resource. It returns generalized (Douglas-Poiker) versions of the
        input geometries."""
        geometries, geo_json = self._pack_geometries(geometries)
        if sr is None:
            sr = geometries[0].spatialReference.wkid
        return self._get_subfolder('generalize', GeometryResult,
                                   {'geometries': geo_json,
                                    'sr': sr,
                                    'maxDeviation': maxDeviation,
                                    'deviationUnit': deviationUnit
                                   })
    def Offset(self, geometries=None, sr=None, offsetDistance=None,
               offsetUnit=None, offsetHow=None, bevelRatio=None):
        """The offset operation is performed on a geometry service resource.
        Offset constructs the offset of the given input geometries. If the
        offset parameter is positive the constructed offset will be on the
        right side of the geometry. (Left side offsets are constructed with
        negative parameters.) Tracing the geometry from it's first vertex to
        the last will give you a direction along the geometry. It is to the
        right and left perspective of this direction that the positive and
        negative parameters will dictate where the offset is contructed. In
        these terms it is simple to infer where the offset of even
        horizontal geometries will be constructed. The offsetHow parameter
        determines how outer corners between segments are handled. Rounded
        offset rounds the corner between extended offsets. Bevelled offset
        squares off the corner after a given ratio distance. Mitered offset
        attempts to allow extended offsets to naturally intersect, but if
        that intersection occurs too far from the corner, the corner is
        eventually bevelled off at a fixed distance."""
        geometries, geo_json = self._pack_geometries(geometries)
        if sr is None:
            sr = geometries[0].spatialReference.wkid
        # BUG FIX: the original request sent offsetUnit as offsetDistance,
        # offsetHow as offsetUnit, and dropped offsetHow entirely.
        return self._get_subfolder('offset', GeometryResult,
                                   {'geometries': geo_json,
                                    'sr': sr,
                                    'offsetDistance': offsetDistance,
                                    'offsetUnit': offsetUnit,
                                    'offsetHow': offsetHow,
                                    'bevelRatio': bevelRatio
                                   })
    def TrimExtend(self, polylines=None, trimExtendTo=None, sr=None,
                   extendHow=None):
        """The trimExtend operation is performed on a geometry service
        resource. This operation trims / extends each polyline specified
        in the input array, using the user specified guide polylines. When
        trimming features, the part to the left of the oriented cutting line
        is preserved in the output and the other part is discarded. An empty
        polyline is added to the output array if the corresponding input
        polyline is neither cut nor extended."""
        polylines, geo_json = self._pack_checked(polylines, geometry.Polyline,
                                                 "Must use polylines")
        if sr is None:
            sr = polylines[0].spatialReference.wkid
        return self._get_subfolder('trimExtend', GeometryResult,
                                   {'polylines': geo_json,
                                    'trimExtendTo': trimExtendTo,
                                    'extendHow': extendHow,
                                    'sr': sr
                                   })
    def AutoComplete(self, polygons=None, polylines=None, sr=None):
        """The Auto Complete operation is performed on a geometry service
        resource. The AutoComplete operation simplifies the process of
        constructing new polygons that are adjacent to other polygons. It
        constructs polygons that fill in the gaps between existing polygons
        and a set of polylines."""
        # Not supported by this client yet.
        raise NotImplementedError()
    def Cut(self, cutter=None, target=None, sr=None):
        """The cut operation is performed on a geometry service resource. This
        operation splits the input polyline or polygon where it crosses a
        cutting polyline"""
        # Not supported by this client yet.
        raise NotImplementedError()
    def Difference(self, geometries=None, geometry=None, sr=None):
        """The difference operation is performed on a geometry service
        resource. This operation constructs the set-theoretic difference
        between an array of geometries and another geometry."""
        # Not supported by this client yet. NOTE: the `geometry` parameter
        # shadows the geometry module; kept for interface compatibility.
        raise NotImplementedError()
    def Intersect(self, geometries=None, geometry=None, sr=None):
        """The intersect operation is performed on a geometry service
        resource. This operation constructs the set-theoretic intersection
        between an array of geometries and another geometry"""
        # Not supported by this client yet. NOTE: the `geometry` parameter
        # shadows the geometry module; kept for interface compatibility.
        raise NotImplementedError()
    def Reshape(self, target=None, reshaper=None, sr=None):
        """The reshape operation is performed on a geometry service resource.
        It reshapes a polyline or a part of a polygon using a reshaping
        line."""
        # Not supported by this client yet.
        raise NotImplementedError()
    def Union(self, geometries=None, sr=None):
        """The union operation is performed on a geometry service resource.
        This operation constructs the set-theoretic union of the geometries
        in the input array. All inputs must be of the same type."""
        # Not supported by this client yet.
        raise NotImplementedError()
class ExportImageResult(JsonResult):
    """Represents the output of an Image Service exportImage call."""
    @property
    def href(self):
        """URL of the exported image."""
        return self._json_struct['href']
    @property
    def width(self):
        """Width of the exported image, in pixels."""
        return self._json_struct['width']
    @property
    def height(self):
        """Height of the exported image, in pixels."""
        return self._json_struct['height']
    @property
    def extent(self):
        """Geographic extent of the exported image."""
        return geometry.fromJson(self._json_struct['extent'])
    def save(self, outfile):
        """Save the image data to a file (named by a string path) or a
        file-like object.

        Fixes a resource leak: the HTTP response -- and any file opened
        here -- are now closed deterministically."""
        response = urllib2.urlopen(self.href)
        try:
            data = response.read()
        finally:
            response.close()
        if isinstance(outfile, basestring):
            # We opened this file, so we close it; a caller-supplied
            # file-like object is left open for the caller to manage.
            with open(outfile, 'wb') as f:
                f.write(data)
        else:
            outfile.write(data)
@Folder._register_service_type
class ImageService(Service):
    """An image service provides read-only access to a mosaicked collection of
    images or a raster data set."""
    __service_type__ = "ImageServer"
    def ExportImage(self, bbox=None, size=None, imageSR=None, bboxSR=None,
                    format=None, pixelType=None, noData=None,
                    interpolation=None, compressionQuality=None, bandIds=None,
                    mosaicProperties=None, viewpointProperties=None,
                    mosaicRule=None, renderingRule=None):
        """The exportImage operation is performed on an image service
        resource. The result of this operation is an exported image
        resource. This resource provides information about the exported
        image such as its URL, its width and height, and extent."""
        query = {'bbox': bbox,
                 'size': size,
                 'imageSR': imageSR,
                 'bboxSR': bboxSR,
                 'format': format,
                 'pixelType': pixelType,
                 'noData': noData,
                 'interpolation': interpolation,
                 'compressionQuality': compressionQuality,
                 'bandIds': bandIds,
                 'mosaicProperties': mosaicProperties,
                 'viewpointProperties': viewpointProperties,
                 'mosaicRule': mosaicRule,
                 'renderingRule': renderingRule}
        return self._get_subfolder('exportImage/', ExportImageResult, query)
@Folder._register_service_type
class NetworkService(Service):
    """The network service resource represents a network analysis service
    published with ArcGIS Server. The resource provides information about
    the service such as the service description and the various network
    layers (route, closest facility and service area layers) contained in
    the network analysis service."""
    __service_type__ = "NAServer"
    @property
    def routeLayers(self):
        """Route network layers published by this service."""
        return [self._get_subfolder("%s/" % layer, RouteNetworkLayer)
                for layer in self._json_struct['routeLayers']]
    @property
    def serviceAreaLayers(self):
        """Service-area network layers published by this service."""
        return [self._get_subfolder("%s/" % layer, NetworkLayer)
                for layer in self._json_struct['serviceAreaLayers']]
    @property
    def closestFacilityLayers(self):
        """Closest-facility network layers published by this service."""
        return [self._get_subfolder("%s/" % layer, NetworkLayer)
                for layer in self._json_struct['closestFacilityLayers']]
    def __getitem__(self, attr):
        """Dict-style lookup of any layer (route, service area or closest
        facility) by name. Raises KeyError when no layer matches."""
        layer_names = set(self._json_struct['routeLayers'] +
                          self._json_struct['serviceAreaLayers'] +
                          self._json_struct['closestFacilityLayers'])
        if attr in layer_names:
            # BUG FIX: the subfolder was previously constructed but never
            # returned, so every lookup fell through to the KeyError below.
            return self._get_subfolder("%s/" % attr, NetworkLayer)
        raise KeyError("No attribute %r found" % attr)
    def __getattr__(self, attr):
        # Attribute-style layer lookup, surfacing misses as AttributeError.
        try:
            return self[attr]
        except KeyError as e:
            raise AttributeError(str(e))
class DirectionResult(object):
    """Represents an individual directions entry in a Network Solve operation
    result."""
    def __init__(self, direction):
        # Keep the raw JSON dict; the properties below read straight from it.
        self._json_struct = direction
    @property
    def routeId(self):
        """Identifier of the route these directions belong to."""
        return self._json_struct["routeId"]
    @property
    def routeName(self):
        """Display name of the route these directions belong to."""
        return self._json_struct["routeName"]
    @property
    def summary(self):
        """Summary information for this directions entry."""
        return self._json_struct["summary"]
    @property
    def features(self):
        """Direction features as a GPFeatureRecordSetLayer."""
        return gptypes.GPFeatureRecordSetLayer.fromJson(self._json_struct)
class NetworkSolveResult(JsonResult):
    """Represents a solve operation's output performed on a Route Network
    layer."""
    @property
    def directions(self):
        """One DirectionResult per directions entry in the response."""
        return [DirectionResult(entry)
                for entry in self._json_struct['directions']]
    @property
    def routes(self):
        """Solved routes as a feature record set layer."""
        return gptypes.GPFeatureRecordSetLayer.fromJson(
            self._json_struct['routes'])
    @property
    def stops(self):
        """Stops used by the solve, as a feature record set layer."""
        return gptypes.GPFeatureRecordSetLayer.fromJson(
            self._json_struct['stops'])
    @property
    def barriers(self):
        """Barriers used by the solve, as a feature record set layer."""
        return gptypes.GPFeatureRecordSetLayer.fromJson(
            self._json_struct['barriers'])
    @property
    def messages(self):
        """Raw message list returned by the solver."""
        return self._json_struct['messages']
class NetworkLayer(Layer):
    """The network layer resource represents a single network layer in a
    network analysis service published by ArcGIS Server. It provides
    basic information about the network layer such as its name, type,
    and network classes. Additionally, depending on the layer type, it
    provides different pieces of information as detailed in the
    examples."""
    __parent_type__ = NetworkService
    @property
    def layerName(self):
        """Name of this network analysis layer."""
        return self._json_struct['layerName']
    @property
    def layerType(self):
        """Layer type string (e.g. esriNAServerRouteLayer)."""
        return self._json_struct['layerType']
    @property
    def impedance(self):
        """Impedance attribute used by the solver."""
        return self._json_struct['impedance']
    @property
    def useStartTime(self):
        """Whether a start time is used by the solver."""
        return self._json_struct['useStartTime']
    @property
    def useTimeWindows(self):
        """Whether time windows are honored by the solver."""
        return self._json_struct['useTimeWindows']
    @property
    def preserveFirstStop(self):
        """Whether the first stop is preserved when reordering stops."""
        return self._json_struct['preserveFirstStop']
    @property
    def preserveLastStop(self):
        """Whether the last stop is preserved when reordering stops."""
        return self._json_struct['preserveLastStop']
    @property
    def restrictUTurns(self):
        """U-turn policy for the solver."""
        return self._json_struct['restrictUTurns']
    @property
    def outputLineType(self):
        """Type of lines generated in the solve output."""
        return self._json_struct['outputLineType']
    @property
    def useHierarchy(self):
        """Whether the network hierarchy is used while solving."""
        return self._json_struct['useHierarchy']
    @property
    def ignoreInvalidLocations(self):
        """Whether unlocatable stops/barriers are skipped instead of
        failing the solve."""
        return self._json_struct['ignoreInvalidLocations']
    @property
    def restrictions(self):
        """Restriction attributes applied by the solver."""
        return self._json_struct['restrictions']
    @property
    def distanceUnits(self):
        """Distance units used in the solve output."""
        return self._json_struct['distanceUnits']
    @property
    def useTimeAttribute(self):
        """Whether a time attribute is used by the solver."""
        return self._json_struct['useTimeAttribute']
    @property
    def networkClasses(self):
        """Network classes (sources) participating in this layer."""
        return self._json_struct['networkClasses']
    def SolveClosestFacility(self, facilities=None,
                             incidents=None,
                             barriers=None,
                             polylineBarriers=None,
                             polygonBarriers=None,
                             attributeParameterValues=None,
                             returnDirections=None,
                             directionsLanguage=None,
                             directionsStyleName=None,
                             directionsLengthUnits=None,
                             directionsTimeAttributeName=None,
                             returnCFRoutes=None,
                             returnFacilities=None,
                             returnIncidents=None,
                             returnBarriers=None,
                             returnPolylineBarriers=None,
                             returnPolygonBarriers=None,
                             facilityReturnType=None,
                             outputLines=None,
                             defaultCutoff=None,
                             defaultTargetFacilityCount=None,
                             travelDirection=None,
                             outSR=None,
                             impedanceAttributeName=None,
                             restrictionAttributeNames=None,
                             restrictUTurns=None,
                             useHierarchy=None,
                             outputGeometryPrecision=None,
                             outputGeometryPrecisionUnits=None):
        """The solve operation is performed on a network layer resource of type
        closest facility."""
        # Not supported by this client yet.
        raise NotImplementedError()
    def SolveServiceArea(self, facilities=None,
                         barriers=None,
                         polylineBarriers=None,
                         polygonBarriers=None,
                         attributeParameterValues=None,
                         defaultBreaks=None,
                         excludeSourcesFromPolygons=None,
                         mergeSimilarPolygonRanges=None,
                         outputLines=None,
                         outputPolygons=None,
                         overlapLines=None,
                         overlapPolygons=None,
                         splitLinesAtBreaks=None,
                         splitPolygonsAtBreaks=None,
                         travelDirection=None,
                         trimOuterPolygon=None,
                         trimPolygonDistance=None,
                         trimPolygonDistanceUnits=None,
                         accumulateAttributeNames=None,
                         impedanceAttributeName=None,
                         restrictionAttributeNames=None,
                         restrictUTurns=None,
                         outputGeometryPrecision=None,
                         outputGeometryPrecisionUnits=None):
        """The solve operation is performed on a network layer resource of type
        service area (layerType is esriNAServerServiceArea)."""
        # Not supported by this client yet.
        raise NotImplementedError()
class RouteNetworkLayer(NetworkLayer):
    """Represents a Route Network Layer"""

    def Solve(self, stops=None, barriers=None, returnDirections=None,
              returnRoutes=None, returnStops=None, returnBarriers=None,
              outSR=None, ignoreInvalidLocations=None, outputLines=None,
              findBestSequence=None, preserveFirstStop=None,
              preserveLastStop=None, useTimeWindows=None, startTime=None,
              accumulateAttributeNames=None, impedanceAttributeName=None,
              restrictionAttributeNames=None, restrictUTurns=None,
              useHierarchy=None, directionsLanguage=None,
              outputGeometryPrecision=None, directionsLengthUnits=None,
              directionsTimeAttributeName=None, attributeParameterValues=None,
              polylineBarriers=None, polygonBarriers=None):
        """The solve operation is performed on a network layer resource.
        At 9.3.1, the solve operation is supported only on the route layer.
        Or specifically, on a network layer whose layerType is
        esriNAServerRouteLayer.
        You can provide arguments to the solve route operation as query
        parameters defined in the parameters table below.
        """
        # Normalize a Point (or a list/tuple of points) into the
        # "x1,y1;x2,y2" string form that the REST API expects; any other
        # value is passed through unchanged.
        def ptlist_as_semilist(lst):
            if isinstance(lst, geometry.Point):
                lst = [lst]
            if isinstance(lst, (list, tuple)):
                return ";".join(','.join(str(x) for x in pt) for pt in lst)
            return lst
        # Solving is only defined for route layers; fail fast otherwise.
        if self.layerType != "esriNAServerRouteLayer":
            raise TypeError("Layer is of type %s; Solve is not available."
                            % self.layerType)
        # Each keyword argument maps one-for-one onto a query parameter of
        # the 'solve' REST operation; None values are simply omitted.
        return self._get_subfolder('solve/', NetworkSolveResult,
                                   {'stops': ptlist_as_semilist(stops),
                                    'barriers': ptlist_as_semilist(barriers),
                                    'returnDirections': returnDirections,
                                    'returnRoutes': returnRoutes,
                                    'returnStops': returnStops,
                                    'returnBarriers': returnBarriers,
                                    'outSR': outSR,
                                    'ignoreInvalidLocations': ignoreInvalidLocations,
                                    'outputLines': outputLines,
                                    'findBestSequence': findBestSequence,
                                    'preserveFirstStop': preserveFirstStop,
                                    'preserveLastStop': preserveLastStop,
                                    'useTimeWindows': useTimeWindows,
                                    'startTime': startTime,
                                    'accumulateAttributeNames': accumulateAttributeNames,
                                    'impedanceAttributeName': impedanceAttributeName,
                                    'restrictionAttributeNames': restrictionAttributeNames,
                                    'restrictUTurns': restrictUTurns,
                                    'useHierarchy': useHierarchy,
                                    'directionsLanguage': directionsLanguage,
                                    'outputGeometryPrecision': outputGeometryPrecision,
                                    'directionsLengthUnits': directionsLengthUnits,
                                    'directionsTimeAttributeName':
                                        directionsTimeAttributeName,
                                    'attributeParameterValues': attributeParameterValues,
                                    'polylineBarriers': polylineBarriers,
                                    'polygonBarriers': polygonBarriers})
class GeoDataVersion(RestURL):
    """A single version in a geodata service published using ArcGIS Server.

    Provides basic information about the version such as its description,
    created and modified times, access type, as well as parent, children
    and ancestor versions."""

    @property
    def name(self):
        """Version name."""
        data = self._json_struct
        return data['name']

    @property
    def description(self):
        """Free-text description of the version."""
        data = self._json_struct
        return data['description']

    @property
    def created(self):
        """Creation time of the version."""
        data = self._json_struct
        return data['created']

    @property
    def modified(self):
        """Last-modified time of the version."""
        data = self._json_struct
        return data['modified']

    @property
    def access(self):
        """Access type of the version."""
        data = self._json_struct
        return data['access']

    @property
    def parentVersion(self):
        """The version this version was created from."""
        data = self._json_struct
        return data['parentVersion']

    @property
    def childVersions(self):
        """Versions created from this version."""
        data = self._json_struct
        return data['childVersions']

    @property
    def ancestorVersions(self):
        """All ancestors of this version."""
        data = self._json_struct
        return data['ancestorVersions']
class GeoDataReplica(RestURL):
    """The geodata replica resource represents a single replica in a geodata
    service published using ArcGIS Server. It provides basic information
    about the replica such as its id, replica version, creation date, GUID,
    role, access type, and reconcile policy."""
    # Each property below exposes one field of the replica's JSON
    # representation as returned by the REST endpoint.

    @property
    def name(self):
        return self._json_struct['name']

    @property
    def id(self):
        return self._json_struct['id']

    @property
    def replicaVersion(self):
        return self._json_struct['replicaVersion']

    @property
    def guid(self):
        return self._json_struct['guid']

    @property
    def role(self):
        return self._json_struct['role']

    @property
    def accessType(self):
        return self._json_struct['accessType']

    @property
    def myGenerationNumber(self):
        return self._json_struct['myGenerationNumber']

    @property
    def sibGenerationNumber(self):
        return self._json_struct['sibGenerationNumber']

    @property
    def sibMyGenerationNumber(self):
        return self._json_struct['sibMyGenerationNumber']

    @property
    def replicaState(self):
        return self._json_struct['replicaState']

    @property
    def sibConnectionString(self):
        return self._json_struct['sibConnectionString']

    @property
    def modelType(self):
        return self._json_struct['modelType']

    @property
    def singleGeneration(self):
        return self._json_struct['singleGeneration']

    @property
    def spatialRelation(self):
        return self._json_struct['spatialRelation']

    @property
    def queryGeometryType(self):
        return self._json_struct['queryGeometryType']

    @property
    def queryGeometry(self):
        # Parsed into a geometry object rather than returned as raw JSON.
        return geometry.fromJson(self._json_struct['queryGeometry'])

    @property
    def transferRelatedObjects(self):
        return self._json_struct['transferRelatedObjects']

    @property
    def reconcilePolicy(self):
        return self._json_struct['reconcilePolicy']
@Folder._register_service_type
class GeoDataService(Service):
    """The geodata service resource represents a geodata service that you have
    published with ArcGIS Server. The resource provides basic information
    associated with the geodata service such as the service description,
    its workspace type, default working version, versions, and replicas."""
    __service_type__ = "GeoDataServer"

    @property
    def workspaceType(self):
        """Workspace type of the geodata service."""
        return self._json_struct['workspaceType']

    @property
    def defaultWorkingVersionName(self):
        """Name of the default working version."""
        return self._json_struct['defaultWorkingVersion']

    @property
    def defaultWorkingVersion(self):
        """GeoDataVersion resource for the default working version."""
        return self._get_subfolder("versions/%s/" %
                                   self.defaultWorkingVersionName,
                                   GeoDataVersion)

    @property
    def versionNames(self):
        """Names of all versions of this service."""
        return self._json_struct['versions']

    @property
    def versions(self):
        """GeoDataVersion resources for all versions of this service."""
        return [self._get_subfolder("versions/%s/" % version, GeoDataVersion)
                for version in self.versionNames]

    @property
    def replicaNames(self):
        """Names of all replicas of this service."""
        return self._json_struct['replicas']

    @property
    def replicas(self):
        """GeoDataReplica resources for all replicas of this service."""
        # BUG FIX: the loop variable is ``replica``; the original built the
        # path with the undefined name ``version``, raising NameError.
        return [self._get_subfolder("replicas/%s/" % replica, GeoDataReplica)
                for replica in self.replicaNames]
class GlobeLayer(Layer):
    """The globe layer resource represents a single layer in a globe service
    published by ArcGIS Server. It provides basic information about the
    layer such as its ID, name, type, parent and sub-layers, fields, extent,
    data type, sampling mode, and extrusion type."""
    # Each simple property below exposes one field of the layer's JSON
    # representation as returned by the REST endpoint.

    @property
    def id(self):
        return self._json_struct['id']

    @property
    def name(self):
        return self._json_struct['name']

    @property
    def type(self):
        return self._json_struct['type']

    @property
    def description(self):
        return self._json_struct['description']

    @property
    def extent(self):
        # Parsed into a geometry object rather than returned as raw JSON.
        return geometry.fromJson(self._json_struct['extent'])

    @property
    def dataType(self):
        return self._json_struct['dataType']

    @property
    def maxDistance(self):
        return self._json_struct['maxDistance']

    @property
    def minDistance(self):
        return self._json_struct['minDistance']

    @property
    def samplingMode(self):
        return self._json_struct['samplingMode']

    @property
    def baseID(self):
        return self._json_struct['baseID']

    @property
    def baseOption(self):
        return self._json_struct['baseOption']

    @property
    def extrusionType(self):
        return self._json_struct['extrusionType']

    @property
    def extrusionExpression(self):
        return self._json_struct['extrusionExpression']

    @property
    def cullMode(self):
        return self._json_struct['cullMode']

    @property
    def copyrightText(self):
        return self._json_struct['copyrightText']

    @property
    def displayField(self):
        return self._json_struct['displayField']

    @property
    def fields(self):
        return self._json_struct['fields']

    @property
    def parentLayer(self):
        # "../<id>/" resolves to the sibling URL of the parent layer.
        return self._get_subfolder("../%s/" %
                                   self._json_struct['parentLayer']['id'],
                                   GlobeLayer)

    @property
    def subLayers(self):
        # One GlobeLayer per entry in the 'subLayers' JSON array.
        return [self._get_subfolder("../%s/" % layer['id'], GlobeLayer)
                for layer in self._json_struct['subLayers']]
@Folder._register_service_type
class GlobeService(Service):
    """A globe service published with ArcGIS Server.

    Provides information about the service such as the service description
    and the various layers contained in the published globe document."""
    __service_type__ = "GlobeServer"

    @property
    def layernames(self):
        """Return a list of the names of this globe service's layers"""
        return [entry['name'] for entry in self._json_struct['layers']]

    @property
    def layers(self):
        """Return a list of this globe service's layer objects"""
        result = []
        for entry in self._json_struct['layers']:
            result.append(self._get_subfolder("./%s/" % entry['id'],
                                              GlobeLayer))
        return result
# NOTE(review): @Folder._register_service_type is otherwise only applied to
# Service subclasses that declare __service_type__ (GeoDataService,
# GlobeService, FeatureService); its use on this plain class looks
# unintended — confirm against the registration helper.
@Folder._register_service_type
class FeatureLayerFeature(object):
    """The feature resource represents a single feature in a layer in a feature
    service."""
    # NOTE(review): this class derives from ``object`` but uses
    # ``self._json_struct`` and ``self._get_subfolder``, which elsewhere in
    # this module come from the RestURL base class — presumably the base
    # class was meant to be RestURL; verify before instantiating directly.

    @property
    def geometry(self):
        # Build a geometry object from the feature's JSON; a feature with no
        # geometry (a table row) yields a NullGeometry carrying attributes.
        if 'geometry' in self._json_struct['feature']:
            geom = geometry.fromJson(
                self._json_struct['feature'].get('geometry',
                                                 None),
                self._json_struct['feature'].get('attributes',
                                                 {}))
        else:
            geom = geometry.NullGeometry()
            geom.attributes = self._json_struct['feature'].get('attributes',
                                                               {})
        return geom

    @property
    def attributes(self):
        # Attribute dict of the feature; empty dict when absent.
        return self._json_struct['feature'].get('attributes',
                                                {})

    @property
    def attachments(self):
        # Attachment listing for this feature.
        return self._get_subfolder("./attachments/", AttachmentInfos)

    def AddAttachment(self, attachment=None):
        """This operation adds an attachment to the associated feature (POST
        only). The add attachment operation is performed on a feature
        service feature resource."""
        return self._get_subfolder("./addAttachment", JsonPostResult,
                                   {'attachment': attachment})

    def UpdateAttachment(self, attachmentId=None, attachment=None):
        """This operation updates an attachment associated with a feature
        (POST only). The update attachment operation is performed on a
        feature service feature resource."""
        return self._get_subfolder("./updateAttachment", JsonPostResult,
                                   {'attachment': attachment,
                                    'attachmentId': attachmentId})

    def DeleteAttachments(self, attachmentIds=None):
        """This operation deletes attachments associated with a feature (POST
        only). The delete attachments operation is performed on a feature
        service feature resource."""
        return self._get_subfolder("./deleteAttachments", JsonPostResult,
                                   {'attachmentIds': attachmentIds})
class FeatureLayer(MapLayer):
    """The layer resource represents a single editable feature layer or non
    spatial table in a feature service."""

    def __getitem__(self, index):
        """Get a feature by featureId"""
        return self._get_subfolder(str(index), FeatureLayerFeature)

    def Feature(self, featureId):
        """Return a feature from this FeatureService by its ID"""
        return self[featureId]

    def QueryRelatedRecords(self, objectIds=None, relationshipId=None,
                            outFields=None, definitionExpression=None,
                            returnGeometry=None, outSR=None):
        """The query operation is performed on a feature service layer
        resource. The result of this operation are featuresets grouped by
        source layer / table object IDs. Each featureset contains Feature
        objects including the values for the fields requested by the user.
        For related layers, if you request geometry information, the
        geometry of each feature is also returned in the featureset. For
        related tables, the featureset does not include geometries."""
        out = self._get_subfolder("./queryRelatedRecords", JsonResult, {
            'objectIds': objectIds,
            'relationshipId': relationshipId,
            'outFields': outFields,
            'definitionExpression': definitionExpression,
            'returnGeometry': returnGeometry,
            'outSR': outSR})
        return out._json_struct

    def AddFeatures(self, features):
        """This operation adds features to the associated feature layer or
        table (POST only). The result of this operation is an array of edit
        results. Each edit result identifies a single feature and indicates
        if the edit were successful or not. If not, it also includes an
        error code and an error description."""
        # NOTE(review): the features' JSON objects are joined with commas
        # but not wrapped in '[...]'; confirm the server accepts this form.
        fd = {'features': ",".join(json.dumps(
                                   feature._json_struct_for_featureset)
                                   for feature in features)}
        return self._get_subfolder("./addFeatures", JsonPostResult, fd)

    def UpdateFeatures(self, features):
        """This operation updates features to the associated feature layer or
        table (POST only). The result of this operation is an array of edit
        results. Each edit result identifies a single feature and indicates
        if the edit were successful or not. If not, it also includes an
        error code and an error description."""
        fd = {'features': ",".join(json.dumps(
                                   feature._json_struct_for_featureset)
                                   for feature in features)}
        return self._get_subfolder("./updateFeatures", JsonPostResult, fd)

    def DeleteFeatures(self, objectIds=None, where=None, geometry=None,
                       inSR=None, spatialRel=None):
        """This operation deletes features in a feature layer or table (POST
        only). The result of this operation is an array of edit results.
        Each edit result identifies a single feature and indicates if the
        edit were successful or not. If not, it also includes an error code
        and an error description.

        :param objectIds: object IDs of the features to delete.
        :param where: SQL where clause selecting features to delete.
        :param geometry: geometry object used as a spatial filter.
        :param inSR: spatial reference of the filter geometry; defaults to
                     the geometry's own spatial reference.
        :param spatialRel: spatial relationship applied to the filter.
        """
        # BUG FIX: the original referenced the undefined names ``sr``,
        # ``geometryType`` and ``Geometry`` (NameError on every call), and
        # dereferenced ``geometry`` even when it was None. Derive the
        # geometry type, spatial reference and JSON body from the geometry
        # argument only when one is supplied.
        geometry_type = None
        geo_json = None
        if geometry is not None:
            geometry_type = geometry.__geometry_type__
            if inSR is None:
                inSR = geometry.spatialReference.wkid
            geo_json = json.dumps(geometry._json_struct_without_sr)
        return self._get_subfolder("./deleteFeatures", JsonPostResult, {
            'objectIds': objectIds,
            'where': where,
            'geometry': geo_json,
            'geometryType': geometry_type,
            'inSR': inSR,
            'spatialRel': spatialRel})

    def ApplyEdits(self, adds=None, updates=None, deletes=None):
        """This operation adds, updates and deletes features to the associated
        feature layer or table in a single call (POST only). The result of
        this operation are 3 arrays of edit results (for adds, updates and
        deletes respectively). Each edit result identifies a single feature
        and indicates if the edit were successful or not. If not, it also
        includes an error code and an error description."""
        add_str, update_str = None, None
        if adds:
            add_str = ",".join(json.dumps(
                               feature._json_struct_for_featureset)
                               for feature in adds)
        if updates:
            update_str = ",".join(json.dumps(
                                  feature._json_struct_for_featureset)
                                  for feature in updates)
        return self._get_subfolder("./applyEdits", JsonPostResult,
                                   {'adds': add_str,
                                    'updates': update_str,
                                    'deletes': deletes})
@Folder._register_service_type
class FeatureService(Service):
    """A feature service allows clients to query and edit features. Features
    include geometry, attributes and symbology and are organized into layers
    and sub types within a layer."""
    __service_type__ = "FeatureServer"

    @property
    def layernames(self):
        """Return a list of the names of this service's layers"""
        return [item['name'] for item in self._json_struct['layers']]

    @property
    def layers(self):
        """Return a list of this service's layer objects"""
        return [self._get_subfolder("%s/" % item['id'], FeatureLayer)
                for item in self._json_struct['layers']]

    @property
    def tablenames(self):
        """Return a list of the names of this service's tables"""
        table_entries = self._json_struct.get('tables', [])
        return [item['name'] for item in table_entries]

    @property
    def tables(self):
        """Return a list of this service's table objects"""
        table_entries = self._json_struct.get('tables', [])
        return [self._get_subfolder("%s/" % item['id'], FeatureLayer)
                for item in table_entries]
|
import os
from pprint import pprint
from datetime import datetime, timedelta
from django.db import models
from django.core.exceptions import PermissionDenied, ValidationError
from mezzanine.conf import settings
from hs_core.signals import pre_check_bag_flag
class ResourceIRODSMixin(models.Model):
    """ This contains iRODS methods to be included as options for resources """
    class Meta:
        abstract = True

    def __home_path(self):
        """ Return the home path for local iRODS resources """
        return settings.IRODS_CWD

    def update_bag(self):
        """
        Update a bag if necessary, before issuing a download ticket.

        This sends the Django signal pre_check_bag_flag to prepare
        collections, and then checks the AVUs 'metadata_dirty' and
        'bag_modified' to determine whether to regenerate the metadata
        files and/or the bag.

        This is a synchronous update. The call waits until the update is
        finished.
        """
        from hs_core.tasks import create_bag_by_irods
        from hs_core.hydroshare.resource import check_resource_type
        from hs_core.hydroshare.hs_bagit import create_bag_files

        # BUG FIX: the signal send was commented out although the signal and
        # the resource class lookup were already imported; send it so
        # resource types can prepare before the flags are checked.
        resource_cls = check_resource_type(self.resource_type)
        pre_check_bag_flag.send(sender=resource_cls, resource=self)

        metadata_dirty = self.getAVU('metadata_dirty')
        bag_modified = self.getAVU('bag_modified')

        if metadata_dirty:  # automatically cast to Bool
            create_bag_files(self)
            self.setAVU('metadata_dirty', False)

        # the ticket system does synchronous bag creation.
        # async bag creation isn't supported.
        if bag_modified:  # automatically cast to Bool
            create_bag_by_irods(self.short_id)
            self.setAVU('bag_modified', False)

    def update_metadata_files(self):
        """
        Make the metadata files resourcemetadata.xml and resourcemap.xml
        up to date.

        BUG FIX: the original regenerated the files unconditionally; this
        checks the 'metadata_dirty' AVU first and only regenerates when
        the metadata is actually stale.
        """
        from hs_core.hydroshare.hs_bagit import create_bag_files
        metadata_dirty = self.getAVU('metadata_dirty')
        if metadata_dirty:
            create_bag_files(self)
            self.setAVU('metadata_dirty', False)

    def create_ticket(self, user, path=None, write=False, allowed_uses=1):
        """
        create an iRODS ticket for reading or modifying a resource

        :param user: user to authorize
        :param path: path in iRODS to the object being requested.
        :param write: whether the ticket should grant write access.
        :param allowed_uses: number of times the ticket may be used.
        :return: the ticket string
        :raises PermissionDenied: if user is not allowed to create the ticket.
        :raises SessionException: if ticket fails to be created for some reason.
        :raises SuspiciousFileOperation: if path uses .. illegally

        WARNING: This creates a ticket that expires in one hour in UTC. If the
        iRODS and django servers are in different time zones and not set for
        UTC, this results in a useless ticket. This includes federated servers.
        THERE IS NO STRAIGHTFORWARD MECHANISM IN IRODS for determining the
        time zone or local time of an iRODS server.

        Also, note that there is no mechanism for asynchronous bag creation in
        the ticketing system. The bag is created *synchronously* if required.
        The ticket is not issued until the bag exists.
        """
        # The bag/metadata helpers are imported by update_bag and
        # update_metadata_files; they were unused here and are removed.
        from hs_core.models import path_is_allowed

        # raises SuspiciousFileOperation if path is not allowed
        path_is_allowed(path)

        # authorize user
        if write:
            if not user.uaccess.can_change_resource(self):
                raise PermissionDenied("user {} cannot change resource {}"
                                       .format(user.username, self.short_id))
        else:
            if not user.uaccess.can_view_resource(self):
                raise PermissionDenied("user {} cannot view resource {}"
                                       .format(user.username, self.short_id))

        if path is None:
            path = self.file_path  # default = all data files

        # can only write resource files
        if write:
            if not path.startswith(self.file_path):
                raise PermissionDenied("{} can only write data files to {}"
                                       .format(user.username, self.short_id))
        # can read anything inside this particular resource
        else:
            if path == self.bag_path:
                self.update_bag()
            elif not path.startswith(self.root_path):
                raise PermissionDenied("invalid resource file path {}".format(path))
            elif path == self.resmap_path or path == self.scimeta_path:
                self.update_metadata_files()

        istorage = self.get_irods_storage()
        read_or_write = 'write' if write else 'read'
        if path.startswith(self.short_id) or path.startswith('bags/'):  # local path
            path = os.path.join(self.__home_path(), path)
        stdout, stderr = istorage.session.run("iticket", None, 'create',
                                              read_or_write, path)
        if not stdout.startswith('ticket:'):
            # BUG FIX: the original passed stderr as a stray second argument
            # to ValidationError, leaving the '{}' placeholder unfilled.
            raise ValidationError("ticket creation failed: {}".format(stderr))
        ticket = stdout.split('\n')[0]
        ticket = ticket[len('ticket:'):]
        _, _ = istorage.session.run('iticket', None, 'mod', ticket,
                                    'uses', str(allowed_uses))

        # This creates a timestamp with a one-hour timeout.
        # Note that this is a timeout on when the ticket is first used, and
        # not on the completion of the use, which can take considerably longer.
        # TODO: this will fail unless Django and iRODS are both running in UTC.
        # There is no current mechanism for determining the timezone of a
        # remote iRODS server from within iRODS; shell access is required.
        timeout = datetime.now() + timedelta(hours=1)
        formatted = timeout.strftime("%Y-%m-%d.%H:%M")
        _, _ = istorage.session.run('iticket', None, 'mod', ticket,
                                    'expires', formatted)
        return ticket

    def list_ticket(self, ticket):
        """ List a ticket's attributes as a dict of parsed fields """
        istorage = self.get_irods_storage()
        stdout, stderr = istorage.session.run("iticket", None, 'ls', ticket)
        if stdout.startswith('id:'):
            stuff = stdout.split('\n')
            output = {}
            for s in stuff:
                try:
                    line = s.split(': ')
                    key = line[0]
                    value = line[1]
                    if key == 'collection name' or key == 'data collection':
                        output['full_path'] = value
                        if self.resource_federation_path:
                            if __debug__:
                                assert(value.startswith(
                                    self.resource_federation_path))
                            output['long_path'] = \
                                value[len(self.resource_federation_path):]
                            output['fed_path'] = self.resource_federation_path
                        else:
                            # BUG FIX: str has no .search method; find()
                            # returns the offset (or -1), matching the
                            # assert below.
                            location = value.find(self.short_id)
                            if __debug__:
                                assert(location >= 0)
                            output['long_path'] = value[location:]
                            output['home_path'] = value[:(location-1)]
                        if __debug__:
                            assert(output['long_path']
                                   .startswith(self.short_id))
                        # data/....
                        qual_path = output['long_path'][len(self.short_id)+1:]
                        output['qual_path'] = qual_path
                        output['folder'] = None
                        if qual_path.startswith('data/contents/'):
                            output['folder'] = qual_path[len('data/contents/'):]
                    if key == 'data-object name':
                        output['filename'] = value
                    elif key == 'ticket type':
                        output['type'] = value
                    elif key == 'owner name':
                        output['owner'] = value
                    elif key == 'owner zone':
                        output['zone'] = value
                    elif key == 'expire time':
                        output['expires'] = value
                    else:
                        output[line[0]] = line[1]
                except Exception:  # no ':' in line
                    pass
            if 'filename' in output:
                output['full_path'] = os.path.join(output['full_path'],
                                                   output['filename'])
            return output
        else:
            raise ValidationError("ticket {} cannot be listed".format(ticket))

    def delete_ticket(self, user, ticket):
        """
        delete an existing ticket

        :param user: user requesting the deletion.
        :param ticket: ticket string
        :raises SessionException: if ticket does not exist.

        This checks that the user at least has the privilege the ticket
        grants, before deleting it. This is not quite as comprehensive as
        keeping a ticket history, but provides a small amount of safety
        nonetheless.

        It remains possible for one user to delete the ticket of another user
        without that user's knowledge, provided that the users have the same
        privilege. However, tickets are not broadcast, so this is unlikely to
        happen.

        The usual mechanism -- of checking that the user owns the ticket --
        is not practical, because the ticket owner is always the proxy user.
        """
        meta = self.list_ticket(ticket)
        if self.root_path not in meta['full_path']:
            raise PermissionDenied("user {} cannot delete ticket for a different resource"
                                   .format(user.username))

        # get kind of ticket
        write = meta['type'] == 'write'

        # authorize user
        if write:
            if not user.uaccess.can_change_resource(self):
                raise PermissionDenied("user {} cannot delete change ticket for {}"
                                       .format(user.username, self.short_id))
        else:
            if not user.uaccess.can_view_resource(self):
                raise PermissionDenied("user {} cannot delete view ticket for {}"
                                       .format(user.username, self.short_id))

        istorage = self.get_irods_storage()
        _, _ = istorage.session.run('iticket', None, 'delete', ticket)
class ResourceFileIRODSMixin(models.Model):
    """ This contains iRODS functions related to resource files """
    class Meta:
        abstract = True

    def create_ticket(self, user, write=False):
        """ This creates a ticket to read or modify this file """
        owning_resource = self.resource
        return owning_resource.create_ticket(user, path=self.storage_path,
                                             write=write)
Fix flake8 lint errors: remove unused imports and dead code, and only regenerate metadata files when the metadata_dirty flag is set.
import os
from datetime import datetime, timedelta
from django.db import models
from django.core.exceptions import PermissionDenied, ValidationError
from mezzanine.conf import settings
from hs_core.signals import pre_check_bag_flag
class ResourceIRODSMixin(models.Model):
    """ This contains iRODS methods to be included as options for resources """
    class Meta:
        abstract = True

    def __home_path(self):
        """ Return the home path for local iRODS resources """
        return settings.IRODS_CWD

    def update_bag(self):
        """
        Update a bag if necessary.

        This uses the Django signal pre_check_bag_flag to prepare collections,
        and then checks the AVUs 'metadata_dirty' and 'bag_modified' to
        determine whether to regenerate the metadata files and/or bag.

        This is a synchronous update. The call waits until the update is
        finished.
        """
        from hs_core.tasks import create_bag_by_irods
        from hs_core.hydroshare.resource import check_resource_type
        from hs_core.hydroshare.hs_bagit import create_bag_files

        # send signal for pre_check_bag_flag
        resource_cls = check_resource_type(self.resource_type)
        pre_check_bag_flag.send(sender=resource_cls, resource=self)

        metadata_dirty = self.getAVU('metadata_dirty')
        bag_modified = self.getAVU('bag_modified')

        if metadata_dirty:  # automatically cast to Bool
            create_bag_files(self)
            self.setAVU('metadata_dirty', False)

        # the ticket system does synchronous bag creation.
        # async bag creation isn't supported.
        if bag_modified:  # automatically cast to Bool
            create_bag_by_irods(self.short_id)
            self.setAVU('bag_modified', False)

    def update_metadata_files(self):
        """
        Make the metadata files resourcemetadata.xml and resourcemap.xml
        up to date.

        This checks the "metadata dirty" AVU before updating files if
        necessary.
        """
        from hs_core.hydroshare.hs_bagit import create_bag_files
        metadata_dirty = self.getAVU('metadata_dirty')
        if metadata_dirty:
            create_bag_files(self)
            self.setAVU('metadata_dirty', False)

    def create_ticket(self, user, path=None, write=False, allowed_uses=1):
        """
        create an iRODS ticket for reading or modifying a resource

        :param user: user to authorize
        :param path: path in iRODS to the object being requested.
        :param write: whether the ticket should grant write access.
        :param allowed_uses: number of times the ticket may be used.
        :return: the ticket string
        :raises PermissionDenied: if user is not allowed to create the ticket.
        :raises SessionException: if ticket fails to be created for some reason.
        :raises SuspiciousFileOperation: if path uses .. illegally

        WARNING: This creates a ticket that expires in one hour in UTC. If the
        iRODS and django servers are in different time zones and not set for
        UTC, this results in a useless ticket. This includes federated servers.
        THERE IS NO STRAIGHTFORWARD MECHANISM IN IRODS for determining the
        time zone or local time of an iRODS server.

        Also, note that there is no mechanism for asynchronous bag creation in
        the ticketing system. The bag is created *synchronously* if required.
        The ticket is not issued until the bag exists.
        """
        from hs_core.models import path_is_allowed

        # raises SuspiciousFileOperation if path is not allowed
        path_is_allowed(path)

        # authorize user
        if write:
            if not user.uaccess.can_change_resource(self):
                raise PermissionDenied("user {} cannot change resource {}"
                                       .format(user.username, self.short_id))
        else:
            if not user.uaccess.can_view_resource(self):
                raise PermissionDenied("user {} cannot view resource {}"
                                       .format(user.username, self.short_id))

        if path is None:
            path = self.file_path  # default = all data files

        # can only write resource files
        if write:
            if not path.startswith(self.file_path):
                raise PermissionDenied("{} can only write data files to {}"
                                       .format(user.username, self.short_id))
        # can read anything inside this particular resource
        else:
            if path == self.bag_path:
                self.update_bag()
            elif not path.startswith(self.root_path):
                raise PermissionDenied("invalid resource file path {}".format(path))
            elif path == self.resmap_path or path == self.scimeta_path:
                self.update_metadata_files()

        istorage = self.get_irods_storage()
        read_or_write = 'write' if write else 'read'
        if path.startswith(self.short_id) or path.startswith('bags/'):  # local path
            path = os.path.join(self.__home_path(), path)
        stdout, stderr = istorage.session.run("iticket", None, 'create',
                                              read_or_write, path)
        if not stdout.startswith('ticket:'):
            # BUG FIX: the original passed stderr as a stray second argument
            # to ValidationError, leaving the '{}' placeholder unfilled.
            raise ValidationError("ticket creation failed: {}".format(stderr))
        ticket = stdout.split('\n')[0]
        ticket = ticket[len('ticket:'):]
        _, _ = istorage.session.run('iticket', None, 'mod', ticket,
                                    'uses', str(allowed_uses))

        # This creates a timestamp with a one-hour timeout.
        # Note that this is a timeout on when the ticket is first used, and
        # not on the completion of the use, which can take considerably longer.
        # TODO: this will fail unless Django and iRODS are both running in UTC.
        # There is no current mechanism for determining the timezone of a
        # remote iRODS server from within iRODS; shell access is required.
        timeout = datetime.now() + timedelta(hours=1)
        formatted = timeout.strftime("%Y-%m-%d.%H:%M")
        _, _ = istorage.session.run('iticket', None, 'mod', ticket,
                                    'expires', formatted)
        return ticket

    def list_ticket(self, ticket):
        """ List a ticket's attributes as a dict of parsed fields """
        istorage = self.get_irods_storage()
        stdout, stderr = istorage.session.run("iticket", None, 'ls', ticket)
        if stdout.startswith('id:'):
            stuff = stdout.split('\n')
            output = {}
            for s in stuff:
                try:
                    line = s.split(': ')
                    key = line[0]
                    value = line[1]
                    if key == 'collection name' or key == 'data collection':
                        output['full_path'] = value
                        if self.resource_federation_path:
                            if __debug__:
                                assert(value.startswith(
                                    self.resource_federation_path))
                            output['long_path'] = \
                                value[len(self.resource_federation_path):]
                            output['fed_path'] = self.resource_federation_path
                        else:
                            # BUG FIX: str has no .search method; find()
                            # returns the offset (or -1), matching the
                            # assert below.
                            location = value.find(self.short_id)
                            if __debug__:
                                assert(location >= 0)
                            output['long_path'] = value[location:]
                            output['home_path'] = value[:(location-1)]
                        if __debug__:
                            assert(output['long_path']
                                   .startswith(self.short_id))
                        # data/....
                        qual_path = output['long_path'][len(self.short_id)+1:]
                        output['qual_path'] = qual_path
                        output['folder'] = None
                        if qual_path.startswith('data/contents/'):
                            output['folder'] = qual_path[len('data/contents/'):]
                    if key == 'data-object name':
                        output['filename'] = value
                    elif key == 'ticket type':
                        output['type'] = value
                    elif key == 'owner name':
                        output['owner'] = value
                    elif key == 'owner zone':
                        output['zone'] = value
                    elif key == 'expire time':
                        output['expires'] = value
                    else:
                        output[line[0]] = line[1]
                except Exception:  # no ':' in line
                    pass
            if 'filename' in output:
                output['full_path'] = os.path.join(output['full_path'],
                                                   output['filename'])
            return output
        else:
            raise ValidationError("ticket {} cannot be listed".format(ticket))

    def delete_ticket(self, user, ticket):
        """
        delete an existing ticket

        :param user: user requesting the deletion.
        :param ticket: ticket string
        :raises SessionException: if ticket does not exist.

        This checks that the user at least has the privilege the ticket
        grants, before deleting it. This is not quite as comprehensive as
        keeping a ticket history, but provides a small amount of safety
        nonetheless.

        It remains possible for one user to delete the ticket of another user
        without that user's knowledge, provided that the users have the same
        privilege. However, tickets are not broadcast, so this is unlikely to
        happen.

        The usual mechanism -- of checking that the user owns the ticket --
        is not practical, because the ticket owner is always the proxy user.
        """
        meta = self.list_ticket(ticket)
        if self.root_path not in meta['full_path']:
            raise PermissionDenied("user {} cannot delete ticket for a different resource"
                                   .format(user.username))

        # get kind of ticket
        write = meta['type'] == 'write'

        # authorize user
        if write:
            if not user.uaccess.can_change_resource(self):
                raise PermissionDenied("user {} cannot delete change ticket for {}"
                                       .format(user.username, self.short_id))
        else:
            if not user.uaccess.can_view_resource(self):
                raise PermissionDenied("user {} cannot delete view ticket for {}"
                                       .format(user.username, self.short_id))

        istorage = self.get_irods_storage()
        _, _ = istorage.session.run('iticket', None, 'delete', ticket)
class ResourceFileIRODSMixin(models.Model):
    """ This contains iRODS functions related to resource files """
    class Meta:
        abstract = True

    def create_ticket(self, user, write=False):
        """ This creates a ticket to read or modify this file """
        owning_resource = self.resource
        return owning_resource.create_ticket(user, path=self.storage_path,
                                             write=write)
|
from django.db import models
from django.db.models import signals
from django.core.urlresolvers import reverse
from .signals import create_slug
from django.contrib.auth.models import User
class Artist(models.Model):
    """Artist profile: contact details, social-media handles, and an auto-filled slug."""
    name = models.CharField("Nome", max_length=120)
    press_release = models.TextField("Press Release", db_index=True)
    # Photos are stored under artists/ keeping their original filenames.
    photo = models.ImageField("Foto", upload_to='artists/')
    phone = models.CharField("Telefone", max_length=20)
    site = models.CharField("Site", max_length=200)
    contact_email = models.EmailField("Email")
    # Social handles. NOTE(review): blank=False combined with null=True is an
    # unusual pairing for CharFields (form requires a value while the DB allows
    # NULL) -- confirm this is intentional.
    facebook = models.CharField("Facebook", max_length=255, blank=False, null=True)
    instagram = models.CharField("Instagram", max_length=120, blank=False, null=True)
    twitter = models.CharField("Twitter", max_length=120, blank=False, null=True)
    snapchat = models.CharField("Snapchat", max_length=120, blank=False, null=True)
    # Populated by the create_slug post-save handler connected below.
    slug = models.SlugField(max_length=170, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, null=True, blank=True)
    # Configuration read by the create_slug signal handler: which field to fill
    # and which field to derive it from.
    slug_field_name = 'slug'
    slug_from = 'name'
    user = models.OneToOneField(User)

    class Meta:
        db_table = 'artist'
        verbose_name = 'Artista'
        verbose_name_plural = 'Artistas'

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        """Return the canonical profile URL for this artist."""
        return reverse("artist-profile", kwargs={'pk': self.pk})
# Auto-generate the slug from the artist's name after every save.
signals.post_save.connect(create_slug, sender=Artist)
Change artist photo to use a hash as filename
from django.db import models
from django.db.models import signals
from django.core.urlresolvers import reverse
from .signals import create_slug
from django.contrib.auth.models import User
import hashlib
import random
def photo_path_and_name(instance, filename):
    """
    Build the upload path for an artist photo, using a random hash as filename.

    Prevents filename collisions and avoids leaking the uploader's original
    filename.

    :param instance: the Artist instance the photo belongs to (unused).
    :param filename: the uploaded file's original name; only its extension is kept.
    :return: a storage path of the form 'artists/<64-hex-digest>[.<ext>]'.
    """
    random_filename = hashlib.sha256(str(random.getrandbits(256)).encode('utf-8')).hexdigest()
    # rpartition keeps everything after the LAST dot; when there is no dot at
    # all, `dot` is empty and we emit no extension (the previous
    # filename.split('.')[-1] appended the whole name in that case).
    _, dot, ext = filename.rpartition('.')
    new_name = '{}.{}'.format(random_filename, ext) if dot else random_filename
    # Join without '/'.join(['artists/', ...]), which produced a double slash
    # ('artists//<hash>') because the prefix already ended with '/'.
    return 'artists/' + new_name
class Artist(models.Model):
    """Artist profile: contact details, social-media handles, and an auto-filled slug."""
    name = models.CharField("Nome", max_length=120)
    press_release = models.TextField("Press Release", db_index=True)
    # Stored under a random sha256-based filename; see photo_path_and_name above.
    photo = models.ImageField("Foto", upload_to=photo_path_and_name)
    phone = models.CharField("Telefone", max_length=20)
    site = models.CharField("Site", max_length=200)
    contact_email = models.EmailField("Email")
    # Social handles. NOTE(review): blank=False combined with null=True is an
    # unusual pairing for CharFields (form requires a value while the DB allows
    # NULL) -- confirm this is intentional.
    facebook = models.CharField("Facebook", max_length=255, blank=False, null=True)
    instagram = models.CharField("Instagram", max_length=120, blank=False, null=True)
    twitter = models.CharField("Twitter", max_length=120, blank=False, null=True)
    snapchat = models.CharField("Snapchat", max_length=120, blank=False, null=True)
    # Populated by the create_slug post-save handler connected below.
    slug = models.SlugField(max_length=170, blank=True)
    created_at = models.DateTimeField(auto_now_add=True, null=True, blank=True)
    updated_at = models.DateTimeField(auto_now=True, null=True, blank=True)
    # Configuration read by the create_slug signal handler: which field to fill
    # and which field to derive it from.
    slug_field_name = 'slug'
    slug_from = 'name'
    user = models.OneToOneField(User)

    class Meta:
        db_table = 'artist'
        verbose_name = 'Artista'
        verbose_name_plural = 'Artistas'

    def __str__(self):
        return self.name

    def get_absolute_url(self):
        """Return the canonical profile URL for this artist."""
        return reverse("artist-profile", kwargs={'pk': self.pk})
# Auto-generate the slug from the artist's name after every save.
signals.post_save.connect(create_slug, sender=Artist)
|
"""Define celery tasks for hs_core app."""
import os
import sys
import traceback
import zipfile
import logging
import json
from celery.signals import task_postrun
from datetime import datetime, timedelta, date
from xml.etree import ElementTree
import requests
from celery import shared_task
from celery.schedules import crontab
from django.conf import settings
from django.core.mail import send_mail
from django.contrib.sites.models import Site
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from rest_framework import status
from hs_access_control.models import GroupMembershipRequest
from hs_core.hydroshare import utils, create_empty_resource, set_dirty_bag_flag
from hydroshare.hydrocelery import app as celery_app
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files, create_bag, create_bagit_files_by_irods
from hs_core.hydroshare.resource import get_activated_doi, get_crossref_url, deposit_res_metadata_with_crossref
from hs_core.task_utils import get_or_create_task_notification
from hs_odm2.models import ODM2Variable
from django_irods.storage import IrodsStorage
from theme.models import UserQuota, QuotaMessage, User
from django_irods.icommands import SessionException
from celery.result import states
from hs_core.models import BaseResource, TaskNotification
from hs_core.enums import RelationTypes
from theme.utils import get_quota_message
from hs_collection_resource.models import CollectionDeletedResource
from hs_file_types.models import (
FileSetLogicalFile,
GenericLogicalFile,
GeoFeatureLogicalFile,
GeoRasterLogicalFile,
ModelProgramLogicalFile,
ModelInstanceLogicalFile,
NetCDFLogicalFile,
RefTimeseriesLogicalFile,
TimeSeriesLogicalFile
)
# Lookup table from logical-file class names (as passed into tasks such as
# move_aggregation_task) to the concrete aggregation classes imported above.
FILE_TYPE_MAP = {"GenericLogicalFile": GenericLogicalFile,
                 "FileSetLogicalFile": FileSetLogicalFile,
                 "GeoRasterLogicalFile": GeoRasterLogicalFile,
                 "NetCDFLogicalFile": NetCDFLogicalFile,
                 "GeoFeatureLogicalFile": GeoFeatureLogicalFile,
                 "RefTimeseriesLogicalFile": RefTimeseriesLogicalFile,
                 "TimeSeriesLogicalFile": TimeSeriesLogicalFile,
                 "ModelProgramLogicalFile": ModelProgramLogicalFile,
                 "ModelInstanceLogicalFile": ModelInstanceLogicalFile
                 }
# Module-level logger shared by every task in this file.
# Pass 'django' into getLogger instead of __name__
# for celery tasks (as this seems to be the
# only way to successfully log in code executed
# by celery, despite our catch-all handler).
logger = logging.getLogger('django')
class FileOverrideException(Exception):
    """Raised when a file operation would override existing files."""

    def __init__(self, error_message):
        # Pass only the message to Exception. The previous code passed `self`
        # as an extra positional argument, which polluted `args` with the
        # exception instance itself and produced a confusing str() output.
        super(FileOverrideException, self).__init__(error_message)
@celery_app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """
    Register this app's periodic (beat) tasks once Celery is configured,
    unless settings.DISABLE_PERIODIC_TASKS is set truthy.
    """
    if (hasattr(settings, 'DISABLE_PERIODIC_TASKS') and settings.DISABLE_PERIODIC_TASKS):
        logger.debug("Periodic tasks are disabled in SETTINGS")
    else:
        # 23:30 daily: remove two-day-old temporary zip folders
        sender.add_periodic_task(crontab(minute=30, hour=23), nightly_zips_cleanup.s())
        # 00:00 daily: DOI deposition/activation checks for published resources
        sender.add_periodic_task(crontab(minute=0, hour=0), manage_task_nightly.s())
        # 00:15 on the first Monday of each month: quota warning emails
        sender.add_periodic_task(crontab(minute=15, hour=0, day_of_week=1, day_of_month='1-7'),
                                 send_over_quota_emails.s())
        # 12:00 daily: refresh locally cached ODM2 variables
        sender.add_periodic_task(crontab(minute=00, hour=12), daily_odm2_sync.s())
        # NOTE(review): crontab() defaults unset fields to '*', so this fires
        # every minute on the 1st of the month -- confirm a minute/hour was intended.
        sender.add_periodic_task(crontab(day_of_month=1), monthly_group_membership_requests_cleanup.s())
        # 00:30 daily: redeem membership requests involving deactivated users
        sender.add_periodic_task(crontab(minute=30, hour=0), daily_innactive_group_requests_cleanup.s())
        # NOTE(review): likewise fires every minute on Mondays -- confirm intended.
        sender.add_periodic_task(crontab(day_of_week=1), task_notification_cleanup.s())
# Currently there are two different cleanups scheduled.
# One is 20 minutes after creation, the other is nightly.
# TODO Clean up zipfiles in remote federated storage as well.
@celery_app.task(ignore_result=True)
def nightly_zips_cleanup():
    """Remove two-day-old temporary zip folders from the local zone and every federated zone."""
    # delete 2 days ago
    date_folder = (date.today() - timedelta(2)).strftime('%Y-%m-%d')

    def _cleanup(storage, target):
        # Log the attempt, then delete only if the folder actually exists.
        if __debug__:
            logger.debug("cleaning up {}".format(target))
        if storage.exists(target):
            storage.delete(target)

    _cleanup(IrodsStorage(), "zips/{daily_date}".format(daily_date=date_folder))
    federated_prefixes = BaseResource.objects.all().values_list('resource_federation_path')\
        .distinct()
    for (prefix,) in federated_prefixes:
        if prefix == "":
            continue
        target = "{prefix}/zips/{daily_date}"\
            .format(prefix=prefix, daily_date=date_folder)
        _cleanup(IrodsStorage("federated"), target)
@celery_app.task(ignore_result=True)
def manage_task_nightly():
    """
    Nightly DOI activation check for published resources.

    Retries failed CrossRef metadata depositions, polls CrossRef for the
    activation status of pending DOIs, and (unless settings.DISABLE_TASK_EMAILS
    is set) emails support with anything that still needs follow-up.
    """
    # The nightly running task do DOI activation check
    # Check DOI activation on failed and pending resources and send email.
    msg_lst = []
    # retrieve all published resources with failed metadata deposition with CrossRef if any and
    # retry metadata deposition
    failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
    for res in failed_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            response = deposit_res_metadata_with_crossref(res)
            if response.status_code == status.HTTP_200_OK:
                # retry of metadata deposition succeeds, change resource flag from failure
                # to pending
                res.doi = act_doi
                res.save()
                # create bag and compute checksum for published resource to meet DataONE requirement
                create_bag_by_irods(res.short_id)
            else:
                # retry of metadata deposition failed again, notify admin
                msg_lst.append("Metadata deposition with CrossRef for the published resource "
                               "DOI {res_doi} failed again after retry with first metadata "
                               "deposition requested since {pub_date}.".format(res_doi=act_doi,
                                                                               pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    pending_resources = BaseResource.objects.filter(raccess__published=True,
                                                    doi__contains='pending')
    for res in pending_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            main_url = get_crossref_url()
            # Poll CrossRef's submissionDownload endpoint for the batch result XML.
            req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
                      '{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
            response = requests.get(req_str.format(MAIN_URL=main_url,
                                                   USERNAME=settings.CROSSREF_LOGIN_ID,
                                                   PASSWORD=settings.CROSSREF_LOGIN_PWD,
                                                   DOI_BATCH_ID=res.short_id,
                                                   TYPE='result'),
                                    verify=False)
            root = ElementTree.fromstring(response.content)
            rec_cnt_elem = root.find('.//record_count')
            failure_cnt_elem = root.find('.//failure_count')
            success = False
            # Activation counts as successful only when at least one record was
            # processed and none of them failed.
            if rec_cnt_elem is not None and failure_cnt_elem is not None:
                rec_cnt = int(rec_cnt_elem.text)
                failure_cnt = int(failure_cnt_elem.text)
                if rec_cnt > 0 and failure_cnt == 0:
                    res.doi = act_doi
                    res.save()
                    success = True
                    # create bag and compute checksum for published resource to meet DataONE requirement
                    create_bag_by_irods(res.short_id)
            if not success:
                msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
                               "data deposited since {pub_date}.".format(res_doi=act_doi,
                                                                         pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))
    if msg_lst and not settings.DISABLE_TASK_EMAILS:
        email_msg = '\n'.join(msg_lst)
        subject = 'Notification of pending DOI deposition/activation of published resources'
        # send email for people monitoring and follow-up as needed
        send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@celery_app.task(ignore_result=True)
def send_over_quota_emails():
    """
    Check quota usage for all active non-superuser users and email a warning to
    anyone at or above the soft limit, managing the grace-period countdown.

    Runs periodically (see setup_periodic_tasks). Emails are only logged, not
    sent, when settings.DEBUG or settings.DISABLE_TASK_EMAILS is set.
    """
    # check over quota cases and send quota warning emails as needed
    hs_internal_zone = "hydroshare"
    if not QuotaMessage.objects.exists():
        QuotaMessage.objects.create()
    qmsg = QuotaMessage.objects.first()
    users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
    for u in users:
        uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
        if uq:
            used_percent = uq.used_percent
            if used_percent >= qmsg.soft_limit_percent:
                if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
                    if uq.remaining_grace_period < 0:
                        # triggers grace period counting
                        uq.remaining_grace_period = qmsg.grace_period
                    elif uq.remaining_grace_period > 0:
                        # reduce remaining_grace_period by one day
                        uq.remaining_grace_period -= 1
                elif used_percent >= qmsg.hard_limit_percent:
                    # set grace period to 0 when user quota exceeds hard limit
                    uq.remaining_grace_period = 0
                uq.save()
                # Build the salutation from whatever name parts the user has.
                if u.first_name and u.last_name:
                    sal_name = '{} {}'.format(u.first_name, u.last_name)
                elif u.first_name:
                    sal_name = u.first_name
                elif u.last_name:
                    sal_name = u.last_name
                else:
                    sal_name = u.username
                msg_str = 'Dear ' + sal_name + ':\n\n'
                ori_qm = get_quota_message(u)
                # make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
                replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
                    settings.DEFAULT_SUPPORT_EMAIL)
                new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
                msg_str += new_qm
                msg_str += '\n\nHydroShare Support'
                subject = 'Quota warning'
                if settings.DEBUG or settings.DISABLE_TASK_EMAILS:
                    logger.info("quota warning email not sent out on debug server but logged instead: "
                                "{}".format(msg_str))
                else:
                    try:
                        # send email for people monitoring and follow-up as needed
                        send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
                                  [u.email, settings.DEFAULT_SUPPORT_EMAIL],
                                  html_message=msg_str)
                    except Exception as ex:
                        # BUG FIX: Python 3 exceptions have no .message attribute;
                        # referencing it here raised AttributeError and hid the
                        # real send failure. Use str(ex) instead.
                        logger.debug("Failed to send quota warning email: " + str(ex))
            else:
                if uq.remaining_grace_period >= 0:
                    # turn grace period off now that the user is below quota soft limit
                    uq.remaining_grace_period = -1
                    uq.save()
        else:
            logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
    """Add zip file contents to an existing resource and remove the tmp zip file.

    :param pk: short id of the target resource.
    :param zip_file_path: path of the temporary zip file; always deleted on exit.

    Progress/errors are recorded on the resource's file_unpack_status and
    file_unpack_message fields rather than raised.
    """
    zfile = None
    resource = None
    try:
        resource = utils.get_resource_by_shortkey(pk, or_404=False)
        zfile = zipfile.ZipFile(zip_file_path)
        num_files = len(zfile.infolist())
        zcontents = utils.ZipContents(zfile)
        files = zcontents.get_files()
        resource.file_unpack_status = 'Running'
        resource.save()
        # Count from 1 so the progress message reflects files already imported;
        # starting at 0 reported "Imported 0 of about N" after the first file.
        for i, f in enumerate(files, 1):
            logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
            utils.add_file_to_resource(resource, f)
            resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
                i, num_files)
            resource.save()
        # This might make the resource unsuitable for public consumption
        resource.update_public_and_discoverable()
        # TODO: this is a bit of a lie because a different user requested the bag overwrite
        utils.resource_modified(resource, resource.creator, overwrite_bag=False)
        # Call success callback
        resource.file_unpack_message = None
        resource.file_unpack_status = 'Done'
        resource.save()
    except BaseResource.DoesNotExist:
        msg = "Unable to add zip file contents to non-existent resource {pk}."
        msg = msg.format(pk=pk)
        logger.error(msg)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt still
        # propagate; any other failure is recorded on the resource and logged.
        exc_info = "".join(traceback.format_exception(*sys.exc_info()))
        if resource:
            resource.file_unpack_status = 'Error'
            resource.file_unpack_message = exc_info
            resource.save()
        if zfile:
            zfile.close()
        logger.error(exc_info)
    finally:
        # Delete upload file
        os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
    """Remove *zip_path* from iRODS storage when it is present."""
    storage = IrodsStorage()
    if storage.exists(zip_path):
        storage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, aggregation_name=None, sf_zip=False, download_path='',
                    request_username=None):
    """ Create temporary zip file from input_path and store in output_path

    :param resource_id: the short_id of a resource
    :param input_path: full irods path of input starting with federation path
    :param output_path: full irods path of output starting with federation path
    :param aggregation_name: The name of the aggregation to zip
    :param sf_zip: signals a single file to zip
    :param download_path: download path to return as task payload
    :param request_username: the username of the requesting user
    :return: download_path, passed through unchanged as the task payload
    """
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    res = get_resource_by_shortkey(resource_id)
    aggregation = None
    if aggregation_name:
        aggregation = res.get_aggregation_by_aggregation_name(aggregation_name)
    istorage = res.get_irods_storage()  # invoke federated storage as necessary

    if res.resource_type == "CompositeResource":
        if '/data/contents/' in input_path:
            short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            res.create_aggregation_meta_files(path=short_path)
        else:  # all metadata included, e.g., /data/*
            res.create_aggregation_meta_files()

    if aggregation or sf_zip:
        # input path points to single file aggregation
        # ensure that foo.zip contains aggregation metadata
        # by copying these into a temp subdirectory foo/foo parallel to where foo.zip is stored
        temp_folder_name, ext = os.path.splitext(output_path)  # strip zip to get scratch dir
        head, tail = os.path.split(temp_folder_name)  # tail is unqualified folder name "foo"
        out_with_folder = os.path.join(temp_folder_name, tail)  # foo/foo is subdir to zip
        # in the case of user provided zip file name, out_with_folder path may not end with
        # aggregation file name
        aggr_filename = os.path.basename(input_path)
        if not out_with_folder.endswith(aggr_filename):
            out_with_folder = os.path.join(os.path.dirname(out_with_folder), aggr_filename)
        istorage.copyFiles(input_path, out_with_folder)
        if not aggregation:
            # sf_zip without an explicit aggregation: try to resolve it from the path
            if '/data/contents/' in input_path:
                short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            else:
                short_path = input_path
            try:
                aggregation = res.get_aggregation_by_name(short_path)
            except ObjectDoesNotExist:
                pass
        if aggregation:
            # Copy the aggregation's map/metadata (and, for model aggregations,
            # schema) files into the scratch folder so they end up inside the
            # zip. Copy failures are logged but do not abort the zip.
            try:
                istorage.copyFiles(aggregation.map_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.map_file_path))
            try:
                istorage.copyFiles(aggregation.metadata_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.metadata_file_path))
            if aggregation.is_model_program or aggregation.is_model_instance:
                try:
                    istorage.copyFiles(aggregation.schema_file_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(aggregation.schema_file_path))
                if aggregation.is_model_instance:
                    try:
                        istorage.copyFiles(aggregation.schema_values_file_path, temp_folder_name)
                    except SessionException:
                        logger.error("cannot copy {}".format(aggregation.schema_values_file_path))
            for file in aggregation.files.all():
                try:
                    istorage.copyFiles(file.storage_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(file.storage_path))
        istorage.zipup(temp_folder_name, output_path)
        istorage.delete(temp_folder_name)  # delete working directory; this isn't the zipfile
    else:  # regular folder to zip
        istorage.zipup(input_path, output_path)
    return download_path
@shared_task
def create_bag_by_irods(resource_id, create_zip=True):
    """Create a resource bag on iRODS side by running the bagit rule and ibun zip.

    This function runs as a celery task, invoked asynchronously so that it does not
    block the main web thread when it creates bags for very large files which will take some time.
    :param
    resource_id: the resource uuid that is used to look for the resource to create the bag for.
    :param create_zip: defaults to True, set to false to create bagit files without zipping
    :return: bag_url if bag creation operation succeeds or
    raise an exception if resource does not exist or any other issues that prevent bags from being created.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    istorage = res.get_irods_storage()

    bag_path = res.bag_path
    # A missing AVU is treated as dirty so first-time bags always get built.
    metadata_dirty = res.getAVU('metadata_dirty')
    metadata_dirty = metadata_dirty is None or metadata_dirty
    # if metadata has been changed, then regenerate metadata xml files
    if metadata_dirty:
        create_bag_metadata_files(res)
    bag_modified = res.getAVU("bag_modified")
    bag_modified = bag_modified is None or bag_modified
    if metadata_dirty or bag_modified:
        create_bagit_files_by_irods(res, istorage)
        res.setAVU("bag_modified", False)
    if create_zip:
        irods_bagit_input_path = res.get_irods_path(resource_id, prepend_short_id=False)
        # only proceed when the resource is not deleted potentially by another request
        # when being downloaded
        is_exist = istorage.exists(irods_bagit_input_path)
        if is_exist:
            try:
                # Replace any stale bag zip before re-zipping.
                if istorage.exists(bag_path):
                    istorage.delete(bag_path)
                istorage.zipup(irods_bagit_input_path, bag_path)
                if res.raccess.published:
                    # compute checksum to meet DataONE distribution requirement
                    chksum = istorage.checksum(bag_path)
                    res.bag_checksum = chksum
                return res.bag_url
            except SessionException as ex:
                # Re-raise with just stderr so callers see the iRODS error text.
                raise SessionException(-1, '', ex.stderr)
        else:
            raise ObjectDoesNotExist('Resource {} does not exist.'.format(resource_id))
@shared_task
def copy_resource_task(ori_res_id, new_res_id=None, request_username=None):
    """
    Copy a resource's files, AVUs, and metadata into a (possibly pre-created)
    target resource and return the new resource's URL.

    :param ori_res_id: short id of the resource to copy.
    :param new_res_id: optional pre-created target resource id; a new empty
        resource is created when None.
    :param request_username: username requesting the copy; used when creating
        the empty target resource.
    :return: the new resource's absolute URL (task payload).
    :raises utils.ResourceCopyException: on any failure, after deleting the
        partially-created copy.
    """
    try:
        new_res = None
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, request_username, action='copy')
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)
        if new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
            # the resource to be copied is a versioned resource, need to delete this isVersionOf
            # relation element to maintain the single versioning obsolescence chain
            new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().delete()

        # create the relation element for the new_res
        today = date.today().strftime("%m/%d/%Y")
        derived_from = "{}, accessed on: {}".format(ori_res.get_citation(), today)
        # since we are allowing user to add relation of type source, need to check we don't already have it
        if not new_res.metadata.relations.all().filter(type=RelationTypes.source, value=derived_from).exists():
            new_res.metadata.create_element('relation', type=RelationTypes.source, value=derived_from)

        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()

        # create bag for the new resource
        create_bag(new_res)
        return new_res.get_absolute_url()
    except Exception as ex:
        # Roll back the partially-created copy before surfacing the error.
        if new_res:
            new_res.delete()
        raise utils.ResourceCopyException(str(ex))
@shared_task
def create_new_version_resource_task(ori_res_id, username, new_res_id=None):
    """
    Task for creating a new version of a resource

    Args:
        ori_res_id: the original resource id that is to be versioned.
        new_res_id: the new versioned resource id from the original resource. If None, a
        new resource will be created.
        username: the requesting user's username
    Returns:
        the new versioned resource url as the payload
    Raises:
        utils.ResourceVersioningException: on any failure, after deleting the
        partially-created new version.
    """
    # BUG FIX: bind both names before the try block. Previously `ori_res` was
    # first assigned inside `try`, so a failure before that assignment made the
    # `finally` clause raise NameError, masking the original exception.
    new_res = None
    ori_res = None
    try:
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, username)
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)
        # copy metadata from source resource to target new-versioned resource except three elements
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)
        # add or update Relation element to link source and target resources
        ori_res.metadata.create_element('relation', type=RelationTypes.isReplacedBy, value=new_res.get_citation())
        if new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
            # the original resource is already a versioned resource, and its isVersionOf relation
            # element is copied over to this new version resource, needs to delete this element so
            # it can be created to link to its original resource correctly
            new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().delete()
        new_res.metadata.create_element('relation', type=RelationTypes.isVersionOf, value=ori_res.get_citation())

        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new version collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()

        # create bag for the new resource
        create_bag(new_res)

        # since an isReplaceBy relation element is added to original resource, needs to call
        # resource_modified() for original resource
        # if everything goes well up to this point, set original resource to be immutable so that
        # obsoleted resources cannot be modified from REST API
        ori_res.raccess.immutable = True
        ori_res.raccess.save()
        ori_res.save()
        return new_res.get_absolute_url()
    except Exception as ex:
        if new_res:
            new_res.delete()
        raise utils.ResourceVersioningException(str(ex))
    finally:
        # release the lock regardless -- but only if the original resource
        # lookup succeeded; otherwise there is nothing to unlock
        if ori_res is not None:
            ori_res.locked_time = None
            ori_res.save()
@shared_task
def replicate_resource_bag_to_user_zone_task(res_id, request_username):
    """
    Task for replicating resource bag which will be created on demand if not existent already to iRODS user zone

    Args:
        res_id: the resource id with its bag to be replicated to iRODS user zone
        request_username: the requesting user's username to whose user zone space the bag is copied to

    Returns:
        None, but exceptions will be raised if there is an issue with iRODS operation
    """
    res = utils.get_resource_by_shortkey(res_id)
    res_coll = res.root_path
    istorage = res.get_irods_storage()
    if istorage.exists(res_coll):
        bag_modified = res.getAVU('bag_modified')
        if bag_modified is None or not bag_modified:
            # bag is up to date; only build it if the zip is missing
            if not istorage.exists(res.bag_path):
                create_bag_by_irods(res_id)
        else:
            # bag is stale; regenerate before replicating
            create_bag_by_irods(res_id)

        # do replication of the resource bag to irods user zone
        if not res.resource_federation_path:
            istorage.set_fed_zone_session()
        src_file = res.bag_path
        tgt_file = '/{userzone}/home/{username}/{resid}.zip'.format(
            userzone=settings.HS_USER_IRODS_ZONE, username=request_username, resid=res_id)
        fsize = istorage.size(src_file)
        # make sure the copy will not push the user over quota before copying
        utils.validate_user_quota(request_username, fsize)
        istorage.copyFiles(src_file, tgt_file)
        return None
    else:
        raise ValidationError("Resource {} does not exist in iRODS".format(res.short_id))
@shared_task
def delete_resource_task(resource_id, request_username=None):
    """
    Deletes a resource managed by HydroShare. The caller must be an owner of the resource or an
    administrator to perform this function.
    :param resource_id: The unique HydroShare identifier of the resource to be deleted
    :param request_username: username of the requesting user; when set, a
        CollectionDeletedResource record is created for each containing collection
    :return: the '/my-resources/' page URL to redirect to after deletion
    raise an exception if there were errors.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    res_title = res.metadata.title
    res_type = res.resource_type
    resource_related_collections = [col for col in res.collections.all()]
    owners_list = [owner for owner in res.raccess.owners.all()]

    # when the most recent version of a resource in an obsolescence chain is deleted, the previous
    # version in the chain needs to be set as the "active" version by deleting "isReplacedBy"
    # relation element
    if res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
        is_version_of_res_link = \
            res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().value
        # BUG FIX: use rfind, which returns -1 when no '/' is present. The
        # previous rindex raises ValueError in that case, which made the
        # fallback branch below unreachable and crashed on bare-id links.
        idx = is_version_of_res_link.rfind('/')
        if idx == -1:
            obsolete_res_id = is_version_of_res_link
        else:
            obsolete_res_id = is_version_of_res_link[idx + 1:]
        obsolete_res = utils.get_resource_by_shortkey(obsolete_res_id)
        if obsolete_res.metadata.relations.all().filter(type=RelationTypes.isReplacedBy).exists():
            eid = obsolete_res.metadata.relations.all().filter(type=RelationTypes.isReplacedBy).first().id
            obsolete_res.metadata.delete_element('relation', eid)
            # also make this obsoleted resource editable if not published now that it becomes the latest version
            if not obsolete_res.raccess.published:
                obsolete_res.raccess.immutable = False
                obsolete_res.raccess.save()

    for res_in_col in res.resources.all():
        # res being deleted is a collection resource - delete isPartOf relation of all resources that are part of the
        # collection
        if res_in_col.metadata.relations.filter(type='isPartOf', value__endswith=res.short_id).exists():
            res_in_col.metadata.relations.filter(type='isPartOf', value__endswith=res.short_id).delete()
            set_dirty_bag_flag(res_in_col)

    for collection_res in resource_related_collections:
        # res being deleted is part of one or more collections - delete hasPart relation for all those collections
        collection_res.metadata.relations.filter(type='hasPart', value__endswith=res.short_id).delete()
        set_dirty_bag_flag(collection_res)

    res.delete()
    if request_username:
        # if the deleted resource is part of any collection resource, then for each of those collection
        # create a CollectionDeletedResource object which can then be used to list collection deleted
        # resources on collection resource landing page
        for collection_res in resource_related_collections:
            o = CollectionDeletedResource.objects.create(
                resource_title=res_title,
                deleted_by=User.objects.get(username=request_username),
                resource_id=resource_id,
                resource_type=res_type,
                collection=collection_res
            )
            o.resource_owners.add(*owners_list)

    # return the page URL to redirect to after resource deletion task is complete
    return '/my-resources/'
@shared_task
def update_web_services(services_url, api_token, timeout, publish_urls, res_id):
    """Update web services hosted by GeoServer and HydroServer.

    This function sends a resource id to the HydroShare web services manager
    application, which will check the current status of the resource and register
    or unregister services hosted by GeoServer and HydroServer.

    The HydroShare web services manager will return a list of endpoint URLs
    for both the resource and individual aggregations. If publish_urls is set to
    True, these endpoints will be added to the extra metadata fields of the
    resource and aggregations.

    :param services_url: base URL of the web services manager.
    :param api_token: token sent in the Authorization header.
    :param timeout: request timeout in seconds.
    :param publish_urls: when True, write returned endpoint URLs into metadata.
    :param res_id: short id of the resource to (un)register.
    :return: the requests.Response on success, or the caught exception object.
    """
    session = requests.Session()
    session.headers.update(
        {"Authorization": " ".join(("Token", str(api_token)))}
    )

    rest_url = str(services_url) + "/" + str(res_id) + "/"

    try:
        response = session.post(rest_url, timeout=timeout)
        if publish_urls and response.status_code == status.HTTP_201_CREATED:
            try:
                resource = utils.get_resource_by_shortkey(res_id)
                response_content = json.loads(response.content.decode())
                for key, value in response_content["resource"].items():
                    resource.extra_metadata[key] = value
                    resource.save()
                for url in response_content["content"]:
                    logical_files = list(resource.logical_files)
                    # NOTE(review): .encode() yields bytes while aggregation_name
                    # values are presumably str -- confirm this .index() lookup
                    # can ever match under Python 3.
                    lf = logical_files[[i.aggregation_name for i in
                                        logical_files].index(
                        url["layer_name"].encode()
                    )]
                    lf.metadata.extra_metadata["Web Services URL"] = url["message"]
                    lf.metadata.save()
            except Exception as e:
                logger.error(e)
                return e
        return response
    except (requests.exceptions.RequestException, ValueError) as e:
        logger.error(e)
        return e
@shared_task
def resource_debug(resource_id):
    """Check a resource's iRODS files for consistency and return any errors found.

    (The previous docstring was copy-pasted from update_web_services and wrong.)
    :param resource_id: short id of the resource to check.
    :return: the errors collected by check_irods_files (errors are returned, not logged).
    """
    resource = utils.get_resource_by_shortkey(resource_id)
    from hs_core.management.utils import check_irods_files
    return check_irods_files(resource, log_errors=False, return_errors=True)
@shared_task
def unzip_task(user_pk, res_id, zip_with_rel_path, bool_remove_original, overwrite=False, auto_aggregate=False,
               ingest_metadata=False, unzip_to_folder=False):
    """Unzip an in-resource zip file on behalf of the user identified by *user_pk*."""
    from hs_core.views.utils import unzip_file
    requesting_user = User.objects.get(pk=user_pk)
    unzip_file(requesting_user, res_id, zip_with_rel_path, bool_remove_original, overwrite, auto_aggregate,
               ingest_metadata, unzip_to_folder)
@shared_task
def move_aggregation_task(res_id, file_type_id, file_type, tgt_path):
    """
    Move all files of an aggregation to a target folder within the same
    resource and flag the aggregation's metadata files for regeneration.

    :param res_id: short id of the resource containing the aggregation.
    :param file_type_id: primary key of the aggregation instance.
    :param file_type: key into FILE_TYPE_MAP naming the aggregation class.
    :param tgt_path: target folder path relative to the resource's file path.
    :return: the resource's absolute URL (task payload).
    """
    from hs_core.views.utils import rename_irods_file_or_folder_in_django
    res = utils.get_resource_by_shortkey(res_id)
    istorage = res.get_irods_storage()
    res_files = []
    file_type_obj = FILE_TYPE_MAP[file_type]
    aggregation = file_type_obj.objects.get(id=file_type_id)
    res_files.extend(aggregation.files.all())
    orig_aggregation_name = aggregation.aggregation_name
    for file in res_files:
        tgt_full_path = os.path.join(res.file_path, tgt_path, os.path.basename(file.storage_path))
        # move in iRODS first, then mirror the rename in Django's file records
        istorage.moveFile(file.storage_path, tgt_full_path)
        rename_irods_file_or_folder_in_django(res, file.storage_path, tgt_full_path)
    new_aggregation_name = os.path.join(tgt_path, os.path.basename(orig_aggregation_name))
    res.set_flag_to_recreate_aggregation_meta_files(orig_path=orig_aggregation_name,
                                                    new_path=new_aggregation_name)
    return res.get_absolute_url()
@celery_app.task(ignore_result=True)
def daily_odm2_sync():
    """
    ODM2 variables are maintained on an external site; this synchronizes them
    to HydroShare for local caching. Scheduled daily (see setup_periodic_tasks).
    """
    ODM2Variable.sync()
@celery_app.task(ignore_result=True)
def monthly_group_membership_requests_cleanup():
    """
    Delete expired and redeemed group membership requests

    Removes requests older than 60 days. Scheduled monthly (see setup_periodic_tasks).
    """
    two_months_ago = datetime.today() - timedelta(days=60)
    # NOTE(review): 'my_date' looks like a placeholder field name -- confirm it
    # actually exists on GroupMembershipRequest (a date_requested-style field
    # seems likelier); an invalid lookup would raise FieldError at runtime.
    GroupMembershipRequest.objects.filter(my_date__lte=two_months_ago).delete()
@celery_app.task(ignore_result=True)
def daily_innactive_group_requests_cleanup():
    """
    Redeem group membership requests for innactive users

    Marks a request redeemed when either the requesting user or the invited
    user has been deactivated. (The function name keeps its historical
    'innactive' spelling because periodic-task registration refers to it.)
    """
    GroupMembershipRequest.objects.filter(request_from__is_active=False).update(redeemed=True)
    GroupMembershipRequest.objects.filter(invitation_to__is_active=False).update(redeemed=True)
@task_postrun.connect
def update_task_notification(sender=None, task_id=None, task=None, state=None, retval=None, **kwargs):
    """
    Updates the state of TaskNotification model when a celery task completes
    :param sender:
    :param task_id: task id
    :param task: task object
    :param state: task return state
    :param retval: task return value
    :param kwargs:
    :return:
    """
    # Only tasks explicitly listed in settings.TASK_NAME_LIST get notifications.
    if task.name in settings.TASK_NAME_LIST:
        if state == states.SUCCESS:
            get_or_create_task_notification(task_id, status="completed", payload=retval)
        elif state in states.EXCEPTION_STATES:
            get_or_create_task_notification(task_id, status="failed", payload=retval)
        elif state == states.REVOKED:
            get_or_create_task_notification(task_id, status="aborted", payload=retval)
        else:
            # Unexpected celery state; log it rather than writing a bogus status.
            logger.warning("Unhandled task state of {} for {}".format(state, task_id))
@celery_app.task(ignore_result=True)
def task_notification_cleanup():
    """
    Delete expired task notifications each week.

    Removes TaskNotification rows created more than 7 days ago.
    """
    week_ago = datetime.today() - timedelta(days=7)
    TaskNotification.objects.filter(created__lte=week_ago).delete()
[#4811] lint
"""Define celery tasks for hs_core app."""
import os
import sys
import traceback
import zipfile
import logging
import json
from celery.signals import task_postrun
from datetime import datetime, timedelta, date
from xml.etree import ElementTree
import requests
from celery import shared_task
from celery.schedules import crontab
from django.conf import settings
from django.core.mail import send_mail
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from rest_framework import status
from hs_access_control.models import GroupMembershipRequest
from hs_core.hydroshare import utils, create_empty_resource, set_dirty_bag_flag
from hydroshare.hydrocelery import app as celery_app
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files, create_bag, create_bagit_files_by_irods
from hs_core.hydroshare.resource import get_activated_doi, get_crossref_url, deposit_res_metadata_with_crossref
from hs_core.task_utils import get_or_create_task_notification
from hs_odm2.models import ODM2Variable
from django_irods.storage import IrodsStorage
from theme.models import UserQuota, QuotaMessage, User
from django_irods.icommands import SessionException
from celery.result import states
from hs_core.models import BaseResource, TaskNotification
from hs_core.enums import RelationTypes
from theme.utils import get_quota_message
from hs_collection_resource.models import CollectionDeletedResource
from hs_file_types.models import (
FileSetLogicalFile,
GenericLogicalFile,
GeoFeatureLogicalFile,
GeoRasterLogicalFile,
ModelProgramLogicalFile,
ModelInstanceLogicalFile,
NetCDFLogicalFile,
RefTimeseriesLogicalFile,
TimeSeriesLogicalFile
)
# Maps each logical-file (aggregation) class name to the class itself, so tasks
# that receive the class name as a serialized string argument can resolve it.
FILE_TYPE_MAP = {"GenericLogicalFile": GenericLogicalFile,
                 "FileSetLogicalFile": FileSetLogicalFile,
                 "GeoRasterLogicalFile": GeoRasterLogicalFile,
                 "NetCDFLogicalFile": NetCDFLogicalFile,
                 "GeoFeatureLogicalFile": GeoFeatureLogicalFile,
                 "RefTimeseriesLogicalFile": RefTimeseriesLogicalFile,
                 "TimeSeriesLogicalFile": TimeSeriesLogicalFile,
                 "ModelProgramLogicalFile": ModelProgramLogicalFile,
                 "ModelInstanceLogicalFile": ModelInstanceLogicalFile
                 }
# Pass 'django' into getLogger instead of __name__
# for celery tasks (as this seems to be the
# only way to successfully log in code executed
# by celery, despite our catch-all handler).
logger = logging.getLogger('django')
class FileOverrideException(Exception):
    """Raised when overriding an existing file fails.

    :param error_message: human-readable description of the failure.
    """

    def __init__(self, error_message):
        # Pass only the message to Exception.__init__. The previous code
        # passed (self, error_message), which put the exception instance
        # itself into ``args`` and garbled str()/repr() of the exception.
        super(FileOverrideException, self).__init__(error_message)
@celery_app.on_after_configure.connect
def setup_periodic_tasks(sender, **kwargs):
    """Register all periodic (beat) schedules, unless disabled in settings."""
    if (hasattr(settings, 'DISABLE_PERIODIC_TASKS') and settings.DISABLE_PERIODIC_TASKS):
        logger.debug("Periodic tasks are disabled in SETTINGS")
    else:
        # 23:30 nightly: remove old temporary zip downloads
        sender.add_periodic_task(crontab(minute=30, hour=23), nightly_zips_cleanup.s())
        # 00:00 nightly: DOI activation checks for published resources
        sender.add_periodic_task(crontab(minute=0, hour=0), manage_task_nightly.s())
        # 00:15 on a Monday falling in the 1st-7th of the month -- presumably
        # "first Monday of the month"; verify against celery crontab semantics.
        sender.add_periodic_task(crontab(minute=15, hour=0, day_of_week=1, day_of_month='1-7'),
                                 send_over_quota_emails.s())
        # 12:00 daily: refresh the local ODM2 variable cache
        sender.add_periodic_task(crontab(minute=00, hour=12), daily_odm2_sync.s())
        # 1st of each month: purge stale group membership requests
        sender.add_periodic_task(crontab(day_of_month=1), monthly_group_membership_requests_cleanup.s())
        # 00:30 daily: redeem requests involving inactive users
        sender.add_periodic_task(crontab(minute=30, hour=0), daily_innactive_group_requests_cleanup.s())
        # Mondays: purge week-old task notifications
        sender.add_periodic_task(crontab(day_of_week=1), task_notification_cleanup.s())
# Currently there are two different cleanups scheduled.
# One is 20 minutes after creation, the other is nightly.
# TODO Clean up zipfiles in remote federated storage as well.
@celery_app.task(ignore_result=True)
def nightly_zips_cleanup():
    """Delete the two-day-old dated zips/ folder in local and federated iRODS."""
    # delete 2 days ago
    date_folder = (date.today() - timedelta(2)).strftime('%Y-%m-%d')
    zips_daily_date = "zips/{daily_date}".format(daily_date=date_folder)
    if __debug__:
        logger.debug("cleaning up {}".format(zips_daily_date))
    istorage = IrodsStorage()
    if istorage.exists(zips_daily_date):
        istorage.delete(zips_daily_date)
    # Repeat the cleanup under every distinct federation prefix in use.
    federated_prefixes = BaseResource.objects.all().values_list('resource_federation_path')\
        .distinct()

    for p in federated_prefixes:
        prefix = p[0]  # strip tuple
        if prefix != "":
            zips_daily_date = "{prefix}/zips/{daily_date}"\
                .format(prefix=prefix, daily_date=date_folder)
            if __debug__:
                logger.debug("cleaning up {}".format(zips_daily_date))
            istorage = IrodsStorage("federated")
            if istorage.exists(zips_daily_date):
                istorage.delete(zips_daily_date)
@celery_app.task(ignore_result=True)
def manage_task_nightly():
    """Nightly DOI activation check for published resources.

    Retries CrossRef metadata deposition for resources whose DOI contains
    'failure', polls CrossRef submission results for resources whose DOI
    contains 'pending', and emails support a summary of anything unresolved.
    """
    # The nightly running task do DOI activation check

    # Check DOI activation on failed and pending resources and send email.
    msg_lst = []
    # retrieve all published resources with failed metadata deposition with CrossRef if any and
    # retry metadata deposition
    failed_resources = BaseResource.objects.filter(raccess__published=True, doi__contains='failure')
    for res in failed_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            response = deposit_res_metadata_with_crossref(res)
            if response.status_code == status.HTTP_200_OK:
                # retry of metadata deposition succeeds, change resource flag from failure
                # to pending
                res.doi = act_doi
                res.save()
                # create bag and compute checksum for published resource to meet DataONE requirement
                create_bag_by_irods(res.short_id)
            else:
                # retry of metadata deposition failed again, notify admin
                msg_lst.append("Metadata deposition with CrossRef for the published resource "
                               "DOI {res_doi} failed again after retry with first metadata "
                               "deposition requested since {pub_date}.".format(res_doi=act_doi,
                                                                               pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))

    # Poll CrossRef for submission results of resources still flagged pending.
    pending_resources = BaseResource.objects.filter(raccess__published=True,
                                                    doi__contains='pending')
    for res in pending_resources:
        if res.metadata.dates.all().filter(type='published'):
            pub_date = res.metadata.dates.all().filter(type='published')[0]
            pub_date = pub_date.start_date.strftime('%m/%d/%Y')
            act_doi = get_activated_doi(res.doi)
            main_url = get_crossref_url()
            req_str = '{MAIN_URL}servlet/submissionDownload?usr={USERNAME}&pwd=' \
                      '{PASSWORD}&doi_batch_id={DOI_BATCH_ID}&type={TYPE}'
            response = requests.get(req_str.format(MAIN_URL=main_url,
                                                   USERNAME=settings.CROSSREF_LOGIN_ID,
                                                   PASSWORD=settings.CROSSREF_LOGIN_PWD,
                                                   DOI_BATCH_ID=res.short_id,
                                                   TYPE='result'),
                                    verify=False)
            root = ElementTree.fromstring(response.content)
            rec_cnt_elem = root.find('.//record_count')
            failure_cnt_elem = root.find('.//failure_count')
            success = False
            # Success means CrossRef reports at least one record and zero failures.
            if rec_cnt_elem is not None and failure_cnt_elem is not None:
                rec_cnt = int(rec_cnt_elem.text)
                failure_cnt = int(failure_cnt_elem.text)
                if rec_cnt > 0 and failure_cnt == 0:
                    res.doi = act_doi
                    res.save()
                    success = True
                    # create bag and compute checksum for published resource to meet DataONE requirement
                    create_bag_by_irods(res.short_id)
            if not success:
                msg_lst.append("Published resource DOI {res_doi} is not yet activated with request "
                               "data deposited since {pub_date}.".format(res_doi=act_doi,
                                                                         pub_date=pub_date))
                logger.debug(response.content)
        else:
            msg_lst.append("{res_id} does not have published date in its metadata.".format(
                res_id=res.short_id))

    if msg_lst and not settings.DISABLE_TASK_EMAILS:
        email_msg = '\n'.join(msg_lst)
        subject = 'Notification of pending DOI deposition/activation of published resources'
        # send email for people monitoring and follow-up as needed
        send_mail(subject, email_msg, settings.DEFAULT_FROM_EMAIL, [settings.DEFAULT_SUPPORT_EMAIL])
@celery_app.task(ignore_result=True)
def send_over_quota_emails():
    """Check quota usage for all active users and send warning emails as needed.

    For each active, non-superuser user with a UserQuota record in the
    HydroShare internal zone: maintain the grace-period counter relative to
    the configured soft/hard limits, and send the user (and support) a quota
    warning email built from the standard quota message.
    """
    # check over quota cases and send quota warning emails as needed
    hs_internal_zone = "hydroshare"
    if not QuotaMessage.objects.exists():
        QuotaMessage.objects.create()
    qmsg = QuotaMessage.objects.first()
    users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
    for u in users:
        uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
        if uq:
            used_percent = uq.used_percent
            if used_percent >= qmsg.soft_limit_percent:
                if used_percent >= 100 and used_percent < qmsg.hard_limit_percent:
                    if uq.remaining_grace_period < 0:
                        # triggers grace period counting
                        uq.remaining_grace_period = qmsg.grace_period
                    elif uq.remaining_grace_period > 0:
                        # reduce remaining_grace_period by one day
                        uq.remaining_grace_period -= 1
                elif used_percent >= qmsg.hard_limit_percent:
                    # set grace period to 0 when user quota exceeds hard limit
                    uq.remaining_grace_period = 0
                uq.save()

                if u.first_name and u.last_name:
                    sal_name = '{} {}'.format(u.first_name, u.last_name)
                elif u.first_name:
                    sal_name = u.first_name
                elif u.last_name:
                    sal_name = u.last_name
                else:
                    sal_name = u.username

                msg_str = 'Dear ' + sal_name + ':\n\n'

                ori_qm = get_quota_message(u)
                # make embedded settings.DEFAULT_SUPPORT_EMAIL clickable with subject auto-filled
                replace_substr = "<a href='mailto:{0}?subject=Request more quota'>{0}</a>".format(
                    settings.DEFAULT_SUPPORT_EMAIL)
                new_qm = ori_qm.replace(settings.DEFAULT_SUPPORT_EMAIL, replace_substr)
                msg_str += new_qm
                msg_str += '\n\nHydroShare Support'
                subject = 'Quota warning'
                if settings.DEBUG or settings.DISABLE_TASK_EMAILS:
                    logger.info("quota warning email not sent out on debug server but logged instead: "
                                "{}".format(msg_str))
                else:
                    try:
                        # send email for people monitoring and follow-up as needed
                        send_mail(subject, '', settings.DEFAULT_FROM_EMAIL,
                                  [u.email, settings.DEFAULT_SUPPORT_EMAIL],
                                  html_message=msg_str)
                    except Exception as ex:
                        # str(ex), not ex.message: Python 3 exceptions have no
                        # .message attribute, so the old code raised
                        # AttributeError inside this handler.
                        logger.debug("Failed to send quota warning email: " + str(ex))
            else:
                if uq.remaining_grace_period >= 0:
                    # turn grace period off now that the user is below quota soft limit
                    uq.remaining_grace_period = -1
                    uq.save()
        else:
            logger.debug('user ' + u.username + ' does not have UserQuota foreign key relation')
@shared_task
def add_zip_file_contents_to_resource(pk, zip_file_path):
    """Add zip file contents to an existing resource and remove the tmp zip file.

    Unpacks every file from ``zip_file_path`` into the resource identified by
    ``pk``, tracking progress via the resource's ``file_unpack_status`` and
    ``file_unpack_message`` fields. The temporary zip file is always deleted,
    even on failure.

    :param pk: short id of the target resource
    :param zip_file_path: filesystem path of the temporary zip file to ingest
    """
    zfile = None
    resource = None
    try:
        resource = utils.get_resource_by_shortkey(pk, or_404=False)
        zfile = zipfile.ZipFile(zip_file_path)
        num_files = len(zfile.infolist())
        zcontents = utils.ZipContents(zfile)
        files = zcontents.get_files()

        resource.file_unpack_status = 'Running'
        resource.save()

        # start=1 so the progress message reports files already imported
        # (the previous 0-based index under-reported progress by one).
        for i, f in enumerate(files, 1):
            logger.debug("Adding file {0} to resource {1}".format(f.name, pk))
            utils.add_file_to_resource(resource, f)
            resource.file_unpack_message = "Imported {0} of about {1} file(s) ...".format(
                i, num_files)
            resource.save()

        # This might make the resource unsuitable for public consumption
        resource.update_public_and_discoverable()
        # TODO: this is a bit of a lie because a different user requested the bag overwrite
        utils.resource_modified(resource, resource.creator, overwrite_bag=False)

        # Call success callback
        resource.file_unpack_message = None
        resource.file_unpack_status = 'Done'
        resource.save()

    except BaseResource.DoesNotExist:
        msg = "Unable to add zip file contents to non-existent resource {pk}."
        msg = msg.format(pk=pk)
        logger.error(msg)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # still propagate; the traceback is recorded on the resource so
        # operators can see what went wrong.
        exc_info = "".join(traceback.format_exception(*sys.exc_info()))
        if resource:
            resource.file_unpack_status = 'Error'
            resource.file_unpack_message = exc_info
            resource.save()

        if zfile:
            zfile.close()

        logger.error(exc_info)
    finally:
        # Delete upload file
        os.unlink(zip_file_path)
@shared_task
def delete_zip(zip_path):
    """Remove a temporary zip file from iRODS storage, if it is present."""
    storage = IrodsStorage()
    if not storage.exists(zip_path):
        return
    storage.delete(zip_path)
@shared_task
def create_temp_zip(resource_id, input_path, output_path, aggregation_name=None, sf_zip=False, download_path='',
                    request_username=None):
    """ Create temporary zip file from input_path and store in output_path

    :param resource_id: the short_id of a resource
    :param input_path: full irods path of input starting with federation path
    :param output_path: full irods path of output starting with federation path
    :param aggregation_name: The name of the aggregation to zip
    :param sf_zip: signals a single file to zip
    :param download_path: download path to return as task payload
    :param request_username: the username of the requesting user
    :return: download_path, used as the task's payload
    """
    from hs_core.hydroshare.utils import get_resource_by_shortkey
    res = get_resource_by_shortkey(resource_id)
    aggregation = None
    if aggregation_name:
        aggregation = res.get_aggregation_by_aggregation_name(aggregation_name)
    istorage = res.get_irods_storage()  # invoke federated storage as necessary

    if res.resource_type == "CompositeResource":
        if '/data/contents/' in input_path:
            short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            res.create_aggregation_meta_files(path=short_path)
        else:  # all metadata included, e.g., /data/*
            res.create_aggregation_meta_files()

    if aggregation or sf_zip:
        # input path points to single file aggregation
        # ensure that foo.zip contains aggregation metadata
        # by copying these into a temp subdirectory foo/foo parallel to where foo.zip is stored
        temp_folder_name, ext = os.path.splitext(output_path)  # strip zip to get scratch dir
        head, tail = os.path.split(temp_folder_name)  # tail is unqualified folder name "foo"
        out_with_folder = os.path.join(temp_folder_name, tail)  # foo/foo is subdir to zip
        # in the case of user provided zip file name, out_with_folder path may not end with
        # aggregation file name
        aggr_filename = os.path.basename(input_path)
        if not out_with_folder.endswith(aggr_filename):
            out_with_folder = os.path.join(os.path.dirname(out_with_folder), aggr_filename)
        istorage.copyFiles(input_path, out_with_folder)
        if not aggregation:
            # No aggregation was named; try to resolve one from the input path
            # so its metadata files can be bundled alongside the data file.
            if '/data/contents/' in input_path:
                short_path = input_path.split('/data/contents/')[1]  # strip /data/contents/
            else:
                short_path = input_path
            try:
                aggregation = res.get_aggregation_by_name(short_path)
            except ObjectDoesNotExist:
                pass

        if aggregation:
            # Copy the aggregation's sidecar files; each copy is best-effort,
            # a failure is logged but does not abort the zip.
            try:
                istorage.copyFiles(aggregation.map_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.map_file_path))
            try:
                istorage.copyFiles(aggregation.metadata_file_path, temp_folder_name)
            except SessionException:
                logger.error("cannot copy {}".format(aggregation.metadata_file_path))
            if aggregation.is_model_program or aggregation.is_model_instance:
                try:
                    istorage.copyFiles(aggregation.schema_file_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(aggregation.schema_file_path))
                if aggregation.is_model_instance:
                    try:
                        istorage.copyFiles(aggregation.schema_values_file_path, temp_folder_name)
                    except SessionException:
                        logger.error("cannot copy {}".format(aggregation.schema_values_file_path))
            for file in aggregation.files.all():
                try:
                    istorage.copyFiles(file.storage_path, temp_folder_name)
                except SessionException:
                    logger.error("cannot copy {}".format(file.storage_path))
        istorage.zipup(temp_folder_name, output_path)
        istorage.delete(temp_folder_name)  # delete working directory; this isn't the zipfile
    else:  # regular folder to zip
        istorage.zipup(input_path, output_path)
    return download_path
@shared_task
def create_bag_by_irods(resource_id, create_zip=True):
    """Create a resource bag on iRODS side by running the bagit rule and ibun zip.

    This function runs as a celery task, invoked asynchronously so that it does not
    block the main web thread when it creates bags for very large files which will take some time.
    :param
    resource_id: the resource uuid that is used to look for the resource to create the bag for.
    :param create_zip: defaults to True, set to false to create bagit files without zipping

    :return: bag_url if bag creation operation succeeds or
    raise an exception if resource does not exist or any other issues that prevent bags from being created.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    istorage = res.get_irods_storage()

    bag_path = res.bag_path

    # A missing AVU is treated as dirty/modified (safe default: regenerate).
    metadata_dirty = res.getAVU('metadata_dirty')
    metadata_dirty = metadata_dirty is None or metadata_dirty
    # if metadata has been changed, then regenerate metadata xml files
    if metadata_dirty:
        create_bag_metadata_files(res)

    bag_modified = res.getAVU("bag_modified")
    bag_modified = bag_modified is None or bag_modified
    if metadata_dirty or bag_modified:
        create_bagit_files_by_irods(res, istorage)
        res.setAVU("bag_modified", False)

    if create_zip:
        irods_bagit_input_path = res.get_irods_path(resource_id, prepend_short_id=False)

        # only proceed when the resource is not deleted potentially by another request
        # when being downloaded
        is_exist = istorage.exists(irods_bagit_input_path)
        if is_exist:
            try:
                if istorage.exists(bag_path):
                    istorage.delete(bag_path)
                istorage.zipup(irods_bagit_input_path, bag_path)
                if res.raccess.published:
                    # compute checksum to meet DataONE distribution requirement
                    chksum = istorage.checksum(bag_path)
                    res.bag_checksum = chksum
                return res.bag_url
            except SessionException as ex:
                raise SessionException(-1, '', ex.stderr)
        else:
            raise ObjectDoesNotExist('Resource {} does not exist.'.format(resource_id))
@shared_task
def copy_resource_task(ori_res_id, new_res_id=None, request_username=None):
    """Copy a resource (files, AVUs, metadata) into a new resource.

    :param ori_res_id: short id of the resource to copy
    :param new_res_id: short id of an already-created target resource; when
        None, an empty target resource is created here
    :param request_username: username used when creating the empty target
    :return: absolute URL of the new resource (used as the task payload)
    :raises utils.ResourceCopyException: on any failure; a resource created
        or passed in as the copy target is deleted on failure
    """
    try:
        new_res = None
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, request_username, action='copy')
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)

        if new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
            # the resource to be copied is a versioned resource, need to delete this isVersionOf
            # relation element to maintain the single versioning obsolescence chain
            new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().delete()

        # create the relation element for the new_res
        today = date.today().strftime("%m/%d/%Y")
        derived_from = "{}, accessed on: {}".format(ori_res.get_citation(), today)
        # since we are allowing user to add relation of type source, need to check we don't already have it
        if not new_res.metadata.relations.all().filter(type=RelationTypes.source, value=derived_from).exists():
            new_res.metadata.create_element('relation', type=RelationTypes.source, value=derived_from)

        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()

        # create bag for the new resource
        create_bag(new_res)
        return new_res.get_absolute_url()
    except Exception as ex:
        if new_res:
            new_res.delete()
        raise utils.ResourceCopyException(str(ex))
@shared_task
def create_new_version_resource_task(ori_res_id, username, new_res_id=None):
    """
    Task for creating a new version of a resource
    Args:
        ori_res_id: the original resource id that is to be versioned.
        new_res_id: the new versioned resource id from the original resource. If None, a
        new resource will be created.
        username: the requesting user's username
    Returns:
        the new versioned resource url as the payload
    """
    # Bind both names before the try block so the except/finally handlers can
    # safely reference them even when an early step raises. Previously, a
    # failure before ori_res was assigned (e.g. in create_empty_resource)
    # caused an UnboundLocalError in the finally block that masked the real
    # error.
    ori_res = None
    new_res = None
    try:
        if not new_res_id:
            new_res = create_empty_resource(ori_res_id, username)
            new_res_id = new_res.short_id
        utils.copy_resource_files_and_AVUs(ori_res_id, new_res_id)

        # copy metadata from source resource to target new-versioned resource except three elements
        ori_res = utils.get_resource_by_shortkey(ori_res_id)
        if not new_res:
            new_res = utils.get_resource_by_shortkey(new_res_id)
        utils.copy_and_create_metadata(ori_res, new_res)

        # add or update Relation element to link source and target resources
        ori_res.metadata.create_element('relation', type=RelationTypes.isReplacedBy, value=new_res.get_citation())

        if new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
            # the original resource is already a versioned resource, and its isVersionOf relation
            # element is copied over to this new version resource, needs to delete this element so
            # it can be created to link to its original resource correctly
            new_res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().delete()

        new_res.metadata.create_element('relation', type=RelationTypes.isVersionOf, value=ori_res.get_citation())

        if ori_res.resource_type.lower() == "collectionresource":
            # clone contained_res list of original collection and add to new collection
            # note that new version collection will not contain "deleted resources"
            new_res.resources = ori_res.resources.all()

        # create bag for the new resource
        create_bag(new_res)

        # since an isReplaceBy relation element is added to original resource, needs to call
        # resource_modified() for original resource
        # if everything goes well up to this point, set original resource to be immutable so that
        # obsoleted resources cannot be modified from REST API
        ori_res.raccess.immutable = True
        ori_res.raccess.save()
        ori_res.save()
        return new_res.get_absolute_url()
    except Exception as ex:
        if new_res:
            new_res.delete()
        raise utils.ResourceVersioningException(str(ex))
    finally:
        # release the lock regardless, but only if the original resource was
        # ever resolved
        if ori_res is not None:
            ori_res.locked_time = None
            ori_res.save()
@shared_task
def replicate_resource_bag_to_user_zone_task(res_id, request_username):
    """
    Task for replicating resource bag which will be created on demand if not existent already to iRODS user zone
    Args:
        res_id: the resource id with its bag to be replicated to iRODS user zone
        request_username: the requesting user's username to whose user zone space the bag is copied to

    Returns:
        None, but exceptions will be raised if there is an issue with iRODS operation
    """
    res = utils.get_resource_by_shortkey(res_id)
    res_coll = res.root_path
    istorage = res.get_irods_storage()
    if istorage.exists(res_coll):
        # (Re)create the bag if it is stale or missing before replicating it.
        bag_modified = res.getAVU('bag_modified')
        if bag_modified is None or not bag_modified:
            if not istorage.exists(res.bag_path):
                create_bag_by_irods(res_id)
        else:
            create_bag_by_irods(res_id)

        # do replication of the resource bag to irods user zone
        if not res.resource_federation_path:
            istorage.set_fed_zone_session()
        src_file = res.bag_path
        tgt_file = '/{userzone}/home/{username}/{resid}.zip'.format(
            userzone=settings.HS_USER_IRODS_ZONE, username=request_username, resid=res_id)
        # Ensure the target user has room for the bag before copying.
        fsize = istorage.size(src_file)
        utils.validate_user_quota(request_username, fsize)
        istorage.copyFiles(src_file, tgt_file)
        return None
    else:
        raise ValidationError("Resource {} does not exist in iRODS".format(res.short_id))
@shared_task
def delete_resource_task(resource_id, request_username=None):
    """
    Deletes a resource managed by HydroShare. The caller must be an owner of the resource or an
    administrator to perform this function.
    :param resource_id: The unique HydroShare identifier of the resource to be deleted
    :param request_username: username of the requesting user; when provided,
        CollectionDeletedResource records are created for each collection the
        deleted resource belonged to
    :return: the my-resources page URL to redirect to after the deletion completes
    raise an exception if there were errors.
    """
    res = utils.get_resource_by_shortkey(resource_id)
    res_title = res.metadata.title
    res_type = res.resource_type
    resource_related_collections = [col for col in res.collections.all()]
    owners_list = [owner for owner in res.raccess.owners.all()]

    # when the most recent version of a resource in an obsolescence chain is deleted, the previous
    # version in the chain needs to be set as the "active" version by deleting "isReplacedBy"
    # relation element
    if res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).exists():
        is_version_of_res_link = \
            res.metadata.relations.all().filter(type=RelationTypes.isVersionOf).first().value
        # rfind (not rindex): rindex raises ValueError when there is no '/',
        # which made the == -1 fallback below unreachable.
        idx = is_version_of_res_link.rfind('/')
        if idx == -1:
            obsolete_res_id = is_version_of_res_link
        else:
            obsolete_res_id = is_version_of_res_link[idx + 1:]
        obsolete_res = utils.get_resource_by_shortkey(obsolete_res_id)
        if obsolete_res.metadata.relations.all().filter(type=RelationTypes.isReplacedBy).exists():
            eid = obsolete_res.metadata.relations.all().filter(type=RelationTypes.isReplacedBy).first().id
            obsolete_res.metadata.delete_element('relation', eid)
            # also make this obsoleted resource editable if not published now that it becomes the latest version
            if not obsolete_res.raccess.published:
                obsolete_res.raccess.immutable = False
                obsolete_res.raccess.save()

    for res_in_col in res.resources.all():
        # res being deleted is a collection resource - delete isPartOf relation of all resources that are part of the
        # collection
        if res_in_col.metadata.relations.filter(type='isPartOf', value__endswith=res.short_id).exists():
            res_in_col.metadata.relations.filter(type='isPartOf', value__endswith=res.short_id).delete()
            set_dirty_bag_flag(res_in_col)

    for collection_res in resource_related_collections:
        # res being deleted is part of one or more collections - delete hasPart relation for all those collections
        collection_res.metadata.relations.filter(type='hasPart', value__endswith=res.short_id).delete()
        set_dirty_bag_flag(collection_res)

    res.delete()
    if request_username:
        # if the deleted resource is part of any collection resource, then for each of those collection
        # create a CollectionDeletedResource object which can then be used to list collection deleted
        # resources on collection resource landing page
        for collection_res in resource_related_collections:
            o = CollectionDeletedResource.objects.create(
                resource_title=res_title,
                deleted_by=User.objects.get(username=request_username),
                resource_id=resource_id,
                resource_type=res_type,
                collection=collection_res
            )
            o.resource_owners.add(*owners_list)

    # return the page URL to redirect to after resource deletion task is complete
    return '/my-resources/'
@shared_task
def update_web_services(services_url, api_token, timeout, publish_urls, res_id):
    """Update web services hosted by GeoServer and HydroServer.

    This function sends a resource id to the HydroShare web services manager
    application, which will check the current status of the resource and register
    or unregister services hosted by GeoServer and HydroServer.

    The HydroShare web services manager will return a list of endpoint URLs
    for both the resource and individual aggregations. If publish_urls is set to
    True, these endpoints will be added to the extra metadata fields of the
    resource and aggregations.

    :param services_url: base URL of the web services manager application
    :param api_token: token placed in the Authorization header
    :param timeout: request timeout in seconds
    :param publish_urls: when True, write returned endpoint URLs into metadata
    :param res_id: short id of the resource to register/unregister
    :return: the HTTP response on success, or the caught exception on failure
    """
    session = requests.Session()
    session.headers.update(
        {"Authorization": " ".join(("Token", str(api_token)))}
    )

    rest_url = str(services_url) + "/" + str(res_id) + "/"

    try:
        response = session.post(rest_url, timeout=timeout)

        if publish_urls and response.status_code == status.HTTP_201_CREATED:
            try:

                resource = utils.get_resource_by_shortkey(res_id)
                response_content = json.loads(response.content.decode())

                for key, value in response_content["resource"].items():
                    resource.extra_metadata[key] = value
                    resource.save()

                for url in response_content["content"]:
                    logical_files = list(resource.logical_files)
                    # NOTE(review): .encode() produces bytes while
                    # aggregation_name values appear to be str; confirm this
                    # lookup still matches under Python 3.
                    lf = logical_files[[i.aggregation_name for i in
                                        logical_files].index(
                        url["layer_name"].encode()
                    )]
                    lf.metadata.extra_metadata["Web Services URL"] = url["message"]
                    lf.metadata.save()

            except Exception as e:
                logger.error(e)
                return e

        return response

    except (requests.exceptions.RequestException, ValueError) as e:
        logger.error(e)
        return e
@shared_task
def resource_debug(resource_id):
    """Check a resource's iRODS files for consistency and return the errors found.

    (The previous docstring was copied from update_web_services by mistake.)
    """
    resource = utils.get_resource_by_shortkey(resource_id)
    from hs_core.management.utils import check_irods_files
    return check_irods_files(resource, log_errors=False, return_errors=True)
@shared_task
def unzip_task(user_pk, res_id, zip_with_rel_path, bool_remove_original, overwrite=False, auto_aggregate=False,
               ingest_metadata=False, unzip_to_folder=False):
    """Unzip an in-resource zip file on behalf of a user.

    Thin async wrapper: resolves the user by primary key and delegates all
    work to hs_core.views.utils.unzip_file, which defines the semantics of
    every flag.

    :param user_pk: primary key of the requesting User
    :param res_id: short id of the resource containing the zip file
    :param zip_with_rel_path: resource-relative path of the zip file
    :param bool_remove_original: passed through to unzip_file
    :param overwrite: passed through to unzip_file
    :param auto_aggregate: passed through to unzip_file
    :param ingest_metadata: passed through to unzip_file
    :param unzip_to_folder: passed through to unzip_file
    """
    # Imported lazily to avoid a circular import at module load time.
    from hs_core.views.utils import unzip_file
    user = User.objects.get(pk=user_pk)
    unzip_file(user, res_id, zip_with_rel_path, bool_remove_original, overwrite, auto_aggregate, ingest_metadata,
               unzip_to_folder)
@shared_task
def move_aggregation_task(res_id, file_type_id, file_type, tgt_path):
    """Move all files of an aggregation to a new folder within a resource.

    Moves each file both in iRODS and in Django's file records, then flags
    the resource so aggregation metadata files are recreated at the new path.

    :param res_id: short id of the resource that owns the aggregation
    :param file_type_id: id of the aggregation (logical file) instance
    :param file_type: key into FILE_TYPE_MAP naming the aggregation class
    :param tgt_path: resource-relative target folder path
    :return: absolute URL of the resource (used as the task payload)
    """
    # Imported lazily to avoid a circular import at module load time.
    from hs_core.views.utils import rename_irods_file_or_folder_in_django
    res = utils.get_resource_by_shortkey(res_id)
    istorage = res.get_irods_storage()
    res_files = []
    file_type_obj = FILE_TYPE_MAP[file_type]
    aggregation = file_type_obj.objects.get(id=file_type_id)
    res_files.extend(aggregation.files.all())
    # Capture the aggregation name before any files are moved.
    orig_aggregation_name = aggregation.aggregation_name
    for file in res_files:
        tgt_full_path = os.path.join(res.file_path, tgt_path, os.path.basename(file.storage_path))
        istorage.moveFile(file.storage_path, tgt_full_path)
        rename_irods_file_or_folder_in_django(res, file.storage_path, tgt_full_path)
    new_aggregation_name = os.path.join(tgt_path, os.path.basename(orig_aggregation_name))
    res.set_flag_to_recreate_aggregation_meta_files(orig_path=orig_aggregation_name,
                                                    new_path=new_aggregation_name)
    return res.get_absolute_url()
@celery_app.task(ignore_result=True)
def daily_odm2_sync():
    """
    Sync ODM2 variables into HydroShare's local cache.

    ODM2 variables are maintained on an external site; this synchronizes
    data to HydroShare for local caching.
    """
    ODM2Variable.sync()
@celery_app.task(ignore_result=True)
def monthly_group_membership_requests_cleanup():
    """
    Delete expired and redeemed group membership requests.

    Anything whose request date is 60+ days old is removed, regardless of
    redemption state.
    """
    two_months_ago = datetime.today() - timedelta(days=60)
    GroupMembershipRequest.objects.filter(my_date__lte=two_months_ago).delete()
@celery_app.task(ignore_result=True)
def daily_innactive_group_requests_cleanup():
    """
    Redeem group membership requests for innactive users.

    Marks requests redeemed when either side of the request (requester or
    invitee) is an inactive user account.
    """
    GroupMembershipRequest.objects.filter(request_from__is_active=False).update(redeemed=True)
    GroupMembershipRequest.objects.filter(invitation_to__is_active=False).update(redeemed=True)
@task_postrun.connect
def update_task_notification(sender=None, task_id=None, task=None, state=None, retval=None, **kwargs):
    """
    Updates the state of TaskNotification model when a celery task completes
    :param sender:
    :param task_id: task id
    :param task: task object
    :param state: task return state
    :param retval: task return value
    :param kwargs:
    :return:
    """
    # Only tasks explicitly listed in settings.TASK_NAME_LIST get notifications.
    if task.name in settings.TASK_NAME_LIST:
        if state == states.SUCCESS:
            get_or_create_task_notification(task_id, status="completed", payload=retval)
        elif state in states.EXCEPTION_STATES:
            get_or_create_task_notification(task_id, status="failed", payload=retval)
        elif state == states.REVOKED:
            get_or_create_task_notification(task_id, status="aborted", payload=retval)
        else:
            # Unexpected celery state; log it rather than writing a bogus status.
            logger.warning("Unhandled task state of {} for {}".format(state, task_id))
@celery_app.task(ignore_result=True)
def task_notification_cleanup():
    """
    Delete expired task notifications each week.

    Removes TaskNotification rows created more than 7 days ago.
    """
    week_ago = datetime.today() - timedelta(days=7)
    TaskNotification.objects.filter(created__lte=week_ago).delete()
|
"""
Copyright 2015 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
import random
import numpy as np
import numpy.linalg as la
from scipy.stats import chi2
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import log_loss
from sklearn.tree import DecisionTreeClassifier
def estimate_lr_iter(n_samples):
    """
    Estimate the number of SGD iterations needed for convergence.

    Uses the common heuristic of roughly 10^4 / n_samples passes, with a
    floor of 20 iterations.

    :param n_samples: number of training samples (positive int)
    :return: estimated iteration count (int, >= 20)
    """
    # Use a float literal so the division is true division under Python 2
    # as well: with integer operands, 10**4 / n_samples floor-divides
    # first and np.ceil then has nothing left to round up.
    return max(20,
               int(np.ceil(1e4 / n_samples)))
def calculate_intercept_(labels):
    """
    Return the logistic-regression intercept for a binary label vector.

    Solving p = 1 / (1 + e^{-b}) for b gives b = -log(1/p - 1), where p
    is the fraction of samples labelled 1.
    """
    positives = float(sum(labels))
    total = float(len(labels))
    frac = positives / total
    return -np.log(1. / frac - 1.)
def calculate_intercept(labels):
    """
    Return intercept(s) for a label vector.

    For a binary problem a single scalar intercept is returned; for a
    multi-class problem, one one-vs-rest intercept per class.

    NOTE(review): the multi-class branch assumes the labels are the
    integers 0..k-1 -- confirm against callers.

    :param labels: sequence of integer class labels
    :return: float intercept (binary) or 1-D ndarray of intercepts
    """
    label_ids = set(labels)
    if len(label_ids) == 2:
        return calculate_intercept_(labels)
    intercepts = np.zeros(len(label_ids))
    # range() instead of xrange() keeps the function usable under both
    # Python 2 and Python 3; iteration behaviour is identical here.
    for i in range(len(label_ids)):
        binary_labels = [1 if l == i else 0 for l in labels]
        binary_labels = np.array(binary_labels)
        intercepts[i] = calculate_intercept_(binary_labels)
    return intercepts
def likelihood_ratio_test(features_alternate, labels, lr_model, features_null=None, set_intercept=True):
    """
    Perform a likelihood-ratio test between two logistic-regression models.

    Fits the alternate model (and, optionally, an explicit null model),
    compares their log-likelihoods, and refers the statistic
    G = 2 * (LL_alt - LL_null) to a chi^2 distribution.

    :param features_alternate: feature matrix, or a (train, test) tuple
    :param labels: label vector, or a (train, test) tuple matching the above
    :param lr_model: classifier exposing fit(X, y, intercept_init=...) and
        predict_proba(X) (e.g. sklearn's SGDClassifier)
    :param features_null: optional null-model features (matrix or tuple);
        when omitted, an intercept-only null model is used
    :param set_intercept: initialise the fits from the label base rate
    :return: p-value from the chi^2 survival function
    """
    # Accept either a single data set or an explicit (train, test) split.
    if isinstance(features_alternate, tuple) and len(features_alternate) == 2:
        training_features_alternate, testing_features_alternate = features_alternate
        training_labels, testing_labels = labels
    else:
        training_features_alternate = features_alternate
        testing_features_alternate = features_alternate
        training_labels = labels
        testing_labels = labels
    n_classes = len(set(training_labels))
    if set_intercept:
        intercept_init = calculate_intercept(training_labels)
    else:
        intercept_init = None
    if features_null is not None:
        if isinstance(features_null, tuple) and len(features_null) == 2:
            training_features_null, testing_features_null = features_null
        else:
            training_features_null = features_null
            testing_features_null = features_null
        lr_model.fit(training_features_null,
                     training_labels,
                     intercept_init=intercept_init)
        null_prob = lr_model.predict_proba(testing_features_null)
        # d.f. = number of extra parameters in the alternate model.
        df = testing_features_alternate.shape[1] - testing_features_null.shape[1]
    else:
        # Intercept-only null model: every row gets the same probabilities.
        intercepts = calculate_intercept(training_labels)
        null_prob = np.zeros((testing_labels.shape[0], n_classes))
        for i in range(testing_labels.shape[0]):
            # NOTE(review): np.exp(intercepts) (not -intercepts) yields
            # 1 - sigmoid(b); confirm the intended sign here.
            null_prob[i, :] = 1.0 / (1.0 + np.exp(intercepts))
        # The intercept-only null has no feature coefficients, so the
        # alternate model carries shape[1] extra parameters (intercepts
        # cancel); the previous "- 1" under-counted the degrees of freedom.
        df = testing_features_alternate.shape[1]
    lr_model.fit(training_features_alternate,
                 training_labels,
                 intercept_init=intercept_init)
    alt_prob = lr_model.predict_proba(testing_features_alternate)
    alt_log_likelihood = -log_loss(testing_labels,
                                   alt_prob,
                                   normalize=False)
    null_log_likelihood = -log_loss(testing_labels,
                                    null_prob,
                                    normalize=False)
    # G ~ chi^2(df) under the null hypothesis.
    G = 2 * (alt_log_likelihood - null_log_likelihood)
    p_value = chi2.sf(G, df)
    return p_value
def null_predict_proba(intercept):
    """Return sigmoid(intercept): P(y=1) under an intercept-only model."""
    return 1.0 / (1.0 + np.exp(-intercept))
def mcfadden_r2(X, y, n_models=250, n_iter=20):
    """
    Computes coefficient of determination using McFadden's method.

    McFadden's pseudo-R^2 is 1 - LL_model / LL_null, where the null
    model always predicts the base rate of the positive class.

    :param X: feature matrix
    :param y: binary label vector
    :param n_models: number of bagged SGD classifiers
    :param n_iter: SGD iterations per classifier
    :return: McFadden pseudo-R^2 (float)
    """
    y = np.array(y)
    sgd = SGDClassifier(loss="log",
                        penalty="l2",
                        n_iter=n_iter)
    ensemble = BaggingClassifier(sgd,
                                 n_estimators=n_models,
                                 bootstrap=True)
    ensemble.fit(X, y)
    model_probs = ensemble.predict_proba(X)
    model_log_likelihood = -1. * log_loss(y,
                                          model_probs[:, 1],
                                          normalize=False)
    # "null model" with only intercept: it predicts the empirical base
    # rate of the positive class directly.  The previous code passed the
    # base rate through the sigmoid (null_predict_proba), treating a
    # probability as if it were a logit; float() also replaces the
    # deprecated np.float alias.
    base_rate = float(np.sum(y)) / y.shape[0]
    null_probs = np.ones(y.shape) * base_rate
    null_log_likelihood = -1. * log_loss(y,
                                         null_probs,
                                         normalize=False)
    return 1.0 - model_log_likelihood / null_log_likelihood
class PCA(object):
    """Thin wrapper around TruncatedSVD exposing a PCA-like interface."""

    def __init__(self, n_components):
        # Delegate the linear algebra entirely to scikit-learn.
        self.svd = TruncatedSVD(n_components=n_components)

    def explained_variance(self, features):
        """Fit the decomposition and return the explained-variance ratios."""
        self.svd.fit(features)
        return self.svd.explained_variance_ratio_

    def transform(self, features):
        """Fit and project `features`; caches the components on self."""
        projected = self.svd.fit_transform(features)
        self.components_ = self.svd.components_
        return projected
class LogisticRegressionEnsemble(object):
    """
    Implementation of an ensemble of Logistic
    Regression classifiers that supports bagging.
    """
    def __init__(self, n_models, method, batch_size, bagging=True, n_iter=20):
        # method selects the base classifier (see get_base); batch_size
        # bounds how many models are trained per BaggingClassifier round.
        self.n_models = n_models
        self.bagging = bagging
        self.batch_size = batch_size
        self.method = method
        self.n_iter = n_iter

    def get_base(self, n_samples):
        """Return a fresh base classifier configured per self.method."""
        if self.method == "sag-l2":
            # copied from http://scikit-learn.org/stable/auto_examples/linear_model/plot_sgd_comparison.html#sphx-glr-auto-examples-linear-model-plot-sgd-comparison-py
            return LogisticRegression(solver="sag", tol=1e-1, C=1.e4 / n_samples)
        elif self.method == "sgd-l2":
            return SGDClassifier(loss="log", penalty="l2", n_iter=self.n_iter)
        elif self.method == "sgd-en":
            return SGDClassifier(loss="log", penalty="elasticnet", n_iter=self.n_iter)
        elif self.method == "asgd-l2":
            return SGDClassifier(loss="log", penalty="l2", average=True, n_iter=self.n_iter)
        else:
            # raise Exception(...) call syntax is valid under both
            # Python 2 and Python 3, unlike the old "raise E, msg" form.
            raise Exception("Unknown logistic regression method '%s'" % self.method)

    def feature_importances(self, X, y):
        """
        Average normalized coefficient vectors over the ensemble.

        :param X: feature matrix
        :param y: label vector
        :return: ndarray of absolute mean feature weights
        """
        y = np.array(y)
        feature_importances = np.zeros(X.shape[1])
        trained_models = 0
        while trained_models < self.n_models:
            to_train = min(self.batch_size, self.n_models - trained_models)
            ensemble = BaggingClassifier(self.get_base(X.shape[0]),
                                         n_estimators=to_train,
                                         bootstrap=self.bagging)
            ensemble.fit(X, y)
            for model in ensemble.estimators_:
                # Normalize each model's coefficients before accumulating so
                # no single model dominates the average.
                feature_importances += model.coef_[0] / la.norm(model.coef_)
            trained_models += to_train
            # print() with a single pre-formatted string behaves the same
            # under Python 2 and Python 3.
            print("Trained %s of %s models" % (trained_models, self.n_models))
        return np.abs(feature_importances / self.n_models)
class ConstrainedBaggingRandomForest(object):
    """
    Implementation of a Random Forest using constrained
    bagging for generating the training sets for the
    underlying Decision Trees.
    """
    def __init__(self, n_trees, n_resamples, batch_size):
        # n_resamples == -1 means "use sklearn's plain bootstrap" (see
        # feature_importances); otherwise each tree trains on the full
        # data set plus n_resamples rows sampled with replacement.
        self.n_trees = n_trees
        self.n_resamples = n_resamples
        self.batch_size = batch_size

    def _resample(self, X, y):
        """Return (X, y) extended with n_resamples rows drawn with replacement."""
        new_indices = list(range(X.shape[0]))
        for i in range(self.n_resamples):
            idx = random.randint(0, X.shape[0] - 1)
            new_indices.append(idx)
        X_new = X[new_indices, :]
        y_new = y[new_indices]
        return X_new, y_new

    def feature_importances(self, X, y, statistics=False, interactions=False):
        """
        Average Gini feature importances over n_trees trees.

        :param X: feature matrix
        :param y: label vector
        :param statistics: also histogram the number of features used per tree
        :param interactions: also count occurrences of each used-feature set
        :return: (importances, used_features_histogram or None,
                  used_feature_sets or None)
        """
        y = np.array(y)
        feature_importances = np.zeros(X.shape[1])
        used_features_histogram = None
        if statistics:
            used_features_histogram = defaultdict(int)
        used_feature_sets = None
        if interactions:
            used_feature_sets = defaultdict(int)
        if self.n_resamples == -1:
            completed_trees = 0
            while completed_trees < self.n_trees:
                n_classifiers = min(self.batch_size, self.n_trees - completed_trees)
                rf = RandomForestClassifier(n_estimators=n_classifiers,
                                            n_jobs=1)
                rf.fit(X, y)
                # Weight by batch size so the final division by n_trees
                # yields a proper average.
                feature_importances += rf.feature_importances_ * n_classifiers
                if statistics or interactions:
                    for dt in rf.estimators_:
                        tree = dt.tree_
                        used_features = set(tree.feature)
                        # leaves denoted by -2
                        used_features.remove(-2)
                        if statistics:
                            used_features_histogram[len(used_features)] += 1
                        if interactions:
                            used_feature_sets[frozenset(used_features)] += 1
                completed_trees += n_classifiers
                # print() with a single pre-formatted string prints the same
                # text as the old multi-argument print statement, and works
                # under both Python 2 and Python 3.
                print("Trained %s of %s trees" % (completed_trees, self.n_trees))
        else:
            for i in range(self.n_trees):
                dt = DecisionTreeClassifier(max_features="sqrt")
                X_new, y_new = self._resample(X, y)
                dt.fit(X_new, y_new)
                if statistics or interactions:
                    used_features = set(dt.tree_.feature)
                    used_features.remove(-2)
                    if statistics:
                        used_features_histogram[len(used_features)] += 1
                    if interactions:
                        used_feature_sets[frozenset(used_features)] += 1
                feature_importances += dt.feature_importances_
            feature_importances = feature_importances / self.n_trees
        if interactions:
            used_feature_sets = dict(used_feature_sets)
        return feature_importances, used_features_histogram, used_feature_sets
Correct d.f.
"""
Copyright 2015 Ronald J. Nowling
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
import random
import numpy as np
import numpy.linalg as la
from scipy.stats import chi2
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import log_loss
from sklearn.tree import DecisionTreeClassifier
def estimate_lr_iter(n_samples):
    """
    Estimate the number of SGD iterations needed for convergence.

    Uses the common heuristic of roughly 10^4 / n_samples passes, with a
    floor of 20 iterations.

    :param n_samples: number of training samples (positive int)
    :return: estimated iteration count (int, >= 20)
    """
    # Use a float literal so the division is true division under Python 2
    # as well: with integer operands, 10**4 / n_samples floor-divides
    # first and np.ceil then has nothing left to round up.
    return max(20,
               int(np.ceil(1e4 / n_samples)))
def calculate_intercept_(labels):
    """
    Return the logistic-regression intercept for a binary label vector.

    Solving p = 1 / (1 + e^{-b}) for b gives b = -log(1/p - 1), where p
    is the fraction of samples labelled 1.
    """
    positives = float(sum(labels))
    total = float(len(labels))
    frac = positives / total
    return -np.log(1. / frac - 1.)
def calculate_intercept(labels):
    """
    Return intercept(s) for a label vector.

    For a binary problem a single scalar intercept is returned; for a
    multi-class problem, one one-vs-rest intercept per class.

    NOTE(review): the multi-class branch assumes the labels are the
    integers 0..k-1 -- confirm against callers.

    :param labels: sequence of integer class labels
    :return: float intercept (binary) or 1-D ndarray of intercepts
    """
    label_ids = set(labels)
    if len(label_ids) == 2:
        return calculate_intercept_(labels)
    intercepts = np.zeros(len(label_ids))
    # range() instead of xrange() keeps the function usable under both
    # Python 2 and Python 3; iteration behaviour is identical here.
    for i in range(len(label_ids)):
        binary_labels = [1 if l == i else 0 for l in labels]
        binary_labels = np.array(binary_labels)
        intercepts[i] = calculate_intercept_(binary_labels)
    return intercepts
def likelihood_ratio_test(features_alternate, labels, lr_model, features_null=None, set_intercept=True):
    """
    Likelihood-ratio test between an alternate logistic-regression model
    and a null model.

    :param features_alternate: feature matrix, or a (train, test) tuple
    :param labels: label vector, or a (train, test) tuple matching the above
    :param lr_model: classifier exposing fit(X, y, intercept_init=...) and
        predict_proba(X)
    :param features_null: optional explicit null-model features; when
        omitted an intercept-only null model is used
    :param set_intercept: initialise the fits from the label base rate
    :return: p-value from the chi^2 survival function
    """
    # Accept either a single data set or an explicit (train, test) split.
    if isinstance(features_alternate, tuple) and len(features_alternate) == 2:
        training_features_alternate, testing_features_alternate = features_alternate
        training_labels, testing_labels = labels
    else:
        training_features_alternate = features_alternate
        testing_features_alternate = features_alternate
        training_labels = labels
        testing_labels = labels
    n_classes = len(set(training_labels))
    if set_intercept:
        intercept_init = calculate_intercept(training_labels)
    else:
        intercept_init = None
    if features_null is not None:
        if isinstance(features_null, tuple) and len(features_null) == 2:
            training_features_null, testing_features_null = features_null
        else:
            training_features_null = features_null
            testing_features_null = features_null
        lr_model.fit(training_features_null,
                     training_labels,
                     intercept_init = intercept_init)
        null_prob = lr_model.predict_proba(testing_features_null)
        # d.f. = number of extra parameters in the alternate model.
        df = testing_features_alternate.shape[1] - testing_features_null.shape[1]
    else:
        # Intercept-only null model: every row gets the same probabilities.
        intercepts = calculate_intercept(training_labels)
        null_prob = np.zeros((testing_labels.shape[0], n_classes))
        for i in xrange(testing_labels.shape[0]):
            # NOTE(review): np.exp(intercepts) (not -intercepts) yields
            # 1 - sigmoid(b); confirm the intended sign here.
            null_prob[i, :] = 1.0 / (1.0 + np.exp(intercepts))
        df = testing_features_alternate.shape[1]
    lr_model.fit(training_features_alternate,
                 training_labels,
                 intercept_init = intercept_init)
    alt_prob = lr_model.predict_proba(testing_features_alternate)
    alt_log_likelihood = -log_loss(testing_labels,
                                   alt_prob,
                                   normalize=False)
    null_log_likelihood = -log_loss(testing_labels,
                                    null_prob,
                                    normalize=False)
    # G ~ chi^2(df) under the null hypothesis.
    G = 2 * (alt_log_likelihood - null_log_likelihood)
    p_value = chi2.sf(G, df)
    return p_value
def null_predict_proba(intercept):
    """Return sigmoid(intercept): P(y=1) under an intercept-only model."""
    return 1.0 / (1.0 + np.exp(-intercept))
def mcfadden_r2(X, y, n_models=250, n_iter=20):
    """
    Computes coefficient of determination using McFadden's method.

    McFadden's pseudo-R^2 is 1 - LL_model / LL_null, where the null
    model always predicts the base rate of the positive class.

    :param X: feature matrix
    :param y: binary label vector
    :param n_models: number of bagged SGD classifiers
    :param n_iter: SGD iterations per classifier
    :return: McFadden pseudo-R^2 (float)
    """
    y = np.array(y)
    sgd = SGDClassifier(loss="log",
                        penalty="l2",
                        n_iter=n_iter)
    ensemble = BaggingClassifier(sgd,
                                 n_estimators=n_models,
                                 bootstrap=True)
    ensemble.fit(X, y)
    model_probs = ensemble.predict_proba(X)
    model_log_likelihood = -1. * log_loss(y,
                                          model_probs[:, 1],
                                          normalize=False)
    # "null model" with only intercept: it predicts the empirical base
    # rate of the positive class directly.  The previous code passed the
    # base rate through the sigmoid (null_predict_proba), treating a
    # probability as if it were a logit; float() also replaces the
    # deprecated np.float alias.
    base_rate = float(np.sum(y)) / y.shape[0]
    null_probs = np.ones(y.shape) * base_rate
    null_log_likelihood = -1. * log_loss(y,
                                         null_probs,
                                         normalize=False)
    return 1.0 - model_log_likelihood / null_log_likelihood
class PCA(object):
    """Thin wrapper around TruncatedSVD exposing a PCA-like interface."""

    def __init__(self, n_components):
        # Delegate the linear algebra entirely to scikit-learn.
        self.svd = TruncatedSVD(n_components=n_components)

    def explained_variance(self, features):
        """Fit the decomposition and return the explained-variance ratios."""
        self.svd.fit(features)
        return self.svd.explained_variance_ratio_

    def transform(self, features):
        """Fit and project `features`; caches the components on self."""
        projected = self.svd.fit_transform(features)
        self.components_ = self.svd.components_
        return projected
class LogisticRegressionEnsemble(object):
    """
    Implementation of an ensemble of Logistic
    Regression classifiers that supports bagging.
    """
    def __init__(self, n_models, method, batch_size, bagging=True, n_iter=20):
        # method selects the base classifier (see get_base); batch_size
        # bounds how many models are trained per BaggingClassifier round.
        self.n_models = n_models
        self.bagging = bagging
        self.batch_size = batch_size
        self.method = method
        self.n_iter = n_iter

    def get_base(self, n_samples):
        """Return a fresh base classifier configured per self.method."""
        if self.method == "sag-l2":
            # copied from http://scikit-learn.org/stable/auto_examples/linear_model/plot_sgd_comparison.html#sphx-glr-auto-examples-linear-model-plot-sgd-comparison-py
            return LogisticRegression(solver="sag", tol=1e-1, C=1.e4 / n_samples)
        elif self.method == "sgd-l2":
            return SGDClassifier(loss="log", penalty="l2", n_iter=self.n_iter)
        elif self.method == "sgd-en":
            return SGDClassifier(loss="log", penalty="elasticnet", n_iter=self.n_iter)
        elif self.method == "asgd-l2":
            return SGDClassifier(loss="log", penalty="l2", average=True, n_iter=self.n_iter)
        else:
            # raise Exception(...) call syntax is valid under both
            # Python 2 and Python 3, unlike the old "raise E, msg" form.
            raise Exception("Unknown logistic regression method '%s'" % self.method)

    def feature_importances(self, X, y):
        """
        Average normalized coefficient vectors over the ensemble.

        :param X: feature matrix
        :param y: label vector
        :return: ndarray of absolute mean feature weights
        """
        y = np.array(y)
        feature_importances = np.zeros(X.shape[1])
        trained_models = 0
        while trained_models < self.n_models:
            to_train = min(self.batch_size, self.n_models - trained_models)
            ensemble = BaggingClassifier(self.get_base(X.shape[0]),
                                         n_estimators=to_train,
                                         bootstrap=self.bagging)
            ensemble.fit(X, y)
            for model in ensemble.estimators_:
                # Normalize each model's coefficients before accumulating so
                # no single model dominates the average.
                feature_importances += model.coef_[0] / la.norm(model.coef_)
            trained_models += to_train
            # print() with a single pre-formatted string behaves the same
            # under Python 2 and Python 3.
            print("Trained %s of %s models" % (trained_models, self.n_models))
        return np.abs(feature_importances / self.n_models)
class ConstrainedBaggingRandomForest(object):
    """
    Implementation of a Random Forest using constrained
    bagging for generating the training sets for the
    underlying Decision Trees.
    """
    def __init__(self, n_trees, n_resamples, batch_size):
        # n_resamples == -1 means "use sklearn's plain bootstrap" (see
        # feature_importances); otherwise each tree trains on the full
        # data set plus n_resamples rows sampled with replacement.
        self.n_trees = n_trees
        self.n_resamples = n_resamples
        self.batch_size = batch_size

    def _resample(self, X, y):
        """Return (X, y) extended with n_resamples rows drawn with replacement."""
        new_indices = list(range(X.shape[0]))
        for i in range(self.n_resamples):
            idx = random.randint(0, X.shape[0] - 1)
            new_indices.append(idx)
        X_new = X[new_indices, :]
        y_new = y[new_indices]
        return X_new, y_new

    def feature_importances(self, X, y, statistics=False, interactions=False):
        """
        Average Gini feature importances over n_trees trees.

        :param X: feature matrix
        :param y: label vector
        :param statistics: also histogram the number of features used per tree
        :param interactions: also count occurrences of each used-feature set
        :return: (importances, used_features_histogram or None,
                  used_feature_sets or None)
        """
        y = np.array(y)
        feature_importances = np.zeros(X.shape[1])
        used_features_histogram = None
        if statistics:
            used_features_histogram = defaultdict(int)
        used_feature_sets = None
        if interactions:
            used_feature_sets = defaultdict(int)
        if self.n_resamples == -1:
            completed_trees = 0
            while completed_trees < self.n_trees:
                n_classifiers = min(self.batch_size, self.n_trees - completed_trees)
                rf = RandomForestClassifier(n_estimators=n_classifiers,
                                            n_jobs=1)
                rf.fit(X, y)
                # Weight by batch size so the final division by n_trees
                # yields a proper average.
                feature_importances += rf.feature_importances_ * n_classifiers
                if statistics or interactions:
                    for dt in rf.estimators_:
                        tree = dt.tree_
                        used_features = set(tree.feature)
                        # leaves denoted by -2
                        used_features.remove(-2)
                        if statistics:
                            used_features_histogram[len(used_features)] += 1
                        if interactions:
                            used_feature_sets[frozenset(used_features)] += 1
                completed_trees += n_classifiers
                # print() with a single pre-formatted string prints the same
                # text as the old multi-argument print statement, and works
                # under both Python 2 and Python 3.
                print("Trained %s of %s trees" % (completed_trees, self.n_trees))
        else:
            for i in range(self.n_trees):
                dt = DecisionTreeClassifier(max_features="sqrt")
                X_new, y_new = self._resample(X, y)
                dt.fit(X_new, y_new)
                if statistics or interactions:
                    used_features = set(dt.tree_.feature)
                    used_features.remove(-2)
                    if statistics:
                        used_features_histogram[len(used_features)] += 1
                    if interactions:
                        used_feature_sets[frozenset(used_features)] += 1
                feature_importances += dt.feature_importances_
            feature_importances = feature_importances / self.n_trees
        if interactions:
            used_feature_sets = dict(used_feature_sets)
        return feature_importances, used_features_histogram, used_feature_sets
|
"""
Responsible for briding between Oscar and the AsiaPay gateway
"""
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from .models import PaydollarTransaction as Transaction
from .gateway import (
set_txn, get_txn, do_txn, SALE, AUTHORIZATION, ORDER,
do_capture, DO_PAYDOLLAR_CHECKOUT, do_void, refund_txn
)
def _get_payment_action():
    """Return the configured AsiaPay payment action, validating it."""
    # AsiaPay supports 3 actions: 'Sale', 'Authorization', 'Order'
    action = getattr(settings, 'ASIAPAY_PAYMENT_ACTION', SALE)
    if action in (SALE, AUTHORIZATION, ORDER):
        return action
    raise ImproperlyConfigured(
        "'%s' is not a valid payment action" % action)
def get_asiapay_url(basket, shipping_methods, user=None, shipping_address=None,
                    shipping_method=None, host=None, scheme=None):
    """
    Return the URL for an AsiaPay Paydollar transaction.
    This involves registering the txn with AsiaPay to get a one-time
    URL. If a shipping method and shipping address are passed, then these are
    given to AsiaPay directly - this is used within when using AsiaPay as a
    payment method.
    """
    currency = getattr(settings, 'ASIAPAY_CURRENCY', 'GBP')
    # Default to the current Django site's domain for the callback URLs.
    if host is None:
        host = Site.objects.get_current().domain
    if scheme is None:
        use_https = getattr(settings, 'ASIAPAY_CALLBACK_HTTPS', True)
        scheme = 'https' if use_https else 'http'
    # Gateway redirect targets for successful and failed payments.
    success_url = '%s://%s%s' % (
        scheme, host, reverse('asiapay-success-response', kwargs={
            'basket_id': basket.id}))
    fail_url = '%s://%s%s' % (
        scheme, host, reverse('asiapay-fail-response', kwargs={
            'basket_id': basket.id}))
    # URL for updating shipping methods - we only use this if we have a set of
    # shipping methods to choose between.
    update_url = None
    if shipping_methods:
        update_url = '%s://%s%s' % (
            scheme, host,
            reverse('asiapay-shipping-options',
                    kwargs={'basket_id': basket.id}))
    # Determine whether a shipping address is required
    no_shipping = False
    if not basket.is_shipping_required():
        no_shipping = True
    # Pass a default billing address if there is one. This means AsiaPay can
    # pre-fill the registration form.
    address = None
    if user:
        addresses = user.addresses.all().order_by('-is_default_for_billing')
        if len(addresses):
            address = addresses[0]
    return set_txn(basket=basket,
                   shipping_methods=shipping_methods,
                   currency=currency,
                   success_url=success_url,
                   fail_url=fail_url,
                   update_url=update_url,
                   action=_get_payment_action(),
                   shipping_method=shipping_method,
                   shipping_address=shipping_address,
                   user=user,
                   user_address=address,
                   no_shipping=no_shipping)
def fetch_transaction_details(token):
    """Return the full gateway record for the transaction with *token*."""
    details = get_txn(token)
    return details
def confirm_transaction(payer_id, token, amount, currency):
    """Execute the configured payment action for an approved transaction."""
    action = _get_payment_action()
    return do_txn(payer_id, token, amount, currency, action=action)
def refund_transaction(token, amount, currency, note=None):
    """
    Refund (fully or partially) a settled Paydollar transaction.

    NOTE(review): the `note` argument is accepted but never forwarded to
    refund_txn -- confirm whether the gateway call should receive it.
    """
    txn = Transaction.objects.get(token=token,
                                  method=DO_PAYDOLLAR_CHECKOUT)
    # A refund below the originally captured amount is flagged as partial.
    is_partial = amount < txn.amount
    return refund_txn(txn.value('TRANSACTIONID'), is_partial, amount, currency)
def capture_authorization(token, note=None):
    """Settle (capture) a previously authorized Paydollar transaction."""
    txn = Transaction.objects.get(token=token,
                                  method=DO_PAYDOLLAR_CHECKOUT)
    txn_id = txn.value('TRANSACTIONID')
    return do_capture(txn_id, txn.amount, txn.currency, note=note)
def void_authorization(token, note=None):
    """Cancel (void) a previously authorized Paydollar transaction."""
    txn = Transaction.objects.get(token=token,
                                  method=DO_PAYDOLLAR_CHECKOUT)
    txn_id = txn.value('TRANSACTIONID')
    return do_void(txn_id, note=note)
removed refund_txn
"""
Responsible for briding between Oscar and the AsiaPay gateway
"""
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from .models import PaydollarTransaction as Transaction
from .gateway import (
set_txn, get_txn, do_txn, SALE, AUTHORIZATION, ORDER,
do_capture, DO_PAYDOLLAR_CHECKOUT, do_void
)
def _get_payment_action():
    """Return the configured AsiaPay payment action, validating it."""
    # AsiaPay supports 3 actions: 'Sale', 'Authorization', 'Order'
    action = getattr(settings, 'ASIAPAY_PAYMENT_ACTION', SALE)
    if action in (SALE, AUTHORIZATION, ORDER):
        return action
    raise ImproperlyConfigured(
        "'%s' is not a valid payment action" % action)
def get_asiapay_url(basket, shipping_methods, user=None, shipping_address=None,
                    shipping_method=None, host=None, scheme=None):
    """
    Return the URL for an AsiaPay Paydollar transaction.
    This involves registering the txn with AsiaPay to get a one-time
    URL. If a shipping method and shipping address are passed, then these are
    given to AsiaPay directly - this is used within when using AsiaPay as a
    payment method.
    """
    currency = getattr(settings, 'ASIAPAY_CURRENCY', 'GBP')
    # Default to the current Django site's domain for the callback URLs.
    if host is None:
        host = Site.objects.get_current().domain
    if scheme is None:
        use_https = getattr(settings, 'ASIAPAY_CALLBACK_HTTPS', True)
        scheme = 'https' if use_https else 'http'
    # Gateway redirect targets for successful and failed payments.
    success_url = '%s://%s%s' % (
        scheme, host, reverse('asiapay-success-response', kwargs={
            'basket_id': basket.id}))
    fail_url = '%s://%s%s' % (
        scheme, host, reverse('asiapay-fail-response', kwargs={
            'basket_id': basket.id}))
    # URL for updating shipping methods - we only use this if we have a set of
    # shipping methods to choose between.
    update_url = None
    if shipping_methods:
        update_url = '%s://%s%s' % (
            scheme, host,
            reverse('asiapay-shipping-options',
                    kwargs={'basket_id': basket.id}))
    # Determine whether a shipping address is required
    no_shipping = False
    if not basket.is_shipping_required():
        no_shipping = True
    # Pass a default billing address if there is one. This means AsiaPay can
    # pre-fill the registration form.
    address = None
    if user:
        addresses = user.addresses.all().order_by('-is_default_for_billing')
        if len(addresses):
            address = addresses[0]
    return set_txn(basket=basket,
                   shipping_methods=shipping_methods,
                   currency=currency,
                   success_url=success_url,
                   fail_url=fail_url,
                   update_url=update_url,
                   action=_get_payment_action(),
                   shipping_method=shipping_method,
                   shipping_address=shipping_address,
                   user=user,
                   user_address=address,
                   no_shipping=no_shipping)
def fetch_transaction_details(token):
    """Return the full gateway record for the transaction with *token*."""
    details = get_txn(token)
    return details
def confirm_transaction(payer_id, token, amount, currency):
    """Execute the configured payment action for an approved transaction."""
    action = _get_payment_action()
    return do_txn(payer_id, token, amount, currency, action=action)
def capture_authorization(token, note=None):
    """Settle (capture) a previously authorized Paydollar transaction."""
    txn = Transaction.objects.get(token=token,
                                  method=DO_PAYDOLLAR_CHECKOUT)
    txn_id = txn.value('TRANSACTIONID')
    return do_capture(txn_id, txn.amount, txn.currency, note=note)
def void_authorization(token, note=None):
    """Cancel (void) a previously authorized Paydollar transaction."""
    txn = Transaction.objects.get(token=token,
                                  method=DO_PAYDOLLAR_CHECKOUT)
    txn_id = txn.value('TRANSACTIONID')
    return do_void(txn_id, note=note)
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
# pylint: disable=E1102
"""WebSearch module regression tests."""
__revision__ = "$Id$"
import unittest
import re
import urlparse, cgi
import sys
import cStringIO
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from mechanize import Browser, LinkNotFoundError
from invenio.config import CFG_SITE_URL, CFG_SITE_NAME, CFG_SITE_LANG, \
CFG_SITE_RECORD, CFG_SITE_LANGS, \
CFG_SITE_SECURE_URL, CFG_WEBSEARCH_SPIRES_SYNTAX
from invenio.testutils import make_test_suite, \
run_test_suite, \
nottest, \
make_url, make_surl, test_web_page_content, \
merge_error_messages
from invenio.urlutils import same_urls_p
from invenio.dbquery import run_sql
from invenio.search_engine import perform_request_search, \
guess_primary_collection_of_a_record, guess_collection_of_a_record, \
collection_restricted_p, get_permitted_restricted_collections, \
search_pattern, search_unit, search_unit_in_bibrec, \
wash_colls, record_public_p
from invenio import search_engine_summarizer
from invenio.search_engine_utils import get_fieldvalues
from invenio.intbitset import intbitset
from invenio.search_engine import intersect_results_with_collrecs
from invenio.bibrank_bridge_utils import get_external_word_similarity_ranker
from invenio.search_engine_query_parser_unit_tests import DATEUTIL_AVAILABLE
from invenio.bibindex_regression_tests import reindex_word_tables_into_testtables
# Precompute whether French is an enabled site language so the
# French-specific test methods below are only defined when applicable.
if 'fr' in CFG_SITE_LANGS:
    lang_french_configured = True
else:
    lang_french_configured = False
def parse_url(url):
    """Split *url* into (path components, parsed query dict)."""
    scheme, netloc, path, params, query_string, fragment = urlparse.urlparse(url)
    # keep_blank_values=True so parameters without values are preserved
    query = cgi.parse_qs(query_string, True)
    return path.split('/')[1:], query
def string_combinations(str_list):
    """Returns all the possible combinations of the strings in the list.
    Example: for the list ['A','B','Cd'], it will return
    [['Cd', 'B', 'A'], ['B', 'A'], ['Cd', 'A'], ['A'], ['Cd', 'B'], ['B'], ['Cd'], []]
    It adds "B", "H", "F" and "S" values to the results so different
    combinations of them are also checked.
    """
    subsets = []
    for size in range(len(str_list) + 1):
        subsets.extend(list(combinations(str_list, size)))
    # Cycle through seven fixed "of"-style suffixes so successive subsets
    # are paired with different output-format combinations.
    suffixes = [["B", "H", "S"],
                ["B", "H", "F"],
                ["B", "F", "S"],
                ["B", "F"],
                ["B", "S"],
                ["B", "H"],
                ["B"]]
    out_list = []
    for idx, subset in enumerate(subsets):
        out_list.append(list(subset) + suffixes[idx % 7])
    return out_list
def combinations(iterable, r):
    """Return r length subsequences of elements from the input iterable."""
    # combinations('ABCD', 2) --> AB AC AD BC BD CD
    # combinations(range(4), 3) --> 012 013 023 123
    pool = tuple(iterable)
    n = len(pool)
    if r > n:
        return
    indices = list(range(r))
    yield tuple(pool[i] for i in indices)
    while True:
        # Find the right-most index that can still be advanced.
        i = r - 1
        while i >= 0 and indices[i] == i + n - r:
            i -= 1
        if i < 0:
            # Every index is at its maximum: enumeration is complete.
            return
        indices[i] += 1
        for j in range(i + 1, r):
            indices[j] = indices[j - 1] + 1
        yield tuple(pool[i] for i in indices)
class WebSearchWebPagesAvailabilityTest(unittest.TestCase):
    """Check WebSearch web pages whether they are up or not."""

    def test_search_interface_pages_availability(self):
        """websearch - availability of search interface pages"""
        baseurl = CFG_SITE_URL + '/'
        _exports = ['', 'collection/Poetry', 'collection/Poetry?as=1']
        error_messages = []
        for url in [baseurl + page for page in _exports]:
            # test_web_page_content returns a list of error strings;
            # an empty list means the page rendered as expected.
            error_messages.extend(test_web_page_content(url))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return

    def test_search_results_pages_availability(self):
        """websearch - availability of search results pages"""
        baseurl = CFG_SITE_URL + '/search'
        _exports = ['', '?c=Poetry', '?p=ellis', '/cache', '/log']
        error_messages = []
        for url in [baseurl + page for page in _exports]:
            error_messages.extend(test_web_page_content(url))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return

    def test_search_detailed_record_pages_availability(self):
        """websearch - availability of search detailed record pages"""
        baseurl = CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/'
        _exports = ['', '1', '1/', '1/files', '1/files/']
        error_messages = []
        for url in [baseurl + page for page in _exports]:
            error_messages.extend(test_web_page_content(url))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return

    def test_browse_results_pages_availability(self):
        """websearch - availability of browse results pages"""
        baseurl = CFG_SITE_URL + '/search'
        _exports = ['?p=ellis&f=author&action_browse=Browse']
        error_messages = []
        for url in [baseurl + page for page in _exports]:
            error_messages.extend(test_web_page_content(url))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return

    def test_help_page_availability(self):
        """websearch - availability of Help Central page"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help',
                                               expected_text="Help Central"))

    # French variants are only defined when 'fr' is an enabled site
    # language (see the module-level lang_french_configured flag).
    if lang_french_configured:
        def test_help_page_availability_fr(self):
            """websearch - availability of Help Central page in french"""
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL + '/help/?ln=fr',
                                                   expected_text="Centre d'aide"))

    def test_search_tips_page_availability(self):
        """websearch - availability of Search Tips"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help/search-tips',
                                               expected_text="Search Tips"))

    if lang_french_configured:
        def test_search_tips_page_availability_fr(self):
            """websearch - availability of Search Tips in french"""
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL + '/help/search-tips?ln=fr',
                                                   expected_text="Conseils de recherche"))

    def test_search_guide_page_availability(self):
        """websearch - availability of Search Guide"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help/search-guide',
                                               expected_text="Search Guide"))

    if lang_french_configured:
        def test_search_guide_page_availability_fr(self):
            """websearch - availability of Search Guide in french"""
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL + '/help/search-guide?ln=fr',
                                                   expected_text="Guide de recherche"))
class WebSearchTestLegacyURLs(unittest.TestCase):
    """ Check that the application still responds to legacy URLs for
    navigating, searching and browsing."""

    def test_legacy_collections(self):
        """ websearch - collections handle legacy urls """
        browser = Browser()

        def check(legacy, new, browser=browser):
            # Open the legacy URL and assert the final (post-redirect)
            # URL is equivalent to the expected modern one.
            browser.open(legacy)
            got = browser.geturl()
            self.failUnless(same_urls_p(got, new), got)

        # Use the root URL unless we need more
        check(make_url('/', c=CFG_SITE_NAME),
              make_url('/', ln=CFG_SITE_LANG))
        # Other collections are redirected in the /collection area
        check(make_url('/', c='Poetry'),
              make_url('/collection/Poetry', ln=CFG_SITE_LANG))
        # Drop unnecessary arguments, like ln and as (when they are
        # the default value)
        args = {'as': 0}
        check(make_url('/', c='Poetry', **args),
              make_url('/collection/Poetry', ln=CFG_SITE_LANG))
        # Otherwise, keep them
        args = {'as': 1, 'ln': CFG_SITE_LANG}
        check(make_url('/', c='Poetry', **args),
              make_url('/collection/Poetry', **args))
        # Support the /index.py addressing too
        check(make_url('/index.py', c='Poetry'),
              make_url('/collection/Poetry', ln=CFG_SITE_LANG))

    def test_legacy_search(self):
        """ websearch - search queries handle legacy urls """
        browser = Browser()

        def check(legacy, new, browser=browser):
            browser.open(legacy)
            got = browser.geturl()
            self.failUnless(same_urls_p(got, new), got)

        # /search.py is redirected on /search
        # Note that `as' is a reserved word in Python 2.5
        check(make_url('/search.py', p='nuclear', ln='en') + 'as=1',
              make_url('/search', p='nuclear', ln='en') + 'as=1')

    # French-only checks are defined when 'fr' is an enabled site language.
    if lang_french_configured:
        def test_legacy_search_fr(self):
            """ websearch - search queries handle legacy urls """
            browser = Browser()

            def check(legacy, new, browser=browser):
                browser.open(legacy)
                got = browser.geturl()
                self.failUnless(same_urls_p(got, new), got)

            # direct recid searches are redirected to /CFG_SITE_RECORD
            check(make_url('/search.py', recid=1, ln='fr'),
                  make_url('/%s/1' % CFG_SITE_RECORD, ln='fr'))

    def test_legacy_search_help_link(self):
        """websearch - legacy Search Help page link"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help/search/index.en.html',
                                               expected_text="Help Central"))

    if lang_french_configured:
        def test_legacy_search_tips_link(self):
            """websearch - legacy Search Tips page link"""
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL + '/help/search/tips.fr.html',
                                                   expected_text="Conseils de recherche"))

    def test_legacy_search_guide_link(self):
        """websearch - legacy Search Guide page link"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help/search/guide.en.html',
                                               expected_text="Search Guide"))
class WebSearchTestRecord(unittest.TestCase):
    """ Check the interface of the /CFG_SITE_RECORD results """
    def test_format_links(self):
        """ websearch - check format links for records """
        browser = Browser()
        # We open the record in all known HTML formats
        for hformat in ('hd', 'hx', 'hm'):
            browser.open(make_url('/%s/1' % CFG_SITE_RECORD, of=hformat))
            if hformat == 'hd':
                # hd format should have a link to the following
                # formats
                for oformat in ('hx', 'hm', 'xm', 'xd'):
                    target = make_url('/%s/1/export/%s?ln=en' % (CFG_SITE_RECORD, oformat))
                    try:
                        browser.find_link(url=target)
                    except LinkNotFoundError:
                        self.fail('link %r should be in page' % target)
            else:
                # non-hd HTML formats should have a link back to
                # the main detailed record
                target = make_url('/%s/1' % CFG_SITE_RECORD)
                try:
                    browser.find_link(url=target)
                except LinkNotFoundError:
                    self.fail('link %r should be in page' % target)
        return
    def test_exported_formats(self):
        """ websearch - check formats exported through /CFG_SITE_RECORD/1/export/ URLs"""
        # Each export format must render the demo record's title with its
        # own format-specific markup (MARC tag, HTML, XML element, BibTeX...).
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/hm' % CFG_SITE_RECORD),
                                               expected_text='245__ $$aALEPH experiment'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/hd' % CFG_SITE_RECORD),
                                               expected_text='<strong>ALEPH experiment'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/xm' % CFG_SITE_RECORD),
                                               expected_text='<subfield code="a">ALEPH experiment'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/xd' % CFG_SITE_RECORD),
                                               expected_text='<dc:title>ALEPH experiment'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/hs' % CFG_SITE_RECORD),
                                               expected_text='<a href="/%s/1?ln=%s">ALEPH experiment' % \
                                               (CFG_SITE_RECORD, CFG_SITE_LANG)))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/hx' % CFG_SITE_RECORD),
                                               expected_text='title = "{ALEPH experiment'))
        # The ot parameter restricts output to the listed MARC tags only:
        # tag 245 must appear, tag 001 must not.
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/t?ot=245' % CFG_SITE_RECORD),
                                               expected_text='245__ $$aALEPH experiment'))
        self.assertNotEqual([],
                            test_web_page_content(make_url('/%s/1/export/t?ot=245' % CFG_SITE_RECORD),
                                                  expected_text='001__'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/h?ot=245' % CFG_SITE_RECORD),
                                               expected_text='245__ $$aALEPH experiment'))
        self.assertNotEqual([],
                            test_web_page_content(make_url('/%s/1/export/h?ot=245' % CFG_SITE_RECORD),
                                                  expected_text='001__'))
        return
    def test_plots_tab(self):
        """ websearch - test to ensure the plots tab is working """
        # Record 8 is the demo record carrying plots; the plots tab must show
        # the clip container and not the record abstract.
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/8/plots' % CFG_SITE_RECORD),
                                               expected_text='div id="clip"',
                                               unexpected_text='Abstract'))
    def test_meta_header(self):
        """ websearch - test that metadata embedded in header of hd
        relies on hdm format and Default_HTML_meta bft, but hook is in
        websearch to display the format
        """
        self.assertEqual([],
                         test_web_page_content(make_url('/record/1'),
                                               expected_text='<meta content="ALEPH experiment: Candidate of Higgs boson production" name="citation_title" />'))
        return
class WebSearchTestCollections(unittest.TestCase):
    """Check navigation inside collection pages of the demo site."""
    def test_traversal_links(self):
        """ websearch - traverse all the publications of a collection """
        browser = Browser()
        try:
            # Check both the simple (as=0) and advanced (as=1) interfaces.
            for aas in (0, 1):
                args = {'as': aas}
                browser.open(make_url('/collection/Preprints', **args))
                # Walk the paging links (jrec = index of first record shown).
                for jrec in (11, 21, 11, 27):
                    args = {'jrec': jrec, 'cc': 'Preprints'}
                    if aas:
                        args['as'] = aas
                    url = make_url('/search', **args)
                    try:
                        browser.follow_link(url=url)
                    except LinkNotFoundError:
                        # The link may carry an explicit ln parameter; retry.
                        args['ln'] = CFG_SITE_LANG
                        url = make_url('/search', **args)
                        browser.follow_link(url=url)
        except LinkNotFoundError:
            self.fail('no link %r in %r' % (url, browser.geturl()))
    def test_collections_links(self):
        """ websearch - enter in collections and subcollections """
        browser = Browser()
        def tryfollow(url):
            # Follow url from the current page, dumping the page body on
            # failure to ease debugging.
            cur = browser.geturl()
            body = browser.response().read()
            try:
                browser.follow_link(url=url)
            except LinkNotFoundError:
                print body
                self.fail("in %r: could not find %r" % (
                    cur, url))
            return
        for aas in (0, 1):
            if aas:
                kargs = {'as': 1}
            else:
                kargs = {}
            kargs['ln'] = CFG_SITE_LANG
            # We navigate from immediate son to immediate son...
            browser.open(make_url('/', **kargs))
            tryfollow(make_url('/collection/Articles%20%26%20Preprints',
                               **kargs))
            tryfollow(make_url('/collection/Articles', **kargs))
            # But we can also jump to a grandson immediately
            browser.back()
            browser.back()
            tryfollow(make_url('/collection/ALEPH', **kargs))
        return
    def test_records_links(self):
        """ websearch - check the links toward records in leaf collections """
        browser = Browser()
        browser.open(make_url('/collection/Preprints'))
        def harvest():
            """ Parse all the links in the page, and check that for
            each link to a detailed record, we also have the
            corresponding link to the similar records."""
            records = set()
            similar = set()
            for link in browser.links():
                path, q = parse_url(link.url)
                if not path:
                    continue
                if path[0] == CFG_SITE_RECORD:
                    records.add(int(path[1]))
                    continue
                if path[0] == 'search':
                    # Only "similar records" searches (rm=wrd) count here.
                    if not q.get('rm') == ['wrd']:
                        continue
                    recid = q['p'][0].split(':')[1]
                    similar.add(int(recid))
            self.failUnlessEqual(records, similar)
            return records
        # We must have 10 links to the corresponding /CFG_SITE_RECORD
        found = harvest()
        self.failUnlessEqual(len(found), 10)
        # When clicking on the "Search" button, we must also have
        # these 10 links on the records.
        browser.select_form(name="search")
        browser.submit()
        found = harvest()
        self.failUnlessEqual(len(found), 10)
        return
    def test_em_parameter(self):
        """ websearch - check different values of em return different parts of the collection page"""
        # em selects which page elements are rendered; each flag must toggle
        # exactly its own fragment of the page.
        for combi in string_combinations(["L", "P", "Prt"]):
            url = '/collection/Articles?em=%s' % ','.join(combi)
            expected_text = ["<strong>Development of photon beam diagnostics for VUV radiation from a SASE FEL</strong>"]
            unexpected_text = []
            if "H" in combi:
                expected_text.append(">Atlantis Institute of Fictive Science</a>")
            else:
                unexpected_text.append(">Atlantis Institute of Fictive Science</a>")
            if "F" in combi:
                expected_text.append("This site is also available in the following languages:")
            else:
                unexpected_text.append("This site is also available in the following languages:")
            if "S" in combi:
                expected_text.append('value="Search"')
            else:
                unexpected_text.append('value="Search"')
            if "L" in combi:
                expected_text.append('Search also:')
            else:
                unexpected_text.append('Search also:')
            if "Prt" in combi or "P" in combi:
                expected_text.append('<div class="portalboxheader">ABOUT ARTICLES</div>')
            else:
                unexpected_text.append('<div class="portalboxheader">ABOUT ARTICLES</div>')
            self.assertEqual([], test_web_page_content(make_url(url),
                                                       expected_text=expected_text,
                                                       unexpected_text=unexpected_text))
        return
class WebSearchTestBrowse(unittest.TestCase):
    """Check the browse interface, including restricted-collection cases."""
    def test_browse_field(self):
        """ websearch - check that browsing works """
        browser = Browser()
        browser.open(make_url('/'))
        browser.select_form(name='search')
        browser['f'] = ['title']
        browser.submit(name='action_browse')
        def collect():
            # We'll get a few links to search for the actual hits, plus a
            # link to the following results.
            res = []
            for link in browser.links(url_regex=re.compile(CFG_SITE_URL +
                                                           r'/search\?')):
                if link.text == 'Advanced Search':
                    continue
                dummy, q = parse_url(link.url)
                res.append((link, q))
            return res
        # if we follow the last link, we should get another
        # batch. There is an overlap of one item.
        batch_1 = collect()
        browser.follow_link(link=batch_1[-1][0])
        batch_2 = collect()
        # FIXME: we cannot compare the whole query, as the collection
        # set is not equal
        self.failUnlessEqual(batch_1[-2][1]['p'], batch_2[0][1]['p'])
    def test_browse_restricted_record_as_unauthorized_user(self):
        """websearch - browse for a record that belongs to a restricted collection as an unauthorized user."""
        # The hit list appears, but the restricted record must not be linked.
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&ln=en',
                                               username = 'guest',
                                               expected_text = ['Hits', '088__a'],
                                               unexpected_text = ['>CERN-THESIS-99-074</a>'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
    def test_browse_restricted_record_as_unauthorized_user_in_restricted_collection(self):
        """websearch - browse for a record that belongs to a restricted collection as an unauthorized user."""
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&c=ALEPH+Theses&ln=en',
                                               username='guest',
                                               expected_text= ['This collection is restricted'],
                                               unexpected_text= ['Hits', '>CERN-THESIS-99-074</a>'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
    def test_browse_restricted_record_as_authorized_user(self):
        """websearch - browse for a record that belongs to a restricted collection as an authorized user."""
        # NOTE(review): even authorized, the record is not linked when the
        # restricted collection is not explicitly selected — confirm intent.
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&ln=en',
                                               username='admin',
                                               password='',
                                               expected_text= ['Hits', '088__a'],
                                               unexpected_text = ['>CERN-THESIS-99-074</a>'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
    def test_browse_restricted_record_as_authorized_user_in_restricted_collection(self):
        """websearch - browse for a record that belongs to a restricted collection as an authorized user."""
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&c=ALEPH+Theses&ln=en',
                                               username='admin',
                                               password='',
                                               expected_text= ['Hits', '>CERN-THESIS-99-074</a>'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
    def test_browse_exact_author_help_link(self):
        """websearch - browsing a phrase-like author field proposes the exact-author index."""
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=Dasse%2C+Michel&f=author&action_browse=Browse',
                                               username = 'guest',
                                               expected_text = ['Did you mean to browse in', 'index?'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=Dasse%2C+Michel&f=firstauthor&action_browse=Browse',
                                               username = 'guest',
                                               expected_text = ['Did you mean to browse in', 'index?'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        # Same proposal must appear from the advanced-search form.
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&as=1&m1=a&p1=Dasse%2C+Michel&f1=author&op1=a&m2=a&p2=&f2=firstauthor&op2=a&m3=a&p3=&f3=&action_browse=Browse',
                                               username = 'guest',
                                               expected_text = ['Did you mean to browse in', 'index?'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
class WebSearchTestOpenURL(unittest.TestCase):
    """Check that OpenURL ISBN queries are rewritten into isbn: searches."""
    def _assert_isbn_redirect(self, entry_url):
        # Opening the OpenURL entry point must land on a precise isbn: search
        # in detailed (hd) format.
        browser = Browser()
        browser.open(entry_url)
        dummy, landed_q = parse_url(browser.geturl())
        self.failUnlessEqual(landed_q, {
            'sc': ['1'],
            'p': ['isbn:"0387940758"'],
            'of': ['hd'],
        })
    def test_isbn_01(self):
        """ websearch - isbn query via OpenURL 0.1"""
        self._assert_isbn_redirect(make_url('/openurl', isbn='0387940758'))
    def test_isbn_10_rft_id(self):
        """ websearch - isbn query via OpenURL 1.0 - rft_id"""
        self._assert_isbn_redirect(
            make_url('/openurl', rft_id='urn:ISBN:0387940758'))
    def test_isbn_10(self):
        """ websearch - isbn query via OpenURL 1.0"""
        self._assert_isbn_redirect(make_url('/openurl?rft.isbn=0387940758'))
class WebSearchTestSearch(unittest.TestCase):
    """Check the behaviour of the main /search interface."""
    def test_hits_in_other_collection(self):
        """ websearch - check extension of a query to the home collection """
        browser = Browser()
        # We do a precise search in an isolated collection
        browser.open(make_url('/collection/ISOLDE', ln='en'))
        browser.select_form(name='search')
        browser['f'] = ['author']
        browser['p'] = 'matsubara'
        browser.submit()
        dummy, current_q = parse_url(browser.geturl())
        link = browser.find_link(text_regex=re.compile('.*hit', re.I))
        dummy, target_q = parse_url(link.url)
        # the target query should be the current query without any c
        # or cc specified.
        for f in ('cc', 'c', 'action_search'):
            if f in current_q:
                del current_q[f]
        self.failUnlessEqual(current_q, target_q)
    def test_nearest_terms(self):
        """ websearch - provide a list of nearest terms """
        browser = Browser()
        browser.open(make_url(''))
        # Search something weird
        browser.select_form(name='search')
        browser['p'] = 'gronf'
        browser.submit()
        dummy, original = parse_url(browser.geturl())
        for to_drop in ('cc', 'action_search', 'f'):
            if to_drop in original:
                del original[to_drop]
        if 'ln' not in original:
            original['ln'] = [CFG_SITE_LANG]
        # we should get a few searches back, which are identical
        # except for the p field being substituted (and the cc field
        # being dropped).
        if 'cc' in original:
            del original['cc']
        for link in browser.links(url_regex=re.compile(CFG_SITE_URL + r'/search\?')):
            if link.text == 'Advanced Search':
                continue
            dummy, target = parse_url(link.url)
            if 'ln' not in target:
                target['ln'] = [CFG_SITE_LANG]
            original['p'] = [link.text]
            self.failUnlessEqual(original, target)
        return
    def test_switch_to_simple_search(self):
        """ websearch - switch to simple search """
        browser = Browser()
        args = {'as': 1}
        browser.open(make_url('/collection/ISOLDE', **args))
        browser.select_form(name='search')
        browser['p1'] = 'tandem'
        browser['f1'] = ['title']
        browser.submit()
        # Switching interfaces must carry p1/f1 over as p/f.
        browser.follow_link(text='Simple Search')
        dummy, q = parse_url(browser.geturl())
        self.failUnlessEqual(q, {'cc': ['ISOLDE'],
                                 'p': ['tandem'],
                                 'f': ['title'],
                                 'ln': ['en']})
    def test_switch_to_advanced_search(self):
        """ websearch - switch to advanced search """
        browser = Browser()
        browser.open(make_url('/collection/ISOLDE'))
        browser.select_form(name='search')
        browser['p'] = 'tandem'
        browser['f'] = ['title']
        browser.submit()
        # Switching interfaces must carry p/f over as p1/f1 with as=1.
        browser.follow_link(text='Advanced Search')
        dummy, q = parse_url(browser.geturl())
        self.failUnlessEqual(q, {'cc': ['ISOLDE'],
                                 'p1': ['tandem'],
                                 'f1': ['title'],
                                 'as': ['1'],
                                 'ln' : ['en']})
    def test_no_boolean_hits(self):
        """ websearch - check the 'no boolean hits' proposed links """
        browser = Browser()
        browser.open(make_url(''))
        browser.select_form(name='search')
        browser['p'] = 'quasinormal muon'
        browser.submit()
        dummy, q = parse_url(browser.geturl())
        for to_drop in ('cc', 'action_search', 'f'):
            if to_drop in q:
                del q[to_drop]
        # Each individual term must be proposed as a stand-alone search.
        for bsu in ('quasinormal', 'muon'):
            l = browser.find_link(text=bsu)
            q['p'] = bsu
            if not same_urls_p(l.url, make_url('/search', **q)):
                self.fail(repr((l.url, make_url('/search', **q))))
    def test_similar_authors(self):
        """ websearch - test similar authors box """
        browser = Browser()
        browser.open(make_url(''))
        browser.select_form(name='search')
        browser['p'] = 'Ellis, R K'
        browser['f'] = ['author']
        browser.submit()
        l = browser.find_link(text="Ellis, R S")
        self.failUnless(same_urls_p(l.url, make_url('/search',
                                                    p="Ellis, R S",
                                                    f='author',
                                                    ln='en')))
    def test_em_parameter(self):
        """ websearch - check different values of em return different parts of the search page"""
        # em selects which page elements are rendered; each flag must toggle
        # exactly its own fragment of the page.
        for combi in string_combinations(["K", "A", "I", "O"]):
            url = '/search?ln=en&cc=Articles+%%26+Preprints&sc=1&c=Articles&c=Preprints&em=%s' % ','.join(combi)
            expected_text = ["<strong>Development of photon beam diagnostics for VUV radiation from a SASE FEL</strong>"]
            unexpected_text = []
            if "H" in combi:
                expected_text.append(">Atlantis Institute of Fictive Science</a>")
            else:
                unexpected_text.append(">Atlantis Institute of Fictive Science</a>")
            if "F" in combi:
                expected_text.append("This site is also available in the following languages:")
            else:
                unexpected_text.append("This site is also available in the following languages:")
            if "S" in combi:
                expected_text.append('value="Search"')
            else:
                unexpected_text.append('value="Search"')
            if "K" in combi:
                expected_text.append('value="Add to basket"')
            else:
                unexpected_text.append('value="Add to basket"')
            if "A" in combi:
                expected_text.append('Interested in being notified about new results for this query?')
            else:
                unexpected_text.append('Interested in being notified about new results for this query?')
            if "I" in combi:
                expected_text.append('jump to record:')
            else:
                unexpected_text.append('jump to record:')
            if "O" in combi:
                expected_text.append('<th class="searchresultsboxheader"><strong>Results overview:</strong> Found <strong>')
            else:
                unexpected_text.append('<th class="searchresultsboxheader"><strong>Results overview:</strong> Found <strong>')
            self.assertEqual([], test_web_page_content(make_url(url),
                                                       expected_text=expected_text,
                                                       unexpected_text=unexpected_text))
        return
class WebSearchCJKTokenizedSearchTest(unittest.TestCase):
    """
    Reindexes record 104 (the one with chinese poetry) with use of BibIndexCJKTokenizer.
    After tests it reindexes record 104 back with BibIndexDefaultTokenizer.
    Checks if one can find record 104 specifying only one or two CJK characters.
    """
    # Class-level state shared across the per-test setUp/tearDown calls.
    test_counter = 0
    reindexed = False
    # NOTE(review): setUp/tearDown are declared @classmethod but keep a
    # `self` parameter name — unittest still invokes them per test; the guard
    # flags below make the reindexing happen only once. Confirm intent.
    @classmethod
    def setUp(self):
        if not self.reindexed:
            reindex_word_tables_into_testtables('title',
                                                recids=[[104,104]],
                                                prefix = False,
                                                tokenizer = 'BibIndexCJKTokenizer')
            self.reindexed = True
    @classmethod
    def tearDown(self):
        self.test_counter += 1
        # 2 == number of tests in this class; restore the default tokenizer
        # after the last one has run.
        if self.test_counter == 2:
            reindex_word_tables_into_testtables('title',
                                                recids=[[104,104]],
                                                prefix = False,
                                                tokenizer = 'BibIndexDefaultTokenizer')
    def test_title_cjk_tokenized_two_characters(self):
        """CJKTokenizer - test for finding chinese poetry with two CJK characters"""
        self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=title%3A敬亭&f=&of=id',
                                                   expected_text='[104]'))
    def test_title_cjk_tokenized_single_character(self):
        """CJKTokenizer - test for finding chinese poetry with one CJK character"""
        self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=title%3A亭&f=&of=id',
                                                   expected_text='[104]'))
class WebSearchTestWildcardLimit(unittest.TestCase):
    """Checks if the wildcard limit is correctly passed and that
    users without autorization can not exploit it"""
    def test_wildcard_limit_correctly_passed_when_not_set(self):
        """websearch - wildcard limit is correctly passed when default"""
        # wl=1000 is the documented default, so both calls must agree.
        self.assertEqual(search_pattern(p='e*', f='author'),
                         search_pattern(p='e*', f='author', wl=1000))
    def test_wildcard_limit_correctly_passed_when_set(self):
        """websearch - wildcard limit is correctly passed when set"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=e*&f=author&of=id&wl=5&rg=100',
                                               expected_text="[9, 10, 11, 17, 46, 48, 50, 51, 52, 53, 54, 67, 72, 74, 81, 88, 92, 96]"))
    def test_wildcard_limit_correctly_not_active(self):
        """websearch - wildcard limit is not active when there is no wildcard query"""
        self.assertEqual(search_pattern(p='ellis', f='author'),
                         search_pattern(p='ellis', f='author', wl=1))
    def test_wildcard_limit_increased_by_authorized_users(self):
        """websearch - wildcard limit increased by authorized user"""
        browser = Browser()
        #try a search query, with no wildcard limit set by the user
        browser.open(make_url('/search?p=a*&of=id'))
        recid_list_guest_no_limit = browser.response().read() # so the limit is CGF_WEBSEARCH_WILDCARD_LIMIT
        #try a search query, with a wildcard limit imposed by the user
        #wl=1000000 - a very high limit,higher then what the CFG_WEBSEARCH_WILDCARD_LIMIT might be
        browser.open(make_url('/search?p=a*&of=id&wl=1000000'))
        recid_list_guest_with_limit = browser.response().read()
        #same results should be returned for a search without the wildcard limit set by the user
        #and for a search with a large limit set by the user
        #in this way we know that nomatter how large the limit is, the wildcard query will be
        #limitted by CFG_WEBSEARCH_WILDCARD_LIMIT (for a guest user)
        self.failIf(len(recid_list_guest_no_limit.split(',')) != len(recid_list_guest_with_limit.split(',')))
        ##login as admin
        browser.open(make_surl('/youraccount/login'))
        browser.select_form(nr=0)
        browser['p_un'] = 'admin'
        browser['p_pw'] = ''
        browser.submit()
        #try a search query, with a wildcard limit imposed by an authorized user
        #wl = 10000 a very high limit, higher then what the CFG_WEBSEARCH_WILDCARD_LIMIT might be
        browser.open(make_surl('/search?p=a*&of=id&wl=10000'))
        recid_list_authuser_with_limit = browser.response().read()
        #the authorized user can set whatever limit he might wish
        #so, the results returned for the auth. users should exceed the results returned for unauth. users
        self.failUnless(len(recid_list_guest_no_limit.split(',')) <= len(recid_list_authuser_with_limit.split(',')))
        #logout
        browser.open(make_surl('/youraccount/logout'))
        browser.response().read()
        browser.close()
class WebSearchNearestTermsTest(unittest.TestCase):
    """Check various alternatives of searches leading to the nearest
    terms box."""
    def test_nearest_terms_box_in_okay_query(self):
        """ websearch - no nearest terms box for a successful query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
                                               expected_text="jump to record"))
    def test_nearest_terms_box_in_unsuccessful_simple_query(self):
        """ websearch - nearest terms box for unsuccessful simple query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellisz',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=embed",
                                               expected_link_label='embed'))
    def test_nearest_terms_box_in_unsuccessful_simple_accented_query(self):
        """ websearch - nearest terms box for unsuccessful accented query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=elliszà',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=embed",
                                               expected_link_label='embed'))
    def test_nearest_terms_box_in_unsuccessful_structured_query(self):
        """ websearch - nearest terms box for unsuccessful structured query """
        # Both f=... and prefix (field:term) notations must behave the same.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellisz&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=eisenhandler&f=author",
                                               expected_link_label='eisenhandler'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=author%3Aellisz',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=author%3Aeisenhandler",
                                               expected_link_label='eisenhandler'))
    def test_nearest_terms_box_in_query_with_invalid_index(self):
        """ websearch - nearest terms box for queries with invalid indexes specified """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=bednarz%3Aellis',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=bednarz",
                                               expected_link_label='bednarz'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=1%3Aellis',
                                               expected_text="no index 1.",
                                               expected_link_target=CFG_SITE_URL+"/record/47?ln=en",
                                               expected_link_label="Detailed record"))
    def test_nearest_terms_box_in_unsuccessful_phrase_query(self):
        """ websearch - nearest terms box for unsuccessful phrase query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%22Ellis%2C+Z%22',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=author%3A%22Enqvist%2C+K%22",
                                               expected_link_label='Enqvist, K'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%22ellisz%22&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=%22Enqvist%2C+K%22&f=author",
                                               expected_link_label='Enqvist, K'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%22elliszà%22&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=%22Enqvist%2C+K%22&f=author",
                                               expected_link_label='Enqvist, K'))
    def test_nearest_terms_box_in_unsuccessful_partial_phrase_query(self):
        """ websearch - nearest terms box for unsuccessful partial phrase query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%27Ellis%2C+Z%27',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=author%3A%27Enqvist%2C+K%27",
                                               expected_link_label='Enqvist, K'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%27ellisz%27&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=%27Enqvist%2C+K%27&f=author",
                                               expected_link_label='Enqvist, K'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%27elliszà%27&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=%27Enqvist%2C+K%27&f=author",
                                               expected_link_label='Enqvist, K'))
    def test_nearest_terms_box_in_unsuccessful_partial_phrase_advanced_query(self):
        """ websearch - nearest terms box for unsuccessful partial phrase advanced search query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p1=aaa&f1=title&m1=p&as=1',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&f1=title&as=1&p1=A+simple+functional+form+for+proton-nucleus+total+reaction+cross+sections&m1=p",
                                               expected_link_label='A simple functional form for proton-nucleus total reaction cross sections'))
    def test_nearest_terms_box_in_unsuccessful_exact_phrase_advanced_query(self):
        """ websearch - nearest terms box for unsuccessful exact phrase advanced search query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p1=aaa&f1=title&m1=e&as=1',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&f1=title&as=1&p1=A+simple+functional+form+for+proton-nucleus+total+reaction+cross+sections&m1=e",
                                               expected_link_label='A simple functional form for proton-nucleus total reaction cross sections'))
    def test_nearest_terms_box_in_unsuccessful_boolean_query(self):
        """ websearch - nearest terms box for unsuccessful boolean query """
        # Only the failing sub-term of a boolean query gets substituted.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=title%3Aellisz+author%3Aellisz',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aellisz",
                                               expected_link_label='energi'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=title%3Aenergi+author%3Aenergie',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aenqvist",
                                               expected_link_label='enqvist'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=title%3Aellisz+author%3Aellisz&f=keyword',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aellisz&f=keyword",
                                               expected_link_label='energi'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=title%3Aenergi+author%3Aenergie&f=keyword',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aenqvist&f=keyword",
                                               expected_link_label='enqvist'))
    def test_nearest_terms_box_in_unsuccessful_uppercase_query(self):
        """ websearch - nearest terms box for unsuccessful uppercase query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=fOo%3Atest',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=food",
                                               expected_link_label='food'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=arXiv%3A1007.5048',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=artist",
                                               expected_link_label='artist'))
    def test_nearest_terms_box_in_unsuccessful_spires_query(self):
        """ websearch - nearest terms box for unsuccessful spires query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=find+a+foobar',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=find+a+finch",
                                               expected_link_label='finch'))
class WebSearchBooleanQueryTest(unittest.TestCase):
    """Check various boolean queries."""
    def test_successful_boolean_query(self):
        """ websearch - successful boolean query """
        # Both terms co-occur in some demo records, so hits are expected.
        errors = test_web_page_content(CFG_SITE_URL + '/search?p=ellis+muon',
                                       expected_text="records found",
                                       expected_link_label="Detailed record")
        self.assertEqual([], errors)
    def test_unsuccessful_boolean_query_where_all_individual_terms_match(self):
        """ websearch - unsuccessful boolean query where all individual terms match """
        errors = test_web_page_content(CFG_SITE_URL + '/search?p=ellis+muon+letter',
                                       expected_text="Boolean query returned no hits. Please combine your search terms differently.")
        self.assertEqual([], errors)
    def test_unsuccessful_boolean_query_in_advanced_search_where_all_individual_terms_match(self):
        """ websearch - unsuccessful boolean query in advanced search where all individual terms match """
        errors = test_web_page_content(CFG_SITE_URL + '/search?m1=a&p1=ellis&op1=a&m2=a&p2=muon&op2=a&p3=letter',
                                       expected_text="Boolean query returned no hits. Please combine your search terms differently.")
        self.assertEqual([], errors)
class WebSearchAuthorQueryTest(unittest.TestCase):
    """Check various author-related queries."""
    def test_propose_similar_author_names_box(self):
        """ websearch - propose similar author names box """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=Ellis%2C+R&f=author',
                                               expected_text="See also: similar author names",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=Ellis%2C+R+K&f=author",
                                               expected_link_label="Ellis, R K"))
    def test_do_not_propose_similar_author_names_box(self):
        """ websearch - do not propose similar author names box """
        # For an exact phrase author query, the page must NOT contain the
        # "similar author names" link: we expect test_web_page_content to
        # report that the page "does not contain link to" the target.
        errmsgs = test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%22Ellis%2C+R%22',
                                        expected_link_target=CFG_SITE_URL+"/search?ln=en&p=Ellis%2C+R+K&f=author",
                                        expected_link_label="Ellis, R K")
        # BUGFIX: when the forbidden link WAS present, errmsgs was empty and
        # errmsgs[0] raised IndexError (a test error, not a failure). Guard
        # the empty case so the test fails cleanly instead.
        if not errmsgs or errmsgs[0].find("does not contain link to") == -1:
            self.fail("Should not propose similar author names box.")
        return
class WebSearchSearchEnginePythonAPITest(unittest.TestCase):
"""Check typical search engine Python API calls on the demo data."""
def test_search_engine_python_api_for_failed_query(self):
"""websearch - search engine Python API for failed query"""
self.assertEqual([],
perform_request_search(p='aoeuidhtns'))
def test_search_engine_python_api_for_successful_query(self):
"""websearch - search engine Python API for successful query"""
self.assertEqual([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118],
perform_request_search(p='ellis'))
def test_search_engine_python_api_for_successful_query_format_intbitset(self):
"""websearch - search engine Python API for successful query, output format intbitset"""
self.assertEqual(intbitset([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118]),
perform_request_search(p='ellis', of='intbitset'))
def test_search_engine_web_api_ignore_paging_parameter(self):
"""websearch - search engine Python API for successful query, ignore paging parameters"""
self.assertEqual([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118],
perform_request_search(p='ellis', rg=5, jrec=3))
def test_search_engine_python_api_respect_sorting_parameter(self):
"""websearch - search engine Python API for successful query, respect sorting parameters"""
self.assertEqual([77, 84, 85],
perform_request_search(p='klebanov'))
self.assertEqual([77, 85, 84],
perform_request_search(p='klebanov', sf='909C4v'))
def test_search_engine_python_api_respect_ranking_parameter(self):
"""websearch - search engine Python API for successful query, respect ranking parameters"""
self.assertEqual([77, 84, 85],
perform_request_search(p='klebanov'))
self.assertEqual([85, 77, 84],
perform_request_search(p='klebanov', rm='citation'))
def test_search_engine_python_api_for_existing_record(self):
"""websearch - search engine Python API for existing record"""
self.assertEqual([8],
perform_request_search(recid=8))
def test_search_engine_python_api_for_existing_record_format_intbitset(self):
"""websearch - search engine Python API for existing record, output format intbitset"""
self.assertEqual(intbitset([8]),
perform_request_search(recid=8, of='intbitset'))
def test_search_engine_python_api_for_nonexisting_record(self):
    """websearch - search engine Python API for non-existing record"""
    # A recid far beyond the demo data set yields an empty hit list.
    hits = perform_request_search(recid=16777215)
    self.assertEqual([], hits)
def test_search_engine_python_api_for_nonexisting_record_format_intbitset(self):
    """websearch - search engine Python API for non-existing record, output format intbitset"""
    result = perform_request_search(recid=16777215, of='intbitset')
    self.assertEqual(intbitset(), result)
def test_search_engine_python_api_for_nonexisting_collection(self):
    """websearch - search engine Python API for non-existing collection"""
    hits = perform_request_search(c='Foo')
    self.assertEqual([], hits)
def test_search_engine_python_api_for_range_of_records(self):
    """websearch - search engine Python API for range of records"""
    # recid/recidb select the half-open recid range [1, 10).
    hits = perform_request_search(recid=1, recidb=10)
    self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8, 9], hits)
def test_search_engine_python_api_ranked_by_citation(self):
    """websearch - search engine Python API for citation ranking"""
    # Records citing recid 81, in citation-ranked order.
    ranked = perform_request_search(p='recid:81', rm='citation')
    self.assertEqual([82, 83, 87, 89], ranked)
def test_search_engine_python_api_textmarc_full(self):
    """websearch - search engine Python API for Text MARC output, full"""
    # cStringIO (Python 2 stdlib) stands in for the web request object:
    # perform_request_search() writes its rendered output to `req`.
    import cStringIO
    tmp = cStringIO.StringIO()
    perform_request_search(req=tmp, p='higgs', of='tm')
    out = tmp.getvalue()
    tmp.close()
    # Full Text MARC dump of the three demo records matching 'higgs'
    # (107, 85, 1).  The volatile 005 modification-date control fields
    # and the site URL are interpolated below so the fixture stays
    # stable across installations and record revisions.
    self.assertEqual(out, """\
000000107 001__ 107
000000107 003__ SzGeCERN
000000107 005__ %(rec_107_rev)s
000000107 035__ $$9SPIRES$$a4066995
000000107 037__ $$aCERN-EP-99-060
000000107 041__ $$aeng
000000107 084__ $$2CERN Library$$aEP-1999-060
000000107 088__ $$9SCAN-9910048
000000107 088__ $$aCERN-L3-175
000000107 110__ $$aCERN. Geneva
000000107 245__ $$aLimits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$
000000107 260__ $$c1999
000000107 269__ $$aGeneva$$bCERN$$c26 Apr 1999
000000107 300__ $$a18 p
000000107 490__ $$aALEPH Papers
000000107 500__ $$aPreprint not submitted to publication
000000107 65017 $$2SzGeCERN$$aParticle Physics - Experiment
000000107 690C_ $$aCERN
000000107 690C_ $$aPREPRINT
000000107 693__ $$aCERN LEP$$eALEPH
000000107 693__ $$aCERN LEP$$eDELPHI
000000107 693__ $$aCERN LEP$$eL3
000000107 693__ $$aCERN LEP$$eOPAL
000000107 695__ $$9MEDLINE$$asearches Higgs bosons
000000107 697C_ $$aLexiHiggs
000000107 710__ $$5EP
000000107 710__ $$gALEPH Collaboration
000000107 710__ $$gDELPHI Collaboration
000000107 710__ $$gL3 Collaboration
000000107 710__ $$gLEP Working Group for Higgs Boson Searches
000000107 710__ $$gOPAL Collaboration
000000107 901__ $$uCERN
000000107 916__ $$sh$$w199941
000000107 960__ $$a11
000000107 963__ $$aPUBLIC
000000107 970__ $$a000330309CER
000000107 980__ $$aARTICLE
000000085 001__ 85
000000085 003__ SzGeCERN
000000085 005__ %(rec_85_rev)s
000000085 035__ $$a2356302CERCER
000000085 035__ $$9SLAC$$a5423422
000000085 037__ $$ahep-th/0212181
000000085 041__ $$aeng
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 245__ $$a3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS
000000085 260__ $$c2003
000000085 269__ $$c16 Dec 2002
000000085 300__ $$a8 p
000000085 520__ $$aWe study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.
000000085 65017 $$2SzGeCERN$$aParticle Physics - Theory
000000085 690C_ $$aARTICLE
000000085 695__ $$9LANL EDS$$aHigh Energy Physics - Theory
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.pdf
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.ps.gz
000000085 859__ $$falberto.zaffaroni@mib.infn.it
000000085 909C4 $$c289-293$$pPhys. Lett. B$$v561$$y2003
000000085 916__ $$sn$$w200251
000000085 960__ $$a13
000000085 961__ $$c20060823$$h0007$$lCER01$$x20021217
000000085 963__ $$aPUBLIC
000000085 970__ $$a002356302CER
000000085 980__ $$aARTICLE
000000085 999C5 $$mD. Francia and A. Sagnotti,$$o[1]$$rhep-th/0207002$$sPhys. Lett. B 543 (2002) 303
000000085 999C5 $$mP. Haggi-Mani and B. Sundborg,$$o[1]$$rhep-th/0002189$$sJ. High Energy Phys. 0004 (2000) 031
000000085 999C5 $$mB. Sundborg,$$o[1]$$rhep-th/0103247$$sNucl. Phys. B, Proc. Suppl. 102 (2001) 113
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0105001$$sJ. High Energy Phys. 0109 (2001) 036
000000085 999C5 $$mA. Mikhailov,$$o[1]$$rhep-th/0201019
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205131$$sNucl. Phys. B 644 (2002) 303
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205132$$sJ. High Energy Phys. 0207 (2002) 055
000000085 999C5 $$mJ. Engquist, E. Sezgin and P. Sundell,$$o[1]$$rhep-th/0207101$$sClass. Quantum Gravity 19 (2002) 6175
000000085 999C5 $$mM. A. Vasiliev,$$o[1]$$rhep-th/9611024$$sInt. J. Mod. Phys. D 5 (1996) 763
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9808004$$sNucl. Phys. B 541 (1999) 323
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9906167$$sClass. Quantum Gravity 17 (2000) 1383
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sNucl. Phys. B 291 (1987) 141
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sPhys. Lett. B 189 (1987) 89
000000085 999C5 $$mI. R. Klebanov and A. M. Polyakov,$$o[3]$$rhep-th/0210114$$sPhys. Lett. B 550 (2002) 213
000000085 999C5 $$mM. A. Vasiliev,$$o[4]$$rhep-th/9910096
000000085 999C5 $$mT. Leonhardt, A. Meziane and W. Ruhl,$$o[5]$$rhep-th/0211092
000000085 999C5 $$mO. Aharony, M. Berkooz and E. Silverstein,$$o[6]$$rhep-th/0105309$$sJ. High Energy Phys. 0108 (2001) 006
000000085 999C5 $$mE. Witten,$$o[7]$$rhep-th/0112258
000000085 999C5 $$mM. Berkooz, A. Sever and A. Shomer$$o[8]$$rhep-th/0112264$$sJ. High Energy Phys. 0205 (2002) 034
000000085 999C5 $$mS. S. Gubser and I. Mitra,$$o[9]$$rhep-th/0210093
000000085 999C5 $$mS. S. Gubser and I. R. Klebanov,$$o[10]$$rhep-th/0212138
000000085 999C5 $$mM. Porrati,$$o[11]$$rhep-th/0112166$$sJ. High Energy Phys. 0204 (2002) 058
000000085 999C5 $$mK. G. Wilson and J. B. Kogut,$$o[12]$$sPhys. Rep. 12 (1974) 75
000000085 999C5 $$mI. R. Klebanov and E. Witten,$$o[13]$$rhep-th/9905104$$sNucl. Phys. B 556 (1999) 89
000000085 999C5 $$mW. Heidenreich,$$o[14]$$sJ. Math. Phys. 22 (1981) 1566
000000085 999C5 $$mD. Anselmi,$$o[15]$$rhep-th/0210123
000000001 001__ 1
000000001 005__ %(rec_1_rev)s
000000001 037__ $$aCERN-EX-0106015
000000001 100__ $$aPhotolab
000000001 245__ $$aALEPH experiment: Candidate of Higgs boson production
000000001 246_1 $$aExpérience ALEPH: Candidat de la production d'un boson Higgs
000000001 260__ $$c14 06 2000
000000001 340__ $$aFILM
000000001 520__ $$aCandidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.
000000001 65017 $$2SzGeCERN$$aExperiments and Tracks
000000001 6531_ $$aLEP
000000001 8560_ $$fneil.calder@cern.ch
000000001 8564_ $$u%(siteurl)s/record/1/files/0106015_01.jpg
000000001 8564_ $$u%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon$$xicon
000000001 909C0 $$o0003717PHOPHO
000000001 909C0 $$y2000
000000001 909C0 $$b81
000000001 909C1 $$c2001-06-14$$l50$$m2001-08-27$$oCM
000000001 909CP $$pBldg. 2
000000001 909CP $$rCalder, N
000000001 909CS $$sn$$w200231
000000001 980__ $$aPICTURE
""" % {'siteurl': CFG_SITE_URL,
       'rec_1_rev': get_fieldvalues(1, '005__')[0],
       'rec_85_rev': get_fieldvalues(85, '005__')[0],
       'rec_107_rev': get_fieldvalues(107, '005__')[0]})
def test_search_engine_python_api_textmarc_field_filtered(self):
    """websearch - search engine Python API for Text MARC output, field-filtered"""
    # Restrict the Text MARC output to the author fields (100/700) only.
    import cStringIO
    buf = cStringIO.StringIO()
    perform_request_search(req=buf, p='higgs', of='tm', ot=['100', '700'])
    marc = buf.getvalue()
    buf.close()
    expected = """\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000001 100__ $$aPhotolab
"""
    self.assertEqual(marc, expected)
def test_search_engine_python_api_for_intersect_results_with_one_collrec(self):
    """websearch - search engine Python API for intersect results with one collrec"""
    expected = {'Books & Reports': intbitset([19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])}
    result = intersect_results_with_collrecs(None, intbitset(range(0, 110)),
                                             ['Books & Reports'],
                                             0, 'id', 0, 'en', False)
    self.assertEqual(expected, result)
def test_search_engine_python_api_for_intersect_results_with_several_collrecs(self):
    """websearch - search engine Python API for intersect results with several collrecs"""
    expected = {'Books': intbitset([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]),
                'Reports': intbitset([19, 20]),
                'Theses': intbitset([35, 36, 37, 38, 39, 40, 41, 42, 105])}
    result = intersect_results_with_collrecs(None, intbitset(range(0, 110)),
                                             ['Books', 'Theses', 'Reports'],
                                             0, 'id', 0, 'en', False)
    self.assertEqual(expected, result)
def test_search_engine_python_api_textmarc_field_filtered_hidden_guest(self):
    """websearch - search engine Python API for Text MARC output, field-filtered, hidden field, no guest access"""
    # Field 595 is requested but hidden, so the guest output contains
    # only the 100 fields.
    import cStringIO
    buf = cStringIO.StringIO()
    perform_request_search(req=buf, p='higgs', of='tm', ot=['100', '595'])
    marc = buf.getvalue()
    buf.close()
    expected = """\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000001 100__ $$aPhotolab
"""
    self.assertEqual(marc, expected)
def test_search_engine_python_api_xmlmarc_full(self):
    """websearch - search engine Python API for XMLMARC output, full"""
    # cStringIO (Python 2 stdlib) stands in for the web request object:
    # perform_request_search() writes its rendered output to `req`.
    import cStringIO
    tmp = cStringIO.StringIO()
    perform_request_search(req=tmp, p='higgs', of='xm')
    out = tmp.getvalue()
    tmp.close()
    # Full MARCXML dump of the three demo records matching 'higgs'
    # (107, 85, 1).  The volatile 005 modification-date control fields
    # and the site URL are interpolated below so the fixture stays
    # stable across installations and record revisions.
    self.assertEqual(out, """\
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_107_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SPIRES</subfield>
<subfield code="a">4066995</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EP-99-060</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="084" ind1=" " ind2=" ">
<subfield code="2">CERN Library</subfield>
<subfield code="a">EP-1999-060</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="9">SCAN-9910048</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="a">CERN-L3-175</subfield>
</datafield>
<datafield tag="110" ind1=" " ind2=" ">
<subfield code="a">CERN. Geneva</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Limits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">1999</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="a">Geneva</subfield>
<subfield code="b">CERN</subfield>
<subfield code="c">26 Apr 1999</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">18 p</subfield>
</datafield>
<datafield tag="490" ind1=" " ind2=" ">
<subfield code="a">ALEPH Papers</subfield>
</datafield>
<datafield tag="500" ind1=" " ind2=" ">
<subfield code="a">Preprint not submitted to publication</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Experiment</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">CERN</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">PREPRINT</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">ALEPH</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">DELPHI</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">L3</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">OPAL</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">MEDLINE</subfield>
<subfield code="a">searches Higgs bosons</subfield>
</datafield>
<datafield tag="697" ind1="C" ind2=" ">
<subfield code="a">LexiHiggs</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="5">EP</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">ALEPH Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">DELPHI Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">L3 Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">LEP Working Group for Higgs Boson Searches</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">OPAL Collaboration</subfield>
</datafield>
<datafield tag="901" ind1=" " ind2=" ">
<subfield code="u">CERN</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">h</subfield>
<subfield code="w">199941</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">11</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">000330309CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_85_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="a">2356302CERCER</subfield>
</datafield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SLAC</subfield>
<subfield code="a">5423422</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">hep-th/0212181</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">2003</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="c">16 Dec 2002</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">8 p</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">We study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Theory</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">LANL EDS</subfield>
<subfield code="a">High Energy Physics - Theory</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/85/files/0212181.pdf</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/85/files/0212181.ps.gz</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="4">
<subfield code="c">289-293</subfield>
<subfield code="p">Phys. Lett. B</subfield>
<subfield code="v">561</subfield>
<subfield code="y">2003</subfield>
</datafield>
<datafield tag="859" ind1=" " ind2=" ">
<subfield code="f">alberto.zaffaroni@mib.infn.it</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">n</subfield>
<subfield code="w">200251</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">13</subfield>
</datafield>
<datafield tag="961" ind1=" " ind2=" ">
<subfield code="c">20060823</subfield>
<subfield code="h">0007</subfield>
<subfield code="l">CER01</subfield>
<subfield code="x">20021217</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">002356302CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Francia and A. Sagnotti,</subfield>
<subfield code="s">Phys. Lett. B 543 (2002) 303</subfield>
<subfield code="r">hep-th/0207002</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">P. Haggi-Mani and B. Sundborg,</subfield>
<subfield code="s">J. High Energy Phys. 0004 (2000) 031</subfield>
<subfield code="r">hep-th/0002189</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">B. Sundborg,</subfield>
<subfield code="s">Nucl. Phys. B, Proc. Suppl. 102 (2001) 113</subfield>
<subfield code="r">hep-th/0103247</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0109 (2001) 036</subfield>
<subfield code="r">hep-th/0105001</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">A. Mikhailov,</subfield>
<subfield code="r">hep-th/0201019</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Nucl. Phys. B 644 (2002) 303</subfield>
<subfield code="r">hep-th/0205131</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0207 (2002) 055</subfield>
<subfield code="r">hep-th/0205132</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">J. Engquist, E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Class. Quantum Gravity 19 (2002) 6175</subfield>
<subfield code="r">hep-th/0207101</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="s">Int. J. Mod. Phys. D 5 (1996) 763</subfield>
<subfield code="r">hep-th/9611024</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Nucl. Phys. B 541 (1999) 323</subfield>
<subfield code="r">hep-th/9808004</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Class. Quantum Gravity 17 (2000) 1383</subfield>
<subfield code="r">hep-th/9906167</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Nucl. Phys. B 291 (1987) 141</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Phys. Lett. B 189 (1987) 89</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">I. R. Klebanov and A. M. Polyakov,</subfield>
<subfield code="s">Phys. Lett. B 550 (2002) 213</subfield>
<subfield code="r">hep-th/0210114</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="r">hep-th/9910096</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="m">T. Leonhardt, A. Meziane and W. Ruhl,</subfield>
<subfield code="r">hep-th/0211092</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="m">O. Aharony, M. Berkooz and E. Silverstein,</subfield>
<subfield code="s">J. High Energy Phys. 0108 (2001) 006</subfield>
<subfield code="r">hep-th/0105309</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="m">E. Witten,</subfield>
<subfield code="r">hep-th/0112258</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="m">M. Berkooz, A. Sever and A. Shomer</subfield>
<subfield code="s">J. High Energy Phys. 0205 (2002) 034</subfield>
<subfield code="r">hep-th/0112264</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[9]</subfield>
<subfield code="m">S. S. Gubser and I. Mitra,</subfield>
<subfield code="r">hep-th/0210093</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[10]</subfield>
<subfield code="m">S. S. Gubser and I. R. Klebanov,</subfield>
<subfield code="r">hep-th/0212138</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="m">M. Porrati,</subfield>
<subfield code="s">J. High Energy Phys. 0204 (2002) 058</subfield>
<subfield code="r">hep-th/0112166</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[12]</subfield>
<subfield code="m">K. G. Wilson and J. B. Kogut,</subfield>
<subfield code="s">Phys. Rep. 12 (1974) 75</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[13]</subfield>
<subfield code="m">I. R. Klebanov and E. Witten,</subfield>
<subfield code="s">Nucl. Phys. B 556 (1999) 89</subfield>
<subfield code="r">hep-th/9905104</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[14]</subfield>
<subfield code="m">W. Heidenreich,</subfield>
<subfield code="s">J. Math. Phys. 22 (1981) 1566</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[15]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="r">hep-th/0210123</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<controlfield tag="005">%(rec_1_rev)s</controlfield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EX-0106015</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">ALEPH experiment: Candidate of Higgs boson production</subfield>
</datafield>
<datafield tag="246" ind1=" " ind2="1">
<subfield code="a">Expérience ALEPH: Candidat de la production d'un boson Higgs</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">14 06 2000</subfield>
</datafield>
<datafield tag="340" ind1=" " ind2=" ">
<subfield code="a">FILM</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">Candidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Experiments and Tracks</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="a">LEP</subfield>
</datafield>
<datafield tag="856" ind1="0" ind2=" ">
<subfield code="f">neil.calder@cern.ch</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.jpg</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon</subfield>
<subfield code="x">icon</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="o">0003717PHOPHO</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="y">2000</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="b">81</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="1">
<subfield code="c">2001-06-14</subfield>
<subfield code="l">50</subfield>
<subfield code="m">2001-08-27</subfield>
<subfield code="o">CM</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="p">Bldg. 2</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="r">Calder, N</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="S">
<subfield code="s">n</subfield>
<subfield code="w">200231</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">PICTURE</subfield>
</datafield>
</record>
</collection>""" % {'siteurl': CFG_SITE_URL,
                    'rec_1_rev': get_fieldvalues(1, '005__')[0],
                    'rec_85_rev': get_fieldvalues(85, '005__')[0],
                    'rec_107_rev': get_fieldvalues(107, '005__')[0]})
def test_search_engine_python_api_xmlmarc_field_filtered(self):
    """websearch - search engine Python API for XMLMARC output, field-filtered"""
    # we are testing example from /help/hacking/search-engine-api
    import cStringIO
    tmp = cStringIO.StringIO()
    perform_request_search(req=tmp, p='higgs', of='xm', ot=['100', '700'])
    out = tmp.getvalue()
    tmp.close()
    # Only the requested author fields (100/700) survive the filter;
    # record 107 has neither, so only its 001 control field is emitted.
    self.assertEqual(out, """\
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>""")
def test_search_engine_python_api_xmlmarc_field_filtered_hidden_guest(self):
    """websearch - search engine Python API for XMLMARC output, field-filtered, hidden field, no guest access"""
    # we are testing example from /help/hacking/search-engine-api
    import cStringIO
    tmp = cStringIO.StringIO()
    perform_request_search(req=tmp, p='higgs', of='xm', ot=['100', '595'])
    out = tmp.getvalue()
    tmp.close()
    # Field 595 is requested but hidden from guests, so only the 100
    # fields (and the 001 control fields) appear in the output.
    self.assertEqual(out, """\
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>""")
def test_search_engine_python_api_long_author_with_quotes(self):
    """websearch - search engine Python API for p=author:"Abbott, R B"

    NOTE(review): the old docstring misspelled the queried author
    ("Abbot" vs the actual query 'author:"Abbott, R B"') and carried a
    stray quote.  This test was written along with a bug report and
    still needs fixing once the underlying issue is resolved.
    """
    self.assertEqual([16], perform_request_search(p='author:"Abbott, R B"'))
class WebSearchSearchEngineWebAPITest(unittest.TestCase):
"""Check typical search engine Web API calls on the demo data."""
def test_search_engine_web_api_for_failed_query(self):
    """websearch - search engine Web API for failed query"""
    # test_web_page_content returns a list of discrepancies; empty == OK.
    errors = test_web_page_content(CFG_SITE_URL + '/search?p=aoeuidhtns&of=id',
                                   expected_text="[]")
    self.assertEqual([], errors)
def test_search_engine_web_api_for_failed_query_format_intbitset(self):
    """websearch - search engine Web API for failed query, output format intbitset"""
    # An empty intbitset serialized with fastdump() is the expected body.
    errors = test_web_page_content(CFG_SITE_URL + '/search?p=aoeuidhtns&of=intbitset',
                                   expected_text=intbitset().fastdump())
    self.assertEqual([], errors)
def test_search_engine_web_api_for_successful_query(self):
    """websearch - search engine Web API for successful query"""
    errors = test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=id',
                                   expected_text="[8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118]")
    self.assertEqual([], errors)
def test_search_engine_web_api_ignore_paging_parameter(self):
    """websearch - search engine Web API for successful query, ignore paging parameters"""
    # rg/jrec only affect display paging; the id output must be complete.
    errors = test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=id&rg=5&jrec=3',
                                   expected_text="[8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118]")
    self.assertEqual([], errors)
def test_search_engine_web_api_respect_sorting_parameter(self):
    """websearch - search engine Web API for successful query, respect sorting parameters"""
    # Record 77 is returned only for the admin user (guest sees [84, 85]).
    errors = test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id',
                                   expected_text="[84, 85]")
    self.assertEqual([], errors)
    errors = test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id',
                                   username="admin",
                                   expected_text="[77, 84, 85]")
    self.assertEqual([], errors)
    # Sorting by the 909C4v field reorders the hits.
    errors = test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id&sf=909C4v',
                                   expected_text="[85, 84]")
    self.assertEqual([], errors)
    errors = test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id&sf=909C4v',
                                   username="admin",
                                   expected_text="[77, 85, 84]")
    self.assertEqual([], errors)
    # The intbitset output is unordered by nature, so sorting is moot there.
    errors = test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=intbitset&sf=909C4v',
                                   username="admin",
                                   expected_text=intbitset([77, 84, 85]).fastdump())
    self.assertEqual([], errors)
def test_search_engine_web_api_respect_ranking_parameter(self):
"""websearch - search engine Web API for successful query, respect ranking parameters"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id',
expected_text="[84, 85]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id',
username="admin",
expected_text="[77, 84, 85]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id&rm=citation',
expected_text="[85, 84]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=id&rm=citation',
username="admin",
expected_text="[85, 77, 84]"))
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=klebanov&of=intbitset&rm=citation',
username="admin",
expected_text=intbitset([77, 84, 85]).fastdump()))
def test_search_engine_web_api_for_existing_record(self):
"""websearch - search engine Web API for existing record"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?recid=8&of=id',
expected_text="[8]"))
def test_search_engine_web_api_for_nonexisting_record(self):
"""websearch - search engine Web API for non-existing record"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?recid=123456789&of=id',
expected_text="[]"))
def test_search_engine_web_api_for_nonexisting_collection(self):
"""websearch - search engine Web API for non-existing collection"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?c=Foo&of=id',
expected_text="[]"))
def test_search_engine_web_api_for_range_of_records(self):
"""websearch - search engine Web API for range of records"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?recid=1&recidb=10&of=id',
expected_text="[1, 2, 3, 4, 5, 6, 7, 8, 9]"))
def test_search_engine_web_api_ranked_by_citation(self):
"""websearch - search engine Web API for citation ranking"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A81&rm=citation&of=id',
expected_text="[82, 83, 87, 89]"))
    def test_search_engine_web_api_textmarc_full(self):
        """websearch - search engine Web API for Text MARC output, full"""
        # The query 'higgs' matches demo records 107, 85 and 1; their full
        # Text MARC output is compared verbatim.  Installation-specific
        # values (site URL in 8564_ links, record revision timestamps in
        # tag 005) are interpolated into the expected text below.
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm',
                                  expected_text="""\
000000107 001__ 107
000000107 003__ SzGeCERN
000000107 005__ %(rec_107_rev)s
000000107 035__ $$9SPIRES$$a4066995
000000107 037__ $$aCERN-EP-99-060
000000107 041__ $$aeng
000000107 084__ $$2CERN Library$$aEP-1999-060
000000107 088__ $$9SCAN-9910048
000000107 088__ $$aCERN-L3-175
000000107 110__ $$aCERN. Geneva
000000107 245__ $$aLimits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$
000000107 260__ $$c1999
000000107 269__ $$aGeneva$$bCERN$$c26 Apr 1999
000000107 300__ $$a18 p
000000107 490__ $$aALEPH Papers
000000107 500__ $$aPreprint not submitted to publication
000000107 65017 $$2SzGeCERN$$aParticle Physics - Experiment
000000107 690C_ $$aCERN
000000107 690C_ $$aPREPRINT
000000107 693__ $$aCERN LEP$$eALEPH
000000107 693__ $$aCERN LEP$$eDELPHI
000000107 693__ $$aCERN LEP$$eL3
000000107 693__ $$aCERN LEP$$eOPAL
000000107 695__ $$9MEDLINE$$asearches Higgs bosons
000000107 697C_ $$aLexiHiggs
000000107 710__ $$5EP
000000107 710__ $$gALEPH Collaboration
000000107 710__ $$gDELPHI Collaboration
000000107 710__ $$gL3 Collaboration
000000107 710__ $$gLEP Working Group for Higgs Boson Searches
000000107 710__ $$gOPAL Collaboration
000000107 901__ $$uCERN
000000107 916__ $$sh$$w199941
000000107 960__ $$a11
000000107 963__ $$aPUBLIC
000000107 970__ $$a000330309CER
000000107 980__ $$aARTICLE
000000085 001__ 85
000000085 003__ SzGeCERN
000000085 005__ %(rec_85_rev)s
000000085 035__ $$a2356302CERCER
000000085 035__ $$9SLAC$$a5423422
000000085 037__ $$ahep-th/0212181
000000085 041__ $$aeng
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 245__ $$a3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS
000000085 260__ $$c2003
000000085 269__ $$c16 Dec 2002
000000085 300__ $$a8 p
000000085 520__ $$aWe study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.
000000085 65017 $$2SzGeCERN$$aParticle Physics - Theory
000000085 690C_ $$aARTICLE
000000085 695__ $$9LANL EDS$$aHigh Energy Physics - Theory
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.pdf
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.ps.gz
000000085 859__ $$falberto.zaffaroni@mib.infn.it
000000085 909C4 $$c289-293$$pPhys. Lett. B$$v561$$y2003
000000085 916__ $$sn$$w200251
000000085 960__ $$a13
000000085 961__ $$c20060823$$h0007$$lCER01$$x20021217
000000085 963__ $$aPUBLIC
000000085 970__ $$a002356302CER
000000085 980__ $$aARTICLE
000000085 999C5 $$mD. Francia and A. Sagnotti,$$o[1]$$rhep-th/0207002$$sPhys. Lett. B 543 (2002) 303
000000085 999C5 $$mP. Haggi-Mani and B. Sundborg,$$o[1]$$rhep-th/0002189$$sJ. High Energy Phys. 0004 (2000) 031
000000085 999C5 $$mB. Sundborg,$$o[1]$$rhep-th/0103247$$sNucl. Phys. B, Proc. Suppl. 102 (2001) 113
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0105001$$sJ. High Energy Phys. 0109 (2001) 036
000000085 999C5 $$mA. Mikhailov,$$o[1]$$rhep-th/0201019
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205131$$sNucl. Phys. B 644 (2002) 303
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205132$$sJ. High Energy Phys. 0207 (2002) 055
000000085 999C5 $$mJ. Engquist, E. Sezgin and P. Sundell,$$o[1]$$rhep-th/0207101$$sClass. Quantum Gravity 19 (2002) 6175
000000085 999C5 $$mM. A. Vasiliev,$$o[1]$$rhep-th/9611024$$sInt. J. Mod. Phys. D 5 (1996) 763
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9808004$$sNucl. Phys. B 541 (1999) 323
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9906167$$sClass. Quantum Gravity 17 (2000) 1383
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sNucl. Phys. B 291 (1987) 141
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sPhys. Lett. B 189 (1987) 89
000000085 999C5 $$mI. R. Klebanov and A. M. Polyakov,$$o[3]$$rhep-th/0210114$$sPhys. Lett. B 550 (2002) 213
000000085 999C5 $$mM. A. Vasiliev,$$o[4]$$rhep-th/9910096
000000085 999C5 $$mT. Leonhardt, A. Meziane and W. Ruhl,$$o[5]$$rhep-th/0211092
000000085 999C5 $$mO. Aharony, M. Berkooz and E. Silverstein,$$o[6]$$rhep-th/0105309$$sJ. High Energy Phys. 0108 (2001) 006
000000085 999C5 $$mE. Witten,$$o[7]$$rhep-th/0112258
000000085 999C5 $$mM. Berkooz, A. Sever and A. Shomer$$o[8]$$rhep-th/0112264$$sJ. High Energy Phys. 0205 (2002) 034
000000085 999C5 $$mS. S. Gubser and I. Mitra,$$o[9]$$rhep-th/0210093
000000085 999C5 $$mS. S. Gubser and I. R. Klebanov,$$o[10]$$rhep-th/0212138
000000085 999C5 $$mM. Porrati,$$o[11]$$rhep-th/0112166$$sJ. High Energy Phys. 0204 (2002) 058
000000085 999C5 $$mK. G. Wilson and J. B. Kogut,$$o[12]$$sPhys. Rep. 12 (1974) 75
000000085 999C5 $$mI. R. Klebanov and E. Witten,$$o[13]$$rhep-th/9905104$$sNucl. Phys. B 556 (1999) 89
000000085 999C5 $$mW. Heidenreich,$$o[14]$$sJ. Math. Phys. 22 (1981) 1566
000000085 999C5 $$mD. Anselmi,$$o[15]$$rhep-th/0210123
000000001 001__ 1
000000001 005__ %(rec_1_rev)s
000000001 037__ $$aCERN-EX-0106015
000000001 100__ $$aPhotolab
000000001 245__ $$aALEPH experiment: Candidate of Higgs boson production
000000001 246_1 $$aExpérience ALEPH: Candidat de la production d'un boson Higgs
000000001 260__ $$c14 06 2000
000000001 340__ $$aFILM
000000001 520__ $$aCandidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.
000000001 65017 $$2SzGeCERN$$aExperiments and Tracks
000000001 6531_ $$aLEP
000000001 8560_ $$fneil.calder@cern.ch
000000001 8564_ $$u%(siteurl)s/record/1/files/0106015_01.jpg
000000001 8564_ $$u%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon$$xicon
000000001 909C0 $$o0003717PHOPHO
000000001 909C0 $$y2000
000000001 909C0 $$b81
000000001 909C1 $$c2001-06-14$$l50$$m2001-08-27$$oCM
000000001 909CP $$pBldg. 2
000000001 909CP $$rCalder, N
000000001 909CS $$sn$$w200231
000000001 980__ $$aPICTURE
""" % {'siteurl': CFG_SITE_URL,
       'rec_1_rev': get_fieldvalues(1, '005__')[0],
       'rec_85_rev': get_fieldvalues(85, '005__')[0],
       'rec_107_rev': get_fieldvalues(107, '005__')[0]}))
def test_search_engine_web_api_textmarc_field_filtered(self):
"""websearch - search engine Web API for Text MARC output, field-filtered"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=100,700',
expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000001 100__ $$aPhotolab
"""))
def test_search_engine_web_api_textmarc_field_filtered_hidden_guest(self):
"""websearch - search engine Web API for Text MARC output, field-filtered, hidden field, no guest access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=100,595',
expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000001 100__ $$aPhotolab
"""))
def test_search_engine_web_api_textmarc_field_filtered_hidden_admin(self):
"""websearch - search engine Web API for Text MARC output, field-filtered, hidden field, admin access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=100,595',
username='admin',
expected_text="""\
000000107 595__ $$aNo authors
000000107 595__ $$aCERN-EP
000000107 595__ $$aOA
000000107 595__ $$aSIS:200740 PR/LKR not found (from SLAC, INSPEC)
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 595__ $$aLANL EDS
000000085 595__ $$aSIS LANLPUBL2004
000000085 595__ $$aSIS:2004 PR/LKR added
000000001 100__ $$aPhotolab
000000001 595__ $$aPress
"""))
def test_search_engine_web_api_textmarc_subfield_values(self):
"""websearch - search engine Web API for Text MARC output, subfield values"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=700__a',
expected_text="""\
Porrati, Massimo
Zaffaroni, A
"""))
    def test_search_engine_web_api_xmlmarc_full(self):
        """websearch - search engine Web API for XMLMARC output, full"""
        # The query 'higgs' matches demo records 107, 85 and 1; their full
        # MARCXML output is compared verbatim.  Installation-specific
        # values (site URL in 856 links, record revision timestamps in
        # controlfield 005) are interpolated into the expected text below.
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm',
                                  expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_107_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SPIRES</subfield>
<subfield code="a">4066995</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EP-99-060</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="084" ind1=" " ind2=" ">
<subfield code="2">CERN Library</subfield>
<subfield code="a">EP-1999-060</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="9">SCAN-9910048</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="a">CERN-L3-175</subfield>
</datafield>
<datafield tag="110" ind1=" " ind2=" ">
<subfield code="a">CERN. Geneva</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Limits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">1999</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="a">Geneva</subfield>
<subfield code="b">CERN</subfield>
<subfield code="c">26 Apr 1999</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">18 p</subfield>
</datafield>
<datafield tag="490" ind1=" " ind2=" ">
<subfield code="a">ALEPH Papers</subfield>
</datafield>
<datafield tag="500" ind1=" " ind2=" ">
<subfield code="a">Preprint not submitted to publication</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Experiment</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">CERN</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">PREPRINT</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">ALEPH</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">DELPHI</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">L3</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">OPAL</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">MEDLINE</subfield>
<subfield code="a">searches Higgs bosons</subfield>
</datafield>
<datafield tag="697" ind1="C" ind2=" ">
<subfield code="a">LexiHiggs</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="5">EP</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">ALEPH Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">DELPHI Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">L3 Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">LEP Working Group for Higgs Boson Searches</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">OPAL Collaboration</subfield>
</datafield>
<datafield tag="901" ind1=" " ind2=" ">
<subfield code="u">CERN</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">h</subfield>
<subfield code="w">199941</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">11</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">000330309CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_85_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="a">2356302CERCER</subfield>
</datafield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SLAC</subfield>
<subfield code="a">5423422</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">hep-th/0212181</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">2003</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="c">16 Dec 2002</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">8 p</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">We study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Theory</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">LANL EDS</subfield>
<subfield code="a">High Energy Physics - Theory</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/85/files/0212181.pdf</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/85/files/0212181.ps.gz</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="4">
<subfield code="c">289-293</subfield>
<subfield code="p">Phys. Lett. B</subfield>
<subfield code="v">561</subfield>
<subfield code="y">2003</subfield>
</datafield>
<datafield tag="859" ind1=" " ind2=" ">
<subfield code="f">alberto.zaffaroni@mib.infn.it</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">n</subfield>
<subfield code="w">200251</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">13</subfield>
</datafield>
<datafield tag="961" ind1=" " ind2=" ">
<subfield code="c">20060823</subfield>
<subfield code="h">0007</subfield>
<subfield code="l">CER01</subfield>
<subfield code="x">20021217</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">002356302CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Francia and A. Sagnotti,</subfield>
<subfield code="s">Phys. Lett. B 543 (2002) 303</subfield>
<subfield code="r">hep-th/0207002</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">P. Haggi-Mani and B. Sundborg,</subfield>
<subfield code="s">J. High Energy Phys. 0004 (2000) 031</subfield>
<subfield code="r">hep-th/0002189</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">B. Sundborg,</subfield>
<subfield code="s">Nucl. Phys. B, Proc. Suppl. 102 (2001) 113</subfield>
<subfield code="r">hep-th/0103247</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0109 (2001) 036</subfield>
<subfield code="r">hep-th/0105001</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">A. Mikhailov,</subfield>
<subfield code="r">hep-th/0201019</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Nucl. Phys. B 644 (2002) 303</subfield>
<subfield code="r">hep-th/0205131</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0207 (2002) 055</subfield>
<subfield code="r">hep-th/0205132</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">J. Engquist, E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Class. Quantum Gravity 19 (2002) 6175</subfield>
<subfield code="r">hep-th/0207101</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="s">Int. J. Mod. Phys. D 5 (1996) 763</subfield>
<subfield code="r">hep-th/9611024</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Nucl. Phys. B 541 (1999) 323</subfield>
<subfield code="r">hep-th/9808004</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Class. Quantum Gravity 17 (2000) 1383</subfield>
<subfield code="r">hep-th/9906167</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Nucl. Phys. B 291 (1987) 141</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Phys. Lett. B 189 (1987) 89</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">I. R. Klebanov and A. M. Polyakov,</subfield>
<subfield code="s">Phys. Lett. B 550 (2002) 213</subfield>
<subfield code="r">hep-th/0210114</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="r">hep-th/9910096</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="m">T. Leonhardt, A. Meziane and W. Ruhl,</subfield>
<subfield code="r">hep-th/0211092</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="m">O. Aharony, M. Berkooz and E. Silverstein,</subfield>
<subfield code="s">J. High Energy Phys. 0108 (2001) 006</subfield>
<subfield code="r">hep-th/0105309</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="m">E. Witten,</subfield>
<subfield code="r">hep-th/0112258</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="m">M. Berkooz, A. Sever and A. Shomer</subfield>
<subfield code="s">J. High Energy Phys. 0205 (2002) 034</subfield>
<subfield code="r">hep-th/0112264</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[9]</subfield>
<subfield code="m">S. S. Gubser and I. Mitra,</subfield>
<subfield code="r">hep-th/0210093</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[10]</subfield>
<subfield code="m">S. S. Gubser and I. R. Klebanov,</subfield>
<subfield code="r">hep-th/0212138</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="m">M. Porrati,</subfield>
<subfield code="s">J. High Energy Phys. 0204 (2002) 058</subfield>
<subfield code="r">hep-th/0112166</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[12]</subfield>
<subfield code="m">K. G. Wilson and J. B. Kogut,</subfield>
<subfield code="s">Phys. Rep. 12 (1974) 75</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[13]</subfield>
<subfield code="m">I. R. Klebanov and E. Witten,</subfield>
<subfield code="s">Nucl. Phys. B 556 (1999) 89</subfield>
<subfield code="r">hep-th/9905104</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[14]</subfield>
<subfield code="m">W. Heidenreich,</subfield>
<subfield code="s">J. Math. Phys. 22 (1981) 1566</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[15]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="r">hep-th/0210123</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<controlfield tag="005">%(rec_1_rev)s</controlfield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EX-0106015</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">ALEPH experiment: Candidate of Higgs boson production</subfield>
</datafield>
<datafield tag="246" ind1=" " ind2="1">
<subfield code="a">Expérience ALEPH: Candidat de la production d'un boson Higgs</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">14 06 2000</subfield>
</datafield>
<datafield tag="340" ind1=" " ind2=" ">
<subfield code="a">FILM</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">Candidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Experiments and Tracks</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="a">LEP</subfield>
</datafield>
<datafield tag="856" ind1="0" ind2=" ">
<subfield code="f">neil.calder@cern.ch</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.jpg</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon</subfield>
<subfield code="x">icon</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="o">0003717PHOPHO</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="y">2000</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="b">81</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="1">
<subfield code="c">2001-06-14</subfield>
<subfield code="l">50</subfield>
<subfield code="m">2001-08-27</subfield>
<subfield code="o">CM</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="p">Bldg. 2</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="r">Calder, N</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="S">
<subfield code="s">n</subfield>
<subfield code="w">200231</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">PICTURE</subfield>
</datafield>
</record>
</collection>""" % {'siteurl': CFG_SITE_URL,
                    'rec_1_rev': get_fieldvalues(1, '005__')[0],
                    'rec_85_rev': get_fieldvalues(85, '005__')[0],
                    'rec_107_rev': get_fieldvalues(107, '005__')[0]}))
def test_search_engine_web_api_xmlmarc_field_filtered(self):
"""websearch - search engine Web API for XMLMARC output, field-filtered"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm&ot=100,700',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>"""))
def test_search_engine_web_api_xmlmarc_field_filtered_hidden_guest(self):
"""websearch - search engine Web API for XMLMARC output, field-filtered, hidden field, no guest access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm&ot=100,595',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>"""))
def test_search_engine_web_api_xmlmarc_field_filtered_hidden_admin(self):
"""websearch - search engine Web API for XMLMARC output, field-filtered, hidden field, admin access"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm&ot=100,595',
username='admin',
expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">No authors</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">CERN-EP</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">OA</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS:200740 PR/LKR not found (from SLAC, INSPEC)</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">LANL EDS</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS LANLPUBL2004</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS:2004 PR/LKR added</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">Press</subfield>
</datafield>
</record>
</collection>"""))
class WebSearchRecordWebAPITest(unittest.TestCase):
    """Check typical /record Web API calls on the demo data."""

    def test_record_web_api_textmarc_full(self):
        """websearch - /record Web API for TextMARC output, full"""
        # The %(...)s placeholders are interpolated below: the site URL and
        # the record's revision timestamp (MARC 005) are installation-specific.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/record/85?of=tm',
                                               expected_text="""\
000000085 001__ 85
000000085 003__ SzGeCERN
000000085 005__ %(rec_85_rev)s
000000085 035__ $$a2356302CERCER
000000085 035__ $$9SLAC$$a5423422
000000085 037__ $$ahep-th/0212181
000000085 041__ $$aeng
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 245__ $$a3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS
000000085 260__ $$c2003
000000085 269__ $$c16 Dec 2002
000000085 300__ $$a8 p
000000085 520__ $$aWe study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.
000000085 65017 $$2SzGeCERN$$aParticle Physics - Theory
000000085 690C_ $$aARTICLE
000000085 695__ $$9LANL EDS$$aHigh Energy Physics - Theory
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.pdf
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.ps.gz
000000085 859__ $$falberto.zaffaroni@mib.infn.it
000000085 909C4 $$c289-293$$pPhys. Lett. B$$v561$$y2003
000000085 916__ $$sn$$w200251
000000085 960__ $$a13
000000085 961__ $$c20060823$$h0007$$lCER01$$x20021217
000000085 963__ $$aPUBLIC
000000085 970__ $$a002356302CER
000000085 980__ $$aARTICLE
000000085 999C5 $$mD. Francia and A. Sagnotti,$$o[1]$$rhep-th/0207002$$sPhys. Lett. B 543 (2002) 303
000000085 999C5 $$mP. Haggi-Mani and B. Sundborg,$$o[1]$$rhep-th/0002189$$sJ. High Energy Phys. 0004 (2000) 031
000000085 999C5 $$mB. Sundborg,$$o[1]$$rhep-th/0103247$$sNucl. Phys. B, Proc. Suppl. 102 (2001) 113
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0105001$$sJ. High Energy Phys. 0109 (2001) 036
000000085 999C5 $$mA. Mikhailov,$$o[1]$$rhep-th/0201019
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205131$$sNucl. Phys. B 644 (2002) 303
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205132$$sJ. High Energy Phys. 0207 (2002) 055
000000085 999C5 $$mJ. Engquist, E. Sezgin and P. Sundell,$$o[1]$$rhep-th/0207101$$sClass. Quantum Gravity 19 (2002) 6175
000000085 999C5 $$mM. A. Vasiliev,$$o[1]$$rhep-th/9611024$$sInt. J. Mod. Phys. D 5 (1996) 763
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9808004$$sNucl. Phys. B 541 (1999) 323
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9906167$$sClass. Quantum Gravity 17 (2000) 1383
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sNucl. Phys. B 291 (1987) 141
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sPhys. Lett. B 189 (1987) 89
000000085 999C5 $$mI. R. Klebanov and A. M. Polyakov,$$o[3]$$rhep-th/0210114$$sPhys. Lett. B 550 (2002) 213
000000085 999C5 $$mM. A. Vasiliev,$$o[4]$$rhep-th/9910096
000000085 999C5 $$mT. Leonhardt, A. Meziane and W. Ruhl,$$o[5]$$rhep-th/0211092
000000085 999C5 $$mO. Aharony, M. Berkooz and E. Silverstein,$$o[6]$$rhep-th/0105309$$sJ. High Energy Phys. 0108 (2001) 006
000000085 999C5 $$mE. Witten,$$o[7]$$rhep-th/0112258
000000085 999C5 $$mM. Berkooz, A. Sever and A. Shomer$$o[8]$$rhep-th/0112264$$sJ. High Energy Phys. 0205 (2002) 034
000000085 999C5 $$mS. S. Gubser and I. Mitra,$$o[9]$$rhep-th/0210093
000000085 999C5 $$mS. S. Gubser and I. R. Klebanov,$$o[10]$$rhep-th/0212138
000000085 999C5 $$mM. Porrati,$$o[11]$$rhep-th/0112166$$sJ. High Energy Phys. 0204 (2002) 058
000000085 999C5 $$mK. G. Wilson and J. B. Kogut,$$o[12]$$sPhys. Rep. 12 (1974) 75
000000085 999C5 $$mI. R. Klebanov and E. Witten,$$o[13]$$rhep-th/9905104$$sNucl. Phys. B 556 (1999) 89
000000085 999C5 $$mW. Heidenreich,$$o[14]$$sJ. Math. Phys. 22 (1981) 1566
000000085 999C5 $$mD. Anselmi,$$o[15]$$rhep-th/0210123
""" % {'siteurl': CFG_SITE_URL,
       'rec_85_rev': get_fieldvalues(85, '005__')[0]}))

    def test_record_web_api_xmlmarc_full(self):
        """websearch - /record Web API for XMLMARC output, full"""
        # Same record as above but rendered as MARCXML; field order follows
        # what the demo-site formatter actually emits (e.g. 859 after 909C4).
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/record/85?of=xm',
                                               expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">85</controlfield>
  <controlfield tag="003">SzGeCERN</controlfield>
  <controlfield tag="005">%(rec_85_rev)s</controlfield>
  <datafield tag="035" ind1=" " ind2=" ">
    <subfield code="a">2356302CERCER</subfield>
  </datafield>
  <datafield tag="035" ind1=" " ind2=" ">
    <subfield code="9">SLAC</subfield>
    <subfield code="a">5423422</subfield>
  </datafield>
  <datafield tag="037" ind1=" " ind2=" ">
    <subfield code="a">hep-th/0212181</subfield>
  </datafield>
  <datafield tag="041" ind1=" " ind2=" ">
    <subfield code="a">eng</subfield>
  </datafield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Girardello, L</subfield>
    <subfield code="u">INFN</subfield>
    <subfield code="u">Universita di Milano-Bicocca</subfield>
  </datafield>
  <datafield tag="245" ind1=" " ind2=" ">
    <subfield code="a">3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS</subfield>
  </datafield>
  <datafield tag="260" ind1=" " ind2=" ">
    <subfield code="c">2003</subfield>
  </datafield>
  <datafield tag="269" ind1=" " ind2=" ">
    <subfield code="c">16 Dec 2002</subfield>
  </datafield>
  <datafield tag="300" ind1=" " ind2=" ">
    <subfield code="a">8 p</subfield>
  </datafield>
  <datafield tag="520" ind1=" " ind2=" ">
    <subfield code="a">We study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.</subfield>
  </datafield>
  <datafield tag="650" ind1="1" ind2="7">
    <subfield code="2">SzGeCERN</subfield>
    <subfield code="a">Particle Physics - Theory</subfield>
  </datafield>
  <datafield tag="690" ind1="C" ind2=" ">
    <subfield code="a">ARTICLE</subfield>
  </datafield>
  <datafield tag="695" ind1=" " ind2=" ">
    <subfield code="9">LANL EDS</subfield>
    <subfield code="a">High Energy Physics - Theory</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Porrati, Massimo</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Zaffaroni, A</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="u">%(siteurl)s/record/85/files/0212181.pdf</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="u">%(siteurl)s/record/85/files/0212181.ps.gz</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="4">
    <subfield code="c">289-293</subfield>
    <subfield code="p">Phys. Lett. B</subfield>
    <subfield code="v">561</subfield>
    <subfield code="y">2003</subfield>
  </datafield>
  <datafield tag="859" ind1=" " ind2=" ">
    <subfield code="f">alberto.zaffaroni@mib.infn.it</subfield>
  </datafield>
  <datafield tag="916" ind1=" " ind2=" ">
    <subfield code="s">n</subfield>
    <subfield code="w">200251</subfield>
  </datafield>
  <datafield tag="960" ind1=" " ind2=" ">
    <subfield code="a">13</subfield>
  </datafield>
  <datafield tag="961" ind1=" " ind2=" ">
    <subfield code="c">20060823</subfield>
    <subfield code="h">0007</subfield>
    <subfield code="l">CER01</subfield>
    <subfield code="x">20021217</subfield>
  </datafield>
  <datafield tag="963" ind1=" " ind2=" ">
    <subfield code="a">PUBLIC</subfield>
  </datafield>
  <datafield tag="970" ind1=" " ind2=" ">
    <subfield code="a">002356302CER</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">ARTICLE</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">D. Francia and A. Sagnotti,</subfield>
    <subfield code="s">Phys. Lett. B 543 (2002) 303</subfield>
    <subfield code="r">hep-th/0207002</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">P. Haggi-Mani and B. Sundborg,</subfield>
    <subfield code="s">J. High Energy Phys. 0004 (2000) 031</subfield>
    <subfield code="r">hep-th/0002189</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">B. Sundborg,</subfield>
    <subfield code="s">Nucl. Phys. B, Proc. Suppl. 102 (2001) 113</subfield>
    <subfield code="r">hep-th/0103247</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">E. Sezgin and P. Sundell,</subfield>
    <subfield code="s">J. High Energy Phys. 0109 (2001) 036</subfield>
    <subfield code="r">hep-th/0105001</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">A. Mikhailov,</subfield>
    <subfield code="r">hep-th/0201019</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">E. Sezgin and P. Sundell,</subfield>
    <subfield code="s">Nucl. Phys. B 644 (2002) 303</subfield>
    <subfield code="r">hep-th/0205131</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">E. Sezgin and P. Sundell,</subfield>
    <subfield code="s">J. High Energy Phys. 0207 (2002) 055</subfield>
    <subfield code="r">hep-th/0205132</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">J. Engquist, E. Sezgin and P. Sundell,</subfield>
    <subfield code="s">Class. Quantum Gravity 19 (2002) 6175</subfield>
    <subfield code="r">hep-th/0207101</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">M. A. Vasiliev,</subfield>
    <subfield code="s">Int. J. Mod. Phys. D 5 (1996) 763</subfield>
    <subfield code="r">hep-th/9611024</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">D. Anselmi,</subfield>
    <subfield code="s">Nucl. Phys. B 541 (1999) 323</subfield>
    <subfield code="r">hep-th/9808004</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">D. Anselmi,</subfield>
    <subfield code="s">Class. Quantum Gravity 17 (2000) 1383</subfield>
    <subfield code="r">hep-th/9906167</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[2]</subfield>
    <subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
    <subfield code="s">Nucl. Phys. B 291 (1987) 141</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[2]</subfield>
    <subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
    <subfield code="s">Phys. Lett. B 189 (1987) 89</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[3]</subfield>
    <subfield code="m">I. R. Klebanov and A. M. Polyakov,</subfield>
    <subfield code="s">Phys. Lett. B 550 (2002) 213</subfield>
    <subfield code="r">hep-th/0210114</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[4]</subfield>
    <subfield code="m">M. A. Vasiliev,</subfield>
    <subfield code="r">hep-th/9910096</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[5]</subfield>
    <subfield code="m">T. Leonhardt, A. Meziane and W. Ruhl,</subfield>
    <subfield code="r">hep-th/0211092</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[6]</subfield>
    <subfield code="m">O. Aharony, M. Berkooz and E. Silverstein,</subfield>
    <subfield code="s">J. High Energy Phys. 0108 (2001) 006</subfield>
    <subfield code="r">hep-th/0105309</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[7]</subfield>
    <subfield code="m">E. Witten,</subfield>
    <subfield code="r">hep-th/0112258</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[8]</subfield>
    <subfield code="m">M. Berkooz, A. Sever and A. Shomer</subfield>
    <subfield code="s">J. High Energy Phys. 0205 (2002) 034</subfield>
    <subfield code="r">hep-th/0112264</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[9]</subfield>
    <subfield code="m">S. S. Gubser and I. Mitra,</subfield>
    <subfield code="r">hep-th/0210093</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[10]</subfield>
    <subfield code="m">S. S. Gubser and I. R. Klebanov,</subfield>
    <subfield code="r">hep-th/0212138</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[11]</subfield>
    <subfield code="m">M. Porrati,</subfield>
    <subfield code="s">J. High Energy Phys. 0204 (2002) 058</subfield>
    <subfield code="r">hep-th/0112166</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[12]</subfield>
    <subfield code="m">K. G. Wilson and J. B. Kogut,</subfield>
    <subfield code="s">Phys. Rep. 12 (1974) 75</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[13]</subfield>
    <subfield code="m">I. R. Klebanov and E. Witten,</subfield>
    <subfield code="s">Nucl. Phys. B 556 (1999) 89</subfield>
    <subfield code="r">hep-th/9905104</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[14]</subfield>
    <subfield code="m">W. Heidenreich,</subfield>
    <subfield code="s">J. Math. Phys. 22 (1981) 1566</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[15]</subfield>
    <subfield code="m">D. Anselmi,</subfield>
    <subfield code="r">hep-th/0210123</subfield>
  </datafield>
</record>
</collection>""" % {'siteurl': CFG_SITE_URL,
                    'rec_85_rev': get_fieldvalues(85, '005__')[0]}))

    def test_record_web_api_textmarc_field_filtered(self):
        """websearch - /record Web API for TextMARC output, field-filtered"""
        # ot=100,700 restricts the output to author fields only.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=100,700',
                                               expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
"""))

    def test_record_web_api_textmarc_field_filtered_hidden_guest(self):
        """websearch - /record Web API for TextMARC output, field-filtered, hidden field, no guest access"""
        # Guests must not see the hidden 595 field; only 100 is returned.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=100,595',
                                               expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
"""))

    def test_record_web_api_textmarc_field_filtered_hidden_admin(self):
        """websearch - /record Web API for TextMARC output, field-filtered, hidden field, admin access"""
        # Admin may see the hidden 595 field in addition to 100.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=100,595',
                                               username='admin',
                                               expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 595__ $$aLANL EDS
000000085 595__ $$aSIS LANLPUBL2004
000000085 595__ $$aSIS:2004 PR/LKR added
"""))

    def test_record_web_api_xmlmarc_field_filtered(self):
        """websearch - /record Web API for XMLMARC output, field-filtered"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/record/85?of=xm&ot=100,700',
                                               expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">85</controlfield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Girardello, L</subfield>
    <subfield code="u">INFN</subfield>
    <subfield code="u">Universita di Milano-Bicocca</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Porrati, Massimo</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Zaffaroni, A</subfield>
  </datafield>
</record>
</collection>"""))

    def test_record_web_api_xmlmarc_field_filtered_hidden_guest(self):
        """websearch - /record Web API for XMLMARC output, field-filtered, hidden field, no guest access"""
        # As for TextMARC above: hidden 595 is omitted for guests.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/record/85?of=xm&ot=100,595',
                                               expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">85</controlfield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Girardello, L</subfield>
    <subfield code="u">INFN</subfield>
    <subfield code="u">Universita di Milano-Bicocca</subfield>
  </datafield>
</record>
</collection>"""))

    def test_record_web_api_xmlmarc_field_filtered_hidden_admin(self):
        """websearch - /record Web API for XMLMARC output, field-filtered, hidden field, admin access"""
        # As for TextMARC above: hidden 595 is shown to admin.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/record/85?of=xm&ot=100,595',
                                               username='admin',
                                               expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">85</controlfield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Girardello, L</subfield>
    <subfield code="u">INFN</subfield>
    <subfield code="u">Universita di Milano-Bicocca</subfield>
  </datafield>
  <datafield tag="595" ind1=" " ind2=" ">
    <subfield code="a">LANL EDS</subfield>
  </datafield>
  <datafield tag="595" ind1=" " ind2=" ">
    <subfield code="a">SIS LANLPUBL2004</subfield>
  </datafield>
  <datafield tag="595" ind1=" " ind2=" ">
    <subfield code="a">SIS:2004 PR/LKR added</subfield>
  </datafield>
</record>
</collection>"""))

    def test_record_web_api_textmarc_subfield_values(self):
        """websearch - /record Web API for TextMARC output, subfield values"""
        # ot=700__a returns the bare subfield values, one per line.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=700__a',
                                               expected_text="""\
Porrati, Massimo
Zaffaroni, A
"""))
class WebSearchRestrictedCollectionTest(unittest.TestCase):
    """Test of the restricted collections behaviour.

    Covers search and detailed-record access for guests, authorized and
    unauthorized users, plus the low-level restriction helpers.  Note:
    ``str.find`` returns -1 when the needle is absent, so the guards below
    compare against -1 directly instead of the original, more verbose
    ``if found: pass / else: fail`` pattern.
    """

    def test_restricted_collection_interface_page(self):
        """websearch - restricted collection interface page body"""
        # There should be no "Latest additions" box for restricted
        # collections, so expecting that text must produce error messages.
        self.assertNotEqual([],
                            test_web_page_content(CFG_SITE_URL + '/collection/Theses',
                                                  expected_text="Latest additions"))

    def test_restricted_search_as_anonymous_guest(self):
        """websearch - restricted collection not searchable by anonymous guest"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?c=Theses')
        response = browser.response().read()
        # Guests must be redirected to the login dialog.
        if response.find("If you think you have right to access it, please authenticate yourself.") == -1:
            self.fail("Oops, searching restricted collection without password should have redirected to login dialog.")

    def test_restricted_search_as_authorized_person(self):
        """websearch - restricted collection searchable by authorized person"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?c=Theses')
        browser.select_form(nr=0)
        browser['p_un'] = 'jekyll'
        browser['p_pw'] = 'j123ekyll'
        browser.submit()
        # Dr. Jekyll is authorized for Theses, so hits must be displayed.
        if browser.response().read().find("records found") == -1:
            self.fail("Oops, Dr. Jekyll should be able to search Theses collection.")

    def test_restricted_search_as_unauthorized_person(self):
        """websearch - restricted collection not searchable by unauthorized person"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?c=Theses')
        browser.select_form(nr=0)
        browser['p_un'] = 'hyde'
        browser['p_pw'] = 'h123yde'
        browser.submit()
        # Mr. Hyde should not be able to connect: an authorization failure
        # message must be present in the response.
        if browser.response().read().find("Authorization failure") == -1:
            self.fail("Oops, Mr.Hyde should not be able to search Theses collection.")

    def test_restricted_detailed_record_page_as_anonymous_guest(self):
        """websearch - restricted detailed record page not accessible to guests"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/%s/35' % CFG_SITE_RECORD)
        # Guests must be sent to the login page instead of the record.
        if browser.response().read().find("You can use your nickname or your email address to login.") == -1:
            self.fail("Oops, searching restricted collection without password should have redirected to login dialog.")

    def test_restricted_detailed_record_page_as_authorized_person(self):
        """websearch - restricted detailed record page accessible to authorized person"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/youraccount/login')
        browser.select_form(nr=0)
        browser['p_un'] = 'jekyll'
        browser['p_pw'] = 'j123ekyll'
        browser.submit()
        browser.open(CFG_SITE_URL + '/%s/35' % CFG_SITE_RECORD)
        # Dr. Jekyll should be able to connect
        # (add the pw to the whole CFG_SITE_URL because we shall be
        # redirected to '/reordrestricted/'):
        if browser.response().read().find("A High-performance Video Browsing System") == -1:
            self.fail("Oops, Dr. Jekyll should be able to access restricted detailed record page.")

    def test_restricted_detailed_record_page_as_unauthorized_person(self):
        """websearch - restricted detailed record page not accessible to unauthorized person"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/youraccount/login')
        browser.select_form(nr=0)
        browser['p_un'] = 'hyde'
        browser['p_pw'] = 'h123yde'
        browser.submit()
        browser.open(CFG_SITE_URL + '/%s/35' % CFG_SITE_RECORD)
        # Mr. Hyde must be refused access:
        if browser.response().read().find('You are not authorized') == -1:
            self.fail("Oops, Mr.Hyde should not be able to access restricted detailed record page.")

    def test_collection_restricted_p(self):
        """websearch - collection_restricted_p"""
        # NOTE(review): the original passed True as failUnless's second
        # argument, which is the *msg* parameter and was meaningless;
        # dropped here without behaviour change.
        self.failUnless(collection_restricted_p('Theses'))
        self.failIf(collection_restricted_p('Books & Reports'))

    def test_get_permitted_restricted_collections(self):
        """websearch - get_permitted_restricted_collections"""
        from invenio.webuser import get_uid_from_email, collect_user_info
        self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('jekyll@cds.cern.ch'))), ['Theses', 'Drafts'])
        self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('hyde@cds.cern.ch'))), [])
        self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('balthasar.montague@cds.cern.ch'))), ['ALEPH Theses', 'ALEPH Internal Notes', 'Atlantis Times Drafts'])
        self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('dorian.gray@cds.cern.ch'))), ['ISOLDE Internal Notes'])

    def test_restricted_record_has_restriction_flag(self):
        """websearch - restricted record displays a restriction flag"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/%s/42/files/' % CFG_SITE_RECORD)
        browser.select_form(nr=0)
        browser['p_un'] = 'jekyll'
        browser['p_pw'] = 'j123ekyll'
        browser.submit()
        if browser.response().read().find("Restricted") == -1:
            self.fail("Oops, a 'Restricted' flag should appear on restricted records.")
        browser.open(CFG_SITE_URL + '/%s/42/files/comments' % CFG_SITE_RECORD)
        if browser.response().read().find("Restricted") == -1:
            self.fail("Oops, a 'Restricted' flag should appear on restricted records.")
        # The flag also appears on records that exist both in a public and
        # a restricted collection:
        error_messages = test_web_page_content(CFG_SITE_URL + '/%s/109' % CFG_SITE_RECORD,
                                               username='admin',
                                               password='',
                                               expected_text=['Restricted'])
        if error_messages:
            self.fail("Oops, a 'Restricted' flag should appear on restricted records.")
class WebSearchRestrictedCollectionHandlingTest(unittest.TestCase):
"""
Check how the restricted or restricted and "hidden" collection
handling works: (i)user has or not rights to access to specific
records or collections, (ii)public and restricted results are displayed
in the right position in the collection tree, (iii)display the right
warning depending on the case.
Changes in the collection tree used for testing (are showed the records used for testing as well):
Articles & Preprints Books & Reports
_____________|________________ ____________|_____________
| | | | | | |
Articles Drafts(r) Notes Preprints Books Theses(r) Reports
69 77 109 10 105
77 98 98
108 105
CERN Experiments
_________________________|___________________________
| |
ALEPH ISOLDE
_________________|_________________ ____________|_____________
| | | | |
ALEPH ALEPH ALEPH ISOLDE ISOLDE
Papers Internal Notes(r) Theses(r) Papers Internal Notes(r&h)
10 109 105 69 110
108 106
Authorized users:
jekyll -> Drafts, Theses
balthasar -> ALEPH Internal Notes, ALEPH Theses
dorian -> ISOLDE Internal Notes
"""
def test_show_public_colls_in_warning_as_unauthorizad_user(self):
"""websearch - show public daugther collections in warning to unauthorized user"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=recid:20',
username='hyde',
password='h123yde',
expected_text=['No match found in collection <em>Articles, Preprints, Notes</em>.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_show_public_and_restricted_colls_in_warning_as_authorized_user(self):
"""websearch - show public and restricted daugther collections in warning to authorized user"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=recid:20',
username='jekyll',
password='j123ekyll',
expected_text=['No match found in collection <em>Articles, Preprints, Notes, Drafts</em>.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_in_different_colls_as_unauthorized_user(self):
"""websearch - record belongs to different restricted collections with different rights, user not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=105&f=recid',
username='hyde',
password='h123yde',
expected_text=['No public collection matched your query.'],
unexpected_text=['records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_in_different_colls_as_authorized_user_of_one_coll(self):
"""websearch - record belongs to different restricted collections with different rights, balthasar has rights to one of them"""
from invenio.config import CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY
policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
if policy == 'ANY':
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='balthasar',
password='b123althasar',
expected_text=['[CERN-THESIS-99-074]'],
unexpected_text=['No public collection matched your query.'])
else:
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='balthasar',
password='b123althasar',
expected_text=['No public collection matched your query.'],
unexpected_text=['[CERN-THESIS-99-074]'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_in_different_colls_as_authorized_user_of_two_colls(self):
"""websearch - record belongs to different restricted collections with different rights, jekyll has rights to two of them"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='jekyll',
password='j123ekyll',
expected_text=['Articles & Preprints', 'Books & Reports'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_in_different_colls_as_authorized_user_of_all_colls(self):
"""websearch - record belongs to different restricted collections with different rights, admin has rights to all of them"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
expected_text=['Articles & Preprints', 'Books & Reports', 'ALEPH Theses'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_not_dad_coll(self):
"""websearch - record belongs to different restricted collections with different rights, search from a not dad collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Multimedia+%26+Arts&sc=1&p=recid%3A105&f=&action_search=Search&c=Pictures&c=Poetry&c=Atlantis+Times',
username='admin',
expected_text='No match found in collection',
expected_link_label='1 hits')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_public_and_restricted_record_as_unauthorized_user(self):
"""websearch - record belongs to different public and restricted collections, user not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=geometry&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts&of=id',
username='guest',
expected_text='[80, 86]',
unexpected_text='[40, 80, 86]')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_public_and_restricted_record_as_authorized_user(self):
"""websearch - record belongs to different public and restricted collections, admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=geometry&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts&of=id',
username='admin',
password='',
expected_text='[40, 80, 86]')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_public_and_restricted_record_of_focus_as_unauthorized_user(self):
"""websearch - record belongs to both a public and a restricted collection of "focus on", user not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=109&f=recid',
username='hyde',
password='h123yde',
expected_text=['No public collection matched your query'],
unexpected_text=['LEP Center-of-Mass Energies in Presence of Opposite'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_public_and_restricted_record_of_focus_as_authorized_user(self):
"""websearch - record belongs to both a public and a restricted collection of "focus on", user has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=109&f=recid&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='balthasar',
password='b123althasar',
expected_text=['Articles & Preprints', 'ALEPH Internal Notes', 'LEP Center-of-Mass Energies in Presence of Opposite'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_public_and_restricted_record_from_not_dad_coll_as_authorized_user(self):
"""websearch - record belongs to both a public and a restricted collection, search from a not dad collection, admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=recid%3A98&f=&action_search=Search&c=Books&c=Reports',
username='admin',
password='',
expected_text='No match found in collection <em>Books, Theses, Reports</em>',
expected_link_label='1 hits')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_public_and_restricted_record_from_not_dad_coll_as_unauthorized_user(self):
"""websearch - record belongs to both a public and a restricted collection, search from a not dad collection, hyde not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=recid%3A98&f=&action_search=Search&c=Books&c=Reports',
username='hyde',
password='h123yde',
expected_text='No public collection matched your query',
unexpected_text='No match found in collection')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_of_focus_as_authorized_user(self):
"""websearch - record belongs to a restricted collection of "focus on", balthasar has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=106&f=recid&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts&of=id',
username='balthasar',
password='b123althasar',
expected_text='[106]',
unexpected_text='[]')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_display_dad_coll_of_restricted_coll_as_unauthorized_user(self):
"""websearch - unauthorized user displays a collection that contains a restricted collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=&f=&action_search=Search&c=Articles&c=Drafts&c=Preprints',
username='guest',
expected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_display_dad_coll_of_restricted_coll_as_authorized_user(self):
"""websearch - authorized user displays a collection that contains a restricted collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=&f=&action_search=Search&c=Articles&c=Drafts&c=Notes&c=Preprints',
username='jekyll',
password='j123ekyll',
expected_text=['Articles', 'Drafts', 'Notes', 'Preprints'],
unexpected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_coll_of_focus_as_unauthorized_user(self):
"""websearch - search for a record that belongs to a restricted collection from a collection of "focus on" , jekyll not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=CERN+Divisions&sc=1&p=recid%3A106&f=&action_search=Search&c=Experimental+Physics+(EP)&c=Theoretical+Physics+(TH)',
username='jekyll',
password='j123ekyll',
expected_text=['No public collection matched your query.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_coll_of_focus_as_authorized_user(self):
"""websearch - search for a record that belongs to a restricted collection from a collection of "focus on" , admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=CERN+Divisions&sc=1&p=recid%3A106&f=&action_search=Search&c=Experimental+Physics+(EP)&c=Theoretical+Physics+(TH)',
username='admin',
password='',
expected_text='No match found in collection <em>Experimental Physics (EP), Theoretical Physics (TH)</em>.',
expected_link_label='1 hits')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_not_direct_dad_coll_and_display_in_right_position_in_tree(self):
"""websearch - search for a restricted record from not direct dad collection and display it on its right position in the tree"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=recid%3A40&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
password='',
expected_text=['Books & Reports','[LBL-22304]'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_direct_dad_coll_and_display_in_right_position_in_tree(self):
"""websearch - search for a restricted record from the direct dad collection and display it on its right position in the tree"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=recid%3A40&f=&action_search=Search&c=Books&c=Reports',
username='admin',
password='',
expected_text=['Theses', '[LBL-22304]'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_and_hidden_record_as_unauthorized_user(self):
"""websearch - search for a "hidden" record, user not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=recid%3A110&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='guest',
expected_text=['If you were looking for a non-public document'],
unexpected_text=['If you were looking for a hidden document'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_and_hidden_record_as_authorized_user(self):
"""websearch - search for a "hidden" record, admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=recid%3A110&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
password='',
expected_text=['If you were looking for a hidden document, please type the correct URL for this record.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_enter_url_of_restricted_and_hidden_coll_as_unauthorized_user(self):
"""websearch - unauthorized user types the concret URL of a "hidden" collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=ISOLDE+Internal+Notes&sc=1&p=&f=&action_search=Search',
username='guest',
expected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_enter_url_of_restricted_and_hidden_coll_as_authorized_user(self):
"""websearch - authorized user types the concret URL of a "hidden" collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=ISOLDE+Internal+Notes&sc=1&p=&f=&action_search=Search',
username='dorian',
password='d123orian',
expected_text=['ISOLDE Internal Notes', '[CERN-PS-PA-Note-93-04]'],
unexpected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_the_top_as_unauthorized_user(self):
"""websearch - unauthorized user searches for a pattern from the top"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=of&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='guest',
expected_text=['Articles & Preprints', '61', 'records found',
'Books & Reports', '2', 'records found',
'Multimedia & Arts', '14', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_the_top_as_authorized_user(self):
"""websearch - authorized user searches for a pattern from the top"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=of&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
password='',
expected_text=['Articles & Preprints', '61', 'records found',
'Books & Reports', '6', 'records found',
'Multimedia & Arts', '14', 'records found',
'ALEPH Theses', '1', 'records found',
'ALEPH Internal Notes', '1', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_an_specific_coll_as_unauthorized_user(self):
"""websearch - unauthorized user searches for a pattern from one specific collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=of&f=&action_search=Search&c=Books&c=Reports',
username='guest',
expected_text=['Books', '1', 'records found',
'Reports', '1', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_an_specific_coll_as_authorized_user(self):
"""websearch - authorized user searches for a pattern from one specific collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=of&f=&action_search=Search&c=Books&c=Reports',
username='admin',
password='',
expected_text=['Books', '1', 'records found',
'Reports', '1', 'records found',
'Theses', '4', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
class WebSearchRestrictedPicturesTest(unittest.TestCase):
    """
    Check whether restricted pictures on the demo site can be accessed
    well by people who have rights to access them.
    """

    def test_restricted_pictures_guest(self):
        """websearch - restricted pictures not available to guest"""
        picture_url = CFG_SITE_URL + '/%s/1/files/0106015_01.jpg' % CFG_SITE_RECORD
        problems = test_web_page_content(picture_url,
                                         expected_text=['This file is restricted. If you think you have right to access it, please authenticate yourself.'])
        if problems:
            self.fail(merge_error_messages(problems))

    def test_restricted_pictures_romeo(self):
        """websearch - restricted pictures available to Romeo"""
        picture_url = CFG_SITE_URL + '/%s/1/files/0106015_01.jpg' % CFG_SITE_RECORD
        problems = test_web_page_content(picture_url,
                                         username='romeo',
                                         password='r123omeo',
                                         expected_text=[],
                                         unexpected_text=['This file is restricted',
                                                          'You are not authorized'])
        if problems:
            self.fail(merge_error_messages(problems))

    def test_restricted_pictures_hyde(self):
        """websearch - restricted pictures not available to Mr. Hyde"""
        picture_url = CFG_SITE_URL + '/%s/1/files/0106015_01.jpg' % CFG_SITE_RECORD
        problems = test_web_page_content(picture_url,
                                         username='hyde',
                                         password='h123yde',
                                         expected_text=['This file is restricted',
                                                        'You are not authorized'])
        if problems:
            # For Hyde the fetch itself is expected to be rejected with 401.
            self.failUnless("HTTP Error 401: Unauthorized" in merge_error_messages(problems))
class WebSearchRestrictedWebJournalFilesTest(unittest.TestCase):
    """
    Check whether files attached to a WebJournal article are well
    accessible when the article is published
    """

    def test_restricted_files_guest(self):
        """websearch - files of unreleased articles are not available to guest"""
        # The record itself is non-public ...
        self.assertEqual(record_public_p(112), False)
        # ... so its attachments must be refused to guests.
        file_url = CFG_SITE_URL + '/%s/112/files/journal_galapagos_archipelago.jpg' % CFG_SITE_RECORD
        problems = test_web_page_content(file_url,
                                         expected_text=['This file is restricted. If you think you have right to access it, please authenticate yourself.'])
        if problems:
            self.fail(merge_error_messages(problems))

    def test_restricted_files_editor(self):
        """websearch - files of unreleased articles are available to editor"""
        # The record itself is non-public ...
        self.assertEqual(record_public_p(112), False)
        # ... yet the journal editor may still fetch the attachments.
        file_url = CFG_SITE_URL + '/%s/112/files/journal_galapagos_archipelago.jpg' % CFG_SITE_RECORD
        problems = test_web_page_content(file_url,
                                         username='balthasar',
                                         password='b123althasar',
                                         expected_text=[],
                                         unexpected_text=['This file is restricted',
                                                          'You are not authorized'])
        if problems:
            self.fail(merge_error_messages(problems))

    def test_public_files_guest(self):
        """websearch - files of released articles are available to guest"""
        # The record itself is non-public ...
        self.assertEqual(record_public_p(111), False)
        # ... but the article is released, so its files are open to all.
        file_url = CFG_SITE_URL + '/%s/111/files/journal_scissor_beak.jpg' % CFG_SITE_RECORD
        problems = test_web_page_content(file_url,
                                         expected_text=[],
                                         unexpected_text=['This file is restricted',
                                                          'You are not authorized'])
        if problems:
            self.fail(merge_error_messages(problems))

    def test_really_restricted_files_guest(self):
        """websearch - restricted files of released articles are not available to guest"""
        # The record itself is non-public ...
        self.assertEqual(record_public_p(111), False)
        # ... and explicitly restricted attachments stay closed even
        # after the article is released.
        file_url = CFG_SITE_URL + '/%s/111/files/restricted-journal_scissor_beak.jpg' % CFG_SITE_RECORD
        problems = test_web_page_content(file_url,
                                         expected_text=['This file is restricted. If you think you have right to access it, please authenticate yourself.'])
        if problems:
            self.fail(merge_error_messages(problems))

    def test_restricted_picture_has_restriction_flag(self):
        """websearch - restricted files displays a restriction flag"""
        listing_url = CFG_SITE_URL + '/%s/1/files/' % CFG_SITE_RECORD
        problems = test_web_page_content(listing_url,
                                         expected_text="Restricted")
        if problems:
            self.fail(merge_error_messages(problems))
class WebSearchRSSFeedServiceTest(unittest.TestCase):
    """Test of the RSS feed service."""

    def test_rss_feed_service(self):
        """websearch - RSS feed service"""
        # The feed endpoint must answer with an RSS 2.0 document element.
        problems = test_web_page_content(CFG_SITE_URL + '/rss',
                                         expected_text='<rss version="2.0"')
        self.assertEqual([], problems)
class WebSearchXSSVulnerabilityTest(unittest.TestCase):
    """Test possible XSS vulnerabilities of the search engine."""

    def test_xss_in_collection_interface_page(self):
        """websearch - no XSS vulnerability in collection interface pages"""
        # The collection name must come back HTML-escaped.
        url = CFG_SITE_URL + '/?c=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text='Collection &lt;SCRIPT&gt;alert("XSS");&lt;/SCRIPT&gt; Not Found'))

    def test_xss_in_collection_search_page(self):
        """websearch - no XSS vulnerability in collection search pages"""
        url = CFG_SITE_URL + '/search?c=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text='Collection <SCRIPT>alert("XSS");</SCRIPT> Not Found'))

    def test_xss_in_simple_search(self):
        """websearch - no XSS vulnerability in simple search"""
        url = CFG_SITE_URL + '/search?p=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text='Search term <em><SCRIPT>alert("XSS");</SCRIPT></em> did not match any record.'))

    def test_xss_in_structured_search(self):
        """websearch - no XSS vulnerability in structured search"""
        url = CFG_SITE_URL + '/search?p=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&f=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text='No word index is available for <em><script>alert("xss");</script></em>.'))

    def test_xss_in_advanced_search(self):
        """websearch - no XSS vulnerability in advanced search"""
        url = CFG_SITE_URL + '/search?as=1&p1=ellis&f1=author&op1=a&p2=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&f2=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&m2=e'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text='Search term <em><SCRIPT>alert("XSS");</SCRIPT></em> inside index <em><script>alert("xss");</script></em> did not match any record.'))

    def test_xss_in_browse(self):
        """websearch - no XSS vulnerability in browse"""
        url = CFG_SITE_URL + '/search?p=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&f=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&action_browse=Browse'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text='<SCRIPT>alert("XSS");</SCRIPT>'))
class WebSearchResultsOverview(unittest.TestCase):
    """Test of the search results page's Results overview box and links."""

    def test_results_overview_split_off(self):
        """websearch - results overview box when split by collection is off"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?p=of&sc=0')
        page = browser.response().read()
        # No overview box, no per-collection anchors/links when sc=0.
        if "Results overview" in page:
            self.fail("Oops, when split by collection is off, "
                      "results overview should not be present.")
        if '<a name="1"></a>' not in page:
            self.fail("Oops, when split by collection is off, "
                      "Atlantis collection should be found.")
        if '<a name="15"></a>' in page:
            self.fail("Oops, when split by collection is off, "
                      "Multimedia & Arts should not be found.")
        try:
            browser.find_link(url='#15')
        except LinkNotFoundError:
            pass
        else:
            self.fail("Oops, when split by collection is off, "
                      "a link to Multimedia & Arts should not be found.")

    def test_results_overview_split_on(self):
        """websearch - results overview box when split by collection is on"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?p=of&sc=1')
        page = browser.response().read()
        # Overview box plus per-collection anchors/links when sc=1.
        if "Results overview" not in page:
            self.fail("Oops, when split by collection is on, "
                      "results overview should be present.")
        if '<a name="Atlantis%20Institute%20of%20Fictive%20Science"></a>' in page:
            self.fail("Oops, when split by collection is on, "
                      "Atlantis collection should not be found.")
        if '<a name="15"></a>' not in page:
            self.fail("Oops, when split by collection is on, "
                      "Multimedia & Arts should be found.")
        try:
            browser.find_link(url='#15')
        except LinkNotFoundError:
            self.fail("Oops, when split by collection is on, "
                      "a link to Multimedia & Arts should be found.")
class WebSearchSortResultsTest(unittest.TestCase):
    """Test of the search results page's sorting capability."""

    def test_sort_results_default(self):
        """websearch - search results sorting, default method"""
        url = CFG_SITE_URL + '/search?p=of&f=title&rg=3'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="CMS animation of the high-energy collisions"))

    def test_sort_results_ascending(self):
        """websearch - search results sorting, ascending field"""
        url = CFG_SITE_URL + '/search?p=of&f=title&rg=2&sf=reportnumber&so=a'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="[astro-ph/0104076]"))

    def test_sort_results_descending(self):
        """websearch - search results sorting, descending field"""
        url = CFG_SITE_URL + '/search?p=of&f=title&rg=1&sf=reportnumber&so=d'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text=" [TESLA-FEL-99-07]"))

    def test_sort_results_sort_pattern(self):
        """websearch - search results sorting, preferential sort pattern"""
        url = CFG_SITE_URL + '/search?p=of&f=title&rg=1&sf=reportnumber&so=d&sp=cern'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="[CERN-TH-2002-069]"))
class WebSearchSearchResultsXML(unittest.TestCase):
    """Test search results in various output"""

    def _check_single_document_element(self, url, open_tag):
        """Fetch url and assert exactly one *open_tag* and one closing
        </collection> tag occur in the body (i.e. the output is a single
        well-formed document element).
        """
        browser = Browser()
        browser.open(url)
        body = browser.response().read()
        # Note: the original code always blamed the MARC21 xmlns tag in
        # its failure message even when counting a different open tag;
        # report the tag actually searched instead.
        num_open = body.count(open_tag)
        if num_open == 0:
            self.fail("Oops, no document element %s "
                      "found in search results." % open_tag)
        elif num_open > 1:
            self.fail("Oops, multiple document elements <collection> "
                      "found in search results.")
        num_close = body.count("</collection>")
        if num_close == 0:
            self.fail("Oops, no document element </collection> "
                      "found in search results.")
        elif num_close > 1:
            self.fail("Oops, multiple document elements </collection> "
                      "found in search results.")

    def test_search_results_xm_output_split_on(self):
        """ websearch - check document element of search results in xm output (split by collection on)"""
        self._check_single_document_element(
            CFG_SITE_URL + '/search?sc=1&of=xm',
            "<collection xmlns=\"http://www.loc.gov/MARC21/slim\">")

    def test_search_results_xm_output_split_off(self):
        """ websearch - check document element of search results in xm output (split by collection off)"""
        self._check_single_document_element(
            CFG_SITE_URL + '/search?sc=0&of=xm',
            "<collection xmlns=\"http://www.loc.gov/MARC21/slim\">")

    def test_search_results_xd_output_split_on(self):
        """ websearch - check document element of search results in xd output (split by collection on)"""
        # xd (XML Dublin Core) output: only the bare tag prefix is checked.
        self._check_single_document_element(
            CFG_SITE_URL + '/search?sc=1&of=xd',
            "<collection")

    def test_search_results_xd_output_split_off(self):
        """ websearch - check document element of search results in xd output (split by collection off)"""
        self._check_single_document_element(
            CFG_SITE_URL + '/search?sc=0&of=xd',
            "<collection>")
class WebSearchUnicodeQueryTest(unittest.TestCase):
    """Test of the search results for queries containing Unicode characters."""

    def test_unicode_word_query(self):
        """websearch - Unicode word query"""
        url = CFG_SITE_URL + '/search?of=id&p=title%3A%CE%99%CE%B8%CE%AC%CE%BA%CE%B7'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[76]"))

    def test_unicode_word_query_not_found_term(self):
        """websearch - Unicode word query, not found term"""
        url = CFG_SITE_URL + '/search?p=title%3A%CE%99%CE%B8'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="ιθάκη"))

    def test_unicode_exact_phrase_query(self):
        """websearch - Unicode exact phrase query"""
        url = CFG_SITE_URL + '/search?of=id&p=title%3A%22%CE%99%CE%B8%CE%AC%CE%BA%CE%B7%22'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[76]"))

    def test_unicode_partial_phrase_query(self):
        """websearch - Unicode partial phrase query"""
        # No hit expected here: the difference between double-quoted and
        # single-quoted search was removed, so this partial phrase query
        # matches nothing.
        url = CFG_SITE_URL + '/search?of=id&p=title%3A%27%CE%B7%27'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[]"))

    def test_unicode_regexp_query(self):
        """websearch - Unicode regexp query"""
        url = CFG_SITE_URL + '/search?of=id&p=title%3A%2F%CE%B7%2F'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[76]"))
class WebSearchMARCQueryTest(unittest.TestCase):
    """Test of the search results for queries containing physical MARC tags."""

    def test_single_marc_tag_exact_phrase_query(self):
        """websearch - single MARC tag, exact phrase query (100__a)"""
        url = CFG_SITE_URL + '/search?of=id&p=100__a%3A%22Ellis%2C+J%22'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[9, 14, 18]"))

    def test_single_marc_tag_partial_phrase_query(self):
        """websearch - single MARC tag, partial phrase query (245__b)"""
        url = CFG_SITE_URL + '/search?of=id&p=245__b%3A%27and%27'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[28]"))

    def test_many_marc_tags_partial_phrase_query(self):
        """websearch - many MARC tags, partial phrase query (245)"""
        url = CFG_SITE_URL + '/search?of=id&p=245%3A%27and%27&rg=100'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="[1, 8, 9, 14, 15, 20, 22, 24, 28, 33, 47, 48, 49, 51, 53, 64, 69, 71, 79, 82, 83, 85, 91, 96, 108]"))

    def test_single_marc_tag_regexp_query(self):
        """websearch - single MARC tag, regexp query"""
        url = CFG_SITE_URL + '/search?of=id&p=245%3A%2Fand%2F&rg=100'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="[1, 8, 9, 14, 15, 20, 22, 24, 28, 33, 47, 48, 49, 51, 53, 64, 69, 71, 79, 82, 83, 85, 91, 96, 108]"))
class WebSearchExtSysnoQueryTest(unittest.TestCase):
    """Test of queries using external system numbers."""

    def test_existing_sysno_html_output(self):
        """websearch - external sysno query, existing sysno, HTML output"""
        url = CFG_SITE_URL + '/search?sysno=000289446CER'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="The wall of the cave"))

    def test_existing_sysno_id_output(self):
        """websearch - external sysno query, existing sysno, ID output"""
        url = CFG_SITE_URL + '/search?sysno=000289446CER&of=id'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[95]"))

    def test_nonexisting_sysno_html_output(self):
        """websearch - external sysno query, non-existing sysno, HTML output"""
        url = CFG_SITE_URL + '/search?sysno=000289446CERRRR'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="Requested record does not seem to exist."))

    def test_nonexisting_sysno_id_output(self):
        """websearch - external sysno query, non-existing sysno, ID output"""
        url = CFG_SITE_URL + '/search?sysno=000289446CERRRR&of=id'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[]"))
class WebSearchResultsRecordGroupingTest(unittest.TestCase):
    """Test search results page record grouping (rg)."""

    def test_search_results_rg_guest(self):
        """websearch - search results, records in groups of, guest"""
        url = CFG_SITE_URL + '/search?rg=17'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="1 - 17"))

    def test_search_results_rg_nonguest(self):
        """websearch - search results, records in groups of, non-guest"""
        # Regression check: the URL rg argument must override any saved
        # user preference.
        url = CFG_SITE_URL + '/search?rg=17'
        self.assertEqual([],
                         test_web_page_content(url,
                                               username='admin',
                                               expected_text="1 - 17"))
class WebSearchSpecialTermsQueryTest(unittest.TestCase):
    """Test of the search results for queries containing special terms."""

    def test_special_terms_u1(self):
        """websearch - query for special terms, U(1)"""
        url = CFG_SITE_URL + '/search?of=id&p=U%281%29'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="[57, 79, 80, 88]"))

    def test_special_terms_u1_and_sl(self):
        """websearch - query for special terms, U(1) SL(2,Z)"""
        url = CFG_SITE_URL + '/search?of=id&p=U%281%29+SL%282%2CZ%29'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[88]"))

    def test_special_terms_u1_and_sl_or(self):
        """websearch - query for special terms, U(1) OR SL(2,Z)"""
        url = CFG_SITE_URL + '/search?of=id&p=U%281%29+OR+SL%282%2CZ%29'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="[57, 79, 80, 88]"))

    @nottest
    def FIXME_TICKET_453_test_special_terms_u1_and_sl_or_parens(self):
        """websearch - query for special terms, (U(1) OR SL(2,Z))"""
        url = CFG_SITE_URL + '/search?of=id&p=%28U%281%29+OR+SL%282%2CZ%29%29'
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="[57, 79, 80, 88]"))

    def test_special_terms_u1_and_sl_in_quotes(self):
        """websearch - query for special terms, ('SL(2,Z)' OR 'U(1)')"""
        url = CFG_SITE_URL + "/search?of=id&p=%28%27SL%282%2CZ%29%27+OR+%27U%281%29%27%29"
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="[57, 79, 80, 88, 96]"))
class WebSearchJournalQueryTest(unittest.TestCase):
    """Test of the search results for journal pubinfo queries."""

    def test_query_journal_title_only(self):
        """websearch - journal publication info query, title only"""
        url = CFG_SITE_URL + '/search?of=id&f=journal&p=Phys.+Lett.+B'
        # Guest sees the public records only ...
        self.assertEqual([],
                         test_web_page_content(url,
                                               expected_text="[78, 85, 87]"))
        # ... while admin additionally sees record 77.
        self.assertEqual([],
                         test_web_page_content(url,
                                               username='admin',
                                               expected_text="[77, 78, 85, 87]"))

    def test_query_journal_full_pubinfo(self):
        """websearch - journal publication info query, full reference"""
        url = CFG_SITE_URL + '/search?of=id&f=journal&p=Phys.+Lett.+B+531+%282002%29+301'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[78]"))
class WebSearchStemmedIndexQueryTest(unittest.TestCase):
    """Test of the search results for queries using stemmed indexes."""

    def test_query_stemmed_lowercase(self):
        """websearch - stemmed index query, lowercase"""
        # dasse/Dasse stems to dass/Dass, as expected.
        url = CFG_SITE_URL + '/search?of=id&p=dasse'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[25, 26]"))

    def test_query_stemmed_uppercase(self):
        """websearch - stemmed index query, uppercase"""
        # DASSE stems to DASSE(!), so this only matches because the
        # search engine lowercases the query term first — something that
        # is unnecessary for non-stemmed indexes.
        url = CFG_SITE_URL + '/search?of=id&p=DASSE'
        self.assertEqual([],
                         test_web_page_content(url, expected_text="[25, 26]"))
class WebSearchSummarizerTest(unittest.TestCase):
    """Test of the search results summarizer functions."""

    def test_most_popular_field_values_singletag(self):
        """websearch - most popular field values, simple tag"""
        from invenio.search_engine import get_most_popular_field_values
        self.assertEqual([('PREPRINT', 37), ('ARTICLE', 28), ('BOOK', 14), ('THESIS', 8), ('PICTURE', 7),
                          ('DRAFT', 2), ('POETRY', 2), ('REPORT', 2), ('ALEPHPAPER', 1), ('ATLANTISTIMESNEWS', 1),
                          ('ISOLDEPAPER', 1)],
                         get_most_popular_field_values(range(0, 100), '980__a'))

    def test_most_popular_field_values_singletag_multiexclusion(self):
        """websearch - most popular field values, simple tag, multiple exclusions"""
        from invenio.search_engine import get_most_popular_field_values
        self.assertEqual([('PREPRINT', 37), ('ARTICLE', 28), ('BOOK', 14), ('DRAFT', 2), ('REPORT', 2),
                          ('ALEPHPAPER', 1), ('ATLANTISTIMESNEWS', 1), ('ISOLDEPAPER', 1)],
                         get_most_popular_field_values(range(0, 100), '980__a', ('THESIS', 'PICTURE', 'POETRY')))

    def test_most_popular_field_values_multitag(self):
        """websearch - most popular field values, multiple tags"""
        from invenio.search_engine import get_most_popular_field_values
        self.assertEqual([('Ellis, J', 3), ('Enqvist, K', 1), ('Ibanez, L E', 1), ('Nanopoulos, D V', 1), ('Ross, G G', 1)],
                         get_most_popular_field_values((9, 14, 18), ('100__a', '700__a')))

    def test_most_popular_field_values_multitag_singleexclusion(self):
        """websearch - most popular field values, multiple tags, single exclusion"""
        from invenio.search_engine import get_most_popular_field_values
        # FIX: ('Ellis, J') is a plain string, not a tuple — `in` on it
        # would do substring matching.  Pass a real one-element tuple so
        # the exclusion is an exact match, like the multi-exclusion test.
        self.assertEqual([('Enqvist, K', 1), ('Ibanez, L E', 1), ('Nanopoulos, D V', 1), ('Ross, G G', 1)],
                         get_most_popular_field_values((9, 14, 18), ('100__a', '700__a'), ('Ellis, J',)))

    def test_most_popular_field_values_multitag_countrepetitive(self):
        """websearch - most popular field values, multiple tags, counting repetitive occurrences"""
        from invenio.search_engine import get_most_popular_field_values
        self.assertEqual([('THESIS', 2), ('REPORT', 1)],
                         get_most_popular_field_values((41,), ('690C_a', '980__a'), count_repetitive_values=True))
        self.assertEqual([('REPORT', 1), ('THESIS', 1)],
                         get_most_popular_field_values((41,), ('690C_a', '980__a'), count_repetitive_values=False))

    def test_ellis_citation_summary(self):
        """websearch - query ellis, citation summary output format"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=hcs',
                                               expected_text="Less known papers (1-9)",
                                               expected_link_target=CFG_SITE_URL + "/search?p=ellis%20AND%20cited%3A1-%3E9",
                                               expected_link_label='1'))

    def test_ellis_not_quark_citation_summary_advanced(self):
        """websearch - ellis and not quark, citation summary format advanced"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&as=1&m1=a&p1=ellis&f1=author&op1=n&m2=a&p2=quark&f2=&op2=a&m3=a&p3=&f3=&action_search=Search&sf=&so=a&rm=&rg=10&sc=1&of=hcs',
                                               expected_text="Less known papers (1-9)",
                                               expected_link_target=CFG_SITE_URL + '/search?p=author%3Aellis%20and%20not%20quark%20AND%20cited%3A1-%3E9',
                                               expected_link_label='1'))

    def test_ellis_not_quark_citation_summary_regular(self):
        """websearch - ellis and not quark, citation summary format regular"""
        # Docstring fixed: it used to say "advanced" (copy-paste from the
        # test above), although this exercises the regular search form.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=author%3Aellis+and+not+quark&f=&action_search=Search&sf=&so=d&rm=&rg=10&sc=0&of=hcs',
                                               expected_text="Less known papers (1-9)",
                                               expected_link_target=CFG_SITE_URL + '/search?p=author%3Aellis%20and%20not%20quark%20AND%20cited%3A1-%3E9',
                                               expected_link_label='1'))
class WebSearchRecordCollectionGuessTest(unittest.TestCase):
    """Primary collection guessing tests."""

    def test_guess_primary_collection_of_a_record(self):
        """websearch - guess_primary_collection_of_a_record"""
        self.assertEqual('Articles', guess_primary_collection_of_a_record(96))

    def test_guess_collection_of_a_record(self):
        """websearch - guess_collection_of_a_record"""
        th_referer = '%s/collection/Theoretical Physics (TH)?ln=en' % CFG_SITE_URL
        th_referer_quoted = '%s/collection/Theoretical%%20Physics%%20%%28TH%%29?ln=en' % CFG_SITE_URL
        # Without a referer the guess is based on the record alone.
        self.assertEqual('Articles', guess_collection_of_a_record(96))
        # Record 96 with a TH referer still resolves to Articles...
        self.assertEqual('Articles', guess_collection_of_a_record(96, th_referer))
        # ...while record 12 resolves to the TH collection, both for the
        # plain and for the percent-encoded form of the referer URL.
        self.assertEqual('Theoretical Physics (TH)', guess_collection_of_a_record(12, th_referer))
        self.assertEqual('Theoretical Physics (TH)', guess_collection_of_a_record(12, th_referer_quoted))
class WebSearchGetFieldValuesTest(unittest.TestCase):
    """Testing get_fieldvalues() function."""

    def test_get_fieldvalues_001(self):
        """websearch - get_fieldvalues() for bibxxx-agnostic tags"""
        self.assertEqual(get_fieldvalues(10, '001___'), ['10'])

    def test_get_fieldvalues_980(self):
        """websearch - get_fieldvalues() for bibxxx-powered tags"""
        self.assertEqual(get_fieldvalues(18, '700__a'), ['Enqvist, K', 'Nanopoulos, D V'])
        self.assertEqual(get_fieldvalues(18, '909C1u'), ['CERN'])

    def test_get_fieldvalues_wildcard(self):
        """websearch - get_fieldvalues() for tag wildcards"""
        cases = (('%', []),
                 ('7%', []),
                 ('700%', ['Enqvist, K', 'Nanopoulos, D V']),
                 ('909C0%', ['1985', '13', 'TH']))
        for tag, expected in cases:
            self.assertEqual(get_fieldvalues(18, tag), expected)

    def test_get_fieldvalues_recIDs(self):
        """websearch - get_fieldvalues() for list of recIDs"""
        # Empty recID lists give empty results regardless of tag.
        self.assertEqual(get_fieldvalues([], '001___'), [])
        self.assertEqual(get_fieldvalues([], '700__a'), [])
        self.assertEqual(get_fieldvalues([10, 13], '001___'), ['10', '13'])
        self.assertEqual(get_fieldvalues([18, 13], '700__a'),
                         ['Dawson, S', 'Ellis, R K', 'Enqvist, K', 'Nanopoulos, D V'])

    def test_get_fieldvalues_repetitive(self):
        """websearch - get_fieldvalues() for repetitive values"""
        # Default behaviour keeps repeated values (same as repetitive_values=True).
        self.assertEqual(get_fieldvalues([17, 18], '909C1u'),
                         ['CERN', 'CERN'])
        self.assertEqual(get_fieldvalues([17, 18], '909C1u', repetitive_values=True),
                         ['CERN', 'CERN'])
        self.assertEqual(get_fieldvalues([17, 18], '909C1u', repetitive_values=False),
                         ['CERN'])
class WebSearchAddToBasketTest(unittest.TestCase):
    """Test of the add-to-basket presence depending on user rights."""

    def test_add_to_basket_guest(self):
        """websearch - add-to-basket facility allowed for guests"""
        url = CFG_SITE_URL + '/search?p=recid%3A10'
        for snippet in ('Add to basket',
                        '<input name="recid" type="checkbox" value="10" />'):
            self.assertEqual(test_web_page_content(url, expected_text=snippet), [])

    def test_add_to_basket_jekyll(self):
        """websearch - add-to-basket facility allowed for Dr. Jekyll"""
        url = CFG_SITE_URL + '/search?p=recid%3A10'
        for snippet in ('Add to basket',
                        '<input name="recid" type="checkbox" value="10" />'):
            self.assertEqual(test_web_page_content(url,
                                                   expected_text=snippet,
                                                   username='jekyll',
                                                   password='j123ekyll'),
                             [])

    def test_add_to_basket_hyde(self):
        """websearch - add-to-basket facility denied to Mr. Hyde"""
        url = CFG_SITE_URL + '/search?p=recid%3A10'
        # Hyde must NOT see either the link or the record checkbox.
        for snippet in ('Add to basket',
                        '<input name="recid" type="checkbox" value="10" />'):
            self.assertEqual(test_web_page_content(url,
                                                   unexpected_text=snippet,
                                                   username='hyde',
                                                   password='h123yde'),
                             [])
class WebSearchAlertTeaserTest(unittest.TestCase):
    """Test of the alert teaser presence depending on user rights."""

    def test_alert_teaser_guest(self):
        """websearch - alert teaser allowed for guests"""
        url = CFG_SITE_URL + '/search?p=ellis'
        # The alert teaser is a link for guests; the RSS teaser is plain text.
        self.assertEqual(test_web_page_content(url, expected_link_label='email alert'), [])
        self.assertEqual(test_web_page_content(url, expected_text='RSS feed'), [])

    def test_alert_teaser_jekyll(self):
        """websearch - alert teaser allowed for Dr. Jekyll"""
        url = CFG_SITE_URL + '/search?p=ellis'
        for snippet in ('email alert', 'RSS feed'):
            self.assertEqual(test_web_page_content(url,
                                                   expected_text=snippet,
                                                   username='jekyll',
                                                   password='j123ekyll'),
                             [])

    def test_alert_teaser_hyde(self):
        """websearch - alert teaser allowed for Mr. Hyde"""
        url = CFG_SITE_URL + '/search?p=ellis'
        for snippet in ('email alert', 'RSS feed'):
            self.assertEqual(test_web_page_content(url,
                                                   expected_text=snippet,
                                                   username='hyde',
                                                   password='h123yde'),
                             [])
class WebSearchSpanQueryTest(unittest.TestCase):
    """Test of span queries."""

    def _check(self, query_url, recids):
        # Shared helper: the search must return exactly `recids`.
        self.assertEqual(test_web_page_content(CFG_SITE_URL + query_url,
                                               expected_text=recids),
                         [])

    def test_span_in_word_index(self):
        """websearch - span query in a word index"""
        self._check('/search?p=year%3A1992-%3E1996&of=id&ap=0', '[17, 66, 69, 71]')

    def test_span_in_phrase_index(self):
        """websearch - span query in a phrase index"""
        self._check('/search?p=year%3A%221992%22-%3E%221996%22&of=id&ap=0', '[17, 66, 69, 71]')

    def test_span_in_bibxxx(self):
        """websearch - span query in MARC tables"""
        self._check('/search?p=909C0y%3A%221992%22-%3E%221996%22&of=id&ap=0', '[17, 66, 69, 71]')

    def test_span_with_spaces(self):
        """websearch - no span query when a space is around"""
        # useful for reaction search
        self._check('/search?p=title%3A%27mu%20--%3E%20e%27&of=id&ap=0', '[67]')
        self._check('/search?p=245%3A%27mu%20--%3E%20e%27&of=id&ap=0', '[67]')

    def test_span_in_author(self):
        """websearch - span query in special author index"""
        self._check('/search?p=author%3A%22Ellis,%20K%22-%3E%22Ellis,%20RZ%22&of=id&ap=0',
                    '[8, 9, 11, 12, 13, 14, 17, 18, 47]')
class WebSearchReferstoCitedbyTest(unittest.TestCase):
    """Test of refersto/citedby search operators."""

    def _check(self, query_url, text):
        # Shared helper: fetch the search page and require `text` in it.
        self.assertEqual(test_web_page_content(CFG_SITE_URL + query_url,
                                               expected_text=text),
                         [])

    def test_refersto_recid(self):
        'websearch - refersto:recid:84'
        self._check('/search?p=refersto%3Arecid%3A84&of=id&ap=0', '[85, 88, 91]')

    def test_refersto_repno(self):
        'websearch - refersto:reportnumber:hep-th/0205061'
        self._check('/search?p=refersto%3Areportnumber%3Ahep-th/0205061&of=id&ap=0', '[91]')

    def test_refersto_author_word(self):
        'websearch - refersto:author:klebanov'
        self._check('/search?p=refersto%3Aauthor%3Aklebanov&of=id&ap=0', '[85, 86, 88, 91]')

    def test_refersto_author_phrase(self):
        'websearch - refersto:author:"Klebanov, I"'
        self._check('/search?p=refersto%3Aauthor%3A%22Klebanov,%20I%22&of=id&ap=0', '[85, 86, 88, 91]')

    def test_citedby_recid(self):
        'websearch - citedby:recid:92'
        self._check('/search?p=citedby%3Arecid%3A92&of=id&ap=0', '[74, 91]')

    def test_citedby_repno(self):
        'websearch - citedby:reportnumber:hep-th/0205061'
        self._check('/search?p=citedby%3Areportnumber%3Ahep-th/0205061&of=id&ap=0', '[78]')

    def test_citedby_author_word(self):
        'websearch - citedby:author:klebanov'
        self._check('/search?p=citedby%3Aauthor%3Aklebanov&of=id&ap=0', '[95]')

    def test_citedby_author_phrase(self):
        'websearch - citedby:author:"Klebanov, I"'
        self._check('/search?p=citedby%3Aauthor%3A%22Klebanov,%20I%22&of=id&ap=0', '[95]')

    def test_refersto_bad_query(self):
        'websearch - refersto:title:'
        # A refersto with an empty operand yields a friendly message.
        self._check('/search?p=refersto%3Atitle%3A',
                    'There are no records referring to title:.')

    def test_citedby_bad_query(self):
        'websearch - citedby:title:'
        self._check('/search?p=citedby%3Atitle%3A',
                    'There are no records cited by title:.')
class WebSearchSPIRESSyntaxTest(unittest.TestCase):
    """Test of SPIRES syntax issues"""
    # NOTE: the test methods below are defined *conditionally* at class
    # creation time, so they only exist (and hence only run) when the
    # corresponding feature is enabled on the installation.
    if CFG_WEBSEARCH_SPIRES_SYNTAX > 0:
        def test_and_not_parens(self):
            'websearch - find a ellis, j and not a enqvist'
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL +'/search?p=find+a+ellis%2C+j+and+not+a+enqvist&of=id&ap=0',
                                                   expected_text='[9, 12, 14, 47, 118]'))
    # The SPIRES date-arithmetic syntax needs the optional dateutil package.
    if DATEUTIL_AVAILABLE:
        def test_dadd_search(self):
            'websearch - find da > today - 3650'
            # XXX: assumes we've reinstalled our site in the last 10 years
            # should return every document in the system
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL +'/search?ln=en&p=find+da+%3E+today+-+3650&f=&of=id',
                                                   expected_text='[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 107, 108, 113, 114, 115, 116, 117, 118, 120, 121, 122, 123, 124, 125, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141]'))
class WebSearchDateQueryTest(unittest.TestCase):
    """Test various date queries."""

    def setUp(self):
        """Establish variables we plan to re-use"""
        # An empty hit set, used as the comparison baseline below.
        self.empty = intbitset()

    def test_search_unit_hits_for_datecreated_previous_millenia(self):
        """websearch - search_unit with datecreated returns >0 hits for docs in the last 1000 years"""
        hits = search_unit('1000-01-01->9999-12-31', 'datecreated')
        self.assertNotEqual(self.empty, hits)

    def test_search_unit_hits_for_datemodified_previous_millenia(self):
        """websearch - search_unit with datemodified returns >0 hits for docs in the last 1000 years"""
        hits = search_unit('1000-01-01->9999-12-31', 'datemodified')
        self.assertNotEqual(self.empty, hits)

    def test_search_unit_in_bibrec_for_datecreated_previous_millenia(self):
        """websearch - search_unit_in_bibrec with creationdate gets >0 hits for past 1000 years"""
        hits = search_unit_in_bibrec("1000-01-01", "9999-12-31", 'creationdate')
        self.assertNotEqual(self.empty, hits)

    def test_search_unit_in_bibrec_for_datecreated_next_millenia(self):
        """websearch - search_unit_in_bibrec with creationdate gets 0 hits for after year 3000"""
        hits = search_unit_in_bibrec("3000-01-01", "9999-12-31", 'creationdate')
        self.assertEqual(self.empty, hits)
class WebSearchSynonymQueryTest(unittest.TestCase):
    """Test of queries using synonyms."""

    def _check(self, query_url, recids):
        # Shared helper: the search must list exactly `recids`.
        self.assertEqual(test_web_page_content(CFG_SITE_URL + query_url,
                                               expected_text=recids),
                         [])

    def test_journal_phrvd(self):
        """websearch - search-time synonym search, journal title"""
        self._check('/search?p=PHRVD&f=journal&of=id', "[66, 72]")

    def test_journal_phrvd_54_1996_4234(self):
        """websearch - search-time synonym search, journal article"""
        self._check('/search?p=PHRVD%2054%20%281996%29%204234&f=journal&of=id', "[66]")

    def test_journal_beta_decay_title(self):
        """websearch - index-time synonym search, beta decay in title"""
        # Both the spelled-out word and the Greek letter must match.
        self._check('/search?p=beta+decay&f=title&of=id', "[59]")
        self._check('/search?p=%CE%B2+decay&f=title&of=id', "[59]")

    def test_journal_beta_decay_global(self):
        """websearch - index-time synonym search, beta decay in any field"""
        self._check('/search?p=beta+decay&of=id', "[52, 59]")
        self._check('/search?p=%CE%B2+decay&of=id', "[52, 59]")

    def test_journal_beta_title(self):
        """websearch - index-time synonym search, beta in title"""
        self._check('/search?p=beta&f=title&of=id', "[59]")
        self._check('/search?p=%CE%B2&f=title&of=id', "[59]")

    def test_journal_beta_global(self):
        """websearch - index-time synonym search, beta in any field"""
        self._check('/search?p=beta&of=id', "[52, 59]")
        self._check('/search?p=%CE%B2&of=id', "[52, 59]")
class WebSearchWashCollectionsTest(unittest.TestCase):
    """Test if the collection argument is washed correctly"""

    def test_wash_coll_when_coll_restricted(self):
        """websearch - washing of restricted daughter collections"""
        expected = ['Books & Reports', 'Theses']
        # Elements 1 and 2 of the wash_colls() result must both keep the
        # restricted daughter collections intact.
        for index in (1, 2):
            washed = wash_colls(cc='', c=['Books & Reports', 'Theses'])[index]
            self.assertEqual(sorted(washed), expected)
class WebSearchAuthorCountQueryTest(unittest.TestCase):
    """Test of queries using authorcount fields."""

    def _check(self, query_url, recids):
        # Shared helper: the search result page must list exactly `recids`.
        self.assertEqual(test_web_page_content(CFG_SITE_URL + query_url,
                                               expected_text=recids),
                         [])

    def test_journal_authorcount_word(self):
        """websearch - author count, word query"""
        self._check('/search?p=4&f=authorcount&of=id', "[51, 54, 59, 66, 92, 96]")

    def test_journal_authorcount_phrase(self):
        """websearch - author count, phrase query"""
        self._check('/search?p=%224%22&f=authorcount&of=id', "[51, 54, 59, 66, 92, 96]")

    def test_journal_authorcount_span(self):
        """websearch - author count, span query"""
        self._check('/search?p=authorcount%3A9-%3E16&of=id', "[69, 71, 127]")

    def test_journal_authorcount_plus(self):
        """websearch - author count, plus query"""
        self._check('/search?p=50%2B&f=authorcount&of=id', "[10, 17]")
class WebSearchItemCountQueryTest(unittest.TestCase):
    """Test of queries using itemcount field/index"""

    def test_itemcount_plus(self):
        """websearch - item count, search for more than one item, using 'plus'"""
        # BUGFIX: added the missing leading '/' before 'search' -- every
        # other test in this file (e.g. test_itemcount_phrase below) builds
        # its URL as CFG_SITE_URL + '/search?...'.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=2%2B&f=itemcount&of=id',
                                               expected_text="[31,32,34]"))

    def test_itemcount_span(self):
        """websearch - item count, search for more than one item, using 'span'"""
        # BUGFIX: added the missing leading '/' before 'search'.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=2->10&f=itemcount&of=id',
                                               expected_text="[31,32,34]"))

    def test_itemcount_phrase(self):
        """websearch - item count, search for records with exactly two items, phrase"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%222%22&f=itemcount&of=id',
                                               expected_text="[31,34]"))

    def test_itemcount_records_with_two_items(self):
        """websearch - item count, search for records with exactly two items"""
        # BUGFIX: added the missing leading '/' before 'search'.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=2&f=itemcount&of=id',
                                               expected_text="[31,34]"))
class WebSearchPerformRequestSearchRefactoringTest(unittest.TestCase):
    """Tests the perform request search API after refactoring."""
    def _run_test(self, test_args, expected_results):
        # Run perform_request_search() with `test_args` (a "k1=v1;k2=v2;..."
        # string) and compare against `expected_results`, which is either a
        # list of recids or -- for non-list output formats -- the raw string
        # written to the request object.
        params = {}
        # Parse "k1=v1;k2=v2;..." into a kwargs dict.  A value containing a
        # bare comma but no ", " is split into a list, so e.g.
        # c=Articles,Preprints becomes ['Articles', 'Preprints'] while a
        # value such as "Ellis, J" stays a single string.
        params.update(map(lambda y: (y[0], ',' in y[1] and ', ' not in y[1] and y[1].split(',') or y[1]), map(lambda x: x.split('=', 1), test_args.split(';'))))
        #params.update(map(lambda x: x.split('=', 1), test_args.split(';')))
        req = cStringIO.StringIO()
        params['req'] = req
        recs = perform_request_search(**params)
        # String expectations are checked against what was written to the
        # request object, not against the returned recid list.
        if isinstance(expected_results, str):
            req.seek(0)
            recs = req.read()
        # this is just used to generate the results from the seearch engine before refactoring
        #if recs != expected_results:
        #    print test_args
        #    print params
        #    print recs
        self.assertEqual(recs, expected_results, "Error, we expect: %s, and we received: %s" % (expected_results, recs))
    def test_queries(self):
        """websearch - testing p_r_s standard arguments and their combinations"""
        self._run_test('p=ellis;f=author;action=Search', [8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 47, 118])
        self._run_test('p=ellis;f=author;sf=title;action=Search', [118, 8, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=ellis;f=author;sf=title;wl=5;action=Search', [118, 8, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=ellis;f=author;sf=title;wl=5;so=a', [118, 13, 47, 10, 12, 18, 17, 11, 9, 14, 16, 8])
        self._run_test('p=ellis;f=author;sf=title;wl=5;so=d', [118, 8, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=ell*;sf=title;wl=5', [118, 8, 15, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=ell*;sf=title;wl=1', [10])
        self._run_test('p=ell*;sf=title;wl=100', [118, 8, 15, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=muon OR kaon;f=author;sf=title;wl=5;action=Search', [])
        self._run_test('p=muon OR kaon;sf=title;wl=5;action=Search', [67, 12])
        self._run_test('p=muon OR kaon;sf=title;wl=5;c=Articles,Preprints', [67, 12])
        self._run_test('p=muon OR kaon;sf=title;wl=5;c=Articles', [67])
        self._run_test('p=muon OR kaon;sf=title;wl=5;c=Preprints', [12])
        # FIXME_TICKET_1174
        # self._run_test('p=el*;rm=citation', [2, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 23, 30, 32, 34, 47, 48, 51, 52, 54, 56, 58, 59, 92, 97, 100, 103, 18, 74, 91, 94, 81])
        # Word-similarity ranking tests only make sense with the built-in
        # ranker; an external one would give different orderings.
        if not get_external_word_similarity_ranker():
            self._run_test('p=el*;rm=wrd', [2, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 23, 30, 32, 34, 47, 48, 51, 52, 54, 56, 58, 59, 74, 81, 91, 92, 94, 97, 100, 103, 109, 118, 123, 127, 128])
        self._run_test('p=el*;sf=title', [118, 123, 100, 32, 8, 15, 16, 81, 97, 34, 23, 127, 58, 2, 14, 9, 128, 11, 30, 109, 52, 48, 94, 17, 56, 18, 91, 59, 12, 92, 74, 54, 103, 10, 51, 47, 13])
        self._run_test('p=boson;rm=citation', [1, 47, 50, 107, 108, 77, 95])
        if not get_external_word_similarity_ranker():
            self._run_test('p=boson;rm=wrd', [108, 77, 47, 50, 95, 1, 107])
        self._run_test('p1=ellis;f1=author;m1=a;op1=a;p2=john;f2=author;m2=a', [9, 12, 14, 18, 118])
        self._run_test('p1=ellis;f1=author;m1=o;op1=a;p2=john;f2=author;m2=o', [9, 12, 14, 18, 118])
        self._run_test('p1=ellis;f1=author;m1=e;op1=a;p2=john;f2=author;m2=e', [])
        self._run_test('p1=ellis;f1=author;m1=a;op1=o;p2=john;f2=author;m2=a', [8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 47, 118])
        self._run_test('p1=ellis;f1=author;m1=o;op1=o;p2=john;f2=author;m2=o', [8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 47, 118])
        self._run_test('p1=ellis;f1=author;m1=e;op1=o;p2=john;f2=author;m2=e', [])
        self._run_test('p1=ellis;f1=author;m1=a;op1=n;p2=john;f2=author;m2=a', [8, 10, 11, 13, 16, 17, 47])
        self._run_test('p1=ellis;f1=author;m1=o;op1=n;p2=john;f2=author;m2=o', [8, 10, 11, 13, 16, 17, 47])
        self._run_test('p1=ellis;f1=author;m1=e;op1=n;p2=john;f2=author;m2=e', [])
        self._run_test('p=Ellis, J;ap=1', [9, 10, 11, 12, 14, 17, 18, 47, 118])
        self._run_test('p=Ellis, J;ap=0', [9, 10, 11, 12, 14, 17, 18, 47, 118])
        self._run_test('p=recid:148x', [])
        self._run_test('p=recid:148x;of=xm;rg=200', "<collection xmlns=\"http://www.loc.gov/MARC21/slim\">\n\n</collection>")
class WebSearchGetRecordTests(unittest.TestCase):
    # Round-trip check for an empty record: create a bare bibrec row,
    # then verify what print_record()/get_record() report for it.
    def setUp(self):
        # Create a fresh, empty bibliographic record to work on.
        self.recid = run_sql("INSERT INTO bibrec(creation_date, modification_date) VALUES(NOW(), NOW())")
    def tearDown(self):
        # Remove the scratch record again so the test site stays clean.
        run_sql("DELETE FROM bibrec WHERE id=%s", (self.recid,))
    def test_get_record(self):
        """bibformat - test print_record and get_record of empty record"""
        from invenio.search_engine import print_record, get_record
        # An empty record carries only its 001 control field (the recid).
        self.assertEqual(print_record(self.recid, 'xm'), '        <record>\n          <controlfield tag="001">%s</controlfield>\n        </record>\n\n        ' % self.recid)
        self.assertEqual(get_record(self.recid), {'001': [([], ' ', ' ', str(self.recid), 1)]})
class WebSearchExactTitleIndexTest(unittest.TestCase):
    """Checks if exact title index works correctly """

    def _check(self, url_suffix, text):
        # Shared helper: fail with merged messages when the page check fails.
        errors = test_web_page_content(CFG_SITE_URL + url_suffix,
                                       expected_text=text)
        if errors:
            self.fail(merge_error_messages(errors))

    def test_exacttitle_query_solves_problems(self):
        """websearch - check exacttitle query solves problems"""
        self._check("/search?ln=en&p=exacttitle%3A'solves+problems'&f=&action_search=Search",
                    "Non-compact supergravity solves problems")

    def test_exacttitle_query_solve_problems(self):
        """websearch - check exacttitle query solve problems"""
        # The singular form must NOT match the record with the plural title.
        self._check("/search?ln=en&p=exacttitle%3A'solve+problems'&f=&action_search=Search",
                    ['Search term', 'solve problems', 'did not match'])

    def test_exacttitle_query_photon_beam(self):
        """websearch - check exacttitle search photon beam"""
        self._check("/search?ln=en&p=exacttitle%3A'photon+beam'&f=&action_search=Search",
                    "Development of photon beam diagnostics")

    def test_exacttitle_query_photons_beam(self):
        """websearch - check exacttitle search photons beam"""
        self._check("/search?ln=en&p=exacttitle%3A'photons+beam'&f=&action_search=Search",
                    ['Search term', 'photons beam', 'did not match'])
# BUGFIX: WebSearchItemCountQueryTest was defined above but never added to
# the suite, so its tests were silently skipped; it is now activated.
TEST_SUITE = make_test_suite(WebSearchWebPagesAvailabilityTest,
                             WebSearchTestSearch,
                             WebSearchTestBrowse,
                             WebSearchTestOpenURL,
                             WebSearchTestCollections,
                             WebSearchTestRecord,
                             WebSearchTestLegacyURLs,
                             WebSearchNearestTermsTest,
                             WebSearchBooleanQueryTest,
                             WebSearchAuthorQueryTest,
                             WebSearchSearchEnginePythonAPITest,
                             WebSearchSearchEngineWebAPITest,
                             WebSearchRecordWebAPITest,
                             WebSearchRestrictedCollectionTest,
                             WebSearchRestrictedCollectionHandlingTest,
                             WebSearchRestrictedPicturesTest,
                             WebSearchRestrictedWebJournalFilesTest,
                             WebSearchRSSFeedServiceTest,
                             WebSearchXSSVulnerabilityTest,
                             WebSearchResultsOverview,
                             WebSearchSortResultsTest,
                             WebSearchSearchResultsXML,
                             WebSearchUnicodeQueryTest,
                             WebSearchMARCQueryTest,
                             WebSearchExtSysnoQueryTest,
                             WebSearchResultsRecordGroupingTest,
                             WebSearchSpecialTermsQueryTest,
                             WebSearchJournalQueryTest,
                             WebSearchStemmedIndexQueryTest,
                             WebSearchSummarizerTest,
                             WebSearchRecordCollectionGuessTest,
                             WebSearchGetFieldValuesTest,
                             WebSearchAddToBasketTest,
                             WebSearchAlertTeaserTest,
                             WebSearchSpanQueryTest,
                             WebSearchReferstoCitedbyTest,
                             WebSearchSPIRESSyntaxTest,
                             WebSearchDateQueryTest,
                             WebSearchTestWildcardLimit,
                             WebSearchSynonymQueryTest,
                             WebSearchWashCollectionsTest,
                             WebSearchAuthorCountQueryTest,
                             WebSearchItemCountQueryTest,
                             WebSearchPerformRequestSearchRefactoringTest,
                             WebSearchGetRecordTests,
                             WebSearchExactTitleIndexTest,
                             WebSearchCJKTokenizedSearchTest)
if __name__ == "__main__":
    # warn_user=True: these regression tests hit (and may modify) a live
    # demo site, so the runner asks for confirmation first.
    run_test_suite(TEST_SUITE, warn_user=True)
WebSearch: item count regression test activation
Signed-off-by: Tibor Simko <c7116e45300b163846dc01f3ed02116e3f8ef7a5@cern.ch>
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
# pylint: disable=E1102
"""WebSearch module regression tests."""
__revision__ = "$Id$"
import unittest
import re
import urlparse, cgi
import sys
import cStringIO
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
from mechanize import Browser, LinkNotFoundError
from invenio.config import CFG_SITE_URL, CFG_SITE_NAME, CFG_SITE_LANG, \
CFG_SITE_RECORD, CFG_SITE_LANGS, \
CFG_SITE_SECURE_URL, CFG_WEBSEARCH_SPIRES_SYNTAX
from invenio.testutils import make_test_suite, \
run_test_suite, \
nottest, \
make_url, make_surl, test_web_page_content, \
merge_error_messages
from invenio.urlutils import same_urls_p
from invenio.dbquery import run_sql
from invenio.search_engine import perform_request_search, \
guess_primary_collection_of_a_record, guess_collection_of_a_record, \
collection_restricted_p, get_permitted_restricted_collections, \
search_pattern, search_unit, search_unit_in_bibrec, \
wash_colls, record_public_p
from invenio import search_engine_summarizer
from invenio.search_engine_utils import get_fieldvalues
from invenio.intbitset import intbitset
from invenio.search_engine import intersect_results_with_collrecs
from invenio.bibrank_bridge_utils import get_external_word_similarity_ranker
from invenio.search_engine_query_parser_unit_tests import DATEUTIL_AVAILABLE
from invenio.bibindex_regression_tests import reindex_word_tables_into_testtables
# True when the French interface language is enabled on this site; used
# below to conditionally define the *_fr test methods.
lang_french_configured = 'fr' in CFG_SITE_LANGS
def parse_url(url):
    """Split `url` into its path components (without the leading empty
    component) and its query string parsed into a dict of value lists."""
    scheme, netloc, path, params, query, fragment = urlparse.urlparse(url)
    return path.split('/')[1:], cgi.parse_qs(query, True)
def string_combinations(str_list):
    """Returns all the possible combinations of the strings in the list.
    Example: for the list ['A','B','Cd'], it will return
    [['Cd', 'B', 'A'], ['B', 'A'], ['Cd', 'A'], ['A'], ['Cd', 'B'], ['B'], ['Cd'], []]
    It adds "B", "H", "F" and "S" values to the results so different
    combinations of them are also checked.
    """
    # All subsets of str_list, smallest first.
    subsets = []
    for size in range(len(str_list) + 1):
        subsets.extend(combinations(str_list, size))
    # Seven fixed suffixes of "B"/"H"/"F"/"S" flags, applied cyclically so
    # that successive subsets are checked with different flag combinations.
    suffixes = (["B", "H", "S"],
                ["B", "H", "F"],
                ["B", "F", "S"],
                ["B", "F"],
                ["B", "S"],
                ["B", "H"],
                ["B"])
    out_list = []
    for idx, subset in enumerate(subsets):
        # Copy the suffix so every result owns a fresh list.
        out_list.append(list(subset) + list(suffixes[idx % 7]))
    return out_list
def combinations(iterable, r):
    """Return r length subsequences of elements from the input iterable.

    Local reimplementation of itertools.combinations() (kept for the old
    Python versions this file still supports).  Tuples are yielded in
    lexicographic order of the input indices.
    """
    # combinations('ABCD', 2) --> AB AC AD BC BD CD
    # combinations(range(4), 3) --> 012 013 023 123
    pool = tuple(iterable)
    n = len(pool)
    if r > n:
        return
    # BUGFIX/portability: materialize the index list explicitly -- under
    # Python 3, range() returns an immutable range object and the
    # `indices[i] += 1` below would raise TypeError.
    indices = list(range(r))
    yield tuple(pool[i] for i in indices)
    while True:
        # Find the rightmost index that can still be advanced.
        for i in reversed(range(r)):
            if indices[i] != i + n - r:
                break
        else:
            return
        indices[i] += 1
        # Reset every index to the right of position i.
        for j in range(i + 1, r):
            indices[j] = indices[j - 1] + 1
        yield tuple(pool[i] for i in indices)
class WebSearchWebPagesAvailabilityTest(unittest.TestCase):
    """Check WebSearch web pages whether they are up or not."""
    # NOTE: the *_fr variants below are defined conditionally at class
    # creation time, so they only exist when French is among the
    # configured site languages.
    def test_search_interface_pages_availability(self):
        """websearch - availability of search interface pages"""
        baseurl = CFG_SITE_URL + '/'
        _exports = ['', 'collection/Poetry', 'collection/Poetry?as=1']
        error_messages = []
        # Collect failures from every page, then fail once with all of them.
        for url in [baseurl + page for page in _exports]:
            error_messages.extend(test_web_page_content(url))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return
    def test_search_results_pages_availability(self):
        """websearch - availability of search results pages"""
        baseurl = CFG_SITE_URL + '/search'
        _exports = ['', '?c=Poetry', '?p=ellis', '/cache', '/log']
        error_messages = []
        for url in [baseurl + page for page in _exports]:
            error_messages.extend(test_web_page_content(url))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return
    def test_search_detailed_record_pages_availability(self):
        """websearch - availability of search detailed record pages"""
        baseurl = CFG_SITE_URL + '/'+ CFG_SITE_RECORD +'/'
        _exports = ['', '1', '1/', '1/files', '1/files/']
        error_messages = []
        for url in [baseurl + page for page in _exports]:
            error_messages.extend(test_web_page_content(url))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return
    def test_browse_results_pages_availability(self):
        """websearch - availability of browse results pages"""
        baseurl = CFG_SITE_URL + '/search'
        _exports = ['?p=ellis&f=author&action_browse=Browse']
        error_messages = []
        for url in [baseurl + page for page in _exports]:
            error_messages.extend(test_web_page_content(url))
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        return
    def test_help_page_availability(self):
        """websearch - availability of Help Central page"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help',
                                               expected_text="Help Central"))
    if lang_french_configured:
        def test_help_page_availability_fr(self):
            """websearch - availability of Help Central page in french"""
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL + '/help/?ln=fr',
                                                   expected_text="Centre d'aide"))
    def test_search_tips_page_availability(self):
        """websearch - availability of Search Tips"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help/search-tips',
                                               expected_text="Search Tips"))
    if lang_french_configured:
        def test_search_tips_page_availability_fr(self):
            """websearch - availability of Search Tips in french"""
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL + '/help/search-tips?ln=fr',
                                                   expected_text="Conseils de recherche"))
    def test_search_guide_page_availability(self):
        """websearch - availability of Search Guide"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help/search-guide',
                                               expected_text="Search Guide"))
    if lang_french_configured:
        def test_search_guide_page_availability_fr(self):
            """websearch - availability of Search Guide in french"""
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL + '/help/search-guide?ln=fr',
                                                   expected_text="Guide de recherche"))
class WebSearchTestLegacyURLs(unittest.TestCase):
""" Check that the application still responds to legacy URLs for
navigating, searching and browsing."""
def test_legacy_collections(self):
""" websearch - collections handle legacy urls """
browser = Browser()
def check(legacy, new, browser=browser):
browser.open(legacy)
got = browser.geturl()
self.failUnless(same_urls_p(got, new), got)
# Use the root URL unless we need more
check(make_url('/', c=CFG_SITE_NAME),
make_url('/', ln=CFG_SITE_LANG))
# Other collections are redirected in the /collection area
check(make_url('/', c='Poetry'),
make_url('/collection/Poetry', ln=CFG_SITE_LANG))
# Drop unnecessary arguments, like ln and as (when they are
# the default value)
args = {'as': 0}
check(make_url('/', c='Poetry', **args),
make_url('/collection/Poetry', ln=CFG_SITE_LANG))
# Otherwise, keep them
args = {'as': 1, 'ln': CFG_SITE_LANG}
check(make_url('/', c='Poetry', **args),
make_url('/collection/Poetry', **args))
# Support the /index.py addressing too
check(make_url('/index.py', c='Poetry'),
make_url('/collection/Poetry', ln=CFG_SITE_LANG))
def test_legacy_search(self):
""" websearch - search queries handle legacy urls """
browser = Browser()
def check(legacy, new, browser=browser):
browser.open(legacy)
got = browser.geturl()
self.failUnless(same_urls_p(got, new), got)
# /search.py is redirected on /search
# Note that `as' is a reserved word in Python 2.5
check(make_url('/search.py', p='nuclear', ln='en') + 'as=1',
make_url('/search', p='nuclear', ln='en') + 'as=1')
if lang_french_configured:
def test_legacy_search_fr(self):
""" websearch - search queries handle legacy urls """
browser = Browser()
def check(legacy, new, browser=browser):
browser.open(legacy)
got = browser.geturl()
self.failUnless(same_urls_p(got, new), got)
# direct recid searches are redirected to /CFG_SITE_RECORD
check(make_url('/search.py', recid=1, ln='fr'),
make_url('/%s/1' % CFG_SITE_RECORD, ln='fr'))
def test_legacy_search_help_link(self):
"""websearch - legacy Search Help page link"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/help/search/index.en.html',
expected_text="Help Central"))
# Only defined when the French locale is configured on this installation.
if lang_french_configured:
    def test_legacy_search_tips_link(self):
        """websearch - legacy Search Tips page link"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/help/search/tips.fr.html',
                                               expected_text="Conseils de recherche"))
def test_legacy_search_guide_link(self):
    """websearch - legacy Search Guide page link"""
    # The old static guide page must still resolve and show its title.
    url = CFG_SITE_URL + '/help/search/guide.en.html'
    errors = test_web_page_content(url, expected_text="Search Guide")
    self.assertEqual([], errors)
class WebSearchTestRecord(unittest.TestCase):
    """ Check the interface of the /CFG_SITE_RECORD results """

    def test_format_links(self):
        """ websearch - check format links for records """
        browser = Browser()
        # We open the record in all known HTML formats
        for hformat in ('hd', 'hx', 'hm'):
            browser.open(make_url('/%s/1' % CFG_SITE_RECORD, of=hformat))
            if hformat == 'hd':
                # hd format should have a link to the following
                # formats
                for oformat in ('hx', 'hm', 'xm', 'xd'):
                    target = make_url('/%s/1/export/%s?ln=en' % (CFG_SITE_RECORD, oformat))
                    try:
                        browser.find_link(url=target)
                    except LinkNotFoundError:
                        self.fail('link %r should be in page' % target)
            else:
                # non-hd HTML formats should have a link back to
                # the main detailed record
                target = make_url('/%s/1' % CFG_SITE_RECORD)
                try:
                    browser.find_link(url=target)
                except LinkNotFoundError:
                    self.fail('link %r should be in page' % target)
        return

    def test_exported_formats(self):
        """ websearch - check formats exported through /CFG_SITE_RECORD/1/export/ URLs"""
        # Each export format is checked against a signature string that
        # only that serialization of record 1 would contain.
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/hm' % CFG_SITE_RECORD),
                                               expected_text='245__ $$aALEPH experiment'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/hd' % CFG_SITE_RECORD),
                                               expected_text='<strong>ALEPH experiment'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/xm' % CFG_SITE_RECORD),
                                               expected_text='<subfield code="a">ALEPH experiment'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/xd' % CFG_SITE_RECORD),
                                               expected_text='<dc:title>ALEPH experiment'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/hs' % CFG_SITE_RECORD),
                                               expected_text='<a href="/%s/1?ln=%s">ALEPH experiment' % \
                                               (CFG_SITE_RECORD, CFG_SITE_LANG)))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/hx' % CFG_SITE_RECORD),
                                               expected_text='title = "{ALEPH experiment'))
        # With ot=245 only field 245 should be output; 001 must be absent.
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/t?ot=245' % CFG_SITE_RECORD),
                                               expected_text='245__ $$aALEPH experiment'))
        self.assertNotEqual([],
                         test_web_page_content(make_url('/%s/1/export/t?ot=245' % CFG_SITE_RECORD),
                                               expected_text='001__'))
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/1/export/h?ot=245' % CFG_SITE_RECORD),
                                               expected_text='245__ $$aALEPH experiment'))
        self.assertNotEqual([],
                         test_web_page_content(make_url('/%s/1/export/h?ot=245' % CFG_SITE_RECORD),
                                               expected_text='001__'))
        return

    def test_plots_tab(self):
        """ websearch - test to ensure the plots tab is working """
        self.assertEqual([],
                         test_web_page_content(make_url('/%s/8/plots' % CFG_SITE_RECORD),
                                               expected_text='div id="clip"',
                                               unexpected_text='Abstract'))

    def test_meta_header(self):
        """ websearch - test that metadata embedded in header of hd
        relies on hdm format and Default_HTML_meta bft, but hook is in
        websearch to display the format
        """
        self.assertEqual([],
                         test_web_page_content(make_url('/record/1'),
                                               expected_text='<meta content="ALEPH experiment: Candidate of Higgs boson production" name="citation_title" />'))
        return
class WebSearchTestCollections(unittest.TestCase):
    """Check navigation inside collection pages (pagination, hierarchy
    links, record links and the `em' display-mask parameter)."""

    def test_traversal_links(self):
        """ websearch - traverse all the publications of a collection """
        browser = Browser()
        try:
            # Check both simple (as=0) and advanced (as=1) interfaces.
            for aas in (0, 1):
                args = {'as': aas}
                browser.open(make_url('/collection/Preprints', **args))
                # Walk through the pagination links by their jrec offsets
                # (11 appears twice: forward then back).
                for jrec in (11, 21, 11, 27):
                    args = {'jrec': jrec, 'cc': 'Preprints'}
                    if aas:
                        args['as'] = aas
                    url = make_url('/search', **args)
                    try:
                        browser.follow_link(url=url)
                    except LinkNotFoundError:
                        # The page may emit the link with an explicit ln;
                        # retry with it before giving up.
                        args['ln'] = CFG_SITE_LANG
                        url = make_url('/search', **args)
                        browser.follow_link(url=url)
        except LinkNotFoundError:
            self.fail('no link %r in %r' % (url, browser.geturl()))

    def test_collections_links(self):
        """ websearch - enter in collections and subcollections """
        browser = Browser()
        def tryfollow(url):
            # Follow `url' from the current page, dumping the page body
            # on failure to ease debugging.
            cur = browser.geturl()
            body = browser.response().read()
            try:
                browser.follow_link(url=url)
            except LinkNotFoundError:
                print body
                self.fail("in %r: could not find %r" % (
                    cur, url))
            return
        for aas in (0, 1):
            if aas:
                kargs = {'as': 1}
            else:
                kargs = {}
            kargs['ln'] = CFG_SITE_LANG
            # We navigate from immediate son to immediate son...
            browser.open(make_url('/', **kargs))
            tryfollow(make_url('/collection/Articles%20%26%20Preprints',
                               **kargs))
            tryfollow(make_url('/collection/Articles', **kargs))
            # But we can also jump to a grandson immediately
            browser.back()
            browser.back()
            tryfollow(make_url('/collection/ALEPH', **kargs))
        return

    def test_records_links(self):
        """ websearch - check the links toward records in leaf collections """
        browser = Browser()
        browser.open(make_url('/collection/Preprints'))
        def harvest():
            """ Parse all the links in the page, and check that for
            each link to a detailed record, we also have the
            corresponding link to the similar records."""
            records = set()
            similar = set()
            for link in browser.links():
                path, q = parse_url(link.url)
                if not path:
                    continue
                if path[0] == CFG_SITE_RECORD:
                    records.add(int(path[1]))
                    continue
                if path[0] == 'search':
                    # rm=wrd marks a "similar records" link; its p is
                    # of the form "recid:<n>".
                    if not q.get('rm') == ['wrd']:
                        continue
                    recid = q['p'][0].split(':')[1]
                    similar.add(int(recid))
            self.failUnlessEqual(records, similar)
            return records
        # We must have 10 links to the corresponding /CFG_SITE_RECORD
        found = harvest()
        self.failUnlessEqual(len(found), 10)
        # When clicking on the "Search" button, we must also have
        # these 10 links on the records.
        browser.select_form(name="search")
        browser.submit()
        found = harvest()
        self.failUnlessEqual(len(found), 10)
        return

    def test_em_parameter(self):
        """ websearch - check different values of em return different parts of the collection page"""
        # Each em token enables one page element; every element must be
        # present exactly when its token is in the combination.
        for combi in string_combinations(["L", "P", "Prt"]):
            url = '/collection/Articles?em=%s' % ','.join(combi)
            expected_text = ["<strong>Development of photon beam diagnostics for VUV radiation from a SASE FEL</strong>"]
            unexpected_text = []
            if "H" in combi:
                expected_text.append(">Atlantis Institute of Fictive Science</a>")
            else:
                unexpected_text.append(">Atlantis Institute of Fictive Science</a>")
            if "F" in combi:
                expected_text.append("This site is also available in the following languages:")
            else:
                unexpected_text.append("This site is also available in the following languages:")
            if "S" in combi:
                expected_text.append('value="Search"')
            else:
                unexpected_text.append('value="Search"')
            if "L" in combi:
                expected_text.append('Search also:')
            else:
                unexpected_text.append('Search also:')
            if "Prt" in combi or "P" in combi:
                expected_text.append('<div class="portalboxheader">ABOUT ARTICLES</div>')
            else:
                unexpected_text.append('<div class="portalboxheader">ABOUT ARTICLES</div>')
            self.assertEqual([], test_web_page_content(make_url(url),
                                                       expected_text=expected_text,
                                                       unexpected_text=unexpected_text))
        return
class WebSearchTestBrowse(unittest.TestCase):
    """Check index browsing, including access rules for records that
    live in restricted collections."""

    def test_browse_field(self):
        """ websearch - check that browsing works """
        browser = Browser()
        browser.open(make_url('/'))
        browser.select_form(name='search')
        browser['f'] = ['title']
        browser.submit(name='action_browse')
        def collect():
            # We'll get a few links to search for the actual hits, plus a
            # link to the following results.
            res = []
            for link in browser.links(url_regex=re.compile(CFG_SITE_URL +
                                                           r'/search\?')):
                if link.text == 'Advanced Search':
                    continue
                dummy, q = parse_url(link.url)
                res.append((link, q))
            return res
        # if we follow the last link, we should get another
        # batch. There is an overlap of one item.
        batch_1 = collect()
        browser.follow_link(link=batch_1[-1][0])
        batch_2 = collect()
        # FIXME: we cannot compare the whole query, as the collection
        # set is not equal
        self.failUnlessEqual(batch_1[-2][1]['p'], batch_2[0][1]['p'])

    def test_browse_restricted_record_as_unauthorized_user(self):
        """websearch - browse for a record that belongs to a restricted collection as an unauthorized user."""
        # Guests may see the index terms but not the restricted hit itself.
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&ln=en',
                                               username = 'guest',
                                               expected_text = ['Hits', '088__a'],
                                               unexpected_text = ['>CERN-THESIS-99-074</a>'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))

    def test_browse_restricted_record_as_unauthorized_user_in_restricted_collection(self):
        """websearch - browse for a record that belongs to a restricted collection as an unauthorized user."""
        # Browsing directly inside the restricted collection must be refused.
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&c=ALEPH+Theses&ln=en',
                                               username='guest',
                                               expected_text= ['This collection is restricted'],
                                               unexpected_text= ['Hits', '>CERN-THESIS-99-074</a>'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))

    def test_browse_restricted_record_as_authorized_user(self):
        """websearch - browse for a record that belongs to a restricted collection as an authorized user."""
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&ln=en',
                                               username='admin',
                                               password='',
                                               expected_text= ['Hits', '088__a'],
                                               unexpected_text = ['>CERN-THESIS-99-074</a>'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))

    def test_browse_restricted_record_as_authorized_user_in_restricted_collection(self):
        """websearch - browse for a record that belongs to a restricted collection as an authorized user."""
        # Admin browsing inside the restricted collection does see the record.
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?p=CERN-THESIS-99-074&f=088__a&action_browse=Browse&c=ALEPH+Theses&ln=en',
                                               username='admin',
                                               password='',
                                               expected_text= ['Hits', '>CERN-THESIS-99-074</a>'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))

    def test_browse_exact_author_help_link(self):
        # Browsing author-like indexes should suggest the alternate
        # (exact/first author) index, in both simple and advanced search.
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=Dasse%2C+Michel&f=author&action_browse=Browse',
                                               username = 'guest',
                                               expected_text = ['Did you mean to browse in', 'index?'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=Dasse%2C+Michel&f=firstauthor&action_browse=Browse',
                                               username = 'guest',
                                               expected_text = ['Did you mean to browse in', 'index?'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
        error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&as=1&m1=a&p1=Dasse%2C+Michel&f1=author&op1=a&m2=a&p2=&f2=firstauthor&op2=a&m3=a&p3=&f3=&action_browse=Browse',
                                               username = 'guest',
                                               expected_text = ['Did you mean to browse in', 'index?'])
        if error_messages:
            self.fail(merge_error_messages(error_messages))
class WebSearchTestOpenURL(unittest.TestCase):
    """Check that OpenURL 0.1 and 1.0 requests are resolved into the
    corresponding precise isbn search."""

    def _assert_isbn_query(self, url):
        # Every OpenURL variant below must redirect to the same search.
        br = Browser()
        br.open(url)
        dummy, resolved_q = parse_url(br.geturl())
        self.failUnlessEqual(resolved_q, {
            'sc' : ['1'],
            'p' : ['isbn:"0387940758"'],
            'of' : ['hd']
        })

    def test_isbn_01(self):
        """ websearch - isbn query via OpenURL 0.1"""
        self._assert_isbn_query(make_url('/openurl', isbn='0387940758'))

    def test_isbn_10_rft_id(self):
        """ websearch - isbn query via OpenURL 1.0 - rft_id"""
        self._assert_isbn_query(make_url('/openurl', rft_id='urn:ISBN:0387940758'))

    def test_isbn_10(self):
        """ websearch - isbn query via OpenURL 1.0"""
        self._assert_isbn_query(make_url('/openurl?rft.isbn=0387940758'))
class WebSearchTestSearch(unittest.TestCase):
    """Check the behaviour of the main /search interface: query
    extension, nearest-terms proposals, interface switching and the
    `em' display-mask parameter."""

    def test_hits_in_other_collection(self):
        """ websearch - check extension of a query to the home collection """
        browser = Browser()
        # We do a precise search in an isolated collection
        browser.open(make_url('/collection/ISOLDE', ln='en'))
        browser.select_form(name='search')
        browser['f'] = ['author']
        browser['p'] = 'matsubara'
        browser.submit()
        dummy, current_q = parse_url(browser.geturl())
        link = browser.find_link(text_regex=re.compile('.*hit', re.I))
        dummy, target_q = parse_url(link.url)
        # the target query should be the current query without any c
        # or cc specified.
        for f in ('cc', 'c', 'action_search'):
            if f in current_q:
                del current_q[f]
        self.failUnlessEqual(current_q, target_q)

    def test_nearest_terms(self):
        """ websearch - provide a list of nearest terms """
        browser = Browser()
        browser.open(make_url(''))
        # Search something weird
        browser.select_form(name='search')
        browser['p'] = 'gronf'
        browser.submit()
        dummy, original = parse_url(browser.geturl())
        # Normalize the query: drop fields absent from the proposed
        # links ('cc' included) and pin an explicit language.
        for to_drop in ('cc', 'action_search', 'f'):
            if to_drop in original:
                del original[to_drop]
        if 'ln' not in original:
            original['ln'] = [CFG_SITE_LANG]
        # we should get a few searches back, which are identical
        # except for the p field being substituted (and the cc field
        # being dropped).
        for link in browser.links(url_regex=re.compile(CFG_SITE_URL + r'/search\?')):
            if link.text == 'Advanced Search':
                continue
            dummy, target = parse_url(link.url)
            if 'ln' not in target:
                target['ln'] = [CFG_SITE_LANG]
            original['p'] = [link.text]
            self.failUnlessEqual(original, target)
        return

    def test_switch_to_simple_search(self):
        """ websearch - switch to simple search """
        browser = Browser()
        args = {'as': 1}
        browser.open(make_url('/collection/ISOLDE', **args))
        browser.select_form(name='search')
        browser['p1'] = 'tandem'
        browser['f1'] = ['title']
        browser.submit()
        browser.follow_link(text='Simple Search')
        dummy, q = parse_url(browser.geturl())
        # The advanced p1/f1 pair must be carried over as p/f.
        self.failUnlessEqual(q, {'cc': ['ISOLDE'],
                                 'p': ['tandem'],
                                 'f': ['title'],
                                 'ln': ['en']})

    def test_switch_to_advanced_search(self):
        """ websearch - switch to advanced search """
        browser = Browser()
        browser.open(make_url('/collection/ISOLDE'))
        browser.select_form(name='search')
        browser['p'] = 'tandem'
        browser['f'] = ['title']
        browser.submit()
        browser.follow_link(text='Advanced Search')
        dummy, q = parse_url(browser.geturl())
        # The simple p/f pair must be carried over as p1/f1, with as=1.
        self.failUnlessEqual(q, {'cc': ['ISOLDE'],
                                 'p1': ['tandem'],
                                 'f1': ['title'],
                                 'as': ['1'],
                                 'ln' : ['en']})

    def test_no_boolean_hits(self):
        """ websearch - check the 'no boolean hits' proposed links """
        browser = Browser()
        browser.open(make_url(''))
        browser.select_form(name='search')
        browser['p'] = 'quasinormal muon'
        browser.submit()
        dummy, q = parse_url(browser.geturl())
        for to_drop in ('cc', 'action_search', 'f'):
            if to_drop in q:
                del q[to_drop]
        # One proposed link per basic search unit of the failed query.
        for bsu in ('quasinormal', 'muon'):
            l = browser.find_link(text=bsu)
            q['p'] = bsu
            if not same_urls_p(l.url, make_url('/search', **q)):
                self.fail(repr((l.url, make_url('/search', **q))))

    def test_similar_authors(self):
        """ websearch - test similar authors box """
        browser = Browser()
        browser.open(make_url(''))
        browser.select_form(name='search')
        browser['p'] = 'Ellis, R K'
        browser['f'] = ['author']
        browser.submit()
        l = browser.find_link(text="Ellis, R S")
        self.failUnless(same_urls_p(l.url, make_url('/search',
                                                    p="Ellis, R S",
                                                    f='author',
                                                    ln='en')))

    def test_em_parameter(self):
        """ websearch - check different values of em return different parts of the search page"""
        # Each em token enables one page element; every element must be
        # present exactly when its token is in the combination.
        for combi in string_combinations(["K", "A", "I", "O"]):
            url = '/search?ln=en&cc=Articles+%%26+Preprints&sc=1&c=Articles&c=Preprints&em=%s' % ','.join(combi)
            expected_text = ["<strong>Development of photon beam diagnostics for VUV radiation from a SASE FEL</strong>"]
            unexpected_text = []
            if "H" in combi:
                expected_text.append(">Atlantis Institute of Fictive Science</a>")
            else:
                unexpected_text.append(">Atlantis Institute of Fictive Science</a>")
            if "F" in combi:
                expected_text.append("This site is also available in the following languages:")
            else:
                unexpected_text.append("This site is also available in the following languages:")
            if "S" in combi:
                expected_text.append('value="Search"')
            else:
                unexpected_text.append('value="Search"')
            if "K" in combi:
                expected_text.append('value="Add to basket"')
            else:
                unexpected_text.append('value="Add to basket"')
            if "A" in combi:
                expected_text.append('Interested in being notified about new results for this query?')
            else:
                unexpected_text.append('Interested in being notified about new results for this query?')
            if "I" in combi:
                expected_text.append('jump to record:')
            else:
                unexpected_text.append('jump to record:')
            if "O" in combi:
                expected_text.append('<th class="searchresultsboxheader"><strong>Results overview:</strong> Found <strong>')
            else:
                unexpected_text.append('<th class="searchresultsboxheader"><strong>Results overview:</strong> Found <strong>')
            self.assertEqual([], test_web_page_content(make_url(url),
                                                       expected_text=expected_text,
                                                       unexpected_text=unexpected_text))
        return
class WebSearchCJKTokenizedSearchTest(unittest.TestCase):
    """
    Reindexes record 104 (the one with chinese poetry) with use of BibIndexCJKTokenizer.
    After tests it reindexes record 104 back with BibIndexDefaultTokenizer.
    Checks if one can find record 104 specifying only one or two CJK characters.
    """
    # Class-level state shared by the classmethod hooks below: the
    # reindexing is performed once before the first test and undone
    # after the second (last) test of the class has run.
    test_counter = 0
    reindexed = False

    @classmethod
    def setUp(cls):
        # NOTE: first parameter renamed self -> cls: these hooks are
        # classmethods and always received the class, never an instance.
        if not cls.reindexed:
            reindex_word_tables_into_testtables('title',
                                                recids=[[104,104]],
                                                prefix = False,
                                                tokenizer = 'BibIndexCJKTokenizer')
            cls.reindexed = True

    @classmethod
    def tearDown(cls):
        # Restore the default tokenizer after the last test.
        cls.test_counter += 1
        if cls.test_counter == 2:
            reindex_word_tables_into_testtables('title',
                                                recids=[[104,104]],
                                                prefix = False,
                                                tokenizer = 'BibIndexDefaultTokenizer')

    def test_title_cjk_tokenized_two_characters(self):
        """CJKTokenizer - test for finding chinese poetry with two CJK characters"""
        self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=title%3A敬亭&f=&of=id',
                                                   expected_text='[104]'))

    def test_title_cjk_tokenized_single_character(self):
        """CJKTokenizer - test for finding chinese poetry with one CJK character"""
        self.assertEqual([], test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=title%3A亭&f=&of=id',
                                                   expected_text='[104]'))
class WebSearchTestWildcardLimit(unittest.TestCase):
    """Checks if the wildcard limit is correctly passed and that
    users without authorization can not exploit it"""

    def test_wildcard_limit_correctly_passed_when_not_set(self):
        """websearch - wildcard limit is correctly passed when default"""
        # Omitting wl must be equivalent to the default limit of 1000.
        self.assertEqual(search_pattern(p='e*', f='author'),
                         search_pattern(p='e*', f='author', wl=1000))

    def test_wildcard_limit_correctly_passed_when_set(self):
        """websearch - wildcard limit is correctly passed when set"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=e*&f=author&of=id&wl=5&rg=100',
                                               expected_text="[9, 10, 11, 17, 46, 48, 50, 51, 52, 53, 54, 67, 72, 74, 81, 88, 92, 96]"))

    def test_wildcard_limit_correctly_not_active(self):
        """websearch - wildcard limit is not active when there is no wildcard query"""
        # wl must be ignored for a plain (non-wildcard) pattern.
        self.assertEqual(search_pattern(p='ellis', f='author'),
                         search_pattern(p='ellis', f='author', wl=1))

    def test_wildcard_limit_increased_by_authorized_users(self):
        """websearch - wildcard limit increased by authorized user"""
        browser = Browser()
        # try a search query, with no wildcard limit set by the user
        browser.open(make_url('/search?p=a*&of=id'))
        recid_list_guest_no_limit = browser.response().read() # so the limit is CFG_WEBSEARCH_WILDCARD_LIMIT
        # try a search query, with a wildcard limit imposed by the user;
        # wl=1000000 is a very high limit, higher than what the
        # CFG_WEBSEARCH_WILDCARD_LIMIT might be
        browser.open(make_url('/search?p=a*&of=id&wl=1000000'))
        recid_list_guest_with_limit = browser.response().read()
        # The same results should be returned for a search without the
        # wildcard limit set by the user and for a search with a large
        # limit set by the user: no matter how large the limit is, the
        # wildcard query is capped by CFG_WEBSEARCH_WILDCARD_LIMIT for
        # a guest user.
        self.failIf(len(recid_list_guest_no_limit.split(',')) != len(recid_list_guest_with_limit.split(',')))
        # login as admin
        browser.open(make_surl('/youraccount/login'))
        browser.select_form(nr=0)
        browser['p_un'] = 'admin'
        browser['p_pw'] = ''
        browser.submit()
        # try a search query, with a wildcard limit imposed by an authorized
        # user; wl=10000 is higher than CFG_WEBSEARCH_WILDCARD_LIMIT might be
        browser.open(make_surl('/search?p=a*&of=id&wl=10000'))
        recid_list_authuser_with_limit = browser.response().read()
        # An authorized user can set whatever limit he might wish, so the
        # results returned for auth. users should meet or exceed the
        # results returned for unauth. users.
        self.failUnless(len(recid_list_guest_no_limit.split(',')) <= len(recid_list_authuser_with_limit.split(',')))
        # logout
        browser.open(make_surl('/youraccount/logout'))
        browser.response().read()
        browser.close()
class WebSearchNearestTermsTest(unittest.TestCase):
    """Check various alternatives of searches leading to the nearest
    terms box."""

    def test_nearest_terms_box_in_okay_query(self):
        """ websearch - no nearest terms box for a successful query """
        # A successful query shows the normal results page instead.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
                                               expected_text="jump to record"))

    def test_nearest_terms_box_in_unsuccessful_simple_query(self):
        """ websearch - nearest terms box for unsuccessful simple query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellisz',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=embed",
                                               expected_link_label='embed'))

    def test_nearest_terms_box_in_unsuccessful_simple_accented_query(self):
        """ websearch - nearest terms box for unsuccessful accented query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=elliszà',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=embed",
                                               expected_link_label='embed'))

    def test_nearest_terms_box_in_unsuccessful_structured_query(self):
        """ websearch - nearest terms box for unsuccessful structured query """
        # Both f=... and the p=field:term syntax must be handled.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellisz&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=eisenhandler&f=author",
                                               expected_link_label='eisenhandler'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=author%3Aellisz',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=author%3Aeisenhandler",
                                               expected_link_label='eisenhandler'))

    def test_nearest_terms_box_in_query_with_invalid_index(self):
        """ websearch - nearest terms box for queries with invalid indexes specified """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=bednarz%3Aellis',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=bednarz",
                                               expected_link_label='bednarz'))
        # A numeric "index" is reported as such, falling back to a
        # recid interpretation.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=1%3Aellis',
                                               expected_text="no index 1.",
                                               expected_link_target=CFG_SITE_URL+"/record/47?ln=en",
                                               expected_link_label="Detailed record"))

    def test_nearest_terms_box_in_unsuccessful_phrase_query(self):
        """ websearch - nearest terms box for unsuccessful phrase query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%22Ellis%2C+Z%22',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=author%3A%22Enqvist%2C+K%22",
                                               expected_link_label='Enqvist, K'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%22ellisz%22&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=%22Enqvist%2C+K%22&f=author",
                                               expected_link_label='Enqvist, K'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%22elliszà%22&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=%22Enqvist%2C+K%22&f=author",
                                               expected_link_label='Enqvist, K'))

    def test_nearest_terms_box_in_unsuccessful_partial_phrase_query(self):
        """ websearch - nearest terms box for unsuccessful partial phrase query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%27Ellis%2C+Z%27',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=author%3A%27Enqvist%2C+K%27",
                                               expected_link_label='Enqvist, K'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%27ellisz%27&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=%27Enqvist%2C+K%27&f=author",
                                               expected_link_label='Enqvist, K'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%27elliszà%27&f=author',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=%27Enqvist%2C+K%27&f=author",
                                               expected_link_label='Enqvist, K'))

    def test_nearest_terms_box_in_unsuccessful_partial_phrase_advanced_query(self):
        """ websearch - nearest terms box for unsuccessful partial phrase advanced search query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p1=aaa&f1=title&m1=p&as=1',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&f1=title&as=1&p1=A+simple+functional+form+for+proton-nucleus+total+reaction+cross+sections&m1=p",
                                               expected_link_label='A simple functional form for proton-nucleus total reaction cross sections'))

    def test_nearest_terms_box_in_unsuccessful_exact_phrase_advanced_query(self):
        """ websearch - nearest terms box for unsuccessful exact phrase advanced search query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p1=aaa&f1=title&m1=e&as=1',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&f1=title&as=1&p1=A+simple+functional+form+for+proton-nucleus+total+reaction+cross+sections&m1=e",
                                               expected_link_label='A simple functional form for proton-nucleus total reaction cross sections'))

    def test_nearest_terms_box_in_unsuccessful_boolean_query(self):
        """ websearch - nearest terms box for unsuccessful boolean query """
        # Only the failing term of the boolean expression is substituted.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=title%3Aellisz+author%3Aellisz',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aellisz",
                                               expected_link_label='energi'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=title%3Aenergi+author%3Aenergie',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aenqvist",
                                               expected_link_label='enqvist'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=title%3Aellisz+author%3Aellisz&f=keyword',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aellisz&f=keyword",
                                               expected_link_label='energi'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=title%3Aenergi+author%3Aenergie&f=keyword',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=title%3Aenergi+author%3Aenqvist&f=keyword",
                                               expected_link_label='enqvist'))

    def test_nearest_terms_box_in_unsuccessful_uppercase_query(self):
        """ websearch - nearest terms box for unsuccessful uppercase query """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=fOo%3Atest',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=food",
                                               expected_link_label='food'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=arXiv%3A1007.5048',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=artist",
                                               expected_link_label='artist'))

    def test_nearest_terms_box_in_unsuccessful_spires_query(self):
        """ websearch - nearest terms box for unsuccessful spires query """
        # SPIRES-syntax queries keep their syntax in the proposed links.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=find+a+foobar',
                                               expected_text="Nearest terms in any collection are",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=find+a+finch",
                                               expected_link_label='finch'))
class WebSearchBooleanQueryTest(unittest.TestCase):
    """Check various boolean queries."""

    def test_successful_boolean_query(self):
        """ websearch - successful boolean query """
        errors = test_web_page_content(CFG_SITE_URL + '/search?p=ellis+muon',
                                       expected_text="records found",
                                       expected_link_label="Detailed record")
        self.assertEqual([], errors)

    def test_unsuccessful_boolean_query_where_all_individual_terms_match(self):
        """ websearch - unsuccessful boolean query where all individual terms match """
        # Every term exists in isolation, but their conjunction is empty.
        url = CFG_SITE_URL + '/search?p=ellis+muon+letter'
        errors = test_web_page_content(url,
                                       expected_text="Boolean query returned no hits. Please combine your search terms differently.")
        self.assertEqual([], errors)

    def test_unsuccessful_boolean_query_in_advanced_search_where_all_individual_terms_match(self):
        """ websearch - unsuccessful boolean query in advanced search where all individual terms match """
        url = CFG_SITE_URL + '/search?m1=a&p1=ellis&op1=a&m2=a&p2=muon&op2=a&p3=letter'
        errors = test_web_page_content(url,
                                       expected_text="Boolean query returned no hits. Please combine your search terms differently.")
        self.assertEqual([], errors)
class WebSearchAuthorQueryTest(unittest.TestCase):
    """Check various author-related queries."""

    def test_propose_similar_author_names_box(self):
        """ websearch - propose similar author names box """
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=Ellis%2C+R&f=author',
                                               expected_text="See also: similar author names",
                                               expected_link_target=CFG_SITE_URL+"/search?ln=en&p=Ellis%2C+R+K&f=author",
                                               expected_link_label="Ellis, R K"))

    def test_do_not_propose_similar_author_names_box(self):
        """ websearch - do not propose similar author names box """
        errmsgs = test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%22Ellis%2C+R%22',
                                        expected_link_target=CFG_SITE_URL+"/search?ln=en&p=Ellis%2C+R+K&f=author",
                                        expected_link_label="Ellis, R K")
        # test_web_page_content complains when the link is *absent*,
        # which is exactly the outcome we want here.  Guard against an
        # empty error list: previously errmsgs[0] raised IndexError
        # (a test error rather than a clean failure) when the link was
        # unexpectedly present.
        if errmsgs and errmsgs[0].find("does not contain link to") > -1:
            pass
        else:
            self.fail("Should not propose similar author names box.")
        return
class WebSearchSearchEnginePythonAPITest(unittest.TestCase):
"""Check typical search engine Python API calls on the demo data."""
def test_search_engine_python_api_for_failed_query(self):
    """websearch - search engine Python API for failed query"""
    # A pattern matching nothing yields an empty hit list.
    self.assertEqual([],
                     perform_request_search(p='aoeuidhtns'))
def test_search_engine_python_api_for_successful_query(self):
    """websearch - search engine Python API for successful query"""
    self.assertEqual([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118],
                     perform_request_search(p='ellis'))
def test_search_engine_python_api_for_successful_query_format_intbitset(self):
    """websearch - search engine Python API for successful query, output format intbitset"""
    self.assertEqual(intbitset([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118]),
                     perform_request_search(p='ellis', of='intbitset'))
def test_search_engine_web_api_ignore_paging_parameter(self):
    """websearch - search engine Python API for successful query, ignore paging parameters"""
    # rg/jrec affect only web display; the API returns the full list.
    self.assertEqual([8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118],
                     perform_request_search(p='ellis', rg=5, jrec=3))
def test_search_engine_python_api_respect_sorting_parameter(self):
    """websearch - search engine Python API for successful query, respect sorting parameters"""
    # Default ordering vs. explicit sort field (sf) ordering.
    self.assertEqual([77, 84, 85],
                     perform_request_search(p='klebanov'))
    self.assertEqual([77, 85, 84],
                     perform_request_search(p='klebanov', sf='909C4v'))
def test_search_engine_python_api_respect_ranking_parameter(self):
    """websearch - search engine Python API for successful query, respect ranking parameters"""
    # Default ordering vs. citation-ranked (rm='citation') ordering.
    self.assertEqual([77, 84, 85],
                     perform_request_search(p='klebanov'))
    self.assertEqual([85, 77, 84],
                     perform_request_search(p='klebanov', rm='citation'))
def test_search_engine_python_api_for_existing_record(self):
"""websearch - search engine Python API for existing record"""
self.assertEqual([8],
perform_request_search(recid=8))
def test_search_engine_python_api_for_existing_record_format_intbitset(self):
"""websearch - search engine Python API for existing record, output format intbitset"""
self.assertEqual(intbitset([8]),
perform_request_search(recid=8, of='intbitset'))
def test_search_engine_python_api_for_nonexisting_record(self):
"""websearch - search engine Python API for non-existing record"""
self.assertEqual([],
perform_request_search(recid=16777215))
def test_search_engine_python_api_for_nonexisting_record_format_intbitset(self):
"""websearch - search engine Python API for non-existing record, output format intbitset"""
self.assertEqual(intbitset(),
perform_request_search(recid=16777215, of='intbitset'))
def test_search_engine_python_api_for_nonexisting_collection(self):
"""websearch - search engine Python API for non-existing collection"""
self.assertEqual([],
perform_request_search(c='Foo'))
def test_search_engine_python_api_for_range_of_records(self):
"""websearch - search engine Python API for range of records"""
self.assertEqual([1, 2, 3, 4, 5, 6, 7, 8, 9],
perform_request_search(recid=1, recidb=10))
def test_search_engine_python_api_ranked_by_citation(self):
"""websearch - search engine Python API for citation ranking"""
self.assertEqual([82, 83, 87, 89],
perform_request_search(p='recid:81', rm='citation'))
    def test_search_engine_python_api_textmarc_full(self):
        """websearch - search engine Python API for Text MARC output, full"""
        # perform_request_search() writes formatted records to the request
        # object, so pass an in-memory buffer as 'req' to capture the output.
        import cStringIO
        tmp = cStringIO.StringIO()
        perform_request_search(req=tmp, p='higgs', of='tm')
        out = tmp.getvalue()
        tmp.close()
        # The fixture interpolates each record's current 005 field value
        # (fetched via get_fieldvalues), since those change whenever the
        # demo records are re-uploaded; likewise the site URL in 8564_ links.
        self.assertEqual(out, """\
000000107 001__ 107
000000107 003__ SzGeCERN
000000107 005__ %(rec_107_rev)s
000000107 035__ $$9SPIRES$$a4066995
000000107 037__ $$aCERN-EP-99-060
000000107 041__ $$aeng
000000107 084__ $$2CERN Library$$aEP-1999-060
000000107 088__ $$9SCAN-9910048
000000107 088__ $$aCERN-L3-175
000000107 110__ $$aCERN. Geneva
000000107 245__ $$aLimits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$
000000107 260__ $$c1999
000000107 269__ $$aGeneva$$bCERN$$c26 Apr 1999
000000107 300__ $$a18 p
000000107 490__ $$aALEPH Papers
000000107 500__ $$aPreprint not submitted to publication
000000107 65017 $$2SzGeCERN$$aParticle Physics - Experiment
000000107 690C_ $$aCERN
000000107 690C_ $$aPREPRINT
000000107 693__ $$aCERN LEP$$eALEPH
000000107 693__ $$aCERN LEP$$eDELPHI
000000107 693__ $$aCERN LEP$$eL3
000000107 693__ $$aCERN LEP$$eOPAL
000000107 695__ $$9MEDLINE$$asearches Higgs bosons
000000107 697C_ $$aLexiHiggs
000000107 710__ $$5EP
000000107 710__ $$gALEPH Collaboration
000000107 710__ $$gDELPHI Collaboration
000000107 710__ $$gL3 Collaboration
000000107 710__ $$gLEP Working Group for Higgs Boson Searches
000000107 710__ $$gOPAL Collaboration
000000107 901__ $$uCERN
000000107 916__ $$sh$$w199941
000000107 960__ $$a11
000000107 963__ $$aPUBLIC
000000107 970__ $$a000330309CER
000000107 980__ $$aARTICLE
000000085 001__ 85
000000085 003__ SzGeCERN
000000085 005__ %(rec_85_rev)s
000000085 035__ $$a2356302CERCER
000000085 035__ $$9SLAC$$a5423422
000000085 037__ $$ahep-th/0212181
000000085 041__ $$aeng
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 245__ $$a3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS
000000085 260__ $$c2003
000000085 269__ $$c16 Dec 2002
000000085 300__ $$a8 p
000000085 520__ $$aWe study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.
000000085 65017 $$2SzGeCERN$$aParticle Physics - Theory
000000085 690C_ $$aARTICLE
000000085 695__ $$9LANL EDS$$aHigh Energy Physics - Theory
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.pdf
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.ps.gz
000000085 859__ $$falberto.zaffaroni@mib.infn.it
000000085 909C4 $$c289-293$$pPhys. Lett. B$$v561$$y2003
000000085 916__ $$sn$$w200251
000000085 960__ $$a13
000000085 961__ $$c20060823$$h0007$$lCER01$$x20021217
000000085 963__ $$aPUBLIC
000000085 970__ $$a002356302CER
000000085 980__ $$aARTICLE
000000085 999C5 $$mD. Francia and A. Sagnotti,$$o[1]$$rhep-th/0207002$$sPhys. Lett. B 543 (2002) 303
000000085 999C5 $$mP. Haggi-Mani and B. Sundborg,$$o[1]$$rhep-th/0002189$$sJ. High Energy Phys. 0004 (2000) 031
000000085 999C5 $$mB. Sundborg,$$o[1]$$rhep-th/0103247$$sNucl. Phys. B, Proc. Suppl. 102 (2001) 113
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0105001$$sJ. High Energy Phys. 0109 (2001) 036
000000085 999C5 $$mA. Mikhailov,$$o[1]$$rhep-th/0201019
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205131$$sNucl. Phys. B 644 (2002) 303
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205132$$sJ. High Energy Phys. 0207 (2002) 055
000000085 999C5 $$mJ. Engquist, E. Sezgin and P. Sundell,$$o[1]$$rhep-th/0207101$$sClass. Quantum Gravity 19 (2002) 6175
000000085 999C5 $$mM. A. Vasiliev,$$o[1]$$rhep-th/9611024$$sInt. J. Mod. Phys. D 5 (1996) 763
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9808004$$sNucl. Phys. B 541 (1999) 323
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9906167$$sClass. Quantum Gravity 17 (2000) 1383
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sNucl. Phys. B 291 (1987) 141
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sPhys. Lett. B 189 (1987) 89
000000085 999C5 $$mI. R. Klebanov and A. M. Polyakov,$$o[3]$$rhep-th/0210114$$sPhys. Lett. B 550 (2002) 213
000000085 999C5 $$mM. A. Vasiliev,$$o[4]$$rhep-th/9910096
000000085 999C5 $$mT. Leonhardt, A. Meziane and W. Ruhl,$$o[5]$$rhep-th/0211092
000000085 999C5 $$mO. Aharony, M. Berkooz and E. Silverstein,$$o[6]$$rhep-th/0105309$$sJ. High Energy Phys. 0108 (2001) 006
000000085 999C5 $$mE. Witten,$$o[7]$$rhep-th/0112258
000000085 999C5 $$mM. Berkooz, A. Sever and A. Shomer$$o[8]$$rhep-th/0112264$$sJ. High Energy Phys. 0205 (2002) 034
000000085 999C5 $$mS. S. Gubser and I. Mitra,$$o[9]$$rhep-th/0210093
000000085 999C5 $$mS. S. Gubser and I. R. Klebanov,$$o[10]$$rhep-th/0212138
000000085 999C5 $$mM. Porrati,$$o[11]$$rhep-th/0112166$$sJ. High Energy Phys. 0204 (2002) 058
000000085 999C5 $$mK. G. Wilson and J. B. Kogut,$$o[12]$$sPhys. Rep. 12 (1974) 75
000000085 999C5 $$mI. R. Klebanov and E. Witten,$$o[13]$$rhep-th/9905104$$sNucl. Phys. B 556 (1999) 89
000000085 999C5 $$mW. Heidenreich,$$o[14]$$sJ. Math. Phys. 22 (1981) 1566
000000085 999C5 $$mD. Anselmi,$$o[15]$$rhep-th/0210123
000000001 001__ 1
000000001 005__ %(rec_1_rev)s
000000001 037__ $$aCERN-EX-0106015
000000001 100__ $$aPhotolab
000000001 245__ $$aALEPH experiment: Candidate of Higgs boson production
000000001 246_1 $$aExpérience ALEPH: Candidat de la production d'un boson Higgs
000000001 260__ $$c14 06 2000
000000001 340__ $$aFILM
000000001 520__ $$aCandidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.
000000001 65017 $$2SzGeCERN$$aExperiments and Tracks
000000001 6531_ $$aLEP
000000001 8560_ $$fneil.calder@cern.ch
000000001 8564_ $$u%(siteurl)s/record/1/files/0106015_01.jpg
000000001 8564_ $$u%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon$$xicon
000000001 909C0 $$o0003717PHOPHO
000000001 909C0 $$y2000
000000001 909C0 $$b81
000000001 909C1 $$c2001-06-14$$l50$$m2001-08-27$$oCM
000000001 909CP $$pBldg. 2
000000001 909CP $$rCalder, N
000000001 909CS $$sn$$w200231
000000001 980__ $$aPICTURE
""" % {'siteurl': CFG_SITE_URL,
       'rec_1_rev': get_fieldvalues(1, '005__')[0],
       'rec_85_rev': get_fieldvalues(85, '005__')[0],
       'rec_107_rev': get_fieldvalues(107, '005__')[0]})
    def test_search_engine_python_api_textmarc_field_filtered(self):
        """websearch - search engine Python API for Text MARC output, field-filtered"""
        # ot=['100', '700'] restricts output to the author fields only;
        # capture via an in-memory buffer passed as the request object.
        import cStringIO
        tmp = cStringIO.StringIO()
        perform_request_search(req=tmp, p='higgs', of='tm', ot=['100', '700'])
        out = tmp.getvalue()
        tmp.close()
        self.assertEqual(out, """\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000001 100__ $$aPhotolab
""")
def test_search_engine_python_api_for_intersect_results_with_one_collrec(self):
"""websearch - search engine Python API for intersect results with one collrec"""
self.assertEqual({'Books & Reports': intbitset([19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34])},
intersect_results_with_collrecs(None, intbitset(range(0,110)), ['Books & Reports'], 0, 'id', 0, 'en', False))
def test_search_engine_python_api_for_intersect_results_with_several_collrecs(self):
"""websearch - search engine Python API for intersect results with several collrecs"""
self.assertEqual({'Books': intbitset([21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]),
'Reports': intbitset([19, 20]),
'Theses': intbitset([35, 36, 37, 38, 39, 40, 41, 42, 105])},
intersect_results_with_collrecs(None, intbitset(range(0,110)), ['Books', 'Theses', 'Reports'], 0, 'id', 0, 'en', False))
    def test_search_engine_python_api_textmarc_field_filtered_hidden_guest(self):
        """websearch - search engine Python API for Text MARC output, field-filtered, hidden field, no guest access"""
        # Field 595 is requested but hidden from guests, so only the 100
        # (first author) fields appear in the output.
        import cStringIO
        tmp = cStringIO.StringIO()
        perform_request_search(req=tmp, p='higgs', of='tm', ot=['100', '595'])
        out = tmp.getvalue()
        tmp.close()
        self.assertEqual(out, """\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000001 100__ $$aPhotolab
""")
    def test_search_engine_python_api_xmlmarc_full(self):
        """websearch - search engine Python API for XMLMARC output, full"""
        # perform_request_search() writes the MARCXML ('xm') output to the
        # request object, so pass an in-memory buffer as 'req' to capture it.
        import cStringIO
        tmp = cStringIO.StringIO()
        perform_request_search(req=tmp, p='higgs', of='xm')
        out = tmp.getvalue()
        tmp.close()
        # The fixture interpolates each record's current 005 field value and
        # the site URL, since both vary between installations/reloads.
        self.assertEqual(out, """\
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">107</controlfield>
  <controlfield tag="003">SzGeCERN</controlfield>
  <controlfield tag="005">%(rec_107_rev)s</controlfield>
  <datafield tag="035" ind1=" " ind2=" ">
    <subfield code="9">SPIRES</subfield>
    <subfield code="a">4066995</subfield>
  </datafield>
  <datafield tag="037" ind1=" " ind2=" ">
    <subfield code="a">CERN-EP-99-060</subfield>
  </datafield>
  <datafield tag="041" ind1=" " ind2=" ">
    <subfield code="a">eng</subfield>
  </datafield>
  <datafield tag="084" ind1=" " ind2=" ">
    <subfield code="2">CERN Library</subfield>
    <subfield code="a">EP-1999-060</subfield>
  </datafield>
  <datafield tag="088" ind1=" " ind2=" ">
    <subfield code="9">SCAN-9910048</subfield>
  </datafield>
  <datafield tag="088" ind1=" " ind2=" ">
    <subfield code="a">CERN-L3-175</subfield>
  </datafield>
  <datafield tag="110" ind1=" " ind2=" ">
    <subfield code="a">CERN. Geneva</subfield>
  </datafield>
  <datafield tag="245" ind1=" " ind2=" ">
    <subfield code="a">Limits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$</subfield>
  </datafield>
  <datafield tag="260" ind1=" " ind2=" ">
    <subfield code="c">1999</subfield>
  </datafield>
  <datafield tag="269" ind1=" " ind2=" ">
    <subfield code="a">Geneva</subfield>
    <subfield code="b">CERN</subfield>
    <subfield code="c">26 Apr 1999</subfield>
  </datafield>
  <datafield tag="300" ind1=" " ind2=" ">
    <subfield code="a">18 p</subfield>
  </datafield>
  <datafield tag="490" ind1=" " ind2=" ">
    <subfield code="a">ALEPH Papers</subfield>
  </datafield>
  <datafield tag="500" ind1=" " ind2=" ">
    <subfield code="a">Preprint not submitted to publication</subfield>
  </datafield>
  <datafield tag="650" ind1="1" ind2="7">
    <subfield code="2">SzGeCERN</subfield>
    <subfield code="a">Particle Physics - Experiment</subfield>
  </datafield>
  <datafield tag="690" ind1="C" ind2=" ">
    <subfield code="a">CERN</subfield>
  </datafield>
  <datafield tag="690" ind1="C" ind2=" ">
    <subfield code="a">PREPRINT</subfield>
  </datafield>
  <datafield tag="693" ind1=" " ind2=" ">
    <subfield code="a">CERN LEP</subfield>
    <subfield code="e">ALEPH</subfield>
  </datafield>
  <datafield tag="693" ind1=" " ind2=" ">
    <subfield code="a">CERN LEP</subfield>
    <subfield code="e">DELPHI</subfield>
  </datafield>
  <datafield tag="693" ind1=" " ind2=" ">
    <subfield code="a">CERN LEP</subfield>
    <subfield code="e">L3</subfield>
  </datafield>
  <datafield tag="693" ind1=" " ind2=" ">
    <subfield code="a">CERN LEP</subfield>
    <subfield code="e">OPAL</subfield>
  </datafield>
  <datafield tag="695" ind1=" " ind2=" ">
    <subfield code="9">MEDLINE</subfield>
    <subfield code="a">searches Higgs bosons</subfield>
  </datafield>
  <datafield tag="697" ind1="C" ind2=" ">
    <subfield code="a">LexiHiggs</subfield>
  </datafield>
  <datafield tag="710" ind1=" " ind2=" ">
    <subfield code="5">EP</subfield>
  </datafield>
  <datafield tag="710" ind1=" " ind2=" ">
    <subfield code="g">ALEPH Collaboration</subfield>
  </datafield>
  <datafield tag="710" ind1=" " ind2=" ">
    <subfield code="g">DELPHI Collaboration</subfield>
  </datafield>
  <datafield tag="710" ind1=" " ind2=" ">
    <subfield code="g">L3 Collaboration</subfield>
  </datafield>
  <datafield tag="710" ind1=" " ind2=" ">
    <subfield code="g">LEP Working Group for Higgs Boson Searches</subfield>
  </datafield>
  <datafield tag="710" ind1=" " ind2=" ">
    <subfield code="g">OPAL Collaboration</subfield>
  </datafield>
  <datafield tag="901" ind1=" " ind2=" ">
    <subfield code="u">CERN</subfield>
  </datafield>
  <datafield tag="916" ind1=" " ind2=" ">
    <subfield code="s">h</subfield>
    <subfield code="w">199941</subfield>
  </datafield>
  <datafield tag="960" ind1=" " ind2=" ">
    <subfield code="a">11</subfield>
  </datafield>
  <datafield tag="963" ind1=" " ind2=" ">
    <subfield code="a">PUBLIC</subfield>
  </datafield>
  <datafield tag="970" ind1=" " ind2=" ">
    <subfield code="a">000330309CER</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">ARTICLE</subfield>
  </datafield>
</record>
<record>
  <controlfield tag="001">85</controlfield>
  <controlfield tag="003">SzGeCERN</controlfield>
  <controlfield tag="005">%(rec_85_rev)s</controlfield>
  <datafield tag="035" ind1=" " ind2=" ">
    <subfield code="a">2356302CERCER</subfield>
  </datafield>
  <datafield tag="035" ind1=" " ind2=" ">
    <subfield code="9">SLAC</subfield>
    <subfield code="a">5423422</subfield>
  </datafield>
  <datafield tag="037" ind1=" " ind2=" ">
    <subfield code="a">hep-th/0212181</subfield>
  </datafield>
  <datafield tag="041" ind1=" " ind2=" ">
    <subfield code="a">eng</subfield>
  </datafield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Girardello, L</subfield>
    <subfield code="u">INFN</subfield>
    <subfield code="u">Universita di Milano-Bicocca</subfield>
  </datafield>
  <datafield tag="245" ind1=" " ind2=" ">
    <subfield code="a">3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS</subfield>
  </datafield>
  <datafield tag="260" ind1=" " ind2=" ">
    <subfield code="c">2003</subfield>
  </datafield>
  <datafield tag="269" ind1=" " ind2=" ">
    <subfield code="c">16 Dec 2002</subfield>
  </datafield>
  <datafield tag="300" ind1=" " ind2=" ">
    <subfield code="a">8 p</subfield>
  </datafield>
  <datafield tag="520" ind1=" " ind2=" ">
    <subfield code="a">We study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.</subfield>
  </datafield>
  <datafield tag="650" ind1="1" ind2="7">
    <subfield code="2">SzGeCERN</subfield>
    <subfield code="a">Particle Physics - Theory</subfield>
  </datafield>
  <datafield tag="690" ind1="C" ind2=" ">
    <subfield code="a">ARTICLE</subfield>
  </datafield>
  <datafield tag="695" ind1=" " ind2=" ">
    <subfield code="9">LANL EDS</subfield>
    <subfield code="a">High Energy Physics - Theory</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Porrati, Massimo</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Zaffaroni, A</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="u">%(siteurl)s/record/85/files/0212181.pdf</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="u">%(siteurl)s/record/85/files/0212181.ps.gz</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="4">
    <subfield code="c">289-293</subfield>
    <subfield code="p">Phys. Lett. B</subfield>
    <subfield code="v">561</subfield>
    <subfield code="y">2003</subfield>
  </datafield>
  <datafield tag="859" ind1=" " ind2=" ">
    <subfield code="f">alberto.zaffaroni@mib.infn.it</subfield>
  </datafield>
  <datafield tag="916" ind1=" " ind2=" ">
    <subfield code="s">n</subfield>
    <subfield code="w">200251</subfield>
  </datafield>
  <datafield tag="960" ind1=" " ind2=" ">
    <subfield code="a">13</subfield>
  </datafield>
  <datafield tag="961" ind1=" " ind2=" ">
    <subfield code="c">20060823</subfield>
    <subfield code="h">0007</subfield>
    <subfield code="l">CER01</subfield>
    <subfield code="x">20021217</subfield>
  </datafield>
  <datafield tag="963" ind1=" " ind2=" ">
    <subfield code="a">PUBLIC</subfield>
  </datafield>
  <datafield tag="970" ind1=" " ind2=" ">
    <subfield code="a">002356302CER</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">ARTICLE</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">D. Francia and A. Sagnotti,</subfield>
    <subfield code="s">Phys. Lett. B 543 (2002) 303</subfield>
    <subfield code="r">hep-th/0207002</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">P. Haggi-Mani and B. Sundborg,</subfield>
    <subfield code="s">J. High Energy Phys. 0004 (2000) 031</subfield>
    <subfield code="r">hep-th/0002189</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">B. Sundborg,</subfield>
    <subfield code="s">Nucl. Phys. B, Proc. Suppl. 102 (2001) 113</subfield>
    <subfield code="r">hep-th/0103247</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">E. Sezgin and P. Sundell,</subfield>
    <subfield code="s">J. High Energy Phys. 0109 (2001) 036</subfield>
    <subfield code="r">hep-th/0105001</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">A. Mikhailov,</subfield>
    <subfield code="r">hep-th/0201019</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">E. Sezgin and P. Sundell,</subfield>
    <subfield code="s">Nucl. Phys. B 644 (2002) 303</subfield>
    <subfield code="r">hep-th/0205131</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">E. Sezgin and P. Sundell,</subfield>
    <subfield code="s">J. High Energy Phys. 0207 (2002) 055</subfield>
    <subfield code="r">hep-th/0205132</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">J. Engquist, E. Sezgin and P. Sundell,</subfield>
    <subfield code="s">Class. Quantum Gravity 19 (2002) 6175</subfield>
    <subfield code="r">hep-th/0207101</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">M. A. Vasiliev,</subfield>
    <subfield code="s">Int. J. Mod. Phys. D 5 (1996) 763</subfield>
    <subfield code="r">hep-th/9611024</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">D. Anselmi,</subfield>
    <subfield code="s">Nucl. Phys. B 541 (1999) 323</subfield>
    <subfield code="r">hep-th/9808004</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[1]</subfield>
    <subfield code="m">D. Anselmi,</subfield>
    <subfield code="s">Class. Quantum Gravity 17 (2000) 1383</subfield>
    <subfield code="r">hep-th/9906167</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[2]</subfield>
    <subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
    <subfield code="s">Nucl. Phys. B 291 (1987) 141</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[2]</subfield>
    <subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
    <subfield code="s">Phys. Lett. B 189 (1987) 89</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[3]</subfield>
    <subfield code="m">I. R. Klebanov and A. M. Polyakov,</subfield>
    <subfield code="s">Phys. Lett. B 550 (2002) 213</subfield>
    <subfield code="r">hep-th/0210114</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[4]</subfield>
    <subfield code="m">M. A. Vasiliev,</subfield>
    <subfield code="r">hep-th/9910096</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[5]</subfield>
    <subfield code="m">T. Leonhardt, A. Meziane and W. Ruhl,</subfield>
    <subfield code="r">hep-th/0211092</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[6]</subfield>
    <subfield code="m">O. Aharony, M. Berkooz and E. Silverstein,</subfield>
    <subfield code="s">J. High Energy Phys. 0108 (2001) 006</subfield>
    <subfield code="r">hep-th/0105309</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[7]</subfield>
    <subfield code="m">E. Witten,</subfield>
    <subfield code="r">hep-th/0112258</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[8]</subfield>
    <subfield code="m">M. Berkooz, A. Sever and A. Shomer</subfield>
    <subfield code="s">J. High Energy Phys. 0205 (2002) 034</subfield>
    <subfield code="r">hep-th/0112264</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[9]</subfield>
    <subfield code="m">S. S. Gubser and I. Mitra,</subfield>
    <subfield code="r">hep-th/0210093</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[10]</subfield>
    <subfield code="m">S. S. Gubser and I. R. Klebanov,</subfield>
    <subfield code="r">hep-th/0212138</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[11]</subfield>
    <subfield code="m">M. Porrati,</subfield>
    <subfield code="s">J. High Energy Phys. 0204 (2002) 058</subfield>
    <subfield code="r">hep-th/0112166</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[12]</subfield>
    <subfield code="m">K. G. Wilson and J. B. Kogut,</subfield>
    <subfield code="s">Phys. Rep. 12 (1974) 75</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[13]</subfield>
    <subfield code="m">I. R. Klebanov and E. Witten,</subfield>
    <subfield code="s">Nucl. Phys. B 556 (1999) 89</subfield>
    <subfield code="r">hep-th/9905104</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[14]</subfield>
    <subfield code="m">W. Heidenreich,</subfield>
    <subfield code="s">J. Math. Phys. 22 (1981) 1566</subfield>
  </datafield>
  <datafield tag="999" ind1="C" ind2="5">
    <subfield code="o">[15]</subfield>
    <subfield code="m">D. Anselmi,</subfield>
    <subfield code="r">hep-th/0210123</subfield>
  </datafield>
</record>
<record>
  <controlfield tag="001">1</controlfield>
  <controlfield tag="005">%(rec_1_rev)s</controlfield>
  <datafield tag="037" ind1=" " ind2=" ">
    <subfield code="a">CERN-EX-0106015</subfield>
  </datafield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Photolab</subfield>
  </datafield>
  <datafield tag="245" ind1=" " ind2=" ">
    <subfield code="a">ALEPH experiment: Candidate of Higgs boson production</subfield>
  </datafield>
  <datafield tag="246" ind1=" " ind2="1">
    <subfield code="a">Expérience ALEPH: Candidat de la production d'un boson Higgs</subfield>
  </datafield>
  <datafield tag="260" ind1=" " ind2=" ">
    <subfield code="c">14 06 2000</subfield>
  </datafield>
  <datafield tag="340" ind1=" " ind2=" ">
    <subfield code="a">FILM</subfield>
  </datafield>
  <datafield tag="520" ind1=" " ind2=" ">
    <subfield code="a">Candidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.</subfield>
  </datafield>
  <datafield tag="650" ind1="1" ind2="7">
    <subfield code="2">SzGeCERN</subfield>
    <subfield code="a">Experiments and Tracks</subfield>
  </datafield>
  <datafield tag="653" ind1="1" ind2=" ">
    <subfield code="a">LEP</subfield>
  </datafield>
  <datafield tag="856" ind1="0" ind2=" ">
    <subfield code="f">neil.calder@cern.ch</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="u">%(siteurl)s/record/1/files/0106015_01.jpg</subfield>
  </datafield>
  <datafield tag="856" ind1="4" ind2=" ">
    <subfield code="u">%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon</subfield>
    <subfield code="x">icon</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="0">
    <subfield code="o">0003717PHOPHO</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="0">
    <subfield code="y">2000</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="0">
    <subfield code="b">81</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="1">
    <subfield code="c">2001-06-14</subfield>
    <subfield code="l">50</subfield>
    <subfield code="m">2001-08-27</subfield>
    <subfield code="o">CM</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="P">
    <subfield code="p">Bldg. 2</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="P">
    <subfield code="r">Calder, N</subfield>
  </datafield>
  <datafield tag="909" ind1="C" ind2="S">
    <subfield code="s">n</subfield>
    <subfield code="w">200231</subfield>
  </datafield>
  <datafield tag="980" ind1=" " ind2=" ">
    <subfield code="a">PICTURE</subfield>
  </datafield>
</record>
</collection>""" % {'siteurl': CFG_SITE_URL,
                    'rec_1_rev': get_fieldvalues(1, '005__')[0],
                    'rec_85_rev': get_fieldvalues(85, '005__')[0],
                    'rec_107_rev': get_fieldvalues(107, '005__')[0]})
    def test_search_engine_python_api_xmlmarc_field_filtered(self):
        """websearch - search engine Python API for XMLMARC output, field-filtered"""
        # we are testing example from /help/hacking/search-engine-api
        # ot=['100', '700'] restricts output to the author fields; records
        # with neither field (107) keep only their 001 controlfield.
        import cStringIO
        tmp = cStringIO.StringIO()
        perform_request_search(req=tmp, p='higgs', of='xm', ot=['100', '700'])
        out = tmp.getvalue()
        tmp.close()
        self.assertEqual(out, """\
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">107</controlfield>
</record>
<record>
  <controlfield tag="001">85</controlfield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Girardello, L</subfield>
    <subfield code="u">INFN</subfield>
    <subfield code="u">Universita di Milano-Bicocca</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Porrati, Massimo</subfield>
  </datafield>
  <datafield tag="700" ind1=" " ind2=" ">
    <subfield code="a">Zaffaroni, A</subfield>
  </datafield>
</record>
<record>
  <controlfield tag="001">1</controlfield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Photolab</subfield>
  </datafield>
</record>
</collection>""")
    def test_search_engine_python_api_xmlmarc_field_filtered_hidden_guest(self):
        """websearch - search engine Python API for XMLMARC output, field-filtered, hidden field, no guest access"""
        # we are testing example from /help/hacking/search-engine-api
        # Field 595 is hidden from guests, so only 100 fields (and the 001
        # controlfields) survive the filtering.
        import cStringIO
        tmp = cStringIO.StringIO()
        perform_request_search(req=tmp, p='higgs', of='xm', ot=['100', '595'])
        out = tmp.getvalue()
        tmp.close()
        self.assertEqual(out, """\
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
  <controlfield tag="001">107</controlfield>
</record>
<record>
  <controlfield tag="001">85</controlfield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Girardello, L</subfield>
    <subfield code="u">INFN</subfield>
    <subfield code="u">Universita di Milano-Bicocca</subfield>
  </datafield>
</record>
<record>
  <controlfield tag="001">1</controlfield>
  <datafield tag="100" ind1=" " ind2=" ">
    <subfield code="a">Photolab</subfield>
  </datafield>
</record>
</collection>""")
    def test_search_engine_python_api_long_author_with_quotes(self):
        """websearch - search engine Python API for p=author:"Abbott, R B"

        NOTE: this test was written along with a bug report and needs
        fixing once that report is resolved.
        """
        self.assertEqual([16], perform_request_search(p='author:"Abbott, R B"'))
class WebSearchSearchEngineWebAPITest(unittest.TestCase):
"""Check typical search engine Web API calls on the demo data."""
def test_search_engine_web_api_for_failed_query(self):
"""websearch - search engine Web API for failed query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=aoeuidhtns&of=id',
expected_text="[]"))
def test_search_engine_web_api_for_failed_query_format_intbitset(self):
"""websearch - search engine Web API for failed query, output format intbitset"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=aoeuidhtns&of=intbitset',
expected_text=intbitset().fastdump()))
def test_search_engine_web_api_for_successful_query(self):
"""websearch - search engine Web API for successful query"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=id',
expected_text="[8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118]"))
def test_search_engine_web_api_ignore_paging_parameter(self):
"""websearch - search engine Web API for successful query, ignore paging parameters"""
self.assertEqual([],
test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=id&rg=5&jrec=3',
expected_text="[8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 47, 118]"))
def test_search_engine_web_api_respect_sorting_parameter(self):
    """websearch - search engine Web API for successful query, respect sorting parameters"""
    base = CFG_SITE_URL + '/search?p=klebanov&of=id'
    sorted_url = base + '&sf=909C4v'
    # Unsorted baseline, as guest and as admin (admin also sees record 77).
    self.assertEqual([], test_web_page_content(base,
                                               expected_text="[84, 85]"))
    self.assertEqual([], test_web_page_content(base,
                                               username="admin",
                                               expected_text="[77, 84, 85]"))
    # Sorting by 909C4v reverses the order of records 84 and 85.
    self.assertEqual([], test_web_page_content(sorted_url,
                                               expected_text="[85, 84]"))
    self.assertEqual([], test_web_page_content(sorted_url,
                                               username="admin",
                                               expected_text="[77, 85, 84]"))
    # intbitset output is order-free, so it ignores the sort field.
    self.assertEqual([], test_web_page_content(
        CFG_SITE_URL + '/search?p=klebanov&of=intbitset&sf=909C4v',
        username="admin",
        expected_text=intbitset([77, 84, 85]).fastdump()))
def test_search_engine_web_api_respect_ranking_parameter(self):
    """websearch - search engine Web API for successful query, respect ranking parameters"""
    base = CFG_SITE_URL + '/search?p=klebanov&of=id'
    ranked_url = base + '&rm=citation'
    # Unranked baseline, as guest and as admin (admin also sees record 77).
    self.assertEqual([], test_web_page_content(base,
                                               expected_text="[84, 85]"))
    self.assertEqual([], test_web_page_content(base,
                                               username="admin",
                                               expected_text="[77, 84, 85]"))
    # Citation ranking changes the record order.
    self.assertEqual([], test_web_page_content(ranked_url,
                                               expected_text="[85, 84]"))
    self.assertEqual([], test_web_page_content(ranked_url,
                                               username="admin",
                                               expected_text="[85, 77, 84]"))
    # intbitset output is order-free, so it ignores the ranking method.
    self.assertEqual([], test_web_page_content(
        CFG_SITE_URL + '/search?p=klebanov&of=intbitset&rm=citation',
        username="admin",
        expected_text=intbitset([77, 84, 85]).fastdump()))
def test_search_engine_web_api_for_existing_record(self):
    """websearch - search engine Web API for existing record"""
    url = CFG_SITE_URL + '/search?recid=8&of=id'
    self.assertEqual([], test_web_page_content(url, expected_text="[8]"))
def test_search_engine_web_api_for_nonexisting_record(self):
    """websearch - search engine Web API for non-existing record"""
    # An out-of-range recid yields an empty hit list, not an error page.
    url = CFG_SITE_URL + '/search?recid=123456789&of=id'
    self.assertEqual([], test_web_page_content(url, expected_text="[]"))
def test_search_engine_web_api_for_nonexisting_collection(self):
    """websearch - search engine Web API for non-existing collection"""
    url = CFG_SITE_URL + '/search?c=Foo&of=id'
    self.assertEqual([], test_web_page_content(url, expected_text="[]"))
def test_search_engine_web_api_for_range_of_records(self):
    """websearch - search engine Web API for range of records"""
    # recid/recidb select the half-open range [1, 10).
    url = CFG_SITE_URL + '/search?recid=1&recidb=10&of=id'
    self.assertEqual([], test_web_page_content(
        url, expected_text="[1, 2, 3, 4, 5, 6, 7, 8, 9]"))
def test_search_engine_web_api_ranked_by_citation(self):
    """websearch - search engine Web API for citation ranking"""
    # %3A is an URL-encoded colon: the pattern is recid:81.
    url = CFG_SITE_URL + '/search?p=recid%3A81&rm=citation&of=id'
    self.assertEqual([], test_web_page_content(
        url, expected_text="[82, 83, 87, 89]"))
def test_search_engine_web_api_textmarc_full(self):
    """websearch - search engine Web API for Text MARC output, full"""
    # Full Text MARC dump of the three 'higgs' hits (records 107, 85, 1).
    # The 005 revision-date fields and the site URL vary per installation,
    # so they are interpolated into the expected text at run time instead
    # of being hard-coded.
    self.assertEqual([],
        test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm',
                              expected_text="""\
000000107 001__ 107
000000107 003__ SzGeCERN
000000107 005__ %(rec_107_rev)s
000000107 035__ $$9SPIRES$$a4066995
000000107 037__ $$aCERN-EP-99-060
000000107 041__ $$aeng
000000107 084__ $$2CERN Library$$aEP-1999-060
000000107 088__ $$9SCAN-9910048
000000107 088__ $$aCERN-L3-175
000000107 110__ $$aCERN. Geneva
000000107 245__ $$aLimits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$
000000107 260__ $$c1999
000000107 269__ $$aGeneva$$bCERN$$c26 Apr 1999
000000107 300__ $$a18 p
000000107 490__ $$aALEPH Papers
000000107 500__ $$aPreprint not submitted to publication
000000107 65017 $$2SzGeCERN$$aParticle Physics - Experiment
000000107 690C_ $$aCERN
000000107 690C_ $$aPREPRINT
000000107 693__ $$aCERN LEP$$eALEPH
000000107 693__ $$aCERN LEP$$eDELPHI
000000107 693__ $$aCERN LEP$$eL3
000000107 693__ $$aCERN LEP$$eOPAL
000000107 695__ $$9MEDLINE$$asearches Higgs bosons
000000107 697C_ $$aLexiHiggs
000000107 710__ $$5EP
000000107 710__ $$gALEPH Collaboration
000000107 710__ $$gDELPHI Collaboration
000000107 710__ $$gL3 Collaboration
000000107 710__ $$gLEP Working Group for Higgs Boson Searches
000000107 710__ $$gOPAL Collaboration
000000107 901__ $$uCERN
000000107 916__ $$sh$$w199941
000000107 960__ $$a11
000000107 963__ $$aPUBLIC
000000107 970__ $$a000330309CER
000000107 980__ $$aARTICLE
000000085 001__ 85
000000085 003__ SzGeCERN
000000085 005__ %(rec_85_rev)s
000000085 035__ $$a2356302CERCER
000000085 035__ $$9SLAC$$a5423422
000000085 037__ $$ahep-th/0212181
000000085 041__ $$aeng
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 245__ $$a3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS
000000085 260__ $$c2003
000000085 269__ $$c16 Dec 2002
000000085 300__ $$a8 p
000000085 520__ $$aWe study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.
000000085 65017 $$2SzGeCERN$$aParticle Physics - Theory
000000085 690C_ $$aARTICLE
000000085 695__ $$9LANL EDS$$aHigh Energy Physics - Theory
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.pdf
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.ps.gz
000000085 859__ $$falberto.zaffaroni@mib.infn.it
000000085 909C4 $$c289-293$$pPhys. Lett. B$$v561$$y2003
000000085 916__ $$sn$$w200251
000000085 960__ $$a13
000000085 961__ $$c20060823$$h0007$$lCER01$$x20021217
000000085 963__ $$aPUBLIC
000000085 970__ $$a002356302CER
000000085 980__ $$aARTICLE
000000085 999C5 $$mD. Francia and A. Sagnotti,$$o[1]$$rhep-th/0207002$$sPhys. Lett. B 543 (2002) 303
000000085 999C5 $$mP. Haggi-Mani and B. Sundborg,$$o[1]$$rhep-th/0002189$$sJ. High Energy Phys. 0004 (2000) 031
000000085 999C5 $$mB. Sundborg,$$o[1]$$rhep-th/0103247$$sNucl. Phys. B, Proc. Suppl. 102 (2001) 113
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0105001$$sJ. High Energy Phys. 0109 (2001) 036
000000085 999C5 $$mA. Mikhailov,$$o[1]$$rhep-th/0201019
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205131$$sNucl. Phys. B 644 (2002) 303
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205132$$sJ. High Energy Phys. 0207 (2002) 055
000000085 999C5 $$mJ. Engquist, E. Sezgin and P. Sundell,$$o[1]$$rhep-th/0207101$$sClass. Quantum Gravity 19 (2002) 6175
000000085 999C5 $$mM. A. Vasiliev,$$o[1]$$rhep-th/9611024$$sInt. J. Mod. Phys. D 5 (1996) 763
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9808004$$sNucl. Phys. B 541 (1999) 323
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9906167$$sClass. Quantum Gravity 17 (2000) 1383
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sNucl. Phys. B 291 (1987) 141
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sPhys. Lett. B 189 (1987) 89
000000085 999C5 $$mI. R. Klebanov and A. M. Polyakov,$$o[3]$$rhep-th/0210114$$sPhys. Lett. B 550 (2002) 213
000000085 999C5 $$mM. A. Vasiliev,$$o[4]$$rhep-th/9910096
000000085 999C5 $$mT. Leonhardt, A. Meziane and W. Ruhl,$$o[5]$$rhep-th/0211092
000000085 999C5 $$mO. Aharony, M. Berkooz and E. Silverstein,$$o[6]$$rhep-th/0105309$$sJ. High Energy Phys. 0108 (2001) 006
000000085 999C5 $$mE. Witten,$$o[7]$$rhep-th/0112258
000000085 999C5 $$mM. Berkooz, A. Sever and A. Shomer$$o[8]$$rhep-th/0112264$$sJ. High Energy Phys. 0205 (2002) 034
000000085 999C5 $$mS. S. Gubser and I. Mitra,$$o[9]$$rhep-th/0210093
000000085 999C5 $$mS. S. Gubser and I. R. Klebanov,$$o[10]$$rhep-th/0212138
000000085 999C5 $$mM. Porrati,$$o[11]$$rhep-th/0112166$$sJ. High Energy Phys. 0204 (2002) 058
000000085 999C5 $$mK. G. Wilson and J. B. Kogut,$$o[12]$$sPhys. Rep. 12 (1974) 75
000000085 999C5 $$mI. R. Klebanov and E. Witten,$$o[13]$$rhep-th/9905104$$sNucl. Phys. B 556 (1999) 89
000000085 999C5 $$mW. Heidenreich,$$o[14]$$sJ. Math. Phys. 22 (1981) 1566
000000085 999C5 $$mD. Anselmi,$$o[15]$$rhep-th/0210123
000000001 001__ 1
000000001 005__ %(rec_1_rev)s
000000001 037__ $$aCERN-EX-0106015
000000001 100__ $$aPhotolab
000000001 245__ $$aALEPH experiment: Candidate of Higgs boson production
000000001 246_1 $$aExpérience ALEPH: Candidat de la production d'un boson Higgs
000000001 260__ $$c14 06 2000
000000001 340__ $$aFILM
000000001 520__ $$aCandidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.
000000001 65017 $$2SzGeCERN$$aExperiments and Tracks
000000001 6531_ $$aLEP
000000001 8560_ $$fneil.calder@cern.ch
000000001 8564_ $$u%(siteurl)s/record/1/files/0106015_01.jpg
000000001 8564_ $$u%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon$$xicon
000000001 909C0 $$o0003717PHOPHO
000000001 909C0 $$y2000
000000001 909C0 $$b81
000000001 909C1 $$c2001-06-14$$l50$$m2001-08-27$$oCM
000000001 909CP $$pBldg. 2
000000001 909CP $$rCalder, N
000000001 909CS $$sn$$w200231
000000001 980__ $$aPICTURE
""" % {'siteurl': CFG_SITE_URL,
       'rec_1_rev': get_fieldvalues(1, '005__')[0],
       'rec_85_rev': get_fieldvalues(85, '005__')[0],
       'rec_107_rev': get_fieldvalues(107, '005__')[0]}))
def test_search_engine_web_api_textmarc_field_filtered(self):
    """websearch - search engine Web API for Text MARC output, field-filtered"""
    # ot=100,700 restricts the Text MARC output to the first-author (100)
    # and additional-author (700) fields of the matching records.
    self.assertEqual([],
        test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=100,700',
                              expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000001 100__ $$aPhotolab
"""))
def test_search_engine_web_api_textmarc_field_filtered_hidden_guest(self):
    """websearch - search engine Web API for Text MARC output, field-filtered, hidden field, no guest access"""
    # Guests must not see the internal-note field 595: only the 100 fields
    # appear even though 595 was explicitly requested via ot=.
    self.assertEqual([],
        test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=100,595',
                              expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000001 100__ $$aPhotolab
"""))
def test_search_engine_web_api_textmarc_field_filtered_hidden_admin(self):
    """websearch - search engine Web API for Text MARC output, field-filtered, hidden field, admin access"""
    # Unlike guests, admin users do get the hidden 595 fields in addition
    # to the requested 100 fields.
    self.assertEqual([],
        test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=100,595',
                              username='admin',
                              expected_text="""\
000000107 595__ $$aNo authors
000000107 595__ $$aCERN-EP
000000107 595__ $$aOA
000000107 595__ $$aSIS:200740 PR/LKR not found (from SLAC, INSPEC)
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 595__ $$aLANL EDS
000000085 595__ $$aSIS LANLPUBL2004
000000085 595__ $$aSIS:2004 PR/LKR added
000000001 100__ $$aPhotolab
000000001 595__ $$aPress
"""))
def test_search_engine_web_api_textmarc_subfield_values(self):
    """websearch - search engine Web API for Text MARC output, subfield values"""
    # Requesting a full tag+subfield (700__a) prints bare subfield values,
    # without the record-id/tag prefix used for whole-field output.
    self.assertEqual([],
        test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=tm&ot=700__a',
                              expected_text="""\
Porrati, Massimo
Zaffaroni, A
"""))
def test_search_engine_web_api_xmlmarc_full(self):
    """websearch - search engine Web API for XMLMARC output, full"""
    # Full MARCXML dump of the three 'higgs' hits (records 107, 85, 1).
    # As in the Text MARC test, the volatile 005 revision dates and the
    # site URL are interpolated at run time.
    self.assertEqual([],
        test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm',
                              expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_107_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SPIRES</subfield>
<subfield code="a">4066995</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EP-99-060</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="084" ind1=" " ind2=" ">
<subfield code="2">CERN Library</subfield>
<subfield code="a">EP-1999-060</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="9">SCAN-9910048</subfield>
</datafield>
<datafield tag="088" ind1=" " ind2=" ">
<subfield code="a">CERN-L3-175</subfield>
</datafield>
<datafield tag="110" ind1=" " ind2=" ">
<subfield code="a">CERN. Geneva</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">Limits on Higgs boson masses from combining the data of the four LEP experiments at $\sqrt{s} \leq 183 GeV$</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">1999</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="a">Geneva</subfield>
<subfield code="b">CERN</subfield>
<subfield code="c">26 Apr 1999</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">18 p</subfield>
</datafield>
<datafield tag="490" ind1=" " ind2=" ">
<subfield code="a">ALEPH Papers</subfield>
</datafield>
<datafield tag="500" ind1=" " ind2=" ">
<subfield code="a">Preprint not submitted to publication</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Experiment</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">CERN</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">PREPRINT</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">ALEPH</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">DELPHI</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">L3</subfield>
</datafield>
<datafield tag="693" ind1=" " ind2=" ">
<subfield code="a">CERN LEP</subfield>
<subfield code="e">OPAL</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">MEDLINE</subfield>
<subfield code="a">searches Higgs bosons</subfield>
</datafield>
<datafield tag="697" ind1="C" ind2=" ">
<subfield code="a">LexiHiggs</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="5">EP</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">ALEPH Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">DELPHI Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">L3 Collaboration</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">LEP Working Group for Higgs Boson Searches</subfield>
</datafield>
<datafield tag="710" ind1=" " ind2=" ">
<subfield code="g">OPAL Collaboration</subfield>
</datafield>
<datafield tag="901" ind1=" " ind2=" ">
<subfield code="u">CERN</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">h</subfield>
<subfield code="w">199941</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">11</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">000330309CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_85_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="a">2356302CERCER</subfield>
</datafield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SLAC</subfield>
<subfield code="a">5423422</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">hep-th/0212181</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">2003</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="c">16 Dec 2002</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">8 p</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">We study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Theory</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">LANL EDS</subfield>
<subfield code="a">High Energy Physics - Theory</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/85/files/0212181.pdf</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/85/files/0212181.ps.gz</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="4">
<subfield code="c">289-293</subfield>
<subfield code="p">Phys. Lett. B</subfield>
<subfield code="v">561</subfield>
<subfield code="y">2003</subfield>
</datafield>
<datafield tag="859" ind1=" " ind2=" ">
<subfield code="f">alberto.zaffaroni@mib.infn.it</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">n</subfield>
<subfield code="w">200251</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">13</subfield>
</datafield>
<datafield tag="961" ind1=" " ind2=" ">
<subfield code="c">20060823</subfield>
<subfield code="h">0007</subfield>
<subfield code="l">CER01</subfield>
<subfield code="x">20021217</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">002356302CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Francia and A. Sagnotti,</subfield>
<subfield code="s">Phys. Lett. B 543 (2002) 303</subfield>
<subfield code="r">hep-th/0207002</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">P. Haggi-Mani and B. Sundborg,</subfield>
<subfield code="s">J. High Energy Phys. 0004 (2000) 031</subfield>
<subfield code="r">hep-th/0002189</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">B. Sundborg,</subfield>
<subfield code="s">Nucl. Phys. B, Proc. Suppl. 102 (2001) 113</subfield>
<subfield code="r">hep-th/0103247</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0109 (2001) 036</subfield>
<subfield code="r">hep-th/0105001</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">A. Mikhailov,</subfield>
<subfield code="r">hep-th/0201019</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Nucl. Phys. B 644 (2002) 303</subfield>
<subfield code="r">hep-th/0205131</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0207 (2002) 055</subfield>
<subfield code="r">hep-th/0205132</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">J. Engquist, E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Class. Quantum Gravity 19 (2002) 6175</subfield>
<subfield code="r">hep-th/0207101</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="s">Int. J. Mod. Phys. D 5 (1996) 763</subfield>
<subfield code="r">hep-th/9611024</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Nucl. Phys. B 541 (1999) 323</subfield>
<subfield code="r">hep-th/9808004</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Class. Quantum Gravity 17 (2000) 1383</subfield>
<subfield code="r">hep-th/9906167</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Nucl. Phys. B 291 (1987) 141</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Phys. Lett. B 189 (1987) 89</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">I. R. Klebanov and A. M. Polyakov,</subfield>
<subfield code="s">Phys. Lett. B 550 (2002) 213</subfield>
<subfield code="r">hep-th/0210114</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="r">hep-th/9910096</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="m">T. Leonhardt, A. Meziane and W. Ruhl,</subfield>
<subfield code="r">hep-th/0211092</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="m">O. Aharony, M. Berkooz and E. Silverstein,</subfield>
<subfield code="s">J. High Energy Phys. 0108 (2001) 006</subfield>
<subfield code="r">hep-th/0105309</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="m">E. Witten,</subfield>
<subfield code="r">hep-th/0112258</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="m">M. Berkooz, A. Sever and A. Shomer</subfield>
<subfield code="s">J. High Energy Phys. 0205 (2002) 034</subfield>
<subfield code="r">hep-th/0112264</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[9]</subfield>
<subfield code="m">S. S. Gubser and I. Mitra,</subfield>
<subfield code="r">hep-th/0210093</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[10]</subfield>
<subfield code="m">S. S. Gubser and I. R. Klebanov,</subfield>
<subfield code="r">hep-th/0212138</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="m">M. Porrati,</subfield>
<subfield code="s">J. High Energy Phys. 0204 (2002) 058</subfield>
<subfield code="r">hep-th/0112166</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[12]</subfield>
<subfield code="m">K. G. Wilson and J. B. Kogut,</subfield>
<subfield code="s">Phys. Rep. 12 (1974) 75</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[13]</subfield>
<subfield code="m">I. R. Klebanov and E. Witten,</subfield>
<subfield code="s">Nucl. Phys. B 556 (1999) 89</subfield>
<subfield code="r">hep-th/9905104</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[14]</subfield>
<subfield code="m">W. Heidenreich,</subfield>
<subfield code="s">J. Math. Phys. 22 (1981) 1566</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[15]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="r">hep-th/0210123</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<controlfield tag="005">%(rec_1_rev)s</controlfield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">CERN-EX-0106015</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">ALEPH experiment: Candidate of Higgs boson production</subfield>
</datafield>
<datafield tag="246" ind1=" " ind2="1">
<subfield code="a">Expérience ALEPH: Candidat de la production d'un boson Higgs</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">14 06 2000</subfield>
</datafield>
<datafield tag="340" ind1=" " ind2=" ">
<subfield code="a">FILM</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">Candidate for the associated production of the Higgs boson and Z boson. Both, the Higgs and Z boson decay into 2 jets each. The green and the yellow jets belong to the Higgs boson. They represent the fragmentation of a bottom andanti-bottom quark. The red and the blue jets stem from the decay of the Z boson into a quark anti-quark pair. Left: View of the event along the beam axis. Bottom right: Zoom around the interaction point at the centre showing detailsof the fragmentation of the bottom and anti-bottom quarks. As expected for b quarks, in each jet the decay of a long-lived B meson is visible. Top right: "World map" showing the spatial distribution of the jets in the event.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Experiments and Tracks</subfield>
</datafield>
<datafield tag="653" ind1="1" ind2=" ">
<subfield code="a">LEP</subfield>
</datafield>
<datafield tag="856" ind1="0" ind2=" ">
<subfield code="f">neil.calder@cern.ch</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.jpg</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/1/files/0106015_01.gif?subformat=icon</subfield>
<subfield code="x">icon</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="o">0003717PHOPHO</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="y">2000</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="0">
<subfield code="b">81</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="1">
<subfield code="c">2001-06-14</subfield>
<subfield code="l">50</subfield>
<subfield code="m">2001-08-27</subfield>
<subfield code="o">CM</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="p">Bldg. 2</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="P">
<subfield code="r">Calder, N</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="S">
<subfield code="s">n</subfield>
<subfield code="w">200231</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">PICTURE</subfield>
</datafield>
</record>
</collection>""" % {'siteurl': CFG_SITE_URL,
                    'rec_1_rev': get_fieldvalues(1, '005__')[0],
                    'rec_85_rev': get_fieldvalues(85, '005__')[0],
                    'rec_107_rev': get_fieldvalues(107, '005__')[0]}))
def test_search_engine_web_api_xmlmarc_field_filtered(self):
    """websearch - search engine Web API for XMLMARC output, field-filtered"""
    # ot=100,700 keeps only author fields; record 107 has neither, so its
    # <record> contains just the 001 controlfield.
    self.assertEqual([],
        test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm&ot=100,700',
                              expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>"""))
def test_search_engine_web_api_xmlmarc_field_filtered_hidden_guest(self):
    """websearch - search engine Web API for XMLMARC output, field-filtered, hidden field, no guest access"""
    # Guests must not see the internal-note field 595 even when explicitly
    # requested via ot=; only the 100 fields are returned.
    self.assertEqual([],
        test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm&ot=100,595',
                              expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
</record>
</collection>"""))
def test_search_engine_web_api_xmlmarc_field_filtered_hidden_admin(self):
    """websearch - search engine Web API for XMLMARC output, field-filtered, hidden field, admin access"""
    # Same query as the guest variant above, but authenticated as admin:
    # the hidden 595 datafields must now be present in the output.
    self.assertEqual([],
        test_web_page_content(CFG_SITE_URL + '/search?p=higgs&of=xm&ot=100,595',
                              username='admin',
                              expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<!-- Search-Engine-Total-Number-Of-Results: 3 -->
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">107</controlfield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">No authors</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">CERN-EP</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">OA</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS:200740 PR/LKR not found (from SLAC, INSPEC)</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">LANL EDS</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS LANLPUBL2004</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS:2004 PR/LKR added</subfield>
</datafield>
</record>
<record>
<controlfield tag="001">1</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Photolab</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">Press</subfield>
</datafield>
</record>
</collection>"""))
class WebSearchRecordWebAPITest(unittest.TestCase):
    """Check typical /record Web API calls on the demo data."""

    def test_record_web_api_textmarc_full(self):
        """websearch - /record Web API for TextMARC output, full"""
        # Full TextMARC dump of demo record 85.  The 005 (revision date)
        # field changes on re-installation, so it is fetched live via
        # get_fieldvalues and interpolated into the expected text.
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/record/85?of=tm',
                                  expected_text="""\
000000085 001__ 85
000000085 003__ SzGeCERN
000000085 005__ %(rec_85_rev)s
000000085 035__ $$a2356302CERCER
000000085 035__ $$9SLAC$$a5423422
000000085 037__ $$ahep-th/0212181
000000085 041__ $$aeng
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 245__ $$a3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS
000000085 260__ $$c2003
000000085 269__ $$c16 Dec 2002
000000085 300__ $$a8 p
000000085 520__ $$aWe study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.
000000085 65017 $$2SzGeCERN$$aParticle Physics - Theory
000000085 690C_ $$aARTICLE
000000085 695__ $$9LANL EDS$$aHigh Energy Physics - Theory
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.pdf
000000085 8564_ $$u%(siteurl)s/record/85/files/0212181.ps.gz
000000085 859__ $$falberto.zaffaroni@mib.infn.it
000000085 909C4 $$c289-293$$pPhys. Lett. B$$v561$$y2003
000000085 916__ $$sn$$w200251
000000085 960__ $$a13
000000085 961__ $$c20060823$$h0007$$lCER01$$x20021217
000000085 963__ $$aPUBLIC
000000085 970__ $$a002356302CER
000000085 980__ $$aARTICLE
000000085 999C5 $$mD. Francia and A. Sagnotti,$$o[1]$$rhep-th/0207002$$sPhys. Lett. B 543 (2002) 303
000000085 999C5 $$mP. Haggi-Mani and B. Sundborg,$$o[1]$$rhep-th/0002189$$sJ. High Energy Phys. 0004 (2000) 031
000000085 999C5 $$mB. Sundborg,$$o[1]$$rhep-th/0103247$$sNucl. Phys. B, Proc. Suppl. 102 (2001) 113
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0105001$$sJ. High Energy Phys. 0109 (2001) 036
000000085 999C5 $$mA. Mikhailov,$$o[1]$$rhep-th/0201019
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205131$$sNucl. Phys. B 644 (2002) 303
000000085 999C5 $$mE. Sezgin and P. Sundell,$$o[1]$$rhep-th/0205132$$sJ. High Energy Phys. 0207 (2002) 055
000000085 999C5 $$mJ. Engquist, E. Sezgin and P. Sundell,$$o[1]$$rhep-th/0207101$$sClass. Quantum Gravity 19 (2002) 6175
000000085 999C5 $$mM. A. Vasiliev,$$o[1]$$rhep-th/9611024$$sInt. J. Mod. Phys. D 5 (1996) 763
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9808004$$sNucl. Phys. B 541 (1999) 323
000000085 999C5 $$mD. Anselmi,$$o[1]$$rhep-th/9906167$$sClass. Quantum Gravity 17 (2000) 1383
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sNucl. Phys. B 291 (1987) 141
000000085 999C5 $$mE. S. Fradkin and M. A. Vasiliev,$$o[2]$$sPhys. Lett. B 189 (1987) 89
000000085 999C5 $$mI. R. Klebanov and A. M. Polyakov,$$o[3]$$rhep-th/0210114$$sPhys. Lett. B 550 (2002) 213
000000085 999C5 $$mM. A. Vasiliev,$$o[4]$$rhep-th/9910096
000000085 999C5 $$mT. Leonhardt, A. Meziane and W. Ruhl,$$o[5]$$rhep-th/0211092
000000085 999C5 $$mO. Aharony, M. Berkooz and E. Silverstein,$$o[6]$$rhep-th/0105309$$sJ. High Energy Phys. 0108 (2001) 006
000000085 999C5 $$mE. Witten,$$o[7]$$rhep-th/0112258
000000085 999C5 $$mM. Berkooz, A. Sever and A. Shomer$$o[8]$$rhep-th/0112264$$sJ. High Energy Phys. 0205 (2002) 034
000000085 999C5 $$mS. S. Gubser and I. Mitra,$$o[9]$$rhep-th/0210093
000000085 999C5 $$mS. S. Gubser and I. R. Klebanov,$$o[10]$$rhep-th/0212138
000000085 999C5 $$mM. Porrati,$$o[11]$$rhep-th/0112166$$sJ. High Energy Phys. 0204 (2002) 058
000000085 999C5 $$mK. G. Wilson and J. B. Kogut,$$o[12]$$sPhys. Rep. 12 (1974) 75
000000085 999C5 $$mI. R. Klebanov and E. Witten,$$o[13]$$rhep-th/9905104$$sNucl. Phys. B 556 (1999) 89
000000085 999C5 $$mW. Heidenreich,$$o[14]$$sJ. Math. Phys. 22 (1981) 1566
000000085 999C5 $$mD. Anselmi,$$o[15]$$rhep-th/0210123
""" % {'siteurl': CFG_SITE_URL,
       'rec_85_rev': get_fieldvalues(85, '005__')[0]}))

    def test_record_web_api_xmlmarc_full(self):
        """websearch - /record Web API for XMLMARC output, full"""
        # Same record 85, full XMLMARC serialization.  NOTE(review): the
        # 909C4 datafield precedes 859 in the expected output -- presumably
        # this matches the stored field order; confirm before reordering.
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/record/85?of=xm',
                                  expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">85</controlfield>
<controlfield tag="003">SzGeCERN</controlfield>
<controlfield tag="005">%(rec_85_rev)s</controlfield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="a">2356302CERCER</subfield>
</datafield>
<datafield tag="035" ind1=" " ind2=" ">
<subfield code="9">SLAC</subfield>
<subfield code="a">5423422</subfield>
</datafield>
<datafield tag="037" ind1=" " ind2=" ">
<subfield code="a">hep-th/0212181</subfield>
</datafield>
<datafield tag="041" ind1=" " ind2=" ">
<subfield code="a">eng</subfield>
</datafield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="245" ind1=" " ind2=" ">
<subfield code="a">3-D Interacting CFTs and Generalized Higgs Phenomenon in Higher Spin Theories on AdS</subfield>
</datafield>
<datafield tag="260" ind1=" " ind2=" ">
<subfield code="c">2003</subfield>
</datafield>
<datafield tag="269" ind1=" " ind2=" ">
<subfield code="c">16 Dec 2002</subfield>
</datafield>
<datafield tag="300" ind1=" " ind2=" ">
<subfield code="a">8 p</subfield>
</datafield>
<datafield tag="520" ind1=" " ind2=" ">
<subfield code="a">We study a duality, recently conjectured by Klebanov and Polyakov, between higher-spin theories on AdS_4 and O(N) vector models in 3-d. These theories are free in the UV and interacting in the IR. At the UV fixed point, the O(N) model has an infinite number of higher-spin conserved currents. In the IR, these currents are no longer conserved for spin s>2. In this paper, we show that the dual interpretation of this fact is that all fields of spin s>2 in AdS_4 become massive by a Higgs mechanism, that leaves the spin-2 field massless. We identify the Higgs field and show how it relates to the RG flow connecting the two CFTs, which is induced by a double trace deformation.</subfield>
</datafield>
<datafield tag="650" ind1="1" ind2="7">
<subfield code="2">SzGeCERN</subfield>
<subfield code="a">Particle Physics - Theory</subfield>
</datafield>
<datafield tag="690" ind1="C" ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="695" ind1=" " ind2=" ">
<subfield code="9">LANL EDS</subfield>
<subfield code="a">High Energy Physics - Theory</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/85/files/0212181.pdf</subfield>
</datafield>
<datafield tag="856" ind1="4" ind2=" ">
<subfield code="u">%(siteurl)s/record/85/files/0212181.ps.gz</subfield>
</datafield>
<datafield tag="909" ind1="C" ind2="4">
<subfield code="c">289-293</subfield>
<subfield code="p">Phys. Lett. B</subfield>
<subfield code="v">561</subfield>
<subfield code="y">2003</subfield>
</datafield>
<datafield tag="859" ind1=" " ind2=" ">
<subfield code="f">alberto.zaffaroni@mib.infn.it</subfield>
</datafield>
<datafield tag="916" ind1=" " ind2=" ">
<subfield code="s">n</subfield>
<subfield code="w">200251</subfield>
</datafield>
<datafield tag="960" ind1=" " ind2=" ">
<subfield code="a">13</subfield>
</datafield>
<datafield tag="961" ind1=" " ind2=" ">
<subfield code="c">20060823</subfield>
<subfield code="h">0007</subfield>
<subfield code="l">CER01</subfield>
<subfield code="x">20021217</subfield>
</datafield>
<datafield tag="963" ind1=" " ind2=" ">
<subfield code="a">PUBLIC</subfield>
</datafield>
<datafield tag="970" ind1=" " ind2=" ">
<subfield code="a">002356302CER</subfield>
</datafield>
<datafield tag="980" ind1=" " ind2=" ">
<subfield code="a">ARTICLE</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Francia and A. Sagnotti,</subfield>
<subfield code="s">Phys. Lett. B 543 (2002) 303</subfield>
<subfield code="r">hep-th/0207002</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">P. Haggi-Mani and B. Sundborg,</subfield>
<subfield code="s">J. High Energy Phys. 0004 (2000) 031</subfield>
<subfield code="r">hep-th/0002189</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">B. Sundborg,</subfield>
<subfield code="s">Nucl. Phys. B, Proc. Suppl. 102 (2001) 113</subfield>
<subfield code="r">hep-th/0103247</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0109 (2001) 036</subfield>
<subfield code="r">hep-th/0105001</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">A. Mikhailov,</subfield>
<subfield code="r">hep-th/0201019</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Nucl. Phys. B 644 (2002) 303</subfield>
<subfield code="r">hep-th/0205131</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">E. Sezgin and P. Sundell,</subfield>
<subfield code="s">J. High Energy Phys. 0207 (2002) 055</subfield>
<subfield code="r">hep-th/0205132</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">J. Engquist, E. Sezgin and P. Sundell,</subfield>
<subfield code="s">Class. Quantum Gravity 19 (2002) 6175</subfield>
<subfield code="r">hep-th/0207101</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="s">Int. J. Mod. Phys. D 5 (1996) 763</subfield>
<subfield code="r">hep-th/9611024</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Nucl. Phys. B 541 (1999) 323</subfield>
<subfield code="r">hep-th/9808004</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[1]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="s">Class. Quantum Gravity 17 (2000) 1383</subfield>
<subfield code="r">hep-th/9906167</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Nucl. Phys. B 291 (1987) 141</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[2]</subfield>
<subfield code="m">E. S. Fradkin and M. A. Vasiliev,</subfield>
<subfield code="s">Phys. Lett. B 189 (1987) 89</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[3]</subfield>
<subfield code="m">I. R. Klebanov and A. M. Polyakov,</subfield>
<subfield code="s">Phys. Lett. B 550 (2002) 213</subfield>
<subfield code="r">hep-th/0210114</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[4]</subfield>
<subfield code="m">M. A. Vasiliev,</subfield>
<subfield code="r">hep-th/9910096</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[5]</subfield>
<subfield code="m">T. Leonhardt, A. Meziane and W. Ruhl,</subfield>
<subfield code="r">hep-th/0211092</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[6]</subfield>
<subfield code="m">O. Aharony, M. Berkooz and E. Silverstein,</subfield>
<subfield code="s">J. High Energy Phys. 0108 (2001) 006</subfield>
<subfield code="r">hep-th/0105309</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[7]</subfield>
<subfield code="m">E. Witten,</subfield>
<subfield code="r">hep-th/0112258</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[8]</subfield>
<subfield code="m">M. Berkooz, A. Sever and A. Shomer</subfield>
<subfield code="s">J. High Energy Phys. 0205 (2002) 034</subfield>
<subfield code="r">hep-th/0112264</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[9]</subfield>
<subfield code="m">S. S. Gubser and I. Mitra,</subfield>
<subfield code="r">hep-th/0210093</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[10]</subfield>
<subfield code="m">S. S. Gubser and I. R. Klebanov,</subfield>
<subfield code="r">hep-th/0212138</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[11]</subfield>
<subfield code="m">M. Porrati,</subfield>
<subfield code="s">J. High Energy Phys. 0204 (2002) 058</subfield>
<subfield code="r">hep-th/0112166</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[12]</subfield>
<subfield code="m">K. G. Wilson and J. B. Kogut,</subfield>
<subfield code="s">Phys. Rep. 12 (1974) 75</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[13]</subfield>
<subfield code="m">I. R. Klebanov and E. Witten,</subfield>
<subfield code="s">Nucl. Phys. B 556 (1999) 89</subfield>
<subfield code="r">hep-th/9905104</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[14]</subfield>
<subfield code="m">W. Heidenreich,</subfield>
<subfield code="s">J. Math. Phys. 22 (1981) 1566</subfield>
</datafield>
<datafield tag="999" ind1="C" ind2="5">
<subfield code="o">[15]</subfield>
<subfield code="m">D. Anselmi,</subfield>
<subfield code="r">hep-th/0210123</subfield>
</datafield>
</record>
</collection>""" % {'siteurl': CFG_SITE_URL,
                    'rec_85_rev': get_fieldvalues(85, '005__')[0]}))

    def test_record_web_api_textmarc_field_filtered(self):
        """websearch - /record Web API for TextMARC output, field-filtered"""
        # Only the requested author fields (100, 700) are returned.
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=100,700',
                                  expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 700__ $$aPorrati, Massimo
000000085 700__ $$aZaffaroni, A
"""))

    def test_record_web_api_textmarc_field_filtered_hidden_guest(self):
        """websearch - /record Web API for TextMARC output, field-filtered, hidden field, no guest access"""
        # Hidden field 595 must not appear for a guest.
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=100,595',
                                  expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
"""))

    def test_record_web_api_textmarc_field_filtered_hidden_admin(self):
        """websearch - /record Web API for TextMARC output, field-filtered, hidden field, admin access"""
        # Hidden field 595 is visible to admin.
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=100,595',
                                  username='admin',
                                  expected_text="""\
000000085 100__ $$aGirardello, L$$uINFN$$uUniversita di Milano-Bicocca
000000085 595__ $$aLANL EDS
000000085 595__ $$aSIS LANLPUBL2004
000000085 595__ $$aSIS:2004 PR/LKR added
"""))

    def test_record_web_api_xmlmarc_field_filtered(self):
        """websearch - /record Web API for XMLMARC output, field-filtered"""
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/record/85?of=xm&ot=100,700',
                                  expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Porrati, Massimo</subfield>
</datafield>
<datafield tag="700" ind1=" " ind2=" ">
<subfield code="a">Zaffaroni, A</subfield>
</datafield>
</record>
</collection>"""))

    def test_record_web_api_xmlmarc_field_filtered_hidden_guest(self):
        """websearch - /record Web API for XMLMARC output, field-filtered, hidden field, no guest access"""
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/record/85?of=xm&ot=100,595',
                                  expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
</record>
</collection>"""))

    def test_record_web_api_xmlmarc_field_filtered_hidden_admin(self):
        """websearch - /record Web API for XMLMARC output, field-filtered, hidden field, admin access"""
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/record/85?of=xm&ot=100,595',
                                  username='admin',
                                  expected_text="""\
<?xml version="1.0" encoding="UTF-8"?>
<collection xmlns="http://www.loc.gov/MARC21/slim">
<record>
<controlfield tag="001">85</controlfield>
<datafield tag="100" ind1=" " ind2=" ">
<subfield code="a">Girardello, L</subfield>
<subfield code="u">INFN</subfield>
<subfield code="u">Universita di Milano-Bicocca</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">LANL EDS</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS LANLPUBL2004</subfield>
</datafield>
<datafield tag="595" ind1=" " ind2=" ">
<subfield code="a">SIS:2004 PR/LKR added</subfield>
</datafield>
</record>
</collection>"""))

    def test_record_web_api_textmarc_subfield_values(self):
        """websearch - /record Web API for TextMARC output, subfield values"""
        # ot=700__a asks for bare subfield values, one per line.
        self.assertEqual([],
            test_web_page_content(CFG_SITE_URL + '/record/85?of=tm&ot=700__a',
                                  expected_text="""\
Porrati, Massimo
Zaffaroni, A
"""))
class WebSearchRestrictedCollectionTest(unittest.TestCase):
    """Test of the restricted collections behaviour."""

    def test_restricted_collection_interface_page(self):
        """websearch - restricted collection interface page body"""
        # there should be no Latest additions box for restricted collections,
        # hence finding the text is expected to produce error messages
        self.assertNotEqual([],
            test_web_page_content(CFG_SITE_URL + '/collection/Theses',
                                  expected_text="Latest additions"))

    def test_restricted_search_as_anonymous_guest(self):
        """websearch - restricted collection not searchable by anonymous guest"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?c=Theses')
        response = browser.response().read()
        # the guest must be redirected to the login dialog
        if response.find("If you think you have right to access it, please authenticate yourself.") == -1:
            self.fail("Oops, searching restricted collection without password should have redirected to login dialog.")

    def test_restricted_search_as_authorized_person(self):
        """websearch - restricted collection searchable by authorized person"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?c=Theses')
        browser.select_form(nr=0)
        browser['p_un'] = 'jekyll'
        browser['p_pw'] = 'j123ekyll'
        browser.submit()
        if browser.response().read().find("records found") == -1:
            self.fail("Oops, Dr. Jekyll should be able to search Theses collection.")

    def test_restricted_search_as_unauthorized_person(self):
        """websearch - restricted collection not searchable by unauthorized person"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?c=Theses')
        browser.select_form(nr=0)
        browser['p_un'] = 'hyde'
        browser['p_pw'] = 'h123yde'
        browser.submit()
        # Mr. Hyde should not be able to connect, so the authorization
        # failure message must be present in the response:
        if browser.response().read().find("Authorization failure") == -1:
            # if we got here, things are broken:
            self.fail("Oops, Mr.Hyde should not be able to search Theses collection.")

    def test_restricted_detailed_record_page_as_anonymous_guest(self):
        """websearch - restricted detailed record page not accessible to guests"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/%s/35' % CFG_SITE_RECORD)
        if browser.response().read().find("You can use your nickname or your email address to login.") == -1:
            self.fail("Oops, searching restricted collection without password should have redirected to login dialog.")

    def test_restricted_detailed_record_page_as_authorized_person(self):
        """websearch - restricted detailed record page accessible to authorized person"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/youraccount/login')
        browser.select_form(nr=0)
        browser['p_un'] = 'jekyll'
        browser['p_pw'] = 'j123ekyll'
        browser.submit()
        browser.open(CFG_SITE_URL + '/%s/35' % CFG_SITE_RECORD)
        # Dr. Jekyll should be able to connect
        # (add the pw to the whole CFG_SITE_URL because we shall be
        # redirected to '/reordrestricted/'):
        if browser.response().read().find("A High-performance Video Browsing System") == -1:
            self.fail("Oops, Dr. Jekyll should be able to access restricted detailed record page.")

    def test_restricted_detailed_record_page_as_unauthorized_person(self):
        """websearch - restricted detailed record page not accessible to unauthorized person"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/youraccount/login')
        browser.select_form(nr=0)
        browser['p_un'] = 'hyde'
        browser['p_pw'] = 'h123yde'
        browser.submit()
        browser.open(CFG_SITE_URL + '/%s/35' % CFG_SITE_RECORD)
        # Mr. Hyde should not be able to connect:
        if browser.response().read().find('You are not authorized') == -1:
            # if we got here, things are broken:
            self.fail("Oops, Mr.Hyde should not be able to access restricted detailed record page.")

    def test_collection_restricted_p(self):
        """websearch - collection_restricted_p"""
        # NOTE: the original passed a spurious `True` as the failUnless
        # *msg* argument; the intended assertion is simply truthiness.
        self.failUnless(collection_restricted_p('Theses'))
        self.failIf(collection_restricted_p('Books & Reports'))

    def test_get_permitted_restricted_collections(self):
        """websearch - get_permitted_restricted_collections"""
        from invenio.webuser import get_uid_from_email, collect_user_info
        self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('jekyll@cds.cern.ch'))), ['Theses', 'Drafts'])
        self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('hyde@cds.cern.ch'))), [])
        self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('balthasar.montague@cds.cern.ch'))), ['ALEPH Theses', 'ALEPH Internal Notes', 'Atlantis Times Drafts'])
        self.assertEqual(get_permitted_restricted_collections(collect_user_info(get_uid_from_email('dorian.gray@cds.cern.ch'))), ['ISOLDE Internal Notes'])

    def test_restricted_record_has_restriction_flag(self):
        """websearch - restricted record displays a restriction flag"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/%s/42/files/' % CFG_SITE_RECORD)
        browser.select_form(nr=0)
        browser['p_un'] = 'jekyll'
        browser['p_pw'] = 'j123ekyll'
        browser.submit()
        if browser.response().read().find("Restricted") == -1:
            self.fail("Oops, a 'Restricted' flag should appear on restricted records.")
        browser.open(CFG_SITE_URL + '/%s/42/files/comments' % CFG_SITE_RECORD)
        if browser.response().read().find("Restricted") == -1:
            self.fail("Oops, a 'Restricted' flag should appear on restricted records.")
        # Flag also appear on records that exist both in a public and
        # restricted collection:
        error_messages = test_web_page_content(CFG_SITE_URL + '/%s/109' % CFG_SITE_RECORD,
                                               username='admin',
                                               password='',
                                               expected_text=['Restricted'])
        if error_messages:
            self.fail("Oops, a 'Restricted' flag should appear on restricted records.")
class WebSearchRestrictedCollectionHandlingTest(unittest.TestCase):
"""
Check how the restricted or restricted and "hidden" collection
handling works: (i)user has or not rights to access to specific
records or collections, (ii)public and restricted results are displayed
in the right position in the collection tree, (iii)display the right
warning depending on the case.
Changes in the collection tree used for testing (are showed the records used for testing as well):
Articles & Preprints Books & Reports
_____________|________________ ____________|_____________
| | | | | | |
Articles Drafts(r) Notes Preprints Books Theses(r) Reports
69 77 109 10 105
77 98 98
108 105
CERN Experiments
_________________________|___________________________
| |
ALEPH ISOLDE
_________________|_________________ ____________|_____________
| | | | |
ALEPH ALEPH ALEPH ISOLDE ISOLDE
Papers Internal Notes(r) Theses(r) Papers Internal Notes(r&h)
10 109 105 69 110
108 106
Authorized users:
jekyll -> Drafts, Theses
balthasar -> ALEPH Internal Notes, ALEPH Theses
dorian -> ISOLDE Internal Notes
"""
def test_show_public_colls_in_warning_as_unauthorizad_user(self):
    """websearch - show public daugther collections in warning to unauthorized user"""
    # An unauthorized user should only see the public daughter collections
    # listed in the "no match" warning.
    url = CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=recid:20'
    errors = test_web_page_content(url,
                                   username='hyde',
                                   password='h123yde',
                                   expected_text=['No match found in collection <em>Articles, Preprints, Notes</em>.'])
    if errors:
        self.fail(merge_error_messages(errors))
def test_show_public_and_restricted_colls_in_warning_as_authorized_user(self):
    """websearch - show public and restricted daugther collections in warning to authorized user"""
    # Jekyll is authorized for Drafts, so it appears in the warning too.
    url = CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=recid:20'
    errors = test_web_page_content(url,
                                   username='jekyll',
                                   password='j123ekyll',
                                   expected_text=['No match found in collection <em>Articles, Preprints, Notes, Drafts</em>.'])
    if errors:
        self.fail(merge_error_messages(errors))
def test_restricted_record_in_different_colls_as_unauthorized_user(self):
    """websearch - record belongs to different restricted collections with different rights, user not has rights"""
    # Hyde has no rights anywhere, so the hit count must not be shown.
    url = CFG_SITE_URL + '/search?p=105&f=recid'
    errors = test_web_page_content(url,
                                   username='hyde',
                                   password='h123yde',
                                   expected_text=['No public collection matched your query.'],
                                   unexpected_text=['records found'])
    if errors:
        self.fail(merge_error_messages(errors))
def test_restricted_record_in_different_colls_as_authorized_user_of_one_coll(self):
    """websearch - record belongs to different restricted collections with different rights, balthasar has rights to one of them"""
    from invenio.config import CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY
    policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
    url = CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts'
    # Under the 'ANY' policy, rights on a single collection are enough to
    # see the record; otherwise access is denied.
    if policy == 'ANY':
        expected = ['[CERN-THESIS-99-074]']
        unexpected = ['No public collection matched your query.']
    else:
        expected = ['No public collection matched your query.']
        unexpected = ['[CERN-THESIS-99-074]']
    errors = test_web_page_content(url,
                                   username='balthasar',
                                   password='b123althasar',
                                   expected_text=expected,
                                   unexpected_text=unexpected)
    if errors:
        self.fail(merge_error_messages(errors))
def test_restricted_record_in_different_colls_as_authorized_user_of_two_colls(self):
    """websearch - record belongs to different restricted collections with different rights, jekyll has rights to two of them"""
    url = CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts'
    errors = test_web_page_content(url,
                                   username='jekyll',
                                   password='j123ekyll',
                                   expected_text=['Articles & Preprints', 'Books & Reports'])
    if errors:
        self.fail(merge_error_messages(errors))
def test_restricted_record_in_different_colls_as_authorized_user_of_all_colls(self):
    """websearch - record belongs to different restricted collections with different rights, admin has rights to all of them"""
    url = CFG_SITE_URL + '/search?&sc=1&p=recid:105&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts'
    errors = test_web_page_content(url,
                                   username='admin',
                                   expected_text=['Articles & Preprints', 'Books & Reports', 'ALEPH Theses'])
    if errors:
        self.fail(merge_error_messages(errors))
def test_search_restricted_record_from_not_dad_coll(self):
    """websearch - record belongs to different restricted collections with different rights, search from a not dad collection"""
    url = CFG_SITE_URL + '/search?ln=en&cc=Multimedia+%26+Arts&sc=1&p=recid%3A105&f=&action_search=Search&c=Pictures&c=Poetry&c=Atlantis+Times'
    errors = test_web_page_content(url,
                                   username='admin',
                                   expected_text='No match found in collection',
                                   expected_link_label='1 hits')
    if errors:
        self.fail(merge_error_messages(errors))
def test_public_and_restricted_record_as_unauthorized_user(self):
    """websearch - record belongs to different public and restricted collections, user not has rights"""
    # Guest only sees the public hits; restricted record 40 is filtered out.
    url = CFG_SITE_URL + '/search?&sc=1&p=geometry&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts&of=id'
    errors = test_web_page_content(url,
                                   username='guest',
                                   expected_text='[80, 86]',
                                   unexpected_text='[40, 80, 86]')
    if errors:
        self.fail(merge_error_messages(errors))
def test_public_and_restricted_record_as_authorized_user(self):
    """websearch - record belongs to different public and restricted collections, admin has rights"""
    # Admin sees the restricted record 40 in addition to the public hits.
    url = CFG_SITE_URL + '/search?&sc=1&p=geometry&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts&of=id'
    errors = test_web_page_content(url,
                                   username='admin',
                                   password='',
                                   expected_text='[40, 80, 86]')
    if errors:
        self.fail(merge_error_messages(errors))
def test_public_and_restricted_record_of_focus_as_unauthorized_user(self):
    """websearch - record belongs to both a public and a restricted collection of "focus on", user not has rights"""
    url = CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=109&f=recid'
    errors = test_web_page_content(url,
                                   username='hyde',
                                   password='h123yde',
                                   expected_text=['No public collection matched your query'],
                                   unexpected_text=['LEP Center-of-Mass Energies in Presence of Opposite'])
    if errors:
        self.fail(merge_error_messages(errors))
def test_public_and_restricted_record_of_focus_as_authorized_user(self):
"""websearch - record belongs to both a public and a restricted collection of "focus on", user has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=109&f=recid&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='balthasar',
password='b123althasar',
expected_text=['Articles & Preprints', 'ALEPH Internal Notes', 'LEP Center-of-Mass Energies in Presence of Opposite'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_public_and_restricted_record_from_not_dad_coll_as_authorized_user(self):
"""websearch - record belongs to both a public and a restricted collection, search from a not dad collection, admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=recid%3A98&f=&action_search=Search&c=Books&c=Reports',
username='admin',
password='',
expected_text='No match found in collection <em>Books, Theses, Reports</em>',
expected_link_label='1 hits')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_public_and_restricted_record_from_not_dad_coll_as_unauthorized_user(self):
"""websearch - record belongs to both a public and a restricted collection, search from a not dad collection, hyde not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=recid%3A98&f=&action_search=Search&c=Books&c=Reports',
username='hyde',
password='h123yde',
expected_text='No public collection matched your query',
unexpected_text='No match found in collection')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_record_of_focus_as_authorized_user(self):
"""websearch - record belongs to a restricted collection of "focus on", balthasar has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?&sc=1&p=106&f=recid&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts&of=id',
username='balthasar',
password='b123althasar',
expected_text='[106]',
unexpected_text='[]')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_display_dad_coll_of_restricted_coll_as_unauthorized_user(self):
"""websearch - unauthorized user displays a collection that contains a restricted collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=&f=&action_search=Search&c=Articles&c=Drafts&c=Preprints',
username='guest',
expected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_display_dad_coll_of_restricted_coll_as_authorized_user(self):
"""websearch - authorized user displays a collection that contains a restricted collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Articles+%26+Preprints&sc=1&p=&f=&action_search=Search&c=Articles&c=Drafts&c=Notes&c=Preprints',
username='jekyll',
password='j123ekyll',
expected_text=['Articles', 'Drafts', 'Notes', 'Preprints'],
unexpected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_coll_of_focus_as_unauthorized_user(self):
"""websearch - search for a record that belongs to a restricted collection from a collection of "focus on" , jekyll not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=CERN+Divisions&sc=1&p=recid%3A106&f=&action_search=Search&c=Experimental+Physics+(EP)&c=Theoretical+Physics+(TH)',
username='jekyll',
password='j123ekyll',
expected_text=['No public collection matched your query.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_coll_of_focus_as_authorized_user(self):
"""websearch - search for a record that belongs to a restricted collection from a collection of "focus on" , admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=CERN+Divisions&sc=1&p=recid%3A106&f=&action_search=Search&c=Experimental+Physics+(EP)&c=Theoretical+Physics+(TH)',
username='admin',
password='',
expected_text='No match found in collection <em>Experimental Physics (EP), Theoretical Physics (TH)</em>.',
expected_link_label='1 hits')
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_not_direct_dad_coll_and_display_in_right_position_in_tree(self):
"""websearch - search for a restricted record from not direct dad collection and display it on its right position in the tree"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=recid%3A40&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
password='',
expected_text=['Books & Reports','[LBL-22304]'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_restricted_record_from_direct_dad_coll_and_display_in_right_position_in_tree(self):
"""websearch - search for a restricted record from the direct dad collection and display it on its right position in the tree"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=recid%3A40&f=&action_search=Search&c=Books&c=Reports',
username='admin',
password='',
expected_text=['Theses', '[LBL-22304]'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_and_hidden_record_as_unauthorized_user(self):
"""websearch - search for a "hidden" record, user not has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=recid%3A110&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='guest',
expected_text=['If you were looking for a non-public document'],
unexpected_text=['If you were looking for a hidden document'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_restricted_and_hidden_record_as_authorized_user(self):
"""websearch - search for a "hidden" record, admin has rights"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=recid%3A110&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
password='',
expected_text=['If you were looking for a hidden document, please type the correct URL for this record.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_enter_url_of_restricted_and_hidden_coll_as_unauthorized_user(self):
"""websearch - unauthorized user types the concret URL of a "hidden" collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=ISOLDE+Internal+Notes&sc=1&p=&f=&action_search=Search',
username='guest',
expected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_enter_url_of_restricted_and_hidden_coll_as_authorized_user(self):
"""websearch - authorized user types the concret URL of a "hidden" collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=ISOLDE+Internal+Notes&sc=1&p=&f=&action_search=Search',
username='dorian',
password='d123orian',
expected_text=['ISOLDE Internal Notes', '[CERN-PS-PA-Note-93-04]'],
unexpected_text=['This collection is restricted.'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_the_top_as_unauthorized_user(self):
"""websearch - unauthorized user searches for a pattern from the top"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=of&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='guest',
expected_text=['Articles & Preprints', '61', 'records found',
'Books & Reports', '2', 'records found',
'Multimedia & Arts', '14', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_the_top_as_authorized_user(self):
"""websearch - authorized user searches for a pattern from the top"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&sc=1&p=of&f=&action_search=Search&c=Articles+%26+Preprints&c=Books+%26+Reports&c=Multimedia+%26+Arts',
username='admin',
password='',
expected_text=['Articles & Preprints', '61', 'records found',
'Books & Reports', '6', 'records found',
'Multimedia & Arts', '14', 'records found',
'ALEPH Theses', '1', 'records found',
'ALEPH Internal Notes', '1', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_an_specific_coll_as_unauthorized_user(self):
"""websearch - unauthorized user searches for a pattern from one specific collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=of&f=&action_search=Search&c=Books&c=Reports',
username='guest',
expected_text=['Books', '1', 'records found',
'Reports', '1', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
def test_search_for_pattern_from_an_specific_coll_as_authorized_user(self):
"""websearch - authorized user searches for a pattern from one specific collection"""
error_messages = test_web_page_content(CFG_SITE_URL + '/search?ln=en&cc=Books+%26+Reports&sc=1&p=of&f=&action_search=Search&c=Books&c=Reports',
username='admin',
password='',
expected_text=['Books', '1', 'records found',
'Reports', '1', 'records found',
'Theses', '4', 'records found'])
if error_messages:
self.fail(merge_error_messages(error_messages))
class WebSearchRestrictedPicturesTest(unittest.TestCase):
    """
    Check whether restricted pictures on the demo site can be accessed
    well by people who have rights to access them.
    """
    def test_restricted_pictures_guest(self):
        """websearch - restricted pictures not available to guest"""
        # Anonymous access must be met with the restriction notice.
        picture_url = CFG_SITE_URL + '/%s/1/files/0106015_01.jpg' % CFG_SITE_RECORD
        failures = test_web_page_content(
            picture_url,
            expected_text=['This file is restricted. If you think you have right to access it, please authenticate yourself.'])
        if failures:
            self.fail(merge_error_messages(failures))
    def test_restricted_pictures_romeo(self):
        """websearch - restricted pictures available to Romeo"""
        # Romeo is authorized, so neither refusal message may appear.
        picture_url = CFG_SITE_URL + '/%s/1/files/0106015_01.jpg' % CFG_SITE_RECORD
        failures = test_web_page_content(
            picture_url,
            username='romeo',
            password='r123omeo',
            expected_text=[],
            unexpected_text=['This file is restricted',
                             'You are not authorized'])
        if failures:
            self.fail(merge_error_messages(failures))
    def test_restricted_pictures_hyde(self):
        """websearch - restricted pictures not available to Mr. Hyde"""
        # Hyde is authenticated but unauthorized; any reported problem
        # must be the HTTP 401 refusal, nothing else.
        picture_url = CFG_SITE_URL + '/%s/1/files/0106015_01.jpg' % CFG_SITE_RECORD
        failures = test_web_page_content(
            picture_url,
            username='hyde',
            password='h123yde',
            expected_text=['This file is restricted',
                           'You are not authorized'])
        if failures:
            self.failUnless("HTTP Error 401: Unauthorized" in merge_error_messages(failures))
class WebSearchRestrictedWebJournalFilesTest(unittest.TestCase):
    """
    Check whether files attached to a WebJournal article are well
    accessible when the article is published
    """
    def test_restricted_files_guest(self):
        """websearch - files of unreleased articles are not available to guest"""
        # The record itself is non-public...
        self.assertEqual(record_public_p(112), False)
        # ...so a guest must be refused access to its attachments.
        failures = test_web_page_content(
            CFG_SITE_URL + '/%s/112/files/journal_galapagos_archipelago.jpg' % CFG_SITE_RECORD,
            expected_text=['This file is restricted. If you think you have right to access it, please authenticate yourself.'])
        if failures:
            self.fail(merge_error_messages(failures))
    def test_restricted_files_editor(self):
        """websearch - files of unreleased articles are available to editor"""
        # The record itself is non-public...
        self.assertEqual(record_public_p(112), False)
        # ...but the journal editor may fetch the attachments.
        failures = test_web_page_content(
            CFG_SITE_URL + '/%s/112/files/journal_galapagos_archipelago.jpg' % CFG_SITE_RECORD,
            username='balthasar',
            password='b123althasar',
            expected_text=[],
            unexpected_text=['This file is restricted',
                             'You are not authorized'])
        if failures:
            self.fail(merge_error_messages(failures))
    def test_public_files_guest(self):
        """websearch - files of released articles are available to guest"""
        # The record itself is non-public...
        self.assertEqual(record_public_p(111), False)
        # ...yet the article is released, so its files are reachable.
        failures = test_web_page_content(
            CFG_SITE_URL + '/%s/111/files/journal_scissor_beak.jpg' % CFG_SITE_RECORD,
            expected_text=[],
            unexpected_text=['This file is restricted',
                             'You are not authorized'])
        if failures:
            self.fail(merge_error_messages(failures))
    def test_really_restricted_files_guest(self):
        """websearch - restricted files of released articles are not available to guest"""
        # The record itself is non-public...
        self.assertEqual(record_public_p(111), False)
        # ...and explicitly restricted attachments stay off-limits even
        # though the article is released.
        failures = test_web_page_content(
            CFG_SITE_URL + '/%s/111/files/restricted-journal_scissor_beak.jpg' % CFG_SITE_RECORD,
            expected_text=['This file is restricted. If you think you have right to access it, please authenticate yourself.'])
        if failures:
            self.fail(merge_error_messages(failures))
    def test_restricted_picture_has_restriction_flag(self):
        """websearch - restricted files displays a restriction flag"""
        # The files listing must flag restricted attachments.
        failures = test_web_page_content(
            CFG_SITE_URL + '/%s/1/files/' % CFG_SITE_RECORD,
            expected_text="Restricted")
        if failures:
            self.fail(merge_error_messages(failures))
class WebSearchRSSFeedServiceTest(unittest.TestCase):
    """Test of the RSS feed service."""
    def test_rss_feed_service(self):
        """websearch - RSS feed service"""
        # The /rss endpoint must serve an RSS 2.0 document.
        failures = test_web_page_content(CFG_SITE_URL + '/rss',
                                         expected_text='<rss version="2.0"')
        self.assertEqual([], failures)
class WebSearchXSSVulnerabilityTest(unittest.TestCase):
    """Test possible XSS vulnerabilities of the search engine."""
    # Every test injects an URL-encoded <SCRIPT> payload and checks the
    # page echoes it back escaped (or lowercased/neutralized) only.
    def test_xss_in_collection_interface_page(self):
        """websearch - no XSS vulnerability in collection interface pages"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/?c=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E',
            expected_text='Collection &lt;SCRIPT&gt;alert("XSS");&lt;/SCRIPT&gt; Not Found')
        self.assertEqual([], failures)
    def test_xss_in_collection_search_page(self):
        """websearch - no XSS vulnerability in collection search pages"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?c=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E',
            expected_text='Collection <SCRIPT>alert("XSS");</SCRIPT> Not Found')
        self.assertEqual([], failures)
    def test_xss_in_simple_search(self):
        """websearch - no XSS vulnerability in simple search"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?p=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E',
            expected_text='Search term <em><SCRIPT>alert("XSS");</SCRIPT></em> did not match any record.')
        self.assertEqual([], failures)
    def test_xss_in_structured_search(self):
        """websearch - no XSS vulnerability in structured search"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?p=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&f=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E',
            expected_text='No word index is available for <em><script>alert("xss");</script></em>.')
        self.assertEqual([], failures)
    def test_xss_in_advanced_search(self):
        """websearch - no XSS vulnerability in advanced search"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?as=1&p1=ellis&f1=author&op1=a&p2=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&f2=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&m2=e',
            expected_text='Search term <em><SCRIPT>alert("XSS");</SCRIPT></em> inside index <em><script>alert("xss");</script></em> did not match any record.')
        self.assertEqual([], failures)
    def test_xss_in_browse(self):
        """websearch - no XSS vulnerability in browse"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?p=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&f=%3CSCRIPT%3Ealert%28%22XSS%22%29%3B%3C%2FSCRIPT%3E&action_browse=Browse',
            expected_text='<SCRIPT>alert("XSS");</SCRIPT>')
        self.assertEqual([], failures)
class WebSearchResultsOverview(unittest.TestCase):
    """Test of the search results page's Results overview box and links."""
    def test_results_overview_split_off(self):
        """websearch - results overview box when split by collection is off"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?p=of&sc=0')
        page = browser.response().read()
        if "Results overview" in page:
            self.fail("Oops, when split by collection is off, "
                      "results overview should not be present.")
        if '<a name="1"></a>' not in page:
            self.fail("Oops, when split by collection is off, "
                      "Atlantis collection should be found.")
        if '<a name="15"></a>' in page:
            self.fail("Oops, when split by collection is off, "
                      "Multimedia & Arts should not be found.")
        # No anchor link to collection 15 should exist either.
        try:
            browser.find_link(url='#15')
            self.fail("Oops, when split by collection is off, "
                      "a link to Multimedia & Arts should not be found.")
        except LinkNotFoundError:
            pass
    def test_results_overview_split_on(self):
        """websearch - results overview box when split by collection is on"""
        browser = Browser()
        browser.open(CFG_SITE_URL + '/search?p=of&sc=1')
        page = browser.response().read()
        if "Results overview" not in page:
            self.fail("Oops, when split by collection is on, "
                      "results overview should be present.")
        if '<a name="Atlantis%20Institute%20of%20Fictive%20Science"></a>' in page:
            self.fail("Oops, when split by collection is on, "
                      "Atlantis collection should not be found.")
        if '<a name="15"></a>' not in page:
            self.fail("Oops, when split by collection is on, "
                      "Multimedia & Arts should be found.")
        # The overview must link to the collection 15 anchor.
        try:
            browser.find_link(url='#15')
        except LinkNotFoundError:
            self.fail("Oops, when split by collection is on, "
                      "a link to Multimedia & Arts should be found.")
class WebSearchSortResultsTest(unittest.TestCase):
    """Test of the search results page's sorting capability."""
    def test_sort_results_default(self):
        """websearch - search results sorting, default method"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?p=of&f=title&rg=3',
            expected_text="CMS animation of the high-energy collisions")
        self.assertEqual([], failures)
    def test_sort_results_ascending(self):
        """websearch - search results sorting, ascending field"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?p=of&f=title&rg=2&sf=reportnumber&so=a',
            expected_text="[astro-ph/0104076]")
        self.assertEqual([], failures)
    def test_sort_results_descending(self):
        """websearch - search results sorting, descending field"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?p=of&f=title&rg=1&sf=reportnumber&so=d',
            expected_text=" [TESLA-FEL-99-07]")
        self.assertEqual([], failures)
    def test_sort_results_sort_pattern(self):
        """websearch - search results sorting, preferential sort pattern"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?p=of&f=title&rg=1&sf=reportnumber&so=d&sp=cern',
            expected_text="[CERN-TH-2002-069]")
        self.assertEqual([], failures)
class WebSearchSearchResultsXML(unittest.TestCase):
    """Test search results in various output"""

    def _check_single_document_element(self, url, open_tag):
        """Fetch *url* and fail unless the body contains exactly one
        occurrence of *open_tag* and exactly one closing </collection>
        tag (i.e. a single well-formed document element)."""
        browser = Browser()
        browser.open(url)
        body = browser.response().read()
        for tag in (open_tag, "</collection>"):
            occurrences = body.count(tag)
            if occurrences == 0:
                self.fail("Oops, no document element %s "
                          "found in search results." % tag)
            elif occurrences > 1:
                self.fail("Oops, multiple document elements %s "
                          "found in search results." % tag)

    def test_search_results_xm_output_split_on(self):
        """ websearch - check document element of search results in xm output (split by collection on)"""
        self._check_single_document_element(
            CFG_SITE_URL + '/search?sc=1&of=xm',
            '<collection xmlns="http://www.loc.gov/MARC21/slim">')

    def test_search_results_xm_output_split_off(self):
        """ websearch - check document element of search results in xm output (split by collection off)"""
        self._check_single_document_element(
            CFG_SITE_URL + '/search?sc=0&of=xm',
            '<collection xmlns="http://www.loc.gov/MARC21/slim">')

    def test_search_results_xd_output_split_on(self):
        """ websearch - check document element of search results in xd output (split by collection on)"""
        # xd output carries no MARC21 namespace, so only the bare
        # opening tag is checked (the original failure message wrongly
        # mentioned the MARC21 slim xmlns here).
        self._check_single_document_element(
            CFG_SITE_URL + '/search?sc=1&of=xd',
            '<collection')

    def test_search_results_xd_output_split_off(self):
        """ websearch - check document element of search results in xd output (split by collection off)"""
        self._check_single_document_element(
            CFG_SITE_URL + '/search?sc=0&of=xd',
            '<collection>')
class WebSearchUnicodeQueryTest(unittest.TestCase):
    """Test of the search results for queries containing Unicode characters."""
    def test_unicode_word_query(self):
        """websearch - Unicode word query"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=title%3A%CE%99%CE%B8%CE%AC%CE%BA%CE%B7',
            expected_text="[76]")
        self.assertEqual([], failures)
    def test_unicode_word_query_not_found_term(self):
        """websearch - Unicode word query, not found term"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?p=title%3A%CE%99%CE%B8',
            expected_text="ιθάκη")
        self.assertEqual([], failures)
    def test_unicode_exact_phrase_query(self):
        """websearch - Unicode exact phrase query"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=title%3A%22%CE%99%CE%B8%CE%AC%CE%BA%CE%B7%22',
            expected_text="[76]")
        self.assertEqual([], failures)
    def test_unicode_partial_phrase_query(self):
        """websearch - Unicode partial phrase query"""
        # No hit expected for this title partial phrase query, because
        # the difference between double-quoted and single-quoted search
        # was removed.
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=title%3A%27%CE%B7%27',
            expected_text="[]")
        self.assertEqual([], failures)
    def test_unicode_regexp_query(self):
        """websearch - Unicode regexp query"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=title%3A%2F%CE%B7%2F',
            expected_text="[76]")
        self.assertEqual([], failures)
class WebSearchMARCQueryTest(unittest.TestCase):
    """Test of the search results for queries containing physical MARC tags."""
    def test_single_marc_tag_exact_phrase_query(self):
        """websearch - single MARC tag, exact phrase query (100__a)"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=100__a%3A%22Ellis%2C+J%22',
            expected_text="[9, 14, 18]")
        self.assertEqual([], failures)
    def test_single_marc_tag_partial_phrase_query(self):
        """websearch - single MARC tag, partial phrase query (245__b)"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=245__b%3A%27and%27',
            expected_text="[28]")
        self.assertEqual([], failures)
    def test_many_marc_tags_partial_phrase_query(self):
        """websearch - many MARC tags, partial phrase query (245)"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=245%3A%27and%27&rg=100',
            expected_text="[1, 8, 9, 14, 15, 20, 22, 24, 28, 33, 47, 48, 49, 51, 53, 64, 69, 71, 79, 82, 83, 85, 91, 96, 108]")
        self.assertEqual([], failures)
    def test_single_marc_tag_regexp_query(self):
        """websearch - single MARC tag, regexp query"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=245%3A%2Fand%2F&rg=100',
            expected_text="[1, 8, 9, 14, 15, 20, 22, 24, 28, 33, 47, 48, 49, 51, 53, 64, 69, 71, 79, 82, 83, 85, 91, 96, 108]")
        self.assertEqual([], failures)
class WebSearchExtSysnoQueryTest(unittest.TestCase):
    """Test of queries using external system numbers."""
    def test_existing_sysno_html_output(self):
        """websearch - external sysno query, existing sysno, HTML output"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?sysno=000289446CER',
            expected_text="The wall of the cave")
        self.assertEqual([], failures)
    def test_existing_sysno_id_output(self):
        """websearch - external sysno query, existing sysno, ID output"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?sysno=000289446CER&of=id',
            expected_text="[95]")
        self.assertEqual([], failures)
    def test_nonexisting_sysno_html_output(self):
        """websearch - external sysno query, non-existing sysno, HTML output"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?sysno=000289446CERRRR',
            expected_text="Requested record does not seem to exist.")
        self.assertEqual([], failures)
    def test_nonexisting_sysno_id_output(self):
        """websearch - external sysno query, non-existing sysno, ID output"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?sysno=000289446CERRRR&of=id',
            expected_text="[]")
        self.assertEqual([], failures)
class WebSearchResultsRecordGroupingTest(unittest.TestCase):
    """Test search results page record grouping (rg)."""
    def test_search_results_rg_guest(self):
        """websearch - search results, records in groups of, guest"""
        failures = test_web_page_content(CFG_SITE_URL + '/search?rg=17',
                                         expected_text="1 - 17")
        self.assertEqual([], failures)
    def test_search_results_rg_nonguest(self):
        """websearch - search results, records in groups of, non-guest"""
        # This test used to fail due to saved user preference fetching
        # not overridden by URL rg argument.
        failures = test_web_page_content(CFG_SITE_URL + '/search?rg=17',
                                         username='admin',
                                         expected_text="1 - 17")
        self.assertEqual([], failures)
class WebSearchSpecialTermsQueryTest(unittest.TestCase):
    """Test of the search results for queries containing special terms."""
    def test_special_terms_u1(self):
        """websearch - query for special terms, U(1)"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=U%281%29',
            expected_text="[57, 79, 80, 88]")
        self.assertEqual([], failures)
    def test_special_terms_u1_and_sl(self):
        """websearch - query for special terms, U(1) SL(2,Z)"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=U%281%29+SL%282%2CZ%29',
            expected_text="[88]")
        self.assertEqual([], failures)
    def test_special_terms_u1_and_sl_or(self):
        """websearch - query for special terms, U(1) OR SL(2,Z)"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=U%281%29+OR+SL%282%2CZ%29',
            expected_text="[57, 79, 80, 88]")
        self.assertEqual([], failures)
    @nottest
    def FIXME_TICKET_453_test_special_terms_u1_and_sl_or_parens(self):
        """websearch - query for special terms, (U(1) OR SL(2,Z))"""
        # Disabled pending ticket 453.
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=%28U%281%29+OR+SL%282%2CZ%29%29',
            expected_text="[57, 79, 80, 88]")
        self.assertEqual([], failures)
    def test_special_terms_u1_and_sl_in_quotes(self):
        """websearch - query for special terms, ('SL(2,Z)' OR 'U(1)')"""
        failures = test_web_page_content(
            CFG_SITE_URL + "/search?of=id&p=%28%27SL%282%2CZ%29%27+OR+%27U%281%29%27%29",
            expected_text="[57, 79, 80, 88, 96]")
        self.assertEqual([], failures)
class WebSearchJournalQueryTest(unittest.TestCase):
    """Test of the search results for journal pubinfo queries."""
    def test_query_journal_title_only(self):
        """websearch - journal publication info query, title only"""
        # Guest sees only the public hits; admin additionally sees 77.
        guest_failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&f=journal&p=Phys.+Lett.+B',
            expected_text="[78, 85, 87]")
        self.assertEqual([], guest_failures)
        admin_failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&f=journal&p=Phys.+Lett.+B',
            username='admin',
            expected_text="[77, 78, 85, 87]")
        self.assertEqual([], admin_failures)
    def test_query_journal_full_pubinfo(self):
        """websearch - journal publication info query, full reference"""
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&f=journal&p=Phys.+Lett.+B+531+%282002%29+301',
            expected_text="[78]")
        self.assertEqual([], failures)
class WebSearchStemmedIndexQueryTest(unittest.TestCase):
    """Test of the search results for queries using stemmed indexes."""
    def test_query_stemmed_lowercase(self):
        """websearch - stemmed index query, lowercase"""
        # dasse/Dasse is stemmed into dass/Dass, as expected.
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=dasse',
            expected_text="[25, 26]")
        self.assertEqual([], failures)
    def test_query_stemmed_uppercase(self):
        """websearch - stemmed index query, uppercase"""
        # DASSE is stemmed into DASSE(!), so this test would fail if
        # the search engine did not lower the query term first.
        # (Lowering is not necessary for non-stemmed indexes.)
        failures = test_web_page_content(
            CFG_SITE_URL + '/search?of=id&p=DASSE',
            expected_text="[25, 26]")
        self.assertEqual([], failures)
class WebSearchSummarizerTest(unittest.TestCase):
"""Test of the search results summarizer functions."""
def test_most_popular_field_values_singletag(self):
"""websearch - most popular field values, simple tag"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('PREPRINT', 37), ('ARTICLE', 28), ('BOOK', 14), ('THESIS', 8), ('PICTURE', 7),
('DRAFT', 2), ('POETRY', 2), ('REPORT', 2), ('ALEPHPAPER', 1), ('ATLANTISTIMESNEWS', 1),
('ISOLDEPAPER', 1)],
get_most_popular_field_values(range(0,100), '980__a'))
def test_most_popular_field_values_singletag_multiexclusion(self):
"""websearch - most popular field values, simple tag, multiple exclusions"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('PREPRINT', 37), ('ARTICLE', 28), ('BOOK', 14), ('DRAFT', 2), ('REPORT', 2),
('ALEPHPAPER', 1), ('ATLANTISTIMESNEWS', 1), ('ISOLDEPAPER', 1)],
get_most_popular_field_values(range(0,100), '980__a', ('THESIS', 'PICTURE', 'POETRY')))
def test_most_popular_field_values_multitag(self):
"""websearch - most popular field values, multiple tags"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('Ellis, J', 3), ('Enqvist, K', 1), ('Ibanez, L E', 1), ('Nanopoulos, D V', 1), ('Ross, G G', 1)],
get_most_popular_field_values((9, 14, 18), ('100__a', '700__a')))
def test_most_popular_field_values_multitag_singleexclusion(self):
"""websearch - most popular field values, multiple tags, single exclusion"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('Enqvist, K', 1), ('Ibanez, L E', 1), ('Nanopoulos, D V', 1), ('Ross, G G', 1)],
get_most_popular_field_values((9, 14, 18), ('100__a', '700__a'), ('Ellis, J')))
def test_most_popular_field_values_multitag_countrepetitive(self):
"""websearch - most popular field values, multiple tags, counting repetitive occurrences"""
from invenio.search_engine import get_most_popular_field_values
self.assertEqual([('THESIS', 2), ('REPORT', 1)],
get_most_popular_field_values((41,), ('690C_a', '980__a'), count_repetitive_values=True))
self.assertEqual([('REPORT', 1), ('THESIS', 1)],
get_most_popular_field_values((41,), ('690C_a', '980__a'), count_repetitive_values=False))
    def test_ellis_citation_summary(self):
        """websearch - query ellis, citation summary output format"""
        # The 'hcs' output format renders the citation summary; the
        # "Less known papers" row must link to the cited:1->9 span query.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis&of=hcs',
                                               expected_text="Less known papers (1-9)",
                                               expected_link_target=CFG_SITE_URL+"/search?p=ellis%20AND%20cited%3A1-%3E9",
                                               expected_link_label='1'))
    def test_ellis_not_quark_citation_summary_advanced(self):
        """websearch - ellis and not quark, citation summary format advanced"""
        # Same citation-summary expectation as above, issued through the
        # advanced search interface (as=1, m1/p1/f1/op1... arguments).
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&as=1&m1=a&p1=ellis&f1=author&op1=n&m2=a&p2=quark&f2=&op2=a&m3=a&p3=&f3=&action_search=Search&sf=&so=a&rm=&rg=10&sc=1&of=hcs',
                                               expected_text="Less known papers (1-9)",
                                               expected_link_target=CFG_SITE_URL+'/search?p=author%3Aellis%20and%20not%20quark%20AND%20cited%3A1-%3E9',
                                               expected_link_label='1'))
    def test_ellis_not_quark_citation_summary_regular(self):
        """websearch - ellis and not quark, citation summary format regular"""
        # Docstring fixed: this is the regular (simple) search interface
        # variant of the previous advanced-interface test.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?ln=en&p=author%3Aellis+and+not+quark&f=&action_search=Search&sf=&so=d&rm=&rg=10&sc=0&of=hcs',
                                               expected_text="Less known papers (1-9)",
                                               expected_link_target=CFG_SITE_URL+'/search?p=author%3Aellis%20and%20not%20quark%20AND%20cited%3A1-%3E9',
                                               expected_link_label='1'))
class WebSearchRecordCollectionGuessTest(unittest.TestCase):
    """Primary collection guessing tests."""

    def test_guess_primary_collection_of_a_record(self):
        """websearch - guess_primary_collection_of_a_record"""
        self.assertEqual(guess_primary_collection_of_a_record(96), 'Articles')

    def test_guess_collection_of_a_record(self):
        """websearch - guess_collection_of_a_record"""
        # With no referer (or a referer whose collection does not match
        # the record) the primary collection is returned ...
        self.assertEqual(guess_collection_of_a_record(96), 'Articles')
        self.assertEqual(guess_collection_of_a_record(96, '%s/collection/Theoretical Physics (TH)?ln=en' % CFG_SITE_URL), 'Articles')
        # ... while a matching collection referer is honoured, both in
        # plain and in URL-encoded form.
        self.assertEqual(guess_collection_of_a_record(12, '%s/collection/Theoretical Physics (TH)?ln=en' % CFG_SITE_URL), 'Theoretical Physics (TH)')
        self.assertEqual(guess_collection_of_a_record(12, '%s/collection/Theoretical%%20Physics%%20%%28TH%%29?ln=en' % CFG_SITE_URL), 'Theoretical Physics (TH)')
class WebSearchGetFieldValuesTest(unittest.TestCase):
    """Testing get_fieldvalues() function."""

    def test_get_fieldvalues_001(self):
        """websearch - get_fieldvalues() for bibxxx-agnostic tags"""
        # Tag 001 is the record identifier, stored outside the bibxxx
        # tables and returned as a string.
        self.assertEqual(get_fieldvalues(10, '001___'), ['10'])

    def test_get_fieldvalues_980(self):
        """websearch - get_fieldvalues() for bibxxx-powered tags"""
        self.assertEqual(get_fieldvalues(18, '700__a'), ['Enqvist, K', 'Nanopoulos, D V'])
        self.assertEqual(get_fieldvalues(18, '909C1u'), ['CERN'])

    def test_get_fieldvalues_wildcard(self):
        """websearch - get_fieldvalues() for tag wildcards"""
        # Wildcards shorter than a full tag prefix match nothing ...
        self.assertEqual(get_fieldvalues(18, '%'), [])
        self.assertEqual(get_fieldvalues(18, '7%'), [])
        # ... while a full three-digit prefix plus '%' does match.
        self.assertEqual(get_fieldvalues(18, '700%'), ['Enqvist, K', 'Nanopoulos, D V'])
        self.assertEqual(get_fieldvalues(18, '909C0%'), ['1985', '13','TH'])

    def test_get_fieldvalues_recIDs(self):
        """websearch - get_fieldvalues() for list of recIDs"""
        # The first argument may also be a list of record IDs; the empty
        # list yields the empty result.
        self.assertEqual(get_fieldvalues([], '001___'), [])
        self.assertEqual(get_fieldvalues([], '700__a'), [])
        self.assertEqual(get_fieldvalues([10, 13], '001___'), ['10', '13'])
        self.assertEqual(get_fieldvalues([18, 13], '700__a'),
                         ['Dawson, S', 'Ellis, R K', 'Enqvist, K', 'Nanopoulos, D V'])

    def test_get_fieldvalues_repetitive(self):
        """websearch - get_fieldvalues() for repetitive values"""
        # repetitive_values defaults to True; False de-duplicates the
        # returned values.
        self.assertEqual(get_fieldvalues([17, 18], '909C1u'),
                         ['CERN', 'CERN'])
        self.assertEqual(get_fieldvalues([17, 18], '909C1u', repetitive_values=True),
                         ['CERN', 'CERN'])
        self.assertEqual(get_fieldvalues([17, 18], '909C1u', repetitive_values=False),
                         ['CERN'])
class WebSearchAddToBasketTest(unittest.TestCase):
    """Test of the add-to-basket presence depending on user rights."""

    def test_add_to_basket_guest(self):
        """websearch - add-to-basket facility allowed for guests"""
        # Both the action label and the per-record checkbox must appear.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
                                               expected_text='Add to basket'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
                                               expected_text='<input name="recid" type="checkbox" value="10" />'))

    def test_add_to_basket_jekyll(self):
        """websearch - add-to-basket facility allowed for Dr. Jekyll"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
                                               expected_text='Add to basket',
                                               username='jekyll',
                                               password='j123ekyll'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
                                               expected_text='<input name="recid" type="checkbox" value="10" />',
                                               username='jekyll',
                                               password='j123ekyll'))

    def test_add_to_basket_hyde(self):
        """websearch - add-to-basket facility denied to Mr. Hyde"""
        # For the unauthorized user neither the label nor the checkbox
        # may be rendered (note unexpected_text instead of expected_text).
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
                                               unexpected_text='Add to basket',
                                               username='hyde',
                                               password='h123yde'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=recid%3A10',
                                               unexpected_text='<input name="recid" type="checkbox" value="10" />',
                                               username='hyde',
                                               password='h123yde'))
class WebSearchAlertTeaserTest(unittest.TestCase):
    """Test of the alert teaser presence depending on user rights."""

    def test_alert_teaser_guest(self):
        """websearch - alert teaser allowed for guests"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
                                               expected_link_label='email alert'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
                                               expected_text='RSS feed'))

    def test_alert_teaser_jekyll(self):
        """websearch - alert teaser allowed for Dr. Jekyll"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
                                               expected_text='email alert',
                                               username='jekyll',
                                               password='j123ekyll'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
                                               expected_text='RSS feed',
                                               username='jekyll',
                                               password='j123ekyll'))

    def test_alert_teaser_hyde(self):
        """websearch - alert teaser allowed for Mr. Hyde"""
        # Unlike the basket facility, the alert teaser is shown to every
        # authenticated user, including Mr. Hyde.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
                                               expected_text='email alert',
                                               username='hyde',
                                               password='h123yde'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=ellis',
                                               expected_text='RSS feed',
                                               username='hyde',
                                               password='h123yde'))
class WebSearchSpanQueryTest(unittest.TestCase):
    """Test of span queries."""

    def test_span_in_word_index(self):
        """websearch - span query in a word index"""
        # 'a->b' (URL-encoded '-%3E') selects the inclusive value span.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=year%3A1992-%3E1996&of=id&ap=0',
                                               expected_text='[17, 66, 69, 71]'))

    def test_span_in_phrase_index(self):
        """websearch - span query in a phrase index"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=year%3A%221992%22-%3E%221996%22&of=id&ap=0',
                                               expected_text='[17, 66, 69, 71]'))

    def test_span_in_bibxxx(self):
        """websearch - span query in MARC tables"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=909C0y%3A%221992%22-%3E%221996%22&of=id&ap=0',
                                               expected_text='[17, 66, 69, 71]'))

    def test_span_with_spaces(self):
        """websearch - no span query when a space is around"""
        # useful for reaction search: 'mu --> e' must be treated as a
        # literal phrase, not as a span operator
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=title%3A%27mu%20--%3E%20e%27&of=id&ap=0',
                                               expected_text='[67]'))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=245%3A%27mu%20--%3E%20e%27&of=id&ap=0',
                                               expected_text='[67]'))

    def test_span_in_author(self):
        """websearch - span query in special author index"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=author%3A%22Ellis,%20K%22-%3E%22Ellis,%20RZ%22&of=id&ap=0',
                                               expected_text='[8, 9, 11, 12, 13, 14, 17, 18, 47]'))
class WebSearchReferstoCitedbyTest(unittest.TestCase):
    """Test of refersto/citedby search operators."""

    def test_refersto_recid(self):
        'websearch - refersto:recid:84'
        # refersto:X returns records whose reference list contains X.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Arecid%3A84&of=id&ap=0',
                                               expected_text='[85, 88, 91]'))

    def test_refersto_repno(self):
        'websearch - refersto:reportnumber:hep-th/0205061'
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Areportnumber%3Ahep-th/0205061&of=id&ap=0',
                                               expected_text='[91]'))

    def test_refersto_author_word(self):
        'websearch - refersto:author:klebanov'
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Aauthor%3Aklebanov&of=id&ap=0',
                                               expected_text='[85, 86, 88, 91]'))

    def test_refersto_author_phrase(self):
        'websearch - refersto:author:"Klebanov, I"'
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Aauthor%3A%22Klebanov,%20I%22&of=id&ap=0',
                                               expected_text='[85, 86, 88, 91]'))

    def test_citedby_recid(self):
        'websearch - citedby:recid:92'
        # citedby:X returns records that are cited by record X.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Arecid%3A92&of=id&ap=0',
                                               expected_text='[74, 91]'))

    def test_citedby_repno(self):
        'websearch - citedby:reportnumber:hep-th/0205061'
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Areportnumber%3Ahep-th/0205061&of=id&ap=0',
                                               expected_text='[78]'))

    def test_citedby_author_word(self):
        'websearch - citedby:author:klebanov'
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Aauthor%3Aklebanov&of=id&ap=0',
                                               expected_text='[95]'))

    def test_citedby_author_phrase(self):
        'websearch - citedby:author:"Klebanov, I"'
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Aauthor%3A%22Klebanov,%20I%22&of=id&ap=0',
                                               expected_text='[95]'))

    def test_refersto_bad_query(self):
        'websearch - refersto:title:'
        # A refersto with an empty inner pattern must produce a helpful
        # message rather than an error page.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=refersto%3Atitle%3A',
                                               expected_text='There are no records referring to title:.'))

    def test_citedby_bad_query(self):
        'websearch - citedby:title:'
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=citedby%3Atitle%3A',
                                               expected_text='There are no records cited by title:.'))
class WebSearchSPIRESSyntaxTest(unittest.TestCase):
    """Test of SPIRES syntax issues"""

    # NOTE: the tests below are defined conditionally at class-creation
    # time -- they only exist when SPIRES syntax support is enabled and,
    # for the date test, when python-dateutil is importable.
    if CFG_WEBSEARCH_SPIRES_SYNTAX > 0:
        def test_and_not_parens(self):
            'websearch - find a ellis, j and not a enqvist'
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL +'/search?p=find+a+ellis%2C+j+and+not+a+enqvist&of=id&ap=0',
                                                   expected_text='[9, 12, 14, 47, 118]'))

    if DATEUTIL_AVAILABLE:
        def test_dadd_search(self):
            'websearch - find da > today - 3650'
            # XXX: assumes we've reinstalled our site in the last 10 years
            # should return every document in the system
            self.assertEqual([],
                             test_web_page_content(CFG_SITE_URL +'/search?ln=en&p=find+da+%3E+today+-+3650&f=&of=id',
                                                   expected_text='[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 99, 100, 101, 102, 103, 104, 107, 108, 113, 114, 115, 116, 117, 118, 120, 121, 122, 123, 124, 125, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141]'))
class WebSearchDateQueryTest(unittest.TestCase):
    """Test various date queries."""

    def setUp(self):
        """Establish variables we plan to re-use"""
        # Empty record set used as the comparison baseline below.
        self.empty = intbitset()

    def test_search_unit_hits_for_datecreated_previous_millenia(self):
        """websearch - search_unit with datecreated returns >0 hits for docs in the last 1000 years"""
        self.assertNotEqual(self.empty, search_unit('1000-01-01->9999-12-31', 'datecreated'))

    def test_search_unit_hits_for_datemodified_previous_millenia(self):
        """websearch - search_unit with datemodified returns >0 hits for docs in the last 1000 years"""
        self.assertNotEqual(self.empty, search_unit('1000-01-01->9999-12-31', 'datemodified'))

    def test_search_unit_in_bibrec_for_datecreated_previous_millenia(self):
        """websearch - search_unit_in_bibrec with creationdate gets >0 hits for past 1000 years"""
        self.assertNotEqual(self.empty, search_unit_in_bibrec("1000-01-01", "9999-12-31", 'creationdate'))

    def test_search_unit_in_bibrec_for_datecreated_next_millenia(self):
        """websearch - search_unit_in_bibrec with creationdate gets 0 hits for after year 3000"""
        self.assertEqual(self.empty, search_unit_in_bibrec("3000-01-01", "9999-12-31", 'creationdate'))
class WebSearchSynonymQueryTest(unittest.TestCase):
    """Test of queries using synonyms."""

    def test_journal_phrvd(self):
        """websearch - search-time synonym search, journal title"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=PHRVD&f=journal&of=id',
                                               expected_text="[66, 72]"))

    def test_journal_phrvd_54_1996_4234(self):
        """websearch - search-time synonym search, journal article"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=PHRVD%2054%20%281996%29%204234&f=journal&of=id',
                                               expected_text="[66]"))

    # In the tests below %CE%B2 is the URL-encoded Greek letter beta;
    # index-time synonyms make 'beta' and the Unicode character
    # interchangeable at query time.
    def test_journal_beta_decay_title(self):
        """websearch - index-time synonym search, beta decay in title"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=beta+decay&f=title&of=id',
                                               expected_text="[59]"))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%CE%B2+decay&f=title&of=id',
                                               expected_text="[59]"))

    def test_journal_beta_decay_global(self):
        """websearch - index-time synonym search, beta decay in any field"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=beta+decay&of=id',
                                               expected_text="[52, 59]"))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%CE%B2+decay&of=id',
                                               expected_text="[52, 59]"))

    def test_journal_beta_title(self):
        """websearch - index-time synonym search, beta in title"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=beta&f=title&of=id',
                                               expected_text="[59]"))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%CE%B2&f=title&of=id',
                                               expected_text="[59]"))

    def test_journal_beta_global(self):
        """websearch - index-time synonym search, beta in any field"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=beta&of=id',
                                               expected_text="[52, 59]"))
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%CE%B2&of=id',
                                               expected_text="[52, 59]"))
class WebSearchWashCollectionsTest(unittest.TestCase):
    """Test if the collection argument is washed correctly"""

    def test_wash_coll_when_coll_restricted(self):
        """websearch - washing of restricted daughter collections"""
        # Both elements 1 and 2 of the wash_colls() result tuple must
        # keep the restricted daughter collections that were requested.
        self.assertEqual(
            sorted(wash_colls(cc='', c=['Books & Reports', 'Theses'])[1]),
            ['Books & Reports', 'Theses'])
        self.assertEqual(
            sorted(wash_colls(cc='', c=['Books & Reports', 'Theses'])[2]),
            ['Books & Reports', 'Theses'])
class WebSearchAuthorCountQueryTest(unittest.TestCase):
    """Test of queries using authorcount fields."""

    def test_journal_authorcount_word(self):
        """websearch - author count, word query"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=4&f=authorcount&of=id',
                                               expected_text="[51, 54, 59, 66, 92, 96]"))

    def test_journal_authorcount_phrase(self):
        """websearch - author count, phrase query"""
        # The quoted form must behave like the word form for this index.
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%224%22&f=authorcount&of=id',
                                               expected_text="[51, 54, 59, 66, 92, 96]"))

    def test_journal_authorcount_span(self):
        """websearch - author count, span query"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=authorcount%3A9-%3E16&of=id',
                                               expected_text="[69, 71, 127]"))

    def test_journal_authorcount_plus(self):
        """websearch - author count, plus query"""
        # '50+' (URL-encoded 50%2B) means "50 or more authors".
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=50%2B&f=authorcount&of=id',
                                               expected_text="[10, 17]"))
class WebSearchItemCountQueryTest(unittest.TestCase):
    """Test of queries using itemcount field/index"""

    def test_itemcount_plus(self):
        """websearch - item count, search for more than one item, using 'plus'"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=2%2B&f=itemcount&of=id',
                                               expected_text="[31, 32, 34]"))

    def test_itemcount_span(self):
        """websearch - item count, search for more than one item, using 'span'"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=2->10&f=itemcount&of=id',
                                               expected_text="[31, 32, 34]"))

    def test_itemcount_phrase(self):
        """websearch - item count, search for records with exactly two items, phrase"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=%222%22&f=itemcount&of=id',
                                               expected_text="[31, 34]"))

    def test_itemcount_records_with_two_items(self):
        """websearch - item count, search for records with exactly two items"""
        self.assertEqual([],
                         test_web_page_content(CFG_SITE_URL + '/search?p=2&f=itemcount&of=id',
                                               expected_text="[31, 34]"))
class WebSearchPerformRequestSearchRefactoringTest(unittest.TestCase):
    """Tests the perform request search API after refactoring."""

    def _run_test(self, test_args, expected_results):
        """Run perform_request_search() with arguments parsed out of the
        semicolon-separated *test_args* string and compare the outcome
        against *expected_results* (a list of record IDs, or a string to
        be matched against the raw response body)."""
        params = {}
        # Split 'k1=v1;k2=v2;...' into keyword arguments.  A value that
        # contains a comma -- but not the ', ' of a literal name such as
        # 'Ellis, J' -- is further split into a list of values.
        params.update(map(lambda y: (y[0], ',' in y[1] and ', ' not in y[1] and y[1].split(',') or y[1]), map(lambda x: x.split('=', 1), test_args.split(';'))))
        #params.update(map(lambda x: x.split('=', 1), test_args.split(';')))
        req = cStringIO.StringIO()
        params['req'] = req
        recs = perform_request_search(**params)
        # A string expectation means we compare against the page body
        # written into the fake request object, not the hit list.
        if isinstance(recs, str):
            pass
        if isinstance(expected_results, str):
            req.seek(0)
            recs = req.read()
        # this is just used to generate the results from the search engine before refactoring
        #if recs != expected_results:
        #    print test_args
        #    print params
        #    print recs
        self.assertEqual(recs, expected_results, "Error, we expect: %s, and we received: %s" % (expected_results, recs))

    def test_queries(self):
        """websearch - testing p_r_s standard arguments and their combinations"""
        self._run_test('p=ellis;f=author;action=Search', [8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 47, 118])
        self._run_test('p=ellis;f=author;sf=title;action=Search', [118, 8, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=ellis;f=author;sf=title;wl=5;action=Search', [118, 8, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=ellis;f=author;sf=title;wl=5;so=a', [118, 13, 47, 10, 12, 18, 17, 11, 9, 14, 16, 8])
        self._run_test('p=ellis;f=author;sf=title;wl=5;so=d', [118, 8, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=ell*;sf=title;wl=5', [118, 8, 15, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=ell*;sf=title;wl=1', [10])
        self._run_test('p=ell*;sf=title;wl=100', [118, 8, 15, 16, 14, 9, 11, 17, 18, 12, 10, 47, 13])
        self._run_test('p=muon OR kaon;f=author;sf=title;wl=5;action=Search', [])
        self._run_test('p=muon OR kaon;sf=title;wl=5;action=Search', [67, 12])
        self._run_test('p=muon OR kaon;sf=title;wl=5;c=Articles,Preprints', [67, 12])
        self._run_test('p=muon OR kaon;sf=title;wl=5;c=Articles', [67])
        self._run_test('p=muon OR kaon;sf=title;wl=5;c=Preprints', [12])
        # FIXME_TICKET_1174
        # self._run_test('p=el*;rm=citation', [2, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 23, 30, 32, 34, 47, 48, 51, 52, 54, 56, 58, 59, 92, 97, 100, 103, 18, 74, 91, 94, 81])
        if not get_external_word_similarity_ranker():
            self._run_test('p=el*;rm=wrd', [2, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 23, 30, 32, 34, 47, 48, 51, 52, 54, 56, 58, 59, 74, 81, 91, 92, 94, 97, 100, 103, 109, 118, 123, 127, 128])
        self._run_test('p=el*;sf=title', [118, 123, 100, 32, 8, 15, 16, 81, 97, 34, 23, 127, 58, 2, 14, 9, 128, 11, 30, 109, 52, 48, 94, 17, 56, 18, 91, 59, 12, 92, 74, 54, 103, 10, 51, 47, 13])
        self._run_test('p=boson;rm=citation', [1, 47, 50, 107, 108, 77, 95])
        if not get_external_word_similarity_ranker():
            self._run_test('p=boson;rm=wrd', [108, 77, 47, 50, 95, 1, 107])
        self._run_test('p1=ellis;f1=author;m1=a;op1=a;p2=john;f2=author;m2=a', [9, 12, 14, 18, 118])
        self._run_test('p1=ellis;f1=author;m1=o;op1=a;p2=john;f2=author;m2=o', [9, 12, 14, 18, 118])
        self._run_test('p1=ellis;f1=author;m1=e;op1=a;p2=john;f2=author;m2=e', [])
        self._run_test('p1=ellis;f1=author;m1=a;op1=o;p2=john;f2=author;m2=a', [8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 47, 118])
        self._run_test('p1=ellis;f1=author;m1=o;op1=o;p2=john;f2=author;m2=o', [8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 47, 118])
        self._run_test('p1=ellis;f1=author;m1=e;op1=o;p2=john;f2=author;m2=e', [])
        self._run_test('p1=ellis;f1=author;m1=a;op1=n;p2=john;f2=author;m2=a', [8, 10, 11, 13, 16, 17, 47])
        self._run_test('p1=ellis;f1=author;m1=o;op1=n;p2=john;f2=author;m2=o', [8, 10, 11, 13, 16, 17, 47])
        self._run_test('p1=ellis;f1=author;m1=e;op1=n;p2=john;f2=author;m2=e', [])
        self._run_test('p=Ellis, J;ap=1', [9, 10, 11, 12, 14, 17, 18, 47, 118])
        self._run_test('p=Ellis, J;ap=0', [9, 10, 11, 12, 14, 17, 18, 47, 118])
        self._run_test('p=recid:148x', [])
        self._run_test('p=recid:148x;of=xm;rg=200', "<collection xmlns=\"http://www.loc.gov/MARC21/slim\">\n\n</collection>")
class WebSearchGetRecordTests(unittest.TestCase):
    """Tests print_record()/get_record() behaviour for an empty record."""

    def setUp(self):
        # Creates a fresh, content-less bibrec row to exercise the
        # empty-record code paths; removed again in tearDown().
        self.recid = run_sql("INSERT INTO bibrec(creation_date, modification_date) VALUES(NOW(), NOW())")

    def tearDown(self):
        run_sql("DELETE FROM bibrec WHERE id=%s", (self.recid,))

    def test_get_record(self):
        """bibformat - test print_record and get_record of empty record"""
        from invenio.search_engine import print_record, get_record
        # An empty record still renders its 001 controlfield in MARCXML
        # and exposes it through the structured get_record() API.
        self.assertEqual(print_record(self.recid, 'xm'), '    <record>\n        <controlfield tag="001">%s</controlfield>\n    </record>\n\n    ' % self.recid)
        self.assertEqual(get_record(self.recid), {'001': [([], ' ', ' ', str(self.recid), 1)]})
class WebSearchExactTitleIndexTest(unittest.TestCase):
    """Checks if exact title index works correctly """

    def _check(self, url_query, expectation):
        # Fetches the given search page and fails the test with the
        # merged error messages when the expectation does not hold.
        problems = test_web_page_content(CFG_SITE_URL + url_query,
                                         expected_text = expectation)
        if problems:
            self.fail(merge_error_messages(problems))

    def test_exacttitle_query_solves_problems(self):
        """websearch - check exacttitle query solves problems"""
        self._check("/search?ln=en&p=exacttitle%3A'solves+problems'&f=&action_search=Search",
                    "Non-compact supergravity solves problems")

    def test_exacttitle_query_solve_problems(self):
        """websearch - check exacttitle query solve problems"""
        self._check("/search?ln=en&p=exacttitle%3A'solve+problems'&f=&action_search=Search",
                    ['Search term', 'solve problems', 'did not match'])

    def test_exacttitle_query_photon_beam(self):
        """websearch - check exacttitle search photon beam"""
        self._check("/search?ln=en&p=exacttitle%3A'photon+beam'&f=&action_search=Search",
                    "Development of photon beam diagnostics")

    def test_exacttitle_query_photons_beam(self):
        """websearch - check exacttitle search photons beam"""
        self._check("/search?ln=en&p=exacttitle%3A'photons+beam'&f=&action_search=Search",
                    ['Search term', 'photons beam', 'did not match'])
# Aggregates every websearch regression test case into a single suite;
# classes not visible in this region (e.g. WebSearchCJKTokenizedSearchTest)
# are defined earlier in this file.
TEST_SUITE = make_test_suite(WebSearchWebPagesAvailabilityTest,
                             WebSearchTestSearch,
                             WebSearchTestBrowse,
                             WebSearchTestOpenURL,
                             WebSearchTestCollections,
                             WebSearchTestRecord,
                             WebSearchTestLegacyURLs,
                             WebSearchNearestTermsTest,
                             WebSearchBooleanQueryTest,
                             WebSearchAuthorQueryTest,
                             WebSearchSearchEnginePythonAPITest,
                             WebSearchSearchEngineWebAPITest,
                             WebSearchRecordWebAPITest,
                             WebSearchRestrictedCollectionTest,
                             WebSearchRestrictedCollectionHandlingTest,
                             WebSearchRestrictedPicturesTest,
                             WebSearchRestrictedWebJournalFilesTest,
                             WebSearchRSSFeedServiceTest,
                             WebSearchXSSVulnerabilityTest,
                             WebSearchResultsOverview,
                             WebSearchSortResultsTest,
                             WebSearchSearchResultsXML,
                             WebSearchUnicodeQueryTest,
                             WebSearchMARCQueryTest,
                             WebSearchExtSysnoQueryTest,
                             WebSearchResultsRecordGroupingTest,
                             WebSearchSpecialTermsQueryTest,
                             WebSearchJournalQueryTest,
                             WebSearchStemmedIndexQueryTest,
                             WebSearchSummarizerTest,
                             WebSearchRecordCollectionGuessTest,
                             WebSearchGetFieldValuesTest,
                             WebSearchAddToBasketTest,
                             WebSearchAlertTeaserTest,
                             WebSearchSpanQueryTest,
                             WebSearchReferstoCitedbyTest,
                             WebSearchSPIRESSyntaxTest,
                             WebSearchDateQueryTest,
                             WebSearchTestWildcardLimit,
                             WebSearchSynonymQueryTest,
                             WebSearchWashCollectionsTest,
                             WebSearchAuthorCountQueryTest,
                             WebSearchPerformRequestSearchRefactoringTest,
                             WebSearchGetRecordTests,
                             WebSearchExactTitleIndexTest,
                             WebSearchCJKTokenizedSearchTest,
                             WebSearchItemCountQueryTest)

# warn_user=True: the regression suite touches the live demo site and
# database, so the runner asks for confirmation before executing.
if __name__ == "__main__":
    run_test_suite(TEST_SUITE, warn_user=True)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (C) 2008-2012 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hive Netius System. If not, see <http://www.gnu.org/licenses/>.
# Module metadata; FIX: the author e-mail was missing its opening '<'
# bracket ("João Magalhães joamag@hive.pt>").
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """

__version__ = "1.0.0"
""" The version of the module """

__revision__ = "$LastChangedRevision$"
""" The revision number of the module """

__date__ = "$LastChangedDate$"
""" The last change date of the module """

__copyright__ = "Copyright (c) 2008-2012 Hive Solutions Lda."
""" The copyright for the module """

__license__ = "GNU General Public License (GPL), Version 3"
""" The license for the module """
import os
import ssl
import copy
import time
import errno
import select
import logging
import traceback
import observer
from conn import * #@UnusedWildImport
NAME = "netius"
""" The global infra-structure name to be used in the
identification of both the clients and the services this
value may be prefixed or suffixed """

VERSION = "0.2.0"
""" The version value that identifies the version of the
current infra-structure, all of the services and clients
may share this value """

WSAEWOULDBLOCK = 10035
""" The wsa would block error code meant to be used on
windows environments as a replacement for the would block
error code that indicates the failure to operate on a non
blocking connection """

VALID_ERRORS = (
    errno.EWOULDBLOCK,
    errno.EAGAIN,
    errno.EPERM,
    errno.ENOENT,
    WSAEWOULDBLOCK
)
""" List containing the complete set of error that represent
non ready operations in a non blocking socket """

SSL_VALID_ERRORS = (
    ssl.SSL_ERROR_WANT_READ,
    ssl.SSL_ERROR_WANT_WRITE
)
""" The list containing the valid error in the handshake
operation of the ssl connection establishment """

STATE_STOP = 1
""" The stop state value, this value is set when the service
is either in the constructed stage or when the service has been
stop normally or with an error """

STATE_START = 2
""" The start state set when the service is in the starting
stage and running, normal state """

STATE_CONFIG = 3
""" The configuration state that is set when the service is
preparing to become started and the configuration attributes
are being set according to pre-determined indications """

STATE_SELECT = 4
""" State to be used when the service is in the select part
of the loop, this is the most frequent state in an idle service
as the service "spends" most of its time in it """

STATE_TICK = 5
""" Tick state representative of the situation where the loop
tick operation is being started and all the pre tick handlers
are going to be called for pre-operations """

STATE_READ = 6
""" Read state that is set when the connection are being read
and the on data handlers are being called, this is the part
where all the logic driven by incoming data is being called """

STATE_WRITE = 7
""" The write state that is set on the writing of data to the
connections, this is a pretty "fast" state as no logic is
associated with it """

STATE_ERRROR = 8
""" The error state to be used when the connection is processing
any error state coming from its main select operation and associated
with a certain connection (very rare) """

STATE_ERROR = STATE_ERRROR
""" Correctly spelled alias for the (misspelled) STATE_ERRROR
constant; the original name is kept for backward compatibility
with any code already referencing it """

STATE_STRINGS = (
    "STOP",
    "START",
    "CONFIG",
    "SELECT",
    "TICK",
    "READ",
    "WRITE",
    "ERROR"
)
""" Sequence that contains the various strings associated with
the various states for the base service, this may be used to
create an integer to string resolution mechanism (state value
minus one indexes into this sequence) """

KEEPALIVE_TIMEOUT = 300
""" The amount of time in seconds that a connection is set as
idle until a new refresh token is sent to it to make sure that
it's still online and not disconnected, make sure that this
value is high enough that it does not consume to much bandwidth """

KEEPALIVE_COUNT = 3
""" The amount of times the "ping" packet is re-sent until the
connection is considered to be offline and is dropped """

KEEPALIVE_INTERVAL = int(KEEPALIVE_TIMEOUT / 10)
""" The time between the retrying of "ping" packets, this value
does not need to be too large and should not be considered too
important (may be calculated automatically) """

# initializes the various paths that are going to be used for
# the base files configuration in the complete service infra
# structure, these should include the ssl based files
BASE_PATH = os.path.dirname(__file__)
EXTRAS_PATH = os.path.join(BASE_PATH, "extras")
SSL_KEY_PATH = os.path.join(EXTRAS_PATH, "net.key")
SSL_CER_PATH = os.path.join(EXTRAS_PATH, "net.cer")
class Base(observer.Observable):
"""
Base network structure to be used by all the network
capable infra-structures (eg: servers and clients).
Should handle all the nonblocking event loop so that
the read and write operations are easy to handle.
"""
    def __init__(self, name = None, handler = None, *args, **kwargs):
        """
        Constructor of the base network structure.

        @param name: The name used to identify this service or client,
        defaults to the class name when not provided.
        @param handler: Optional logging handler to be registered in
        the logger created for this instance (see load_logging).
        """

        observer.Observable.__init__(self, *args, **kwargs)
        self.name = name or self.__class__.__name__
        self.handler = handler
        self.level = kwargs.get("level", logging.DEBUG)
        self.tid = None          # thread identifier of the loop owner (set in start())
        self.logger = None       # lazily created in load_logging()
        self.read_l = []         # selection list for readable sockets
        self.write_l = []        # selection list for writable sockets
        self.error_l = []        # selection list for error conditions
        self.connections = []    # currently registered connection objects
        self.connections_m = {}  # presumably socket -> connection map; confirm in conn module
        self._running = False    # controls the main loop in loop()
        self._loaded = False     # guards load() against duplicate execution
        self._delayed = []       # callables scheduled via delay() for the next tick
        self.set_state(STATE_STOP);
    def __del__(self):
        # ensures the service is stopped when this object is garbage
        # collected, so that no loop keeps running unattended
        self.close()
    def delay(self, callable):
        # schedules the provided callable for execution on the next loop
        # tick (consumed in ticks()); NOTE: the parameter name shadows
        # the built-in callable() -- kept for interface compatibility
        self._delayed.append(callable)
    def load(self):
        # loads the internal structures (currently only the logging
        # infra-structure), using the _loaded flag to guarantee that
        # the operation runs exactly once per instance
        if self._loaded: return
        self.load_logging(self.level);
        self._loaded = True
    def load_logging(self, level = logging.DEBUG):
        # configures the global logging format, then creates a logger
        # named after the concrete class with the requested verbosity,
        # attaching the external handler when one was provided
        logging.basicConfig(format = "%(asctime)s [%(levelname)s] %(message)s")
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(level)
        self.handler and self.logger.addHandler(self.handler)
    def start(self):
        """
        Starts the base service: loads the internal structures, sets
        the running flag and then blocks inside the main event loop
        until stop() is called or the loop raises; the state is reset
        to stop and the connections cleaned up on the way out.
        """

        # retrieves the current thread identifier as the current
        # "tid" value to be used for thread control mechanisms
        # NOTE(review): "thread" is not among this module's explicit
        # imports -- presumably provided by the wildcard import from
        # conn, confirm
        self.tid = thread.get_ident()

        # triggers the loading of the internal structures of
        # the base structure in case the loading has already
        # been done nothing is done (avoids duplicated load)
        self.load()

        # sets the running flag that controls the running of the
        # main loop and then changes the current state to start
        # as the main loop is going to start
        self._running = True
        self.set_state(STATE_START)

        # enters the main loop operation printing a message
        # to the logger indicating this start, this stage
        # should block the thread until a stop call is made
        self.debug("Starting '%s' service main loop ..." % self.name)
        self.trigger("start", self)
        try: self.loop()
        except BaseException, exception:
            # known exception type: log it together with its formatted
            # traceback at warning level
            self.error(exception)
            lines = traceback.format_exc().splitlines()
            for line in lines: self.warning(line)
        except:
            # anything else (old-style exceptions) is considered
            # critical and logged accordingly
            self.critical("Critical level loop exception raised")
            lines = traceback.format_exc().splitlines()
            for line in lines: self.error(line)
        finally:
            # always notifies listeners, closes the connections and
            # restores the stopped state, even on failure
            self.trigger("stop", self)
            self.debug("Finished '%s' service main loop" % self.name)
            self.cleanup()
            self.set_state(STATE_STOP)
    def stop(self):
        # unsets the running flag; the main loop observes it and
        # exits at the start of its next iteration (see loop())
        self._running = False
    def close(self):
        # closing is currently equivalent to requesting a stop of
        # the main loop; cleanup happens in start()'s finally clause
        self.stop()
def is_empty(self):
return not self.read_l and not self.write_l and not self.error_l
def cleanup(self):
    # snapshot the connections first: closing a connection mutates
    # the owner's connections list, so iterating the live list
    # directly would skip elements
    pending = list(self.connections)

    # gracefully disconnects every registered connection so that
    # none of them may be used from this point on
    for item in pending:
        item.close()

    # empties all of the loop related interest lists in place so
    # that no further selection operations are issued on them
    self.read_l[:] = []
    self.write_l[:] = []
    self.error_l[:] = []
def loop(self):
    """
    Main event loop: alternates tick processing with a (timed)
    select over the read/write/error interest lists, dispatching
    the resulting active sets to the reads/writes/errors handlers.
    Blocks until the running flag is unset by stop().
    """

    # iterates continuously while the running flag
    # is set, once it becomes unset the loop breaks
    # at the next execution cycle
    while self._running:
        # calls the base tick int handler indicating that a new
        # tick loop iteration is going to be started, all the
        # "in between loop" operation should be performed in this
        # callback as this is the "space" they have for execution
        self.ticks()

        # updates the current state to select to indicate
        # that the base service is selecting the connections
        self.set_state(STATE_SELECT)

        # verifies if the current selection list is empty
        # in case it's sleeps for a while and then continues
        # the loop (this avoids error in empty selection)
        is_empty = self.is_empty()
        if is_empty: time.sleep(0.25); continue

        # runs the main selection operation on the current set
        # of connection for each of the three operations returning
        # the resulting active sets for the callbacks; the final
        # argument is the select timeout in seconds
        reads, writes, errors = select.select(
            self.read_l,
            self.write_l,
            self.error_l,
            0.25
        )

        # calls the various callbacks with the selections lists,
        # these are the main entry points for the logic to be executed
        # each of this methods should be implemented in the underlying
        # class instances as no behavior is defined at this inheritance
        # level (abstract class)
        self.reads(reads)
        self.writes(writes)
        self.errors(errors)
def ticks(self):
    # runs (and then discards) every callable queued through
    # delay(), this is the per-iteration "in between loop" hook
    self.set_state(STATE_TICK)
    for method in self._delayed: method()
    del self._delayed[:]
def reads(self, reads):
    # abstract read dispatcher, only updates the state; concrete
    # subclasses are expected to process the ready sockets
    self.set_state(STATE_READ)
def writes(self, writes):
    # abstract write dispatcher, only updates the state; concrete
    # subclasses are expected to flush the ready sockets
    self.set_state(STATE_WRITE)
def errors(self, errors):
    # abstract error dispatcher, only updates the state (the
    # STATE_ERRROR constant name carries a typo, kept as defined)
    self.set_state(STATE_ERRROR)
def on_connection_c(self, connection):
    # callback invoked when a connection is created, only logs
    # the event at debug level
    self.debug(
        "Connection '%s' from '%s' created ..." %\
        (connection.id, connection.owner.name)
    )
def on_connection_d(self, connection):
    # callback invoked when a connection is deleted, only logs
    # the event at debug level
    self.debug(
        "Connection '%s' from '%s' deleted" %\
        (connection.id, connection.owner.name)
    )
def info_dict(self):
    # builds a small diagnostics map describing the current
    # situation of the service (load flag, connection count
    # and a human readable state string)
    return {
        "loaded" : self._loaded,
        "connections" : len(self.connections),
        "state" : self.get_state_s()
    }
def new_connection(self, socket, address, ssl = False):
    """
    Creates a new connection for the provided socket
    object and string based address, the returned
    value should be a workable object.

    @type socket: Socket
    @param socket: The socket object to be encapsulated
    by the object to be created (connection).
    @type address: String
    @param address: The address as a string to be used to
    describe the connection object to be created.
    @type ssl: bool
    @param ssl: If the connection to be created is meant to
    be secured using the ssl framework for encryption.
    @rtype: Connection
    @return: The connection object that encapsulates the
    provided socket and address values.
    """

    # note: `socket` and `ssl` intentionally shadow the module
    # names here; Connection comes from the conn wildcard import
    return Connection(self, socket, address, ssl = ssl)
def debug(self, object):
    # convenience wrapper, logs the value at the debug level
    self.log(object, level = logging.DEBUG)
def info(self, object):
    # convenience wrapper, logs the value at the info level
    self.log(object, level = logging.INFO)
def warning(self, object):
    # convenience wrapper, logs the value at the warning level
    self.log(object, level = logging.WARNING)
def error(self, object):
    # convenience wrapper, logs the value at the error level
    self.log(object, level = logging.ERROR)
def critical(self, object):
    # convenience wrapper, logs the value at the critical level
    self.log(object, level = logging.CRITICAL)
def log(self, object, level = logging.INFO):
    # coerces non string values into unicode before logging
    # (Python 2 semantics: types.StringTypes is (str, unicode));
    # NOTE(review): `types` is not in the visible import list - confirm
    object_t = type(object)
    message = unicode(object) if not object_t in types.StringTypes else object
    # silently drops the message when logging was never loaded
    if not self.logger: return
    self.logger.log(level, message)
def set_state(self, state):
    # stores the current state (one of the STATE_* constants)
    self._state = state
def get_state_s(self, lower = True):
    """
    Retrieves a string describing the current state
    of the system, this string should be as descriptive
    as possible.

    An optional parameter controls if the string should
    be lower cased or not.

    @type lower: bool
    @param lower: If the returned string should be converted
    into a lower cased version.
    @rtype: String
    @return: A string describing the current sate of the loop
    system, should be as descriptive as possible.
    """

    # the STATE_* constants are 1-based, hence the -1 when
    # indexing into the (0-based) STATE_STRINGS sequence
    state_s = STATE_STRINGS[self._state - 1]
    state_s = state_s.lower() if lower else state_s
    return state_s
def _pending(self, _socket):
    """
    Tries to perform the pending operations in the socket
    and, these operations are set in the pending variable
    of the socket structure.

    The method returns if there are still pending operations
    after this method tick.

    @type _socket: Socket
    @param _socket: The socket object to be checked for
    pending operations and that is going to be used in the
    performing of these operations.
    @rtype: bool
    @return: If there are still pending operations to be
    performed in the provided socket.
    """

    # verifies if the pending attribute exists in the socket
    # and that the value is valid, in case it's not there's
    # no pending operation (method call) to be performed, and
    # as such must return immediately with no pending value
    if not hasattr(_socket, "_pending") or\
        not _socket._pending: return False

    # calls the pending callback method and verifies if the
    # pending value still persists in the socket if that the
    # case returns the is pending value to the caller method
    # (the callback may have re-armed or cleared _pending)
    _socket._pending(_socket)
    is_pending = not _socket._pending == None
    return is_pending
def _ssl_wrap(self, _socket, key_file = None, cer_file = None, server = True):
    # resolves the default key/certificate files relative to the
    # package location (../../extras/ssl) when none are provided
    dir_path = os.path.dirname(__file__)
    base_path = os.path.join(dir_path, "../../")
    base_path = os.path.normpath(base_path)
    extras_path = os.path.join(base_path, "extras")
    ssl_path = os.path.join(extras_path, "ssl")

    key_file = key_file or os.path.join(ssl_path, "server.key")
    cer_file = cer_file or os.path.join(ssl_path, "server.cer")

    # wraps the plain socket with ssl; the handshake is deferred
    # (do_handshake_on_connect = False) so it can be driven by the
    # nonblocking loop via _ssl_handshake
    # NOTE(review): PROTOCOL_SSLv23 + ciphers "ALL" is permissive -
    # confirm this is acceptable for the deployment environment
    socket_ssl = ssl.wrap_socket(
        _socket,
        keyfile = key_file,
        certfile = cer_file,
        server_side = server,
        ssl_version = ssl.PROTOCOL_SSLv23,
        do_handshake_on_connect = False,
        ciphers = "ALL"
    )
    return socket_ssl
def _ssl_handshake(self, _socket):
    # attempts one step of the (nonblocking) ssl handshake; on
    # success the pending marker is cleared, on a retryable ssl
    # error (want read/write) the handshake is re-armed as the
    # socket's pending operation, any other error propagates
    try:
        _socket.do_handshake()
        _socket._pending = None
    except ssl.SSLError, error:
        error_v = error.args[0]
        if error_v in SSL_VALID_ERRORS:
            _socket._pending = self._ssl_handshake
        else: raise
class BaseThread(threading.Thread):
    """
    The top level thread class that is meant to encapsulate
    a running base object and run it in a new context.
    """

    def __init__(self, owner, daemon = False, *args, **kwargs):
        super(BaseThread, self).__init__(*args, **kwargs)
        # stores the wrapped base object and propagates the daemon
        # flag to the underlying thread machinery
        self.owner = owner
        self.daemon = daemon

    def run(self):
        # runs the standard thread logic first and then hands
        # control over to the owner's (blocking) start method
        super(BaseThread, self).run()
        self.owner.start()
Reduced the main event loop select timeout (new value for the select call: 0.25 s -> 0.0005 s)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (C) 2008-2012 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hive Netius System. If not, see <http://www.gnu.org/licenses/>.
# module level metadata; the author e-mail was missing its opening
# angle bracket ("joamag@hive.pt>"), fixed to a well formed address
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """

__version__ = "1.0.0"
""" The version of the module """

__revision__ = "$LastChangedRevision$"
""" The revision number of the module """

__date__ = "$LastChangedDate$"
""" The last change date of the module """

__copyright__ = "Copyright (c) 2008-2012 Hive Solutions Lda."
""" The copyright for the module """

__license__ = "GNU General Public License (GPL), Version 3"
""" The license for the module """
import os
import ssl
import copy
import time
import types
import errno
import select
import thread
import logging
import threading
import traceback

import observer

from conn import * #@UnusedWildImport
# -- module level constants (identification, error sets, states) --

NAME = "netius"
""" The global infra-structure name to be used in the
identification of both the clients and the services this
value may be prefixed or suffixed """

VERSION = "0.2.0"
""" The version value that identifies the version of the
current infra-structure, all of the services and clients
may share this value """

WSAEWOULDBLOCK = 10035
""" The wsa would block error code meant to be used on
windows environments as a replacement for the would block
error code that indicates the failure to operate on a non
blocking connection """

VALID_ERRORS = (
    errno.EWOULDBLOCK,
    errno.EAGAIN,
    errno.EPERM,
    errno.ENOENT,
    errno.EINPROGRESS,
    WSAEWOULDBLOCK
)
""" List containing the complete set of error that represent
non ready operations in a non blocking socket """

SSL_VALID_ERRORS = (
    ssl.SSL_ERROR_WANT_READ,
    ssl.SSL_ERROR_WANT_WRITE
)
""" The list containing the valid error in the handshake
operation of the ssl connection establishment """

# the STATE_* constants are 1-based so that STATE_STRINGS may
# be indexed with (state - 1), see Base.get_state_s
STATE_STOP = 1
""" The stop state value, this value is set when the service
is either in the constructed stage or when the service has been
stop normally or with an error """

STATE_START = 2
""" The start state set when the service is in the starting
stage and running, normal state """

STATE_CONFIG = 3
""" The configuration state that is set when the service is
preparing to become started and the configuration attributes
are being set according to pre-determined indications """

STATE_SELECT = 4
""" State to be used when the service is in the select part
of the loop, this is the most frequent state in an idle service
as the service "spends" most of its time in it """

STATE_TICK = 5
""" Tick state representative of the situation where the loop
tick operation is being started and all the pre tick handlers
are going to be called for pre-operations """

STATE_READ = 6
""" Read state that is set when the connection are being read
and the on data handlers are being called, this is the part
where all the logic driven by incoming data is being called """

STATE_WRITE = 7
""" The write state that is set on the writing of data to the
connections, this is a pretty "fast" state as no logic is
associated with it """

# note: the constant name carries a (long standing) typo, the
# matching display string below is the correctly spelled "ERROR"
STATE_ERRROR = 8
""" The error state to be used when the connection is processing
any error state coming from its main select operation and associated
with a certain connection (very rare) """

STATE_STRINGS = (
    "STOP",
    "START",
    "CONFIG",
    "SELECT",
    "TICK",
    "READ",
    "WRITE",
    "ERROR"
)
""" Sequence that contains the various strings associated with
the various states for the base service, this may be used to
create an integer to string resolution mechanism """

KEEPALIVE_TIMEOUT = 300
""" The amount of time in seconds that a connection is set as
idle until a new refresh token is sent to it to make sure that
it's still online and not disconnected, make sure that this
value is high enough that it does not consume to much bandwidth """

KEEPALIVE_COUNT = 3
""" The amount of times the "ping" packet is re-sent until the
connection is considered to be offline and is dropped """

KEEPALIVE_INTERVAL = int(KEEPALIVE_TIMEOUT / 10)
""" The time between the retrying of "ping" packets, this value
does not need to be too large and should not be considered too
important (may be calculated automatically) """

# initializes the various paths that are going to be used for
# the base files configuration in the complete service infra
# structure, these should include the ssl based files
BASE_PATH = os.path.dirname(__file__)
EXTRAS_PATH = os.path.join(BASE_PATH, "extras")
SSL_KEY_PATH = os.path.join(EXTRAS_PATH, "net.key")
SSL_CER_PATH = os.path.join(EXTRAS_PATH, "net.cer")
class Base(observer.Observable):
"""
Base network structure to be used by all the network
capable infra-structures (eg: servers and clients).
Should handle all the nonblocking event loop so that
the read and write operations are easy to handle.
"""
def __init__(self, name = None, handler = None, *args, **kwargs):
observer.Observable.__init__(self, *args, **kwargs)
self.name = name or self.__class__.__name__
self.handler = handler
self.level = kwargs.get("level", logging.DEBUG)
self.tid = None
self.logger = None
self.read_l = []
self.write_l = []
self.error_l = []
self.connections = []
self.connections_m = {}
self._running = False
self._loaded = False
self._delayed = []
self.set_state(STATE_STOP);
def __del__(self):
self.close()
def delay(self, callable):
self._delayed.append(callable)
def load(self):
if self._loaded: return
self.load_logging(self.level);
self._loaded = True
def load_logging(self, level = logging.DEBUG):
logging.basicConfig(format = "%(asctime)s [%(levelname)s] %(message)s")
self.logger = logging.getLogger(self.__class__.__name__)
self.logger.setLevel(level)
self.handler and self.logger.addHandler(self.handler)
def start(self):
# retrieves the current thread identifier as the current
# "tid" value to be used for thread control mechanisms
self.tid = thread.get_ident()
# triggers the loading of the internal structures of
# the base structure in case the loading has already
# been done nothing is done (avoids duplicated load)
self.load()
# sets the running flag that controls the running of the
# main loop and then changes the current state to start
# as the main loop is going to start
self._running = True
self.set_state(STATE_START)
# enters the main loop operation printing a message
# to the logger indicating this start, this stage
# should block the thread until a stop call is made
self.debug("Starting '%s' service main loop ..." % self.name)
self.trigger("start", self)
try: self.loop()
except BaseException, exception:
self.error(exception)
lines = traceback.format_exc().splitlines()
for line in lines: self.warning(line)
except:
self.critical("Critical level loop exception raised")
lines = traceback.format_exc().splitlines()
for line in lines: self.error(line)
finally:
self.trigger("stop", self)
self.debug("Finished '%s' service main loop" % self.name)
self.cleanup()
self.set_state(STATE_STOP)
def stop(self):
self._running = False
def close(self):
self.stop()
def is_empty(self):
return not self.read_l and not self.write_l and not self.error_l
def cleanup(self):
# creates a copy of the connections list because this structure
# is going to be changed in the closing of the connection object
connections = copy.copy(self.connections)
# iterates over the complete set of connections currently
# registered in the base structure and closes them so that
# can no longer be used and are gracefully disconnected
for connection in connections: connection.close()
# removes the contents of all of the loop related structures
# so that no extra selection operations are issued
del self.read_l[:]
del self.write_l[:]
del self.error_l[:]
def loop(self):
# iterates continuously while the running flag
# is set, once it becomes unset the loop breaks
# at the next execution cycle
while self._running:
# calls the base tick int handler indicating that a new
# tick loop iteration is going to be started, all the
# "in between loop" operation should be performed in this
# callback as this is the "space" they have for execution
self.ticks()
# updates the current state to select to indicate
# that the base service is selecting the connections
self.set_state(STATE_SELECT)
# verifies if the current selection list is empty
# in case it's sleeps for a while and then continues
# the loop (this avoids error in empty selection)
is_empty = self.is_empty()
if is_empty: time.sleep(0.25); continue
# runs the main selection operation on the current set
# of connection for each of the three operations returning
# the resulting active sets for the callbacks
reads, writes, errors = select.select(
self.read_l,
self.write_l,
self.error_l,
0.0005
)
# calls the various callbacks with the selections lists,
# these are the main entry points for the logic to be executed
# each of this methods should be implemented in the underlying
# class instances as no behavior is defined at this inheritance
# level (abstract class)
self.reads(reads)
self.writes(writes)
self.errors(errors)
def ticks(self):
self.set_state(STATE_TICK)
for method in self._delayed: method()
del self._delayed[:]
def reads(self, reads):
self.set_state(STATE_READ)
def writes(self, writes):
self.set_state(STATE_WRITE)
def errors(self, errors):
self.set_state(STATE_ERRROR)
def on_connection_c(self, connection):
self.debug(
"Connection '%s' from '%s' created ..." %\
(connection.id, connection.owner.name)
)
def on_connection_d(self, connection):
self.debug(
"Connection '%s' from '%s' deleted" %\
(connection.id, connection.owner.name)
)
def info_dict(self):
info = dict()
info["loaded"] = self._loaded
info["connections"] = len(self.connections)
info["state"] = self.get_state_s()
return info
def new_connection(self, socket, address, ssl = False):
"""
Creates a new connection for the provided socket
object and string based address, the returned
value should be a workable object.
@type socket: Socket
@param socket: The socket object to be encapsulated
by the object to be created (connection).
@type address: String
@param address: The address as a string to be used to
describe the connection object to be created.
@type ssl: bool
@param ssl: If the connection to be created is meant to
be secured using the ssl framework for encryption.
@rtype: Connection
@return: The connection object that encapsulates the
provided socket and address values.
"""
return Connection(self, socket, address, ssl = ssl)
def debug(self, object):
self.log(object, level = logging.DEBUG)
def info(self, object):
self.log(object, level = logging.INFO)
def warning(self, object):
self.log(object, level = logging.WARNING)
def error(self, object):
self.log(object, level = logging.ERROR)
def critical(self, object):
self.log(object, level = logging.CRITICAL)
def log(self, object, level = logging.INFO):
object_t = type(object)
message = unicode(object) if not object_t in types.StringTypes else object
if not self.logger: return
self.logger.log(level, message)
def set_state(self, state):
self._state = state
def get_state_s(self, lower = True):
"""
Retrieves a string describing the current state
of the system, this string should be as descriptive
as possible.
An optional parameter controls if the string should
be lower cased or not.
@type lower: bool
@param lower: If the returned string should be converted
into a lower cased version.
@rtype: String
@return: A string describing the current sate of the loop
system, should be as descriptive as possible.
"""
state_s = STATE_STRINGS[self._state - 1]
state_s = state_s.lower() if lower else state_s
return state_s
def _pending(self, _socket):
"""
Tries to perform the pending operations in the socket
and, these operations are set in the pending variable
of the socket structure.
The method returns if there are still pending operations
after this method tick.
@type _socket: Socket
@param _socket: The socket object to be checked for
pending operations and that is going to be used in the
performing of these operations.
@rtype: bool
@return: If there are still pending operations to be
performed in the provided socket.
"""
# verifies if the pending attribute exists in the socket
# and that the value is valid, in case it's not there's
# no pending operation (method call) to be performed, and
# as such must return immediately with no pending value
if not hasattr(_socket, "_pending") or\
not _socket._pending: return False
# calls the pending callback method and verifies if the
# pending value still persists in the socket if that the
# case returns the is pending value to the caller method
_socket._pending(_socket)
is_pending = not _socket._pending == None
return is_pending
def _ssl_wrap(self, _socket, key_file = None, cer_file = None, server = True):
dir_path = os.path.dirname(__file__)
base_path = os.path.join(dir_path, "../../")
base_path = os.path.normpath(base_path)
extras_path = os.path.join(base_path, "extras")
ssl_path = os.path.join(extras_path, "ssl")
key_file = key_file or os.path.join(ssl_path, "server.key")
cer_file = cer_file or os.path.join(ssl_path, "server.cer")
socket_ssl = ssl.wrap_socket(
_socket,
keyfile = key_file,
certfile = cer_file,
server_side = server,
ssl_version = ssl.PROTOCOL_SSLv23,
do_handshake_on_connect = False,
ciphers = "ALL"
)
return socket_ssl
def _ssl_handshake(self, _socket):
try:
_socket.do_handshake()
_socket._pending = None
except ssl.SSLError, error:
error_v = error.args[0]
if error_v in SSL_VALID_ERRORS:
_socket._pending = self._ssl_handshake
else: raise
class BaseThread(threading.Thread):
    """
    The top level thread class that is meant to encapsulate
    a running base object and run it in a new context.
    """

    def __init__(self, owner, daemon = False, *args, **kwargs):
        super(BaseThread, self).__init__(*args, **kwargs)
        # keeps a reference to the wrapped base object and
        # propagates the daemon flag to the thread machinery
        self.owner = owner
        self.daemon = daemon

    def run(self):
        # executes the standard thread bootstrap and then blocks
        # in the owner's start method (its main event loop)
        super(BaseThread, self).run()
        self.owner.start()
|
# coding: utf-8
"""
ASN.1 type classes for certificate revocation lists (CRL). Exports the
following items:
- CertificateList()
Other type classes are defined that help compose the types listed above.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from .algos import SignedDigestAlgorithm
from .core import (
Boolean,
Enumerated,
GeneralizedTime,
Integer,
ObjectIdentifier,
OctetBitString,
OctetString,
Sequence,
SequenceOf,
)
from .x509 import (
AuthorityInfoAccessSyntax,
AuthorityKeyIdentifier,
CRLDistributionPoints,
DistributionPointName,
GeneralNames,
Name,
ReasonFlags,
Time,
)
# The structures in this file are taken from https://tools.ietf.org/html/rfc5280
class Version(Sequence.__mro__[0] if False else Integer):
    # RFC 5280 TBSCertList version: INTEGER mapped to symbolic names
    _map = {
        0: 'v1',
        1: 'v2',
        2: 'v3',
    }
class IssuingDistributionPoint(Sequence):
    # RFC 5280 section 5.2.5 - scopes what the CRL covers (user
    # certs, CA certs, attribute certs, reasons, indirect CRL)
    _fields = [
        ('distribution_point', DistributionPointName, {'tag_type': 'explicit', 'tag': 0, 'optional': True}),
        ('only_contains_user_certs', Boolean, {'tag_type': 'implicit', 'tag': 1, 'default': False}),
        ('only_contains_ca_certs', Boolean, {'tag_type': 'implicit', 'tag': 2, 'default': False}),
        ('only_some_reasons', ReasonFlags, {'tag_type': 'implicit', 'tag': 3, 'optional': True}),
        ('indirect_crl', Boolean, {'tag_type': 'implicit', 'tag': 4, 'default': False}),
        ('only_contains_attribute_certs', Boolean, {'tag_type': 'implicit', 'tag': 5, 'default': False}),
    ]
class TBSCertListExtensionId(ObjectIdentifier):
    # OIDs of the CRL-level (not per-entry) extensions, RFC 5280 5.2
    _map = {
        '2.5.29.18': 'issuer_alt_name',
        '2.5.29.20': 'crl_number',
        '2.5.29.27': 'delta_crl_indicator',
        '2.5.29.28': 'issuing_distribution_point',
        '2.5.29.35': 'authority_key_identifier',
        '2.5.29.46': 'freshest_crl',
        '1.3.6.1.5.5.7.1.1': 'authority_information_access',
    }
class TBSCertListExtension(Sequence):
    # generic Extension structure; _oid_pair/_oid_specs let the
    # framework parse extn_value according to the extn_id OID
    _fields = [
        ('extn_id', TBSCertListExtensionId),
        ('critical', Boolean, {'default': False}),
        ('extn_value', OctetString),
    ]

    _oid_pair = ('extn_id', 'extn_value')
    _oid_specs = {
        'issuer_alt_name': GeneralNames,
        'crl_number': Integer,
        'delta_crl_indicator': Integer,
        'issuing_distribution_point': IssuingDistributionPoint,
        'authority_key_identifier': AuthorityKeyIdentifier,
        'freshest_crl': CRLDistributionPoints,
        'authority_information_access': AuthorityInfoAccessSyntax,
    }
class TBSCertListExtensions(SequenceOf):
    # SEQUENCE OF Extension for the CRL-level extensions
    _child_spec = TBSCertListExtension
class CRLReason(Enumerated):
    # RFC 5280 section 5.3.1 CRLReason ENUMERATED (value 7 is
    # intentionally unused by the RFC)
    _map = {
        0: 'unspecified',
        1: 'key_compromise',
        2: 'ca_compromise',
        3: 'affiliation_changed',
        4: 'superseded',
        5: 'cessation_of_operation',
        6: 'certificate_hold',
        8: 'remove_from_crl',
        9: 'privilege_withdrawn',
        10: 'aa_compromise',
    }

    @property
    def human_friendly(self):
        """
        :return:
            A unicode string with revocation description that is suitable to
            show to end-users. Starts with a lower case letter and phrased in
            such a way that it makes sense after the phrase "because of" or
            "due to".
        """

        return {
            'unspecified': 'an unspecified reason',
            'key_compromise': 'a compromised key',
            'ca_compromise': 'the CA being compromised',
            'affiliation_changed': 'an affiliation change',
            'superseded': 'certificate supersession',
            'cessation_of_operation': 'a cessation of operation',
            'certificate_hold': 'a certificate hold',
            'remove_from_crl': 'removal from the CRL',
            # fixed user-facing typo: "withdrawl" -> "withdrawal"
            'privilege_withdrawn': 'privilege withdrawal',
            'aa_compromise': 'the AA being compromised',
        }[self.native]
class CRLEntryExtensionId(ObjectIdentifier):
    # OIDs of the per-entry CRL extensions, RFC 5280 section 5.3
    _map = {
        '2.5.29.21': 'crl_reason',
        '2.5.29.24': 'invalidity_date',
        '2.5.29.29': 'certificate_issuer',
    }
class CRLEntryExtension(Sequence):
    # per-entry Extension structure; extn_value parsed per extn_id
    _fields = [
        ('extn_id', CRLEntryExtensionId),
        ('critical', Boolean, {'default': False}),
        ('extn_value', OctetString),
    ]

    _oid_pair = ('extn_id', 'extn_value')
    _oid_specs = {
        'crl_reason': CRLReason,
        'invalidity_date': GeneralizedTime,
        'certificate_issuer': GeneralNames,
    }
class CRLEntryExtensions(SequenceOf):
    # SEQUENCE OF Extension for a single revoked-certificate entry
    _child_spec = CRLEntryExtension
class RevokedCertificate(Sequence):
    # one entry of the revokedCertificates SEQUENCE (RFC 5280 5.1)
    _fields = [
        ('user_certificate', Integer),
        ('revocation_date', Time),
        ('crl_entry_extensions', CRLEntryExtensions, {'optional': True}),
    ]

    # lazily-populated caches, filled by _set_extensions(); the
    # _issuer_name sentinel uses False (not None) because None is
    # itself a valid computed result
    _processed_extensions = False
    _critical_extensions = None
    _crl_reason_value = None
    _invalidity_date_value = None
    _certificate_issuer_value = None
    _issuer_name = False

    def _set_extensions(self):
        """
        Sets common named extensions to private attributes and creates a list
        of critical extensions
        """

        self._critical_extensions = set()

        for extension in self['crl_entry_extensions']:
            name = extension['extn_id'].native
            attribute_name = '_%s_value' % name
            if hasattr(self, attribute_name):
                setattr(self, attribute_name, extension['extn_value'].parsed)
            if extension['critical'].native:
                self._critical_extensions.add(name)

        self._processed_extensions = True

    @property
    def critical_extensions(self):
        """
        Returns a set of the names (or OID if not a known extension) of the
        extensions marked as critical

        :return:
            A set of unicode strings
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._critical_extensions

    @property
    def crl_reason_value(self):
        """
        This extension indicates the reason that a certificate was revoked.

        :return:
            None or a CRLReason object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._crl_reason_value

    @property
    def invalidity_date_value(self):
        """
        This extension indicates the suspected date/time the private key was
        compromised or the certificate became invalid. This would usually be
        before the revocation date, which is when the CA processed the
        revocation.

        :return:
            None or a GeneralizedTime object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._invalidity_date_value

    @property
    def certificate_issuer_value(self):
        """
        This extension indicates the issuer of the certificate in question,
        and is used in indirect CRLs. CRL entries without this extension are
        for certificates issued from the last seen issuer.

        :return:
            None or an x509.GeneralNames object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._certificate_issuer_value

    @property
    def issuer_name(self):
        """
        :return:
            None, or an asn1crypto.x509.Name object for the issuer of the cert
        """

        if self._issuer_name is False:
            self._issuer_name = None
            if self.certificate_issuer_value:
                # only the directory_name choice of GeneralName
                # carries an X.509 Name; first match wins
                for general_name in self.certificate_issuer_value:
                    if general_name.name == 'directory_name':
                        self._issuer_name = general_name.chosen
                        break
        return self._issuer_name
class RevokedCertificates(SequenceOf):
    # SEQUENCE OF revoked-certificate entries
    _child_spec = RevokedCertificate
class TbsCertList(Sequence):
    # the to-be-signed portion of a CRL (RFC 5280 section 5.1)
    _fields = [
        ('version', Version, {'optional': True}),
        ('signature', SignedDigestAlgorithm),
        ('issuer', Name),
        ('this_update', Time),
        ('next_update', Time),
        ('revoked_certificates', RevokedCertificates, {'optional': True}),
        ('crl_extensions', TBSCertListExtensions, {'tag_type': 'explicit', 'tag': 0, 'optional': True}),
    ]
class CertificateList(Sequence):
    # top level CRL structure: signed TbsCertList plus algorithm
    # and signature (RFC 5280 section 5.1)
    _fields = [
        ('tbs_cert_list', TbsCertList),
        ('signature_algorithm', SignedDigestAlgorithm),
        ('signature', OctetBitString),
    ]

    # lazily-populated caches, filled on first property access
    _processed_extensions = False
    _critical_extensions = None
    _issuer_alt_name_value = None
    _crl_number_value = None
    _delta_crl_indicator_value = None
    _issuing_distribution_point_value = None
    _authority_key_identifier_value = None
    _freshest_crl_value = None
    _authority_information_access_value = None
    _issuer_cert_urls = None
    _delta_crl_distribution_points = None

    def _set_extensions(self):
        """
        Sets common named extensions to private attributes and creates a list
        of critical extensions
        """

        self._critical_extensions = set()

        for extension in self['tbs_cert_list']['crl_extensions']:
            name = extension['extn_id'].native
            attribute_name = '_%s_value' % name
            if hasattr(self, attribute_name):
                setattr(self, attribute_name, extension['extn_value'].parsed)
            if extension['critical'].native:
                self._critical_extensions.add(name)

        self._processed_extensions = True

    @property
    def critical_extensions(self):
        """
        Returns a set of the names (or OID if not a known extension) of the
        extensions marked as critical

        :return:
            A set of unicode strings
        """

        if not self._processed_extensions:
            self._set_extensions()
        return self._critical_extensions

    @property
    def issuer(self):
        """
        :return:
            An asn1crypto.x509.Name object for the issuer of the CRL
        """

        # convenience accessor added so callers do not need to
        # reach through tbs_cert_list manually
        return self['tbs_cert_list']['issuer']

    @property
    def issuer_alt_name_value(self):
        """
        This extension allows associating one or more alternative names with
        the issuer of the CRL.

        :return:
            None or an x509.GeneralNames object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._issuer_alt_name_value

    @property
    def crl_number_value(self):
        """
        This extension adds a monotonically increasing number to the CRL and is
        used to distinguish different versions of the CRL.

        :return:
            None or an Integer object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._crl_number_value

    @property
    def delta_crl_indicator_value(self):
        """
        This extension indicates a CRL is a delta CRL, and contains the CRL
        number of the base CRL that it is a delta from.

        :return:
            None or an Integer object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._delta_crl_indicator_value

    @property
    def issuing_distribution_point_value(self):
        """
        This extension includes information about what types of revocations
        and certificates are part of the CRL.

        :return:
            None or an IssuingDistributionPoint object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._issuing_distribution_point_value

    @property
    def authority_key_identifier_value(self):
        """
        This extension helps in identifying the public key with which to
        validate the authenticity of the CRL.

        :return:
            None or an AuthorityKeyIdentifier object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._authority_key_identifier_value

    @property
    def freshest_crl_value(self):
        """
        This extension is used in complete CRLs to indicate where a delta CRL
        may be located.

        :return:
            None or a CRLDistributionPoints object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._freshest_crl_value

    @property
    def authority_information_access_value(self):
        """
        This extension is used to provide a URL with which to download the
        certificate used to sign this CRL.

        :return:
            None or an AuthorityInfoAccessSyntax object
        """

        if self._processed_extensions is False:
            self._set_extensions()
        return self._authority_information_access_value

    @property
    def authority_key_identifier(self):
        """
        :return:
            None or a byte string of the key_identifier from the authority key
            identifier extension
        """

        if not self.authority_key_identifier_value:
            return None
        return self.authority_key_identifier_value['key_identifier'].native

    @property
    def issuer_cert_urls(self):
        """
        :return:
            A list of unicode strings that are URLs that should contain either
            an individual DER-encoded X509 certificate, or a DER-encoded CMS
            message containing multiple certificates
        """

        if self._issuer_cert_urls is None:
            self._issuer_cert_urls = []

            if self.authority_information_access_value:
                for entry in self.authority_information_access_value:
                    if entry['access_method'].native == 'ca_issuers':
                        location = entry['access_location']
                        if location.name != 'uniform_resource_identifier':
                            continue
                        url = location.native
                        # only plain http URLs are collected here
                        if url.lower()[0:7] == 'http://':
                            self._issuer_cert_urls.append(url)

        return self._issuer_cert_urls

    @property
    def delta_crl_distribution_points(self):
        """
        Returns delta CRL URLs - only applies to complete CRLs

        :return:
            A list of zero or more DistributionPoint objects
        """

        if self._delta_crl_distribution_points is None:
            self._delta_crl_distribution_points = []

            if self.freshest_crl_value is not None:
                for distribution_point in self.freshest_crl_value:
                    distribution_point_name = distribution_point['distribution_point']
                    # RFC5280 indicates conforming CA should not use the relative form
                    if distribution_point_name.name == 'name_relative_to_crl_issuer':
                        continue
                    # This library is currently only concerned with HTTP-based CRLs
                    for general_name in distribution_point_name.chosen:
                        if general_name.name == 'uniform_resource_identifier':
                            self._delta_crl_distribution_points.append(distribution_point)

        return self._delta_crl_distribution_points
Added crl.CertificateList.issuer
# coding: utf-8
"""
ASN.1 type classes for certificate revocation lists (CRL). Exports the
following items:
- CertificateList()
Other type classes are defined that help compose the types listed above.
"""
from __future__ import unicode_literals, division, absolute_import, print_function
from .algos import SignedDigestAlgorithm
from .core import (
Boolean,
Enumerated,
GeneralizedTime,
Integer,
ObjectIdentifier,
OctetBitString,
OctetString,
Sequence,
SequenceOf,
)
from .x509 import (
AuthorityInfoAccessSyntax,
AuthorityKeyIdentifier,
CRLDistributionPoints,
DistributionPointName,
GeneralNames,
Name,
ReasonFlags,
Time,
)
# The structures in this file are taken from https://tools.ietf.org/html/rfc5280
class Version(Integer):
    """CRL format version from RFC 5280 (TBSCertList ``version`` field)."""
    _map = {
        0: 'v1',
        1: 'v2',
        2: 'v3',
    }
class IssuingDistributionPoint(Sequence):
    """RFC 5280 issuingDistributionPoint extension payload: identifies the
    CRL distribution point and the scope (certificate types / reasons)
    covered by this particular CRL."""
    _fields = [
        ('distribution_point', DistributionPointName, {'tag_type': 'explicit', 'tag': 0, 'optional': True}),
        ('only_contains_user_certs', Boolean, {'tag_type': 'implicit', 'tag': 1, 'default': False}),
        ('only_contains_ca_certs', Boolean, {'tag_type': 'implicit', 'tag': 2, 'default': False}),
        ('only_some_reasons', ReasonFlags, {'tag_type': 'implicit', 'tag': 3, 'optional': True}),
        ('indirect_crl', Boolean, {'tag_type': 'implicit', 'tag': 4, 'default': False}),
        ('only_contains_attribute_certs', Boolean, {'tag_type': 'implicit', 'tag': 5, 'default': False}),
    ]
class TBSCertListExtensionId(ObjectIdentifier):
    """Maps the OIDs of CRL-level (TBSCertList) extensions to friendly names."""
    _map = {
        '2.5.29.18': 'issuer_alt_name',
        '2.5.29.20': 'crl_number',
        '2.5.29.27': 'delta_crl_indicator',
        '2.5.29.28': 'issuing_distribution_point',
        '2.5.29.35': 'authority_key_identifier',
        '2.5.29.46': 'freshest_crl',
        '1.3.6.1.5.5.7.1.1': 'authority_information_access',
    }
class TBSCertListExtension(Sequence):
    """A single CRL-level extension; ``extn_value`` is parsed according to
    ``extn_id`` via the oid pair/spec mapping below."""
    _fields = [
        ('extn_id', TBSCertListExtensionId),
        ('critical', Boolean, {'default': False}),
        ('extn_value', OctetString),
    ]
    # tells the Sequence machinery which spec to use when parsing
    # extn_value, based on the extn_id value
    _oid_pair = ('extn_id', 'extn_value')
    _oid_specs = {
        'issuer_alt_name': GeneralNames,
        'crl_number': Integer,
        'delta_crl_indicator': Integer,
        'issuing_distribution_point': IssuingDistributionPoint,
        'authority_key_identifier': AuthorityKeyIdentifier,
        'freshest_crl': CRLDistributionPoints,
        'authority_information_access': AuthorityInfoAccessSyntax,
    }
class TBSCertListExtensions(SequenceOf):
    """Sequence of CRL-level extensions."""
    _child_spec = TBSCertListExtension
class CRLReason(Enumerated):
    """RFC 5280 CRLReason: the reason a certificate was revoked.

    Value 7 is intentionally absent — it is unused in RFC 5280.
    """
    _map = {
        0: 'unspecified',
        1: 'key_compromise',
        2: 'ca_compromise',
        3: 'affiliation_changed',
        4: 'superseded',
        5: 'cessation_of_operation',
        6: 'certificate_hold',
        8: 'remove_from_crl',
        9: 'privilege_withdrawn',
        10: 'aa_compromise',
    }
    @property
    def human_friendly(self):
        """
        :return:
            A unicode string with revocation description that is suitable to
            show to end-users. Starts with a lower case letter and phrased in
            such a way that it makes sense after the phrase "because of" or
            "due to".
        """
        return {
            'unspecified': 'an unspecified reason',
            'key_compromise': 'a compromised key',
            'ca_compromise': 'the CA being compromised',
            'affiliation_changed': 'an affiliation change',
            'superseded': 'certificate supersession',
            'cessation_of_operation': 'a cessation of operation',
            'certificate_hold': 'a certificate hold',
            'remove_from_crl': 'removal from the CRL',
            # fixed misspelling ("withdrawl") in this user-facing string
            'privilege_withdrawn': 'privilege withdrawal',
            'aa_compromise': 'the AA being compromised',
        }[self.native]
class CRLEntryExtensionId(ObjectIdentifier):
    """Maps the OIDs of per-entry (RevokedCertificate) extensions to names."""
    _map = {
        '2.5.29.21': 'crl_reason',
        '2.5.29.24': 'invalidity_date',
        '2.5.29.29': 'certificate_issuer',
    }
class CRLEntryExtension(Sequence):
    """A single per-entry CRL extension; ``extn_value`` is parsed according
    to ``extn_id`` via the oid pair/spec mapping below."""
    _fields = [
        ('extn_id', CRLEntryExtensionId),
        ('critical', Boolean, {'default': False}),
        ('extn_value', OctetString),
    ]
    _oid_pair = ('extn_id', 'extn_value')
    _oid_specs = {
        'crl_reason': CRLReason,
        'invalidity_date': GeneralizedTime,
        'certificate_issuer': GeneralNames,
    }
class CRLEntryExtensions(SequenceOf):
    """Sequence of per-entry CRL extensions."""
    _child_spec = CRLEntryExtension
class RevokedCertificate(Sequence):
    """
    A single CRL entry: the serial number of the revoked certificate, the
    revocation date and optional per-entry extensions. Known extensions are
    parsed lazily and cached on first property access.
    """
    _fields = [
        ('user_certificate', Integer),
        ('revocation_date', Time),
        ('crl_entry_extensions', CRLEntryExtensions, {'optional': True}),
    ]
    # lazy extension-processing state: the *_value attributes are filled in
    # by _set_extensions() the first time any of the properties is read
    _processed_extensions = False
    _critical_extensions = None
    _crl_reason_value = None
    _invalidity_date_value = None
    _certificate_issuer_value = None
    # sentinel: False means "not computed yet", None means "computed, but no
    # directory-name issuer was present"
    _issuer_name = False
    def _set_extensions(self):
        """
        Sets common named extensions to private attributes and creates a list
        of critical extensions
        """
        self._critical_extensions = set()
        for extension in self['crl_entry_extensions']:
            name = extension['extn_id'].native
            attribute_name = '_%s_value' % name
            # only cache extensions this class declares an attribute for;
            # unknown OIDs are still recorded in the critical set below
            if hasattr(self, attribute_name):
                setattr(self, attribute_name, extension['extn_value'].parsed)
            if extension['critical'].native:
                self._critical_extensions.add(name)
        self._processed_extensions = True
    @property
    def critical_extensions(self):
        """
        Returns a set of the names (or OID if not a known extension) of the
        extensions marked as critical
        :return:
            A set of unicode strings
        """
        if not self._processed_extensions:
            self._set_extensions()
        return self._critical_extensions
    @property
    def crl_reason_value(self):
        """
        This extension indicates the reason that a certificate was revoked.
        :return:
            None or a CRLReason object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._crl_reason_value
    @property
    def invalidity_date_value(self):
        """
        This extension indicates the suspected date/time the private key was
        compromised or the certificate became invalid. This would usually be
        before the revocation date, which is when the CA processed the
        revocation.
        :return:
            None or a GeneralizedTime object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._invalidity_date_value
    @property
    def certificate_issuer_value(self):
        """
        This extension indicates the issuer of the certificate in question,
        and is used in indirect CRLs. CRL entries without this extension are
        for certificates issued from the last seen issuer.
        :return:
            None or an x509.GeneralNames object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._certificate_issuer_value
    @property
    def issuer_name(self):
        """
        :return:
            None, or an asn1crypto.x509.Name object for the issuer of the cert
        """
        if self._issuer_name is False:
            self._issuer_name = None
            if self.certificate_issuer_value:
                # use the first directory-name general name found
                for general_name in self.certificate_issuer_value:
                    if general_name.name == 'directory_name':
                        self._issuer_name = general_name.chosen
                        break
        return self._issuer_name
class RevokedCertificates(SequenceOf):
    """Sequence of CRL entries (revoked certificates)."""
    _child_spec = RevokedCertificate
class TbsCertList(Sequence):
    """The to-be-signed portion of a CRL (RFC 5280 TBSCertList)."""
    _fields = [
        ('version', Version, {'optional': True}),
        ('signature', SignedDigestAlgorithm),
        ('issuer', Name),
        ('this_update', Time),
        ('next_update', Time),
        ('revoked_certificates', RevokedCertificates, {'optional': True}),
        ('crl_extensions', TBSCertListExtensions, {'tag_type': 'explicit', 'tag': 0, 'optional': True}),
    ]
class CertificateList(Sequence):
    """
    A complete signed CRL (RFC 5280 CertificateList): the to-be-signed cert
    list plus the signature algorithm and signature value. Known CRL-level
    extensions are parsed lazily and cached on first property access.
    """
    _fields = [
        ('tbs_cert_list', TbsCertList),
        ('signature_algorithm', SignedDigestAlgorithm),
        ('signature', OctetBitString),
    ]
    # lazy extension-processing state: the *_value attributes are filled in
    # by _set_extensions() the first time any of the properties is read
    _processed_extensions = False
    _critical_extensions = None
    _issuer_alt_name_value = None
    _crl_number_value = None
    _delta_crl_indicator_value = None
    _issuing_distribution_point_value = None
    _authority_key_identifier_value = None
    _freshest_crl_value = None
    _authority_information_access_value = None
    # memoized derived values (computed on demand by the properties below)
    _issuer_cert_urls = None
    _delta_crl_distribution_points = None
    def _set_extensions(self):
        """
        Sets common named extensions to private attributes and creates a list
        of critical extensions
        """
        self._critical_extensions = set()
        for extension in self['tbs_cert_list']['crl_extensions']:
            name = extension['extn_id'].native
            attribute_name = '_%s_value' % name
            # only cache extensions this class declares an attribute for;
            # unknown OIDs are still recorded in the critical set below
            if hasattr(self, attribute_name):
                setattr(self, attribute_name, extension['extn_value'].parsed)
            if extension['critical'].native:
                self._critical_extensions.add(name)
        self._processed_extensions = True
    @property
    def critical_extensions(self):
        """
        Returns a set of the names (or OID if not a known extension) of the
        extensions marked as critical
        :return:
            A set of unicode strings
        """
        if not self._processed_extensions:
            self._set_extensions()
        return self._critical_extensions
    @property
    def issuer_alt_name_value(self):
        """
        This extension allows associating one or more alternative names with
        the issuer of the CRL.
        :return:
            None or an x509.GeneralNames object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._issuer_alt_name_value
    @property
    def crl_number_value(self):
        """
        This extension adds a monotonically increasing number to the CRL and is
        used to distinguish different versions of the CRL.
        :return:
            None or an Integer object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._crl_number_value
    @property
    def delta_crl_indicator_value(self):
        """
        This extension indicates a CRL is a delta CRL, and contains the CRL
        number of the base CRL that it is a delta from.
        :return:
            None or an Integer object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._delta_crl_indicator_value
    @property
    def issuing_distribution_point_value(self):
        """
        This extension includes information about what types of revocations
        and certificates are part of the CRL.
        :return:
            None or an IssuingDistributionPoint object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._issuing_distribution_point_value
    @property
    def authority_key_identifier_value(self):
        """
        This extension helps in identifying the public key with which to
        validate the authenticity of the CRL.
        :return:
            None or an AuthorityKeyIdentifier object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._authority_key_identifier_value
    @property
    def freshest_crl_value(self):
        """
        This extension is used in complete CRLs to indicate where a delta CRL
        may be located.
        :return:
            None or a CRLDistributionPoints object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._freshest_crl_value
    @property
    def authority_information_access_value(self):
        """
        This extension is used to provide a URL with which to download the
        certificate used to sign this CRL.
        :return:
            None or an AuthorityInfoAccessSyntax object
        """
        if self._processed_extensions is False:
            self._set_extensions()
        return self._authority_information_access_value
    @property
    def issuer(self):
        """
        :return:
            An asn1crypto.x509.Name object for the issuer of the CRL
        """
        return self['tbs_cert_list']['issuer']
    @property
    def authority_key_identifier(self):
        """
        :return:
            None or a byte string of the key_identifier from the authority key
            identifier extension
        """
        if not self.authority_key_identifier_value:
            return None
        return self.authority_key_identifier_value['key_identifier'].native
    @property
    def issuer_cert_urls(self):
        """
        :return:
            A list of unicode strings that are URLs that should contain either
            an individual DER-encoded X509 certificate, or a DER-encoded CMS
            message containing multiple certificates
        """
        if self._issuer_cert_urls is None:
            self._issuer_cert_urls = []
            if self.authority_information_access_value:
                for entry in self.authority_information_access_value:
                    if entry['access_method'].native == 'ca_issuers':
                        location = entry['access_location']
                        if location.name != 'uniform_resource_identifier':
                            continue
                        url = location.native
                        # only plain HTTP URLs are collected
                        if url.lower()[0:7] == 'http://':
                            self._issuer_cert_urls.append(url)
        return self._issuer_cert_urls
    @property
    def delta_crl_distribution_points(self):
        """
        Returns delta CRL URLs - only applies to complete CRLs
        :return:
            A list of zero or more DistributionPoint objects
        """
        if self._delta_crl_distribution_points is None:
            self._delta_crl_distribution_points = []
            if self.freshest_crl_value is not None:
                for distribution_point in self.freshest_crl_value:
                    distribution_point_name = distribution_point['distribution_point']
                    # RFC5280 indicates conforming CA should not use the relative form
                    if distribution_point_name.name == 'name_relative_to_crl_issuer':
                        continue
                    # This library is currently only concerned with HTTP-based CRLs
                    for general_name in distribution_point_name.chosen:
                        if general_name.name == 'uniform_resource_identifier':
                            self._delta_crl_distribution_points.append(distribution_point)
        return self._delta_crl_distribution_points
|
#!/usr/bin/env python
# this line is needed so the execution happens in python environment
import rospy
from std_msgs.msg import Int64
from std_msgs.msg import Header
from boat_imu.msg import measurements
# number of remembered values
history_length = 5

# Measurement histories; index 0 always holds the newest sample.
# Each 3-axis history entry is [header, x, y, z].
airPressure = [0 for x in range(history_length)]
accelerometer = [["header", 0, 0, 0] for x in range(history_length)]
magnetometer = [["header", 0, 0, 0] for x in range(history_length)]
gyroscope = [["header", 0, 0, 0] for x in range(history_length)]
# TODO make this a dictionary, that contains: name, header, values
# maybe even dimension?
def storeIntROS(data, storage):
    """Prepend the newest Int value received from ROS to the storage
    list, dropping the oldest entry so the length stays constant."""
    storage.insert(0, data.data)
    del storage[-1]
    rospy.logdebug("add value" + str(storage[0]))
    rospy.logdebug(storage)
def store3dROS(data, storage):
    """Prepend a [header, x, y, z] measurement received from ROS to the
    storage list, dropping the oldest entry so the length stays constant."""
    entry = [data.header, data.x_value, data.y_value, data.z_value]
    storage.insert(0, entry)
    del storage[-1]
    rospy.logdebug("add value" + str(storage[0]))
    rospy.logdebug(storage)
def imu_listener():
    """Initialise the listener node and subscribe each sensor topic to
    the callback that maintains its own history list."""
    rospy.init_node('listener', anonymous=True)
    rospy.loginfo('ros imu listener started')
    rospy.Subscriber("AirPressure", Int64, storeIntROS, airPressure)
    rospy.Subscriber("Accelerometer", measurements, store3dROS, accelerometer)
    rospy.Subscriber("Magnetometer", measurements, store3dROS, magnetometer)
    # BUG FIX: the Gyroscope topic previously stored into `magnetometer`
    # (copy-paste error), clobbering the magnetometer history
    rospy.Subscriber("Gyroscope", measurements, store3dROS, gyroscope)
    rospy.spin()
if __name__ == '__main__':
    imu_listener()
switch to dictionaries for measurement history
#!/usr/bin/env python
# this line is needed so the execution happens in python environment
import rospy
from std_msgs.msg import Int64
from std_msgs.msg import Header
from boat_imu.msg import measurements
# number of remembered values
history_length = 5

# Measurement histories; index 0 always holds the newest sample.
# Template histories used to initialise the storage dicts below.
# BUG FIX: every storage field must receive its own *copy* of the
# template -- previously all fields referenced the single shared
# `empty_history` list object, so inserting into one sensor's history
# mutated every other history as well. Also, `map(str, ...)` returns a
# lazy iterator on Python 3 (no insert/pop), so explicit lists are used.
empty_history = [0 for x in range(history_length)]
empty_header_history = ["header" for x in range(history_length)]
airPressure = {
    'name': 'Air Pressure',
    'value': list(empty_history)}
accelerometer = {
    'name': 'Accelerometer',
    'header': [str(v) for v in empty_history],
    'x_value': list(empty_history),
    'y_value': list(empty_history),
    'z_value': list(empty_history)
}
magnetometer = {
    'name': 'Magnetometer',
    'header': [str(v) for v in empty_history],
    'x_value': list(empty_history),
    'y_value': list(empty_history),
    'z_value': list(empty_history)
}
gyroscope = {
    'name': 'Gyroscope',
    'header': [str(v) for v in empty_history],
    'x_value': list(empty_history),
    'y_value': list(empty_history),
    'z_value': list(empty_history)
}
def storeIntROS(data, storage):
    """Record the newest Int value received from ROS at the head of the
    storage dict's value history, discarding the oldest entry."""
    history = storage['value']
    history.insert(0, data.data)
    history.pop()
    rospy.loginfo(
        "add value " + str(history[0]) + ' to ' + storage['name'])
    rospy.logdebug(storage)
def store3dROS(data, storage):
    """Record the newest header + three directional measurements at the
    head of the matching history lists, discarding the oldest entries."""
    # same field order as before: header, then x, y, z
    for key in ('header', 'x_value', 'y_value', 'z_value'):
        storage[key].insert(0, getattr(data, key))
        storage[key].pop()
    rospy.loginfo(
        'add x_value ' + str(storage['x_value'][0]) +
        ' y_value ' + str(storage['y_value'][0]) +
        ' z_value ' + str(storage['z_value'][0]) +
        ' to ' + storage['name'])
    rospy.logdebug(storage)
def imu_listener():
    """Initialise the listener node and wire every sensor topic to the
    callback that maintains its history dictionary, then spin forever."""
    rospy.init_node('listener', anonymous=True)
    rospy.loginfo('ros imu listener started')
    subscriptions = (
        ("AirPressure", Int64, storeIntROS, airPressure),
        ("Accelerometer", measurements, store3dROS, accelerometer),
        ("Magnetometer", measurements, store3dROS, magnetometer),
        ("Gyroscope", measurements, store3dROS, gyroscope),
    )
    for topic, msg_type, callback, storage in subscriptions:
        rospy.Subscriber(topic, msg_type, callback, storage)
    rospy.spin()
if __name__ == '__main__':
    imu_listener()
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-05 11:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add translation support to ``SecurityQuestion``: a nullable
    ``language`` FK to ``core.SiteLanguage`` and a self-referential
    ``translated_pages`` M2M linking translated variants of a question.
    """
    dependencies = [
        # NOTE(review): another copy of this migration depends on
        # 'core.0004_add_core_translation_fields' -- confirm which core
        # migration name is current before applying
        ('core', '0078_add_core_translation_fields'),
        ('profiles', '0002_userprofile_auth_service_uuid'),
    ]
    operations = [
        migrations.AddField(
            model_name='securityquestion',
            name='language',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.SiteLanguage'),
        ),
        migrations.AddField(
            model_name='securityquestion',
            name='translated_pages',
            field=models.ManyToManyField(blank=True, related_name='_securityquestion_translated_pages_+', to='profiles.SecurityQuestion'),
        ),
    ]
renamed the core migration dependence
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-05 11:59
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add translation support to ``SecurityQuestion``: a nullable
    ``language`` FK to ``core.SiteLanguage`` and a self-referential
    ``translated_pages`` M2M linking translated variants of a question.
    """
    dependencies = [
        # NOTE(review): another copy of this migration depends on
        # 'core.0078_add_core_translation_fields' -- confirm which core
        # migration name is current before applying
        ('core', '0004_add_core_translation_fields'),
        ('profiles', '0002_userprofile_auth_service_uuid'),
    ]
    operations = [
        migrations.AddField(
            model_name='securityquestion',
            name='language',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='core.SiteLanguage'),
        ),
        migrations.AddField(
            model_name='securityquestion',
            name='translated_pages',
            field=models.ManyToManyField(blank=True, related_name='_securityquestion_translated_pages_+', to='profiles.SecurityQuestion'),
        ),
    ]
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2017 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2017 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import copy
import json
import heapq
import signal
import logging
import hashlib
import tempfile
import traceback
import netius.pool
import netius.adapters
from . import log
from . import util
from . import compat
from . import asynchronous
from .. import middleware
from .conn import * #@UnusedWildImport
from .poll import * #@UnusedWildImport
from .asynchronous import * #@UnusedWildImport
NAME = "netius"
""" The global infra-structure name to be used in the
identification of both the clients and the services this
value may be prefixed or suffixed """
VERSION = "1.16.19"
""" The version value that identifies the version of the
current infra-structure, all of the services and clients
may share this value """
PLATFORM = "%s %d.%d.%d.%s %s" % (
sys.subversion[0] if hasattr(sys, "subversion") else "CPython",
sys.version_info[0],
sys.version_info[1],
sys.version_info[2],
sys.version_info[3],
sys.platform
)
""" Extra system information containing some of the details
of the technical platform that is running the system, this
string should be exposed carefully to avoid extra information
from being exposed to outside agents """
IDENTIFIER_SHORT = "%s/%s" % (NAME, VERSION)
""" The short version of the current environment's identifier
meant to be used in production like environment as it hides some
of the critical and internal information of the system """
IDENTIFIER_LONG = "%s/%s (%s)" % (NAME, VERSION, PLATFORM)
""" Longest version of the system identifier, to be used in the
development like environment as it shows critical information
about the system internals that may expose the system """
IDENTIFIER = IDENTIFIER_LONG if config._is_devel() else IDENTIFIER_SHORT
""" The identifier that may be used to identify an user agent
or service running under the current platform, this string
should comply with the typical structure for such values,
by default this value is set with the short version of the
identifier (less information) but this may be changed at
runtime if the current verbosity level is changed """
WSAEWOULDBLOCK = 10035
""" The wsa would block error code meant to be used on
windows environments as a replacement for the would block
error code that indicates the failure to operate on a non
blocking connection """
WSAECONNABORTED = 10053
""" Error code meant to be raised when a connection is aborted
from the other peer meaning that that client or a server in the
as abruptly dropped the connection """
WSAECONNRESET = 10054
""" Code that is used when a connection is reset meaning that
the connection as been disconnected using a graceful approach
and without raising any extraneous problems """
SSL_ERROR_CERT_ALREADY_IN_HASH_TABLE = 101
""" Error raised under the ssl infra-structure for situations
where the certificate does not required re-loading as it is
already present in the hash table, this error may be safely
ignored as it does not represent a threat """
POLL_ORDER = (
EpollPoll,
KqueuePoll,
PollPoll,
SelectPoll
)
""" The order from which the poll methods are going to be
selected from the fastest to the slowest, in case no explicit
poll method is defined for a base service they are selected
based on this list testing them for acceptance first """
SILENT_ERRORS = (
errno.ECONNABORTED,
errno.ECONNRESET,
errno.EPIPE,
WSAECONNABORTED,
WSAECONNRESET
)
""" List that contain the various connection error states that
should not raise any extra logging information because even though
they should drop the connection they are expected """
VALID_ERRORS = (
errno.EWOULDBLOCK,
errno.EAGAIN,
errno.EPERM,
errno.ENOENT,
errno.EINPROGRESS,
WSAEWOULDBLOCK
)
""" List containing the complete set of error that represent
non ready operations in a non blocking socket """
SSL_SILENT_ERRORS = (
ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN
)
""" The list containing the errors that should be silenced
while still making the connection dropped as they are expected
to occur and should not be considered an exception """
SSL_VALID_ERRORS = (
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE,
SSL_ERROR_CERT_ALREADY_IN_HASH_TABLE
)
""" The list containing the valid errors for the handshake
operation of the ssl connection establishment """
SSL_ERROR_NAMES = {
ssl.SSL_ERROR_WANT_READ : "SSL_ERROR_WANT_READ",
ssl.SSL_ERROR_WANT_WRITE : "SSL_ERROR_WANT_WRITE",
SSL_ERROR_CERT_ALREADY_IN_HASH_TABLE : "SSL_ERROR_CERT_ALREADY_IN_HASH_TABLE"
}
""" The dictionary containing the association between the
various ssl errors and their string representation """
SSL_VALID_REASONS = (
"CERT_ALREADY_IN_HASH_TABLE",
)
""" The list containing the valid reasons for the handshake
operation of the ssl connection establishment """
TCP_TYPE = 1
""" The type enumeration value that represents the tcp (stream)
based communication protocol, for various usages in the base
netius communication infra-structure """
UDP_TYPE = 2
""" The datagram based udp protocol enumeration value to be used
in static references to this kind of socket usage """
STATE_STOP = 1
""" The stop state value, this value is set when the service
is either in the constructed stage or when the service has been
stop normally or with an error """
STATE_START = 2
""" The start state set when the service is in the starting
stage and running, normal state """
STATE_PAUSE = 3
""" The pause state set for a service for which the main event
loop has been paused and should be resumed latter """
STATE_CONFIG = 4
""" The configuration state that is set when the service is
preparing to become started and the configuration attributes
are being set according to pre-determined indications """
STATE_POLL = 5
""" State to be used when the service is in the polling part
of the loop, this is the most frequent state in an idle service
as the service "spends" most of its time in it """
STATE_TICK = 6
""" Tick state representative of the situation where the loop
tick operation is being started and all the pre tick handlers
are going to be called for pre-operations """
STATE_READ = 7
""" Read state that is set when the connection are being read
and the on data handlers are being called, this is the part
where all the logic driven by incoming data is being called """
STATE_WRITE = 8
""" The write state that is set on the writing of data to the
connections, this is a pretty "fast" state as no logic is
associated with it """
STATE_ERRROR = 9
""" The error state to be used when the connection is processing
any error state coming from its main select operation and associated
with a certain connection (very rare) """
STATE_STRINGS = (
"STOP",
"START",
"PAUSE",
"CONFIG",
"POLL",
"TICK",
"READ",
"WRITE",
"ERROR"
)
""" Sequence that contains the various strings associated with
the various states for the base service, this may be used to
create an integer to string resolution mechanism """
KEEPALIVE_TIMEOUT = 300
""" The amount of time in seconds that a connection is set as
idle until a new refresh token is sent to it to make sure that
it's still online and not disconnected, make sure that this
value is high enough that it does not consume to much bandwidth """
KEEPALIVE_INTERVAL = int(KEEPALIVE_TIMEOUT / 10)
""" The time between the retrying of "ping" packets, this value
does not need to be too large and should not be considered too
important (may be calculated automatically) """
KEEPALIVE_COUNT = 3
""" The amount of times the "ping" packet is re-sent until the
connection is considered to be offline and is dropped """
LOG_FORMAT = "%(asctime)s [%(levelname)s] %(message)s"
""" The format that is going to be used by the logger of the
netius infra-structure for debugging purposes it should allow
and end developer to dig into the details of the execution """
# initializes the various paths that are going to be used for
# the base files configuration in the complete service infra
# structure, these should include the ssl based files
BASE_PATH = os.path.dirname(__file__)
EXTRAS_PATH = os.path.join(BASE_PATH, "extras")
SSL_KEY_PATH = os.path.join(EXTRAS_PATH, "net.key")
SSL_CER_PATH = os.path.join(EXTRAS_PATH, "net.cer")
SSL_CA_PATH = os.path.join(EXTRAS_PATH, "net.ca")
SSL_DH_PATH = os.path.join(EXTRAS_PATH, "dh.pem")
if not os.path.exists(SSL_CA_PATH): SSL_CA_PATH = None
if not os.path.exists(SSL_DH_PATH): SSL_DH_PATH = None
class AbstractBase(observer.Observable):
"""
Base network structure to be used by all the network
capable infra-structures (eg: servers and clients).
Should handle all the nonblocking event loop so that
the read and write operations are easy to handle.
"""
_MAIN = None
""" Reference to the top level main instance responsible
for the control of the main thread loop """
    def __init__(self, name = None, handlers = None, *args, **kwargs):
        """
        Builds the base event loop structure, selecting the poll
        mechanism, setting up logging handlers and initializing the
        internal registries (connections, events, delayed operations).

        :type name: String
        :param name: Friendly name for the instance, defaults to the
        class name.
        :type handlers: Tuple/List
        :param handlers: Logging handlers to be used, defaults to a
        single stream (console) handler.
        """
        observer.Observable.__init__(self, *args, **kwargs)
        cls = self.__class__
        # selects the best available polling mechanism for the
        # current environment (may be overridden via kwargs below)
        poll = cls.test_poll()
        self.name = name or self.__class__.__name__
        self.handler_stream = logging.StreamHandler()
        self.handlers = handlers or (self.handler_stream,)
        self.level = kwargs.get("level", logging.INFO)
        self.diag = kwargs.get("diag", False)
        self.middleware = kwargs.get("middleware", [])
        self.children = kwargs.get("children", 0)
        # thread/logging/pool references, populated lazily at start time
        self.tid = None
        self.tname = None
        self.logger = None
        self.logging = None
        self.npool = None
        self.tpool = None
        self.fpool = None
        # poll and keep-alive configuration (kwargs override defaults)
        self.poll_c = kwargs.get("poll", poll)
        self.poll = self.poll_c()
        self.poll_name = self.poll.name()
        self.poll_timeout = kwargs.get("poll_timeout", POLL_TIMEOUT)
        self.keepalive_timeout = kwargs.get("keepalive_timeout", KEEPALIVE_TIMEOUT)
        self.keepalive_interval = kwargs.get("keepalive_interval", KEEPALIVE_INTERVAL)
        self.keepalive_count = kwargs.get("keepalive_count", KEEPALIVE_COUNT)
        self.poll_owner = True
        self.diag_app = None
        self.middleware_l = []
        # connection registries: list of active connections, socket to
        # connection map and socket to callbacks map
        self.connections = []
        self.connections_m = {}
        self.callbacks_m = {}
        self._uuid = uuid.uuid4()
        self._lid = 0
        self._did = 0
        # loop life-cycle flags and fork/child bookkeeping
        self._main = False
        self._running = False
        self._pausing = False
        self._loaded = False
        self._forked = False
        self._child = False
        self._childs = []
        # event bind registry and delayed-operation structures (the
        # lock protects the delayed lists across threads)
        self._events = {}
        self._notified = []
        self._delayed = []
        self._delayed_o = []
        self._delayed_n = []
        self._delayed_l = threading.RLock()
        self._extra_handlers = []
        self._expanded = []
        self._ssl_init()
        self.set_state(STATE_STOP)
@classmethod
def test_poll(cls, preferred = None):
# sets the initial selected variable with the unselected
# (invalid) value so that at lease one selection must be
# done in order for this method to succeed
selected = None
# iterates over all the poll classes ordered by preference
# (best first) and tries to find the one that better matched
# the current situation, either the preferred poll method or
# the most performant one in case it's not possible
for poll in POLL_ORDER:
if not poll.test(): continue
if not selected: selected = poll
if not preferred: break
name = poll.name()
if not name == preferred: continue
selected = poll
break
# in case no polling method was selected must raise an exception
# indicating that no valid polling mechanism is available
if not selected: raise errors.NetiusError(
"No valid poll mechanism available"
)
# returns the selected polling mechanism class to the caller method
# as expected by the current method
return selected
@classmethod
def get_loop(cls, asyncio = False):
loop = cls.get_asyncio() if asyncio else None
loop = loop or cls.get_main()
return loop
    @classmethod
    def get_main(cls):
        # returns the instance currently registered as the owner of the
        # main thread loop, may be None when no loop is registered
        return cls._MAIN
    @classmethod
    def get_asyncio(cls):
        # returns the asyncio event loop for the current context or None
        # when asyncio is not available or no loop has been created yet;
        # NOTE(review): peeks at the private policy attribute
        # "_local._loop" to avoid implicitly creating a loop -- confirm
        # this still holds for the asyncio version in use
        asyncio = asynchronous.get_asyncio()
        if not asyncio: return None
        policy = asyncio.get_event_loop_policy()
        if not policy._local._loop: return None
        return asyncio.get_event_loop()
@classmethod
def set_main(cls, instance, set_legacy = True):
cls._MAIN = instance
if not set_legacy: return
asyncio = asynchronous.get_asyncio()
if not asyncio: return
cls.patch_asyncio()
if instance: loop = compat.CompatLoop(instance)
else: loop = None
asyncio.set_event_loop(loop)
    @classmethod
    def unset_main(cls, set_legacy = True):
        # unregisters the main loop instance by setting it to the
        # invalid (None) value, propagating the asyncio unset as well
        cls.set_main(None, set_legacy = set_legacy)
@classmethod
def patch_asyncio(cls):
asyncio = asynchronous.get_asyncio()
if not asyncio: return
if hasattr(asyncio, "_patched"): return
if hasattr(asyncio.tasks, "_PyTask"):
asyncio.Task = asyncio.tasks._PyTask #@UndefinedVariable
asyncio.tasks.Task = asyncio.tasks._PyTask #@UndefinedVariable
asyncio._patched = True
def destroy(self):
    """
    Destroys the current object, closing every socket still
    registered in the connections map and clearing the internal
    structures so no further usage of them is possible.
    """
    observer.Observable.destroy(self)
    # closes every socket still present, avoiding any leak of
    # operative system level resources
    for sock in self.connections_m: sock.close()
    # empties both maps so that none of the previously registered
    # callbacks/connections gets used from now on
    self.connections_m.clear()
    self.callbacks_m.clear()
def call_safe(self, callable, args = [], kwargs = {}):
    """
    Invokes the provided callable using a protected strategy,
    meaning that any exception raised while running it is caught,
    logged as a warning (together with the current stack) and not
    propagated to the caller.

    :type callable: Function
    :param callable: The function that is going to be invoked
    using the safe approach.
    :type args: List
    :param args: The positional arguments to be passed to the
    callable.
    :type kwargs: Dictionary
    :param kwargs: The keyword arguments to be passed to the
    callable.
    :rtype: Object
    :return: The value returned by the callable, or None in case
    an exception was raised during its execution.
    """
    try:
        return callable(*args, **kwargs)
    except BaseException as exception:
        # logs the raised exception together with the current
        # stack so that the failure remains traceable
        self.warning(exception)
        self.log_stack()
def wait_event(self, callable, name = None):
    """
    Registers the provided callable to be invoked whenever the
    event with the given name is notified/triggered, duplicated
    registrations for the same event are silently ignored.
    """
    handlers = self._events.get(name, [])
    if callable in handlers: return
    handlers.append(callable)
    self._events[name] = handlers
def unwait_event(self, callable, name = None):
    """
    Removes a previously registered callable from the binds of
    the named event, deleting the event entry completely when no
    more binds remain; unknown callables/events are ignored.
    """
    handlers = self._events.get(name, None)
    if not handlers: return
    if not callable in handlers: return
    handlers.remove(callable)
    # keeps the (non empty) binds list registered, removing the
    # complete event entry otherwise
    if handlers: self._events[name] = handlers
    else: del self._events[name]
def delay(
    self,
    callable,
    timeout = None,
    immediately = False,
    verify = False,
    safe = False
):
    """
    Schedules the provided callable for execution on a future
    tick of the event loop, optionally after the given timeout
    (in seconds) or immediately (same iteration when possible).

    Returns the internal callable tuple that may latter be used
    to control (eg: cancel) the scheduled operation.
    """
    # redirects to the thread safe variant whenever safety is
    # requested and the caller is not the loop's own thread
    if safe and not self.is_main():
        return self.delay_s(
            callable,
            timeout = timeout,
            immediately = immediately,
            verify = verify
        )
    # computes the target execution time: -1 means run right
    # away, 0 means next tick and a concrete timestamp is used
    # whenever an explicit timeout has been provided
    target = -1 if immediately else 0
    if timeout: target = time.time() + timeout
    callable_o = legacy.orderable((target, callable))
    # when verification is requested avoids duplicated scheduling
    # of the exact same (target, callable) pair
    if verify and callable_o in self._delayed_o: return
    # the options list carries the mutable validity flag that
    # allows latter cancellation of the delayed operation
    options = [True]
    callable_t = legacy.orderable(
        (target, self._did, callable, self._lid, options)
    )
    heapq.heappush(self._delayed, callable_t)
    heapq.heappush(self._delayed_o, callable_o)
    # bumps the delay identifier so that entries sharing the same
    # target time keep a FIFO ordering inside the heap
    self._did += 1
    return callable_t
def delay_s(
    self,
    callable,
    timeout = None,
    immediately = True,
    verify = False,
    wakeup = True
):
    """
    Thread safe version of the delay operation, meant to be used
    when scheduling a callable from a thread other than the one
    running the event loop (implies locking, so it's considerably
    slower than the plain delay() operation).

    :type callable: Function
    :param callable: The callable to be called on a future tick
    according to the event loop rules.
    :type timeout: int
    :param timeout: The timeout for the callable to be called,
    bound to the resolution of the polling mechanism.
    :type immediately: bool
    :param immediately: If the callable should be called as soon
    as possible (equivalent to a -1 timeout).
    :type verify: bool
    :param verify: If the delayed sequences should be verified
    for possible duplicates.
    :type wakeup: bool
    :param wakeup: If the main event loop should be awaken so
    that the callable is processed as soon as possible.
    """
    # builds the tuple describing the pending schedule request,
    # it gets merged into the main delayed structures on the next
    # tick (through the merge operation)
    item = (callable, timeout, immediately, verify)
    # appends the request under lock protection as the "next"
    # list may be accessed from multiple threads
    with self._delayed_l:
        self._delayed_n.append(item)
    # optionally wakes the main event loop so the request is
    # processed as soon as possible
    if wakeup: self.wakeup()
def delay_m(self):
    """
    Merges the thread safe "next" list (populated by delay_s())
    into the main delayed structures, effectively making the
    pending requests ready for execution by the event loop.
    """
    # an empty/invalid next list means nothing to be merged
    if not self._delayed_n: return
    # re-schedules every pending request using the "normal" (non
    # thread safe) delay operation, safe as this runs on the loop
    for item in self._delayed_n:
        callable, timeout, immediately, verify = item
        self.delay(
            callable,
            timeout = timeout,
            immediately = immediately,
            verify = verify
        )
    # empties the next list, all of its items have been merged
    del self._delayed_n[:]
def ensure(
    self,
    coroutine,
    args = [],
    kwargs = {},
    thread = None,
    future = None,
    immediately = True
):
    """
    Main method for the queuing/startup of an asynchronous coroutine
    of async method, this should be called at the most higher level
    of the execution of a chained coroutine sequence.
    It should ensure that the provided callable is wrapped into a
    coroutine if that's the case, so that chained calling is not
    violated by a non compliant element.
    The method should create a proper sequence/pipelined handling of
    the various chained coroutine calls so that they are called one
    after the other using futures for such handling. The final "simple"
    abstraction should expose one "parent" future object as the interface.
    Multiple calls to this method should generate different async
    contexts (with different parent future instances).

    :type coroutine: Coroutine/CoroutineObject/Callable
    :param coroutine: The callable or coroutine that is going to be
    "inserted" for an asynchronous execution, if a callable is provided
    a coroutine is created wrapping the execution of such callable.
    :type args: List
    :param args: The list of "normal" arguments to be sent to the
    coroutine as parts of its signature.
    :type kwargs: Dictionary
    :param kwargs: The keyword arguments to be sent to the coroutine.
    :type thread: bool
    :param thread: If the execution of the coroutine should be done
    using a different thread (via thread pool), this may be interesting
    if the coroutine includes blocking i/o calls.
    :type future: Future
    :param future: If provided ensures that non new future object is going
    to be created for this async context to be created.
    :type immediately: bool
    :param immediately: If the callback should be scheduler in the event
    pool to be executed immediately (as soon as possible).
    :rtype: Future
    :return: The future that has been created for this new async context
    or the provided one if one was provided (this is considered to be the
    parent future of the complete coroutine chain).
    """
    # tries to determine if the provided callable is really
    # a coroutine and uses that condition to determine the
    # default value for the thread argument, notice that the
    # verification is also performed for the coroutine object
    is_coroutine = asynchronous.is_coroutine(coroutine)
    is_coroutine_object = asynchronous.is_coroutine_object(coroutine)
    is_defined = is_coroutine or is_coroutine_object
    if thread == None: thread = False if is_defined else True
    # verifies if a future variable is meant to be re-used
    # or if instead a new one should be created for the new
    # ensure execution operation
    future = future or self.build_future()
    # in case the provided coroutine callable is not really
    # a coroutine and instead a "normal" function a conversion
    # is required so that there's compatibility between the
    # coroutine model and the typical sync model
    if not is_defined:
        # saves the "original" callable so that it may be latter
        # used as part of the back calling process
        coroutine_c = coroutine
        # creates the coroutine that is going to be used to
        # encapsulate the callable, note that the result of the
        # callable is set as the result of the future (as expected)
        def coroutine(future, *args, **kwargs):
            yield
            result = coroutine_c(*args, **kwargs)
            future.set_result(result)
    # creates the function that is going to "propagate" the cancel
    # operation from the "parent" future to the child one, this
    # should also close the associated generator
    def cleanup(future):
        if not future.cancelled(): return
        if not hasattr(future, "child"): return
        if not future.child: return
        future.child.cancel()
    # adds the cleanup function as a done callback so that whenever
    # the future is canceled a child future is also canceled, this
    # propagation of operations allows for proper cleanup
    future.add_done_callback(cleanup)
    # verifies if the currently provided coroutine is in fact (already)
    # a coroutine object, if that's the case the sequence (generator)
    # is already present and the coroutine is simply assigned to the
    # sequence without any kind of conversion
    if is_coroutine_object:
        sequence = coroutine
    # otherwise the sequence must be created by calling the coroutine
    # (function) with the proper set of arguments, notice that the signature
    # is inspected to determine if a future argument is required
    else:
        # retrieves the argument spec of the provided coroutine to check
        # if the provided coroutine requires a future to be passed
        spec = legacy.getargspec(coroutine)
        is_future = spec[0] and spec[0][0] == "future"
        # creates the generate sequence from the coroutine callable
        # by calling it with the newly created future instance, that
        # will be used for the control of the execution, notice that
        # the future is only passed in case the coroutine has been
        # determined to be receiving the future as first argument
        if is_future: sequence = coroutine(future, *args, **kwargs)
        else: sequence = coroutine(*args, **kwargs)
    # calls the ensure generator method so that the provided sequence
    # gets properly "normalized" into the expected generator structure
    # in case the normalization is not possible a proper exception is
    # raised indicating the "critical" problem
    is_generator, sequence = asynchronous.ensure_generator(sequence)
    if not is_generator: raise errors.AssertionError("Expected generator")
    # creates the callable that is going to be used to call
    # the coroutine with the proper future variable as argument
    # note that in case the thread mode execution is enabled the
    # callable is going to be executed on a different thread,
    # also note that "step" is resolved lazily (late binding) so
    # referencing it before its definition below is safe
    if thread: callable = lambda f = future: self.texecute(step, [f])
    else: callable = lambda f = future: step(f)
    # creates the function that will be used to step through the
    # various elements in the sequence created from the calling of
    # the coroutine, the values returned from it may be either future
    # or concrete values, for each situation a proper operation must
    # be applied to complete the final task in the proper way
    def step(_future):
        # unsets any possible reference to a child element as it must
        # have been processed if the control flow reached this point,
        # this avoids duplicated approval of child futures
        future.child = None
        # iterates continuously over the generator that may emit both
        # plain object values or future (delayed executions)
        while True:
            # in case the future object is considered to be closed,
            # (done using a pipeline of callbacks) no more steps are
            # going to be taken and the sequence should be closed as
            # it's not longer going to be used (for sure), this means
            # that the blocked coroutine is not going to be resumed
            if future.closed: sequence.close(); future.cancel(); break
            # determines if the future is ready to receive new work
            # this is done using a pipeline of callbacks that must
            # deliver a positive value so that the future is considered
            # ready, note that in case the future is not ready the current
            # iteration cycle is delayed until the next tick
            if not future.ready: self.delay(callable); break
            # retrieves the next value from the generator and in case
            # value is the last one (stop iteration) verifies if the
            # is still considered running (no value or exception) set and
            # if that's the case runs the default value set (approve)
            # and then breaks the loop, notice that if there's an
            # exception raised in the middle of the generator iteration
            # it's set on the future (indirect notification)
            try: value = next(sequence)
            except StopIteration as exception:
                result = exception.args[0] if exception.args else None
                if future.running: future.set_result(result)
                break
            except BaseException as exception:
                future.set_exception(exception)
                break
            # determines if the value retrieved from the generator is a
            # future and if that's the case schedules a proper execution
            is_future = asynchronous.is_future(value)
            # in case the current value is a future schedules it for execution
            # taking into account the proper thread execution model, note that
            # the future is set as a child of the current "parent" future
            if is_future:
                future.child = value
                value.add_done_callback(callable)
                break
            # otherwise it's a normal value being yielded and should be sent
            # to the future object as a partial value (pipelining)
            else:
                # for a situation where a thread pool should be used the new
                # value should be "consumed" by adding the data handler operation
                # to the list of delayed operations and notifying the task pool
                # so that the event loop on the main thread gets unblocked and
                # the proper partial value handling is performed (always on main thread)
                if thread:
                    def handler():
                        future.partial(value)
                        callable()
                    self.delay_s(handler)
                    break
                # otherwise we're already on the main thread so a simple partial callback
                # notification should be enough for the proper consuming of the data
                else:
                    future.partial(value)
    # delays the execution of the callable so that it is executed
    # immediately if possible (event on the same iteration)
    self.delay(callable, immediately = immediately)
    return future
def resolve_hostname(self, hostname, type = "a"):
    """
    Resolves the provided hostname using the (asynchronous) DNS
    client, returning a future that gets resolved with the address
    contained in the first answer of the response.
    """
    import netius.clients
    future = self.build_future()
    # unpacks the DNS response, raising in case of timeout or an
    # empty answers set, resolving the future otherwise
    def on_response(response):
        if not response: raise RuntimeError("Timeout in resolution")
        if not response.answers: raise RuntimeError("Unable to resolve")
        first = response.answers[0]
        future.set_result(first[4])
    netius.clients.DNSClient.query_s(
        hostname,
        type = type,
        callback = on_response,
        daemon = False
    )
    return future
def run_coroutine(
    self,
    coroutine,
    args = [],
    kwargs = {},
    thread = None,
    close = None
):
    """
    Runs the provided coroutine (or future) until completion in a
    blocking fashion using the current event loop, returning its
    final result or (re-)raising its exception.
    """
    # once the associated future completes the loop must be ended,
    # either definitively (stop) or allowing re-usage (pause)
    def on_done(future):
        self.stop() if close else self.pause()
    # futures are used directly, everything else is assumed to be
    # a coroutine and gets wrapped under a new async context
    is_future = asynchronous.is_future(coroutine)
    if is_future: future = coroutine
    else: future = self.ensure(
        coroutine,
        args = args,
        kwargs = kwargs,
        thread = thread
    )
    future.add_done_callback(on_done)
    # blocks running the event loop until the done callback gets
    # called, unblocking the loop (stop or pause)
    self.start()
    # propagates a possible exception to the caller, as expected
    # by this sync execution mode, returning the result otherwise
    exception = future.exception()
    if exception: raise exception
    return future.result()
def wakeup(self, force = False):
    """
    Wakes the main event loop from a (possibly) blocked polling
    state, the operation is skipped when called from the loop's
    own (main) thread, unless explicitly forced.
    """
    # the loop's own thread never needs to wake itself, avoids
    # the extra resource usage unless the force flag is set
    if self.is_main() and not force: return
    # when forcing makes sure the notify pool exists and then
    # uses it to notify (unblock) the main event loop
    if force: self.nensure()
    if not self.npool: return
    self.npool.notify()
def sleep(self, timeout, future = None):
    """
    Returns a future that is resolved (with the timeout value)
    after the requested amount of time, the precision is bound
    to the resolution of the event loop polling mechanism.
    """
    # re-uses the provided future or builds a fresh one for this
    # concrete sleep operation
    future = future or self.build_future()
    # once the delayed operation fires the future is resolved
    # with the timeout value (by convention)
    resolver = lambda: future.set_result(timeout)
    self.delay(resolver, timeout = timeout)
    return future
def wait(self, event, timeout = None, future = None):
    """
    Returns a future that is resolved with the payload of the
    named event once it gets notified, the future is canceled
    instead in case the (optional) timeout is reached first.
    """
    # re-uses the provided future or builds a fresh one for this
    # concrete wait operation
    future = future or self.build_future()
    # resolves the future with the event payload, unless it has
    # (meanwhile) been canceled by the timeout operation
    def on_event(data):
        if future.cancelled(): return
        future.set_result(data)
    # cancels the future in case it's still pending when the
    # timeout is reached, avoiding dead lock situations
    def on_timeout():
        if future.done(): return
        future.cancel()
    # unregisters the event handler once the future completes
    # (with either success or error), proper cleanup
    def on_done(future):
        self.unwait_event(on_event, name = event)
    future.add_done_callback(on_done)
    # binds the handler to the event so it gets triggered on the
    # same loop tick as the notification
    self.wait_event(on_event, name = event)
    # schedules the cancellation for when a timeout is requested
    if timeout: self.delay(on_timeout, timeout = timeout)
    return future
def notify(self, event, data = None):
    """
    Triggers the event with the provided name, the notification
    is queued and effectively processed on the next tick of the
    event loop.
    """
    # queues the (event, payload) pair for processing by the
    # next tick operation
    self._notified.append((event, data))
    # when already on the main thread no (expensive) wakeup is
    # required, the loop will process the queue naturally
    if self.is_main(): return
    self.wakeup()
def load(self, full = False):
    """
    Runs the (singleton) loading process for the current engine:
    boot, logging, diagnostics, middleware, welcome message and
    system signal binding. Protected against double execution, so
    it may be safely called at any stage of the life cycle.

    :type full: bool
    :param full: If the loading process should be performed
    completely, including the long(er) running tasks.
    """
    # double loading protection, the process is a singleton and
    # runs only once per instance
    if self._loaded: return
    # boots the basic internal structures, to be called once per
    # loop starting process (destroyed on cleanup)
    self.boot()
    # starts each of the base sub-systems, beginning with the
    # logging infra-structure at the currently set level
    self.load_logging(self.level)
    self.load_diag()
    self.load_middleware()
    # prints/logs the welcome information and then binds the
    # system wide signals for proper (exception based) handling
    self.welcome()
    self.bind_signals()
    # marks the engine as loaded, preventing duplicated loading
    self._loaded = True
def unload(self, full = True):
    """
    Reverts the static structures created by load(), restoring
    the engine to its original state; the event loop itself is
    not affected. After this call, load() may be called again.

    :type full: bool
    :param full: If the complete set of unloading operations
    (including logging teardown) should be performed.
    """
    # nothing to be done when the engine was never loaded or it
    # has already been unloaded
    if not self._loaded: return
    # tears down the logging infra-structure (full mode only)
    # and then the registered middleware
    if full: self.unload_logging()
    self.unload_middleware()
    # marks the engine as unloaded, allowing a new load() call
    self._loaded = False
def boot(self):
    """
    Hook called at the beginning of the loading process, meant
    to be overridden by subclasses to initialize structures at
    loop load time (no-op by default).
    """
    pass
def welcome(self):
    """
    Hook called at the end of the loading process, meant to be
    overridden by subclasses to print/log information about the
    finished loading (no-op by default).
    """
    pass
def load_logging(self, level = logging.DEBUG, format = LOG_FORMAT, unique = False):
    """
    Loads the logging infra-structure for the current service,
    re-using a previously initialized logger (per identifier)
    when one exists; protected against double loading.
    """
    # a logger already set means the infra-structure has been
    # loaded before, no double loading allowed
    if self.logger: return
    # normalizes the level, builds the formatter and computes the
    # (possibly unique) identifier used for logger retrieval
    level = self._level(level)
    formatter = logging.Formatter(format)
    identifier = self.get_id(unique = unique)
    # retrieves (or creates) the logger for the identifier and
    # updates the usage counter, an already initialized logger
    # requires no further setup
    self.logger = logging.getLogger(identifier)
    counter = getattr(self.logger, "_counter", 0)
    is_new = counter == 0
    self.logger._counter = counter + 1
    if not is_new: return
    # builds the extra handlers and configures the base stream
    # handler with the proper level and formatter values
    self.extra_logging(level, formatter)
    self.handler_stream.setLevel(level)
    self.handler_stream.setFormatter(formatter)
    # detaches the logger from its parent, sets the verbosity
    # level and registers the complete set of handlers
    self.logger.parent = None
    self.logger.setLevel(level)
    for handler in self.handlers:
        if not handler: continue
        self.logger.addHandler(handler)
def unload_logging(self, safe = True):
    """
    Reverts the logging loading operation, unregistering and
    closing handlers once no more "clients" share the logger.

    :type safe: bool
    :param safe: If the handlers registered directly in the
    logger (by third parties) should also be removed, ensuring
    proper unregistration even in complex scenarios.
    """
    # without a logger set there's nothing to be unloaded
    if not self.logger: return
    # decrements the usage counter for the logger, only the
    # last "client" performs the effective teardown
    counter = self.logger._counter
    is_old = counter == 1
    self.logger._counter = counter - 1
    if not is_old: return
    # removes the handlers registered by this element from the
    # current logger instance
    for handler in self.handlers:
        if not handler: continue
        self.logger.removeHandler(handler)
    # in safe mode also removes every handler still attached to
    # the logger, note that a copy of the handlers list is used
    # as removing from the live list while iterating it would
    # silently skip every other handler
    for handler in list(self.logger.handlers) if safe else ():
        if not handler: continue
        self.logger.removeHandler(handler)
    # closes the base stream handler and the extra handlers as
    # they're no longer going to be used (avoids leaks)
    self.handler_stream.close()
    for handler in self._extra_handlers: handler.close()
    # unsets the logger reference disabling any further usage
    self.logger = None
def extra_logging(self, level, formatter):
    """
    Loads the complete set of logging handlers defined in the
    current logging value (a sequence of definition maps), the
    resulting handlers are latter used for piping the various
    logging messages to certain output channels.

    The handler creation uses a keyword arguments strategy so
    that python and configuration files remain compatible.

    :type level: String/int
    :param level: The base severity level for which the new
    handler will be configured in case no extra level is set.
    :type formatter: Formatter
    :param formatter: The logging formatter instance to be set
    in the handler for formatting messages to the output.
    """
    # without handler configurations there's nothing to be done
    if not self.logging: return
    # converts the handlers tuple into a list so that the newly
    # built handler instances may be appended to it
    self.handlers = list(self.handlers)
    # builds one handler instance per configuration map
    for config in self.logging:
        # gathers the base values, normalizing the (possibly
        # overridden) verbosity level for this handler
        name = config.get("name", None)
        _level = self._level(config.get("level", level))
        # works on a copy of the configuration with the base
        # values stripped, as they're not builder arguments
        config = dict(config)
        config.pop("level", None)
        config.pop("name", None)
        # resolves the builder for the handler name, skipping
        # unknown handler types, then builds and configures the
        # handler instance and registers it in both structures
        if not hasattr(log, name + "_handler"): continue
        builder = getattr(log, name + "_handler")
        handler = builder(**config)
        handler.setLevel(_level)
        handler.setFormatter(formatter)
        self.handlers.append(handler)
        self._extra_handlers.append(handler)
    # restores the handlers sequence to its canonical tuple form
    # so that no expected data types are violated
    self.handlers = tuple(self.handlers)
def level_logging(self, level):
    """
    Changes the verbosity level of the complete logging
    infra-structure to the provided one.

    :type level: int/String
    :param level: The new verbosity level, either an integer
    (internal) value or its string representation.
    """
    # normalizes the provided value into the internal (integer)
    # logging level representation
    level = self._level(level)
    # propagates the new level to the base stream handler, the
    # logger itself and every attached handler
    self.handler_stream.setLevel(level)
    self.logger.setLevel(level)
    for handler in self.handlers: handler.setLevel(level)
def load_diag(self, env = True):
    """
    Loads the (optional) diagnostics application, serving it on
    a background thread; the operation is skipped when the diag
    feature has not been requested.
    """
    # diagnostics not requested, nothing to be loaded
    if not self.diag: return
    # local import to avoid unwanted behavior or collisions with
    # a runtime process at module loading time
    from . import diag
    # fails gracefully in case the diag module could not load its
    # own (optional) dependencies
    if not diag.loaded:
        self.info("Failed to load diagnostics, import problem")
        return
    # gathers the serving parameters, optionally taking the
    # environment variables into account
    server = self.get_env("DIAG_SERVER", "netius") if env else "netius"
    host = self.get_env("DIAG_HOST", "127.0.0.1") if env else "127.0.0.1"
    port = self.get_env("DIAG_PORT", 5050, cast = int) if env else 5050
    # creates the diagnostics app, allows the current instance
    # to act on it and then serves it on a dedicated thread so
    # that the current context of execution is not blocked
    self.diag_app = diag.DiagApp(self)
    self.on_diag()
    self.diag_app.serve(
        server = server,
        host = host,
        port = port,
        diag = False,
        threaded = True,
        conf = False
    )
def load_middleware(self, suffix = "Middleware"):
# iterates over the complete set of string that define the middleware
# that is going to be loaded and executes the loading process
for name in self.middleware:
# capitalizes the provided name and appends the suffix to it
# to created the proper (and complete) middleware class name
name_c = name.capitalize()
class_n = name_c + suffix
# retrieves the class (from the middleware module) that is going
# to be used for the current middleware
middleware_c = getattr(middleware, class_n)
# runs the registration process for the middleware, meaning that
# the class is going to be instantiated and started and the proper
# internal structures will be updated in accordance
self.register_middleware(middleware_c)
def unload_middleware(self):
# iterates over the complete set of middleware instance to stop
# them (close internal structures) and then removes the middleware
# list so that they don't get used any longer
for middleware_i in self.middleware_l: middleware_i.stop()
del self.middleware_l[:]
def register_middleware(self, middleware_c):
# instantiates a new middleware class as a new instance and then
# runs the start method indicating the intention to start a new
# middleware (should properly start its internal structures)
middleware_i = middleware_c(self)
middleware_i.start()
# adds the middleware instance that has just been created to the
# list of middleware loaded for the current service
self.middleware_l.append(middleware_i)
# returns the instance of middleware that has just been created
# while running the registration process
return middleware_i
def call_middleware(self, name, *args, **kwargs):
# iterates over the complete set of middleware instance to call the
# method with the provided name, with the provided arguments
for middleware_i in self.middleware_l:
method = getattr(middleware_i, name)
method(*args, **kwargs)
def bind_signals(
self,
signals = (
signal.SIGINT,
signal.SIGTERM,
signal.SIGHUP if hasattr(signal, "SIGHUP") else None, #@UndefinedVariable
signal.SIGQUIT if hasattr(signal, "SIGQUIT") else None #@UndefinedVariable
),
handler = None
):
# creates the signal handler function that propagates the raising
# of the system exit exception (proper logic is executed) and then
# registers such handler for the (typical) sigterm signal
def base_handler(signum = None, frame = None): raise SystemExit()
for signum in signals:
if signum == None: continue
try: signal.signal(signum, handler or base_handler)
except: self.debug("Failed to register %d handler" % signum)
def start(self):
# in case the current instance is currently paused runs the
# resume operation instead as that's the expected operation
if self.is_paused(): return self.resume()
# in case the event loop is already running then a new sub-
# context based loop should be created in order to block the
# current execution stack (as expected)
if self.is_running(): return self.block()
# re-builds the polling structure with the new name this
# is required so that it's possible to change the polling
# mechanism in the middle of the loading process
self.poll = self.build_poll()
# retrieves the name of the polling mechanism that is
# going to be used in the main loop of the current
# base service, this is going to be used for diagnostics
poll_name = self.get_poll_name()
# triggers the loading of the internal structures of
# the base structure in case the loading has already
# been done nothing is done (avoids duplicated load)
self.load()
# opens the polling mechanism so that its internal structures
# become ready for the polling cycle, the inverse operation
# (close) should be performed as part of the cleanup
self.poll.open(timeout = self.poll_timeout)
# makes sure that the notify pool is created so that the event
# notification (required for multi threaded environments) is created
# and ready to be used (as expected)
self.nensure()
# retrieves the complete set of information regarding the current
# thread that is being used for the starting of the loop, this data
# may be used for runtime debugging purposes (debug only data)
cthread = threading.current_thread()
self.tid = cthread.ident or 0
self.tname = cthread.getName()
self._main = self.tname == "MainThread"
# in case the current thread is the main one, the global main instance
# is set as the current instance, just in case no main variable is
# already set otherwise corruption may occur (override of value)
if self._main and not AbstractBase.get_main():
AbstractBase.set_main(self)
# enters the main loop operation by printing a message
# to the logger indicating this start, this stage
# should block the thread until a stop call is made
self.debug("Starting '%s' service main loop (%.2fs) ..." % (self.name, self.poll_timeout))
self.debug("Using thread '%s' with tid '%d'" % (self.tname, self.tid))
self.debug("Using '%s' as polling mechanism" % poll_name)
# calls the main method to be able to start the main event
# loop properly as defined by specification
self.main()
    def stop(self):
        # in case the loop is currently paused the finish operation is
        # run immediately, otherwise the running flag is unset so that
        # the main loop breaks on its next tick iteration
        if self.is_paused(): self.finish()
        else: self._running = False
    def pause(self):
        # unsets the running flag and marks the pausing intent so that
        # the loop finalization raises the proper pause error
        self._running = False
        self._pausing = True
    def resume(self):
        # logs the resume operation and runs the resume callback before
        # re-entering the (blocking) main loop method
        self.debug("Resuming '%s' service main loop (%.2fs) ..." % (self.name, self.poll_timeout))
        self.on_resume()
        self.main()
    def close(self):
        # closing is equivalent to stopping the current event loop
        self.stop()
    def finish(self):
        # runs the final sequence of teardown operations for the loop,
        # notifying listeners, cleaning structures and setting the state
        self.debug("Finishing '%s' service main loop" % self.name)
        self.on_stop()
        self.cleanup()
        self.set_state(STATE_STOP)
    def main(self):
        """
        Runs the main event loop, blocking until the loop finishes or
        is paused, taking care of the proper state transitions and of
        the handling of loop level exceptions.
        """
        # sets the running flag that controls the running of the
        # main loop and then changes the current state to start
        # as the main loop is going to start, then executes the
        # on start call indicating the (re-)start of the even loop
        self._running = True
        self._pausing = False
        self.set_state(STATE_START)
        self.on_start()
        # runs the event loop, this is a blocking method that should
        # be finished by the end of the execution of by pause
        try:
            self.loop()
            self.finalize()
        except (KeyboardInterrupt, SystemExit, errors.StopError):
            self.info("Finishing '%s' service on user request ..." % self.name)
        except errors.PauseError:
            # a pause error moves the loop into the paused state leaving
            # the internal structures intact, so that a latter resume
            # operation is able to re-enter the loop
            self.debug("Pausing '%s' service main loop" % self.name)
            self.set_state(STATE_PAUSE)
            self.on_pause()
        except BaseException as exception:
            self.error(exception)
            self.log_stack(method = self.warning)
        except:
            # last resort handler, catches anything not derived from
            # BaseException (legacy style exceptions)
            self.critical("Critical level loop exception raised")
            self.log_stack(method = self.error)
        finally:
            # in case the loop ended up paused no finishing operations
            # are performed (the loop may still be resumed later)
            if self.is_paused(): return
            self.finish()
    def is_main(self):
        # in case no thread identifier has been registered (loop not
        # yet started) the current thread is assumed to be the main one
        if not self.tid: return True
        return threading.current_thread().ident == self.tid
    def is_running(self):
        # indicates if the event loop is currently running (flag based)
        return self._running
    def is_started(self):
        return self.get_state() == STATE_START
    def is_stopped(self):
        return self.get_state() == STATE_STOP
    def is_paused(self):
        return self.get_state() == STATE_PAUSE
    def is_edge(self):
        # indicates if the underlying polling mechanism is edge based
        return self.poll.is_edge()
    def is_empty(self):
        # indicates if the polling mechanism has no subscriptions
        return self.poll.is_empty()
    def is_sub_read(self, socket):
        # verifies if the socket is subscribed for read operations
        return self.poll.is_sub_read(socket)
    def is_sub_write(self, socket):
        # verifies if the socket is subscribed for write operations
        return self.poll.is_sub_write(socket)
    def is_sub_error(self, socket):
        # verifies if the socket is subscribed for error operations
        return self.poll.is_sub_error(socket)
    def sub_all(self, socket):
        # subscribes the socket for all the operations, setting the
        # current instance as the owner of the subscription
        return self.poll.sub_all(socket, owner = self)
    def unsub_all(self, socket):
        # removes every subscription associated with the socket
        return self.poll.unsub_all(socket)
    def sub_read(self, socket):
        return self.poll.sub_read(socket, owner = self)
    def sub_write(self, socket):
        return self.poll.sub_write(socket, owner = self)
    def sub_error(self, socket):
        return self.poll.sub_error(socket, owner = self)
    def unsub_read(self, socket):
        return self.poll.unsub_read(socket)
    def unsub_write(self, socket):
        return self.poll.unsub_write(socket)
    def unsub_error(self, socket):
        return self.poll.unsub_error(socket)
    def cleanup(self, destroy = True):
        """
        Runs the complete cleanup of the instance's internal structures,
        meant to be called at the end of the event loop's life-cycle.

        :type destroy: bool
        :param destroy: If the (final) destroy operation should also
        be performed at the end of the cleanup process.
        """
        # runs the unload operation for the current base container this should
        # unset/unload some of the components for this base infra-structure
        self.unload()
        # destroys the complete set of structures associated with the event
        # notification, this should include both the map of events to binds
        # association and the list of pending notifications to be processed
        self._events.clear()
        del self._notified[:]
        # destroys the current information on the delays that are no longer
        # going to be executed as the poll/system is closing, this is required
        # in order to avoid any possible memory leak with closures/cycles
        del self._delayed[:]
        del self._delayed_o[:]
        del self._delayed_n[:]
        # runs the expand destroy operation so that the complete set of expanded
        # values get their (temporary) files removed (garbage collection)
        self._expand_destroy()
        # runs the destroy operation on the ssl component of the base
        # element so that no more ssl is available/used (avoids leaks)
        self._ssl_destroy()
        # verifies if there's a valid (and open) notify pool, if that's
        # the case starts the stop process for it so that there's no
        # leaking of task descriptors and other structures
        if self.npool: self.nstop()
        # verifies if there's a valid (and open) task pool, if that's
        # the case starts the stop process for it so that there's no
        # leaking of task descriptors and other structures
        if self.tpool: self.tstop()
        # verifies if there's a valid (and open) file pool, if that's
        # the case starts the stop process for it so that there's no
        # leaking of file descriptors and other structures
        if self.fpool: self.fstop()
        # creates a copy of the connections list because this structure
        # is going to be changed in the closing of the connection object
        connections = copy.copy(self.connections)
        # iterates over the complete set of connections currently
        # registered in the base structure and closes them so that
        # can no longer be used and are gracefully disconnected
        for connection in connections: connection.close()
        # iterates over the complete set of sockets in the connections
        # map to properly close them (avoids any leak of resources)
        for _socket in self.connections_m: _socket.close()
        # in case the current thread is the main one then in case the
        # instance set as global main is this one unsets the value
        # meaning that the main instance has been unloaded
        if self._main and AbstractBase.get_main() == self:
            AbstractBase.unset_main()
        # closes the current poll mechanism so that no more issues arise
        # from an open poll system (memory leaks, etc.), note that this is
        # only performed in case the current base instance is the owner of
        # the poll that is going to be closed (works with containers)
        if self.poll_owner: self.poll.close()
        # deletes some of the internal data structures created for the instance
        # and that are considered as no longer required
        self.connections_m.clear()
        self.callbacks_m.clear()
        del self.connections[:]
        del self._extra_handlers[:]
        # runs the destroy operation for the current instance, this should remove
        # the most obscure parts of the current instance
        if destroy: self.destroy()
    def loop(self):
        """
        Runs the main cycle of the event loop, iterating until the
        running flag is unset, each iteration runs the tick operations,
        polls the connections and dispatches the resulting sets to the
        read, write and error handler methods.
        """
        # iterates continuously while the running flag is set, once
        # it becomes unset the loop breaks at the next execution cycle
        while True:
            # calls the base tick int handler indicating that a new
            # tick loop iteration is going to be started, all the
            # "in between loop" operation should be performed in this
            # callback as this is the "space" they have for execution
            self.ticks()
            # in case running flag is disabled it's time to break the
            # cycle (just before the possible block) as it would imply
            # extra time before we could stop the event loop
            if not self._running: break
            # updates the current state to poll to indicate
            # that the base service is selecting the connections
            self.set_state(STATE_POLL)
            # runs the main selection operation on the current set
            # of connection for each of the three operations returning
            # the resulting active sets for the callbacks
            reads, writes, errors = self.poll.poll()
            # calls the various callbacks with the selections lists,
            # these are the main entry points for the logic to be executed
            # each of this methods should be implemented in the underlying
            # class instances as no behavior is defined at this inheritance
            # level (abstract class)
            self.reads(reads)
            self.writes(writes)
            self.errors(errors)
    def block(self):
        # runs a nested event loop in the current context, note that
        # once the nested loop finishes the running flag is restored
        # so that the outer loop (that spawned this block) may keep
        # on running as expected
        try: self.loop()
        finally: self._running = True
def fork(self):
# ensures that the children value is converted as an
# integer value as this is the expected structure
self.children = int(self.children)
# runs a series of validations to be able to verify
# if the fork operation should really be performed
if not self.children: return True
if not self.children > 0: return True
if not hasattr(os, "fork"): return True
if self._forked: return True
# prints a debug operation about the operation that is
# going to be performed for the forking
self.debug("Forking the current process into '%d' children ..." % self.children)
# calls the on fork method indicating that a new fork
# operation is soon going to be performed
self.on_fork()
# sets the initial pid value to the value of the current
# master process as this is going to be used for child
# detection (critical for the correct logic execution)
pid = os.getpid()
# iterates of the requested (number of children) to run
# the concrete fork operation and fork the logic
for _index in range(self.children):
pid = os.fork() #@UndefinedVariable
self._child = pid == 0
if self._child: self.on_child()
if self._child: break
self._childs.append(pid)
# sets the forked flag, meaning that the current process
# has been already forked (avoid duplicated operations)
self._forked = True
# in case the current process is a child one an immediate
# valid value should be returned (force logic continuation)
if self._child: return True
# registers for some of the common signals to be able to avoid
# any possible interaction with the joining process
def handler(signum = None, frame = None): raise errors.StopError("Stop")
self.bind_signals(handler = handler)
# sleeps forever, waiting for an interruption of the current
# process that triggers the children to quit, so that it's
# able to "join" all of them into the current process
try: self._wait_forever()
except: pass
# prints a debug information about the processes to be joined
# this indicated the start of the joining process
self.debug("Joining '%d' children processes ..." % self.children)
# iterates over the complete set of children to send the proper
# terminate signal to each of them for proper termination
for pid in self._childs: os.kill(pid, signal.SIGTERM)
# iterates over the complete set of child processed to join
# them (master responsibility)
for pid in self._childs: os.waitpid(pid, 0)
# prints a message about the end of the child process joining
# this is relevant to make sure everything is ok before exit
self.debug("Finished joining %d' children processes" % self.children)
# runs the cleanup operation for the current process this is
# required to avoid any leaked information
self.cleanup()
# returns an invalid value meaning that no control flow should
# continue, as this is the master process (coordinator)
return False
def finalize(self):
# verifies a series of conditions and raises a proper error in case
# any of them is verified under the current state
if self._pausing: raise errors.PauseError("Pause state expected")
if self._running: raise errors.AssertionError("Not expected running")
    def ticks(self):
        """
        Runs the "in between loop" operations for the current event
        loop iteration: state transition, file event processing, loop
        identifier update and delayed callable flushing.
        """
        # updates the current state value to the tick state indicating
        # that the current process is updating a new tick in loop
        self.set_state(STATE_TICK)
        # runs the verification/processing of the complete set of file
        # events that have been raised meanwhile, this allows for the
        # processing of various file driven operations
        self.files()
        # "calculates" the new loop id by incrementing one value
        # to the previous one, note that the value is calculated
        # in a modulus way so that no overflow occurs
        self._lid = (self._lid + 1) % 2147483647
        # runs the processing of the delayed calls so that the pending
        # calls are called if the correct time has been reached
        self._delays()
    def reads(self, reads, state = True):
        # updates the state (if requested) marking the read stage,
        # concrete read handling is left to subclass implementations
        if state: self.set_state(STATE_READ)
    def writes(self, writes, state = True):
        # updates the state (if requested) marking the write stage,
        # concrete write handling is left to subclass implementations
        if state: self.set_state(STATE_WRITE)
    def errors(self, errors, state = True):
        # updates the state (if requested) marking the error stage,
        # note that STATE_ERRROR matches the (misspelled) name of the
        # global constant as used elsewhere in the module
        if state: self.set_state(STATE_ERRROR)
    def pregister(self, pool):
        """
        Registers the provided pool in the current polling mechanism,
        subscribing for read operations on its event file descriptor
        and setting the proper (de-notify) callback for it.

        :type pool: Pool
        :param pool: The pool to be registered in the current instance.
        """
        # prints a debug message stating that a new pool is
        # being created for the handling of message events
        self.debug("Started pool, for async handling")
        # tries to retrieve the file descriptor of the event virtual
        # object that is notified for each operation associated with
        # the pool, (primary communication mechanism)
        eventfd = pool.eventfd()
        if not eventfd: self.warning("Starting pool without eventfd")
        if not eventfd: return
        if not self.poll: return
        self.sub_read(eventfd)
        # creates the callback closure around the current context
        # so that a proper callback can be used for the operations
        pool._callback = lambda e, s: self.pcallback(e, s, pool)
        # registers for a callback operation in the event fd so that
        # it gets properly de-notified as expected when a read operation
        # is performed in it, this operations will be performed upon
        # the request for the read operation
        self.add_callback(eventfd, pool._callback)
        # retrieves the class of the eventfd object and then uses it
        # to retrieve the associated name for logging purposes
        eventfd_cls = eventfd.__class__
        eventfd_name = eventfd_cls.__name__
        # echoes a debug message indicating that a new read event
        # subscription has been created for the event fd of the pool
        self.debug("Subscribed for read operations on event fd (%s)" % eventfd_name)
    def punregister(self, pool):
        """
        Unregisters the provided pool from the current polling
        mechanism, removing the read subscription and the callback
        previously set on its event file descriptor.

        :type pool: Pool
        :param pool: The pool to be unregistered from the instance.
        """
        # prints a debug message notifying the user that no more
        # async handling is possible using the pool
        self.debug("Stopped existing pool, no more async handling")
        # tries to retrieve the event file descriptor for
        # the pool and in case it exists unsubscribes
        # from it under the current polling system
        eventfd = pool.eventfd()
        if not eventfd: self.warning("Stopping pool without eventfd")
        if not eventfd: return
        if not self.poll: return
        self.unsub_read(eventfd)
        # verifies if the callback operation in the event fd is defined
        # for the pool and if that's not the case returns immediately
        if not hasattr(pool, "_callback"): return
        # unregisters from a callback operation in the event fd so that
        # no more events are handled by the notifier, this is expected
        # in order to avoid any leaks
        self.remove_callback(eventfd, pool._callback)
        # unsets the value of the callback removing its reference from
        # the pool as it's no longer going to be used
        del pool._callback
        # echoes a debug message indicating that a new read event
        # unsubscription has been created for the event fd of the pool
        self.debug("Unsubscribed for read operations on event fd")
def pcallback(self, event, socket, pool):
# runs a series of pre-validations on the callback so that
# no operations is performed for such conditions
if not pool: return
if not event == "read": return
# runs the de-notify operation clearing the pool from any
# possible extra notification (avoid extra counter)
pool.denotify()
    def nensure(self):
        # makes sure that a notify pool exists, creating (and
        # starting) one in case it's not yet available
        if self.npool: return
        self.nstart()
    def nstart(self):
        # creates, starts and registers the notify pool, no
        # operation is performed in case one already exists
        if self.npool: return
        self.npool = netius.pool.NotifyPool()
        self.npool.start()
        self.pregister(self.npool)
    def nstop(self):
        # unregisters and stops the notify pool (in case one exists)
        if not self.npool: return
        self.punregister(self.npool)
        self.npool.stop()
    def tensure(self):
        # makes sure that a task pool exists, creating (and
        # starting) one in case it's not yet available
        if self.tpool: return
        self.tstart()
    def tstart(self):
        # creates, starts and registers the task pool, no
        # operation is performed in case one already exists
        if self.tpool: return
        self.tpool = netius.pool.TaskPool()
        self.tpool.start()
        self.pregister(self.tpool)
    def tstop(self):
        # unregisters and stops the task pool (in case one exists)
        if not self.tpool: return
        self.punregister(self.tpool)
        self.tpool.stop()
def texecute(self, callable, args = [], kwargs = {}, callback = None):
self.tensure()
self.tpool.execute(
callable,
args = args,
kwargs = kwargs,
callback = callback
)
def files(self):
if not self.fpool: return
events = self.fpool.pop_all()
for event in events:
callback = event[-1]
if not callback: continue
callback(*event[1:-1])
    def fopen(self, *args, **kwargs):
        # ensures the file pool exists and delegates the
        # (asynchronous) file open operation to it
        self.fensure()
        return self.fpool.open(*args, **kwargs)
    def fclose(self, *args, **kwargs):
        # ensures the file pool exists and delegates the
        # (asynchronous) file close operation to it
        self.fensure()
        return self.fpool.close(*args, **kwargs)
    def fread(self, *args, **kwargs):
        # ensures the file pool exists and delegates the
        # (asynchronous) file read operation to it
        self.fensure()
        return self.fpool.read(*args, **kwargs)
    def fwrite(self, *args, **kwargs):
        # ensures the file pool exists and delegates the
        # (asynchronous) file write operation to it
        self.fensure()
        return self.fpool.write(*args, **kwargs)
    def fensure(self):
        # makes sure that a file pool exists, creating (and
        # starting) one in case it's not yet available
        if self.fpool: return
        self.fstart()
    def fstart(self):
        # verifies if there's an already open file pool for
        # the current system and if that's not the case creates
        # a new one and starts it's thread cycle
        if self.fpool: return
        self.fpool = netius.pool.FilePool()
        self.fpool.start()
        self.pregister(self.fpool)
    def fstop(self):
        # verifies if there's an available file pool and
        # if that's the case initializes the stopping of
        # such system, note that this is blocking call as
        # all of the thread will be joined under it
        if not self.fpool: return
        self.punregister(self.fpool)
        self.fpool.stop()
    def on_connection_c(self, connection):
        # prints some debug information about the connection that has
        # just been created (for possible debugging purposes)
        self.debug(
            "Connection '%s' from '%s' created" %
            (connection.id, connection.owner.name)
        )
        self.debug(
            "There are %d connections for '%s'" %
            (len(connection.owner.connections), connection.owner.name)
        )
        # triggers the event notifying any listener about the new connection
        # that is now ready for operation to be performed in it
        self.trigger("connection_c", self, connection)
    def on_connection_d(self, connection):
        # prints some debug information about the connection
        # that has just been scheduled for destruction
        self.debug(
            "Connection '%s' from '%s' deleted" %
            (connection.id, connection.owner.name)
        )
        self.debug(
            "There are %d connections for '%s'" %
            (len(connection.owner.connections), connection.owner.name)
        )
        # triggers the event notifying any listener about the
        # deletion/destruction of the connection
        self.trigger("connection_d", self, connection)
    def on_stream_c(self, stream):
        # retrieves the reference to the connection that is associated
        # with the stream that has been created
        connection = stream.connection
        # prints some debug information on the stream that has just been
        # created (may be used for debugging purposes)
        self.debug(
            "Stream '%s' from '%s' created" %
            (stream.identifier, connection.owner.name)
        )
        # notifies any listener of the stream created event about the
        # new stream (as expected per specification)
        self.trigger("stream_c", self, stream)
    def on_stream_d(self, stream):
        # retrieves the reference to the connection that is associated
        # with the stream that has been deleted
        connection = stream.connection
        # prints some debug information on the stream that has just been
        # deleted (may be used for debugging purposes)
        self.debug(
            "Stream '%s' from '%s' deleted" %
            (stream.identifier, connection.owner.name)
        )
        # notifies any listener of the stream deleted event about the
        # deleted stream (as expected per specification)
        self.trigger("stream_d", self, stream)
    def on_fork(self):
        # triggers the fork event indicating that a fork operation
        # is about to be performed on the current process
        self.trigger("fork", self)
    def on_child(self):
        # triggers the child event indicating that a new child has been
        # created and that any callback operation may now be performed
        self.trigger("child", self)
        # creates a new seed value from a pseudo random value and
        # then adds this new value as the base for randomness in the
        # ssl base infra-structure, required for security
        seed = str(uuid.uuid4())
        seed = legacy.bytes(seed)
        ssl.RAND_add(seed, 0.0)
    def on_diag(self):
        # triggers the diag event (diagnostics app has been created)
        self.trigger("diag", self)
    def on_start(self):
        # triggers the start event (main loop is (re-)starting)
        self.trigger("start", self)
    def on_stop(self):
        # triggers the stop event (main loop is finishing)
        self.trigger("stop", self)
    def on_pause(self):
        # triggers the pause event (main loop has been paused)
        self.trigger("pause", self)
    def on_resume(self):
        # triggers the resume event (main loop is being resumed)
        self.trigger("resume", self)
def info_dict(self, full = False):
info = dict(
loaded = self._loaded,
connections = len(self.connections),
state = self.get_state_s(),
poll = self.get_poll_name()
)
if full: info.update(
name = self.name,
_lid = self._lid
)
return info
def info_string(self, full = False, safe = True):
try: info = self.info_dict(full = full)
except: info = dict()
info_s = json.dumps(
info,
ensure_ascii = False,
indent = 4,
separators = (",", " : "),
sort_keys = True
)
return info_s
def connections_dict(self, full = False):
connections = []
for connection in self.connections:
info = connection.info_dict(full = full)
connections.append(info)
return connections
def connection_dict(self, id, full = False):
connection = None
for _connection in self.connections:
if not _connection.id == id: continue
connection = _connection
break
if not connection: return None
return connection.info_dict(full = full)
    def new_connection(self, socket, address, ssl = False):
        """
        Creates a new connection for the provided socket
        object and string based address, the returned
        value should be a workable object.

        :type socket: Socket
        :param socket: The socket object to be encapsulated
        by the object to be created (connection).
        :type address: String
        :param address: The address as a string to be used to
        describe the connection object to be created.
        :type ssl: bool
        :param ssl: If the connection to be created is meant to
        be secured using the ssl framework for encryption.
        :rtype: Connection
        :return: The connection object that encapsulates the
        provided socket and address values.
        """
        # builds the connection object, setting the current instance
        # as the owner of the newly created connection
        return Connection(
            owner = self,
            socket = socket,
            address = address,
            ssl = ssl
        )
def add_callback(self, socket, callback):
callbacks = self.callbacks_m.get(socket, [])
if callback in callbacks: return
callbacks.append(callback)
self.callbacks_m[socket] = callbacks
def remove_callback(self, socket, callback):
callbacks = self.callbacks_m.get(socket, [])
if not callback in callbacks: return
callbacks.remove(callback)
if callbacks: return
del self.callbacks_m[socket]
def load_config(self, path = "config.json", **kwargs):
kwargs = self.apply_config(path, kwargs)
for key, value in legacy.iteritems(kwargs):
setattr(self, key, value)
def apply_config(self, path, kwargs):
if not os.path.exists(path): return kwargs
self.info("Applying configuration file '%s' ..." % path)
kwargs = copy.copy(kwargs)
file = open(path, "rb")
try: contents = json.load(file)
finally: file.close()
for key, value in legacy.iteritems(contents):
kwargs[key] = value
return kwargs
    def exec_safe(self, connection, callable, *args, **kwargs):
        """
        Executes the provided callable in a "safe" manner, meaning
        that ssl and socket errors are classified according to their
        severity (silent, valid or exceptional) and any other exception
        is routed to the general exception handler, in case of error
        an invalid value is returned instead of the result.
        """
        try:
            return callable(*args, **kwargs)
        except ssl.SSLError as error:
            # extracts both the error code and the reason so that the
            # ssl error may be classified as silent, valid or exceptional
            error_v = error.args[0] if error.args else None
            error_m = error.reason if hasattr(error, "reason") else None
            if error_v in SSL_SILENT_ERRORS:
                self.on_expected(error, connection)
            elif not error_v in SSL_VALID_ERRORS and\
                not error_m in SSL_VALID_REASONS:
                self.on_exception(error, connection)
        except socket.error as error:
            # extracts the error code so that the socket error may be
            # classified as silent, valid or exceptional
            error_v = error.args[0] if error.args else None
            if error_v in SILENT_ERRORS:
                self.on_expected(error, connection)
            elif not error_v in VALID_ERRORS:
                self.on_exception(error, connection)
        except BaseException as exception:
            # any other exception is considered unexpected and routed
            # to the general exception handler
            self.on_exception(exception, connection)
        return False
    def is_devel(self):
        """
        Verifies if the current running environment is meant to be used
        for development purposes as opposed to a production environment.

        The method should always be used in situations where some critical
        and internal information is meant to be displayed in a development
        environment but hidden in a production one.

        This method should be used at runtime as opposed to the private
        configuration based one.

        :rtype: bool
        :return: If the current environment is development oriented or
        if it's considered to be a production one (invalid result).
        """
        return self.is_debug()
    def is_debug(self):
        # verifies if the debug level is enabled in the current logger
        if not self.logger: return False
        return self.logger.isEnabledFor(logging.DEBUG)
    def is_info(self):
        # verifies if the info level is enabled in the current logger
        if not self.logger: return False
        return self.logger.isEnabledFor(logging.INFO)
    def is_warning(self):
        # verifies if the warning level is enabled in the current logger
        if not self.logger: return False
        return self.logger.isEnabledFor(logging.WARNING)
    def is_error(self):
        # verifies if the error level is enabled in the current logger
        if not self.logger: return False
        return self.logger.isEnabledFor(logging.ERROR)
    def is_critical(self):
        # verifies if the critical level is enabled in the current logger
        if not self.logger: return False
        return self.logger.isEnabledFor(logging.CRITICAL)
    def debug(self, object):
        # logs the provided object/message under the debug level
        self.log(object, level = logging.DEBUG)
    def info(self, object):
        # logs the provided object/message under the info level
        self.log(object, level = logging.INFO)
    def warning(self, object):
        # logs the provided object/message under the warning level
        self.log(object, level = logging.WARNING)
    def error(self, object):
        # logs the provided object/message under the error level
        self.log(object, level = logging.ERROR)
    def critical(self, object):
        # logs the provided object/message under the critical level
        self.log(object, level = logging.CRITICAL)
    def log_stack(self, method = None, info = True):
        # logs the current exception stack trace (line by line) using
        # the provided method (defaults to info level), optionally
        # logging also the instance diagnostics information
        if not method: method = self.info
        lines = traceback.format_exc().splitlines()
        for line in lines: method(line)
        if info: self.log_info(method = method)
    def log_info(self, method = None):
        # logs the (full) diagnostics information of the instance,
        # one line at a time, using the provided method
        if not method: method = self.info
        info_string = self.info_string(full = True)
        for line in info_string.split("\n"): method(line)
    def log(self, *args, **kwargs):
        # dispatches the logging operation to the implementation that
        # matches the current python (major) version
        if legacy.PYTHON_3: return self.log_python_3(*args, **kwargs)
        else: return self.log_python_2(*args, **kwargs)
    def log_python_3(self, object, level = logging.INFO):
        # converts the object into a string message (best effort, the
        # bare except is a deliberate fallback) and logs it in case a
        # valid logger is set for the current instance
        is_str = isinstance(object, legacy.STRINGS)
        try: message = str(object) if not is_str else object
        except: message = str(object)
        if not self.logger: return
        self.logger.log(level, message)
    def log_python_2(self, object, level = logging.INFO):
        # converts the object into a unicode message (best effort,
        # falling back to a lenient utf-8 decode) and logs it in case
        # a valid logger is set for the current instance
        is_str = isinstance(object, legacy.STRINGS)
        try: message = unicode(object) if not is_str else object #@UndefinedVariable
        except: message = str(object).decode("utf-8", "ignore")
        if not self.logger: return
        self.logger.log(level, message)
    def build_poll(self):
        """
        Builds (or re-uses) the polling mechanism for the current
        instance, a new poll object is only created when the current
        one is not open or when the polling class has changed.

        :rtype: Poll
        :return: The poll object to be used by the current instance.
        """
        # retrieves the reference to the parent class associated with
        # the current instance, it's going to be used for class methods
        cls = self.__class__
        # verifies if the currently set polling mechanism is open, in
        # case it is there's no need to re-build the polling mechanism
        # otherwise rebuilds the polling mechanism with the current
        # name and returns the new poll object to the caller method
        if self.poll and self.poll.is_open(): return self.poll
        # runs the testing of the poll again and verifies if the polling
        # class has changed in case it did not returns the current poll
        # instance as expected by the current infra-structure
        poll_c = cls.test_poll(preferred = self.poll_name)
        if poll_c == self.poll_c: return self.poll
        # updates the polling class with the new value and re-creates
        # the polling instance with the new polling class returning this
        # new value to the caller method
        self.poll_c = poll_c
        self.poll = self.poll_c()
        return self.poll
    def build_future(self, asyncio = True):
        """
        Creates a future object that is bound to the current event
        loop context, this allows for later access to the owning loop.

        :type asyncio: bool
        :param asyncio: If the asyncio loop retrieval strategy should be
        used or if instead the netius native one should be used.
        :rtype: Future
        :return: The generated future that should be bound to the
        current context.
        """
        # creates a normal future object, setting the current loop (global) as
        # the loop, then returns the future to the caller method
        loop = self.get_loop(asyncio = asyncio)
        future = asynchronous.Future(loop = loop)
        return future
def get_id(self, unique = True):
base = NAME + "-" + util.camel_to_underscore(self.name)
if not unique: return base
return base + "-" + str(self._uuid)
def get_poll(self):
return self.poll
def get_poll_name(self):
poll = self.get_poll()
name = poll.name()
return name
def get_state(self):
    """
    Retrieves the current (numeric) state of the loop system.
    :rtype: int
    :return: The current state value.
    """
    return self._state
def set_state(self, state):
    """
    Updates the current (numeric) state of the loop system.
    :type state: int
    :param state: The new state value to be set.
    """
    self._state = state
def get_state_s(self, lower = True):
    """
    Builds a human readable string describing the current
    state of the loop system.
    An optional parameter controls if the string should
    be lower cased or not.
    :type lower: bool
    :param lower: If the returned string should be converted
    into a lower cased version.
    :rtype: String
    :return: A string describing the current state of the loop
    system, as descriptive as possible.
    """
    # the state value is one-indexed, so the label lookup
    # must compensate for the offset in the strings sequence
    label = STATE_STRINGS[self._state - 1]
    return label.lower() if lower else label
def get_env(self, name, default = None, cast = None, expand = False):
    """
    Retrieves the value of the environment variable with the
    requested name, defaulting to the provided value in case
    it's not possible to find such variable.
    An optional cast type may be provided in order to cast the
    value of the environment variable into the target type.
    An optional expand flag may be set so that the variable gets
    expanded as a file system file, for this the newline values
    should be escaped as explicit '\\n' string sequences (two chars).
    Current implementation forwards the request to the current
    configuration registry so that other data providers may
    also be used in search for configuration.
    :type name: String
    :param name: The name of the environment variable that is
    meant to be retrieved from the current environment.
    :type default: Object
    :param default: The default value to be returned in case
    no value is found for the provided name.
    :type cast: Type
    :param cast: The cast type to be used to cast the value
    of the requested environment variable.
    :type expand: bool
    :param expand: If the variable should be expanded as a file
    object and stored in a temporary storage, for this situation
    the resulting object should be a string with the file path.
    :rtype: Object
    :return: The value of the requested environment variable
    properly casted into the target value.
    """
    # the configuration registry is the authoritative source,
    # missing names resolve directly to the default value
    if name not in config.CONFIGS: return default
    value = config.CONFIGS.get(name, default)
    if expand: value = self.expand(value)
    # resolves possible cast aliases registered in the configuration
    # module, then applies the cast only to concrete (non null) values
    cast = config.CASTS.get(cast, cast)
    if cast and value is not None: value = cast(value)
    return value
def expand(self, value, encoding = "utf-8", force = False):
    """
    Expands the provided string/bytes value into a file in the
    current file system so that it may be correctly used by interfaces
    that require certain values to be file system based.
    The generated file is going to be removed on the cleanup operation
    so that no temporary file leaking occurs (garbage collection).
    In case the force value is provided the file is created even
    for situations where the provided value is invalid/unset.
    :type value: String
    :param value: The string/bytes based value that is going to be
    expanded into a proper file system based (temporary) file.
    :type encoding: String
    :param encoding: The encoding that is going to be used to convert
    the value into a bytes based one in case the provided value is not
    bytes compliant (and must be converted).
    :type force: bool
    :param force: If the expansion operation should be performed even
    for situations where the value is considered invalid/unset.
    :rtype: String
    :return: The path to the temporary file that has just been generated
    for the expansion of the provided value.
    """
    # invalid/unset values are returned immediately unless the
    # force flag explicitly requests the file to be created
    if not value and not force: return value
    # normalizes the value into a bytes sequence and un-escapes
    # the explicit newline sequences into real newline characters
    is_bytes = legacy.is_bytes(value)
    if not is_bytes: value = value.encode(encoding)
    value = value.replace(b"\\n", b"\n")
    # creates the temporary file and writes the complete value
    # into it, the context manager guarantees the file is closed
    # even when the write operation raises (no handle leaking)
    fd, file_path = tempfile.mkstemp()
    os.close(fd)
    with open(file_path, "wb") as handle:
        handle.write(value)
    # registers the generated path for removal upon cleanup
    self._expanded.append(file_path)
    return file_path
def get_protocols(self):
    """
    Retrieves the complete set of protocols (as ALPN strings) that are
    going to be handled by the current protocol infra-structure.
    :rtype: List
    :return: The list containing the complete set of protocols handled
    by the current infra-structure.
    :see: https://tools.ietf.org/html/rfc7301
    """
    # no protocols by default, concrete implementations are
    # expected to override this method with their ALPN list
    return None
def get_adapter(self, name = "memory", *args, **kwargs):
    """
    Retrieves an instance of a storage adapter described
    by the provided name, note that the dynamic (extra)
    arguments are going to be used in the construction of
    the adapter instance.
    :type name: String
    :param name: The name of the adapter to be retrieved
    this should be equivalent to the adapter class name.
    :rtype: Adapter
    :return: An instance (properly configured) of the
    requested adapter (defined by the name argument).
    """
    # resolves the adapter class from its (title cased) name
    # and builds the instance forwarding the extra arguments
    adapter_c = getattr(netius.adapters, "%sAdapter" % name.title())
    return adapter_c(*args, **kwargs)
def get_auth(self, name = "memory", *args, **kwargs):
    """
    Gathers the proper authentication handler that is being
    requested with the provided name. The retrieved auth
    is a static class that should be used from its interface
    based on class based methods.
    The state of these authentication (handlers) is based
    on the "global" state of the environment (no instances).
    :type name: String
    :param name: The name of the authentication (handler)
    class that should be retrieved.
    :rtype: Auth
    :return: An authentication based class that may be used
    for the interaction of authentication methods.
    """
    # resolves and returns the (static) auth class, no instance
    # is ever created for authentication handlers
    return getattr(netius.auth, "%sAuth" % name.title())
def get_connection(self, socket):
    """
    "Resolves" the connection associated with the provided socket
    returning the structured connection object for it.
    In case no connection exists the method raises an exception
    invalidating the current logic stack.
    :type socket: Socket
    :param socket: The socket for which the connection is going to
    be returned.
    :rtype: Connection
    :return: The connection object associated with the provided
    socket reference.
    """
    # direct map lookup, raises KeyError when the socket has no
    # registered connection (intended fail-fast behavior)
    return self.connections_m[socket]
def _pending(self, connection):
    """
    Tries to perform the pending operations in the connection,
    these operations are set in the pending variable of the
    connection structure, so that it may be read latter on.
    The method returns if there are still pending operations
    after this method tick.
    :type connection: Connection
    :param connection: The connection object to be checked for
    pending operations and that is going to be used in the
    performing of these operations.
    :rtype: bool
    :return: If there are still pending operations to be
    performed in the provided connection.
    """
    # calls the run (tick) starter operation that should start
    # and run all the starters registered for the connection or
    # continue any starter operation that is pending for it
    return connection.run_starter()
def _notifies(self):
"""
Runs the notification process for the complete set of
pending notification in the notified list.
This tick operation may create tail recursion on callback
call and so the list is always processed as a queue.
The number of processed events is returned as part of the
result.
:rtype: int
:return: The number of processed pending events/notifications.
"""
# starts the counter that is going to be used to count
# the number of processed notifications, start at zero
count = 0
# iterates while there are pending notifications to be
# processed, the complete set of bind callables will be
# called for each of the notifications
while self._notified:
event, data = self._notified.pop(0)
binds = self._events.pop(event, [])
for callable in binds: callable(data)
count += 1
# returns the number of processed notifications to the
# the caller method
return count
def _delays(self):
    """
    Calls the complete set of elements that are considered to
    be part of the delayed set of methods to be called.
    These methods are expected to be run before a poll call so
    that they are run outside the handling.
    The calling of the delayed methods takes into account a
    series of assumptions including the loop identifier in order
    to avoid loops in the delayed calls/insertions.
    As part of the delay execution the pending notifications are
    also going to be processed, they must be handled together so
    that proper "recursion" is allowed (tail recursion).
    """
    # runs the merge delay lists operation, so that delay operations
    # inserted from different threads may be used and processed under
    # the current execution (as expected)
    self.delay_m()
    # in case there are no delayed items to be called returns the control
    # flow immediately, note that the notified elements (pending process)
    # are also going to be verified for presence
    if not self._delayed and not self._notified: return
    # retrieves the value for the current timestamp, to be used in
    # comparisons against the target timestamps of the callables
    current = time.time()
    # creates the lists that will hold all the values that are not
    # yet ready to be called in this iteration, the values in these
    # lists will be added back to the heap at the end of the iteration
    pendings = []
    pendings_o = []
    # iterates over all the delayed callable tuples to try to find
    # (and call) the ones that are meant to be executed in the past
    # (have a target timestamp with a value less than the current)
    while self._delayed or self._notified:
        # runs the notifies verification cycle and if there's at
        # least one processed event continues the loop meaning that
        # the if test evaluations must be re-processed
        if self._notifies(): continue
        # "pops" the current item from the delayed list (and its
        # shadow/options heap) to be used in this iteration cycle
        callable_t = heapq.heappop(self._delayed)
        callable_o = heapq.heappop(self._delayed_o)
        # unpacks the current callable tuple in iteration into a
        # target (timestamp value) and a method to be called in
        # case the target timestamp is valid (in the past)
        target, _did, method, lid, options = callable_t
        # defines the proper target value that is going to be used
        # for the comparison against the current time reference,
        # defaulting unset targets to a negative value so that the
        # associated callable is executed immediately
        if target == None: target = -1
        # tests if the current target is valid (less than or
        # equals to the current time value) and in case it's
        # not restores the value to the heap and breaks the loop
        # (the heap is ordered, no later item can be valid)
        is_valid = target <= current
        if not is_valid:
            pendings.append(callable_t)
            pendings_o.append(callable_o)
            break
        # in case the loop id present in the delayed call tuple is
        # the same as the current iteration identifier then the
        # call must be done in the next iteration cycle, this
        # verification avoids loops in calls, note that this verification
        # is only required for target zero calls referring the delayed
        # calls to be executed immediately (on next loop)
        if target == 0 and self._lid == lid:
            pendings.append(callable_t)
            pendings_o.append(callable_o)
            continue
        # unpacks the multiple options so that it's possible to determine
        # the way the delayed operation is going to be executed
        run, = options
        # in case the method is not meant to be run (probably canceled)
        # the execution of it should be properly ignored
        if not run: continue
        # calls the callback method as the delayed operation is
        # now meant to be run, this is an operation that may change
        # the current list of delayed objects (causing cycles) and so
        # must be implemented with the proper precautions, note that
        # proper exception handling is set so that proper top level
        # handling is defined and logging is performed
        try: method()
        except BaseException as exception:
            self.error(exception)
            self.log_stack(method = self.warning)
    # iterates over all the pending callable tuple values and adds
    # them back to the delayed heap list so that they are called
    # latter on (not ready to be called now)
    for pending, pending_o in zip(pendings, pendings_o):
        heapq.heappush(self._delayed, pending)
        heapq.heappush(self._delayed_o, pending_o)
    # in case the delayed list is empty resets the delay id so that
    # it never gets into a very large number (would break performance)
    if not self._delayed: self._did = 0
def _generate(self, hashed = True):
"""
Generates a random unique identifier that may be used
to uniquely identify a certain object or operation.
This method must be used carefully to avoid any unwanted
behavior resulting from value collisions.
:type hashed: bool
:param hashed: If the identifier should be hashed into
and hexadecimal string instead of an uuid based identifier.
:rtype: String
:return: The random unique identifier generated and that
may be used to identify objects or operations.
"""
identifier = str(uuid.uuid4())
identifier = identifier.upper()
if not hashed: return identifier
identifier = legacy.bytes(identifier)
hash = hashlib.sha256(identifier)
indetifier = hash.hexdigest()
identifier = identifier.upper()
return indetifier
def _socket_keepalive(
self,
_socket,
timeout = None,
interval = None,
count = None
):
if timeout == None: timeout = self.keepalive_timeout
if interval == None: interval = self.keepalive_interval
if count == None: count = self.keepalive_count
is_inet = _socket.family in (socket.AF_INET, socket.AF_INET6)
is_inet and hasattr(_socket, "TCP_KEEPIDLE") and\
self.socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE, #@UndefinedVariable
timeout
)
is_inet and hasattr(_socket, "TCP_KEEPINTVL") and\
self.socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPINTVL, #@UndefinedVariable
interval
)
is_inet and hasattr(_socket, "TCP_KEEPCNT") and\
self.socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPCNT, #@UndefinedVariable
count
)
hasattr(_socket, "SO_REUSEPORT") and\
self.socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEPORT, #@UndefinedVariable
1
)
def _ssl_init(self, strict = True, env = True):
    """
    Initializes the ssl contexts to be used at runtime: a "main"
    (default) context and a per hostname context map (SNI).
    :type strict: bool
    :param strict: If the default context should be unset when no
    servername callback registration is possible.
    :type env: bool
    :param env: If the environment/configuration should be used to
    resolve the secure flag and the per hostname contexts map.
    """
    # initializes the values of both the "main" context for ssl
    # and the map that associates an hostname and a context, both
    # are going to be used (if possible) at runtime for proper
    # resolution of both key and certificate files
    self._ssl_context = None
    self._ssl_contexts = dict()
    # verifies if the current ssl module contains a reference to
    # the ssl context class symbol, if not, the control flow is
    # returned to the caller method as it's not possible to create
    # any kind of context information for ssl
    has_context = hasattr(ssl, "SSLContext")
    if not has_context: return
    # retrieves the reference to the environment variables that are going
    # to be used in the construction of the various ssl contexts, note that
    # the secure variable is extremely important to ensure that a proper and
    # secure ssl connection is established with the peer
    secure = self.get_env("SSL_SECURE", True, cast = bool) if env else False
    contexts = self.get_env("SSL_CONTEXTS", {}, cast = dict) if env else {}
    # creates the main/default ssl context setting the default key
    # and certificate information in such context, then verifies
    # if the callback registration method is defined and if it is
    # defined registers a callback for when the hostname information
    # is available, so that proper concrete context may be set, note
    # that in case the strict mode is enabled (default) the context
    # is unset for situations where no callback registration is possible
    self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    self._ssl_ctx_base(self._ssl_context, secure = secure)
    self._ssl_ctx_protocols(self._ssl_context)
    self._ssl_certs(self._ssl_context)
    has_callback = hasattr(self._ssl_context, "set_servername_callback")
    if has_callback: self._ssl_context.set_servername_callback(self._ssl_callback)
    elif strict: self._ssl_context = None
    # retrieves the reference to the map containing the various key
    # and certificate paths for the various defined host names and
    # uses it to create the complete set of ssl context objects
    for hostname, values in legacy.iteritems(contexts):
        context = self._ssl_ctx(values, secure = secure)
        self._ssl_contexts[hostname] = (context, values)
def _ssl_destroy(self):
self._ssl_context = None
self._ssl_contexts = dict()
def _ssl_callback(self, socket, hostname, context):
    """
    Servername (SNI) callback that swaps the socket's ssl context
    by the one registered for the provided hostname (when any) and
    propagates the per hostname ssl verification values into the
    associated connection.
    :type socket: Socket
    :param socket: The (ssl) socket being handshaked.
    :type hostname: String
    :param hostname: The server name indication value sent by the peer.
    :type context: SSLContext
    :param context: The context currently set in the socket, used as
    fallback when no hostname specific context exists.
    """
    # resolves the hostname specific context (falling back to the
    # provided one) and sets it in the socket being handshaked
    context, values = self._ssl_contexts.get(hostname, (context, None))
    self._ssl_ctx_protocols(context)
    socket.context = context
    # when hostname specific values exist, propagates the expected
    # host/fingerprint verification values into the connection
    if not values: return
    ssl_host = values.get("ssl_host", None)
    ssl_fingerprint = values.get("ssl_fingerprint", None)
    if not ssl_host and not ssl_fingerprint: return
    connection = self.connections_m.get(socket, None)
    if not connection: return
    connection.ssl_host = ssl_host
    connection.ssl_fingerprint = ssl_fingerprint
def _ssl_ctx(self, values, context = None, secure = True):
    """
    Builds (or re-configures) an ssl context from the provided
    values map (key/cer/ca files and verification flags).
    :type values: Dictionary
    :param values: Map with the context configuration (key_file,
    cer_file, ca_file, ca_root and ssl_verify keys).
    :type context: SSLContext
    :param context: Optional context to be re-used, a new one is
    created when not provided.
    :type secure: bool
    :param secure: If the base (hardening) options should be set.
    :rtype: SSLContext
    :return: The configured ssl context.
    """
    context = context or ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    self._ssl_ctx_base(context, secure = secure)
    self._ssl_ctx_protocols(context)
    # unpacks the certificate/verification values, defaulting the
    # verification mode according to the ssl_verify flag
    key_file = values.get("key_file", None)
    cer_file = values.get("cer_file", None)
    ca_file = values.get("ca_file", None)
    ca_root = values.get("ca_root", True)
    ssl_verify = values.get("ssl_verify", False)
    cert_reqs = ssl.CERT_REQUIRED if ssl_verify else ssl.CERT_NONE
    self._ssl_certs(
        context,
        key_file = key_file,
        cer_file = cer_file,
        ca_file = ca_file,
        ca_root = ca_root,
        verify_mode = cert_reqs
    )
    return context
def _ssl_ctx_base(self, context, secure = True):
if secure and hasattr(ssl, "OP_NO_SSLv2"):
context.options |= ssl.OP_NO_SSLv2
if secure and hasattr(ssl, "OP_NO_SSLv3"):
context.options |= ssl.OP_NO_SSLv3
if secure and hasattr(ssl, "OP_SINGLE_DH_USE"):
context.options |= ssl.OP_SINGLE_DH_USE
if secure and hasattr(ssl, "OP_SINGLE_ECDH_USE"):
context.options |= ssl.OP_SINGLE_ECDH_USE
if secure and hasattr(ssl, "OP_CIPHER_SERVER_PREFERENCE"):
context.options |= ssl.OP_CIPHER_SERVER_PREFERENCE
if secure and hasattr(context, "set_ecdh_curve"):
context.set_ecdh_curve("prime256v1")
if secure and SSL_DH_PATH and hasattr(context, "load_dh_params"):
context.load_dh_params(SSL_DH_PATH)
def _ssl_ctx_protocols(self, context):
    """
    Registers the protocol list (when available) in the provided
    context using both the ALPN and NPN negotiation extensions.
    """
    self._ssl_ctx_alpn(context)
    self._ssl_ctx_npn(context)
def _ssl_ctx_alpn(self, context):
if not hasattr(ssl, "HAS_ALPN"): return
if not ssl.HAS_ALPN: return
if hasattr(context, "set_alpn_protocols"):
protocols = self.get_protocols()
protocols and context.set_alpn_protocols(protocols)
def _ssl_ctx_npn(self, context):
if not hasattr(ssl, "HAS_NPN"): return
if not ssl.HAS_NPN: return
if hasattr(context, "set_npn_protocols"):
protocols = self.get_protocols()
protocols and context.set_npn_protocols(protocols)
def _ssl_certs(
    self,
    context,
    key_file = None,
    cer_file = None,
    ca_file = None,
    ca_root = False,
    verify_mode = ssl.CERT_NONE,
    check_hostname = False
):
    """
    Loads the certificate chain and verification settings into
    the provided ssl context, defaulting the key and certificate
    to the bundled "net.key"/"net.cer" files under base/extras.
    :type context: SSLContext
    :param context: The context to receive the certificates.
    :type key_file: String
    :param key_file: Path to the private key file (optional).
    :type cer_file: String
    :param cer_file: Path to the certificate file (optional).
    :type ca_file: String
    :param ca_file: Path to an extra ca bundle file (optional).
    :type ca_root: bool
    :param ca_root: If the system/default ca certificates should
    also be loaded into the context.
    :type verify_mode: int
    :param verify_mode: The peer certificate verification mode.
    :type check_hostname: bool
    :param check_hostname: If hostname checking should be enabled.
    """
    # resolves the bundled default key/certificate paths, relative
    # to the directory of the current module file
    dir_path = os.path.dirname(__file__)
    root_path = os.path.join(dir_path, "../")
    root_path = os.path.normpath(root_path)
    base_path = os.path.join(root_path, "base")
    extras_path = os.path.join(base_path, "extras")
    key_file = key_file or os.path.join(extras_path, "net.key")
    cer_file = cer_file or os.path.join(extras_path, "net.cer")
    context.load_cert_chain(cer_file, keyfile = key_file)
    context.verify_mode = verify_mode
    # hostname checking is only available in more recent versions
    # of the ssl module, so the setting is conditional
    if hasattr(context, "check_hostname"): context.check_hostname = check_hostname
    if ca_file: context.load_verify_locations(cafile = ca_file)
    if ca_root and hasattr(context, "load_default_certs"):
        context.load_default_certs(purpose = ssl.Purpose.SERVER_AUTH)
    if ca_root and SSL_CA_PATH:
        context.load_verify_locations(cafile = SSL_CA_PATH)
def _ssl_upgrade(
    self,
    _socket,
    key_file = None,
    cer_file = None,
    ca_file = None,
    ca_root = True,
    server = True,
    ssl_verify = False
):
    """
    Upgrades the provided plain socket into an ssl wrapped
    one, delegating the complete operation to the ssl wrap
    method (thin alias).
    """
    return self._ssl_wrap(
        _socket,
        key_file = key_file,
        cer_file = cer_file,
        ca_file = ca_file,
        ca_root = ca_root,
        server = server,
        ssl_verify = ssl_verify
    )
def _ssl_wrap(
    self,
    _socket,
    key_file = None,
    cer_file = None,
    ca_file = None,
    ca_root = True,
    server = True,
    ssl_verify = False
):
    """
    Wraps the provided plain socket into an ssl one, using the
    previously built ssl context when available or falling back
    to the module level wrapping strategy otherwise.
    Handshake is deferred (do_handshake_on_connect unset) so it
    can be driven by the (non blocking) event loop.
    :type _socket: Socket
    :param _socket: The plain socket to be wrapped with ssl.
    :rtype: Socket
    :return: The ssl wrapped version of the provided socket.
    """
    # resolves the bundled default key/certificate paths, relative
    # to the directory of the current module file
    dir_path = os.path.dirname(__file__)
    root_path = os.path.join(dir_path, "../")
    root_path = os.path.normpath(root_path)
    base_path = os.path.join(root_path, "base")
    extras_path = os.path.join(base_path, "extras")
    key_file = key_file or os.path.join(extras_path, "net.key")
    cer_file = cer_file or os.path.join(extras_path, "net.cer")
    cert_reqs = ssl.CERT_REQUIRED if ssl_verify else ssl.CERT_NONE
    # when no context is available falls back to the module level
    # wrapping function, NOTE(review): ssl.wrap_socket is deprecated
    # and removed in Python 3.12 — confirm target interpreter support
    if not self._ssl_context: return ssl.wrap_socket(
        _socket,
        keyfile = key_file,
        certfile = cer_file,
        server_side = server,
        cert_reqs = cert_reqs,
        ca_certs = ca_file,
        ssl_version = ssl.PROTOCOL_SSLv23,
        do_handshake_on_connect = False
    )
    # re-loads the certificates into the shared context and wraps
    # the socket with it, handshake remains deferred
    self._ssl_certs(
        self._ssl_context,
        key_file = key_file,
        cer_file = cer_file,
        ca_file = ca_file,
        ca_root = ca_root,
        verify_mode = cert_reqs
    )
    socket_ssl = self._ssl_context.wrap_socket(
        _socket,
        server_side = server,
        do_handshake_on_connect = False
    )
    return socket_ssl
def _ssl_handshake(self, connection):
    """
    Low level SSL handshake operation that triggers or resumes
    the handshake process.
    It should be able to handle the exceptions raised by the
    concrete handshake operation so that no exception is raised
    (unhandled) to the upper layers.
    :type connection: Connection
    :param connection: The connection that is going to be used in the
    handshake operation, this should contain a valid/open socket that
    should be registered for both read and write in the poll.
    """
    try:
        # unsets the handshake flag associated with the ssl, meaning
        # that the connection is considered to be currently under the
        # handshaking process (may succeed in the current tick)
        connection.ssl_handshake = False
        connection.ssl_connecting = True
        # tries to run the handshake process, this represents
        # a series of small operations both of writing and reading
        # that are required to establish and guarantee a secure
        # connection from this moment on, note that this operation
        # may fail (non blocking issues) and further retries must
        # be attempted to finish establishing the connection
        # (note: _socket is always bound before do_handshake can
        # raise, so the except branch may safely reference it)
        _socket = connection.socket
        _socket.do_handshake()
        # sets the ssl handshake flag in the connection, effectively
        # indicating that the ssl handshake process has finished, note
        # that the connecting flag is also unset (ssl connect finished)
        connection.ssl_handshake = True
        connection.ssl_connecting = False
        # calls the end starter method in the connection so that the
        # connection gets notified that the current starter in process
        # has finished and that the next one should be called as
        # soon as possible to go further in the connection initialization
        connection.end_starter()
    except ssl.SSLError as error:
        # tries to retrieve the error code from the argument information
        # in the error, in case the error is defined in the list of
        # valid errors, the handshake is delayed until either a write
        # or read operation is available (retry process)
        error_v = error.args[0] if error.args else None
        if error_v in SSL_VALID_ERRORS:
            if error_v == ssl.SSL_ERROR_WANT_WRITE and\
                not self.is_sub_write(_socket):
                self.sub_write(_socket)
            elif self.is_sub_write(_socket):
                self.unsub_write(_socket)
        else: raise
def _expand_destroy(self):
"""
Destroys the complete set of infra-structure (files) associated
with the expansion operation on environment values.
This is required to avoid any kind of file leaking, should be run
on the cleanup operation of the infra-structure.
"""
# iterates over the complete list of expanded file paths to remove
# their corresponding files (graceful error handling)
for expanded in self._expanded:
try: os.remove(expanded)
except OSError: pass
# deletes the complete set of path references from the expanded
# list so that it is not going to be used any longer
del self._expanded[:]
def _level(self, level):
"""
Converts the provided logging level value into the best
representation of it, so that it may be used to update
a logger's level of representation.
This method takes into account the current interpreter
version so that no problem occur.
:type level: String/int
:param level: The level value that is meant to be converted
into the best representation possible.
:rtype: int
:return: The best representation of the level so that it may
be used freely for the setting of logging levels under the
current running interpreter.
"""
level_t = type(level)
if level_t == int: return level
if level == None: return level
if level == "SILENT": return log.SILENT
if hasattr(logging, "_checkLevel"):
return logging._checkLevel(level)
return logging.getLevelName(level)
def _format_delta(self, time_delta, count = 2):
days = time_delta.days
hours, remainder = divmod(time_delta.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
delta_s = ""
if days > 0:
delta_s += "%dd " % days
count -= 1
if count == 0: return delta_s.strip()
if hours > 0:
delta_s += "%dh " % hours
count -= 1
if count == 0: return delta_s.strip()
if minutes > 0:
delta_s += "%dm " % minutes
count -= 1
if count == 0: return delta_s.strip()
delta_s += "%ds" % seconds
return delta_s.strip()
def _wait_forever(self):
    # blocks the calling thread indefinitely, sleeping in one
    # minute cycles (never returns under normal operation)
    while True: time.sleep(60)
class DiagBase(AbstractBase):
    """
    Diagnostics aware version of the base class that counts
    the read, write and error operations performed, exposing
    these counters as part of the info dict.
    """
    def __init__(self, *args, **kwargs):
        AbstractBase.__init__(self, *args, **kwargs)
        # counters for the diagnostics operations, incremented
        # on each of the corresponding operation ticks
        self.reads_c = 0
        self.writes_c = 0
        self.errors_c = 0
    def reads(self, *args, **kwargs):
        # runs the base behavior and accounts for the operation
        AbstractBase.reads(self, *args, **kwargs)
        self.reads_c += 1
    def writes(self, *args, **kwargs):
        # runs the base behavior and accounts for the operation
        AbstractBase.writes(self, *args, **kwargs)
        self.writes_c += 1
    def errors(self, *args, **kwargs):
        # runs the base behavior and accounts for the operation
        AbstractBase.errors(self, *args, **kwargs)
        self.errors_c += 1
    def info_dict(self, full = False):
        # extends the base info dict with the diagnostics counters
        info = AbstractBase.info_dict(self, full = full)
        info.update(
            reads_c = self.reads_c,
            writes_c = self.writes_c,
            errors_c = self.errors_c
        )
        return info
class BaseThread(threading.Thread):
    """
    The top level thread class that is meant to encapsulate
    a running base object and run it in a new context.
    This base thread may be used to run a network loop allowing
    a main thread to continue with execution logic.
    """
    def __init__(self, owner = None, daemon = False, *args, **kwargs):
        """
        :type owner: AbstractBase
        :param owner: The base (loop) object to be run by this thread.
        :type daemon: bool
        :param daemon: If the thread should be marked as daemon.
        """
        threading.Thread.__init__(self, *args, **kwargs)
        self.owner = owner
        self.daemon = daemon
    def run(self):
        # runs the base thread behavior and then starts the owner
        # loop, unsetting the owner references once the loop is
        # done so that no stale references are left behind
        threading.Thread.run(self)
        if not self.owner: return
        self.owner._thread = self
        try:
            self.owner.start()
        finally:
            self.owner._thread = None
            self.owner = None
def ensure_main(factory = None):
    # ensures that a main (global) instance exists, creating one
    # through the provided factory (or AbstractBase) when missing
    if AbstractBase.get_main(): return
    factory = factory or AbstractBase
    instance = factory()
    AbstractBase.set_main(instance)
def get_main(factory = None, ensure = True):
    # retrieves the current main (global) instance, optionally
    # ensuring that one exists before the retrieval
    if ensure: ensure_main(factory = factory)
    return AbstractBase.get_main()
def get_loop(factory = None, ensure = True, asyncio = True):
    # resolves the current event loop, falling back to the main
    # (global) instance when no loop is otherwise available
    if ensure: ensure_main(factory = factory)
    loop = AbstractBase.get_loop(asyncio = asyncio)
    loop = loop or get_main(factory = factory)
    return loop
def get_event_loop(*args, **kwargs):
    # asyncio compatible alias for the get_loop function
    return get_loop(*args, **kwargs)
def get_poll():
    # resolves the poll of the main (global) instance, an
    # invalid value is returned when no main instance exists
    main = get_main()
    return main.poll if main else None
def build_future(asyncio = True):
    # builds a future bound to the main instance's loop, an
    # invalid value is returned when no main instance exists
    main = get_main()
    return main.build_future(asyncio = asyncio) if main else None
def ensure(coroutine, args = None, kwargs = None, thread = None):
    """
    Ensures the execution of the provided coroutine under the
    current (global) event loop.
    The args/kwargs defaults use None sentinels (instead of the
    previous shared mutable defaults) to avoid cross call state
    leaking; callers passing nothing observe the same behavior.
    :type coroutine: Coroutine
    :param coroutine: The coroutine to be ensured/scheduled.
    :rtype: Future
    :return: The future/task like object resulting from the
    ensure operation on the loop.
    """
    loop = get_loop()
    return loop.ensure(
        coroutine,
        args = [] if args is None else args,
        kwargs = {} if kwargs is None else kwargs,
        thread = thread
    )
def ensure_pool(coroutine, args = None, kwargs = None):
    """
    Ensures the execution of the provided coroutine under the
    thread pool, equivalent to calling ensure with the thread
    flag set.
    Uses None sentinels for args/kwargs to avoid the shared
    mutable default argument pitfall.
    """
    return ensure(
        coroutine,
        args = [] if args is None else args,
        kwargs = {} if kwargs is None else kwargs,
        thread = True
    )
# resolves the concrete base class to be exported, using the
# diagnostics aware version when the DIAG flag is set in the
# current configuration/environment
is_diag = config.conf("DIAG", False, cast = bool)
if is_diag: Base = DiagBase
else: Base = AbstractBase
new logging unload
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2017 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2017 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import copy
import json
import heapq
import signal
import logging
import hashlib
import tempfile
import traceback
import netius.pool
import netius.adapters
from . import log
from . import util
from . import compat
from . import asynchronous
from .. import middleware
from .conn import * #@UnusedWildImport
from .poll import * #@UnusedWildImport
from .asynchronous import * #@UnusedWildImport
# global naming and identification values for the netius
# infra-structure, exposed through agent/identifier strings
NAME = "netius"
""" The global infra-structure name to be used in the
identification of both the clients and the services this
value may be prefixed or suffixed """
VERSION = "1.16.19"
""" The version value that identifies the version of the
current infra-structure, all of the services and clients
may share this value """
PLATFORM = "%s %d.%d.%d.%s %s" % (
    sys.subversion[0] if hasattr(sys, "subversion") else "CPython",
    sys.version_info[0],
    sys.version_info[1],
    sys.version_info[2],
    sys.version_info[3],
    sys.platform
)
""" Extra system information containing some of the details
of the technical platform that is running the system, this
string should be exposed carefully to avoid extra information
from being exposed to outside agents """
IDENTIFIER_SHORT = "%s/%s" % (NAME, VERSION)
""" The short version of the current environment's identifier
meant to be used in production like environment as it hides some
of the critical and internal information of the system """
IDENTIFIER_LONG = "%s/%s (%s)" % (NAME, VERSION, PLATFORM)
""" Longest version of the system identifier, to be used in the
development like environment as it shows critical information
about the system internals that may expose the system """
IDENTIFIER = IDENTIFIER_LONG if config._is_devel() else IDENTIFIER_SHORT
""" The identifier that may be used to identify an user agent
or service running under the current platform, this string
should comply with the typical structure for such values,
by default this value is set with the short version of the
identifier (less information) but this may be changed at
runtime if the current verbosity level is changed """
# windows socket api (wsa) and ssl specific numeric error
# codes, used as replacements/extensions of the posix codes
WSAEWOULDBLOCK = 10035
""" The wsa would block error code meant to be used on
windows environments as a replacement for the would block
error code that indicates the failure to operate on a non
blocking connection """
WSAECONNABORTED = 10053
""" Error code meant to be raised when a connection is aborted
from the other peer meaning that that client or a server in the
as abruptly dropped the connection """
WSAECONNRESET = 10054
""" Code that is used when a connection is reset meaning that
the connection as been disconnected using a graceful approach
and without raising any extraneous problems """
SSL_ERROR_CERT_ALREADY_IN_HASH_TABLE = 101
""" Error raised under the ssl infra-structure for situations
where the certificate does not required re-loading as it is
already present in the hash table, this error may be safely
ignored as it does not represent a threat """
POLL_ORDER = (
EpollPoll,
KqueuePoll,
PollPoll,
SelectPoll
)
""" The order from which the poll methods are going to be
selected from the fastest to the slowest, in case no explicit
poll method is defined for a base service they are selected
based on this list testing them for acceptance first """
SILENT_ERRORS = (
errno.ECONNABORTED,
errno.ECONNRESET,
errno.EPIPE,
WSAECONNABORTED,
WSAECONNRESET
)
""" List that contain the various connection error states that
should not raise any extra logging information because even though
they should drop the connection they are expected """
VALID_ERRORS = (
errno.EWOULDBLOCK,
errno.EAGAIN,
errno.EPERM,
errno.ENOENT,
errno.EINPROGRESS,
WSAEWOULDBLOCK
)
""" List containing the complete set of error that represent
non ready operations in a non blocking socket """
SSL_SILENT_ERRORS = (
ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN
)
""" The list containing the errors that should be silenced
while still making the connection dropped as they are expected
to occur and should not be considered an exception """
SSL_VALID_ERRORS = (
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE,
SSL_ERROR_CERT_ALREADY_IN_HASH_TABLE
)
""" The list containing the valid errors for the handshake
operation of the ssl connection establishment """
SSL_ERROR_NAMES = {
ssl.SSL_ERROR_WANT_READ : "SSL_ERROR_WANT_READ",
ssl.SSL_ERROR_WANT_WRITE : "SSL_ERROR_WANT_WRITE",
SSL_ERROR_CERT_ALREADY_IN_HASH_TABLE : "SSL_ERROR_CERT_ALREADY_IN_HASH_TABLE"
}
""" The dictionary containing the association between the
various ssl errors and their string representation """
SSL_VALID_REASONS = (
"CERT_ALREADY_IN_HASH_TABLE",
)
""" The list containing the valid reasons for the handshake
operation of the ssl connection establishment """
# socket type enumeration values, used in static references to
# the kind of underlying transport protocol in use
TCP_TYPE = 1
""" The type enumeration value that represents the tcp (stream)
based communication protocol, for various usages in the base
netius communication infra-structure """
UDP_TYPE = 2
""" The datagram based udp protocol enumeration value to be used
in static references to this kind of socket usage """
# the various states of the service/loop life cycle, the values
# are one-based so that they map directly into STATE_STRINGS
# through a simple `STATE_STRINGS[state - 1]` resolution
STATE_STOP = 1
""" The stop state value, this value is set when the service
is either in the constructed stage or when the service has been
stop normally or with an error """
STATE_START = 2
""" The start state set when the service is in the starting
stage and running, normal state """
STATE_PAUSE = 3
""" The pause state set for a service for which the main event
loop has been paused and should be resumed latter """
STATE_CONFIG = 4
""" The configuration state that is set when the service is
preparing to become started and the configuration attributes
are being set according to pre-determined indications """
STATE_POLL = 5
""" State to be used when the service is in the polling part
of the loop, this is the most frequent state in an idle service
as the service "spends" most of its time in it """
STATE_TICK = 6
""" Tick state representative of the situation where the loop
tick operation is being started and all the pre tick handlers
are going to be called for pre-operations """
STATE_READ = 7
""" Read state that is set when the connection are being read
and the on data handlers are being called, this is the part
where all the logic driven by incoming data is being called """
STATE_WRITE = 8
""" The write state that is set on the writing of data to the
connections, this is a pretty "fast" state as no logic is
associated with it """
STATE_ERRROR = 9
""" The error state to be used when the connection is processing
any error state coming from its main select operation and associated
with a certain connection (very rare) """
STATE_ERROR = STATE_ERRROR
""" Correctly spelled alias for the (historical) STATE_ERRROR
constant, the misspelled name is kept (and still used internally)
for backward compatibility with existing external references """
STATE_STRINGS = (
    "STOP",
    "START",
    "PAUSE",
    "CONFIG",
    "POLL",
    "TICK",
    "READ",
    "WRITE",
    "ERROR"
)
""" Sequence that contains the various strings associated with
the various states for the base service, this may be used to
create an integer to string resolution mechanism """
# keep-alive related timing constants controlling when idle
# connections are probed and when they are considered dead
KEEPALIVE_TIMEOUT = 300
""" The amount of time in seconds that a connection is set as
idle until a new refresh token is sent to it to make sure that
it's still online and not disconnected, make sure that this
value is high enough that it does not consume to much bandwidth """
KEEPALIVE_INTERVAL = KEEPALIVE_TIMEOUT // 10
""" The time between the retrying of "ping" packets, this value
does not need to be too large and should not be considered too
important (may be calculated automatically), derived from the
timeout through (floor) integer division """
KEEPALIVE_COUNT = 3
""" The amount of times the "ping" packet is re-sent until the
connection is considered to be offline and is dropped """
LOG_FORMAT = "%(asctime)s [%(levelname)s] %(message)s"
""" The format that is going to be used by the logger of the
netius infra-structure for debugging purposes it should allow
and end developer to dig into the details of the execution """
# initializes the various paths that are going to be used for
# the base files configuration in the complete service infra
# structure, these should include the ssl based files
BASE_PATH = os.path.dirname(__file__)
EXTRAS_PATH = os.path.join(BASE_PATH, "extras")
SSL_KEY_PATH = os.path.join(EXTRAS_PATH, "net.key")
SSL_CER_PATH = os.path.join(EXTRAS_PATH, "net.cer")
SSL_CA_PATH = os.path.join(EXTRAS_PATH, "net.ca")
SSL_DH_PATH = os.path.join(EXTRAS_PATH, "dh.pem")
# the ca bundle and dh parameters files are optional, when the
# files are not present their paths are unset (set to None) so
# that downstream code may test for their availability
if not os.path.exists(SSL_CA_PATH): SSL_CA_PATH = None
if not os.path.exists(SSL_DH_PATH): SSL_DH_PATH = None
class AbstractBase(observer.Observable):
"""
Base network structure to be used by all the network
capable infra-structures (eg: servers and clients).
Should handle all the nonblocking event loop so that
the read and write operations are easy to handle.
"""
_MAIN = None
""" Reference to the top level main instance responsible
for the control of the main thread loop """
    def __init__(self, name = None, handlers = None, *args, **kwargs):
        """
        Constructor of the abstract base infra-structure, initializes
        the complete set of internal structures (logging, polling,
        pools, delayed operation queues, etc.) with default values,
        most of which may be overridden through keyword arguments.

        :type name: String
        :param name: The name of the instance, defaults to the name
        of the concrete class when not provided.
        :type handlers: Tuple/List
        :param handlers: The logging handlers to be used, defaults to
        a single (console) stream handler.
        """
        observer.Observable.__init__(self, *args, **kwargs)
        cls = self.__class__
        # determines the best (fastest) polling mechanism available
        # on the current system, to be used as the default one
        poll = cls.test_poll()
        # logging and diagnostics related configuration
        self.name = name or self.__class__.__name__
        self.handler_stream = logging.StreamHandler()
        self.handlers = handlers or (self.handler_stream,)
        self.level = kwargs.get("level", logging.INFO)
        self.diag = kwargs.get("diag", False)
        self.middleware = kwargs.get("middleware", [])
        self.children = kwargs.get("children", 0)
        # thread identification and pool references, unset until the
        # loading/start process populates them
        self.tid = None
        self.tname = None
        self.logger = None
        self.logging = None
        self.npool = None
        self.tpool = None
        self.fpool = None
        # polling mechanism instantiation, the poll class may be
        # overridden through the "poll" keyword argument
        self.poll_c = kwargs.get("poll", poll)
        self.poll = self.poll_c()
        self.poll_name = self.poll.name()
        # NOTE(review): POLL_TIMEOUT is defined elsewhere in this module
        self.poll_timeout = kwargs.get("poll_timeout", POLL_TIMEOUT)
        self.keepalive_timeout = kwargs.get("keepalive_timeout", KEEPALIVE_TIMEOUT)
        self.keepalive_interval = kwargs.get("keepalive_interval", KEEPALIVE_INTERVAL)
        self.keepalive_count = kwargs.get("keepalive_count", KEEPALIVE_COUNT)
        self.poll_owner = True
        self.diag_app = None
        self.middleware_l = []
        # connection registries and callback maps
        self.connections = []
        self.connections_m = {}
        self.callbacks_m = {}
        # private state: loop/delay identifiers, life cycle flags,
        # event binds and the delayed (scheduled) operation queues
        self._uuid = uuid.uuid4()
        self._lid = 0
        self._did = 0
        self._main = False
        self._running = False
        self._pausing = False
        self._loaded = False
        self._forked = False
        self._child = False
        self._childs = []
        self._events = {}
        self._notified = []
        self._delayed = []
        self._delayed_o = []
        self._delayed_n = []
        # lock protecting the "delayed next" list, which may be
        # appended to from threads other than the event loop one
        self._delayed_l = threading.RLock()
        self._extra_handlers = []
        self._expanded = []
        self._ssl_init()
        self.set_state(STATE_STOP)
@classmethod
def test_poll(cls, preferred = None):
# sets the initial selected variable with the unselected
# (invalid) value so that at lease one selection must be
# done in order for this method to succeed
selected = None
# iterates over all the poll classes ordered by preference
# (best first) and tries to find the one that better matched
# the current situation, either the preferred poll method or
# the most performant one in case it's not possible
for poll in POLL_ORDER:
if not poll.test(): continue
if not selected: selected = poll
if not preferred: break
name = poll.name()
if not name == preferred: continue
selected = poll
break
# in case no polling method was selected must raise an exception
# indicating that no valid polling mechanism is available
if not selected: raise errors.NetiusError(
"No valid poll mechanism available"
)
# returns the selected polling mechanism class to the caller method
# as expected by the current method
return selected
@classmethod
def get_loop(cls, asyncio = False):
loop = cls.get_asyncio() if asyncio else None
loop = loop or cls.get_main()
return loop
@classmethod
def get_main(cls):
return cls._MAIN
    @classmethod
    def get_asyncio(cls):
        # tries to retrieve the asyncio module, when not available the
        # current environment does not support it and an invalid value
        # is returned instead
        asyncio = asynchronous.get_asyncio()
        if not asyncio: return None
        # NOTE(review): this peeks at the private `_local._loop` field of
        # the event loop policy to avoid implicitly creating a loop via
        # get_event_loop(), this relies on CPython asyncio internals —
        # confirm it still holds for the targeted Python versions
        policy = asyncio.get_event_loop_policy()
        if not policy._local._loop: return None
        return asyncio.get_event_loop()
@classmethod
def set_main(cls, instance, set_legacy = True):
cls._MAIN = instance
if not set_legacy: return
asyncio = asynchronous.get_asyncio()
if not asyncio: return
cls.patch_asyncio()
if instance: loop = compat.CompatLoop(instance)
else: loop = None
asyncio.set_event_loop(loop)
    @classmethod
    def unset_main(cls, set_legacy = True):
        """
        Unsets the currently registered main instance, equivalent
        to setting the main instance to an invalid value.

        :type set_legacy: bool
        :param set_legacy: If the asyncio event loop should also be
        unset as part of the operation.
        """
        cls.set_main(None, set_legacy = set_legacy)
@classmethod
def patch_asyncio(cls):
asyncio = asynchronous.get_asyncio()
if not asyncio: return
if hasattr(asyncio, "_patched"): return
if hasattr(asyncio.tasks, "_PyTask"):
asyncio.Task = asyncio.tasks._PyTask #@UndefinedVariable
asyncio.tasks.Task = asyncio.tasks._PyTask #@UndefinedVariable
asyncio._patched = True
def destroy(self):
observer.Observable.destroy(self)
# iterates over the complete set of sockets in the connections
# map to properly close them (avoids any leak of resources)
for _socket in self.connections_m: _socket.close()
# clears some of the internal structure so that they don't
# get called any longer (as expected)
self.connections_m.clear()
self.callbacks_m.clear()
def call_safe(self, callable, args = [], kwargs = {}):
"""
Calls the provided callable object using a safe strategy
meaning that in case there's an exception raised in the
middle of the callable execution it is going to be caught
and the details of it logged.
:type callable: Function
:param callable: The callable function that is going to
be called using the safe approach.
:type args: List
:param args: The normal (non keyword) arguments to be sent
to the callable.
:type kwargs: Dictionary
:param kwargs: The set of keyword arguments that are going
to be sent to the callable.
:rtype: Object
:return: The result of the calling of the callable.
"""
try:
# calls the provided callable (method) with the
# provided arguments and keyword arguments returning
# the result to the caller method
return callable(*args, **kwargs)
except BaseException as exception:
# in case there's an exception displays a warning
# about the raised exception and the logs the current
# stack so that the exception is traceable
self.warning(exception)
self.log_stack()
def wait_event(self, callable, name = None):
# tries to retrieve the list of binds for the event
# to be "waited" for, this list should contain the
# complete list of callables to be called upon the
# event notification/trigger
binds = self._events.get(name, [])
if callable in binds: return
# adds the callable to the list of binds for the event
# the complete set of callables will be called whenever
# the a notification for the event occurs
binds.append(callable)
self._events[name] = binds
def unwait_event(self, callable, name = None):
# tries to retrieve the list of binds for the event
# and verifies that the callable is present on them
# and if that's not the case ignores the operation
binds = self._events.get(name, None)
if not binds or not callable in binds: return
# removes the callable from the binds list so that
# it's no longer going to be called
binds.remove(callable)
# verifies if the binds list is still valid deleting
# it from the map of events otherwise
if binds: self._events[name] = binds
else: del self._events[name]
    def delay(
        self,
        callable,
        timeout = None,
        immediately = False,
        verify = False,
        safe = False
    ):
        """
        Schedules the provided callable to be executed by the event
        loop, either on the next tick (default), immediately (before
        other pending operations) or after the provided timeout.

        :type callable: Function
        :param callable: The callable to be executed by the loop.
        :type timeout: float
        :param timeout: The amount of seconds after which the callable
        should be executed, overrides the immediately flag when set.
        :type immediately: bool
        :param immediately: If the callable should be executed as soon
        as possible (target time of -1).
        :type verify: bool
        :param verify: If duplicated scheduling of the same callable
        for the same target time should be avoided.
        :type safe: bool
        :param safe: If the thread safe strategy (delay_s) should be
        used when not running on the main (loop) thread.
        :rtype: tuple
        :return: The callable tuple that may latter be used to control
        the delayed operation (eg: cancellation), not returned when the
        safe (cross thread) path is taken.
        """
        # in case the safe flag is set and the thread trying to add
        # delayed elements is not the main the proper (safe) method
        # is used meaning a safe execution is targeted
        if safe and not self.is_main():
            return self.delay_s(
                callable,
                timeout = timeout,
                immediately = immediately,
                verify = verify
            )
        # creates the original target value with a zero value (forced
        # execution in next tick) in case the timeout value is set the
        # value is incremented to the current time, then created the
        # callable original tuple with the target (time) and the callable
        target = -1 if immediately else 0
        if timeout: target = time.time() + timeout
        callable_o = (target, callable)
        callable_o = legacy.orderable(callable_o)
        # in case the verify flag is set, must verify if the callable
        # is already inserted in the list of delayed operations in
        # case it does returns immediately to avoid duplicated values
        is_duplicate = verify and callable_o in self._delayed_o
        if is_duplicate: return
        # creates the list that is going to be used to populate the
        # options to be used by the calling tuple
        options = [True]
        # creates the "final" callable tuple with the target time, the
        # callable and the loop id (lid) then inserts both the delayed
        # (original) callable tuple and the callable tuple in the lists
        callable_t = (target, self._did, callable, self._lid, options)
        callable_t = legacy.orderable(callable_t)
        heapq.heappush(self._delayed, callable_t)
        heapq.heappush(self._delayed_o, callable_o)
        # increments the "delay" identifier by one, this identifier is
        # used to correctly identify a delayed object so that for the
        # same target value a sorting is performed (fifo like)
        self._did += 1
        # returns the callable tuple that may be latter used to control
        # the execution or not of the delayed operation (cancellation)
        return callable_t
def delay_s(
self,
callable,
timeout = None,
immediately = True,
verify = False,
wakeup = True
):
"""
Safe version of the delay operation to be used to insert a callable
from a different thread (implied lock mechanisms).
This method should only be used from different threads as there's
a huge performance impact created from using this method instead of
the local event loop one (delay()).
:type callable: Function
:param callable: The callable that should be called on the next tick
according to the event loop rules.
:type timeout: int
:param timeout: The timeout for the callable to be called, this value
may not reflect an accurate value and depends greatly on the minimum
resolution value of the polling mechanism.
:type immediately: bool
:param immediately: If the callable should be called as soon as possible,
this is equivalent to setting timeout to -1.
:type verify: bool
:param verify: If the delayed sequences should be verified for possible
duplicated, avoiding possible issues.
:type wakeup: bool
:param wakeup: If the main event loop should be awaken so that the
callable is processed as soon as possible.
"""
# creates the next element tuple that is going to be scheduled according
# to the definition provided to the method
next = (callable, timeout, immediately, verify)
# acquires the lock that controls the access to the delayed for next
# tick list and then adds the callable to such list, please note that
# the delayed (next) list is only going to be joined/merged with delay
# operations and list on the next tick (through the merge operation)
self._delayed_l.acquire()
try: self._delayed_n.append(next)
finally: self._delayed_l.release()
# in case the wakeup flag is set this delay operation should have
# been called from a different thread and the event loop should
# awaken as soon as possible to handle the event
if wakeup: self.wakeup()
def delay_m(self):
"""
Runs the merge operation so that the delay next list (used by the delay
safe operation) is merged with the delayed and the delayed ordered
structures, making the events (effectively) ready to be executed by delays.
"""
# verifies if the delay next list is not valid or empty and if that's
# the case returns immediately as there's nothing to be merged
if not self._delayed_n: return
# iterates over the complete set of next elements in the delay next list
# and schedules them as delay for the next tick execution
for next in self._delayed_n:
callable, timeout, immediately, verify = next
self.delay(
callable,
timeout = timeout,
immediately = immediately,
verify = verify
)
# deletes the complete set of elements present in the delay next list, this
# is considered to be equivalent to the empty operation
del self._delayed_n[:]
    def ensure(
        self,
        coroutine,
        args = [],
        kwargs = {},
        thread = None,
        future = None,
        immediately = True
    ):
        """
        Main method for the queuing/startup of an asynchronous coroutine
        of async method, this should be called at the most higher level
        of the execution of a chained coroutine sequence.

        It should ensure that the provided callable is wrapped into a
        coroutine if that's the case, so that chained calling is not
        violated by a non compliant element.

        The method should create a proper sequence/pipelined handling of
        the various chained coroutine calls so that they are called one
        after the other using futures for such handling. The final "simple"
        abstraction should expose one "parent" future object as the interface.

        Multiple calls to this method should generate different async
        contexts (with different parent future instances).

        :type coroutine: Coroutine/CoroutineObject/Callable
        :param coroutine: The callable or coroutine that is going to be
        "inserted" for an asynchronous execution, if a callable is provided
        a coroutine is created wrapping the execution of such callable.
        :type args: List
        :param args: The list of "normal" arguments to be sent to the
        coroutine as parts of its signature.
        :type kwargs: Dictionary
        :param kwargs: The keyword arguments to be sent to the coroutine.
        :type thread: bool
        :param thread: If the execution of the coroutine should be done
        using a different thread (via thread pool), this may be interesting
        if the coroutine includes blocking i/o calls.
        :type future: Future
        :param future: If provided ensures that non new future object is going
        to be created for this async context to be created.
        :type immediately: bool
        :param immediately: If the callback should be scheduler in the event
        pool to be executed immediately (as soon as possible).
        :rtype: Future
        :return: The future that has been created for this new async context
        or the provided one if one was provided (this is considered to be the
        parent future of the complete coroutine chain).
        """
        # NOTE(review): args and kwargs use mutable default values, they
        # are only read (never mutated) here so the sharing of the default
        # instances across calls is benign
        # tries to determine if the provided callable is really
        # a coroutine and uses that condition to determine the
        # default value for the thread argument, notice that the
        # verification is also performed for the coroutine object
        is_coroutine = asynchronous.is_coroutine(coroutine)
        is_coroutine_object = asynchronous.is_coroutine_object(coroutine)
        is_defined = is_coroutine or is_coroutine_object
        if thread == None: thread = False if is_defined else True
        # verifies if a future variable is meant to be re-used
        # or if instead a new one should be created for the new
        # ensure execution operation
        future = future or self.build_future()
        # in case the provided coroutine callable is not really
        # a coroutine and instead a "normal" function a conversion
        # is required so that there's compatibility between the
        # coroutine model and the typical sync model
        if not is_defined:
            # saves the "original" callable so that it may be latter
            # used as part of the back calling process
            coroutine_c = coroutine
            # creates the coroutine that is going to be used to
            # encapsulate the callable, note that the result of the
            # callable is set as the result of the future (as expected)
            def coroutine(future, *args, **kwargs):
                yield
                result = coroutine_c(*args, **kwargs)
                future.set_result(result)
        # creates the function that is going to "propagate" the cancel
        # operation from the "parent" future to the child one, this
        # should also close the associated generator
        def cleanup(future):
            if not future.cancelled(): return
            if not hasattr(future, "child"): return
            if not future.child: return
            future.child.cancel()
        # adds the cleanup function as a done callback so that whenever
        # the future is canceled a child future is also canceled, this
        # propagation of operations allows for proper cleanup
        future.add_done_callback(cleanup)
        # verifies if the currently provided coroutine is in fact (already)
        # a coroutine object, if that's the case the sequence (generator)
        # is already present and the coroutine is simply assigned to the
        # sequence without any kind of conversion
        if is_coroutine_object:
            sequence = coroutine
        # otherwise the sequence must be created by calling the coroutine
        # (function) with the proper set of arguments, notice that the signature
        # is inspected to determine if a future argument is required
        else:
            # retrieves the argument spec of the provided coroutine to check
            # if the provided coroutine requires a future to be passed
            spec = legacy.getargspec(coroutine)
            is_future = spec[0] and spec[0][0] == "future"
            # creates the generate sequence from the coroutine callable
            # by calling it with the newly created future instance, that
            # will be used for the control of the execution, notice that
            # the future is only passed in case the coroutine has been
            # determined to be receiving the future as first argument
            if is_future: sequence = coroutine(future, *args, **kwargs)
            else: sequence = coroutine(*args, **kwargs)
        # calls the ensure generator method so that the provided sequence
        # gets properly "normalized" into the expected generator structure
        # in case the normalization is not possible a proper exception is
        # raised indicating the "critical" problem
        is_generator, sequence = asynchronous.ensure_generator(sequence)
        if not is_generator: raise errors.AssertionError("Expected generator")
        # creates the callable that is going to be used to call
        # the coroutine with the proper future variable as argument
        # note that in case the thread mode execution is enabled the
        # callable is going to be executed on a different thread
        if thread: callable = lambda f = future: self.texecute(step, [f])
        else: callable = lambda f = future: step(f)
        # creates the function that will be used to step through the
        # various elements in the sequence created from the calling of
        # the coroutine, the values returned from it may be either future
        # or concrete values, for each situation a proper operation must
        # be applied to complete the final task in the proper way
        def step(_future):
            # unsets any possible reference to a child element as it must
            # have been processed if the control flow reached this point,
            # this avoids duplicated approval of child futures
            future.child = None
            # iterates continuously over the generator that may emit both
            # plain object values or future (delayed executions)
            while True:
                # in case the future object is considered to be closed,
                # (done using a pipeline of callbacks) no more steps are
                # going to be taken and the sequence should be closed as
                # it's not longer going to be used (for sure), this means
                # that the blocked coroutine is not going to be resumed
                if future.closed: sequence.close(); future.cancel(); break
                # determines if the future is ready to receive new work
                # this is done using a pipeline of callbacks that must
                # deliver a positive value so that the future is considered
                # ready, note that in case the future is not ready the current
                # iteration cycle is delayed until the next tick
                if not future.ready: self.delay(callable); break
                # retrieves the next value from the generator and in case
                # value is the last one (stop iteration) verifies if the
                # is still considered running (no value or exception) set and
                # if that's the case runs the default value set (approve)
                # and then breaks the loop, notice that if there's an
                # exception raised in the middle of the generator iteration
                # it's set on the future (indirect notification)
                try: value = next(sequence)
                except StopIteration as exception:
                    result = exception.args[0] if exception.args else None
                    if future.running: future.set_result(result)
                    break
                except BaseException as exception:
                    future.set_exception(exception)
                    break
                # determines if the value retrieved from the generator is a
                # future and if that's the case schedules a proper execution
                is_future = asynchronous.is_future(value)
                # in case the current value is a future schedules it for execution
                # taking into account the proper thread execution model, note that
                # the future is set as a child of the current "parent" future
                if is_future:
                    future.child = value
                    value.add_done_callback(callable)
                    break
                # otherwise it's a normal value being yielded and should be sent
                # to the future object as a partial value (pipelining)
                else:
                    # for a situation where a thread pool should be used the new
                    # value should be "consumed" by adding the data handler operation
                    # to the list of delayed operations and notifying the task pool
                    # so that the event loop on the main thread gets unblocked and
                    # the proper partial value handling is performed (always on main thread)
                    if thread:
                        def handler():
                            future.partial(value)
                            callable()
                        self.delay_s(handler)
                        break
                    # otherwise we're already on the main thread so a simple partial callback
                    # notification should be enough for the proper consuming of the data
                    else:
                        future.partial(value)
        # delays the execution of the callable so that it is executed
        # immediately if possible (event on the same iteration)
        self.delay(callable, immediately = immediately)
        return future
def resolve_hostname(self, hostname, type = "a"):
import netius.clients
future = self.build_future()
def handler(response):
if not response: raise RuntimeError("Timeout in resolution")
if not response.answers: raise RuntimeError("Unable to resolve")
answer = response.answers[0]
address = answer[4]
future.set_result(address)
netius.clients.DNSClient.query_s(
hostname,
type = type,
callback = handler,
daemon = False
)
return future
def run_coroutine(
self,
coroutine,
args = [],
kwargs = {},
thread = None,
close = None
):
# creates the callback function that is going to be called when
# the future associated with the provided ensure context gets
# finished (on done callback)
def cleanup(future):
# calls the stop or pause method for the current loop, effectively
# ending the loop as soon as possible (next tick), notice that if
# the close method is called no more loop re-usage is possible
self.stop() if close else self.pause()
# tries to determine if the provided object is in fact a coroutine
# or if instead it is a "simple" future object ready to be used
is_future = asynchronous.is_future(coroutine)
is_coroutine = not is_future
# ensures that the provided coroutine get executed under a new
# context and retrieves the resulting future
future = self.ensure(
coroutine,
args = args,
kwargs = kwargs,
thread = thread
) if is_coroutine else coroutine
# defines the cleanup operation (loop stop) as the target for the
# done operation on the future (allows cleanup)
future.add_done_callback(cleanup)
# starts the current event loop, this is a blocking operation until
# the done callback is called to stop the loop
self.start()
# tries to retrieve a possible exception associated with
# the future, in case it does not exist ignores the current
# execution and returns the control flow immediately with
# the future's result, to be used by the caller
exception = future.exception()
if not exception: return future.result()
# raises the exception to the upper layers so that it's properly
# handled by them, this is the expected behaviour by this sync
# execution mode of the coroutine inside an event loop
raise exception
def wakeup(self, force = False):
# verifies if this is the main thread and if that's not the case
# and the force flag is not set ignore the wakeup operation, avoiding
# extra usage of resources (not required)
if self.is_main() and not force: return
# makes sure that the the notify pool is started (required for proper
# event notification) and then runs the notification process, should
# "wake" the main event loop as soon as possible
if force: self.nensure()
if not self.npool: return
self.npool.notify()
def sleep(self, timeout, future = None):
# verifies if a future variable is meant to be re-used
# or if instead a new one should be created for the new
# sleep operation to be executed
future = future or self.build_future()
# creates the callable that is going to be used to set
# the final value of the future variable
callable = lambda: future.set_result(timeout)
# delays the execution of the callable so that it is executed
# after the requested amount of timeout, note that the resolution
# of the event loop will condition the precision of the timeout
self.delay(callable, timeout = timeout)
return future
def wait(self, event, timeout = None, future = None):
# verifies if a future variable is meant to be re-used
# or if instead a new one should be created for the new
# sleep operation to be executed
future = future or self.build_future()
# creates the callable that is going to be used to set
# the final value of the future variable, the result
# set in the future represents the payload of the event
def callable(data):
if future.cancelled(): return
future.set_result(data)
# creates the callable that is going to be called in case
# the timeout has been reached, this avoids constant waiting
# for an event to happen (dead lock)
def canceler():
if future.done(): return
future.cancel()
# creates the callback function that is going to be called
# whenever the future is completed (either error or success)
# this should run the series of cleanup operations
def cleanup(future):
self.unwait_event(callable, name = event)
# registers the cleanup function for the done operation so that
# the waiting for the event is canceled whenever the future is
# completed with either success or error
future.add_done_callback(cleanup)
# waits the execution of the callable until the event with the
# provided name is notified/triggered, the execution should be
# triggered on the same event loop tick as the notification
self.wait_event(callable, name = event)
# in case a valid timeout is set schedules the canceler operation
# to be performed (to unblock the waiting element)
if timeout: self.delay(canceler, timeout = timeout)
# returns the provided future or a new one in case none has been
# provided, this will be used for proper event registration
return future
def notify(self, event, data = None):
# adds the event with the provided name to the list of notifications
# that are going to be processed in the current tick operation
self._notified.append((event, data))
# in case this is considered to be the main thread there no need to
# proceed with the task pool notification process (expensive)
if self.is_main(): return
# runs the wakeup operation making sure that as soon as possible the
# main event loop gets unblocked for event processing
self.wakeup()
    def load(self, full = False):
        """
        Starts the loading process for the current engine, this should be
        a singleton (run once) operation to be executed once per instance.
        Some of the responsibilities of the loading process should include:
        logging loading, system signal binding and welcome message printing.
        The method should be protected against double execution issues, meaning
        that should be safely called at any stage of the life cycle.
        :type full: bool
        :param full: If the loading process should be performed completely,
        meaning that even the long tasks should be executed.
        NOTE(review): the flag is not referenced by this method's body,
        presumably reserved for sub-class extension - confirm intended usage.
        """
        # in case the current structure is considered/marked as already loaded
        # there's no need to continue with the loading execution (returns immediately)
        if self._loaded: return
        # calls the boot hook responsible for the initialization of the various
        # structures of the base system, note that is going to be called once
        # per each loop starting process (structure should be destroyed on cleanup)
        self.boot()
        # loads the various parts of the base system, under this calls each
        # of the systems should have it's internal structures started
        self.load_logging(self.level)
        # loads the diagnostics application handlers that allows external
        # interaction with the service for diagnostics/debugging
        self.load_diag()
        # loads the complete set of middleware that has been registered for
        # the current system (takes into account if it's considered the main one)
        self.load_middleware()
        # calls the welcome handle this is meant to be used to print some
        # information about the finishing of the loading of the infra-structure
        # this is going to be called once per base system
        self.welcome()
        # runs the binding of the system wide signals so that if
        # any of such signals is raised it's properly handled and
        # redirected to the proper logic through exceptions
        self.bind_signals()
        # sets the private loading flag ensuring that no extra load operations
        # will be done after this first call to the loading (no duplicates)
        self._loaded = True
    def unload(self, full = True):
        """
        Unloads the structures associated with the current engine, so that
        the state of the current engine is reversed to the original one.
        Note that this is not related in any way with the event loop and only
        static structures are affected.
        After a call to this method, the load method may be called again.
        :type full: bool
        :param full: If the complete set of structure unloading operations
        should be performed, this is related with the full flag of load.
        """
        # verifies if the current structure is considered/marked as already
        # "unloaded", if that's the case returns the control flow immediately
        # as there's nothing pending to be undone
        if not self._loaded: return
        # triggers the operation that will start the unloading process of the
        # logging infra-structure of the current system, note that this step
        # is only performed under the full (complete) unloading mode
        if full: self.unload_logging()
        # unloads the middleware infra-structure that has been created for the
        # current service, no longer going to be used
        self.unload_middleware()
        # marks the current system as unloaded as the complete set of operations
        # meant to start the unloading process have been finished
        self._loaded = False
    def boot(self):
        # hook method (no-op by default), called by load() once per each
        # loop starting process, meant to be overridden by sub-classes to
        # initialize their own base structures
        pass
    def welcome(self):
        # hook method (no-op by default), called by load() after the base
        # infra-structure is loaded, meant to be overridden to print/log
        # information about the finishing of the loading process
        pass
    def load_logging(self, level = logging.DEBUG, format = LOG_FORMAT, unique = False):
        """
        Loads the logging infra-structure for the current service, the
        logger instance may be re-used between services (reference counted
        through the custom `_counter` attribute) unless a unique identifier
        is requested.
        :type level: String/int
        :param level: The base verbosity level to be set in the logger and
        in the associated stream handler.
        :type format: String
        :param format: The format string used to build the formatter that
        is applied to the logging handlers.
        :type unique: bool
        :param unique: If a unique identifier should be used in the logger
        retrieval, avoiding logger re-use between instances.
        """
        # verifies if there's a logger already set in the current service
        # if that's the case ignores the call no double reloading allowed
        if self.logger: return
        # normalizes the provided level value so that it represents
        # a proper and understandable value, then starts the formatter
        # that is going to be used and retrieves the (possibly unique)
        # identifier to be used in the logger retrieval/identification
        level = self._level(level)
        formatter = logging.Formatter(format)
        identifier = self.get_id(unique = unique)
        # retrieves the logger that is going to be according to the
        # decided identifier and then verifies that the counter value
        # is properly updated deciding also if the logger instance is
        # a new one or if instead it refers an already initialized/old
        # instance that doesn't need a new initialization process
        self.logger = logging.getLogger(identifier)
        counter = self.logger._counter if hasattr(self.logger, "_counter") else 0
        is_new = counter == 0
        self.logger._counter = counter + 1
        if not is_new: return
        # start the extra logging infrastructure (extra handlers)
        # and initializes the stream handlers with the proper level
        # and formatter values (as expected)
        self.extra_logging(level, formatter)
        self.handler_stream.setLevel(level)
        self.handler_stream.setFormatter(formatter)
        # starts the new logger instance by setting no parent to it,
        # updating the verbosity level of it and then registering the
        # complete set of handlers for it (as expected)
        self.logger.parent = None
        self.logger.setLevel(level)
        for handler in self.handlers:
            if not handler: continue
            self.logger.addHandler(handler)
    def unload_logging(self, safe = True):
        """
        Unloads the logging infra-structure previously created by the
        load_logging() method, taking the reference counter into account
        so that a shared logger is only destroyed by its last "client".
        :type safe: bool
        :param safe: If the complete set of handlers currently registered
        in the logger (and not only the locally known ones) should also
        be removed, ensuring unregistration even in complex scenarios.
        """
        # verifies if there's a valid logger instance set in the
        # current service, in case there's not returns immediately
        # as there's nothing remaining to be done here
        if not self.logger: return
        # child (forked) processes skip the logging unload, the logging
        # structures are inherited from the parent/master process which
        # should remain responsible for their teardown
        # NOTE(review): confirm this ownership model against the fork logic
        if self._child: return
        # updates the counter value for the logger and validates
        # that no more "clients" are using the logger so that it
        # may be properly destroyed (as expected)
        counter = self.logger._counter
        is_old = counter == 1
        self.logger._counter = counter - 1
        if not is_old: return
        # iterates over the complete set of handlers in the current
        # base element and removes them from the current logger
        for handler in self.handlers:
            if not handler: continue
            self.logger.removeHandler(handler)
        # in case the safe flag is set, iterates over the complete
        # set of handlers registered for the logger and removes them
        # from the current logger, this is required so that proper
        # handler unregistration is ensured even for complex scenarios
        for handler in self.logger.handlers if safe else ():
            if not handler: continue
            self.logger.removeHandler(handler)
        # closes the base stream handler as it's no longer going to
        # be used for any kind of logging operation (avoids leaks)
        self.handler_stream.close()
        # iterates over the complete set of (built) extra handlers
        # and runs the close operation for each of them, as they are
        # no longer considered required for logging purposes
        for handler in self._extra_handlers: handler.close()
        # unset the logger reference in the current service so that
        # it's not possible to use it any longer
        self.logger = None
    def extra_logging(self, level, formatter):
        """
        Loads the complete set of logging handlers defined in the
        current logging value, should be a map of definitions.
        This handlers will latter be used for piping the various
        logging messages to certain output channels.
        The creation of the handler is done using a special keyword
        arguments strategy so that python and configuration files
        are properly set as compatible.
        :type level: String/int
        :param level: The base severity level for which the new handler
        will be configured in case no extra level definition is set.
        :type formatter: Formatter
        :param formatter: The logging formatter instance to be set in
        the handler for formatting messages to the output.
        """
        # verifies if the logging attribute of the current instance is
        # defined and in case it's not returns immediately, otherwise
        # starts by converting the currently defined set of handlers into
        # a list so that it may be correctly manipulated (add handlers)
        if not self.logging: return
        self.handlers = list(self.handlers)
        # iterates over the complete set of handler configuration in the
        # logging to create the associated handler instances
        for config in self.logging:
            # gathers the base information on the current handler configuration
            # running also the appropriate transformation on the level
            # NOTE(review): a configuration entry without a "name" key leaves
            # name as None, making the concatenation below raise TypeError -
            # confirm that configurations always carry a name value
            name = config.get("name", None)
            _level = config.get("level", level)
            _level = self._level(_level)
            # "clones" the configuration dictionary and then removes the base
            # values so that they do not interfere with the building
            config = dict(config)
            if "level" in config: del config["level"]
            if "name" in config: del config["name"]
            # retrieves the proper builder, skipping the current loop in case
            # it does not exist and then builds the new handler instance, setting
            # the proper level and formatter and then adding it to the set
            if not hasattr(log, name + "_handler"): continue
            builder = getattr(log, name + "_handler")
            handler = builder(**config)
            handler.setLevel(_level)
            handler.setFormatter(formatter)
            self.handlers.append(handler)
            self._extra_handlers.append(handler)
        # restores the handlers structure back to the "original" tuple form
        # so that no expected data types are violated
        self.handlers = tuple(self.handlers)
def level_logging(self, level):
"""
Changes the verbosity level of the current logging infra-structure
into the provided level of verbosity.
The provided value may be an integer (internal value) or a string
representation of the requested verbosity level.
:type level: int/String
:param level: The (logging) for which the logging infra-structure
must be changed, either an integer or string value.
"""
# converts the provided logging level value (either string or
# integer value) into the appropriate normalized value that can
# be used internally for logging level setting
level = self._level(level)
# sets the (new) level value value for both the base stream
# handler and also for the logger itself
self.handler_stream.setLevel(level)
self.logger.setLevel(level)
# iterates over the complete set of attached handlers to
# update their respective logging level
for handler in self.handlers: handler.setLevel(level)
    def load_diag(self, env = True):
        """
        Loads the (optional) diagnostics application, serving it under a
        new thread so that external interaction with the service becomes
        possible for diagnostics/debugging purposes.
        :type env: bool
        :param env: If the server, host and port values should be read
        from environment variables (with defaults) or if instead the
        hard-coded default values should be used directly.
        """
        # verifies if the diagnostics "feature" has been requested
        # for the current infra-structure and if that's not the case
        # returns the control flow immediately to the caller
        if not self.diag: return
        # runs the import operations for the diag module, note that
        # this must be performed locally to avoid any unwanted behavior
        # or collision with a runtime process (would pose issues)
        from . import diag
        # verifies if the diag module has been correctly loaded and
        # if that's not the case fails gracefully and returns the
        # control flow to the caller method
        if not diag.loaded:
            self.info("Failed to load diagnostics, import problem")
            return
        # retrieves the various server related value for the diagnostics
        # server, taking into account if the env flag is set
        server = self.get_env("DIAG_SERVER", "netius") if env else "netius"
        host = self.get_env("DIAG_HOST", "127.0.0.1") if env else "127.0.0.1"
        port = self.get_env("DIAG_PORT", 5050, cast = int) if env else 5050
        # creates the application object that is going to be
        # used for serving the diagnostics app
        self.diag_app = diag.DiagApp(self)
        # calls the on diag method so that the current instance is
        # able to act on the newly created application
        self.on_diag()
        # starts the "serving" procedure of it under a new thread
        # to avoid blocking the current context of execution
        self.diag_app.serve(
            server = server,
            host = host,
            port = port,
            diag = False,
            threaded = True,
            conf = False
        )
def load_middleware(self, suffix = "Middleware"):
# iterates over the complete set of string that define the middleware
# that is going to be loaded and executes the loading process
for name in self.middleware:
# capitalizes the provided name and appends the suffix to it
# to created the proper (and complete) middleware class name
name_c = name.capitalize()
class_n = name_c + suffix
# retrieves the class (from the middleware module) that is going
# to be used for the current middleware
middleware_c = getattr(middleware, class_n)
# runs the registration process for the middleware, meaning that
# the class is going to be instantiated and started and the proper
# internal structures will be updated in accordance
self.register_middleware(middleware_c)
def unload_middleware(self):
# iterates over the complete set of middleware instance to stop
# them (close internal structures) and then removes the middleware
# list so that they don't get used any longer
for middleware_i in self.middleware_l: middleware_i.stop()
del self.middleware_l[:]
def register_middleware(self, middleware_c):
# instantiates a new middleware class as a new instance and then
# runs the start method indicating the intention to start a new
# middleware (should properly start its internal structures)
middleware_i = middleware_c(self)
middleware_i.start()
# adds the middleware instance that has just been created to the
# list of middleware loaded for the current service
self.middleware_l.append(middleware_i)
# returns the instance of middleware that has just been created
# while running the registration process
return middleware_i
def call_middleware(self, name, *args, **kwargs):
# iterates over the complete set of middleware instance to call the
# method with the provided name, with the provided arguments
for middleware_i in self.middleware_l:
method = getattr(middleware_i, name)
method(*args, **kwargs)
def bind_signals(
self,
signals = (
signal.SIGINT,
signal.SIGTERM,
signal.SIGHUP if hasattr(signal, "SIGHUP") else None, #@UndefinedVariable
signal.SIGQUIT if hasattr(signal, "SIGQUIT") else None #@UndefinedVariable
),
handler = None
):
# creates the signal handler function that propagates the raising
# of the system exit exception (proper logic is executed) and then
# registers such handler for the (typical) sigterm signal
def base_handler(signum = None, frame = None): raise SystemExit()
for signum in signals:
if signum == None: continue
try: signal.signal(signum, handler or base_handler)
except: self.debug("Failed to register %d handler" % signum)
def start(self):
# in case the current instance is currently paused runs the
# resume operation instead as that's the expected operation
if self.is_paused(): return self.resume()
# in case the event loop is already running then a new sub-
# context based loop should be created in order to block the
# current execution stack (as expected)
if self.is_running(): return self.block()
# re-builds the polling structure with the new name this
# is required so that it's possible to change the polling
# mechanism in the middle of the loading process
self.poll = self.build_poll()
# retrieves the name of the polling mechanism that is
# going to be used in the main loop of the current
# base service, this is going to be used for diagnostics
poll_name = self.get_poll_name()
# triggers the loading of the internal structures of
# the base structure in case the loading has already
# been done nothing is done (avoids duplicated load)
self.load()
# opens the polling mechanism so that its internal structures
# become ready for the polling cycle, the inverse operation
# (close) should be performed as part of the cleanup
self.poll.open(timeout = self.poll_timeout)
# makes sure that the notify pool is created so that the event
# notification (required for multi threaded environments) is created
# and ready to be used (as expected)
self.nensure()
# retrieves the complete set of information regarding the current
# thread that is being used for the starting of the loop, this data
# may be used for runtime debugging purposes (debug only data)
cthread = threading.current_thread()
self.tid = cthread.ident or 0
self.tname = cthread.getName()
self._main = self.tname == "MainThread"
# in case the current thread is the main one, the global main instance
# is set as the current instance, just in case no main variable is
# already set otherwise corruption may occur (override of value)
if self._main and not AbstractBase.get_main():
AbstractBase.set_main(self)
# enters the main loop operation by printing a message
# to the logger indicating this start, this stage
# should block the thread until a stop call is made
self.debug("Starting '%s' service main loop (%.2fs) ..." % (self.name, self.poll_timeout))
self.debug("Using thread '%s' with tid '%d'" % (self.tname, self.tid))
self.debug("Using '%s' as polling mechanism" % poll_name)
# calls the main method to be able to start the main event
# loop properly as defined by specification
self.main()
def stop(self):
if self.is_paused(): self.finish()
else: self._running = False
def pause(self):
self._running = False
self._pausing = True
def resume(self):
self.debug("Resuming '%s' service main loop (%.2fs) ..." % (self.name, self.poll_timeout))
self.on_resume()
self.main()
    def close(self):
        # closing is an alias for stopping the service, the concrete
        # teardown is handled by the stop/finish sequence
        self.stop()
    def finish(self):
        # logs the finishing operation, runs the stop hook, cleans up
        # the internal structures and moves the service to the stop
        # state (terminal state of the event loop life-cycle)
        self.debug("Finishing '%s' service main loop" % self.name)
        self.on_stop()
        self.cleanup()
        self.set_state(STATE_STOP)
    def main(self):
        """
        Runs the main (blocking) event loop, handling the complete set
        of loop termination scenarios: user requested stop, pause
        request, unhandled exceptions and critical (unknown) failures.
        """
        # sets the running flag that controls the running of the
        # main loop and then changes the current state to start
        # as the main loop is going to start, then executes the
        # on start call indicating the (re-)start of the even loop
        self._running = True
        self._pausing = False
        self.set_state(STATE_START)
        self.on_start()
        # runs the event loop, this is a blocking method that should
        # be finished by the end of the execution of by pause, note
        # that the order of the except clauses is relevant: the stop
        # and pause errors must be handled before the generic cases
        try:
            self.loop()
            self.finalize()
        except (KeyboardInterrupt, SystemExit, errors.StopError):
            self.info("Finishing '%s' service on user request ..." % self.name)
        except errors.PauseError:
            self.debug("Pausing '%s' service main loop" % self.name)
            self.set_state(STATE_PAUSE)
            self.on_pause()
        except BaseException as exception:
            self.error(exception)
            self.log_stack(method = self.warning)
        except:
            self.critical("Critical level loop exception raised")
            self.log_stack(method = self.error)
        finally:
            # when the service ended up in the paused state the finish
            # (cleanup) sequence is skipped so that it may be resumed
            if self.is_paused(): return
            self.finish()
def is_main(self):
if not self.tid: return True
return threading.current_thread().ident == self.tid
    def is_running(self):
        # the running flag controls the continuity of the main loop
        return self._running
    def is_started(self):
        # the service is considered started while in the start state
        return self.get_state() == STATE_START
    def is_stopped(self):
        # the service is considered stopped while in the stop state
        return self.get_state() == STATE_STOP
    def is_paused(self):
        # the service is considered paused while in the pause state
        return self.get_state() == STATE_PAUSE
    def is_edge(self):
        # delegates the edge mode verification to the poll mechanism
        return self.poll.is_edge()
    def is_empty(self):
        # delegates the emptiness verification to the poll mechanism
        return self.poll.is_empty()
    def is_sub_read(self, socket):
        # checks if the socket is subscribed for read operations
        return self.poll.is_sub_read(socket)
    def is_sub_write(self, socket):
        # checks if the socket is subscribed for write operations
        return self.poll.is_sub_write(socket)
    def is_sub_error(self, socket):
        # checks if the socket is subscribed for error operations
        return self.poll.is_sub_error(socket)
    def sub_all(self, socket):
        # subscribes the socket for all operation types in the poll,
        # setting the current instance as the subscription owner
        return self.poll.sub_all(socket, owner = self)
    def unsub_all(self, socket):
        # unsubscribes the socket from all operation types
        return self.poll.unsub_all(socket)
    def sub_read(self, socket):
        # subscribes the socket for read operations (owned by self)
        return self.poll.sub_read(socket, owner = self)
    def sub_write(self, socket):
        # subscribes the socket for write operations (owned by self)
        return self.poll.sub_write(socket, owner = self)
    def sub_error(self, socket):
        # subscribes the socket for error operations (owned by self)
        return self.poll.sub_error(socket, owner = self)
    def unsub_read(self, socket):
        # unsubscribes the socket from read operations
        return self.poll.unsub_read(socket)
    def unsub_write(self, socket):
        # unsubscribes the socket from write operations
        return self.poll.unsub_write(socket)
    def unsub_error(self, socket):
        # unsubscribes the socket from error operations
        return self.poll.unsub_error(socket)
    def cleanup(self, destroy = True):
        """
        Runs the complete sequence of cleanup operations for the current
        engine: unloading, event/delay structure destruction, pool and
        connection closing and poll closing, the ordering of the steps
        is relevant and should be preserved.
        :type destroy: bool
        :param destroy: If the final destroy operation should also be
        performed as the very last cleanup step.
        """
        # runs the unload operation for the current base container this should
        # unset/unload some of the components for this base infra-structure
        self.unload()
        # destroys the complete set of structures associated with the event
        # notification, this should include both the map of events to binds
        # association and the list of pending notifications to be processed
        self._events.clear()
        del self._notified[:]
        # destroys the current information on the delays that are no longer
        # going to be executed as the poll/system is closing, this is required
        # in order to avoid any possible memory leak with closures/cycles
        del self._delayed[:]
        del self._delayed_o[:]
        del self._delayed_n[:]
        # runs the expand destroy operation so that the complete set of expanded
        # values get their (temporary) files removed (garbage collection)
        self._expand_destroy()
        # runs the destroy operation on the ssl component of the base
        # element so that no more ssl is available/used (avoids leaks)
        self._ssl_destroy()
        # verifies if there's a valid (and open) notify pool, if that's
        # the case starts the stop process for it so that there's no
        # leaking of task descriptors and other structures
        if self.npool: self.nstop()
        # verifies if there's a valid (and open) task pool, if that's
        # the case starts the stop process for it so that there's no
        # leaking of task descriptors and other structures
        if self.tpool: self.tstop()
        # verifies if there's a valid (and open) file pool, if that's
        # the case starts the stop process for it so that there's no
        # leaking of file descriptors and other structures
        if self.fpool: self.fstop()
        # creates a copy of the connections list because this structure
        # is going to be changed in the closing of the connection object
        connections = copy.copy(self.connections)
        # iterates over the complete set of connections currently
        # registered in the base structure and closes them so that
        # can no longer be used and are gracefully disconnected
        for connection in connections: connection.close()
        # iterates over the complete set of sockets in the connections
        # map to properly close them (avoids any leak of resources)
        for _socket in self.connections_m: _socket.close()
        # in case the current thread is the main one then in case the
        # instance set as global main is this one unsets the value
        # meaning that the main instance has been unloaded
        if self._main and AbstractBase.get_main() == self:
            AbstractBase.unset_main()
        # closes the current poll mechanism so that no more issues arise
        # from an open poll system (memory leaks, etc.), note that this is
        # only performed in case the current base instance is the owner of
        # the poll that is going to be closed (works with containers)
        if self.poll_owner: self.poll.close()
        # deletes some of the internal data structures created for the instance
        # and that are considered as no longer required
        self.connections_m.clear()
        self.callbacks_m.clear()
        del self.connections[:]
        del self._extra_handlers[:]
        # runs the destroy operation for the current instance, this should remove
        # the most obscure parts of the current instance
        if destroy: self.destroy()
    def loop(self):
        """
        Runs the main polling loop of the service: ticks the internal
        structures, polls the registered connections and dispatches the
        resulting read, write and error sets, iterating until the
        running flag becomes unset.
        """
        # iterates continuously while the running flag is set, once
        # it becomes unset the loop breaks at the next execution cycle
        while True:
            # calls the base tick int handler indicating that a new
            # tick loop iteration is going to be started, all the
            # "in between loop" operation should be performed in this
            # callback as this is the "space" they have for execution
            self.ticks()
            # in case running flag is disabled it's time to break the
            # cycle (just before the possible block) as it would imply
            # extra time before we could stop the event loop
            if not self._running: break
            # updates the current state to poll to indicate
            # that the base service is selecting the connections
            self.set_state(STATE_POLL)
            # runs the main selection operation on the current set
            # of connection for each of the three operations returning
            # the resulting active sets for the callbacks
            reads, writes, errors = self.poll.poll()
            # calls the various callbacks with the selections lists,
            # these are the main entry points for the logic to be executed
            # each of this methods should be implemented in the underlying
            # class instances as no behavior is defined at this inheritance
            # level (abstract class)
            self.reads(reads)
            self.writes(writes)
            self.errors(errors)
    def block(self):
        # runs a nested (sub-context) iteration of the event loop,
        # blocking the current execution stack, once it finishes the
        # running flag is restored so that the outer loop may continue
        # its normal operation
        try: self.loop()
        finally: self._running = True
def fork(self):
# ensures that the children value is converted as an
# integer value as this is the expected structure
self.children = int(self.children)
# runs a series of validations to be able to verify
# if the fork operation should really be performed
if not self.children: return True
if not self.children > 0: return True
if not hasattr(os, "fork"): return True
if self._forked: return True
# prints a debug operation about the operation that is
# going to be performed for the forking
self.debug("Forking the current process into '%d' children ..." % self.children)
# calls the on fork method indicating that a new fork
# operation is soon going to be performed
self.on_fork()
# sets the initial pid value to the value of the current
# master process as this is going to be used for child
# detection (critical for the correct logic execution)
pid = os.getpid()
# iterates of the requested (number of children) to run
# the concrete fork operation and fork the logic
for _index in range(self.children):
pid = os.fork() #@UndefinedVariable
self._child = pid == 0
if self._child: self.on_child()
if self._child: break
self._childs.append(pid)
# sets the forked flag, meaning that the current process
# has been already forked (avoid duplicated operations)
self._forked = True
# in case the current process is a child one an immediate
# valid value should be returned (force logic continuation)
if self._child: return True
# registers for some of the common signals to be able to avoid
# any possible interaction with the joining process
def handler(signum = None, frame = None): raise errors.StopError("Stop")
self.bind_signals(handler = handler)
# sleeps forever, waiting for an interruption of the current
# process that triggers the children to quit, so that it's
# able to "join" all of them into the current process
try: self._wait_forever()
except: pass
# prints a debug information about the processes to be joined
# this indicated the start of the joining process
self.debug("Joining '%d' children processes ..." % self.children)
# iterates over the complete set of children to send the proper
# terminate signal to each of them for proper termination
for pid in self._childs: os.kill(pid, signal.SIGTERM)
# iterates over the complete set of child processed to join
# them (master responsibility)
for pid in self._childs: os.waitpid(pid, 0)
# prints a message about the end of the child process joining
# this is relevant to make sure everything is ok before exit
self.debug("Finished joining %d' children processes" % self.children)
# runs the cleanup operation for the current process this is
# required to avoid any leaked information
self.cleanup()
# returns an invalid value meaning that no control flow should
# continue, as this is the master process (coordinator)
return False
    def finalize(self):
        # raises the pause error in case a pause request is pending so
        # that the main() handler moves the service to the paused state,
        # a running flag still set at this stage is considered an
        # assertion violation (loop exited while marked as running)
        if self._pausing: raise errors.PauseError("Pause state expected")
        if self._running: raise errors.AssertionError("Not expected running")
    def ticks(self):
        # updates the current state value to the tick state indicating
        # that the current process is updating a new tick in loop
        self.set_state(STATE_TICK)
        # runs the verification/processing of the complete set of file
        # events that have been raised meanwhile, this allows for the
        # processing of various file driven operations
        self.files()
        # "calculates" the new loop id by incrementing one value
        # to the previous one, note that the value is calculated
        # in a modulus way so that no overflow occurs, the wrap
        # value is 2147483647 (the maximum signed 32 bit integer)
        self._lid = (self._lid + 1) % 2147483647
        # runs the processing of the delayed calls so that the pending
        # calls are called if the correct time has been reached
        self._delays()
    def reads(self, reads, state = True):
        # marks the read state (when requested) before any concrete read
        # handling, concrete processing is meant to be implemented by
        # the underlying class instances
        if state: self.set_state(STATE_READ)
    def writes(self, writes, state = True):
        # marks the write state (when requested) before any concrete
        # write handling, to be implemented by underlying classes
        if state: self.set_state(STATE_WRITE)
    def errors(self, errors, state = True):
        # marks the error state (when requested) before any concrete
        # error handling, to be implemented by underlying classes,
        # note that the STATE_ERRROR constant (triple R spelling) is
        # defined elsewhere and is referenced here as-is
        if state: self.set_state(STATE_ERRROR)
    def pregister(self, pool):
        """
        Registers the provided pool in the polling system, subscribing
        for read operations on its event file descriptor so that pool
        events may be handled by the event loop.
        :type pool: Pool
        :param pool: The pool to be registered for event handling.
        """
        # prints a debug message stating that a new pool is
        # being created for the handling of message events
        self.debug("Started pool, for async handling")
        # tries to retrieve the file descriptor of the event virtual
        # object that is notified for each operation associated with
        # the pool, (primary communication mechanism)
        eventfd = pool.eventfd()
        if not eventfd: self.warning("Starting pool without eventfd")
        if not eventfd: return
        if not self.poll: return
        self.sub_read(eventfd)
        # creates the callback closure around the current context
        # so that a proper callback can be used for the operations
        pool._callback = lambda e, s: self.pcallback(e, s, pool)
        # registers for a callback operation in the event fd so that
        # it gets properly de-notified as expected when a read operation
        # is performed in it, this operations will be performed upon
        # the request for the read operation
        self.add_callback(eventfd, pool._callback)
        # retrieves the class of the eventfd object and then uses it
        # to retrieve the associated name for logging purposes
        eventfd_cls = eventfd.__class__
        eventfd_name = eventfd_cls.__name__
        # echoes a debug message indicating that a new read event
        # subscription has been created for the event fd of the pool
        self.debug("Subscribed for read operations on event fd (%s)" % eventfd_name)
    def punregister(self, pool):
        """
        Unregisters the provided pool from the polling system, reverting
        the operations performed by pregister() (read unsubscription and
        callback removal).
        :type pool: Pool
        :param pool: The pool to be unregistered from event handling.
        """
        # prints a debug message notifying the user that no more
        # async handling is possible using the pool
        self.debug("Stopped existing pool, no more async handling")
        # tries to retrieve the event file descriptor for
        # the pool and in case it exists unsubscribes
        # from it under the current polling system
        eventfd = pool.eventfd()
        if not eventfd: self.warning("Stopping pool without eventfd")
        if not eventfd: return
        if not self.poll: return
        self.unsub_read(eventfd)
        # verifies if the callback operation in the event fd is defined
        # for the pool and if that's not the case returns immediately
        if not hasattr(pool, "_callback"): return
        # unregisters from a callback operation in the event fd so that
        # no more events are handled by the notifier, this is expected
        # in order to avoid any leaks
        self.remove_callback(eventfd, pool._callback)
        # unsets the value of the callback removing its reference from
        # the pool as its no longer going to be used
        del pool._callback
        # echoes a debug message indicating that a new read event
        # unsubscription has been created for the event fd of the pool
        self.debug("Unsubscribed for read operations on event fd")
def pcallback(self, event, socket, pool):
# runs a series of pre-validations on the callback so that
# no operations is performed for such conditions
if not pool: return
if not event == "read": return
# runs the de-notify operation clearing the pool from any
# possible extra notification (avoid extra counter)
pool.denotify()
    def nensure(self):
        # makes sure that a notify pool exists, creating and
        # starting one in case it does not exist yet
        if self.npool: return
        self.nstart()
    def nstart(self):
        # creates, starts and registers (for polling) the notify
        # pool in case one does not exist yet (no duplicates)
        if self.npool: return
        self.npool = netius.pool.NotifyPool()
        self.npool.start()
        self.pregister(self.npool)
    def nstop(self):
        # unregisters and stops the notify pool in case one exists
        if not self.npool: return
        self.punregister(self.npool)
        self.npool.stop()
    def tensure(self):
        # makes sure that a task pool exists, creating and
        # starting one in case it does not exist yet
        if self.tpool: return
        self.tstart()
    def tstart(self):
        # creates, starts and registers (for polling) the task
        # pool in case one does not exist yet (no duplicates)
        if self.tpool: return
        self.tpool = netius.pool.TaskPool()
        self.tpool.start()
        self.pregister(self.tpool)
    def tstop(self):
        # unregisters and stops the task pool in case one exists
        if not self.tpool: return
        self.punregister(self.tpool)
        self.tpool.stop()
def texecute(self, callable, args = [], kwargs = {}, callback = None):
self.tensure()
self.tpool.execute(
callable,
args = args,
kwargs = kwargs,
callback = callback
)
def files(self):
if not self.fpool: return
events = self.fpool.pop_all()
for event in events:
callback = event[-1]
if not callback: continue
callback(*event[1:-1])
    def fopen(self, *args, **kwargs):
        # ensures a file pool exists and delegates the file
        # open operation to it
        self.fensure()
        return self.fpool.open(*args, **kwargs)
    def fclose(self, *args, **kwargs):
        # ensures a file pool exists and delegates the file
        # close operation to it
        self.fensure()
        return self.fpool.close(*args, **kwargs)
    def fread(self, *args, **kwargs):
        # ensures a file pool exists and delegates the file
        # read operation to it
        self.fensure()
        return self.fpool.read(*args, **kwargs)
    def fwrite(self, *args, **kwargs):
        # ensures a file pool exists and delegates the file
        # write operation to it
        self.fensure()
        return self.fpool.write(*args, **kwargs)
    def fensure(self):
        # makes sure that a file pool exists, creating and
        # starting one in case it does not exist yet
        if self.fpool: return
        self.fstart()
    def fstart(self):
        # verifies if there's an already open file pool for
        # the current system and if that's not the case creates
        # a new one, starts its thread cycle and registers it
        # in the polling system for event handling
        if self.fpool: return
        self.fpool = netius.pool.FilePool()
        self.fpool.start()
        self.pregister(self.fpool)
    def fstop(self):
        # verifies if there's an available file pool and
        # if that's the case initializes the stopping of
        # such system, note that this is blocking call as
        # all of the thread will be joined under it
        if not self.fpool: return
        self.punregister(self.fpool)
        self.fpool.stop()
def on_connection_c(self, connection):
# prints some debug information about the connection that has
# just been created (for possible debugging purposes)
self.debug(
"Connection '%s' from '%s' created" %
(connection.id, connection.owner.name)
)
self.debug(
"There are %d connections for '%s'" %
(len(connection.owner.connections), connection.owner.name)
)
# triggers the event notifying any listener about the new connection
# that is now ready for operation to be performed in it
self.trigger("connection_c", self, connection)
def on_connection_d(self, connection):
    """Handles the (scheduled) destruction of a connection:
    logs debugging information and triggers the connection
    deleted event."""
    owner = connection.owner
    self.debug(
        "Connection '%s' from '%s' deleted" %
        (connection.id, owner.name)
    )
    self.debug(
        "There are %d connections for '%s'" %
        (len(owner.connections), owner.name)
    )
    # notifies any listener about the deletion/destruction
    # of the connection
    self.trigger("connection_d", self, connection)
def on_stream_c(self, stream):
    """Handles the creation of a stream: logs debugging
    information and triggers the stream created event
    (as expected per specification)."""
    connection = stream.connection
    self.debug(
        "Stream '%s' from '%s' created" %
        (stream.identifier, connection.owner.name)
    )
    self.trigger("stream_c", self, stream)
def on_stream_d(self, stream):
    """Handles the deletion of a stream: logs debugging
    information and triggers the stream deleted event
    (as expected per specification)."""
    connection = stream.connection
    self.debug(
        "Stream '%s' from '%s' deleted" %
        (stream.identifier, connection.owner.name)
    )
    self.trigger("stream_d", self, stream)
def on_fork(self):
    """Notifies any listener that the process is forking."""
    self.trigger("fork", self)
def on_child(self):
    # triggers the child event indicating that a new child has been
    # created and that any callback operation may now be performed
    self.trigger("child", self)
    # creates a new seed value from a pseudo random value and
    # then adds this new value as a base for randomness in the
    # ssl infra-structure (required for security); the 0.0 is
    # the entropy estimate passed to RAND_add
    seed = str(uuid.uuid4())
    seed = legacy.bytes(seed)
    ssl.RAND_add(seed, 0.0)
def on_diag(self):
    # triggers the diag(nostics) event on the current instance
    self.trigger("diag", self)
def on_start(self):
    # triggers the start event, notifying listeners that the
    # loop has been started
    self.trigger("start", self)
def on_stop(self):
    # triggers the stop event, notifying listeners that the
    # loop has been stopped
    self.trigger("stop", self)
def on_pause(self):
    # triggers the pause event on the current instance
    self.trigger("pause", self)
def on_resume(self):
    # triggers the resume event on the current instance
    self.trigger("resume", self)
def info_dict(self, full = False):
    """Builds a dictionary with diagnostics information on the
    current instance (loaded flag, connection count, state and
    poll name); the full flag adds identification details."""
    info = dict(
        loaded = self._loaded,
        connections = len(self.connections),
        state = self.get_state_s(),
        poll = self.get_poll_name()
    )
    if full:
        info["name"] = self.name
        info["_lid"] = self._lid
    return info
def info_string(self, full = False, safe = True):
    """
    Serializes the diagnostics information dictionary into a
    (pretty printed) JSON string.
    Errors while gathering the information are handled by
    falling back to an empty dictionary.
    :type full: bool
    :param full: If the full set of information should be used.
    :rtype: String
    :return: The JSON serialized information string.
    """
    # gathers the information map guarding against regular
    # exceptions only, the previous bare except clause would
    # also swallow system-exiting exceptions (KeyboardInterrupt,
    # SystemExit) which must be allowed to propagate
    try: info = self.info_dict(full = full)
    except Exception: info = dict()
    info_s = json.dumps(
        info,
        ensure_ascii = False,
        indent = 4,
        separators = (",", " : "),
        sort_keys = True
    )
    return info_s
def connections_dict(self, full = False):
    """Returns a list with the information dictionary of every
    currently registered connection."""
    return [
        connection.info_dict(full = full)
        for connection in self.connections
    ]
def connection_dict(self, id, full = False):
    """Returns the information dictionary for the connection
    with the provided identifier, or None when no connection
    with such identifier is registered."""
    for connection in self.connections:
        if connection.id == id:
            return connection.info_dict(full = full)
    return None
def new_connection(self, socket, address, ssl = False):
    """
    Creates a new connection for the provided socket
    object and string based address, the returned
    value should be a workable object.
    :type socket: Socket
    :param socket: The socket object to be encapsulated
    by the object to be created (connection).
    :type address: String
    :param address: The address as a string to be used to
    describe the connection object to be created.
    :type ssl: bool
    :param ssl: If the connection to be created is meant to
    be secured using the ssl framework for encryption.
    :rtype: Connection
    :return: The connection object that encapsulates the
    provided socket and address values.
    """
    # only instantiation is performed here — the connection is
    # not registered in this instance's structures by this call
    return Connection(
        owner = self,
        socket = socket,
        address = address,
        ssl = ssl
    )
def add_callback(self, socket, callback):
    """Registers the callback for the provided socket, avoiding
    duplicate registration of the same callback."""
    callbacks = self.callbacks_m.setdefault(socket, [])
    if callback in callbacks: return
    callbacks.append(callback)
def remove_callback(self, socket, callback):
    """Unregisters the callback for the provided socket, removing
    the socket entry itself when no callbacks remain."""
    callbacks = self.callbacks_m.get(socket, [])
    if not callback in callbacks: return
    callbacks.remove(callback)
    if not callbacks: del self.callbacks_m[socket]
def load_config(self, path = "config.json", **kwargs):
    """
    Loads the JSON configuration file at the provided path,
    merging it on top of the provided keyword arguments and
    setting every resulting key as an attribute of this instance.
    """
    kwargs = self.apply_config(path, kwargs)
    for key, value in legacy.iteritems(kwargs):
        setattr(self, key, value)
def apply_config(self, path, kwargs):
    """
    Merges the JSON configuration stored at the provided path
    on top of the given keyword arguments map, returning a new
    map (the provided one is not mutated).
    When the file does not exist the arguments are returned
    unchanged.
    :type path: String
    :param path: The path to the JSON configuration file.
    :type kwargs: Dictionary
    :param kwargs: The base map of configuration values.
    :rtype: Dictionary
    :return: The resulting (merged) configuration map.
    """
    if not os.path.exists(path): return kwargs
    self.info("Applying configuration file '%s' ..." % path)
    kwargs = copy.copy(kwargs)
    # uses a context manager so that the file is always closed,
    # even when the JSON parsing operation raises an exception
    # (replaces the previous try/finally construct)
    with open(path, "rb") as file:
        contents = json.load(file)
    for key, value in legacy.iteritems(contents):
        kwargs[key] = value
    return kwargs
def exec_safe(self, connection, callable, *args, **kwargs):
    """
    Executes the provided callable (with the given arguments)
    in a protected environment: silent/valid ssl and socket
    errors are reported via on_expected or ignored, while any
    remaining exception is routed to on_exception.
    Returns the callable's return value on success or False
    when an exception was raised (and handled).
    """
    try:
        return callable(*args, **kwargs)
    except ssl.SSLError as error:
        # ssl errors carry both a numeric value and (possibly)
        # a reason string, either of which may mark the error
        # as silent (expected) or valid (ignorable)
        error_v = error.args[0] if error.args else None
        error_m = error.reason if hasattr(error, "reason") else None
        if error_v in SSL_SILENT_ERRORS:
            self.on_expected(error, connection)
        elif not error_v in SSL_VALID_ERRORS and\
            not error_m in SSL_VALID_REASONS:
            self.on_exception(error, connection)
    except socket.error as error:
        # plain socket errors follow the same silent/valid
        # classification, using the numeric error value only
        error_v = error.args[0] if error.args else None
        if error_v in SILENT_ERRORS:
            self.on_expected(error, connection)
        elif not error_v in VALID_ERRORS:
            self.on_exception(error, connection)
    except BaseException as exception:
        self.on_exception(exception, connection)
    # reached only when an exception was raised, indicating
    # the callable did not complete successfully
    return False
def is_devel(self):
    """
    Verifies if the current running environment is meant to be
    used for development purposes (as opposed to production).
    Use this method in situations where critical and internal
    information should be displayed only under development.
    This is a runtime check, as opposed to the private
    configuration based one.
    :rtype: bool
    :return: If the current environment is development oriented
    or a production one (invalid result).
    """
    # development mode is currently equated with debug level
    # logging being enabled on the instance
    return self.is_debug()
def is_debug(self):
    """Checks if debug level logging is currently enabled."""
    logger = self.logger
    return bool(logger) and logger.isEnabledFor(logging.DEBUG)
def is_info(self):
    """Checks if info level logging is currently enabled."""
    logger = self.logger
    return bool(logger) and logger.isEnabledFor(logging.INFO)
def is_warning(self):
    """Checks if warning level logging is currently enabled."""
    logger = self.logger
    return bool(logger) and logger.isEnabledFor(logging.WARNING)
def is_error(self):
    """Checks if error level logging is currently enabled."""
    logger = self.logger
    return bool(logger) and logger.isEnabledFor(logging.ERROR)
def is_critical(self):
    """Checks if critical level logging is currently enabled."""
    logger = self.logger
    return bool(logger) and logger.isEnabledFor(logging.CRITICAL)
def debug(self, object):
    # logs the provided object/message at the debug level
    self.log(object, level = logging.DEBUG)
def info(self, object):
    # logs the provided object/message at the info level
    self.log(object, level = logging.INFO)
def warning(self, object):
    # logs the provided object/message at the warning level
    self.log(object, level = logging.WARNING)
def error(self, object):
    # logs the provided object/message at the error level
    self.log(object, level = logging.ERROR)
def critical(self, object):
    # logs the provided object/message at the critical level
    self.log(object, level = logging.CRITICAL)
def log_stack(self, method = None, info = True):
    """Logs the current exception stack trace line by line using
    the provided logging method (defaults to info level), then
    optionally dumps the instance's diagnostics information."""
    method = method or self.info
    for line in traceback.format_exc().splitlines():
        method(line)
    if info: self.log_info(method = method)
def log_info(self, method = None):
    """Dumps the complete (full) information string through the
    provided logging method, defaulting to the info level."""
    method = method or self.info
    for line in self.info_string(full = True).split("\n"):
        method(line)
def log(self, *args, **kwargs):
    """Dispatches the log request to the interpreter specific
    implementation (unicode handling differs between them)."""
    handler = self.log_python_3 if legacy.PYTHON_3 else self.log_python_2
    return handler(*args, **kwargs)
def log_python_3(self, object, level = logging.INFO):
    """
    Python 3 logging implementation: coerces the provided object
    into a string (best effort) and sends it to the logger at the
    requested level (no-op when no logger is set).
    """
    is_str = isinstance(object, legacy.STRINGS)
    # narrows the exception clause — the previous bare except
    # would also swallow system-exiting exceptions — while keeping
    # the best-effort string conversion fallback
    try: message = str(object) if not is_str else object
    except Exception: message = str(object)
    if not self.logger: return
    self.logger.log(level, message)
def log_python_2(self, object, level = logging.INFO):
    """
    Python 2 logging implementation: coerces the provided object
    into an unicode string (falling back to a lossy utf-8 decode)
    and sends it to the logger (no-op when no logger is set).
    """
    is_str = isinstance(object, legacy.STRINGS)
    # narrows the exception clause — the previous bare except
    # would also swallow system-exiting exceptions — keeping the
    # lossy decode fallback for non unicode convertible values
    try: message = unicode(object) if not is_str else object #@UndefinedVariable
    except Exception: message = str(object).decode("utf-8", "ignore")
    if not self.logger: return
    self.logger.log(level, message)
def build_poll(self):
    # retrieves the reference to the parent class associated with
    # the current instance, it's going to be used for class methods
    cls = self.__class__
    # verifies if the currently set polling mechanism is open, in
    # case it is there's no need to re-build the polling mechanism,
    # otherwise rebuilds the polling mechanism with the current
    # name and returns the new poll object to the caller method
    if self.poll and self.poll.is_open(): return self.poll
    # runs the testing of the poll again and verifies if the polling
    # class has changed, in case it did not returns the current poll
    # instance as expected by the current infra-structure
    poll_c = cls.test_poll(preferred = self.poll_name)
    if poll_c == self.poll_c: return self.poll
    # updates the polling class with the new value and re-creates
    # the polling instance with the new polling class returning this
    # new value to the caller method
    self.poll_c = poll_c
    self.poll = self.poll_c()
    return self.poll
def build_future(self, asyncio = True):
    """
    Creates a future object bound to the current event loop
    context, allowing later access to the owning loop.
    :type asyncio: bool
    :param asyncio: If the asyncio loop retrieval strategy should
    be used or instead the netius native one.
    :rtype: Future
    :return: The generated future, bound to the current context.
    """
    # resolves the current (global) loop and binds the newly
    # created future object to it
    return asynchronous.Future(loop = self.get_loop(asyncio = asyncio))
def get_id(self, unique = True):
    """Builds the identifier of this instance, optionally
    suffixed with the per instance uuid for uniqueness."""
    base = NAME + "-" + util.camel_to_underscore(self.name)
    return base + "-" + str(self._uuid) if unique else base
def get_poll(self):
    # returns the currently set polling mechanism instance
    return self.poll
def get_poll_name(self):
    # resolves the name of the currently active polling mechanism
    poll = self.get_poll()
    name = poll.name()
    return name
def get_state(self):
    # returns the current (numeric) state of the loop system
    return self._state
def set_state(self, state):
    # updates the current (numeric) state of the loop system
    self._state = state
def get_state_s(self, lower = True):
    """
    Returns a string describing the current state of the loop
    system, as descriptive as possible.
    :type lower: bool
    :param lower: If the returned string should be converted
    into a lower cased version.
    :rtype: String
    :return: A (possibly lower cased) description of the
    current state of the loop system.
    """
    state_s = STATE_STRINGS[self._state - 1]
    if lower: state_s = state_s.lower()
    return state_s
def get_env(self, name, default = None, cast = None, expand = False):
    """
    Retrieves the value of the environment variable with the
    requested name, defaulting to the provided value when no
    such variable can be found.
    An optional cast type may be provided to convert the value
    into the target type, and an optional expand flag makes the
    value be expanded into a file system file (escaped '\\n'
    sequences become real newlines).
    The request is forwarded to the configuration registry so
    that other data providers may also be used.
    :type name: String
    :param name: The name of the environment variable to be
    retrieved from the current environment.
    :type default: Object
    :param default: The value returned when no value is found.
    :type cast: Type
    :param cast: The cast type used to convert the value.
    :type expand: bool
    :param expand: If the value should be expanded as a file,
    in which case the result is the (temporary) file path.
    :rtype: Object
    :return: The value of the requested environment variable,
    properly casted into the target type.
    """
    configs = config.CONFIGS
    if name not in configs: return default
    value = configs.get(name, default)
    if expand: value = self.expand(value)
    cast = config.CASTS.get(cast, cast)
    if cast and not value == None: value = cast(value)
    return value
def expand(self, value, encoding = "utf-8", force = False):
    """
    Expands the provided string/bytes value into a temporary
    file so it may be used by interfaces that require file
    system based values; escaped '\\n' sequences are converted
    into real newlines and the generated file path is tracked
    for removal during cleanup (no file leaking).
    When the value is unset nothing is written, unless the
    force flag is set.
    :type value: String
    :param value: The string/bytes value to be expanded.
    :type encoding: String
    :param encoding: The encoding used to convert the value
    into bytes when required.
    :type force: bool
    :param force: If expansion should run even for an
    invalid/unset value.
    :rtype: String
    :return: The path of the generated temporary file.
    """
    if not value and not force: return value
    if not legacy.is_bytes(value): value = value.encode(encoding)
    value = value.replace(b"\\n", b"\n")
    fd, file_path = tempfile.mkstemp()
    os.close(fd)
    with open(file_path, "wb") as file:
        file.write(value)
    self._expanded.append(file_path)
    return file_path
def get_protocols(self):
    """
    Retrieves the complete set of protocols (as ALPN strings)
    handled by the current protocol infra-structure.
    :rtype: List
    :return: The list of handled protocols, None at this
    (abstract) level meaning no protocol negotiation.
    :see: https://tools.ietf.org/html/rfc7301
    """
    # no protocols are negotiated at this abstract level,
    # concrete implementations should override this method
    return None
def get_adapter(self, name = "memory", *args, **kwargs):
    """
    Retrieves a configured instance of the storage adapter with
    the provided name (class name without the Adapter suffix);
    extra arguments are forwarded to the adapter constructor.
    :type name: String
    :param name: The name of the adapter to be retrieved.
    :rtype: Adapter
    :return: A properly configured adapter instance.
    """
    adapter_c = getattr(netius.adapters, name.title() + "Adapter")
    return adapter_c(*args, **kwargs)
def get_auth(self, name = "memory", *args, **kwargs):
    """
    Resolves the authentication handler class with the provided
    name; the returned value is a static class to be used via
    class based methods, as the state of these handlers is based
    on the global state of the environment (no instances).
    :type name: String
    :param name: The name of the authentication handler class.
    :rtype: Auth
    :return: The authentication class to be used for the
    interaction of authentication methods.
    """
    return getattr(netius.auth, name.title() + "Auth")
def get_connection(self, socket):
    """
    "Resolves" the connection associated with the provided socket
    returning the structured connection object for it.
    In case no connection exists the method raises an exception
    invalidating the current logic stack.
    :type socket: Socket
    :param socket: The socket for which the connection is going to
    be returned.
    :rtype: Connection
    :return: The connection object associated with the provided
    socket reference.
    """
    # direct map indexing — a KeyError is raised when the socket
    # has no associated connection (the documented failure mode)
    return self.connections_m[socket]
def _pending(self, connection):
    """
    Tries to perform the pending operations in the connection,
    these operations are set in the pending variable of the
    connection structure, so that it may be read latter on.
    The method returns if there are still pending operations
    after this method tick.
    :type connection: Connection
    :param connection: The connection object to be checked for
    pending operations and that is going to be used in the
    performing of these operations.
    :rtype: bool
    :return: If there are still pending operations to be
    performed in the provided connection.
    """
    # calls the run (tick) starter operation that should start
    # and run all the starters registered for the connection or
    # continue any starter operation that is pending for it
    # (e.g. the ssl handshake starter — see _ssl_handshake)
    return connection.run_starter()
def _notifies(self):
"""
Runs the notification process for the complete set of
pending notification in the notified list.
This tick operation may create tail recursion on callback
call and so the list is always processed as a queue.
The number of processed events is returned as part of the
result.
:rtype: int
:return: The number of processed pending events/notifications.
"""
# starts the counter that is going to be used to count
# the number of processed notifications, start at zero
count = 0
# iterates while there are pending notifications to be
# processed, the complete set of bind callables will be
# called for each of the notifications
while self._notified:
event, data = self._notified.pop(0)
binds = self._events.pop(event, [])
for callable in binds: callable(data)
count += 1
# returns the number of processed notifications to the
# the caller method
return count
def _delays(self):
    """
    Calls the complete set of elements that are considered to
    be part of the delayed set of methods to be called.
    These methods are expected to be run before a poll call so
    that they are run outside the handling.
    The calling of the delayed methods takes into account a
    series of assumptions including the loop identifier in order
    to avoid loops in the delayed calls/insertions.
    As part of the delay execution the pending notifications are
    also going to be processed, they must be handled together so
    that proper "recursion" is allowed (tail recursion).
    """
    # runs the merge delay lists operation, so that delay operations
    # inserted from different threads may be used and processed under
    # the current execution (as expected)
    self.delay_m()
    # in case there are no delayed items to be called returns the control
    # flow immediately, note that the notified elements (pending process)
    # are also going to be verified for presence
    if not self._delayed and not self._notified: return
    # retrieves the value for the current timestamp, to be used in
    # comparisons against the target timestamps of the callables
    current = time.time()
    # creates the lists that will hold all the values that are not
    # yet ready to be called in this iteration, the values in these
    # lists will be added back to the heaps at the end of the iteration
    pendings = []
    pendings_o = []
    # iterates over all the delayed callable tuples to try to find
    # (and call) the ones that are meant to be executed in the past
    # (have a target timestamp with a value less than the current)
    while self._delayed or self._notified:
        # runs the notifies verification cycle and if there's at
        # least one processed event continues the loop meaning that
        # the if test evaluations must be re-processed
        if self._notifies(): continue
        # "pops" the current item from the delayed list to be used
        # in the execution of the current iteration cycle; note that
        # both heaps are kept in sync (same pop/push discipline)
        callable_t = heapq.heappop(self._delayed)
        callable_o = heapq.heappop(self._delayed_o)
        # unpacks the current callable tuple in iteration into a
        # target (timestamp value) and a method to be called in
        # case the target timestamp is valid (in the past)
        target, _did, method, lid, options = callable_t
        # defines the proper target value that is going to be used
        # for the comparison against the current time reference,
        # this is performed by defaulting the value to negative
        # ensuring immediate execution of the associated callable
        if target == None: target = -1
        # tests if the current target is valid (less than or
        # equals to the current time value) and in case it's
        # not restores the value to the heap and breaks the loop
        # (heap order guarantees all remaining items are later)
        is_valid = target <= current
        if not is_valid:
            pendings.append(callable_t)
            pendings_o.append(callable_o)
            break
        # in case the loop id present in the delayed call tuple is
        # the same as the current iteration identifier then the
        # call must be done in the next iteration cycle, this
        # verification avoids loops in calls, note that this verification
        # is only required for target zero calls referring the delayed
        # calls to be executed immediately (on next loop)
        if target == 0 and self._lid == lid:
            pendings.append(callable_t)
            pendings_o.append(callable_o)
            continue
        # unpacks the multiple options so that it's possible to determine
        # the way the delayed operation is going to be executed
        run, = options
        # in case the method is not meant to be run, probably canceled,
        # the execution of it should be properly ignored
        if not run: continue
        # calls the callback method as the delayed operation is
        # now meant to be run, this is an operation that may change
        # the current list of delayed objects (causing cycles) and so
        # must be implemented with the proper precautions, note that
        # proper exception handling is set so that top level handling
        # is defined and logging is performed
        try: method()
        except BaseException as exception:
            self.error(exception)
            self.log_stack(method = self.warning)
    # iterates over all the pending callable tuple values and adds
    # them back to the delayed heap lists so that they are called
    # later on (not ready to be called now)
    for pending, pending_o in zip(pendings, pendings_o):
        heapq.heappush(self._delayed, pending)
        heapq.heappush(self._delayed_o, pending_o)
    # in case the delayed list is empty resets the delay id so that
    # it never gets into a very large number, would break performance
    if not self._delayed: self._did = 0
def _generate(self, hashed = True):
"""
Generates a random unique identifier that may be used
to uniquely identify a certain object or operation.
This method must be used carefully to avoid any unwanted
behavior resulting from value collisions.
:type hashed: bool
:param hashed: If the identifier should be hashed into
and hexadecimal string instead of an uuid based identifier.
:rtype: String
:return: The random unique identifier generated and that
may be used to identify objects or operations.
"""
identifier = str(uuid.uuid4())
identifier = identifier.upper()
if not hashed: return identifier
identifier = legacy.bytes(identifier)
hash = hashlib.sha256(identifier)
indetifier = hash.hexdigest()
identifier = identifier.upper()
return indetifier
def _socket_keepalive(
self,
_socket,
timeout = None,
interval = None,
count = None
):
if timeout == None: timeout = self.keepalive_timeout
if interval == None: interval = self.keepalive_interval
if count == None: count = self.keepalive_count
is_inet = _socket.family in (socket.AF_INET, socket.AF_INET6)
is_inet and hasattr(_socket, "TCP_KEEPIDLE") and\
self.socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE, #@UndefinedVariable
timeout
)
is_inet and hasattr(_socket, "TCP_KEEPINTVL") and\
self.socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPINTVL, #@UndefinedVariable
interval
)
is_inet and hasattr(_socket, "TCP_KEEPCNT") and\
self.socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPCNT, #@UndefinedVariable
count
)
hasattr(_socket, "SO_REUSEPORT") and\
self.socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEPORT, #@UndefinedVariable
1
)
def _ssl_init(self, strict = True, env = True):
    """
    Initializes the ssl context infra-structure: the main/default
    context plus a per hostname (SNI) context map.
    Requires the ssl module to provide the SSLContext symbol,
    otherwise the operation is silently aborted.
    :type strict: bool
    :param strict: If the main context should be unset whenever
    no server name (SNI) callback registration is possible.
    :type env: bool
    :param env: If the SSL_SECURE and SSL_CONTEXTS environment
    values should be taken into account.
    """
    # initializes the values of both the "main" context for ssl
    # and the map that associates an hostname and a context, both
    # are going to be used (if possible) at runtime for proper
    # resolution of both key and certificate files
    self._ssl_context = None
    self._ssl_contexts = dict()
    # verifies if the current ssl module contains a reference to
    # the ssl context class symbol, if not the control flow is
    # returned to the caller method as it's not possible to create
    # any kind of context information for ssl
    has_context = hasattr(ssl, "SSLContext")
    if not has_context: return
    # retrieves the reference to the environment variables that are going
    # to be used in the construction of the various ssl contexts, note that
    # the secure variable is extremely important to ensure that a proper and
    # secure ssl connection is established with the peer
    secure = self.get_env("SSL_SECURE", True, cast = bool) if env else False
    contexts = self.get_env("SSL_CONTEXTS", {}, cast = dict) if env else {}
    # creates the main/default ssl context setting the default key
    # and certificate information in such context, then verifies
    # if the callback registration method is defined and if it is
    # defined registers a callback for when the hostname information
    # is available, so that proper concrete context may be set, note
    # that in case the strict mode is enabled (default) the context
    # is unset for situations where no callback registration is possible
    self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    self._ssl_ctx_base(self._ssl_context, secure = secure)
    self._ssl_ctx_protocols(self._ssl_context)
    self._ssl_certs(self._ssl_context)
    has_callback = hasattr(self._ssl_context, "set_servername_callback")
    if has_callback: self._ssl_context.set_servername_callback(self._ssl_callback)
    elif strict: self._ssl_context = None
    # retrieves the reference to the map containing the various key
    # and certificate paths for the various defined host names and
    # uses it to create the complete set of ssl context objects
    for hostname, values in legacy.iteritems(contexts):
        context = self._ssl_ctx(values, secure = secure)
        self._ssl_contexts[hostname] = (context, values)
def _ssl_destroy(self):
self._ssl_context = None
self._ssl_contexts = dict()
def _ssl_callback(self, socket, hostname, context):
    # server name (SNI) callback: resolves the hostname specific
    # ssl context, falling back to the provided default context
    # when there's no entry for the hostname
    context, values = self._ssl_contexts.get(hostname, (context, None))
    self._ssl_ctx_protocols(context)
    socket.context = context
    if not values: return
    # propagates the host/fingerprint verification values (when
    # defined) into the connection associated with the socket
    ssl_host = values.get("ssl_host", None)
    ssl_fingerprint = values.get("ssl_fingerprint", None)
    if not ssl_host and not ssl_fingerprint: return
    connection = self.connections_m.get(socket, None)
    if not connection: return
    connection.ssl_host = ssl_host
    connection.ssl_fingerprint = ssl_fingerprint
def _ssl_ctx(self, values, context = None, secure = True):
context = context or ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self._ssl_ctx_base(context, secure = secure)
self._ssl_ctx_protocols(context)
key_file = values.get("key_file", None)
cer_file = values.get("cer_file", None)
ca_file = values.get("ca_file", None)
ca_root = values.get("ca_root", True)
ssl_verify = values.get("ssl_verify", False)
cert_reqs = ssl.CERT_REQUIRED if ssl_verify else ssl.CERT_NONE
self._ssl_certs(
context,
key_file = key_file,
cer_file = cer_file,
ca_file = ca_file,
ca_root = ca_root,
verify_mode = cert_reqs
)
return context
def _ssl_ctx_base(self, context, secure = True):
if secure and hasattr(ssl, "OP_NO_SSLv2"):
context.options |= ssl.OP_NO_SSLv2
if secure and hasattr(ssl, "OP_NO_SSLv3"):
context.options |= ssl.OP_NO_SSLv3
if secure and hasattr(ssl, "OP_SINGLE_DH_USE"):
context.options |= ssl.OP_SINGLE_DH_USE
if secure and hasattr(ssl, "OP_SINGLE_ECDH_USE"):
context.options |= ssl.OP_SINGLE_ECDH_USE
if secure and hasattr(ssl, "OP_CIPHER_SERVER_PREFERENCE"):
context.options |= ssl.OP_CIPHER_SERVER_PREFERENCE
if secure and hasattr(context, "set_ecdh_curve"):
context.set_ecdh_curve("prime256v1")
if secure and SSL_DH_PATH and hasattr(context, "load_dh_params"):
context.load_dh_params(SSL_DH_PATH)
def _ssl_ctx_protocols(self, context):
self._ssl_ctx_alpn(context)
self._ssl_ctx_npn(context)
def _ssl_ctx_alpn(self, context):
if not hasattr(ssl, "HAS_ALPN"): return
if not ssl.HAS_ALPN: return
if hasattr(context, "set_alpn_protocols"):
protocols = self.get_protocols()
protocols and context.set_alpn_protocols(protocols)
def _ssl_ctx_npn(self, context):
if not hasattr(ssl, "HAS_NPN"): return
if not ssl.HAS_NPN: return
if hasattr(context, "set_npn_protocols"):
protocols = self.get_protocols()
protocols and context.set_npn_protocols(protocols)
def _ssl_certs(
    self,
    context,
    key_file = None,
    cer_file = None,
    ca_file = None,
    ca_root = False,
    verify_mode = ssl.CERT_NONE,
    check_hostname = False
):
    # resolves the bundled fallback key and certificate files,
    # located under base/extras relative to this module's directory,
    # used whenever no explicit key/certificate path is provided
    dir_path = os.path.dirname(__file__)
    root_path = os.path.join(dir_path, "../")
    root_path = os.path.normpath(root_path)
    base_path = os.path.join(root_path, "base")
    extras_path = os.path.join(base_path, "extras")
    key_file = key_file or os.path.join(extras_path, "net.key")
    cer_file = cer_file or os.path.join(extras_path, "net.cer")
    # loads the certificate chain and applies the requested peer
    # verification policy to the provided context
    context.load_cert_chain(cer_file, keyfile = key_file)
    context.verify_mode = verify_mode
    if hasattr(context, "check_hostname"): context.check_hostname = check_hostname
    # loads the custom certificate authority file (when provided)
    # and optionally the system level root certificates
    if ca_file: context.load_verify_locations(cafile = ca_file)
    if ca_root and hasattr(context, "load_default_certs"):
        context.load_default_certs(purpose = ssl.Purpose.SERVER_AUTH)
    if ca_root and SSL_CA_PATH:
        context.load_verify_locations(cafile = SSL_CA_PATH)
def _ssl_upgrade(
self,
_socket,
key_file = None,
cer_file = None,
ca_file = None,
ca_root = True,
server = True,
ssl_verify = False
):
socket_ssl = self._ssl_wrap(
_socket,
key_file = key_file,
cer_file = cer_file,
ca_file = ca_file,
ca_root = ca_root,
server = server,
ssl_verify = ssl_verify
)
return socket_ssl
def _ssl_wrap(
    self,
    _socket,
    key_file = None,
    cer_file = None,
    ca_file = None,
    ca_root = True,
    server = True,
    ssl_verify = False
):
    """
    Wraps the provided plain socket into an ssl enabled one,
    using the instance level ssl context when available or the
    module level ssl.wrap_socket fallback otherwise.
    The handshake is not performed on connect (non blocking
    sockets perform it incrementally via _ssl_handshake).
    NOTE(review): ssl.wrap_socket was deprecated and removed in
    Python 3.12 — the fallback branch should be confirmed against
    the supported interpreter versions.
    """
    # resolves the bundled fallback key and certificate files,
    # located under base/extras relative to this module's directory
    dir_path = os.path.dirname(__file__)
    root_path = os.path.join(dir_path, "../")
    root_path = os.path.normpath(root_path)
    base_path = os.path.join(root_path, "base")
    extras_path = os.path.join(base_path, "extras")
    key_file = key_file or os.path.join(extras_path, "net.key")
    cer_file = cer_file or os.path.join(extras_path, "net.cer")
    cert_reqs = ssl.CERT_REQUIRED if ssl_verify else ssl.CERT_NONE
    # when no instance level context exists falls back to the
    # module level wrapping operation with explicit parameters
    if not self._ssl_context: return ssl.wrap_socket(
        _socket,
        keyfile = key_file,
        certfile = cer_file,
        server_side = server,
        cert_reqs = cert_reqs,
        ca_certs = ca_file,
        ssl_version = ssl.PROTOCOL_SSLv23,
        do_handshake_on_connect = False
    )
    # refreshes the certificate/verification configuration on the
    # instance level context and uses it to wrap the socket
    self._ssl_certs(
        self._ssl_context,
        key_file = key_file,
        cer_file = cer_file,
        ca_file = ca_file,
        ca_root = ca_root,
        verify_mode = cert_reqs
    )
    socket_ssl = self._ssl_context.wrap_socket(
        _socket,
        server_side = server,
        do_handshake_on_connect = False
    )
    return socket_ssl
def _ssl_handshake(self, connection):
    """
    Low level SSL handshake operation that triggers or resumes
    the handshake process.
    It should be able to handle the exceptions raised by the the
    concrete handshake operation so that no exception is raised
    (unhandled) to the upper layers.
    :type connection: Connection
    :param connection: The connection that is going to be used in the
    handshake operation, this should contain a valid/open socket that
    should be registered for both read and write in the poll.
    """
    try:
        # unsets the handshake flag associated with the ssl, meaning
        # that the connection is considered to be currently under the
        # handshaking process (may succeed in the current tick)
        connection.ssl_handshake = False
        connection.ssl_connecting = True
        # tries to run the handshake process, this represents
        # a series of small operations both of writing and reading
        # that are required to establish and guarantee a secure
        # connection from this moment on, note that this operation
        # may fail (non blocking issues) and further retries must
        # be attempted to finish establishing the connection
        _socket = connection.socket
        _socket.do_handshake()
        # sets the ssl handshake flag in the connection, effectively
        # indicating that the ssl handshake process has finished, note
        # that the connecting flag is also unset (ssl connect finished)
        connection.ssl_handshake = True
        connection.ssl_connecting = False
        # calls the end starter method in the connection so that the
        # connection gets notified that the current starter in process
        # has finished and that the next one should be called as
        # soon as possible to go further in the connection initialization
        connection.end_starter()
    except ssl.SSLError as error:
        # tries to retrieve the error code from the argument information
        # in the error, in case the error is defined in the list of
        # valid errors, the handshake is delayed until either a write
        # or read operation is available (retry process); note that
        # _socket is guaranteed to be bound here as the SSLError can
        # only originate from the do_handshake call above
        error_v = error.args[0] if error.args else None
        if error_v in SSL_VALID_ERRORS:
            if error_v == ssl.SSL_ERROR_WANT_WRITE and\
                not self.is_sub_write(_socket):
                self.sub_write(_socket)
            elif self.is_sub_write(_socket):
                self.unsub_write(_socket)
        else: raise
def _expand_destroy(self):
"""
Destroys the complete set of infra-structure (files) associated
with the expansion operation on environment values.
This is required to avoid any kind of file leaking, should be run
on the cleanup operation of the infra-structure.
"""
# iterates over the complete list of expanded file paths to remove
# their corresponding files (graceful error handling)
for expanded in self._expanded:
try: os.remove(expanded)
except OSError: pass
# deletes the complete set of path references from the expanded
# list so that it is not going to be used any longer
del self._expanded[:]
def _level(self, level):
"""
Converts the provided logging level value into the best
representation of it, so that it may be used to update
a logger's level of representation.
This method takes into account the current interpreter
version so that no problem occur.
:type level: String/int
:param level: The level value that is meant to be converted
into the best representation possible.
:rtype: int
:return: The best representation of the level so that it may
be used freely for the setting of logging levels under the
current running interpreter.
"""
level_t = type(level)
if level_t == int: return level
if level == None: return level
if level == "SILENT": return log.SILENT
if hasattr(logging, "_checkLevel"):
return logging._checkLevel(level)
return logging.getLevelName(level)
def _format_delta(self, time_delta, count = 2):
days = time_delta.days
hours, remainder = divmod(time_delta.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
delta_s = ""
if days > 0:
delta_s += "%dd " % days
count -= 1
if count == 0: return delta_s.strip()
if hours > 0:
delta_s += "%dh " % hours
count -= 1
if count == 0: return delta_s.strip()
if minutes > 0:
delta_s += "%dm " % minutes
count -= 1
if count == 0: return delta_s.strip()
delta_s += "%ds" % seconds
return delta_s.strip()
def _wait_forever(self):
while True: time.sleep(60)
class DiagBase(AbstractBase):
    """
    Diagnostics oriented base class that counts the amount of
    read, write and error operations processed, exposing such
    counters through the info dictionary.
    """

    def __init__(self, *args, **kwargs):
        AbstractBase.__init__(self, *args, **kwargs)

        # counters for the various kinds of operations, to be
        # incremented every time the respective handler runs
        self.reads_c = 0
        self.writes_c = 0
        self.errors_c = 0

    def reads(self, *args, **kwargs):
        AbstractBase.reads(self, *args, **kwargs)
        self.reads_c += 1

    def writes(self, *args, **kwargs):
        AbstractBase.writes(self, *args, **kwargs)
        self.writes_c += 1

    def errors(self, *args, **kwargs):
        AbstractBase.errors(self, *args, **kwargs)
        self.errors_c += 1

    def info_dict(self, full = False):
        # extends the base info dictionary with the diagnostics
        # counters gathered by this class
        info = AbstractBase.info_dict(self, full = full)
        info.update(
            reads_c = self.reads_c,
            writes_c = self.writes_c,
            errors_c = self.errors_c
        )
        return info
class BaseThread(threading.Thread):
    """
    The top level thread class that is meant to encapsulate
    a running base object and run it in a new context.

    This base thread may be used to run a network loop allowing
    a main thread to continue with execution logic.
    """

    def __init__(self, owner = None, daemon = False, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        self.owner = owner
        self.daemon = daemon

    def run(self):
        threading.Thread.run(self)

        # without an owner there's nothing to be started by the
        # thread and so it returns immediately
        owner = self.owner
        if not owner: return

        # associates the owner with the current thread and runs its
        # start operation, unsetting both references on the way out
        # so that no dangling thread/owner association remains
        owner._thread = self
        try:
            owner.start()
        finally:
            owner._thread = None
            self.owner = None
def ensure_main(factory = None):
    """
    Makes sure that a main (global) instance exists, creating
    one with the provided factory (or the default AbstractBase
    class) in case none is currently set.
    """

    if AbstractBase.get_main(): return
    AbstractBase.set_main((factory or AbstractBase)())
def get_main(factory = None, ensure = True):
    """
    Retrieves the main (global) instance, optionally making
    sure that one exists before the retrieval.
    """

    if ensure: ensure_main(factory = factory)
    main = AbstractBase.get_main()
    return main
def get_loop(factory = None, ensure = True, asyncio = True):
    """
    Retrieves the proper event loop to be used, giving preference
    to the currently set loop and falling back to the main
    instance when no loop is available.
    """

    if ensure: ensure_main(factory = factory)
    return AbstractBase.get_loop(asyncio = asyncio) or\
        get_main(factory = factory)
def get_event_loop(*args, **kwargs):
    """
    Alias to the get_loop function, provided for compliance
    with the typical asyncio naming convention.
    """

    return get_loop(*args, **kwargs)
def get_poll():
    """
    Retrieves the poll object associated with the current main
    instance, returning an invalid value when no main instance
    is currently set.
    """

    main = get_main()
    return main.poll if main else None
def build_future(asyncio = True):
    """
    Builds a new future object using the current main instance,
    returning an invalid value when no main instance exists.
    """

    main = get_main()
    return main.build_future(asyncio = asyncio) if main else None
def ensure(coroutine, args = None, kwargs = None, thread = None):
    """
    Ensures the execution of the provided coroutine under the
    currently defined event loop.

    :type coroutine: Coroutine
    :param coroutine: The coroutine that is going to have its
    execution ensured.
    :type args: List
    :param args: The list of (unnamed) arguments for the coroutine.
    :type kwargs: Dictionary
    :param kwargs: The keyword arguments for the coroutine.
    :type thread: bool
    :param thread: If a thread based execution should be used.
    :return: The resulting value of the ensure operation on the
    underlying event loop.
    """

    # normalizes the unset container values into fresh instances,
    # avoiding the (dangerous) shared mutable default argument issue
    if args is None: args = []
    if kwargs is None: kwargs = {}

    loop = get_loop()
    return loop.ensure(
        coroutine,
        args = args,
        kwargs = kwargs,
        thread = thread
    )
def ensure_pool(coroutine, args = None, kwargs = None):
    """
    Ensures the execution of the provided coroutine using a
    thread based (pool) execution model.

    :type coroutine: Coroutine
    :param coroutine: The coroutine that is going to have its
    execution ensured.
    :type args: List
    :param args: The list of (unnamed) arguments for the coroutine.
    :type kwargs: Dictionary
    :param kwargs: The keyword arguments for the coroutine.
    :return: The resulting value of the ensure operation.
    """

    # normalizes the unset container values into fresh instances,
    # avoiding the (dangerous) shared mutable default argument issue
    if args is None: args = []
    if kwargs is None: kwargs = {}

    return ensure(
        coroutine,
        args = args,
        kwargs = kwargs,
        thread = True
    )
# determines if the diagnostics mode is enabled (via configuration)
# and selects the concrete base class to be exported accordingly
is_diag = config.conf("DIAG", False, cast = bool)
Base = DiagBase if is_diag else AbstractBase
|
#!/usr/local/opt/python/bin/python
#author: Tobias Hofmann, tobiashofmann@gmx.net
import os
import sys
import re
import glob
import shutil
import argparse
import commands
import subprocess
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#%%% Input %%%
# Complete path function
class CompletePath(argparse.Action):
	"""give the full path of an input file/folder"""
	def __call__(self, parser, namespace, values, option_string=None):
		# expand a possible "~" prefix and normalize to an absolute path
		full_path = os.path.abspath(os.path.expanduser(values))
		setattr(namespace, self.dest, full_path)
# Get arguments
def get_args():
	"""Build the command line interface and return the parsed arguments."""
	parser = argparse.ArgumentParser(
		description="Assemble trimmed Illumina read files (fastq)",
		formatter_class=argparse.ArgumentDefaultsHelpFormatter
	)
	# mandatory input/output locations (normalized to absolute paths)
	parser.add_argument('--input', required=True, action=CompletePath, default=None,
		help='Call the folder that contains the trimmed reads, organized in a separate subfolder for each sample. The name of the subfolder has to start with the sample name, delimited with an underscore [_]')
	parser.add_argument('--output', required=True, action=CompletePath, default=None,
		help='The output directory where results will be safed')
	# assembler binary and tuning options
	parser.add_argument('--trinity', default="/usr/local/bin/trinityrnaseq_r20140717/Trinity", action=CompletePath,
		help='The path to the Trinity executable')
	parser.add_argument('--contig_length', type=int, default=200,
		help='Set the minimum contig length for Trinity assembly. Contigs that are shorter than this threshold will be discarded.')
	parser.add_argument('--cores', type=int, default=1,
		help='For parallel processing you can set the number of cores you want to run Trinity on.')
	return parser.parse_args()
# Preparation for calling input variables and files
# Parse the command line arguments (input variables and files)
args = get_args()

# Set the working directory, creating the stats folder when missing
out_folder = args.output
out_dir = "%s/stats" %out_folder
if not os.path.exists(out_dir): os.makedirs(out_dir)

# Remaining input variables, taken from the parsed arguments
input_folder = args.input
min_length = args.contig_length
trinity = args.trinity
cores = args.cores
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#%%% Functions %%%
def assembly_trinity(forw,backw,output_folder,id_sample):
print "De-novo assembly with Trinity of sample %s:" %id_sample
command = [
trinity,
"--seqType",
"fq",
"--left",
forw,
"--right",
backw,
"--CPU",
str(cores),
"--min_contig_length",
str(min_length),
"--JM",
"20G",
"--output",
output_folder
]
try:
print "Building contigs........"
with open(os.path.join(output_folder, "%s_trinity_screen_out.txt" %id_sample), 'w') as log_err_file:
p = subprocess.Popen(command, stdout=log_err_file)
p.communicate()
print "%s assembled. Trinity-stats are printed into %s" %(id_sample, os.path.join(output_folder, "%s_trinity_screen_out.txt" %sample_id))
except:
print "Could not assemble %s" %id_sample
def get_stats(sample_output_folder,sample_id):
print "Extracting statistics for", sample_id
# Read counts
read_count_cmd = subprocess.Popen(["cat", "%s/both.fa.read_count" %sample_output_folder], stdout=subprocess.PIPE)
read_count = read_count_cmd.communicate()[0]
# Assembled read counts
assembled_reads_cmd = subprocess.Popen(["wc", "-l", "%s/chrysalis/readsToComponents.out.sort" %sample_output_folder], stdout=subprocess.PIPE)
assembled_reads = assembled_reads_cmd.communicate()[0]
assembled_reads_count, file = assembled_reads.split(" ")
# Contig count
unimportant = ""
contig_count_cmd = subprocess.Popen(["tail", "-n", "1", "%s/chrysalis/readsToComponents.out.sort" %sample_output_folder], stdout=subprocess.PIPE)
contig_count_pre = contig_count_cmd.communicate()[0]
print contig_count_pre
contig_count, header, percent, sequence = contig_count_pre.split("\t")
with open(os.path.join(sample_output_folder, "%s_stats.txt" %sample_id), 'w') as stat_file:
stat_file.write("Statistics for sample %s\n" %sample_id)
stat_file.write("Read-count in trimmed fastq read-files : %s" %read_count)
stat_file.write("Reads assembled into contigs : %s\n" %assembled_reads_count)
stat_file.write("Assembled contigs : %s\n" %contig_count)
def cleanup_trinity_assembly_folder(sample_output_folder, sample_id):
# This function is copied (and slightly modified) from phyluce, written by Brant Faircloth
print "Removing unnecessary files from the Trinity folder for %s" %sample_id
files = glob.glob(os.path.join(sample_output_folder, '*'))
# check the names to make sure we're not deleting something improperly
names = [os.path.basename(f) for f in files]
try:
assert "Trinity.fasta" in names
assert "%s_trinity_screen_out.txt" %sample_id in names
except:
raise IOError("Neither Trinity.fasta nor %s_trinity_screen_out.txt were found in output." %sample_id)
for file in files:
if not os.path.basename(file) in ("Trinity.fasta", "%s_trinity_screen_out.txt" %sample_id, "%s_stats.txt" %sample_id):
if os.path.isfile(file) or os.path.islink(file):
os.remove(file)
elif os.path.isdir(file):
shutil.rmtree(file)
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#%%% Workflow %%%
print "\n\nRunning Trinity parallel on %d cores" %cores
for subfolder, dirs, files in os.walk(input_folder):
subfolder_path_elements = re.split("%s/" %input_folder, subfolder)
if subfolder_path_elements[-1] != input_folder:
sample_folder = subfolder_path_elements[-1]
sample_id = re.split("_", sample_folder)[0]
# Loop through each sample-folder and find read-files
sample_output_folder = "%s/%s" %(out_dir,sample_id)
if not os.path.exists(sample_output_folder):
os.makedirs(sample_output_folder)
for misc1, misc2, fastq in os.walk(subfolder):
forward = ""
backward = ""
for element in fastq:
if sample_id in element and element.endswith("READ1.fastq"):
forward = "%s/%s" %(subfolder,element)
if sample_id in element and element.endswith("READ2.fastq"):
backward = "%s/%s" %(subfolder,element)
if forward != "" and backward != "":
print "\n", "#" * 50
print "Processing sample", sample_id, "\n"
assembly_trinity(forward,backward,sample_output_folder,sample_id)
get_stats(sample_output_folder,sample_id)
cleanup_trinity_assembly_folder(sample_output_folder,sample_id)
print "\n", "#" * 50
mv_cmd = "mv %s/Trinity.fasta %s/%s.fasta" %(sample_output_folder,out_folder,sample_id)
os.system(mv_cmd)
else:
print "\nError: Read-files for sample %s could not be found. Please check if subfolders/sample-folders are named in this pattern: 'sampleID_clean' and if the cleaned fastq files in the sample-folder end with 'READ1.fastq' and 'READ2.fastq' respectively." %sample_id
raise SystemExit
add abyss assembler
#!/usr/local/opt/python/bin/python
#author: Tobias Hofmann, tobiashofmann@gmx.net
import os
import sys
import re
import glob
import shutil
import argparse
import commands
import subprocess
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#%%% Input %%%
# Complete path function
class CompletePath(argparse.Action):
	"""give the full path of an input file/folder"""
	def __call__(self, parser, namespace, values, option_string=None):
		# user expansion first ("~"), then normalization to absolute form
		resolved = os.path.abspath(os.path.expanduser(values))
		setattr(namespace, self.dest, resolved)
# Get arguments
def get_args():
	"""Build the command line interface and return the parsed arguments."""
	parser = argparse.ArgumentParser(
		description="Assemble trimmed Illumina read files (fastq)",
		formatter_class=argparse.ArgumentDefaultsHelpFormatter
	)
	# mandatory input/output locations (normalized to absolute paths)
	parser.add_argument('--input', required=True, action=CompletePath, default=None,
		help='Call the folder that contains the trimmed reads, organized in a separate subfolder for each sample. The name of the subfolder has to start with the sample name, delimited with an underscore [_]')
	parser.add_argument('--output', required=True, action=CompletePath, default=None,
		help='The output directory where results will be safed')
	# assembler selection and executable locations
	parser.add_argument('--assembler', choices=["trinity", "abyss"], default="abyss",
		help="""The assembler to use.""")
	parser.add_argument('--trinity', default="/usr/local/bin/trinityrnaseq_r20140717/Trinity", action=CompletePath,
		help='The path to the Trinity executable')
	parser.add_argument('--abyss', default="/usr/local/anaconda/bin/abyss-pe", action=CompletePath,
		help='The path to the abyss executable')
	# assembly tuning options
	parser.add_argument('--kmer', type=int, default=35,
		help='Set the kmer value')
	parser.add_argument('--contig_length', type=int, default=200,
		help='Set the minimum contig length for Trinity assembly. Contigs that are shorter than this threshold will be discarded.')
	parser.add_argument('--single_reads', action='store_true', default=False,
		help='Use this flag if you additionally want to use single reads for the assembly')
	parser.add_argument('--cores', type=int, default=1,
		help='For parallel processing you can set the number of cores you want to run Trinity on.')
	return parser.parse_args()
# Preparation for calling input variables and files
# Parse the command line arguments (input variables and files)
args = get_args()

# Set the working directory, creating the stats folder when missing
out_folder = args.output
out_dir = "%s/stats" %out_folder
if not os.path.exists(out_dir): os.makedirs(out_dir)

# Remaining input variables, taken from the parsed arguments
input_folder = args.input
min_length = args.contig_length
trinity = args.trinity
cores = args.cores
abyss = args.abyss
kmer = args.kmer

# abyss writes into the current working directory, remember it
home_dir = os.getcwd()
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#%%% Functions %%%
def assembly_trinity(forw,backw,output_folder,id_sample):
print "De-novo assembly with Trinity of sample %s:" %id_sample
command = [
trinity,
"--seqType",
"fq",
"--left",
forw,
"--right",
backw,
"--CPU",
str(cores),
"--min_contig_length",
str(min_length),
"--JM",
"20G",
"--output",
output_folder
]
try:
print "Building contigs........"
with open(os.path.join(output_folder, "%s_trinity_screen_out.txt" %id_sample), 'w') as log_err_file:
p = subprocess.Popen(command, stdout=log_err_file)
p.communicate()
print "%s assembled. Trinity-stats are printed into %s" %(id_sample, os.path.join(output_folder, "%s_trinity_screen_out.txt" %sample_id))
except:
print "Could not assemble %s" %id_sample
def assembly_abyss(forw,backw,singlef,singleb,output_folder,id_sample):
print "De-novo assembly with abyss of sample %s:" %id_sample
command = [
abyss,
"k={}".format(kmer),
"j={}".format(cores),
'name={}'.format(id_sample),
'in={} {}'.format(forw,backw)
]
if args.single_reads:
command.append('se={} {}'.format(singlef,singleb))
try:
print "Building contigs........"
with open(os.path.join(output_folder, "%s_abyss_screen_out.txt" %id_sample), 'w') as log_err_file:
p = subprocess.Popen(command, stdout=log_err_file)
p.communicate()
print "%s assembled. Statistics are printed into %s" %(id_sample, os.path.join(output_folder, "%s_abyss_screen_out.txt" %sample_id))
except:
print "Could not assemble %s" %id_sample
def get_stats(sample_output_folder,sample_id):
print "Extracting statistics for", sample_id
# Read counts
read_count_cmd = subprocess.Popen(["cat", "%s/both.fa.read_count" %sample_output_folder], stdout=subprocess.PIPE)
read_count = read_count_cmd.communicate()[0]
# Assembled read counts
assembled_reads_cmd = subprocess.Popen(["wc", "-l", "%s/chrysalis/readsToComponents.out.sort" %sample_output_folder], stdout=subprocess.PIPE)
assembled_reads = assembled_reads_cmd.communicate()[0]
assembled_reads_count, file = assembled_reads.split(" ")
# Contig count
unimportant = ""
contig_count_cmd = subprocess.Popen(["tail", "-n", "1", "%s/chrysalis/readsToComponents.out.sort" %sample_output_folder], stdout=subprocess.PIPE)
contig_count_pre = contig_count_cmd.communicate()[0]
print contig_count_pre
contig_count, header, percent, sequence = contig_count_pre.split("\t")
with open(os.path.join(sample_output_folder, "%s_stats.txt" %sample_id), 'w') as stat_file:
stat_file.write("Statistics for sample %s\n" %sample_id)
stat_file.write("Read-count in trimmed fastq read-files : %s" %read_count)
stat_file.write("Reads assembled into contigs : %s\n" %assembled_reads_count)
stat_file.write("Assembled contigs : %s\n" %contig_count)
def cleanup_trinity_assembly_folder(sample_output_folder, sample_id):
# This function is copied (and slightly modified) from phyluce, written by Brant Faircloth
print "Removing unnecessary files from the Trinity folder for %s" %sample_id
files = glob.glob(os.path.join(sample_output_folder, '*'))
# check the names to make sure we're not deleting something improperly
names = [os.path.basename(f) for f in files]
try:
assert "Trinity.fasta" in names
assert "%s_trinity_screen_out.txt" %sample_id in names
except:
raise IOError("Neither Trinity.fasta nor %s_trinity_screen_out.txt were found in output." %sample_id)
for file in files:
if not os.path.basename(file) in ("Trinity.fasta", "%s_trinity_screen_out.txt" %sample_id, "%s_stats.txt" %sample_id):
if os.path.isfile(file) or os.path.islink(file):
os.remove(file)
elif os.path.isdir(file):
shutil.rmtree(file)
#XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#%%% Workflow %%%
print "\n\nRunning %s parallel on %d cores" %(args.assembler,cores)
for subfolder, dirs, files in os.walk(input_folder):
subfolder_path_elements = re.split("%s/" %input_folder, subfolder)
if subfolder_path_elements[-1] != input_folder:
sample_folder = subfolder_path_elements[-1]
sample_id = re.split("_", sample_folder)[0]
# Loop through each sample-folder and find read-files
sample_output_folder = "%s/%s" %(out_dir,sample_id)
if not os.path.exists(sample_output_folder):
os.makedirs(sample_output_folder)
for misc1, misc2, fastq in os.walk(subfolder):
forward = ""
backward = ""
single_f = ""
single_b = ""
for element in fastq:
if sample_id in element and element.endswith("READ1.fastq"):
forward = "%s/%s" %(subfolder,element)
if sample_id in element and element.endswith("READ2.fastq"):
backward = "%s/%s" %(subfolder,element)
if sample_id in element and element.endswith("READ1-single.fastq"):
single_f = "%s/%s" %(subfolder,element)
if sample_id in element and element.endswith("READ2-single.fastq"):
single_b = "%s/%s" %(subfolder,element)
if forward != "" and backward != "":
print "\n", "#" * 50
print "Processing sample", sample_id, "\n"
if args.assembler == "trinity":
assembly_trinity(forward,backward,sample_output_folder,sample_id)
get_stats(sample_output_folder,sample_id)
cleanup_trinity_assembly_folder(sample_output_folder,sample_id)
print "\n", "#" * 50
mv_cmd = "mv %s/Trinity.fasta %s/%s.fasta" %(sample_output_folder,out_folder,sample_id)
os.system(mv_cmd)
elif args.assembler == "abyss":
assembly_abyss(forward,backward,single_f,single_b,sample_output_folder,sample_id)
files = glob.glob(os.path.join(home_dir,'*'))
links = [f for f in files if os.path.islink(f)]
for l in links:
if l.endswith("-contigs.fa"):
contig_file = os.path.realpath(l)
mv_contig = "mv %s %s/../../%s-contigs.fasta" %(contig_file,sample_output_folder,sample_id)
os.system(mv_contig)
mv_cmd1 = "mv %s/%s* %s" %(home_dir,sample_id,sample_output_folder)
os.system(mv_cmd1)
mv_cmd2 = "mv %s/coverage.hist %s" %(home_dir,sample_output_folder)
os.system(mv_cmd2)
else:
print "\nError: Read-files for sample %s could not be found. Please check if subfolders/sample-folders are named in this pattern: 'sampleID_clean' and if the cleaned fastq files in the sample-folder end with 'READ1.fastq' and 'READ2.fastq' respectively." %sample_id
raise SystemExit
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from builtins import object
from monolithe.lib import SDKUtils
class SpecificationAttribute(object):
    """ Define an attribute of an object
    """

    def __init__(self, specification=None, data=None):
        """ Define an attribute

            Example:
                name: associatedGatewayID
                local_name: associated_gateway_id
                local_type: str
        """
        # main attributes (local_* values are derived by the setters)
        self.description = None
        self.local_name = None
        self.local_type = None

        # other attributes (raw characteristics of the attribute)
        self._name = None
        self.channel = None
        self.allowed_chars = None
        self.allowed_choices = None
        self.autogenerated = False
        self.availability = None
        self.creation_only = False
        self.default_order = False
        self.default_value = None
        self.deprecated = False
        self.filterable = False
        self.format = "free"
        self.max_length = None
        self.max_value = None
        self.min_length = None
        self.min_value = None
        self.orderable = False
        self.read_only = False
        self.required = False
        self.unique = False
        self.unique_scope = None
        self._type = None
        self.exposed = False
        self.transient = False
        self.subtype = None
        self.userlabel = None

        self.specification = specification

        # load the characteristics from the provided data (if any)
        if data:
            self.from_dict(data)

    def _language(self):
        """ Returns the target SDK language from the attached monolithe
            configuration, defaulting to 'python' when unavailable. """
        has_config = self.specification and self.specification.monolithe_config
        return self.specification.monolithe_config.language if has_config else 'python'

    @property
    def type(self):
        """ The (massaged) remote type of the attribute. """
        return self._type

    @type.setter
    def type(self, value):
        """ Sets the type, deriving the language specific local type. """
        self._type = SDKUtils.massage_type_name(type_name=value)
        self.local_type = SDKUtils.get_type_name_in_language(type_name=value, sub_type=self.subtype, language=self._language())

    @property
    def name(self):
        """ The (remote) name of the attribute. """
        return self._name

    @name.setter
    def name(self, value):
        """ Sets the name, deriving the language specific local name. """
        self._name = value
        language = self._language()
        if self.specification and self.specification.monolithe_config:
            mapped = self.specification.monolithe_config.map_attribute(self.specification.rest_name, value)
            self.local_name = SDKUtils.get_idiomatic_name_in_language(name=mapped, language=language)
        else:
            self.local_name = SDKUtils.get_idiomatic_name_in_language(name=value, language=language)

    def from_dict(self, data):
        """ Populate the attribute from the provided dictionary. """
        try:
            # this must be done before setting the type
            self.subtype = data.get("subtype", None)

            # mandatory characteristics
            self.name = data["name"]
            self.description = data["description"]
            self.type = data["type"]

            # optional characteristics
            self.allowed_chars = data.get("allowed_chars", None)
            self.allowed_choices = data.get("allowed_choices", None)
            self.autogenerated = data.get("autogenerated", False)
            self.channel = data.get("channel", None)
            self.creation_only = data.get("creation_only", False)
            self.default_order = data.get("default_order", False)
            self.default_value = data.get("default_value", None)
            self.deprecated = data.get("deprecated", False)
            self.exposed = data.get("exposed", False)
            self.filterable = data.get("filterable", False)
            self.format = data.get("format", "free")
            self.max_length = data.get("max_length", None)
            self.max_value = data.get("max_value", None)
            self.min_length = data.get("min_length", None)
            self.min_value = data.get("min_value", None)
            self.orderable = data.get("orderable", False)
            self.read_only = data.get("read_only", False)
            self.required = data.get("required", False)
            self.transient = data.get("transient", False)
            self.unique = data.get("unique", False)
            self.unique_scope = data.get("unique_scope", None)
            self.userlabel = data.get("userlabel", None)

        except Exception as ex:
            raise Exception("Unable to parse attribute %s for specification %s: %s" % (self.name, self.specification.rest_name, ex))

    def to_dict(self):
        """ Transform an attribute to a dict
        """
        def non_empty(value):
            # normalizes empty (zero length) values to None
            return value if value and len(value) else None

        data = {}
        data["name"] = self.name
        data["description"] = non_empty(self.description)
        data["type"] = non_empty(self.type)
        data["allowed_chars"] = non_empty(self.allowed_chars)
        data["allowed_choices"] = self.allowed_choices
        data["autogenerated"] = self.autogenerated
        data["channel"] = non_empty(self.channel)
        data["creation_only"] = self.creation_only
        data["default_order"] = self.default_order
        data["default_value"] = non_empty(self.default_value)
        data["deprecated"] = self.deprecated
        data["exposed"] = self.exposed
        data["filterable"] = self.filterable
        data["format"] = non_empty(self.format)
        data["max_length"] = int(self.max_length) if self.max_length is not None else None
        data["max_value"] = int(self.max_value) if self.max_value is not None else None
        data["min_length"] = int(self.min_length) if self.min_length is not None else None
        data["min_value"] = int(self.min_value) if self.min_value is not None else None
        data["orderable"] = self.orderable
        data["read_only"] = self.read_only
        data["required"] = self.required
        data["transient"] = self.transient
        data["unique"] = self.unique
        data["uniqueScope"] = non_empty(self.unique_scope)
        data["subtype"] = non_empty(self.subtype)
        data["userlabel"] = non_empty(self.userlabel)

        return data
bool defaults
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
from builtins import object
from monolithe.lib import SDKUtils
class SpecificationAttribute(object):
""" Define an attribute of an object
"""
def __init__(self, specification=None, data=None):
""" Define an attribute
Example:
name: associatedGatewayID
local_name: associated_gateway_id
local_type: str
"""
# Main attributes
self.description = None
self.local_name = None
self.local_type = None
# Other attributes
self._name = None
self.channel = None
self.allowed_chars = None
self.allowed_choices = None
self.autogenerated = False
self.availability = None
self.creation_only = False
self.default_order = False
self.default_value = None
self.deprecated = False
self.filterable = False
self.format = "free"
self.max_length = None
self.max_value = None
self.min_length = None
self.min_value = None
self.orderable = False
self.read_only = False
self.required = False
self.unique = False
self.unique_scope = None
self._type = None
self.exposed = False
self.transient = False
self.subtype = None
self.userlabel = None
self.specification = specification
# Load information from data
if data:
self.from_dict(data)
@property
def type(self):
"""
"""
return self._type
@type.setter
def type(self, value):
"""
"""
language = self.specification.monolithe_config.language if self.specification and self.specification.monolithe_config else 'python'
self._type = SDKUtils.massage_type_name(type_name=value)
self.local_type = SDKUtils.get_type_name_in_language(type_name=value, sub_type=self.subtype, language=language)
@property
def name(self):
"""
"""
return self._name
@name.setter
def name(self, value):
"""
"""
self._name = value
language = self.specification.monolithe_config.language if self.specification and self.specification.monolithe_config else 'python'
if self.specification and self.specification.monolithe_config:
self.local_name = SDKUtils.get_idiomatic_name_in_language(name=self.specification.monolithe_config.map_attribute(self.specification.rest_name, value), language=language)
else:
self.local_name = SDKUtils.get_idiomatic_name_in_language(name=value, language=language)
def from_dict(self, data):
"""
"""
try:
# this must be done before setting the type
self.subtype = data["subtype"] if "subtype" in data else None
# mandatory characteristics
self.name = data["name"]
self.description = data["description"]
self.type = data["type"]
# optional characteristics
self.allowed_chars = data["allowed_chars"] if "allowed_chars" in data else None
self.allowed_choices = data["allowed_choices"] if "allowed_choices" in data else None
self.autogenerated = data["autogenerated"] if "autogenerated" in data else False
self.channel = data["channel"] if "channel" in data else None
self.creation_only = data["creation_only"] if "creation_only" in data else False
self.default_order = data["default_order"] if "default_order" in data else False
self.default_value = data["default_value"] if "default_value" in data else "false" if self.type = "boolean" else None
self.deprecated = data["deprecated"] if "deprecated" in data else False
self.exposed = data["exposed"] if "exposed" in data else False
self.filterable = data["filterable"] if "filterable" in data else False
self.format = data["format"] if "format" in data else "free"
self.max_length = data["max_length"] if "max_length" in data else None
self.max_value = data["max_value"] if "max_value" in data else None
self.min_length = data["min_length"] if "min_length" in data else None
self.min_value = data["min_value"] if "min_value" in data else None
self.orderable = data["orderable"] if "orderable" in data else False
self.read_only = data["read_only"] if "read_only" in data else False
self.required = data["required"] if "required" in data else False
self.transient = data["transient"] if "transient" in data else False
self.unique = data["unique"] if "unique" in data else False
self.unique_scope = data["unique_scope"] if "unique_scope" in data else None
self.userlabel = data["userlabel"] if "userlabel" in data else None
except Exception as ex:
raise Exception("Unable to parse attribute %s for specification %s: %s" % (self.name, self.specification.rest_name, ex))
def to_dict(self):
    """ Serializes this attribute specification into a plain
    dictionary, normalizing empty or unset string characteristics
    to none values and coercing the numeric limits to integers. """

    def _string(value):
        # empty or unset string characteristics are exported as none
        return value if value and len(value) else None

    def _integer(value):
        # numeric limits are exported as proper integers when set
        return int(value) if value is not None else None

    data = {
        # mandatory characteristics
        "name": self.name,
        "description": _string(self.description),
        "type": _string(self.type),
        # optional characteristics
        "allowed_chars": _string(self.allowed_chars),
        "allowed_choices": self.allowed_choices,
        "autogenerated": self.autogenerated,
        "channel": _string(self.channel),
        "creation_only": self.creation_only,
        "default_order": self.default_order,
        "default_value": _string(self.default_value),
        "deprecated": self.deprecated,
        "exposed": self.exposed,
        "filterable": self.filterable,
        "format": _string(self.format),
        "max_length": _integer(self.max_length),
        "max_value": _integer(self.max_value),
        "min_length": _integer(self.min_length),
        "min_value": _integer(self.min_value),
        "orderable": self.orderable,
        "read_only": self.read_only,
        "required": self.required,
        "transient": self.transient,
        "unique": self.unique,
        # note: camelCase key kept as-is for output compatibility
        "uniqueScope": _string(self.unique_scope),
        "subtype": _string(self.subtype),
        "userlabel": _string(self.userlabel),
    }
    return data
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds desktop browsers that can be controlled by telemetry."""
import logging
import os
import platform
import subprocess
import sys
from telemetry.core import browser
from telemetry.core import possible_browser
from telemetry.core import profile_types
from telemetry.core.chrome import desktop_browser_backend
from telemetry.core.platform import linux_platform_backend
from telemetry.core.platform import mac_platform_backend
from telemetry.core.platform import win_platform_backend
ALL_BROWSER_TYPES = ','.join([
'exact',
'release',
'debug',
'canary',
'content-shell-debug',
'content-shell-release',
'system'])
class PossibleDesktopBrowser(possible_browser.PossibleBrowser):
  """A desktop browser that can be controlled."""

  def __init__(self, browser_type, options, executable, flash_path,
               is_content_shell):
    super(PossibleDesktopBrowser, self).__init__(browser_type, options)
    # executable: path to the browser binary; flash_path: PPAPI flash
    # plugin path (or None); is_content_shell: whether the binary is a
    # content_shell build rather than full Chromium.
    self._local_executable = executable
    self._flash_path = flash_path
    self._is_content_shell = is_content_shell

  def __repr__(self):
    return 'PossibleDesktopBrowser(browser_type=%s)' % self.browser_type

  # Constructs a browser.
  # Returns a tuple of the form: (browser, backend)
  def _CreateBrowserInternal(self, delete_profile_dir_after_run):
    backend = desktop_browser_backend.DesktopBrowserBackend(
        self._options, self._local_executable, self._flash_path,
        self._is_content_shell,
        delete_profile_dir_after_run=delete_profile_dir_after_run)
    # Pick the platform backend matching the host OS; the browser backend
    # above is shared across all desktop platforms.
    if sys.platform.startswith('linux'):
      p = linux_platform_backend.LinuxPlatformBackend()
    elif sys.platform == 'darwin':
      p = mac_platform_backend.MacPlatformBackend()
    elif sys.platform == 'win32':
      p = win_platform_backend.WinPlatformBackend()
    else:
      raise NotImplementedError()
    b = browser.Browser(backend, p)
    backend.SetBrowser(b)
    return (b, backend)

  def Create(self):
    """Creates the browser, seeding it with a dirty profile when the
    options request a profile type that has a registered creator."""
    # If a dirty profile is needed, instantiate an initial browser object and
    # use that to create a dirty profile.
    creator_class = profile_types.GetProfileCreator(self.options.profile_type)
    if creator_class:
      logging.info(
          'Creating a dirty profile of type: %s', self.options.profile_type)
      # The profile directory is deliberately NOT deleted on exit here, so
      # that it can be handed to the second browser created below.
      (b, backend) = \
          self._CreateBrowserInternal(delete_profile_dir_after_run=False)
      with b as b:
        creator = creator_class(b)
        creator.CreateProfile()
        dirty_profile_dir = backend.profile_directory
        logging.info(
            "Dirty profile created succesfully in '%s'", dirty_profile_dir)
      # Now create another browser to run tests on using the dirty profile
      # we just created.
      (b, backend) = \
          self._CreateBrowserInternal(delete_profile_dir_after_run=True)
      backend.SetProfileDirectory(dirty_profile_dir)
    else:
      (b, backend) = \
          self._CreateBrowserInternal(delete_profile_dir_after_run=True)
    return b

  def SupportsOptions(self, options):
    """Returns False when extensions are requested for a content shell
    build (which cannot load them); True otherwise."""
    if (len(options.extensions_to_load) != 0) and self._is_content_shell:
      return False
    return True
def FindAllAvailableBrowsers(options):
  """Finds all the desktop browsers available on this machine.

  Args:
    options: browser options; chrome_root, browser_executable and
        profile settings are consulted when present.

  Returns:
    A list of PossibleDesktopBrowser instances. Returns an empty list when
    browsers were found but no DISPLAY is available on Linux.
  """
  browsers = []

  # On Linux a DISPLAY is required to actually drive a browser.
  has_display = True
  if sys.platform.startswith('linux') and os.getenv('DISPLAY') is None:
    has_display = False

  # Look for a browser in the standard chrome build locations.
  if options.chrome_root:
    chrome_root = options.chrome_root
  else:
    chrome_root = os.path.join(os.path.dirname(__file__),
                               '..', '..', '..', '..', '..')

  if sys.platform == 'darwin':
    chromium_app_name = 'Chromium.app/Contents/MacOS/Chromium'
    content_shell_app_name = 'Content Shell.app/Contents/MacOS/Content Shell'
    flash_path = os.path.join(
        chrome_root, 'third_party', 'adobe', 'flash', 'binaries', 'ppapi',
        'mac', 'PepperFlashPlayer.plugin')
  elif sys.platform.startswith('linux'):
    chromium_app_name = 'chrome'
    content_shell_app_name = 'content_shell'
    linux_dir = 'linux'
    if platform.architecture()[0] == '64bit':
      linux_dir = 'linux_x64'
    flash_path = os.path.join(
        chrome_root, 'third_party', 'adobe', 'flash', 'binaries', 'ppapi',
        linux_dir, 'libpepflashplayer.so')
  elif sys.platform.startswith('win'):
    chromium_app_name = 'chrome.exe'
    content_shell_app_name = 'content_shell.exe'
    flash_path = os.path.join(
        chrome_root, 'third_party', 'adobe', 'flash', 'binaries', 'ppapi',
        'win', 'pepflashplayer.dll')
  else:
    raise Exception('Platform not recognized')

  # Flash is optional: fall back to running without it if not present.
  if flash_path and not os.path.exists(flash_path):
    logging.warning(
        'Could not find flash at %s. Running without flash.\n\n'
        'To fix this see http://go/read-src-internal', flash_path)
    flash_path = None

  # Add the explicit browser executable if given.
  if options.browser_executable:
    normalized_executable = os.path.expanduser(options.browser_executable)
    if os.path.exists(normalized_executable):
      browsers.append(PossibleDesktopBrowser('exact', options,
                                             normalized_executable, flash_path,
                                             False))
    else:
      logging.warning('%s specified by browser_executable does not exist',
                      normalized_executable)

  build_dirs = ['build',
                'out',
                'sconsbuild',
                'xcodebuild']

  def AddIfFound(browser_type, type_dir, app_name, content_shell):
    # Registers the first build of app_name found under the known build
    # output directories; returns whether one was found.
    for build_dir in build_dirs:
      app = os.path.join(chrome_root, build_dir, type_dir, app_name)
      if os.path.exists(app):
        browsers.append(PossibleDesktopBrowser(browser_type, options,
                                               app, flash_path, content_shell))
        return True
    return False

  # Add local builds
  AddIfFound('debug', 'Debug', chromium_app_name, False)
  AddIfFound('content-shell-debug', 'Debug', content_shell_app_name, True)
  AddIfFound('release', 'Release', chromium_app_name, False)
  AddIfFound('content-shell-release', 'Release', content_shell_app_name, True)

  # Mac-specific options.
  if sys.platform == 'darwin':
    mac_canary = ('/Applications/Google Chrome Canary.app/'
                  'Contents/MacOS/Google Chrome Canary')
    mac_system = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
    if os.path.exists(mac_canary):
      browsers.append(PossibleDesktopBrowser('canary', options,
                                             mac_canary, None, False))
    if os.path.exists(mac_system):
      browsers.append(PossibleDesktopBrowser('system', options,
                                             mac_system, None, False))

  # Linux specific options.
  if sys.platform.startswith('linux'):
    # Look for a google-chrome instance.
    found = False
    try:
      with open(os.devnull, 'w') as devnull:
        found = subprocess.call(['google-chrome', '--version'],
                                stdout=devnull, stderr=devnull) == 0
    except OSError:
      pass
    if found:
      browsers.append(PossibleDesktopBrowser('system', options,
                                             'google-chrome', None, False))

  # Win32-specific options.
  if sys.platform.startswith('win'):
    system_path = os.path.join('Google', 'Chrome', 'Application')
    canary_path = os.path.join('Google', 'Chrome SxS', 'Application')
    win_search_paths = [os.getenv('PROGRAMFILES(X86)'),
                        os.getenv('PROGRAMFILES'),
                        os.getenv('LOCALAPPDATA')]
    for path in win_search_paths:
      if not path:
        continue
      if AddIfFound('canary', os.path.join(path, canary_path),
                    chromium_app_name, False):
        break
    for path in win_search_paths:
      if not path:
        continue
      if AddIfFound('system', os.path.join(path, system_path),
                    chromium_app_name, False):
        break

  # Truthiness instead of len(); browsers found without a display are
  # unusable, so report and return nothing.
  if browsers and not has_display:
    logging.warning(
        'Found (%s), but you do not have a DISPLAY environment set.',
        ','.join([b.browser_type for b in browsers]))
    return []

  return browsers
Revert "[Telemetry] Use PPAPI flash on Mac."
This reverts commit:
https://src.chromium.org/viewvc/chrome?revision=209509&view=revision
Checking if the timeouts go away.
BUG=257410
TBR=tonyg@chromium.org
Review URL: https://codereview.chromium.org/18749003
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@210283 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Finds desktop browsers that can be controlled by telemetry."""
import logging
import os
import platform
import subprocess
import sys
from telemetry.core import browser
from telemetry.core import possible_browser
from telemetry.core import profile_types
from telemetry.core.chrome import desktop_browser_backend
from telemetry.core.platform import linux_platform_backend
from telemetry.core.platform import mac_platform_backend
from telemetry.core.platform import win_platform_backend
ALL_BROWSER_TYPES = ','.join([
'exact',
'release',
'debug',
'canary',
'content-shell-debug',
'content-shell-release',
'system'])
class PossibleDesktopBrowser(possible_browser.PossibleBrowser):
  """A desktop browser that can be controlled."""

  def __init__(self, browser_type, options, executable, flash_path,
               is_content_shell):
    super(PossibleDesktopBrowser, self).__init__(browser_type, options)
    # executable: path to the browser binary; flash_path: PPAPI flash
    # plugin path (or None); is_content_shell: whether the binary is a
    # content_shell build rather than full Chromium.
    self._local_executable = executable
    self._flash_path = flash_path
    self._is_content_shell = is_content_shell

  def __repr__(self):
    return 'PossibleDesktopBrowser(browser_type=%s)' % self.browser_type

  # Constructs a browser.
  # Returns a tuple of the form: (browser, backend)
  def _CreateBrowserInternal(self, delete_profile_dir_after_run):
    backend = desktop_browser_backend.DesktopBrowserBackend(
        self._options, self._local_executable, self._flash_path,
        self._is_content_shell,
        delete_profile_dir_after_run=delete_profile_dir_after_run)
    # Pick the platform backend matching the host OS; the browser backend
    # above is shared across all desktop platforms.
    if sys.platform.startswith('linux'):
      p = linux_platform_backend.LinuxPlatformBackend()
    elif sys.platform == 'darwin':
      p = mac_platform_backend.MacPlatformBackend()
    elif sys.platform == 'win32':
      p = win_platform_backend.WinPlatformBackend()
    else:
      raise NotImplementedError()
    b = browser.Browser(backend, p)
    backend.SetBrowser(b)
    return (b, backend)

  def Create(self):
    """Creates the browser, seeding it with a dirty profile when the
    options request a profile type that has a registered creator."""
    # If a dirty profile is needed, instantiate an initial browser object and
    # use that to create a dirty profile.
    creator_class = profile_types.GetProfileCreator(self.options.profile_type)
    if creator_class:
      logging.info(
          'Creating a dirty profile of type: %s', self.options.profile_type)
      # The profile directory is deliberately NOT deleted on exit here, so
      # that it can be handed to the second browser created below.
      (b, backend) = \
          self._CreateBrowserInternal(delete_profile_dir_after_run=False)
      with b as b:
        creator = creator_class(b)
        creator.CreateProfile()
        dirty_profile_dir = backend.profile_directory
        logging.info(
            "Dirty profile created succesfully in '%s'", dirty_profile_dir)
      # Now create another browser to run tests on using the dirty profile
      # we just created.
      (b, backend) = \
          self._CreateBrowserInternal(delete_profile_dir_after_run=True)
      backend.SetProfileDirectory(dirty_profile_dir)
    else:
      (b, backend) = \
          self._CreateBrowserInternal(delete_profile_dir_after_run=True)
    return b

  def SupportsOptions(self, options):
    """Returns False when extensions are requested for a content shell
    build (which cannot load them); True otherwise."""
    if (len(options.extensions_to_load) != 0) and self._is_content_shell:
      return False
    return True
def FindAllAvailableBrowsers(options):
  """Finds all the desktop browsers available on this machine.

  Args:
    options: browser options; chrome_root, browser_executable and
        profile settings are consulted when present.

  Returns:
    A list of PossibleDesktopBrowser instances. Returns an empty list when
    browsers were found but no DISPLAY is available on Linux.
  """
  browsers = []

  # On Linux a DISPLAY is required to actually drive a browser.
  has_display = True
  if sys.platform.startswith('linux') and os.getenv('DISPLAY') is None:
    has_display = False

  # Look for a browser in the standard chrome build locations.
  if options.chrome_root:
    chrome_root = options.chrome_root
  else:
    chrome_root = os.path.join(os.path.dirname(__file__),
                               '..', '..', '..', '..', '..')

  if sys.platform == 'darwin':
    chromium_app_name = 'Chromium.app/Contents/MacOS/Chromium'
    content_shell_app_name = 'Content Shell.app/Contents/MacOS/Content Shell'
    # TODO(tonyg): Implement this on mac.
    flash_path = None
  elif sys.platform.startswith('linux'):
    chromium_app_name = 'chrome'
    content_shell_app_name = 'content_shell'
    linux_dir = 'linux'
    if platform.architecture()[0] == '64bit':
      linux_dir = 'linux_x64'
    flash_path = os.path.join(
        chrome_root, 'third_party', 'adobe', 'flash', 'binaries', 'ppapi',
        linux_dir, 'libpepflashplayer.so')
  elif sys.platform.startswith('win'):
    chromium_app_name = 'chrome.exe'
    content_shell_app_name = 'content_shell.exe'
    flash_path = os.path.join(
        chrome_root, 'third_party', 'adobe', 'flash', 'binaries', 'ppapi',
        'win', 'pepflashplayer.dll')
  else:
    raise Exception('Platform not recognized')

  # Flash is optional: fall back to running without it if not present.
  if flash_path and not os.path.exists(flash_path):
    logging.warning(
        'Could not find flash at %s. Running without flash.\n\n'
        'To fix this see http://go/read-src-internal', flash_path)
    flash_path = None

  # Add the explicit browser executable if given.
  if options.browser_executable:
    normalized_executable = os.path.expanduser(options.browser_executable)
    if os.path.exists(normalized_executable):
      browsers.append(PossibleDesktopBrowser('exact', options,
                                             normalized_executable, flash_path,
                                             False))
    else:
      logging.warning('%s specified by browser_executable does not exist',
                      normalized_executable)

  build_dirs = ['build',
                'out',
                'sconsbuild',
                'xcodebuild']

  def AddIfFound(browser_type, type_dir, app_name, content_shell):
    # Registers the first build of app_name found under the known build
    # output directories; returns whether one was found.
    for build_dir in build_dirs:
      app = os.path.join(chrome_root, build_dir, type_dir, app_name)
      if os.path.exists(app):
        browsers.append(PossibleDesktopBrowser(browser_type, options,
                                               app, flash_path, content_shell))
        return True
    return False

  # Add local builds
  AddIfFound('debug', 'Debug', chromium_app_name, False)
  AddIfFound('content-shell-debug', 'Debug', content_shell_app_name, True)
  AddIfFound('release', 'Release', chromium_app_name, False)
  AddIfFound('content-shell-release', 'Release', content_shell_app_name, True)

  # Mac-specific options.
  if sys.platform == 'darwin':
    mac_canary = ('/Applications/Google Chrome Canary.app/'
                  'Contents/MacOS/Google Chrome Canary')
    mac_system = '/Applications/Google Chrome.app/Contents/MacOS/Google Chrome'
    if os.path.exists(mac_canary):
      browsers.append(PossibleDesktopBrowser('canary', options,
                                             mac_canary, None, False))
    if os.path.exists(mac_system):
      browsers.append(PossibleDesktopBrowser('system', options,
                                             mac_system, None, False))

  # Linux specific options.
  if sys.platform.startswith('linux'):
    # Look for a google-chrome instance.
    found = False
    try:
      with open(os.devnull, 'w') as devnull:
        found = subprocess.call(['google-chrome', '--version'],
                                stdout=devnull, stderr=devnull) == 0
    except OSError:
      pass
    if found:
      browsers.append(PossibleDesktopBrowser('system', options,
                                             'google-chrome', None, False))

  # Win32-specific options.
  if sys.platform.startswith('win'):
    system_path = os.path.join('Google', 'Chrome', 'Application')
    canary_path = os.path.join('Google', 'Chrome SxS', 'Application')
    win_search_paths = [os.getenv('PROGRAMFILES(X86)'),
                        os.getenv('PROGRAMFILES'),
                        os.getenv('LOCALAPPDATA')]
    for path in win_search_paths:
      if not path:
        continue
      if AddIfFound('canary', os.path.join(path, canary_path),
                    chromium_app_name, False):
        break
    for path in win_search_paths:
      if not path:
        continue
      if AddIfFound('system', os.path.join(path, system_path),
                    chromium_app_name, False):
        break

  # Truthiness instead of len(); browsers found without a display are
  # unusable, so report and return nothing.
  if browsers and not has_display:
    logging.warning(
        'Found (%s), but you do not have a DISPLAY environment set.',
        ','.join([b.browser_type for b in browsers]))
    return []

  return browsers
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (C) 2008-2012 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hive Netius System. If not, see <http://www.gnu.org/licenses/>.
__author__ = "João Magalhães joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2012 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "GNU General Public License (GPL), Version 3"
""" The license for the module """
import os
import ssl
import time
import types
import errno
import select
import logging
import traceback
import observer
from conn import * #@UnusedWildImport
WSAEWOULDBLOCK = 10035
""" The wsa would block error code meant to be used on
windows environments as a replacement for the would block
error code that indicates the failure to operate on a non
blocking connection """
VALID_ERRORS = (
errno.EWOULDBLOCK,
errno.EAGAIN,
errno.EPERM,
errno.ENOENT,
WSAEWOULDBLOCK
)
""" List containing the complete set of error that represent
non ready operations in a non blocking socket """
SSL_VALID_ERRORS = (
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE
)
""" The list containing the valid error in the handshake
operation of the ssl connection establishment """
STATE_STOP = 1
""" The stop state value, this value is set when the service
is either in the constructed stage or when the service has been
stop normally or with an error """
STATE_START = 2
""" The start state set when the service is in the starting
stage and running, normal state """
STATE_CONFIG = 3
""" The configuration state that is set when the service is
preparing to become started and the configuration attributes
are being set according to pre-determined indications """
STATE_SELECT = 4
""" State to be used when the service is in the select part
of the loop, this is the most frequent state in an idle service
as the service "spends" most of its time in it """
STATE_TICK = 5
""" Tick state representative of the situation where the loop
tick operation is being started and all the pre tick handlers
are going to be called for pre-operations """
STATE_READ = 6
""" Read state that is set when the connection are being read
and the on data handlers are being called, this is the part
where all the logic driven by incoming data is being called """
STATE_WRITE = 7
""" The write state that is set on the writing of data to the
connections, this is a pretty "fast" state as no logic is
associated with it """
STATE_ERRROR = 8
""" The error state to be used when the connection is processing
any error state coming from its main select operation and associated
with a certain connection (very rare) """
STATE_STRINGS = (
"STOP",
"START",
"CONFIG",
"SELECT",
"TICK",
"READ",
"WRITE",
"ERROR"
)
""" Sequence that contains the various strings associated with
the various states for the base service, this may be used to
create an integer to string resolution mechanism """
# initializes the various paths that are going to be used for
# the base files configuration in the complete service infra
# structure, these should include the ssl based files
BASE_PATH = os.path.dirname(__file__)
EXTRAS_PATH = os.path.join(BASE_PATH, "extras")
SSL_KEY_PATH = os.path.join(EXTRAS_PATH, "net.key")
SSL_CER_PATH = os.path.join(EXTRAS_PATH, "net.cer")
class Base(observer.Observable):
    """
    Base network structure to be used by all the network
    capable infra-structures (eg: servers and clients).

    Should handle all the nonblocking event loop so that
    the read and write operations are easy to handle.
    """

    def __init__(self, name = None, handler = None, *args, **kwargs):
        # the name defaults to the concrete class name and the optional
        # logging handler is attached to the logger at load time
        observer.Observable.__init__(self, *args, **kwargs)
        self.name = name or self.__class__.__name__
        self.handler = handler
        self.logger = None
        # selection lists handed to the select call in the main loop
        self.read_l = []
        self.write_l = []
        self.error_l = []
        # currently active connections and the socket to connection map
        self.connections = []
        self.connections_m = {}
        # control flags for the main loop and the (lazy) loading
        self._running = False
        self._loaded = False
        self.set_state(STATE_STOP);

    def load(self):
        """ Loads the internal structures of the service (currently
        only the logging support), repeated calls are ignored. """
        if self._loaded: return
        self.load_logging();
        self._loaded = True

    def load_logging(self, level = logging.DEBUG):
        """ Creates and configures the logger for the service, using
        the class name as the logger name, the requested verbosity
        level and the optional handler given at construction time. """
        logging.basicConfig(format = "%(asctime)s [%(levelname)s] %(message)s")
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(level)
        self.handler and self.logger.addHandler(self.handler)

    def start(self):
        """ Starts the service, blocking the current thread in the
        main loop until the stop method is called or an unhandled
        exception breaks the loop. """
        # triggers the loading of the internal structures of
        # the base structure in case the loading has already
        # been done nothing is done (avoids duplicated load)
        self.load()
        # sets the running flag that controls the running of the
        # main loop and then changes the current state to start
        # as the main loop is going to start
        self._running = True
        self.set_state(STATE_START)
        # enters the main loop operation printing a message
        # to the logger indicating this start, this stage
        # should block the thread until a stop call is made
        self.info("Starting the '%s' service main loop" % self.name)
        try: self.loop()
        except BaseException, exception:
            # normal exceptions are logged together with their traceback
            # with warning level (expected error handling path)
            self.error(exception)
            lines = traceback.format_exc().splitlines()
            for line in lines: self.warning(line)
        except:
            # anything not covered above is considered critical
            self.critical("Critical level loop exception raised")
            lines = traceback.format_exc().splitlines()
            for line in lines: self.error(line)
        finally:
            self.info("Stopping the service's main loop")
            self.set_state(STATE_STOP)

    def stop(self):
        """ Signals the main loop to stop at the next iteration. """
        self._running = False

    def is_empty(self):
        """ Verifies if no socket is currently registered for
        reading, writing or error handling. """
        return not self.read_l and not self.write_l and not self.error_l

    def loop(self):
        """ Runs the main (select driven) loop of the service until
        the running flag is unset by the stop method. """
        # iterates continuously while the running flag
        # is set, once it becomes unset the loop breaks
        # at the next execution cycle
        while self._running:
            # calls the base tick int handler indicating that a new
            # tick loop iteration is going to be started, all the
            # "in between loop" operation should be performed in this
            # callback as this is the "space" they have for execution
            self.ticks()
            # updates the current state to select to indicate
            # that the base service is selecting the connections
            self.set_state(STATE_SELECT)
            # verifies if the current selection list is empty
            # in case it's sleeps for a while and then continues
            # the loop (this avoids error in empty selection)
            is_empty = self.is_empty()
            if is_empty: time.sleep(0.25); continue
            # runs the main selection operation on the current set
            # of connection for each of the three operations returning
            # the resulting active sets for the callbacks
            reads, writes, errors = select.select(
                self.read_l,
                self.write_l,
                self.error_l,
                0.25
            )
            # calls the various callbacks with the selections lists,
            # these are the main entry points for the logic to be executed
            # each of this methods should be implemented in the underlying
            # class instances as no behavior is defined at this inheritance
            # level (abstract class)
            self.reads(reads)
            self.writes(writes)
            self.errors(errors)

    def ticks(self):
        """ Called at the start of each loop iteration, meant to be
        re-implemented by concrete services (only sets state here). """
        self.set_state(STATE_TICK)

    def reads(self, reads):
        """ Called with the sockets ready for reading, meant to be
        re-implemented by concrete services (only sets state here). """
        self.set_state(STATE_READ)

    def writes(self, writes):
        """ Called with the sockets ready for writing, meant to be
        re-implemented by concrete services (only sets state here). """
        self.set_state(STATE_WRITE)

    def errors(self, errors):
        """ Called with the sockets in error state, meant to be
        re-implemented by concrete services (only sets state here). """
        self.set_state(STATE_ERRROR)

    def info_dict(self):
        """ Retrieves a dictionary with diagnostic information on the
        service: load flag, connection count and state string. """
        info = dict()
        info["loaded"] = self._loaded
        info["connections"] = len(self.connections)
        info["state"] = self.get_state_s()
        return info

    def new_connection(self, socket, address, ssl = False):
        """
        Creates a new connection for the provided socket
        object and string based address, the returned
        value should be a workable object.

        @type socket: Socket
        @param socket: The socket object to be encapsulated
        by the object to be created (connection).
        @type address: String
        @param address: The address as a string to be used to
        describe the connection object to be created.
        @type ssl: bool
        @param ssl: If the connection to be created is meant to
        be secured using the ssl framework for encryption.
        @rtype: Connection
        @return: The connection object that encapsulates the
        provided socket and address values.
        """
        return Connection(self, socket, address, ssl = ssl)

    def debug(self, object):
        """ Logs the provided object with the DEBUG level. """
        self.log(object, level = logging.DEBUG)

    def info(self, object):
        """ Logs the provided object with the INFO level. """
        self.log(object, level = logging.INFO)

    def warning(self, object):
        """ Logs the provided object with the WARNING level. """
        self.log(object, level = logging.WARNING)

    def error(self, object):
        """ Logs the provided object with the ERROR level. """
        self.log(object, level = logging.ERROR)

    def critical(self, object):
        """ Logs the provided object with the CRITICAL level. """
        self.log(object, level = logging.CRITICAL)

    def log(self, object, level = logging.INFO):
        """ Logs the provided object into the service logger using
        the requested level, converting non string values through
        the unicode built-in (python 2 semantics). """
        object_t = type(object)
        message = unicode(object) if not object_t in types.StringTypes else object
        self.logger.log(level, message)

    def set_state(self, state):
        """ Updates the current (internal) state integer value. """
        self._state = state

    def get_state_s(self, lower = True):
        """
        Retrieves a string describing the current state
        of the system, this string should be as descriptive
        as possible.

        An optional parameter controls if the string should
        be lower cased or not.

        @type lower: bool
        @param lower: If the returned string should be converted
        into a lower cased version.
        @rtype: String
        @return: A string describing the current sate of the loop
        system, should be as descriptive as possible.
        """
        state_s = STATE_STRINGS[self._state - 1]
        state_s = state_s.lower() if lower else state_s
        return state_s

    def _pending(self, _socket):
        """
        Tries to perform the pending operations in the socket
        and, these operations are set in the pending variable
        of the socket structure.

        The method returns if there are still pending operations
        after this method tick.

        @type _socket: Socket
        @param _socket: The socket object to be checked for
        pending operations and that is going to be used in the
        performing of these operations.
        @rtype: bool
        @return: If there are still pending operations to be
        performed in the provided socket.
        """
        # verifies if the pending attribute exists in the socket
        # and that the value is valid, in case it's not there's
        # no pending operation (method call) to be performed, and
        # as such must return immediately with no pending value
        if not hasattr(_socket, "_pending") or\
            not _socket._pending: return False
        # calls the pending callback method and verifies if the
        # pending value still persists in the socket if that the
        # case returns the is pending value to the caller method
        _socket._pending(_socket)
        is_pending = not _socket._pending == None
        return is_pending

    def _ssl_wrap(self, _socket, key_file = None, cer_file = None, server = True):
        """ Wraps the provided socket into an ssl socket, defaulting
        to the bundled server key/certificate files when none are
        given; the handshake is deferred (do_handshake_on_connect is
        unset) so it can be driven by the non blocking loop. """
        dir_path = os.path.dirname(__file__)
        base_path = os.path.join(dir_path, "../../")
        base_path = os.path.normpath(base_path)
        extras_path = os.path.join(base_path, "extras")
        ssl_path = os.path.join(extras_path, "ssl")
        key_file = key_file or os.path.join(ssl_path, "server.key")
        cer_file = cer_file or os.path.join(ssl_path, "server.cer")
        socket_ssl = ssl.wrap_socket(
            _socket,
            keyfile = key_file,
            certfile = cer_file,
            server_side = server,
            ssl_version = ssl.PROTOCOL_TLSv1,
            do_handshake_on_connect = False
        )
        return socket_ssl

    def _ssl_handshake(self, _socket):
        """ Tries to run the ssl handshake on the provided socket,
        re-scheduling itself as the socket's pending operation when
        the handshake would block (want read/write errors). """
        try:
            _socket.do_handshake()
            _socket._pending = None
        except ssl.SSLError, error:
            error_v = error.args[0]
            # want read/write errors simply mean the handshake must
            # be retried later, anything else is propagated
            if error_v in SSL_VALID_ERRORS:
                _socket._pending = self._ssl_handshake
            else: raise
Removed the unused `types` import.
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (C) 2008-2012 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hive Netius System. If not, see <http://www.gnu.org/licenses/>.
__author__ = "João Magalhães joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2012 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "GNU General Public License (GPL), Version 3"
""" The license for the module """
import os
import ssl
import time
import errno
import select
import logging
import traceback
import observer
from conn import * #@UnusedWildImport
WSAEWOULDBLOCK = 10035
""" The wsa would block error code meant to be used on
windows environments as a replacement for the would block
error code that indicates the failure to operate on a non
blocking connection """
VALID_ERRORS = (
errno.EWOULDBLOCK,
errno.EAGAIN,
errno.EPERM,
errno.ENOENT,
WSAEWOULDBLOCK
)
""" List containing the complete set of error that represent
non ready operations in a non blocking socket """
SSL_VALID_ERRORS = (
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE
)
""" The list containing the valid error in the handshake
operation of the ssl connection establishment """
STATE_STOP = 1
""" The stop state value, this value is set when the service
is either in the constructed stage or when the service has been
stop normally or with an error """
STATE_START = 2
""" The start state set when the service is in the starting
stage and running, normal state """
STATE_CONFIG = 3
""" The configuration state that is set when the service is
preparing to become started and the configuration attributes
are being set according to pre-determined indications """
STATE_SELECT = 4
""" State to be used when the service is in the select part
of the loop, this is the most frequent state in an idle service
as the service "spends" most of its time in it """
STATE_TICK = 5
""" Tick state representative of the situation where the loop
tick operation is being started and all the pre tick handlers
are going to be called for pre-operations """
STATE_READ = 6
""" Read state that is set when the connection are being read
and the on data handlers are being called, this is the part
where all the logic driven by incoming data is being called """
STATE_WRITE = 7
""" The write state that is set on the writing of data to the
connections, this is a pretty "fast" state as no logic is
associated with it """
STATE_ERRROR = 8
""" The error state to be used when the connection is processing
any error state coming from its main select operation and associated
with a certain connection (very rare) """
STATE_STRINGS = (
"STOP",
"START",
"CONFIG",
"SELECT",
"TICK",
"READ",
"WRITE",
"ERROR"
)
""" Sequence that contains the various strings associated with
the various states for the base service, this may be used to
create an integer to string resolution mechanism """
# initializes the various paths that are going to be used for
# the base files configuration in the complete service infra
# structure, these should include the ssl based files
BASE_PATH = os.path.dirname(__file__)
EXTRAS_PATH = os.path.join(BASE_PATH, "extras")
SSL_KEY_PATH = os.path.join(EXTRAS_PATH, "net.key")
SSL_CER_PATH = os.path.join(EXTRAS_PATH, "net.cer")
class Base(observer.Observable):
    """
    Base network structure to be used by all the network
    capable infra-structures (eg: servers and clients).
    Should handle all the nonblocking event loop so that
    the read and write operations are easy to handle.
    """

    def __init__(self, name = None, handler = None, *args, **kwargs):
        """
        Constructor of the service, stores the (optional) name and
        logging handler and initializes the selection lists and the
        connection registries as empty.
        """
        observer.Observable.__init__(self, *args, **kwargs)
        self.name = name or self.__class__.__name__
        self.handler = handler
        self.logger = None
        # lists of sockets handed to select() for read, write and
        # error (exceptional condition) monitoring respectively
        self.read_l = []
        self.write_l = []
        self.error_l = []
        # sequence of active connection objects and the map used
        # to resolve a socket into its connection object
        self.connections = []
        self.connections_m = {}
        self._running = False
        self._loaded = False
        self.set_state(STATE_STOP);

    def load(self):
        # loading is idempotent, only the first call performs the
        # (logging) setup operations, repeated calls are no-ops
        if self._loaded: return
        self.load_logging();
        self._loaded = True

    def load_logging(self, level = logging.DEBUG):
        # creates a logger named after the concrete class and
        # attaches the extra handler when one was provided
        logging.basicConfig(format = "%(asctime)s [%(levelname)s] %(message)s")
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(level)
        self.handler and self.logger.addHandler(self.handler)

    def start(self):
        """
        Starts the service, blocking the calling thread inside the
        main event loop until stop() is called (or an exception
        breaks the loop), the state returns to stop at the end.
        """
        # triggers the loading of the internal structures of
        # the base structure in case the loading has already
        # been done nothing is done (avoids duplicated load)
        self.load()

        # sets the running flag that controls the running of the
        # main loop and then changes the current state to start
        # as the main loop is going to start
        self._running = True
        self.set_state(STATE_START)

        # enters the main loop operation printing a message
        # to the logger indicating this start, this stage
        # should block the thread until a stop call is made
        self.info("Starting the '%s' service main loop" % self.name)
        try: self.loop()
        except BaseException, exception:
            self.error(exception)
            lines = traceback.format_exc().splitlines()
            for line in lines: self.warning(line)
        except:
            self.critical("Critical level loop exception raised")
            lines = traceback.format_exc().splitlines()
            for line in lines: self.error(line)
        finally:
            self.info("Stopping the service's main loop")
            self.set_state(STATE_STOP)

    def stop(self):
        # clearing the flag makes the loop exit at the start of the
        # next iteration, no immediate interruption is performed
        self._running = False

    def is_empty(self):
        # the service is empty when no socket is registered for
        # any of the three selection operations
        return not self.read_l and not self.write_l and not self.error_l

    def loop(self):
        # iterates continuously while the running flag
        # is set, once it becomes unset the loop breaks
        # at the next execution cycle
        while self._running:
            # calls the base tick int handler indicating that a new
            # tick loop iteration is going to be started, all the
            # "in between loop" operation should be performed in this
            # callback as this is the "space" they have for execution
            self.ticks()

            # updates the current state to select to indicate
            # that the base service is selecting the connections
            self.set_state(STATE_SELECT)

            # verifies if the current selection list is empty
            # in case it's sleeps for a while and then continues
            # the loop (this avoids error in empty selection)
            is_empty = self.is_empty()
            if is_empty: time.sleep(0.25); continue

            # runs the main selection operation on the current set
            # of connection for each of the three operations returning
            # the resulting active sets for the callbacks
            reads, writes, errors = select.select(
                self.read_l,
                self.write_l,
                self.error_l,
                0.25
            )

            # calls the various callbacks with the selections lists,
            # these are the main entry points for the logic to be executed
            # each of this methods should be implemented in the underlying
            # class instances as no behavior is defined at this inheritance
            # level (abstract class)
            self.reads(reads)
            self.writes(writes)
            self.errors(errors)

    def ticks(self):
        # called once per loop iteration, subclasses are expected to
        # extend these four handlers with their concrete behavior
        self.set_state(STATE_TICK)

    def reads(self, reads):
        self.set_state(STATE_READ)

    def writes(self, writes):
        self.set_state(STATE_WRITE)

    def errors(self, errors):
        self.set_state(STATE_ERRROR)

    def info_dict(self):
        # builds a minimal diagnostics map with the load flag, the
        # number of active connections and the current state string
        info = dict()
        info["loaded"] = self._loaded
        info["connections"] = len(self.connections)
        info["state"] = self.get_state_s()
        return info

    def new_connection(self, socket, address, ssl = False):
        """
        Creates a new connection for the provided socket
        object and string based address, the returned
        value should be a workable object.

        @type socket: Socket
        @param socket: The socket object to be encapsulated
        by the object to be created (connection).
        @type address: String
        @param address: The address as a string to be used to
        describe the connection object to be created.
        @type ssl: bool
        @param ssl: If the connection to be created is meant to
        be secured using the ssl framework for encryption.
        @rtype: Connection
        @return: The connection object that encapsulates the
        provided socket and address values.
        """
        return Connection(self, socket, address, ssl = ssl)

    def debug(self, object):
        self.log(object, level = logging.DEBUG)

    def info(self, object):
        self.log(object, level = logging.INFO)

    def warning(self, object):
        self.log(object, level = logging.WARNING)

    def error(self, object):
        self.log(object, level = logging.ERROR)

    def critical(self, object):
        self.log(object, level = logging.CRITICAL)

    def log(self, object, level = logging.INFO):
        # coerces non string values into their unicode representation
        # before handing the message to the logger (python 2 idiom)
        object_t = type(object)
        message = unicode(object) if not object_t in types.StringTypes else object
        self.logger.log(level, message)

    def set_state(self, state):
        self._state = state

    def get_state_s(self, lower = True):
        """
        Retrieves a string describing the current state
        of the system, this string should be as descriptive
        as possible.

        An optional parameter controls if the string should
        be lower cased or not.

        @type lower: bool
        @param lower: If the returned string should be converted
        into a lower cased version.
        @rtype: String
        @return: A string describing the current sate of the loop
        system, should be as descriptive as possible.
        """
        # states are one-based, hence the minus one when indexing
        # into the module level sequence of state strings
        state_s = STATE_STRINGS[self._state - 1]
        state_s = state_s.lower() if lower else state_s
        return state_s

    def _pending(self, _socket):
        """
        Tries to perform the pending operations in the socket
        and, these operations are set in the pending variable
        of the socket structure.

        The method returns if there are still pending operations
        after this method tick.

        @type _socket: Socket
        @param _socket: The socket object to be checked for
        pending operations and that is going to be used in the
        performing of these operations.
        @rtype: bool
        @return: If there are still pending operations to be
        performed in the provided socket.
        """
        # verifies if the pending attribute exists in the socket
        # and that the value is valid, in case it's not there's
        # no pending operation (method call) to be performed, and
        # as such must return immediately with no pending value
        if not hasattr(_socket, "_pending") or\
            not _socket._pending: return False

        # calls the pending callback method and verifies if the
        # pending value still persists in the socket if that the
        # case returns the is pending value to the caller method
        _socket._pending(_socket)
        is_pending = not _socket._pending == None
        return is_pending

    def _ssl_wrap(self, _socket, key_file = None, cer_file = None, server = True):
        # resolves the default key and certificate files (relative
        # to the package's extras/ssl directory) and wraps the socket
        # for a deferred (non blocking) ssl handshake
        dir_path = os.path.dirname(__file__)
        base_path = os.path.join(dir_path, "../../")
        base_path = os.path.normpath(base_path)
        extras_path = os.path.join(base_path, "extras")
        ssl_path = os.path.join(extras_path, "ssl")

        key_file = key_file or os.path.join(ssl_path, "server.key")
        cer_file = cer_file or os.path.join(ssl_path, "server.cer")

        socket_ssl = ssl.wrap_socket(
            _socket,
            keyfile = key_file,
            certfile = cer_file,
            server_side = server,
            ssl_version = ssl.PROTOCOL_TLSv1,
            do_handshake_on_connect = False
        )
        return socket_ssl

    def _ssl_handshake(self, _socket):
        # tries to finish the ssl handshake, re-scheduling itself as
        # the socket's pending operation while the underlying transport
        # is not yet ready (want read / want write errors)
        try:
            _socket.do_handshake()
            _socket._pending = None
        except ssl.SSLError, error:
            error_v = error.args[0]
            if error_v in SSL_VALID_ERRORS:
                _socket._pending = self._ssl_handshake
            else: raise
|
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
class AccountPayment(models.Model):
    _inherit = "account.payment"

    @api.depends("journal_id", "partner_id", "partner_type", "is_internal_transfer")
    def _compute_destination_account_id(self):
        """Route membership payments to the subscription account.

        Extends the standard destination account computation: when the
        payment's transaction is linked to memberships (or membership
        requests) that carry a subscription account, that account is
        used instead of the default one.
        """
        res = super(AccountPayment, self)._compute_destination_account_id()
        for payment in self:
            transaction = payment.payment_transaction_id
            # memberships take precedence over membership requests
            if transaction.membership_ids:
                account = transaction.membership_ids.mapped(
                    "product_id.property_subscription_account"
                )
                if account:
                    payment.destination_account_id = account
                    continue
            request_account = transaction.membership_request_ids.mapped(
                "partner_id.subscription_product_id.property_subscription_account"
            )
            if transaction.membership_request_ids and request_account:
                payment.destination_account_id = request_account
        return res
Fix the account for membership payments
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
from odoo.fields import first
class AccountPayment(models.Model):
    _inherit = "account.payment"

    @api.depends("journal_id", "partner_id", "partner_type", "is_internal_transfer")
    def _compute_destination_account_id(self):
        """Route membership related payments to subscription accounts.

        After the standard computation, payments whose transaction is
        linked to memberships (or membership requests) are redirected
        to the matching product subscription account; when none is
        found, the first subscription account configured on any
        membership product is used as a fallback.
        """
        res = super(AccountPayment, self)._compute_destination_account_id()
        for payment in self:
            transaction = payment.payment_transaction_id
            # payments unrelated to memberships keep the standard account
            if not (transaction.membership_ids or transaction.membership_request_ids):
                continue
            if transaction.membership_ids:
                membership_account = transaction.membership_ids.mapped(
                    "product_id.property_subscription_account"
                )
                if membership_account:
                    payment.destination_account_id = membership_account
                    continue
            if transaction.membership_request_ids:
                request_account = transaction.membership_request_ids.mapped(
                    "partner_id.subscription_product_id.property_subscription_account"
                )
                if request_account:
                    payment.destination_account_id = request_account
                    continue
            # no specific account found: fall back to the first
            # subscription account defined on membership products
            fallback_accounts = (
                self.env["product.product"]
                .search([("membership", "=", True)])
                .mapped("property_subscription_account")
            )
            payment.destination_account_id = first(fallback_accounts)
        return res

    def _seek_for_lines(self):
        """Also treat subscription account lines as counterpart lines."""
        self.ensure_one()
        liquidity_lines, counterpart_lines, writeoff_lines = super(
            AccountPayment, self
        )._seek_for_lines()
        membership_accounts = (
            self.env["product.product"]
            .search([("membership", "=", True)])
            .mapped("property_subscription_account")
        )
        for move_line in self.move_id.line_ids:
            if move_line.account_id in membership_accounts:
                counterpart_lines += move_line
        return liquidity_lines, counterpart_lines, writeoff_lines
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from six import moves
from tempest.api.compute import base
from tempest import exceptions
from tempest import test
class ListServersNegativeTestJSON(base.BaseV2ComputeTest):
    """
    Negative tests for the server listing API: filters that match
    nothing must return empty lists, invalid filter values must be
    rejected and deleted servers must not be listed by default.
    """
    force_tenant_isolation = True

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        super(ListServersNegativeTestJSON, cls).setUpClass()
        cls.client = cls.servers_client

        # The following servers are created for use
        # by the test methods in this class. These
        # servers are cleaned up automatically in the
        # tearDownClass method of the super-class.
        cls.existing_fixtures = []
        cls.deleted_fixtures = []
        # recorded before creation so changes-since can match fixtures
        cls.start_time = datetime.datetime.utcnow()
        for x in moves.xrange(2):
            resp, srv = cls.create_test_server()
            cls.existing_fixtures.append(srv)

        # a third server is created and deleted right away so the
        # deleted-server cases have a known fixture to check against
        resp, srv = cls.create_test_server()
        cls.client.delete_server(srv['id'])
        # We ignore errors on termination because the server may
        # be put into ERROR status on a quick spawn, then delete,
        # as the compute node expects the instance local status
        # to be spawning, not deleted. See LP Bug#1061167
        cls.client.wait_for_server_termination(srv['id'],
                                               ignore_error=True)
        cls.deleted_fixtures.append(srv)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_with_a_deleted_server(self):
        # Verify deleted servers do not show by default in list servers
        # List servers and verify server not returned
        resp, body = self.client.list_servers()
        servers = body['servers']
        deleted_ids = [s['id'] for s in self.deleted_fixtures]
        actual = [srv for srv in servers
                  if srv['id'] in deleted_ids]
        self.assertEqual('200', resp['status'])
        self.assertEqual([], actual)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_non_existing_image(self):
        # Listing servers for a non existing image returns empty list
        non_existing_image = '1234abcd-zzz0-aaa9-ppp3-0987654abcde'
        resp, body = self.client.list_servers(dict(image=non_existing_image))
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_non_existing_flavor(self):
        # Listing servers by non existing flavor returns empty list
        non_existing_flavor = 1234
        resp, body = self.client.list_servers(dict(flavor=non_existing_flavor))
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_non_existing_server_name(self):
        # Listing servers for a non existent server name returns empty list
        non_existing_name = 'junk_server_1234'
        resp, body = self.client.list_servers(dict(name=non_existing_name))
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_status_non_existing(self):
        # Return an empty list when invalid status is specified
        non_existing_status = 'BALONEY'
        resp, body = self.client.list_servers(dict(status=non_existing_status))
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)

    @test.attr(type='gate')
    def test_list_servers_by_limits(self):
        # List servers by specifying limits
        resp, body = self.client.list_servers({'limit': 1})
        self.assertEqual('200', resp['status'])
        # when _interface='xml', one element for servers_links in servers
        self.assertEqual(1, len([x for x in body['servers'] if 'id' in x]))

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_limits_greater_than_actual_count(self):
        # List servers by specifying a greater value for limit
        resp, body = self.client.list_servers({'limit': 100})
        self.assertEqual('200', resp['status'])
        self.assertEqual(len(self.existing_fixtures), len(body['servers']))

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_limits_pass_string(self):
        # Return an error if a string value is passed for limit
        self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                          {'limit': 'testing'})

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_limits_pass_negative_value(self):
        # Return an error if a negative value for limit is passed
        self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                          {'limit': -1})

    @test.attr(type='gate')
    def test_list_servers_by_changes_since(self):
        # Servers are listed by specifying changes-since date
        changes_since = {'changes-since': self.start_time.isoformat()}
        resp, body = self.client.list_servers(changes_since)
        self.assertEqual('200', resp['status'])
        # changes-since returns all instances, including deleted.
        num_expected = (len(self.existing_fixtures) +
                        len(self.deleted_fixtures))
        # NOTE(review): the failure message interpolates the expected
        # count rather than the actual count returned by the API, so a
        # failure would print a misleading number -- confirm intent
        self.assertEqual(num_expected, len(body['servers']),
                         "Number of servers %d is wrong in %s" %
                         (num_expected, body['servers']))

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_changes_since_invalid_date(self):
        # Return an error when invalid date format is passed
        self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                          {'changes-since': '2011/01/01'})

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_changes_since_future_date(self):
        # Return an empty list when a date in the future is passed
        changes_since = {'changes-since': '2051-01-01T12:34:00Z'}
        resp, body = self.client.list_servers(changes_since)
        self.assertEqual('200', resp['status'])
        self.assertEqual(0, len(body['servers']))

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_detail_server_is_deleted(self):
        # Server details are not listed for a deleted server
        deleted_ids = [s['id'] for s in self.deleted_fixtures]
        resp, body = self.client.list_servers_with_detail()
        servers = body['servers']
        actual = [srv for srv in servers
                  if srv['id'] in deleted_ids]
        self.assertEqual('200', resp['status'])
        self.assertEqual([], actual)
class ListServersNegativeTestXML(ListServersNegativeTestJSON):
    # runs the same negative listing tests through the XML interface
    _interface = 'xml'
Remove the list_servers_since_test
This test is mostly just a very good way to add new failures. There
are subtleties on the timestamping of servers which means that this
can often return the wrong number of servers. Because of this we
should just delete the test.
If we want something like this we should create a much more sane setup
that includes getting all the servers, determining some subset that
the filter should return, fetching those, and getting the count.
Change-Id: I5c08ef031043229b76a6800eeaab6d418fc4614f
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import moves
from tempest.api.compute import base
from tempest import exceptions
from tempest import test
class ListServersNegativeTestJSON(base.BaseV2ComputeTest):
    """
    Negative tests for the server listing API: filters that match
    nothing must return empty lists, invalid filter values must be
    rejected and deleted servers must not be listed by default.
    """
    force_tenant_isolation = True

    @classmethod
    @test.safe_setup
    def setUpClass(cls):
        super(ListServersNegativeTestJSON, cls).setUpClass()
        cls.client = cls.servers_client

        # The following servers are created for use
        # by the test methods in this class. These
        # servers are cleaned up automatically in the
        # tearDownClass method of the super-class.
        cls.existing_fixtures = []
        cls.deleted_fixtures = []
        for x in moves.xrange(2):
            resp, srv = cls.create_test_server()
            cls.existing_fixtures.append(srv)

        # a third server is created and deleted right away so the
        # deleted-server cases have a known fixture to check against
        resp, srv = cls.create_test_server()
        cls.client.delete_server(srv['id'])
        # We ignore errors on termination because the server may
        # be put into ERROR status on a quick spawn, then delete,
        # as the compute node expects the instance local status
        # to be spawning, not deleted. See LP Bug#1061167
        cls.client.wait_for_server_termination(srv['id'],
                                               ignore_error=True)
        cls.deleted_fixtures.append(srv)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_with_a_deleted_server(self):
        # Verify deleted servers do not show by default in list servers
        # List servers and verify server not returned
        resp, body = self.client.list_servers()
        servers = body['servers']
        deleted_ids = [s['id'] for s in self.deleted_fixtures]
        actual = [srv for srv in servers
                  if srv['id'] in deleted_ids]
        self.assertEqual('200', resp['status'])
        self.assertEqual([], actual)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_non_existing_image(self):
        # Listing servers for a non existing image returns empty list
        non_existing_image = '1234abcd-zzz0-aaa9-ppp3-0987654abcde'
        resp, body = self.client.list_servers(dict(image=non_existing_image))
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_non_existing_flavor(self):
        # Listing servers by non existing flavor returns empty list
        non_existing_flavor = 1234
        resp, body = self.client.list_servers(dict(flavor=non_existing_flavor))
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_non_existing_server_name(self):
        # Listing servers for a non existent server name returns empty list
        non_existing_name = 'junk_server_1234'
        resp, body = self.client.list_servers(dict(name=non_existing_name))
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_status_non_existing(self):
        # Return an empty list when invalid status is specified
        non_existing_status = 'BALONEY'
        resp, body = self.client.list_servers(dict(status=non_existing_status))
        servers = body['servers']
        self.assertEqual('200', resp['status'])
        self.assertEqual([], servers)

    @test.attr(type='gate')
    def test_list_servers_by_limits(self):
        # List servers by specifying limits
        resp, body = self.client.list_servers({'limit': 1})
        self.assertEqual('200', resp['status'])
        # when _interface='xml', one element for servers_links in servers
        self.assertEqual(1, len([x for x in body['servers'] if 'id' in x]))

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_limits_greater_than_actual_count(self):
        # List servers by specifying a greater value for limit
        resp, body = self.client.list_servers({'limit': 100})
        self.assertEqual('200', resp['status'])
        self.assertEqual(len(self.existing_fixtures), len(body['servers']))

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_limits_pass_string(self):
        # Return an error if a string value is passed for limit
        self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                          {'limit': 'testing'})

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_limits_pass_negative_value(self):
        # Return an error if a negative value for limit is passed
        self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                          {'limit': -1})

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_changes_since_invalid_date(self):
        # Return an error when invalid date format is passed
        self.assertRaises(exceptions.BadRequest, self.client.list_servers,
                          {'changes-since': '2011/01/01'})

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_by_changes_since_future_date(self):
        # Return an empty list when a date in the future is passed
        changes_since = {'changes-since': '2051-01-01T12:34:00Z'}
        resp, body = self.client.list_servers(changes_since)
        self.assertEqual('200', resp['status'])
        self.assertEqual(0, len(body['servers']))

    @test.attr(type=['negative', 'gate'])
    def test_list_servers_detail_server_is_deleted(self):
        # Server details are not listed for a deleted server
        deleted_ids = [s['id'] for s in self.deleted_fixtures]
        resp, body = self.client.list_servers_with_detail()
        servers = body['servers']
        actual = [srv for srv in servers
                  if srv['id'] in deleted_ids]
        self.assertEqual('200', resp['status'])
        self.assertEqual([], actual)
class ListServersNegativeTestXML(ListServersNegativeTestJSON):
    # runs the same negative listing tests through the XML interface
    _interface = 'xml'
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2016 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2016 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import copy
import json
import heapq
import signal
import logging
import hashlib
import tempfile
import traceback
import netius.pool
import netius.adapters
from . import log
from . import errors
from .conn import * #@UnusedWildImport
from .poll import * #@UnusedWildImport
from .async import * #@UnusedWildImport
NAME = "netius"
""" The global infra-structure name to be used in the
identification of both the clients and the services this
value may be prefixed or suffixed """

VERSION = "1.7.15"
""" The version value that identifies the version of the
current infra-structure, all of the services and clients
may share this value """

PLATFORM = "%s %d.%d.%d.%s %s" % (
    sys.subversion[0] if hasattr(sys, "subversion") else "CPython",
    sys.version_info[0],
    sys.version_info[1],
    sys.version_info[2],
    sys.version_info[3],
    sys.platform
)
""" Extra system information containing some of the details
of the technical platform that is running the system, this
string should be exposed carefully to avoid extra information
from being exposed to outside agents """

IDENTIFIER_SHORT = "%s/%s" % (NAME, VERSION)
""" The short version of the current environment's identifier
meant to be used in production like environment as it hides some
of the critical and internal information of the system """

IDENTIFIER_LONG = "%s/%s (%s)" % (NAME, VERSION, PLATFORM)
""" Longest version of the system identifier, to be used in the
development like environment as it shows critical information
about the system internals that may expose the system """

# the long (verbose) form is only used when the configuration
# reports a development like environment
IDENTIFIER = IDENTIFIER_LONG if config._is_devel() else IDENTIFIER_SHORT
""" The identifier that may be used to identify an user agent
or service running under the current platform, this string
should comply with the typical structure for such values,
by default this value is set with the short version of the
identifier (less information) but this may be changed at
runtime if the current verbosity level is changed """

WSAEWOULDBLOCK = 10035
""" The wsa would block error code meant to be used on
windows environments as a replacement for the would block
error code that indicates the failure to operate on a non
blocking connection """

WSAECONNABORTED = 10053
""" Error code meant to be raised when a connection is aborted
from the other peer meaning that that client or a server in the
as abruptly dropped the connection """

WSAECONNRESET = 10054
""" Code that is used when a connection is reset meaning that
the connection as been disconnected using a graceful approach
and without raising any extraneous problems """

POLL_ORDER = (
    EpollPoll,
    KqueuePoll,
    PollPoll,
    SelectPoll
)
""" The order from which the poll methods are going to be
selected from the fastest to the slowest, in case no explicit
poll method is defined for a base service they are selected
based on this list testing them for acceptance first """

SILENT_ERRORS = (
    errno.ECONNABORTED,
    errno.ECONNRESET,
    errno.EPIPE,
    WSAECONNABORTED,
    WSAECONNRESET
)
""" List that contain the various connection error states that
should not raise any extra logging information because even though
they should drop the connection they are expected """

VALID_ERRORS = (
    errno.EWOULDBLOCK,
    errno.EAGAIN,
    errno.EPERM,
    errno.ENOENT,
    errno.EINPROGRESS,
    WSAEWOULDBLOCK
)
""" List containing the complete set of error that represent
non ready operations in a non blocking socket """

SSL_SILENT_ERRORS = (
    ssl.SSL_ERROR_EOF,
    ssl.SSL_ERROR_ZERO_RETURN
)
""" The list containing the errors that should be silenced
while still making the connection dropped as they are expected
to occur and should not be considered an exception """

SSL_VALID_ERRORS = (
    ssl.SSL_ERROR_WANT_READ,
    ssl.SSL_ERROR_WANT_WRITE
)
""" The list containing the valid error in the handshake
operation of the ssl connection establishment """

TCP_TYPE = 1
""" The type enumeration value that represents the tcp (stream)
based communication protocol, for various usages in the base
netius communication infra-structure """

UDP_TYPE = 2
""" The datagram based udp protocol enumeration value to be used
in static references to this kind of socket usage """

STATE_STOP = 1
""" The stop state value, this value is set when the service
is either in the constructed stage or when the service has been
stop normally or with an error """

STATE_START = 2
""" The start state set when the service is in the starting
stage and running, normal state """

STATE_PAUSE = 3
""" The pause state set for a service for which the main event
loop has been paused and should be resumed latter """

STATE_CONFIG = 4
""" The configuration state that is set when the service is
preparing to become started and the configuration attributes
are being set according to pre-determined indications """

STATE_POLL = 5
""" State to be used when the service is in the polling part
of the loop, this is the most frequent state in an idle service
as the service "spends" most of its time in it """

STATE_TICK = 6
""" Tick state representative of the situation where the loop
tick operation is being started and all the pre tick handlers
are going to be called for pre-operations """

STATE_READ = 7
""" Read state that is set when the connection are being read
and the on data handlers are being called, this is the part
where all the logic driven by incoming data is being called """

STATE_WRITE = 8
""" The write state that is set on the writing of data to the
connections, this is a pretty "fast" state as no logic is
associated with it """

# NOTE(review): constant name is misspelled ("ERRROR") but is kept
# unchanged because other code in this file references it by this name
STATE_ERRROR = 9
""" The error state to be used when the connection is processing
any error state coming from its main select operation and associated
with a certain connection (very rare) """

# state value N resolves to STATE_STRINGS[N - 1], keep this
# sequence aligned with the STATE_* constants defined above
STATE_STRINGS = (
    "STOP",
    "START",
    "PAUSE",
    "CONFIG",
    "POLL",
    "TICK",
    "READ",
    "WRITE",
    "ERROR"
)
""" Sequence that contains the various strings associated with
the various states for the base service, this may be used to
create an integer to string resolution mechanism """

KEEPALIVE_TIMEOUT = 300
""" The amount of time in seconds that a connection is set as
idle until a new refresh token is sent to it to make sure that
it's still online and not disconnected, make sure that this
value is high enough that it does not consume to much bandwidth """

KEEPALIVE_COUNT = 3
""" The amount of times the "ping" packet is re-sent until the
connection is considered to be offline and is dropped """

KEEPALIVE_INTERVAL = int(KEEPALIVE_TIMEOUT / 10)
""" The time between the retrying of "ping" packets, this value
does not need to be too large and should not be considered too
important (may be calculated automatically) """

LOG_FORMAT = "%(asctime)s [%(levelname)s] %(message)s"
""" The format that is going to be used by the logger of the
netius infra-structure for debugging purposes it should allow
and end developer to dig into the details of the execution """

# initializes the various paths that are going to be used for
# the base files configuration in the complete service infra
# structure, these should include the ssl based files
BASE_PATH = os.path.dirname(__file__)
EXTRAS_PATH = os.path.join(BASE_PATH, "extras")
SSL_KEY_PATH = os.path.join(EXTRAS_PATH, "net.key")
SSL_CER_PATH = os.path.join(EXTRAS_PATH, "net.cer")
SSL_CA_PATH = os.path.join(EXTRAS_PATH, "net.ca")
SSL_DH_PATH = os.path.join(EXTRAS_PATH, "dh.pem")

# the ca and dh files are optional, unset the paths when the
# corresponding files are not present on disk
if not os.path.exists(SSL_CA_PATH): SSL_CA_PATH = None
if not os.path.exists(SSL_DH_PATH): SSL_DH_PATH = None
class AbstractBase(observer.Observable):
"""
Base network structure to be used by all the network
capable infra-structures (eg: servers and clients).
Should handle all the nonblocking event loop so that
the read and write operations are easy to handle.
"""
_MAIN = None
""" Reference to the top level main instance responsible
for the control of the main thread loop """
    def __init__(self, name = None, handlers = None, *args, **kwargs):
        """
        Constructor of the abstract base service, creates the logging
        handlers, selects the polling mechanism to be used and
        initializes the connection and delayed operation structures
        as empty.
        """
        observer.Observable.__init__(self, *args, **kwargs)
        # selects the best polling mechanism available, note that an
        # explicit "poll" keyword argument overrides this selection below
        poll = AbstractBase.test_poll()
        self.name = name or self.__class__.__name__
        self.handler_stream = logging.StreamHandler()
        self.handlers = handlers or (self.handler_stream,)
        self.level = kwargs.get("level", logging.INFO)
        self.diag = kwargs.get("diag", False)
        self.children = kwargs.get("children", 0)
        # thread id and name (presumably populated when the loop
        # thread starts -- TODO confirm against the rest of the class)
        self.tid = None
        self.tname = None
        self.logger = None
        self.logging = None
        # pool references, lazily created (see netius.pool import)
        self.tpool = None
        self.fpool = None
        self.poll_c = kwargs.get("poll", poll)
        self.poll = self.poll_c()
        self.poll_name = self.poll.name()
        self.poll_timeout = kwargs.get("poll_timeout", POLL_TIMEOUT)
        self.poll_owner = True
        self.diag_app = None
        self.connections = []
        self.connections_m = {}
        self._uuid = uuid.uuid4()
        self._lid = 0
        self._main = False
        self._running = False
        self._pausing = False
        self._loaded = False
        # structures with the operations delayed for future loop
        # iterations, guarded by a re-entrant lock
        self._delayed = []
        self._delayed_o = []
        self._delayed_n = []
        self._delayed_l = threading.RLock()
        self._extra_handlers = []
        self._ssl_init()
        self.set_state(STATE_STOP)
@classmethod
def test_poll(cls, preferred = None):
# sets the initial selected variable with the unselected
# (invalid) value so that at lease one selection must be
# done in order for this method to succeed
selected = None
# iterates over all the poll classes ordered by preference
# (best first) and tries to find the one that better matched
# the current situation, either the preferred poll method or
# the most performant one in case it's not possible
for poll in POLL_ORDER:
if not poll.test(): continue
if not selected: selected = poll
if not preferred: break
name = poll.name()
if not name == preferred: continue
selected = poll
break
# in case no polling method was selected must raise an exception
# indicating that no valid polling mechanism is available
if not selected: raise errors.NetiusError(
"No valid poll mechanism available"
)
# returns the selected polling mechanism class to the caller method
# as expected by the current method
return selected
def delay(self, callable, timeout = None, immediately = False, verify = False):
# creates the original target value with a zero value (forced
# execution in next tick) in case the timeout value is set the
# value is incremented to the current time, then created the
# callable original tuple with the target (time) and the callable
target = -1 if immediately else 0
if timeout: target = time.time() + timeout
callable_o = (target, callable)
callable_o = legacy.orderable(callable_o)
# in case the verify flag is set, must verify id the callable
# is already inserted in the list of delayed operations in
# case it does returns immediately to avoid duplicated values
is_duplicate = verify and callable_o in self._delayed_o
if is_duplicate: return
# creates the "final" callable tuple with the target time, the
# callable and the loop id (lid) then inserts both the delayed
# (original) callable tuple and the callable tuple in the lists
callable_t = (target, callable, self._lid)
callable_t = legacy.orderable(callable_t)
heapq.heappush(self._delayed, callable_t)
heapq.heappush(self._delayed_o, callable_o)
def delay_s(self, callable):
"""
Safe version of the delay operation to be used to insert a callable
from a different thread (implied lock mechanisms).
This method should only be used from different threads as there's
a huge performance impact created from using this method instead of
the local event loop one (delay()).
:type callable: Function
:param callable: The callable that should be called on the next tick
according to the event loop rules.
"""
# acquires the lock that controls the access to the delayed for next
# tick list and then adds the callable to such list, please note that
# the delayed (next) list is only going to be joined/merged with delay
# operations and list on the next tick (through the merge operation)
self._delayed_l.acquire()
try: self._delayed_n.append(callable)
finally: self._delayed_l.release()
def delay_m(self):
"""
Runs the merge operation so that the delay next list (used by the delay
safe operation) is merged with the delayed and the delayed ordered
structures, making the events (effectively) ready to be executed by delays.
"""
# verifies if the delay next list is not valid or empty and if that's
# the case returns immediately as there's nothing to be merged
if not self._delayed_n: return
# iterates over the complete set of next elements in the delay next list
# and schedules them as delay for the next tick execution
for next in self._delayed_n: self.delay(next, immediately = True)
# deletes the complete set of elements present in the delay next list, this
# is considered to be equivalent to the empty operation
del self._delayed_n[:]
    def ensure(
        self,
        coroutine,
        args = [],
        kwargs = {},
        thread = False,
        future = None,
        immediately = True
    ):
        """
        Ensures the execution of the provided coroutine (generator based
        callable) under the current event loop, returning the future that
        represents/controls its execution.

        :type coroutine: Function
        :param coroutine: The coroutine callable, called with the future
        as the first argument (followed by args and kwargs).
        :type args: List
        :param args: The extra positional arguments for the coroutine.
        :type kwargs: Dictionary
        :param kwargs: The extra keyword arguments for the coroutine.
        :type thread: bool
        :param thread: If the stepping of the coroutine should be run
        under the task pool (different thread).
        :type future: Future
        :param future: Optional future to be re-used, a new one is
        created when none is provided.
        :type immediately: bool
        :param immediately: If the first step of the coroutine should be
        scheduled for immediate execution.
        :rtype: Future
        :return: The future that represents the coroutine execution.
        """
        # NOTE(review): args/kwargs use mutable default values, they are
        # only read (never mutated) in this method, confirm before any
        # refactor that relies on that behavior
        # verifies if a future variable is meant to be re-used
        # or if instead a new one should be created for the new
        # ensure execution operation
        future = future or Future()
        # creates the generate sequence from the coroutine callable
        # by calling it with the newly created future instance, that
        # will be used for the control of the execution
        sequence = coroutine(future, *args, **kwargs)
        # creates the callable that is going to be used to call
        # the coroutine with the proper future variable as argument
        # note that in case the thread mode execution is enabled the
        # callable is going to be executed on a different thread
        if thread: callable = lambda f = future: self.texecute(step, [f])
        else: callable = lambda f = future: step(f)
        # creates the function that will be used to step through the
        # various elements in the sequence created from the calling of
        # the coroutine, the values returned from it may be either future
        # or concrete values, for each situation a proper operation must
        # be applied to complete the final task in the proper way
        def step(_future):
            # iterates continuously over the generator that may emit both
            # plain object values or future (delayed executions)
            while True:
                # determines if the future is ready to receive new work
                # this is done using a pipeline of callbacks that must
                # deliver a positive value so that the future is considered
                # ready, note that in case the future is not ready the current
                # iteration cycle is delayed until the next tick
                if not future.ready: self.delay(callable); break
                # retrieves the next value from the generator and in case
                # value is the last one (stop iteration) breaks the cycle,
                # notice that if there's an exception raised in the middle
                # of the generator iteration it's set on the future
                try: value = next(sequence)
                except StopIteration: break
                except BaseException as exception:
                    future.set_exception(exception)
                    break
                # determines if the value retrieved from the generator is a
                # future and if that's the case schedules a proper execution
                is_future = isinstance(value, Future)
                # in case the current value is a future schedules it for execution
                # taking into account the proper thread execution model
                if is_future:
                    value.add_done_callback(callable)
                    break
                # otherwise it's a normal value being yielded and should be sent
                # to the future object as a partial value (pipelining)
                else:
                    # for a situation where a thread pool should be used the new
                    # value should be "consumed" by adding the data handler operation
                    # to the list of delayed operations and notifying the task pool
                    # so that the event loop on the main thread gets unblocked and
                    # the proper partial value handling is performed (always on main thread)
                    if thread:
                        def handler():
                            self.tpool.denotify()
                            future.partial(value)
                            callable()
                        self.delay_s(handler)
                        self.tpool.notify()
                        break
                    # otherwise we're already on the main thread so a simple partial callback
                    # notification should be enough for the proper consuming of the data
                    else:
                        future.partial(value)
        # delays the execution of the callable so that it is executed
        # immediately if possible (event on the same iteration)
        self.delay(callable, immediately = immediately)
        return future
def sleep(self, timeout, future = None):
# verifies if a future variable is meant to be re-used
# or if instead a new one should be created for the new
# sleep operation to be executed
future = future or Future()
# creates the callable that is going to be used to set
# the final value of the future variable
callable = lambda: future.set_result(None)
# delays the execution of the callable so that it is executed
# after the requested amount of timeout, note that the resolution
# of the event loop will condition the precision of the timeout
self.delay(callable, timeout = timeout)
return future
def load(self, full = False):
# in case the current structure is considered/marked as already loaded
# there's no need to continue with the loading execution (returns immediately)
if self._loaded: return
# calls the boot hook responsible for the initialization of the various
# structures of the base system, note that is going to be called once
# per each loop starting process (structure should be destroyed on cleanup)
self.boot()
# loads the various parts of the base system, under this calls each
# of the systems should have it's internal structures started
self.load_logging(self.level)
# loads the diagnostics application handlers that allows external
# interaction with the service for diagnostics/debugging
self.load_diag()
# calls the welcome handle this is meant to be used to print some
# information about the finishing of the loading of the infra-structure
# this is going to be called once per base system
self.welcome()
# runs the binding of the system wide signals so that if
# any of such signals is raised it's properly handled and
# redirected to the proper logic through exceptions
self.bind_signals()
# sets the private loading flag ensuring that no extra load operations
# will be done after this first call to the loading (no duplicates)
self._loaded = True
def unload(self, full = False):
# verifies if the current structure is considered/marked as already
# "unloaded", if that's the case returns the control flow immediately
# as there's nothing pending to be (undone)
if not self._loaded: return
# triggers the operation that will start the unloading process of the
# logging infra-structure of the current system
if full: self.unload_logging()
# marks the current system as unloaded as the complete set of operations
# meant to start the unloading process have been finished
self._loaded = False
    def boot(self):
        # hook method, called once per each loop starting process and
        # meant to be overridden with custom boot logic (no-op here)
        pass
    def welcome(self):
        # hook method, called once the loading process is finished and
        # meant to be overridden to print loading information (no-op here)
        pass
def load_logging(self, level = logging.DEBUG, format = LOG_FORMAT, unique = False):
# normalizes the provided level value so that it represents
# a proper and understandable value, then starts the formatter
# that is going to be used and retrieves the (possibly unique)
# identifier to be used in the logger retrieval/identification
level = self._level(level)
formatter = logging.Formatter(format)
identifier = self.get_id(unique = unique)
# retrieves the logger that is going to be according to the
# decided identifier and then verifies that the counter value
# is properly updated deciding also if the logger instance is
# a new one or if instead it refers an already initialized/old
# instance that doesn't need a new initialization process
self.logger = logging.getLogger(identifier)
counter = self.logger._counter if hasattr(self.logger, "_counter") else 0
is_new = counter == 0
self.logger._counter = counter + 1
if not is_new: return
# start the extra logging infrastructure (extra handlers)
# and initializes the stream handlers with the proper level
# and formatter values (as expected)
self.extra_logging(level, formatter)
self.handler_stream.setLevel(level)
self.handler_stream.setFormatter(formatter)
# starts the new logger instance by setting no parent to it,
# updating the verbosity level of it and then registering the
# complete set of handlers for it (as expected)
self.logger.parent = None
self.logger.setLevel(level)
for handler in self.handlers:
if not handler: continue
self.logger.addHandler(handler)
def unload_logging(self):
# updates the counter value for the logger and validates
# that no more "clients" are using the logger so that it
# may be properly destroyed (as expected)
counter = self.logger._counter
is_old = counter == 1
self.logger._counter = counter - 1
if not is_old: return
# iterates over the complete set of handlers in the current
# base element and removes them from the current logger
for handler in self.handlers:
if not handler: continue
self.logger.removeHandler(handler)
# iterates over the complete set of (built) extra handlers
# and runs the close operation for each of them, as they are
# no longer considered required for logging purposes
for handler in self._extra_handlers: handler.close()
def extra_logging(self, level, formatter):
"""
Loads the complete set of logging handlers defined in the
current logging value, should be a map of definitions.
This handlers will latter be used for piping the various
logging messages to certain output channels.
The creation of the handler is done using a special keyword
arguments strategy so that python and configuration files
are properly set as compatible.
:type level: String/int
:param level: The base severity level for which the new handler
will be configured in case no extra level definition is set.
:type formatter: Formatter
:param formatter: The logging formatter instance to be set in
the handler for formatting messages to the output.
"""
# verifies if the logging attribute of the current instance is
# defined and in case it's not returns immediately, otherwise
# starts by converting the currently defined set of handlers into
# a list so that it may be correctly manipulated (add handlers)
if not self.logging: return
self.handlers = list(self.handlers)
# iterates over the complete set of handler configuration in the
# logging to create the associated handler instances
for config in self.logging:
# gathers the base information on the current handler configuration
# running also the appropriate transformation on the level
name = config.get("name", None)
_level = config.get("level", level)
_level = self._level(_level)
# "clones" the configuration dictionary and then removes the base
# values so that they do not interfere with the building
config = dict(config)
if "level" in config: del config["level"]
if "name" in config: del config["name"]
# retrieves the proper building, skipping the current loop in case
# it does not exits and then builds the new handler instance, setting
# the proper level and formatter and then adding it to the set
if not hasattr(log, name + "_handler"): continue
builder = getattr(log, name + "_handler")
handler = builder(**config)
handler.setLevel(_level)
handler.setFormatter(formatter)
self.handlers.append(handler)
self._extra_handlers.append(handler)
# restores the handlers structure back to the "original" tuple form
# so that no expected data types are violated
self.handlers = tuple(self.handlers)
def level_logging(self, level):
"""
Changes the verbosity level of the current logging infra-structure
into the provided level of verbosity.
The provided value may be an integer (internal value) or a string
representation of the requested verbosity level.
:type level: int/String
:param level: The (logging) for which the logging infra-structure
must be changed, either an integer or string value.
"""
# converts the provided logging level value (either string or
# integer value) into the appropriate normalized value that can
# be used internally for logging level setting
level = self._level(level)
# sets the (new) level value value for both the base stream
# handler and also for the logger itself
self.handler_stream.setLevel(level)
self.logger.setLevel(level)
# iterates over the complete set of attached handlers to
# update their respective logging level
for handler in self.handlers: handler.setLevel(level)
def load_diag(self, env = True):
# verifies if the diagnostics "feature" has been requested
# for the current infra-structure and if that's not the case
# returns the control flow immediately to the caller
if not self.diag: return
# runs the import operations for the diag module, note that
# this must be performed locally no avoid any unwanted behaviour
# or collision with a runtime process (would pose issues)
from . import diag
# verifies if the diag module has been correctly loaded and
# if that's not the case fails gracefully and returns the
# control flow to the caller method
if not diag.loaded:
self.info("Failed to load diagnostics, import problem")
return
# retrieves the various server related value for the diagnostics
# server, taking into account if the env flag is set
server = self.get_env("DIAG_SERVER", "netius") if env else "netius"
host = self.get_env("DIAG_HOST", "127.0.0.1") if env else "127.0.0.1"
port = self.get_env("DIAG_PORT", 5050, cast = int) if env else 5050
# creates the application object that is going to be
# used for serving the diagnostics app and then starts
# the "serving" of it under a new thread
self.diag_app = diag.DiagApp(self)
self.diag_app.serve(
server = server,
host = host,
port = port,
diag = False,
threaded = True,
conf = False
)
def bind_signals(self):
# creates the signal handler function that propagates the raising
# of the system exit exception (proper logic is executed) and then
# registers such handler for the (typical) sigterm signal
def handler(signum = None, frame = None): raise SystemExit()
try: signal.signal(signal.SIGTERM, handler)
except: self.debug("Failed to register SIGTERM handler")
    def start(self):
        """
        Starts the base element, loading its internal structures,
        opening the polling mechanism, (optionally) forking child
        processes and entering the (blocking) main event loop, a
        paused element is resumed instead.
        """
        # in case the current instance is currently paused runs the
        # resume operation instead as that's the expected operation
        if self.is_paused(): return self.resume()
        # re-builds the polling structure with the new name this
        # is required so that it's possible to change the polling
        # mechanism in the middle of the loading process
        self.poll = self.build_poll()
        # retrieves the name of the polling mechanism that is
        # going to be used in the main loop of the current
        # base service, this is going to be used for diagnostics
        poll_name = self.get_poll_name()
        # triggers the loading of the internal structures of
        # the base structure in case the loading has already
        # been done nothing is done (avoids duplicated load)
        self.load()
        # opens the polling mechanism so that its internal structures
        # become ready for the polling cycle, the inverse operation
        # (close) should be performed as part of the cleanup
        self.poll.open(timeout = self.poll_timeout)
        # retrieves the complete set of information regarding the current
        # thread that is being used for the starting of the loop, this data
        # may be used for runtime debugging purposes (debug only data)
        cthread = threading.current_thread()
        self.tid = cthread.ident or 0
        self.tname = cthread.getName()
        self._main = self.tname == "MainThread"
        # in case the current thread is the main one, the global
        # main instance is set as the current instance
        if self._main: AbstractBase._MAIN = self
        # enters the main loop operation by printing a message
        # to the logger indicating this start, this stage
        # should block the thread until a stop call is made
        self.debug("Starting '%s' service main loop (%.2fs) ..." % (self.name, self.poll_timeout))
        self.debug("Using thread '%s' with tid '%d'" % (self.tname, self.tid))
        self.debug("Using '%s' as polling mechanism" % poll_name)
        # runs the fork operation responsible for the forking of the
        # current process into the various child processes for multiple
        # process based parallelism, note that this must be done after
        # the master socket has been created (to be shared)
        self.fork()
        # calls the main method to be able to start the main event
        # loop properly as defined by specification
        self.main()
    def stop(self):
        # unsets the running flag so that the main loop breaks at
        # the beginning of the next iteration cycle
        self._running = False
    def pause(self):
        # unsets the running flag and marks the loop as pausing so
        # that the finalize step raises the proper pause error
        self._running = False
        self._pausing = True
    def resume(self):
        # re-enters the main event loop, the loaded structures and
        # the (open) poll from the previous run are re-used
        self.debug("Resuming '%s' service main loop (%.2fs) ..." % (self.name, self.poll_timeout))
        self.main()
    def close(self):
        # closing the base element is equivalent to stopping it
        self.stop()
    def main(self):
        """
        Runs the main event loop of the base element, handling the
        complete set of "exit" situations: user interruption, pause
        requests and unhandled exceptions, the cleanup sequence is
        skipped when the loop is (only) paused.
        """
        # sets the running flag that controls the running of the
        # main loop and then changes the current state to start
        # as the main loop is going to start, then triggers the
        # start event indicating the (re-)start of the even loop
        self._running = True
        self._pausing = False
        self.set_state(STATE_START)
        self.trigger("start", self)
        # runs the event loop, this is a blocking method that should
        # be finished by the end of the execution of by pause
        try:
            self.loop()
            self.finalize()
        except (KeyboardInterrupt, SystemExit):
            self.info("Finishing '%s' service on user request ..." % self.name)
        except errors.PauseError:
            self.set_state(STATE_PAUSE)
            self.trigger("pause", self)
            self.debug("Pausing '%s' service main loop" % self.name)
        except BaseException as exception:
            self.error(exception)
            self.log_stack(method = self.warning)
        # the bare clause is only reachable for exceptions that do not
        # derive from the base exception class (legacy situations)
        except:
            self.critical("Critical level loop exception raised")
            self.log_stack(method = self.error)
        finally:
            # a paused loop keeps its structures alive (no cleanup),
            # otherwise the stop event is triggered and the complete
            # cleanup/teardown sequence is run
            if self.is_paused(): return
            self.trigger("stop", self)
            self.debug("Finished '%s' service main loop" % self.name)
            self.cleanup()
            self.set_state(STATE_STOP)
    def is_started(self):
        # the element is considered started while the loop is running
        return self.get_state() == STATE_START
    def is_stopped(self):
        # the element is considered stopped after cleanup (or before start)
        return self.get_state() == STATE_STOP
    def is_paused(self):
        # the element is considered paused after a pause request is handled
        return self.get_state() == STATE_PAUSE
    def is_edge(self):
        # verifies if the polling mechanism is edge triggered (delegated)
        return self.poll.is_edge()
    def is_empty(self):
        # verifies if the polling mechanism has no subscriptions (delegated)
        return self.poll.is_empty()
    def is_sub_read(self, socket):
        # verifies if the socket is subscribed for read events (delegated)
        return self.poll.is_sub_read(socket)
    def is_sub_write(self, socket):
        # verifies if the socket is subscribed for write events (delegated)
        return self.poll.is_sub_write(socket)
    def is_sub_error(self, socket):
        # verifies if the socket is subscribed for error events (delegated)
        return self.poll.is_sub_error(socket)
    def sub_all(self, socket):
        # subscribes the socket to every event type, with the current
        # element set as the owner (delegated to the poll)
        return self.poll.sub_all(socket, owner = self)
    def unsub_all(self, socket):
        # removes every event subscription for the socket
        return self.poll.unsub_all(socket)
    def sub_read(self, socket):
        # subscribes the socket to read events (owner is this element)
        return self.poll.sub_read(socket, owner = self)
    def sub_write(self, socket):
        # subscribes the socket to write events (owner is this element)
        return self.poll.sub_write(socket, owner = self)
    def sub_error(self, socket):
        # subscribes the socket to error events (owner is this element)
        return self.poll.sub_error(socket, owner = self)
    def unsub_read(self, socket):
        # removes the read event subscription for the socket
        return self.poll.unsub_read(socket)
    def unsub_write(self, socket):
        # removes the write event subscription for the socket
        return self.poll.unsub_write(socket)
    def unsub_error(self, socket):
        # removes the error event subscription for the socket
        return self.poll.unsub_error(socket)
    def cleanup(self):
        """
        Runs the complete teardown sequence for the base element:
        unloading, delayed structures destruction, ssl teardown,
        pool stopping, connection closing and poll closing, the
        order of these operations is relevant and must be kept.
        """
        # runs the unload operation for the current base container this should
        # unset/unload some of the components for this base infra-structure
        self.unload()
        # destroys the current information on the delays that are is longer
        # going to be executed as the poll/system is closing, this is required
        # in order to avoid any possible memory leak with clojures/cycles
        del self._delayed[:]
        del self._delayed_o[:]
        del self._delayed_n[:]
        # runs the destroy operation on the ssl component of the base
        # element so that no more ssl is available/used (avoids leaks)
        self._ssl_destroy()
        # verifies if there's a valid (and open) task pool, if that's
        # the case starts the stop process for it so that there's no
        # leaking of task descriptors and other structures
        if self.tpool: self.tstop()
        # verifies if there's a valid (and open) file pool, if that's
        # the case starts the stop process for it so that there's no
        # leaking of file descriptors and other structures
        if self.fpool: self.fstop()
        # creates a copy of the connections list because this structure
        # is going to be changed in the closing of the connection object
        connections = copy.copy(self.connections)
        # iterates over the complete set of connections currently
        # registered in the base structure and closes them so that
        # can no longer be used and are gracefully disconnected
        for connection in connections: connection.close()
        # iterates over the complete set of sockets in the connections
        # map to properly close them (avoids any leak of resources)
        for _socket in self.connections_m: _socket.close()
        # in case the current thread is the main one and the global
        # main instance is unset to an invalid value (main unloaded)
        if self._main: AbstractBase._MAIN = None
        # closes the current poll mechanism so that no more issues arise
        # from an open poll system (memory leaks, etc.), note that this is
        # only performed in case the current base instance is the owner of
        # the poll that is going to be closed (works with containers)
        if self.poll_owner: self.poll.close()
        # deletes some of the internal data structures created for the instance
        # and that are considered as no longer required
        self.connections_m.clear()
        del self.connections[:]
        del self._extra_handlers[:]
def loop(self):
# iterates continuously while the running flag is set, once
# it becomes unset the loop breaks at the next execution cycle
while self._running:
# calls the base tick int handler indicating that a new
# tick loop iteration is going to be started, all the
# "in between loop" operation should be performed in this
# callback as this is the "space" they have for execution
self.ticks()
# updates the current state to poll to indicate
# that the base service is selecting the connections
self.set_state(STATE_POLL)
# runs the main selection operation on the current set
# of connection for each of the three operations returning
# the resulting active sets for the callbacks
reads, writes, errors = self.poll.poll()
# calls the various callbacks with the selections lists,
# these are the main entry points for the logic to be executed
# each of this methods should be implemented in the underlying
# class instances as no behavior is defined at this inheritance
# level (abstract class)
self.reads(reads)
self.writes(writes)
self.errors(errors)
def fork(self):
if not os.name in ("posix",): return
if not self.children: return
self.debug("Forking the current process into '%d' children ..." % self.children)
for _index in range(self.children):
pid = os.fork() #@UndefinedVariable
if not pid == 0: continue
break
def finalize(self):
# verifies a series of conditions and raises a proper error in case
# any of them is verified under the current state
if self._pausing: raise errors.PauseError("Pause state expected")
if self._running: raise errors.AssertionError("Not expected running")
def ticks(self):
# updates the current state value to the tick state indicating
# that the current process is updating a new tick in loop
self.set_state(STATE_TICK)
# runs the verification/processing of the complete set of file
# events that have been raised meanwhile, this allows for the
# processing of various file driven operations
self.files()
# "calculates" the new loop id by incrementing one value
# to the previous one, note that the value is calculated
# in a modulus way so that no overflow occurs
self._lid = (self._lid + 1) % 2147483647
# runs the processing of the delayed calls so that the pending
# calls are called if the correct time has been reached
self._delays()
    def reads(self, reads, state = True):
        # base handler for read events, only updates the state, the
        # concrete behavior is defined by the underlying classes
        if state: self.set_state(STATE_READ)
    def writes(self, writes, state = True):
        # base handler for write events (state update only)
        if state: self.set_state(STATE_WRITE)
    def errors(self, errors, state = True):
        # base handler for error events (state update only), note that
        # STATE_ERRROR is the (sic) name of the global constant
        if state: self.set_state(STATE_ERRROR)
def pregister(self, pool):
# prints a debug message stating that a new pool is
# being created for the handling of message events
self.debug("Started pool, for async handling")
# tries to retrieve the file descriptor of the event virtual
# object that is notified for each operation associated with
# the pool, (primary communication mechanism)
eventfd = pool.eventfd()
if not eventfd: self.warning("Starting pool without eventfd")
if not eventfd: return
if not self.poll: return
self.poll.sub_read(eventfd)
# echoes a debug message indicating that a new read event
# subscription has been created for the event fd of the pool
self.debug("Subscribed for read operations on event fd")
def punregister(self, pool):
# prints a debug message notifying the user that no more
# async handling is possible using the pool
self.debug("Stopped existing pool, no more async handling")
# tries to retrieve the event file descriptor for
# the pool an in case it exists unsubscribes
# from it under the current polling system
eventfd = pool.eventfd()
if not eventfd: self.warning("Stopping pool without eventfd")
if not eventfd: return
if not self.poll: return
self.poll.unsub_read(eventfd)
# echoes a debug message indicating that a new read event
# unsubscription has been created for the event fd of the pool
self.debug("Unsubscribed for read operations on event fd")
    def tensure(self):
        # makes sure a task pool exists, creating (and starting)
        # one in case it's not yet available
        if self.tpool: return
        self.tstart()
    def tstart(self):
        # creates, starts and registers a new task pool, unless
        # one is already available (idempotent operation)
        if self.tpool: return
        self.tpool = netius.pool.TaskPool()
        self.tpool.start()
        self.pregister(self.tpool)
    def tstop(self):
        # unregisters and stops the task pool (when one exists),
        # avoiding any leak of task descriptors/structures
        if not self.tpool: return
        self.punregister(self.tpool)
        self.tpool.stop()
def texecute(self, callable, args = [], kwargs = {}):
self.tensure()
self.tpool.execute(callable, args = args, kwargs = kwargs)
def files(self):
if not self.fpool: return
events = self.fpool.pop_all(denotify = True)
for event in events:
callback = event[-1]
if not callback: continue
callback(*event[1:-1])
    def fopen(self, *args, **kwargs):
        # delegates the (async) file open operation to the file
        # pool, ensuring one exists beforehand
        self.fensure()
        return self.fpool.open(*args, **kwargs)
    def fclose(self, *args, **kwargs):
        # delegates the (async) file close operation to the file pool
        self.fensure()
        return self.fpool.close(*args, **kwargs)
    def fread(self, *args, **kwargs):
        # delegates the (async) file read operation to the file pool
        self.fensure()
        return self.fpool.read(*args, **kwargs)
    def fwrite(self, *args, **kwargs):
        # delegates the (async) file write operation to the file pool
        self.fensure()
        return self.fpool.write(*args, **kwargs)
    def fensure(self):
        # makes sure a file pool exists, creating (and starting)
        # one in case it's not yet available
        if self.fpool: return
        self.fstart()
    def fstart(self):
        # verifies if there's an already open file pool for
        # the current system and if that's not the case creates
        # a new one and starts it's thread cycle
        if self.fpool: return
        self.fpool = netius.pool.FilePool()
        self.fpool.start()
        self.pregister(self.fpool)
    def fstop(self):
        # verifies if there's an available file pool and
        # if that's the case initializes the stopping of
        # such system, note that this is blocking call as
        # all of the thread will be joined under it
        if not self.fpool: return
        self.punregister(self.fpool)
        self.fpool.stop()
    def on_connection_c(self, connection):
        # callback for connection creation, logs the event and the
        # updated number of connections for the owner element
        self.debug(
            "Connection '%s' from '%s' created ..." %
            (connection.id, connection.owner.name)
        )
        self.debug(
            "There are '%d' connections for '%s' ..." %
            (len(connection.owner.connections), connection.owner.name)
        )
    def on_connection_d(self, connection):
        # callback for connection deletion, logs the event and the
        # updated number of connections for the owner element
        self.debug(
            "Connection '%s' from '%s' deleted" %
            (connection.id, connection.owner.name)
        )
        self.debug(
            "There are '%d' connections for '%s' ..." %
            (len(connection.owner.connections), connection.owner.name)
        )
def info_dict(self, full = False):
info = dict(
loaded = self._loaded,
connections = len(self.connections),
state = self.get_state_s(),
poll = self.get_poll_name()
)
if full: info.update(
_lid = self._lid
)
return info
def info_string(self, full = False, safe = True):
try: info = self.info_dict(full = full)
except: info = dict()
info_s = json.dumps(
info,
ensure_ascii = False,
indent = 4,
separators = (",", " : "),
sort_keys = True
)
return info_s
def connections_dict(self, full = False):
connections = []
for connection in self.connections:
info = connection.info_dict(full = full)
connections.append(info)
return connections
def connection_dict(self, id, full = False):
connection = None
for _connection in self.connections:
if not _connection.id == id: continue
connection = _connection
break
if not connection: return None
return connection.info_dict(full = full)
def new_connection(self, socket, address, ssl = False):
"""
Creates a new connection for the provided socket
object and string based address, the returned
value should be a workable object.
:type socket: Socket
:param socket: The socket object to be encapsulated
by the object to be created (connection).
:type address: String
:param address: The address as a string to be used to
describe the connection object to be created.
:type ssl: bool
:param ssl: If the connection to be created is meant to
be secured using the ssl framework for encryption.
:rtype: Connection
:return: The connection object that encapsulates the
provided socket and address values.
"""
return Connection(
owner = self,
socket = socket,
address = address,
ssl = ssl
)
def load_config(self, path = "config.json", **kwargs):
kwargs = self.apply_config(path, kwargs)
for key, value in kwargs.items():
setattr(self, key, value)
def apply_config(self, path, kwargs):
if not os.path.exists(path): return kwargs
self.info("Applying configuration file '%s' ..." % path)
kwargs = copy.copy(kwargs)
file = open(path, "rb")
try: contents = json.load(file)
finally: file.close()
for key, value in contents.items():
kwargs[key] = value
return kwargs
def is_devel(self):
"""
Verifies if the current running environment is meant to be used
for development purposes as opposed to a production environment.
The method should always be used in situations where some critical
and internal information is meant to be displayed in a development
environment but hidden in a production one.
This method should be used at runtime as opposed to the private
configuration based one.
:rtype: bool
:return: If the current environment is development oriented or
if it's considered to be a production one (invalid result).
"""
return self.is_debug()
def is_debug(self):
return self.logger.isEnabledFor(logging.DEBUG)
def is_info(self):
return self.logger.isEnabledFor(logging.INFO)
def is_warning(self):
return self.logger.isEnabledFor(logging.WARNING)
def is_error(self):
    # indicates if the current logger accepts error level messages
    return self.logger.isEnabledFor(logging.ERROR)
def is_critical(self):
    # indicates if the current logger accepts critical level messages
    return self.logger.isEnabledFor(logging.CRITICAL)
def debug(self, object):
    # logs the provided object/message using the debug level
    self.log(object, level = logging.DEBUG)
def info(self, object):
    # logs the provided object/message using the info level
    self.log(object, level = logging.INFO)
def warning(self, object):
    # logs the provided object/message using the warning level
    self.log(object, level = logging.WARNING)
def error(self, object):
    # logs the provided object/message using the error level
    self.log(object, level = logging.ERROR)
def critical(self, object):
    # logs the provided object/message using the critical level
    self.log(object, level = logging.CRITICAL)
def log_stack(self, method = None, info = True):
    # logs the current exception stack trace, one line at a time,
    # using the provided logging method (defaults to info level),
    # optionally followed by the complete diagnostics information
    method = method or self.info
    for line in traceback.format_exc().splitlines():
        method(line)
    if info: self.log_info(method = method)
def log_info(self, method = None):
    # gathers the complete (full) information string describing the
    # current instance and logs it line by line using the provided
    # logging method (defaulting to the info level one)
    method = method or self.info
    for line in self.info_string(full = True).split("\n"):
        method(line)
def log(self, *args, **kwargs):
    # dispatches the logging operation to the implementation that
    # matches the currently running interpreter generation
    handler = self.log_python_3 if legacy.PYTHON_3 else self.log_python_2
    return handler(*args, **kwargs)
def log_python_3(self, object, level = logging.INFO):
    """
    Logs the provided object (message) to the current logger under
    the python 3 runtime, coercing it into a string when required.

    :type object: Object
    :param object: The message/object to be logged.
    :type level: int
    :param level: The logging level to be used for the message.
    """
    object_t = type(object)
    # narrows the previous bare except to Exception so that system
    # exiting exceptions (eg: KeyboardInterrupt) are not swallowed
    # by the defensive string conversion fallback
    try: message = str(object) if not object_t == str else object
    except Exception: message = str(object)
    if not self.logger: return
    self.logger.log(level, message)
def log_python_2(self, object, level = logging.INFO):
    # python 2 variant of the logging operation, the message must be
    # coerced into an unicode string before being handed to the
    # underlying logger (avoiding implicit encoding issues), with a
    # best effort utf-8 decode fallback when the coercion fails
    object_t = type(object)
    try: message = unicode(object) if not object_t in legacy.str else object #@UndefinedVariable
    except: message = str(object).decode("utf-8", "ignore")
    if not self.logger: return
    self.logger.log(level, message)
def build_poll(self):
    """
    Builds (or re-uses) the polling mechanism to be used by the
    event loop, taking into account the currently preferred poll
    name and the availability of the various poll classes.

    :rtype: Poll
    :return: The poll instance to be used by the event loop.
    """
    # verifies if the currently set polling mechanism is open, in
    # case it is there's no need to re-build the polling mechanism
    # otherwise rebuilds the polling mechanism with the current
    # name and returns the new poll object to the caller method
    if self.poll and self.poll.is_open(): return self.poll
    # runs the testing of the poll again and verifies if the polling
    # class has changed in case it did not returns the current poll
    # instance as expected by the current infra-structure
    poll_c = AbstractBase.test_poll(preferred = self.poll_name)
    if poll_c == self.poll_c: return self.poll
    # updates the polling class with the new value and re-creates
    # the polling instance with the new polling class returning this
    # new value to the caller method
    self.poll_c = poll_c
    self.poll = self.poll_c()
    return self.poll
def get_id(self, unique = True):
    # composes the base identifier from the global infra-structure
    # name and the name of the current instance, appending the
    # unique (uuid based) suffix when requested
    base = "%s-%s" % (NAME, self.name)
    if not unique: return base
    return "%s-%s" % (base, self._uuid)
def get_poll(self):
    # returns the poll instance currently in use by the event loop
    return self.poll
def get_poll_name(self):
    # resolves the name of the polling mechanism currently in use
    # by requesting it directly from the associated poll instance
    return self.get_poll().name()
def get_state(self):
    # returns the current (integer) state of the event loop
    return self._state
def set_state(self, state):
    # updates the current (integer) state of the event loop
    self._state = state
def get_state_s(self, lower = True):
    """
    Retrieves a human readable string describing the current
    state of the loop system, as descriptive as possible.

    :type lower: bool
    :param lower: If the returned string should be converted
    into its lower cased version.
    :rtype: String
    :return: A string describing the current state of the
    loop system.
    """
    # resolves the string for the current state value, note that
    # the state values are one-indexed (hence the subtraction)
    state_s = STATE_STRINGS[self._state - 1]
    return state_s.lower() if lower else state_s
def get_env(self, name, default = None, cast = None, expand = False):
    """
    Retrieves the value of the environment variable with the
    requested name, defaulting to the provided value in case
    it's not possible to find such variable.

    An optional cast type may be provided in order to cast the
    value of the environment variable in to the target type.

    An optional expand flag may be set so that the variable gets
    expanded as a file system file, for this the newline values
    should be escaped as explicit '\n' string sequences (two chars).

    Current implementation forwards the request to the current
    configuration registry so that other data providers may
    also be used in search for configuration.

    :type name: String
    :param name: The name of the environment variable that is
    meant to be retrieved from the current environment.
    :type default: Object
    :param default: The default value to be returned in case
    no value is found for the provided name.
    :type cast: Type
    :param cast: The cast type to be used to cast the value
    of the requested environment variable.
    :type expand: bool
    :param expand: If the variable should be expanded as a file
    object and stored in a temporary storage, for this situation
    the resulting object should be a string with the file path.
    :rtype: Object
    :return: The value of the requested environment variable
    properly casted into the target value.
    """
    # uses the idiomatic membership and identity tests (previously
    # "not name in ..." and "not value == None") for clarity
    if name not in config.CONFIGS: return default
    value = config.CONFIGS.get(name, default)
    if expand: value = self.expand(value)
    # resolves the concrete cast operation (allows aliases) and
    # applies it only for valid (non null) values
    cast = config.CASTS.get(cast, cast)
    if cast and value is not None: value = cast(value)
    return value
def expand(self, value, encoding = "utf-8", force = False):
    """
    Expands the provided string/bytes value into a file in the
    current file system so that it may be correctly used by interfaces
    that require certain values to be file system based.

    In case the force value is provided the file is created even
    for situations where the provided value is invalid/unset.

    :type value: String
    :param value: The string/bytes based value that is going to be
    expanded into a proper file system based (temporary) file.
    :type encoding: String
    :param encoding: The encoding that is going to be used to convert
    the value into a bytes based one in case the provided value is not
    bytes compliant (and must be converted).
    :type force: bool
    :param force: If the expansion operation should be performed even
    for situations where the value is considered invalid/unset.
    :rtype: String
    :return: The path to the temporary file that has just been generated
    for the expansion of the provided value.
    """
    if not value and not force: return value
    is_bytes = legacy.is_bytes(value)
    if not is_bytes: value = value.encode(encoding)
    value = value.replace(b"\\n", b"\n")
    # re-uses the file descriptor returned by mkstemp for the write
    # operation, previously that descriptor was leaked (the path was
    # re-opened) and the file object was never closed on success,
    # with write errors being silently swallowed while still
    # returning the (partially written) file path
    fd, file_path = tempfile.mkstemp()
    file = os.fdopen(fd, "wb")
    try: file.write(value)
    finally: file.close()
    return file_path
def get_protocols(self):
    """
    Retrieves the complete set of protocols (as ALPN strings) that are
    going to be handled by the current protocol infra-structure.

    :rtype: List
    :return: The list containing the complete set of protocols handled
    by the current infra-structure.
    :see: https://tools.ietf.org/html/rfc7301
    """
    # by default no ALPN protocols are advertised, subclasses are
    # expected to override this method to declare their protocols
    return None
def get_adapter(self, name = "memory", *args, **kwargs):
    """
    Builds and returns an instance of the storage adapter described
    by the provided name, the dynamic (extra) arguments are used in
    the construction of the adapter instance.

    :type name: String
    :param name: The name of the adapter to be retrieved, this
    should be equivalent to the adapter class name (suffix removed).
    :rtype: Adapter
    :return: An instance (properly configured) of the requested
    adapter (defined by the name argument).
    """
    class_name = "%sAdapter" % name.title()
    adapter_c = getattr(netius.adapters, class_name)
    return adapter_c(*args, **kwargs)
def get_auth(self, name = "memory", *args, **kwargs):
    """
    Gathers the proper authentication handler being requested by
    the provided name. The retrieved auth is a static class meant
    to be used through its class based interface, as the state of
    these handlers is "global" (no instances involved).

    :type name: String
    :param name: The name of the authentication (handler) class
    that should be retrieved.
    :rtype: Auth
    :return: An authentication based class that may be used for
    the interaction of authentication methods.
    """
    class_name = "%sAuth" % name.title()
    return getattr(netius.auth, class_name)
def _pending(self, _socket):
    """
    Tries to perform the pending operations in the socket
    and, these operations are set in the pending variable
    of the socket structure.
    The method returns if there are still pending operations
    after this method tick.

    :type _socket: Socket
    :param _socket: The socket object to be checked for
    pending operations and that is going to be used in the
    performing of these operations.
    :rtype: bool
    :return: If there are still pending operations to be
    performed in the provided socket.
    """
    # verifies if the pending attribute exists in the socket
    # and that the value is valid, in case it's not there's
    # no pending operation (method call) to be performed, and
    # as such must return immediately with no pending value
    if not hasattr(_socket, "_pending") or\
        not _socket._pending: return False
    # calls the pending callback method and verifies if the
    # pending value still persists in the socket if that the
    # case returns the is pending value to the caller method,
    # note the idiomatic identity test (was "not ... == None")
    _socket._pending(_socket)
    is_pending = _socket._pending is not None
    return is_pending
def _delays(self):
    """
    Calls the complete set of elements that are considered to
    be part of the delayed set of methods to be called.
    These methods are expected to be run before a poll call so
    that they are run outside the handling.
    The calling of the delayed methods takes into account a
    series of assumptions including the loop identifier in order
    to avoid loops in the delayed calls/insertions.
    """
    # runs the merge delay lists operation, so that delay operations
    # inserts from different threads may be used and processed under
    # the current execution (as expected)
    self.delay_m()
    # in case there's no delayed items to be called returns immediately
    # otherwise creates a copy of the delayed list and removes all
    # of the elements from the current list in instance
    if not self._delayed: return
    # retrieves the value for the current timestamp, to be used in
    # comparisons against the target timestamps of the callables
    current = time.time()
    # creates the lists that will hold all the values that are not
    # yet ready to be called in this iteration, the value in this
    # list will be added back to the heap at the end of the iteration
    pendings = []
    pendings_o = []
    # iterates over all the delayed callable tuples to try to find
    # (and call) the ones that are meant to be executed in the past
    # (have a target timestamp with a value less than the current)
    while self._delayed:
        # "pops" the current item from the delayed list to be used
        # in the execution of the current iteration cycle, note that
        # the _delayed_o heap is kept in lock-step with _delayed
        callable_t = heapq.heappop(self._delayed)
        callable_o = heapq.heappop(self._delayed_o)
        # unpacks the current callable tuple in iteration into a
        # target (timestamp value) and a method to be called in
        # case the target timestamp is valid (in the past)
        target, method, lid = callable_t
        # tests if the current target is valid (less than or
        # equals to the current time value) and in case it's
        # not restores the value to the heap and breaks the loop
        # (the heap ordering guarantees no further valid items)
        is_valid = target <= current
        if not is_valid:
            pendings.append(callable_t)
            pendings_o.append(callable_o)
            break
        # in case the loop id present in the delayed call tuple is
        # the same as the current iteration identifier then the
        # call must be done in the next iteration cycle, this
        # verification avoids loops in calls, note that this verification
        # is only required for target zero calls referring the delayed
        # calls to be executed immediately (on next loop)
        if target == 0 and self._lid == lid:
            pendings.append(callable_t)
            pendings_o.append(callable_o)
            continue
        # calls the callback method as the delayed operation is
        # now meant to be run, this is an operation that may change
        # the current list of delayed object (causing cycles) and so
        # must be implemented with the proper precautions
        method()
    # iterates over all the pending callable tuple values and adds
    # them back to the delayed heap list so that they are called
    # latter on (not ready to be called now)
    for pending, pending_o in zip(pendings, pendings_o):
        heapq.heappush(self._delayed, pending)
        heapq.heappush(self._delayed_o, pending_o)
def _generate(self, hashed = True):
    """
    Generates a random unique identifier that may be used
    to uniquely identify a certain object or operation.
    This method must be used carefully to avoid any unwanted
    behavior resulting from value collisions.

    :type hashed: bool
    :param hashed: If the identifier should be hashed into
    and hexadecimal string instead of an uuid based identifier.
    :rtype: String
    :return: The random unique identifier generated and that
    may be used to identify objects or operations.
    """
    identifier = str(uuid.uuid4())
    identifier = identifier.upper()
    if not hashed: return identifier
    identifier = legacy.bytes(identifier)
    hash = hashlib.sha256(identifier)
    # fixes the previous misspelled variable ("indetifier") that
    # turned the upper case normalization into a dead store, the
    # hashed identifier is now consistently upper cased as intended
    identifier = hash.hexdigest()
    identifier = identifier.upper()
    return identifier
def _socket_keepalive(self, _socket):
    """
    Applies the TCP keep-alive set of options to the provided
    socket so that dead peers may be detected in a timely manner.

    :type _socket: Socket
    :param _socket: The socket to have the keep-alive (and port
    re-use) options applied to it.
    """
    # keep-alive tuning constants are platform dependent and live
    # on the socket module (not on socket instances), the previous
    # hasattr checks probed the instance and so never succeeded,
    # additionally the options were being set on the (nonexistent)
    # "self.socket" attribute instead of the provided socket
    is_inet = _socket.family in (socket.AF_INET, socket.AF_INET6)
    if is_inet and hasattr(socket, "TCP_KEEPIDLE"):
        _socket.setsockopt(
            socket.IPPROTO_TCP,
            socket.TCP_KEEPIDLE, #@UndefinedVariable
            KEEPALIVE_TIMEOUT
        )
    if is_inet and hasattr(socket, "TCP_KEEPINTVL"):
        _socket.setsockopt(
            socket.IPPROTO_TCP,
            socket.TCP_KEEPINTVL, #@UndefinedVariable
            KEEPALIVE_INTERVAL
        )
    if is_inet and hasattr(socket, "TCP_KEEPCNT"):
        _socket.setsockopt(
            socket.IPPROTO_TCP,
            socket.TCP_KEEPCNT, #@UndefinedVariable
            KEEPALIVE_COUNT
        )
    if hasattr(socket, "SO_REUSEPORT"):
        _socket.setsockopt(
            socket.SOL_SOCKET,
            socket.SO_REUSEPORT, #@UndefinedVariable
            1
        )
def _ssl_init(self, strict = True, env = True):
    """
    Initializes the ssl infra-structure, creating the main (default)
    ssl context and the map of per hostname (SNI) contexts.

    :type strict: bool
    :param strict: If the main context should be unset whenever the
    SNI callback registration is not possible.
    :type env: bool
    :param env: If the environment variables should be used in the
    construction of the various ssl contexts.
    """
    # initializes the values of both the "main" context for ssl
    # and the map that associated an hostname and a context, both
    # are going to be used (if possible) at runtime for proper
    # resolution of both key and certificated files
    self._ssl_context = None
    self._ssl_contexts = dict()
    # verifies if the current ssl module contains a reference to
    # the ssl context class symbol if not, the control flow is
    # returned to the caller method as it's not possible to created
    # any kind of context information for ssl
    has_context = hasattr(ssl, "SSLContext")
    if not has_context: return
    # retrieves the reference to the environment variables that are going
    # to be used in the construction of the various ssl contexts, note that
    # the secure variable is extremely important to ensure that a proper and
    # secure ssl connection is established with the peer
    secure = self.get_env("SSL_SECURE", True, cast = bool) if env else False
    contexts = self.get_env("SSL_CONTEXTS", {}, cast = dict) if env else {}
    # creates the main/default ssl context setting the default key
    # and certificate information in such context, then verifies
    # if the callback registration method is defined and if it is
    # defined registers a callback for when the hostname information
    # is available, so that proper concrete context may be set, note
    # that in case the strict mode is enabled (default) the context
    # is unset for situation where no callback registration is possible
    self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    self._ssl_ctx_base(self._ssl_context, secure = secure)
    self._ssl_certs(self._ssl_context)
    has_callback = hasattr(self._ssl_context, "set_servername_callback")
    if has_callback: self._ssl_context.set_servername_callback(self._ssl_callback)
    elif strict: self._ssl_context = None
    # retrieves the reference to the map containing the various key
    # and certificate paths for the various defined host names and
    # uses it to create the complete set of ssl context objects
    for hostname, values in legacy.iteritems(contexts):
        context = self._ssl_ctx(values, secure = secure)
        self._ssl_contexts[hostname] = (context, values)
def _ssl_destroy(self):
    # drops the references to both the per hostname (SNI) contexts
    # map and the main ssl context, allowing garbage collection
    self._ssl_contexts = dict()
    self._ssl_context = None
def _ssl_callback(self, socket, hostname, context):
    # SNI callback, swaps the connection's ssl context by the one
    # registered for the requested hostname, falling back to the
    # provided (default) context when no entry exists
    context, values = self._ssl_contexts.get(hostname, (context, None))
    self._ssl_ctx_base(context)
    socket.context = context
    if not values: return
    # propagates the ssl host value (when defined) to the matching
    # connection so that upper layers may use it for validation
    ssl_host = values.get("ssl_host", None)
    if not ssl_host: return
    connection = self.connections_m.get(socket, None)
    if not connection: return
    connection.ssl_host = ssl_host
def _ssl_ctx(self, values, context = None, secure = True):
    # creates a fresh ssl context when none is provided and runs
    # the base (security oriented) configuration on it
    context = context or ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    self._ssl_ctx_base(context, secure = secure)
    # unpacks the certificate related options from the values map
    # and resolves the proper certificate requirement mode
    key_file = values.get("key_file", None)
    cer_file = values.get("cer_file", None)
    ca_file = values.get("ca_file", None)
    ca_root = values.get("ca_root", True)
    ssl_verify = values.get("ssl_verify", False)
    verify_mode = ssl.CERT_REQUIRED if ssl_verify else ssl.CERT_NONE
    self._ssl_certs(
        context,
        key_file = key_file,
        cer_file = cer_file,
        ca_file = ca_file,
        ca_root = ca_root,
        verify_mode = verify_mode
    )
    return context
def _ssl_ctx_base(self, context, secure = True):
    # registers the set of ALPN protocols (when both the feature
    # and the protocols are available for the current instance)
    if hasattr(context, "set_alpn_protocols"):
        protocols = self.get_protocols()
        if protocols: context.set_alpn_protocols(protocols)
    if not secure: return
    # applies the complete set of hardening options that are
    # available under the current ssl module implementation
    options = (
        "OP_NO_SSLv2",
        "OP_NO_SSLv3",
        "OP_SINGLE_DH_USE",
        "OP_SINGLE_ECDH_USE",
        "OP_CIPHER_SERVER_PREFERENCE"
    )
    for option in options:
        if not hasattr(ssl, option): continue
        context.options |= getattr(ssl, option)
    if hasattr(context, "set_ecdh_curve"):
        context.set_ecdh_curve("prime256v1")
    if SSL_DH_PATH and hasattr(context, "load_dh_params"):
        context.load_dh_params(SSL_DH_PATH)
def _ssl_certs(
    self,
    context,
    key_file = None,
    cer_file = None,
    ca_file = None,
    ca_root = False,
    verify_mode = ssl.CERT_NONE,
    check_hostname = False
):
    # loads the certificate chain and verification settings into the
    # provided ssl context, falling back to the bundled (default)
    # key and certificate files when no explicit values are given
    dir_path = os.path.dirname(__file__)
    root_path = os.path.join(dir_path, "../")
    root_path = os.path.normpath(root_path)
    base_path = os.path.join(root_path, "base")
    extras_path = os.path.join(base_path, "extras")
    key_file = key_file or os.path.join(extras_path, "net.key")
    cer_file = cer_file or os.path.join(extras_path, "net.cer")
    context.load_cert_chain(cer_file, keyfile = key_file)
    context.verify_mode = verify_mode
    # the check_hostname attribute may not exist in older python
    # versions, hence the defensive hasattr verification
    if hasattr(context, "check_hostname"): context.check_hostname = check_hostname
    if ca_file: context.load_verify_locations(cafile = ca_file)
    # loads the system wide root certificates and the bundled ca
    # file (when available) for proper peer validation
    if ca_root and hasattr(context, "load_default_certs"):
        context.load_default_certs(purpose = ssl.Purpose.SERVER_AUTH)
    if ca_root and SSL_CA_PATH:
        context.load_verify_locations(cafile = SSL_CA_PATH)
def _ssl_upgrade(
    self,
    _socket,
    key_file = None,
    cer_file = None,
    ca_file = None,
    ca_root = True,
    server = True,
    ssl_verify = False
):
    # upgrading a socket to ssl is currently a direct forward to the
    # wrapping operation, kept as a separate method for semantic and
    # future extension purposes
    return self._ssl_wrap(
        _socket,
        key_file = key_file,
        cer_file = cer_file,
        ca_file = ca_file,
        ca_root = ca_root,
        server = server,
        ssl_verify = ssl_verify
    )
def _ssl_wrap(
    self,
    _socket,
    key_file = None,
    cer_file = None,
    ca_file = None,
    ca_root = True,
    server = True,
    ssl_verify = False
):
    """
    Wraps the provided (plain) socket into an ssl enabled one, using
    either the module level wrapping function (when no ssl context is
    available) or the currently set ssl context, note that the
    handshake is deferred (non blocking usage).
    """
    # resolves the paths to the bundled (default) key and certificate
    # files, used when no explicit values are provided
    dir_path = os.path.dirname(__file__)
    root_path = os.path.join(dir_path, "../")
    root_path = os.path.normpath(root_path)
    base_path = os.path.join(root_path, "base")
    extras_path = os.path.join(base_path, "extras")
    key_file = key_file or os.path.join(extras_path, "net.key")
    cer_file = cer_file or os.path.join(extras_path, "net.cer")
    cert_reqs = ssl.CERT_REQUIRED if ssl_verify else ssl.CERT_NONE
    # when no ssl context is available falls back to the module level
    # (legacy) wrapping function with equivalent options
    if not self._ssl_context: return ssl.wrap_socket(
        _socket,
        keyfile = key_file,
        certfile = cer_file,
        server_side = server,
        cert_reqs = cert_reqs,
        ca_certs = ca_file,
        ssl_version = ssl.PROTOCOL_SSLv23,
        do_handshake_on_connect = False
    )
    # otherwise (re)loads the certificates into the current context
    # and uses it for the (context based) wrapping of the socket
    self._ssl_certs(
        self._ssl_context,
        key_file = key_file,
        cer_file = cer_file,
        ca_file = ca_file,
        ca_root = ca_root,
        verify_mode = cert_reqs
    )
    socket_ssl = self._ssl_context.wrap_socket(
        _socket,
        server_side = server,
        do_handshake_on_connect = False
    )
    return socket_ssl
def _ssl_handshake(self, _socket):
    # runs a single (non blocking) handshake step on the socket,
    # re-scheduling itself as the pending operation whenever the
    # handshake is not yet complete (want read/write situations)
    try:
        _socket.do_handshake()
    except ssl.SSLError as exception:
        code = exception.args[0] if exception.args else None
        if not code in SSL_VALID_ERRORS: raise
        _socket._pending = self._ssl_handshake
    else:
        _socket._pending = None
def _level(self, level):
    """
    Normalizes the provided logging level value (either an integer
    or a string) into the integer representation expected by the
    logging infra-structure of the running interpreter.

    :type level: String/int
    :param level: The level value that is meant to be converted
    into the best representation possible.
    :rtype: int
    :return: The normalized level value, ready to be used for the
    setting of logging levels under the current interpreter.
    """
    if type(level) == int: return level
    if level == None: return level
    if level == "SILENT": return log.SILENT
    # newer interpreters expose the (private) _checkLevel helper,
    # falling back to the public name based resolution otherwise
    checker = getattr(logging, "_checkLevel", None)
    if checker: return checker(level)
    return logging.getLevelName(level)
def _format_delta(self, time_delta, count = 2):
    # formats the provided time delta into a compact human readable
    # string (eg: "1d 2h"), limiting the number of components to
    # the provided count, seconds are always appended when the
    # component budget has not yet been exhausted
    days = time_delta.days
    hours, remainder = divmod(time_delta.seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    parts = []
    for amount, suffix in ((days, "d"), (hours, "h"), (minutes, "m")):
        if amount <= 0: continue
        parts.append("%d%s" % (amount, suffix))
        count -= 1
        if count == 0: return " ".join(parts)
    parts.append("%ds" % seconds)
    return " ".join(parts)
class DiagBase(AbstractBase):
    """
    Diagnostics oriented version of the abstract base class that
    counts the read, write and error operations performed, exposing
    these counters through the information dictionary.
    """

    def __init__(self, *args, **kwargs):
        AbstractBase.__init__(self, *args, **kwargs)
        # initializes the complete set of diagnostics counters
        self.reads_c = 0
        self.writes_c = 0
        self.errors_c = 0

    def reads(self, *args, **kwargs):
        AbstractBase.reads(self, *args, **kwargs)
        self.reads_c = self.reads_c + 1

    def writes(self, *args, **kwargs):
        AbstractBase.writes(self, *args, **kwargs)
        self.writes_c = self.writes_c + 1

    def errors(self, *args, **kwargs):
        AbstractBase.errors(self, *args, **kwargs)
        self.errors_c = self.errors_c + 1

    def info_dict(self, full = False):
        # enriches the base information dictionary with the
        # diagnostics counters gathered at runtime
        info = AbstractBase.info_dict(self, full = full)
        info["reads_c"] = self.reads_c
        info["writes_c"] = self.writes_c
        info["errors_c"] = self.errors_c
        return info
class BaseThread(threading.Thread):
    """
    The top level thread class that is meant to encapsulate
    a running base object and run it in a new context.
    This base thread may be used to run a network loop allowing
    a main thread to continue with execution logic.
    """

    def __init__(self, owner = None, daemon = False, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        self.owner = owner
        self.daemon = daemon

    def run(self):
        threading.Thread.run(self)
        # starts the owner's (event) loop when one is set, dropping
        # the reference afterwards to avoid lingering cycles
        owner = self.owner
        if not owner: return
        owner.start()
        self.owner = None
def get_main():
    # returns the reference to the current main (top level) instance
    # responsible for the control of the main event loop (if any)
    return AbstractBase._MAIN
def get_loop():
    # the currently running (main) loop is the main instance itself
    return get_main()
def get_poll():
    # resolves the polling mechanism associated with the currently
    # running main instance, defaulting to an invalid value when no
    # main event loop is currently available
    main = get_main()
    return main.poll if main else None
def ensure(coroutine, args = None, kwargs = None, thread = False):
    """
    Ensures the execution of the provided coroutine under the
    currently running (main) event loop.

    :type coroutine: Coroutine
    :param coroutine: The coroutine to be executed under the loop.
    :type args: List
    :param args: The positional arguments for the coroutine call.
    :type kwargs: Dictionary
    :param kwargs: The keyword arguments for the coroutine call.
    :type thread: bool
    :param thread: If the coroutine should be executed under the
    thread pool infra-structure.
    :rtype: Object
    :return: The resulting value of the loop's ensure operation.
    """
    # avoids the mutable default argument pitfall (previously shared
    # list/dict instances) by building fresh containers per call
    if args == None: args = []
    if kwargs == None: kwargs = {}
    loop = get_loop()
    return loop.ensure(
        coroutine,
        args = args,
        kwargs = kwargs,
        thread = thread
    )
def ensure_pool(coroutine, args = None, kwargs = None):
    """
    Ensures the execution of the provided coroutine under the
    thread pool of the currently running event loop.

    :type coroutine: Coroutine
    :param coroutine: The coroutine to be executed in the pool.
    :type args: List
    :param args: The positional arguments for the coroutine call.
    :type kwargs: Dictionary
    :param kwargs: The keyword arguments for the coroutine call.
    :rtype: Object
    :return: The resulting value of the ensure operation.
    """
    # avoids the mutable default argument pitfall (previously shared
    # list/dict instances) by building fresh containers per call
    if args == None: args = []
    if kwargs == None: kwargs = {}
    return ensure(
        coroutine,
        args = args,
        kwargs = kwargs,
        thread = True
    )
# selects the concrete base class to be exported: the diagnostics
# enabled variant when the DIAG configuration flag is set or the
# plain abstract implementation otherwise
is_diag = config.conf("DIAG", False, cast = bool)
if is_diag: Base = DiagBase
else: Base = AbstractBase
# new fork position (marker: a second, newer copy of this module follows)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2016 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2016 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import copy
import json
import heapq
import signal
import logging
import hashlib
import tempfile
import traceback
import netius.pool
import netius.adapters
from . import log
from . import errors
from .conn import * #@UnusedWildImport
from .poll import * #@UnusedWildImport
from .async import * #@UnusedWildImport
NAME = "netius"
""" The global infra-structure name to be used in the
identification of both the clients and the services this
value may be prefixed or suffixed """
VERSION = "1.7.15"
""" The version value that identifies the version of the
current infra-structure, all of the services and clients
may share this value """
PLATFORM = "%s %d.%d.%d.%s %s" % (
sys.subversion[0] if hasattr(sys, "subversion") else "CPython",
sys.version_info[0],
sys.version_info[1],
sys.version_info[2],
sys.version_info[3],
sys.platform
)
""" Extra system information containing some of the details
of the technical platform that is running the system, this
string should be exposed carefully to avoid extra information
from being exposed to outside agents """
IDENTIFIER_SHORT = "%s/%s" % (NAME, VERSION)
""" The short version of the current environment's identifier
meant to be used in production like environment as it hides some
of the critical and internal information of the system """
IDENTIFIER_LONG = "%s/%s (%s)" % (NAME, VERSION, PLATFORM)
""" Longest version of the system identifier, to be used in the
development like environment as it shows critical information
about the system internals that may expose the system """
IDENTIFIER = IDENTIFIER_LONG if config._is_devel() else IDENTIFIER_SHORT
""" The identifier that may be used to identify an user agent
or service running under the current platform, this string
should comply with the typical structure for such values,
by default this value is set with the short version of the
identifier (less information) but this may be changed at
runtime if the current verbosity level is changed """
WSAEWOULDBLOCK = 10035
""" The wsa would block error code meant to be used on
windows environments as a replacement for the would block
error code that indicates the failure to operate on a non
blocking connection """
WSAECONNABORTED = 10053
""" Error code meant to be raised when a connection is aborted
from the other peer meaning that that client or a server in the
as abruptly dropped the connection """
WSAECONNRESET = 10054
""" Code that is used when a connection is reset meaning that
the connection as been disconnected using a graceful approach
and without raising any extraneous problems """
POLL_ORDER = (
EpollPoll,
KqueuePoll,
PollPoll,
SelectPoll
)
""" The order from which the poll methods are going to be
selected from the fastest to the slowest, in case no explicit
poll method is defined for a base service they are selected
based on this list testing them for acceptance first """
SILENT_ERRORS = (
errno.ECONNABORTED,
errno.ECONNRESET,
errno.EPIPE,
WSAECONNABORTED,
WSAECONNRESET
)
""" List that contain the various connection error states that
should not raise any extra logging information because even though
they should drop the connection they are expected """
VALID_ERRORS = (
errno.EWOULDBLOCK,
errno.EAGAIN,
errno.EPERM,
errno.ENOENT,
errno.EINPROGRESS,
WSAEWOULDBLOCK
)
""" List containing the complete set of error that represent
non ready operations in a non blocking socket """
SSL_SILENT_ERRORS = (
ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN
)
""" The list containing the errors that should be silenced
while still making the connection dropped as they are expected
to occur and should not be considered an exception """
SSL_VALID_ERRORS = (
ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE
)
""" The list containing the valid error in the handshake
operation of the ssl connection establishment """
TCP_TYPE = 1
""" The type enumeration value that represents the tcp (stream)
based communication protocol, for various usages in the base
netius communication infra-structure """
UDP_TYPE = 2
""" The datagram based udp protocol enumeration value to be used
in static references to this kind of socket usage """
STATE_STOP = 1
""" The stop state value, this value is set when the service
is either in the constructed stage or when the service has been
stop normally or with an error """
STATE_START = 2
""" The start state set when the service is in the starting
stage and running, normal state """
STATE_PAUSE = 3
""" The pause state set for a service for which the main event
loop has been paused and should be resumed latter """
STATE_CONFIG = 4
""" The configuration state that is set when the service is
preparing to become started and the configuration attributes
are being set according to pre-determined indications """
STATE_POLL = 5
""" State to be used when the service is in the polling part
of the loop, this is the most frequent state in an idle service
as the service "spends" most of its time in it """
STATE_TICK = 6
""" Tick state representative of the situation where the loop
tick operation is being started and all the pre tick handlers
are going to be called for pre-operations """
STATE_READ = 7
""" Read state that is set when the connection are being read
and the on data handlers are being called, this is the part
where all the logic driven by incoming data is being called """
STATE_WRITE = 8
""" The write state that is set on the writing of data to the
connections, this is a pretty "fast" state as no logic is
associated with it """
# NOTE(review): the "ERRROR" spelling is a long-standing typo, kept
# as-is because external code may reference this public name
STATE_ERRROR = 9
""" The error state to be used when the connection is processing
any error state coming from its main select operation and associated
with a certain connection (very rare) """

# the state integer values are one-indexed, so resolution is done
# as STATE_STRINGS[state - 1] (see get_state_s)
STATE_STRINGS = (
    "STOP",
    "START",
    "PAUSE",
    "CONFIG",
    "POLL",
    "TICK",
    "READ",
    "WRITE",
    "ERROR"
)
""" Sequence that contains the various strings associated with
the various states for the base service, this may be used to
create an integer to string resolution mechanism """
KEEPALIVE_TIMEOUT = 300
""" The amount of time in seconds that a connection is set as
idle until a new refresh token is sent to it to make sure that
it's still online and not disconnected, make sure that this
value is high enough that it does not consume to much bandwidth """
KEEPALIVE_COUNT = 3
""" The amount of times the "ping" packet is re-sent until the
connection is considered to be offline and is dropped """
KEEPALIVE_INTERVAL = int(KEEPALIVE_TIMEOUT / 10)
""" The time between the retrying of "ping" packets, this value
does not need to be too large and should not be considered too
important (may be calculated automatically) """
LOG_FORMAT = "%(asctime)s [%(levelname)s] %(message)s"
""" The format that is going to be used by the logger of the
netius infra-structure for debugging purposes it should allow
and end developer to dig into the details of the execution """
# initializes the various paths that are going to be used for
# the base files configuration in the complete service infra
# structure, these should include the ssl based files
BASE_PATH = os.path.dirname(__file__)
EXTRAS_PATH = os.path.join(BASE_PATH, "extras")
SSL_KEY_PATH = os.path.join(EXTRAS_PATH, "net.key")
SSL_CER_PATH = os.path.join(EXTRAS_PATH, "net.cer")
SSL_CA_PATH = os.path.join(EXTRAS_PATH, "net.ca")
SSL_DH_PATH = os.path.join(EXTRAS_PATH, "dh.pem")
if not os.path.exists(SSL_CA_PATH): SSL_CA_PATH = None
if not os.path.exists(SSL_DH_PATH): SSL_DH_PATH = None
class AbstractBase(observer.Observable):
"""
Base network structure to be used by all the network
capable infra-structures (eg: servers and clients).
Should handle all the nonblocking event loop so that
the read and write operations are easy to handle.
"""
_MAIN = None
""" Reference to the top level main instance responsible
for the control of the main thread loop """
def __init__(self, name = None, handlers = None, *args, **kwargs):
    """
    Constructor of the abstract base, gathers configuration values
    from the provided keyword arguments and initializes the internal
    (default) structures of the event loop.

    :type name: String
    :param name: The name of the service, defaults to the name of
    the concrete class when not provided.
    :type handlers: Tuple
    :param handlers: The sequence of logging handlers to be used,
    defaults to a single (newly created) stream handler.
    """

    observer.Observable.__init__(self, *args, **kwargs)

    # determines the best polling mechanism available on the current
    # system, to be used as the default poll class
    poll = AbstractBase.test_poll()

    # base configuration values, possibly overridden through the
    # provided keyword arguments
    self.name = name or self.__class__.__name__
    self.handler_stream = logging.StreamHandler()
    self.handlers = handlers or (self.handler_stream,)
    self.level = kwargs.get("level", logging.INFO)
    self.diag = kwargs.get("diag", False)
    self.children = kwargs.get("children", 0)

    # thread identification values, only populated once the main
    # loop is effectively started (see start())
    self.tid = None
    self.tname = None

    # logging structures, properly built by load_logging()
    self.logger = None
    self.logging = None

    # task (thread) and file pools, lazily created on first usage
    self.tpool = None
    self.fpool = None

    # polling structures, the poll class may be overridden through
    # the "poll" keyword argument
    self.poll_c = kwargs.get("poll", poll)
    self.poll = self.poll_c()
    self.poll_name = self.poll.name()
    self.poll_timeout = kwargs.get("poll_timeout", POLL_TIMEOUT)
    self.poll_owner = True
    self.diag_app = None

    # currently active connections (list) and the map that associates
    # each socket with its connection object
    self.connections = []
    self.connections_m = {}

    # private state: unique identifier, loop id, life-cycle flags,
    # delayed (scheduled) callable structures and their lock
    self._uuid = uuid.uuid4()
    self._lid = 0
    self._main = False
    self._running = False
    self._pausing = False
    self._loaded = False
    self._delayed = []
    self._delayed_o = []
    self._delayed_n = []
    self._delayed_l = threading.RLock()
    self._extra_handlers = []

    # initializes the ssl context(s) and sets the initial (stopped)
    # state for the service
    self._ssl_init()
    self.set_state(STATE_STOP)
@classmethod
def test_poll(cls, preferred = None):
    """
    Determines the polling mechanism class to be used, selecting
    the preferred one when available or otherwise the first one
    (best performing) that tests positive on the current system.

    :type preferred: String
    :param preferred: The name of the preferred polling mechanism
    to be used when available (optional).
    :rtype: Class
    :return: The selected polling mechanism class.
    """

    # sets the initial selected variable with the unselected
    # (invalid) value so that at lease one selection must be
    # done in order for this method to succeed
    selected = None

    # iterates over all the poll classes ordered by preference
    # (best first) and tries to find the one that better matched
    # the current situation, either the preferred poll method or
    # the most performant one in case it's not possible
    for poll in POLL_ORDER:
        if not poll.test(): continue
        if not selected: selected = poll
        if not preferred: break
        name = poll.name()
        if not name == preferred: continue
        selected = poll
        break

    # in case no polling method was selected must raise an exception
    # indicating that no valid polling mechanism is available
    if not selected: raise errors.NetiusError(
        "No valid poll mechanism available"
    )

    # returns the selected polling mechanism class to the caller method
    # as expected by the current method
    return selected
def delay(self, callable, timeout = None, immediately = False, verify = False):
    """
    Schedules the provided callable for execution by the event loop,
    either on the next tick (default, target 0), still on the current
    tick (immediately flag, target -1) or after the provided timeout
    in seconds, note that a valid timeout value takes precedence over
    the immediately flag.

    :type callable: Function
    :param callable: The callable to be executed.
    :type timeout: float
    :param timeout: The number of seconds to wait before execution.
    :type immediately: bool
    :param immediately: If the callable should be run still on the
    current tick of the event loop.
    :type verify: bool
    :param verify: If duplicated insertions should be avoided.
    """

    # creates the original target value with a zero value (forced
    # execution in next tick) in case the timeout value is set the
    # value is incremented to the current time, then created the
    # callable original tuple with the target (time) and the callable
    target = -1 if immediately else 0
    if timeout: target = time.time() + timeout
    callable_o = (target, callable)
    callable_o = legacy.orderable(callable_o)

    # in case the verify flag is set, must verify if the callable
    # is already inserted in the list of delayed operations in
    # case it does returns immediately to avoid duplicated values
    is_duplicate = verify and callable_o in self._delayed_o
    if is_duplicate: return

    # creates the "final" callable tuple with the target time, the
    # callable and the loop id (lid) then inserts both the delayed
    # (original) callable tuple and the callable tuple in the lists
    callable_t = (target, callable, self._lid)
    callable_t = legacy.orderable(callable_t)
    heapq.heappush(self._delayed, callable_t)
    heapq.heappush(self._delayed_o, callable_o)
def delay_s(self, callable):
    """
    Thread-safe variant of the delay operation, meant to be used to
    schedule a callable from a thread other than the event loop one
    (implied lock mechanisms).
    There's a significant performance penalty when compared with the
    plain (local) delay() call, so it should only be used when
    calling from a different thread.

    :type callable: Function
    :param callable: The callable that should be called on the next
    tick according to the event loop rules.
    """

    # the lock guards the "delayed next" list, which is only going
    # to be joined/merged with the main delay structures on the next
    # tick (through the merge operation)
    with self._delayed_l:
        self._delayed_n.append(callable)
def delay_m(self):
    """
    Runs the merge operation so that the delay next list (used by the
    delay safe operation) is merged with the delayed and the delayed
    ordered structures, making the events (effectively) ready to be
    executed by delays.
    The snapshot of the pending list is taken while holding the lock
    so that no callable inserted concurrently (via delay_s) is lost.
    """

    # verifies if the delay next list is not valid or empty and if
    # that's the case returns immediately as there's nothing to be
    # merged, this unlocked check is only a cheap fast-path, the
    # authoritative read is performed below with the lock held
    if not self._delayed_n: return

    # acquires the lock that guards the delay next list and takes a
    # snapshot of it, clearing the original structure, previously the
    # list was iterated and cleared without the lock which allowed a
    # concurrent delay_s() insertion to be wiped by the clearing
    # without ever being scheduled (lost callable)
    self._delayed_l.acquire()
    try:
        pending = self._delayed_n[:]
        del self._delayed_n[:]
    finally:
        self._delayed_l.release()

    # iterates over the complete set of pending elements and schedules
    # them as delays for immediate (current tick) execution
    for callable in pending: self.delay(callable, immediately = True)
def ensure(
    self,
    coroutine,
    args = [],
    kwargs = {},
    thread = False,
    future = None,
    immediately = True
):
    """
    Schedules the execution of the provided coroutine (generator based
    callable) under the event loop, returning the future that controls
    and represents its execution.

    :type coroutine: Function
    :param coroutine: The coroutine callable, called with the future as
    its first argument followed by the provided args and kwargs.
    :type thread: bool
    :param thread: If the stepping of the coroutine should be run under
    the task (thread) pool instead of the main thread.
    :type future: Future
    :param future: Optional future to be re-used, a new one is created
    when not provided.
    :type immediately: bool
    :param immediately: If the first step should be scheduled for the
    current tick of the event loop.
    :rtype: Future
    :return: The future that controls the coroutine execution.
    """

    # verifies if a future variable is meant to be re-used
    # or if instead a new one should be created for the new
    # ensure execution operation
    future = future or Future()

    # creates the generate sequence from the coroutine callable
    # by calling it with the newly created future instance, that
    # will be used for the control of the execution
    sequence = coroutine(future, *args, **kwargs)

    # creates the callable that is going to be used to call
    # the coroutine with the proper future variable as argument
    # note that in case the thread mode execution is enabled the
    # callable is going to be executed on a different thread
    if thread: callable = lambda f = future: self.texecute(step, [f])
    else: callable = lambda f = future: step(f)

    # creates the function that will be used to step through the
    # various elements in the sequence created from the calling of
    # the coroutine, the values returned from it may be either future
    # or concrete values, for each situation a proper operation must
    # be applied to complete the final task in the proper way
    def step(_future):
        # iterates continuously over the generator that may emit both
        # plain object values or future (delayed executions)
        while True:
            # determines if the future is ready to receive new work
            # this is done using a pipeline of callbacks that must
            # deliver a positive value so that the future is considered
            # ready, note that in case the future is not ready the current
            # iteration cycle is delayed until the next tick
            if not future.ready: self.delay(callable); break

            # retrieves the next value from the generator and in case
            # value is the last one (stop iteration) breaks the cycle,
            # notice that if there's an exception raised in the middle
            # of the generator iteration it's set on the future
            try: value = next(sequence)
            except StopIteration: break
            except BaseException as exception:
                future.set_exception(exception)
                break

            # determines if the value retrieved from the generator is a
            # future and if that's the case schedules a proper execution
            is_future = isinstance(value, Future)

            # in case the current value is a future schedules it for execution
            # taking into account the proper thread execution model
            if is_future:
                value.add_done_callback(callable)
                break

            # otherwise it's a normal value being yielded and should be sent
            # to the future object as a partial value (pipelining)
            else:
                # for a situation where a thread pool should be used the new
                # value should be "consumed" by adding the data handler operation
                # to the list of delayed operations and notifying the task pool
                # so that the event loop on the main thread gets unblocked and
                # the proper partial value handling is performed (always on main thread)
                if thread:
                    def handler():
                        self.tpool.denotify()
                        future.partial(value)
                        callable()
                    self.delay_s(handler)
                    self.tpool.notify()
                    break

                # otherwise we're already on the main thread so a simple partial callback
                # notification should be enough for the proper consuming of the data
                else:
                    future.partial(value)

    # delays the execution of the callable so that it is executed
    # immediately if possible (event on the same iteration)
    self.delay(callable, immediately = immediately)
    return future
def sleep(self, timeout, future = None):
    """
    Returns a future that is resolved (with a None result) once the
    requested number of seconds has elapsed on the event loop, note
    that the resolution of the event loop conditions the effective
    precision of the timeout value.

    :type timeout: float
    :param timeout: The number of seconds to wait before resolution.
    :type future: Future
    :param future: Optional future to be re-used for the operation.
    :rtype: Future
    :return: The future that is resolved after the timeout.
    """

    # re-uses the provided future or creates a brand new one
    # for the control of this sleep operation
    if not future: future = Future()

    # the resolver simply sets the (empty) final result on the
    # future, marking it as done once the timeout is reached
    def resolver(): future.set_result(None)
    self.delay(resolver, timeout = timeout)
    return future
def load(self, full = False):
    """
    Loads the internal structures of the base system (boot, logging,
    diagnostics, welcome and signal binding), a no-op in case the
    system has already been loaded.
    """

    # in case the current structure is considered/marked as already loaded
    # there's no need to continue with the loading execution (returns immediately)
    if self._loaded: return

    # calls the boot hook responsible for the initialization of the various
    # structures of the base system, note that is going to be called once
    # per each loop starting process (structure should be destroyed on cleanup)
    self.boot()

    # loads the various parts of the base system, under this calls each
    # of the systems should have it's internal structures started
    self.load_logging(self.level)

    # loads the diagnostics application handlers that allows external
    # interaction with the service for diagnostics/debugging
    self.load_diag()

    # calls the welcome handle this is meant to be used to print some
    # information about the finishing of the loading of the infra-structure
    # this is going to be called once per base system
    self.welcome()

    # runs the binding of the system wide signals so that if
    # any of such signals is raised it's properly handled and
    # redirected to the proper logic through exceptions
    self.bind_signals()

    # sets the private loading flag ensuring that no extra load operations
    # will be done after this first call to the loading (no duplicates)
    self._loaded = True
def unload(self, full = False):
    """
    Unloads the base system, reverting the work done by the load
    operation, the logging infra-structure is only unloaded when
    the full flag is set.
    """

    # verifies if the current structure is considered/marked as already
    # "unloaded", if that's the case returns the control flow immediately
    # as there's nothing pending to be (undone)
    if not self._loaded: return

    # triggers the operation that will start the unloading process of the
    # logging infra-structure of the current system
    if full: self.unload_logging()

    # marks the current system as unloaded as the complete set of operations
    # meant to start the unloading process have been finished
    self._loaded = False
def boot(self):
    """
    Boot hook, called once per loop starting process so that subclasses
    may initialize internal structures (no behavior at this level).
    """

    pass

def welcome(self):
    """
    Welcome hook, called after loading so that subclasses may print
    information about the finishing of the loading of the
    infra-structure (no behavior at this level).
    """

    pass
def load_logging(self, level = logging.DEBUG, format = LOG_FORMAT, unique = False):
    """
    Loads (or re-uses) the logging infra-structure for the current
    instance, creating/retrieving the logger, registering the set of
    configured handlers and keeping a reference counter so that the
    same logger may be safely shared by multiple instances.

    :type level: String/int
    :param level: The base verbosity level for the logger.
    :type format: String
    :param format: The format string to be used by the formatter.
    :type unique: bool
    :param unique: If a unique identifier should be used so that a
    dedicated (non shared) logger instance is retrieved.
    """

    # normalizes the provided level value so that it represents
    # a proper and understandable value, then starts the formatter
    # that is going to be used and retrieves the (possibly unique)
    # identifier to be used in the logger retrieval/identification
    level = self._level(level)
    formatter = logging.Formatter(format)
    identifier = self.get_id(unique = unique)

    # retrieves the logger that is going to be according to the
    # decided identifier and then verifies that the counter value
    # is properly updated deciding also if the logger instance is
    # a new one or if instead it refers an already initialized/old
    # instance that doesn't need a new initialization process
    self.logger = logging.getLogger(identifier)
    counter = self.logger._counter if hasattr(self.logger, "_counter") else 0
    is_new = counter == 0
    self.logger._counter = counter + 1
    if not is_new: return

    # start the extra logging infrastructure (extra handlers)
    # and initializes the stream handlers with the proper level
    # and formatter values (as expected)
    self.extra_logging(level, formatter)
    self.handler_stream.setLevel(level)
    self.handler_stream.setFormatter(formatter)

    # starts the new logger instance by setting no parent to it,
    # updating the verbosity level of it and then registering the
    # complete set of handlers for it (as expected)
    self.logger.parent = None
    self.logger.setLevel(level)
    for handler in self.handlers:
        if not handler: continue
        self.logger.addHandler(handler)
def unload_logging(self):
    """
    Unloads the logging infra-structure, decrementing the shared
    logger reference counter and tearing the handlers down only when
    no more "clients" are using the logger, assumes load_logging()
    has previously run (the _counter attribute exists).
    """

    # updates the counter value for the logger and validates
    # that no more "clients" are using the logger so that it
    # may be properly destroyed (as expected)
    counter = self.logger._counter
    is_old = counter == 1
    self.logger._counter = counter - 1
    if not is_old: return

    # iterates over the complete set of handlers in the current
    # base element and removes them from the current logger
    for handler in self.handlers:
        if not handler: continue
        self.logger.removeHandler(handler)

    # iterates over the complete set of (built) extra handlers
    # and runs the close operation for each of them, as they are
    # no longer considered required for logging purposes
    for handler in self._extra_handlers: handler.close()
def extra_logging(self, level, formatter):
    """
    Loads the complete set of logging handlers defined in the
    current logging value, should be a map of definitions.

    This handlers will latter be used for piping the various
    logging messages to certain output channels.

    The creation of the handler is done using a special keyword
    arguments strategy so that python and configuration files
    are properly set as compatible.

    :type level: String/int
    :param level: The base severity level for which the new handler
    will be configured in case no extra level definition is set.
    :type formatter: Formatter
    :param formatter: The logging formatter instance to be set in
    the handler for formatting messages to the output.
    """

    # verifies if the logging attribute of the current instance is
    # defined and in case it's not returns immediately, otherwise
    # starts by converting the currently defined set of handlers into
    # a list so that it may be correctly manipulated (add handlers)
    if not self.logging: return
    self.handlers = list(self.handlers)

    # iterates over the complete set of handler configuration in the
    # logging to create the associated handler instances
    for config in self.logging:
        # gathers the base information on the current handler configuration
        # running also the appropriate transformation on the level
        name = config.get("name", None)
        _level = config.get("level", level)
        _level = self._level(_level)

        # "clones" the configuration dictionary and then removes the base
        # values so that they do not interfere with the building
        config = dict(config)
        if "level" in config: del config["level"]
        if "name" in config: del config["name"]

        # skips configuration entries with no (valid) handler name set,
        # previously a missing name would raise a type error on the
        # string concatenation below (None + str) before the hasattr
        # guard was able to skip the entry
        if not name: continue

        # retrieves the proper building, skipping the current loop in case
        # it does not exits and then builds the new handler instance, setting
        # the proper level and formatter and then adding it to the set
        if not hasattr(log, name + "_handler"): continue
        builder = getattr(log, name + "_handler")
        handler = builder(**config)
        handler.setLevel(_level)
        handler.setFormatter(formatter)
        self.handlers.append(handler)
        self._extra_handlers.append(handler)

    # restores the handlers structure back to the "original" tuple form
    # so that no expected data types are violated
    self.handlers = tuple(self.handlers)
def level_logging(self, level):
    """
    Updates the verbosity level of the current logging infra-structure
    to the provided value, which may be either an integer (internal
    value) or the string representation of the requested level.

    :type level: int/String
    :param level: The (logging) level for which the logging
    infra-structure must be changed, either an integer or string value.
    """

    # normalizes the provided value (string or integer) into the
    # internal (integer based) representation of the level
    normalized = self._level(level)

    # propagates the new level value first to the base stream handler
    # and to the logger itself and then to every registered handler
    self.handler_stream.setLevel(normalized)
    self.logger.setLevel(normalized)
    for handler in self.handlers:
        handler.setLevel(normalized)
def load_diag(self, env = True):
    """
    Loads the (optional) diagnostics application, serving it under a
    new thread, the server, host and port values may be provided via
    environment variables when the env flag is set.
    """

    # verifies if the diagnostics "feature" has been requested
    # for the current infra-structure and if that's not the case
    # returns the control flow immediately to the caller
    if not self.diag: return

    # runs the import operations for the diag module, note that
    # this must be performed locally no avoid any unwanted behaviour
    # or collision with a runtime process (would pose issues)
    from . import diag

    # verifies if the diag module has been correctly loaded and
    # if that's not the case fails gracefully and returns the
    # control flow to the caller method
    if not diag.loaded:
        self.info("Failed to load diagnostics, import problem")
        return

    # retrieves the various server related value for the diagnostics
    # server, taking into account if the env flag is set
    server = self.get_env("DIAG_SERVER", "netius") if env else "netius"
    host = self.get_env("DIAG_HOST", "127.0.0.1") if env else "127.0.0.1"
    port = self.get_env("DIAG_PORT", 5050, cast = int) if env else 5050

    # creates the application object that is going to be
    # used for serving the diagnostics app and then starts
    # the "serving" of it under a new thread
    self.diag_app = diag.DiagApp(self)
    self.diag_app.serve(
        server = server,
        host = host,
        port = port,
        diag = False,
        threaded = True,
        conf = False
    )
def bind_signals(self):
    """
    Registers a handler for the (typical) SIGTERM signal so that its
    raising is converted into a SystemExit exception, allowing the
    event loop to be terminated through the proper logic.
    Registration may fail (eg: not running on the main thread or no
    SIGTERM support on the platform), in which case the failure is
    logged and execution continues (best effort behavior).
    """

    # creates the signal handler function that propagates the raising
    # of the system exit exception (proper logic is executed) and then
    # registers such handler for the (typical) sigterm signal
    def handler(signum = None, frame = None): raise SystemExit()

    # the exception clause is kept narrow (instead of a bare except)
    # so that exit-type exceptions (eg: KeyboardInterrupt) raised
    # during registration are never silently swallowed
    try: signal.signal(signal.SIGTERM, handler)
    except Exception: self.debug("Failed to register SIGTERM handler")
def start(self):
    """
    Starts the service: loads the internal structures, opens the
    polling mechanism, (optionally) forks child processes and then
    enters the (blocking) main event loop.
    For a currently paused service this call is equivalent to the
    resume operation.
    """

    # in case the current instance is currently paused runs the
    # resume operation instead as that's the expected operation
    if self.is_paused(): return self.resume()

    # re-builds the polling structure with the new name this
    # is required so that it's possible to change the polling
    # mechanism in the middle of the loading process
    self.poll = self.build_poll()

    # retrieves the name of the polling mechanism that is
    # going to be used in the main loop of the current
    # base service, this is going to be used for diagnostics
    poll_name = self.get_poll_name()

    # triggers the loading of the internal structures of
    # the base structure in case the loading has already
    # been done nothing is done (avoids duplicated load)
    self.load()

    # opens the polling mechanism so that its internal structures
    # become ready for the polling cycle, the inverse operation
    # (close) should be performed as part of the cleanup
    self.poll.open(timeout = self.poll_timeout)

    # runs the fork operation responsible for the forking of the
    # current process into the various child processes for multiple
    # process based parallelism, note that this must be done after
    # the master socket has been created (to be shared)
    self.fork()

    # retrieves the complete set of information regarding the current
    # thread that is being used for the starting of the loop, this data
    # may be used for runtime debugging purposes (debug only data),
    # note that the name attribute is used instead of the getName()
    # accessor, which is deprecated since python 3.10
    cthread = threading.current_thread()
    self.tid = cthread.ident or 0
    self.tname = cthread.name
    self._main = self.tname == "MainThread"

    # in case the current thread is the main one, the global
    # main instance is set as the current instance
    if self._main: AbstractBase._MAIN = self

    # enters the main loop operation by printing a message
    # to the logger indicating this start, this stage
    # should block the thread until a stop call is made
    self.debug("Starting '%s' service main loop (%.2fs) ..." % (self.name, self.poll_timeout))
    self.debug("Using thread '%s' with tid '%d'" % (self.tname, self.tid))
    self.debug("Using '%s' as polling mechanism" % poll_name)

    # calls the main method to be able to start the main event
    # loop properly as defined by specification
    self.main()
def stop(self):
    # unsets the running flag, the main loop exits at the next
    # iteration check and runs the complete cleanup sequence
    self._running = False

def pause(self):
    # unsets the running flag but marks the loop as pausing so
    # that finalize() raises the pause error and the cleanup
    # sequence is skipped (allowing a later resume())
    self._running = False
    self._pausing = True

def resume(self):
    # re-enters the (blocking) main loop for a previously
    # paused service
    self.debug("Resuming '%s' service main loop (%.2fs) ..." % (self.name, self.poll_timeout))
    self.main()

def close(self):
    # alias for the stop operation (graceful termination)
    self.stop()
def main(self):
    """
    Runs the main (blocking) event loop of the service, setting the
    proper states and triggering the life-cycle events, only returns
    once the loop is stopped or paused.
    """

    # sets the running flag that controls the running of the
    # main loop and then changes the current state to start
    # as the main loop is going to start, then triggers the
    # start event indicating the (re-)start of the even loop
    self._running = True
    self._pausing = False
    self.set_state(STATE_START)
    self.trigger("start", self)

    # runs the event loop, this is a blocking method that should
    # be finished by the end of the execution of by pause
    try:
        self.loop()
        self.finalize()
    except (KeyboardInterrupt, SystemExit):
        self.info("Finishing '%s' service on user request ..." % self.name)
    except errors.PauseError:
        self.set_state(STATE_PAUSE)
        self.trigger("pause", self)
        self.debug("Pausing '%s' service main loop" % self.name)
    except BaseException as exception:
        self.error(exception)
        self.log_stack(method = self.warning)
    except:
        # NOTE(review): under python 3 this clause is unreachable as the
        # previous clause already catches BaseException, presumably kept
        # for legacy (python 2) style exceptions — confirm before removal
        self.critical("Critical level loop exception raised")
        self.log_stack(method = self.error)
    finally:
        # when pausing, returns without running the cleanup so that
        # the loop may later be resumed, otherwise runs the complete
        # stop sequence (event, cleanup and final stopped state)
        if self.is_paused(): return
        self.trigger("stop", self)
        self.debug("Finished '%s' service main loop" % self.name)
        self.cleanup()
        self.set_state(STATE_STOP)
def is_started(self):
    # the service is considered started while the main loop
    # is running (start state)
    return self.get_state() == STATE_START

def is_stopped(self):
    # the service is stopped before start() and after cleanup
    return self.get_state() == STATE_STOP

def is_paused(self):
    # the service is paused after pause() until a resume() call
    return self.get_state() == STATE_PAUSE

# the following methods are simple delegates that proxy the edge,
# emptiness and subscription testing operations into the underlying
# poll object (no extra behavior at this level)

def is_edge(self):
    return self.poll.is_edge()

def is_empty(self):
    return self.poll.is_empty()

def is_sub_read(self, socket):
    return self.poll.is_sub_read(socket)

def is_sub_write(self, socket):
    return self.poll.is_sub_write(socket)

def is_sub_error(self, socket):
    return self.poll.is_sub_error(socket)

# subscription management delegates, the owner reference is passed
# on subscription so that the poll may call back this instance

def sub_all(self, socket):
    return self.poll.sub_all(socket, owner = self)

def unsub_all(self, socket):
    return self.poll.unsub_all(socket)

def sub_read(self, socket):
    return self.poll.sub_read(socket, owner = self)

def sub_write(self, socket):
    return self.poll.sub_write(socket, owner = self)

def sub_error(self, socket):
    return self.poll.sub_error(socket, owner = self)

def unsub_read(self, socket):
    return self.poll.unsub_read(socket)

def unsub_write(self, socket):
    return self.poll.unsub_write(socket)

def unsub_error(self, socket):
    return self.poll.unsub_error(socket)
def cleanup(self):
    """
    Runs the complete teardown sequence for the event loop: unloads
    the base system, clears delayed structures, destroys ssl, stops
    the pools, closes connections/sockets and (when owned) the poll.
    """

    # runs the unload operation for the current base container this should
    # unset/unload some of the components for this base infra-structure
    self.unload()

    # destroys the current information on the delays that are no longer
    # going to be executed as the poll/system is closing, this is required
    # in order to avoid any possible memory leak with closures/cycles
    del self._delayed[:]
    del self._delayed_o[:]
    del self._delayed_n[:]

    # runs the destroy operation on the ssl component of the base
    # element so that no more ssl is available/used (avoids leaks)
    self._ssl_destroy()

    # verifies if there's a valid (and open) task pool, if that's
    # the case starts the stop process for it so that there's no
    # leaking of task descriptors and other structures
    if self.tpool: self.tstop()

    # verifies if there's a valid (and open) file pool, if that's
    # the case starts the stop process for it so that there's no
    # leaking of file descriptors and other structures
    if self.fpool: self.fstop()

    # creates a copy of the connections list because this structure
    # is going to be changed in the closing of the connection object
    connections = copy.copy(self.connections)

    # iterates over the complete set of connections currently
    # registered in the base structure and closes them so that
    # can no longer be used and are gracefully disconnected
    for connection in connections: connection.close()

    # iterates over the complete set of sockets in the connections
    # map to properly close them (avoids any leak of resources)
    for _socket in self.connections_m: _socket.close()

    # in case the current thread is the main one and the global
    # main instance is unset to an invalid value (main unloaded)
    if self._main: AbstractBase._MAIN = None

    # closes the current poll mechanism so that no more issues arise
    # from an open poll system (memory leaks, etc.), note that this is
    # only performed in case the current base instance is the owner of
    # the poll that is going to be closed (works with containers)
    if self.poll_owner: self.poll.close()

    # deletes some of the internal data structures created for the instance
    # and that are considered as no longer required
    self.connections_m.clear()
    del self.connections[:]
    del self._extra_handlers[:]
def loop(self):
    """
    The (blocking) polling loop itself: while the running flag is set
    runs the tick operations, polls for events and dispatches the
    resulting read, write and error socket sets.
    """

    # iterates continuously while the running flag is set, once
    # it becomes unset the loop breaks at the next execution cycle
    while self._running:
        # calls the base tick int handler indicating that a new
        # tick loop iteration is going to be started, all the
        # "in between loop" operation should be performed in this
        # callback as this is the "space" they have for execution
        self.ticks()

        # updates the current state to poll to indicate
        # that the base service is selecting the connections
        self.set_state(STATE_POLL)

        # runs the main selection operation on the current set
        # of connection for each of the three operations returning
        # the resulting active sets for the callbacks
        reads, writes, errors = self.poll.poll()

        # calls the various callbacks with the selections lists,
        # these are the main entry points for the logic to be executed
        # each of this methods should be implemented in the underlying
        # class instances as no behavior is defined at this inheritance
        # level (abstract class)
        self.reads(reads)
        self.writes(writes)
        self.errors(errors)
def fork(self):
    """
    Forks the current process into the configured number of child
    processes (process based parallelism), only available on posix
    systems and when a valid children count is set.
    """

    # forking is only possible/relevant on posix systems and when
    # a valid number of children processes has been requested
    if not os.name in ("posix",): return
    if not self.children: return

    self.debug("Forking the current process into '%d' children ..." % self.children)

    # forks the requested number of children, the parent process
    # (pid != 0) continues iterating to create more children while
    # each child breaks out immediately so that it does not fork
    # any (grand) children of its own
    for _index in range(self.children):
        pid = os.fork() #@UndefinedVariable
        if not pid == 0: continue
        break
def finalize(self):
    """
    Verifies the (final) state of the event loop once it exits,
    raising the pause error for a pausing loop (to be caught by
    main()) and asserting that the running flag is properly unset.
    """

    # verifies a series of conditions and raises a proper error in case
    # any of them is verified under the current state
    if self._pausing: raise errors.PauseError("Pause state expected")
    if self._running: raise errors.AssertionError("Not expected running")
def ticks(self):
    """
    Runs the "in between loop" tick operations: state update, file
    event processing, loop identifier increment and execution of the
    due delayed callables.
    """

    # updates the current state value to the tick state indicating
    # that the current process is updating a new tick in loop
    self.set_state(STATE_TICK)

    # runs the verification/processing of the complete set of file
    # events that have been raised meanwhile, this allows for the
    # processing of various file driven operations
    self.files()

    # "calculates" the new loop id by incrementing one value
    # to the previous one, note that the value is calculated
    # in a modulus way so that no overflow occurs
    self._lid = (self._lid + 1) % 2147483647

    # runs the processing of the delayed calls so that the pending
    # calls are called if the correct time has been reached
    self._delays()
# entry points for the sets returned by the poll operation, at this
# (abstract) level only the state is updated, concrete handling is
# meant to be implemented by subclasses

def reads(self, reads, state = True):
    if state: self.set_state(STATE_READ)

def writes(self, writes, state = True):
    if state: self.set_state(STATE_WRITE)

def errors(self, errors, state = True):
    # note: the (misspelled) STATE_ERRROR name matches the constant
    # as defined elsewhere in the module
    if state: self.set_state(STATE_ERRROR)
def pregister(self, pool):
    """
    Registers the event file descriptor of the provided pool in the
    current polling mechanism so that (async) pool events unblock
    the main event loop through a read based notification.
    """

    self.debug("Started pool, for async handling")

    # the event virtual object (eventfd) is the primary communication
    # mechanism between the pool and the event loop, without it (or
    # without a valid poll) no subscription is possible
    eventfd = pool.eventfd()
    if not eventfd:
        self.warning("Starting pool without eventfd")
        return
    if not self.poll: return

    # subscribes for read operations on the event descriptor and
    # logs the subscription for diagnostics purposes
    self.poll.sub_read(eventfd)
    self.debug("Subscribed for read operations on event fd")
def punregister(self, pool):
    """
    Unregisters the event file descriptor of the provided pool from
    the current polling mechanism, ending the async (read based)
    notification of pool events.
    """

    self.debug("Stopped existing pool, no more async handling")

    # without an event virtual object (or without a valid poll)
    # there's no subscription to be removed
    eventfd = pool.eventfd()
    if not eventfd:
        self.warning("Stopping pool without eventfd")
        return
    if not self.poll: return

    # unsubscribes the read operations on the event descriptor and
    # logs the unsubscription for diagnostics purposes
    self.poll.unsub_read(eventfd)
    self.debug("Unsubscribed for read operations on event fd")
def tensure(self):
    # lazily starts the task (thread) pool, a no-op in case the
    # pool has already been created/started
    if self.tpool: return
    self.tstart()

def tstart(self):
    # creates and starts the task pool, registering its event fd
    # in the polling mechanism for async notification
    if self.tpool: return
    self.tpool = netius.pool.TaskPool()
    self.tpool.start()
    self.pregister(self.tpool)

def tstop(self):
    # unregisters and stops the task pool (when one exists) so that
    # no task descriptors or threads are leaked
    if not self.tpool: return
    self.punregister(self.tpool)
    self.tpool.stop()

def texecute(self, callable, args = [], kwargs = {}):
    # ensures a valid task pool and delegates the execution of the
    # callable (with the provided arguments) to it
    self.tensure()
    self.tpool.execute(callable, args = args, kwargs = kwargs)
def files(self):
    """
    Processes the complete set of pending file pool events,
    dispatching each event's payload values to its (trailing)
    callback element, a no-op when no file pool exists.
    """

    # without a valid file pool there are no file events pending
    if not self.fpool: return

    # pops the complete set of pending events (denotifying the
    # pool) and calls the callback of each with its payload, the
    # callback is the last element of the event tuple and the
    # payload is everything between the first and last elements
    for event in self.fpool.pop_all(denotify = True):
        handler = event[-1]
        if not handler: continue
        handler(*event[1:-1])
# file pool front-end operations, each ensures that a valid file
# pool exists and then delegates the (async) file operation to it

def fopen(self, *args, **kwargs):
    self.fensure()
    return self.fpool.open(*args, **kwargs)

def fclose(self, *args, **kwargs):
    self.fensure()
    return self.fpool.close(*args, **kwargs)

def fread(self, *args, **kwargs):
    self.fensure()
    return self.fpool.read(*args, **kwargs)

def fwrite(self, *args, **kwargs):
    self.fensure()
    return self.fpool.write(*args, **kwargs)
def fensure(self):
    # lazily starts the file pool, a no-op in case the pool has
    # already been created/started
    if self.fpool: return
    self.fstart()

def fstart(self):
    # verifies if there's an already open file pool for
    # the current system and if that's not the case creates
    # a new one and starts it's thread cycle, registering
    # its event fd for async notification
    if self.fpool: return
    self.fpool = netius.pool.FilePool()
    self.fpool.start()
    self.pregister(self.fpool)

def fstop(self):
    # verifies if there's an available file pool and
    # if that's the case initializes the stopping of
    # such system, note that this is blocking call as
    # all of the thread will be joined under it
    if not self.fpool: return
    self.punregister(self.fpool)
    self.fpool.stop()
def on_connection_c(self, connection):
    # logs the creation of the connection together with the current
    # number of connections of the owner (debug diagnostics)
    self.debug(
        "Connection '%s' from '%s' created ..." %
        (connection.id, connection.owner.name)
    )
    self.debug(
        "There are '%d' connections for '%s' ..." %
        (len(connection.owner.connections), connection.owner.name)
    )

def on_connection_d(self, connection):
    # logs the deletion of the connection together with the current
    # number of connections of the owner (debug diagnostics)
    self.debug(
        "Connection '%s' from '%s' deleted" %
        (connection.id, connection.owner.name)
    )
    self.debug(
        "There are '%d' connections for '%s' ..." %
        (len(connection.owner.connections), connection.owner.name)
    )
def info_dict(self, full = False):
    """
    Builds a dictionary with diagnostics information on the current
    instance (loaded flag, connection count, state and poll name),
    the full flag adds extra (internal) information.

    :type full: bool
    :param full: If the extended information should be included.
    :rtype: Dictionary
    :return: The diagnostics information dictionary.
    """

    info = {
        "loaded" : self._loaded,
        "connections" : len(self.connections),
        "state" : self.get_state_s(),
        "poll" : self.get_poll_name()
    }
    if full: info["_lid"] = self._lid
    return info
def info_string(self, full = False, safe = True):
    """
    Serializes the diagnostics information dictionary into a "pretty"
    (indented and sorted) JSON string.
    Any failure in gathering the information results in an empty
    dictionary being serialized (best effort behavior).
    NOTE(review): the safe flag is currently unused — presumably it
    was meant to control the fallback behavior, confirm with callers.
    """

    # gathers the information dictionary in a protected fashion, the
    # (previously bare) exception clause is narrowed so that exit-type
    # exceptions are not swallowed by the fallback
    try: info = self.info_dict(full = full)
    except Exception: info = dict()

    info_s = json.dumps(
        info,
        ensure_ascii = False,
        indent = 4,
        separators = (",", " : "),
        sort_keys = True
    )
    return info_s
def connections_dict(self, full = False):
    """
    Builds the list of information dictionaries for the complete set
    of currently registered connections.

    :type full: bool
    :param full: If the extended (complete) information should be
    gathered for each of the connections.
    :rtype: List
    :return: The list of per-connection information dictionaries.
    """

    return [
        connection.info_dict(full = full)
        for connection in self.connections
    ]
def connection_dict(self, id, full = False):
    """
    Retrieves the information dictionary for the connection with the
    provided identifier, returning an invalid (None) value when no
    such connection is currently registered.

    :type id: String
    :param id: The identifier of the requested connection.
    :type full: bool
    :param full: If the extended information should be gathered.
    :rtype: Dictionary
    :return: The information dictionary or None when not found.
    """

    # searches the registered connections for the first one with a
    # matching identifier (None when no match exists)
    connection = next(
        (candidate for candidate in self.connections if candidate.id == id),
        None
    )
    if not connection: return None
    return connection.info_dict(full = full)
def new_connection(self, socket, address, ssl = False):
    """
    Creates a new connection for the provided socket
    object and string based address, the returned
    value should be a workable object.

    :type socket: Socket
    :param socket: The socket object to be encapsulated
    by the object to be created (connection).
    :type address: String
    :param address: The address as a string to be used to
    describe the connection object to be created.
    :type ssl: bool
    :param ssl: If the connection to be created is meant to
    be secured using the ssl framework for encryption.
    :rtype: Connection
    :return: The connection object that encapsulates the
    provided socket and address values.
    """

    # builds the connection with the current instance set as the
    # owner, no registration is performed at this level
    return Connection(
        owner = self,
        socket = socket,
        address = address,
        ssl = ssl
    )
def load_config(self, path = "config.json", **kwargs):
    """
    Loads the configuration from the JSON file at the provided path,
    merging it over the provided keyword arguments, and sets each of
    the resulting keys and values as attributes of the instance.

    :type path: String
    :param path: The path to the JSON configuration file.
    """

    merged = self.apply_config(path, kwargs)
    for name, value in merged.items():
        setattr(self, name, value)
def apply_config(self, path, kwargs):
if not os.path.exists(path): return kwargs
self.info("Applying configuration file '%s' ..." % path)
kwargs = copy.copy(kwargs)
file = open(path, "rb")
try: contents = json.load(file)
finally: file.close()
for key, value in contents.items():
kwargs[key] = value
return kwargs
def is_devel(self):
"""
Verifies if the current running environment is meant to be used
for development purposes as opposed to a production environment.
The method should always be used in situations where some critical
and internal information is meant to be displayed in a development
environment but hidden in a production one.
This method should be used at runtime as opposed to the private
configuration based one.
:rtype: bool
:return: If the current environment is development oriented or
if it's considered to be a production one (invalid result).
"""
return self.is_debug()
def is_debug(self):
return self.logger.isEnabledFor(logging.DEBUG)
def is_info(self):
return self.logger.isEnabledFor(logging.INFO)
def is_warning(self):
return self.logger.isEnabledFor(logging.WARNING)
def is_error(self):
return self.logger.isEnabledFor(logging.ERROR)
def is_critical(self):
return self.logger.isEnabledFor(logging.CRITICAL)
def debug(self, object):
self.log(object, level = logging.DEBUG)
def info(self, object):
self.log(object, level = logging.INFO)
def warning(self, object):
self.log(object, level = logging.WARNING)
def error(self, object):
self.log(object, level = logging.ERROR)
def critical(self, object):
self.log(object, level = logging.CRITICAL)
def log_stack(self, method = None, info = True):
if not method: method = self.info
lines = traceback.format_exc().splitlines()
for line in lines: method(line)
if info: self.log_info(method = method)
def log_info(self, method = None):
if not method: method = self.info
info_string = self.info_string(full = True)
for line in info_string.split("\n"): method(line)
def log(self, *args, **kwargs):
if legacy.PYTHON_3: return self.log_python_3(*args, **kwargs)
else: return self.log_python_2(*args, **kwargs)
def log_python_3(self, object, level = logging.INFO):
object_t = type(object)
try: message = str(object) if not object_t == str else object
except: message = str(object)
if not self.logger: return
self.logger.log(level, message)
    def log_python_2(self, object, level = logging.INFO):
        """
        Logs the provided object using the Python 2 flavour of
        message coercion (unicode based).
        :type object: Object
        :param object: The value to be coerced into a (unicode)
        message and sent to the logger.
        :type level: int
        :param level: The logging level for the emission.
        """
        # coerces the value into an unicode message, falling back
        # to a lossy utf-8 decode for values whose conversion fails,
        # note that this code path is only valid under Python 2
        # (where the unicode builtin exists)
        object_t = type(object)
        try: message = unicode(object) if not object_t in legacy.str else object #@UndefinedVariable
        except: message = str(object).decode("utf-8", "ignore")
        # in case no logger is set there's nothing to be done,
        # otherwise emits the message at the requested level
        if not self.logger: return
        self.logger.log(level, message)
def build_poll(self):
# verifies if the currently set polling mechanism is open in
# case it's ther's no need to re-build the polling mechanism
# otherwise rebuilds the polling mechanism with the current
# name and returns the new poll object to the caller method
if self.poll and self.poll.is_open(): return self.poll
# runs the testing of the poll again and verifies if the polling
# class has changed in case it did not returns the current poll
# instance as expected by the current infra-structure
poll_c = AbstractBase.test_poll(preferred = self.poll_name)
if poll_c == self.poll_c: return self.poll
# updates the polling class with the new value and re-creates
# the polling instance with the new polling class returning this
# new value to the caller method
self.poll_c = poll_c
self.poll = self.poll_c()
return self.poll
def get_id(self, unique = True):
base = NAME + "-" + self.name
if not unique: return base
return base + "-" + str(self._uuid)
def get_poll(self):
return self.poll
def get_poll_name(self):
poll = self.get_poll()
name = poll.name()
return name
def get_state(self):
return self._state
def set_state(self, state):
self._state = state
def get_state_s(self, lower = True):
"""
Retrieves a string describing the current state
of the system, this string should be as descriptive
as possible.
An optional parameter controls if the string should
be lower cased or not.
:type lower: bool
:param lower: If the returned string should be converted
into a lower cased version.
:rtype: String
:return: A string describing the current sate of the loop
system, should be as descriptive as possible.
"""
state_s = STATE_STRINGS[self._state - 1]
state_s = state_s.lower() if lower else state_s
return state_s
def get_env(self, name, default = None, cast = None, expand = False):
"""
Retrieves the value of the environment variable with the
requested name, defaulting to the provided value in case
it's not possible to find such variable.
An optional cast type may be provided in order to cast the
value of the environment variable in to the target type.
An optional expand flag may be set so that the variable gets
expanded as a file system file, for this the newline values
should be escaped as explicit '\n' string sequences (two chars).
Current implementation forwards the request to the current
configuration registry so that other data providers may
also be used in search for configuration.
:type name: String
:param name: The name of the environment variable that is
meant to be retrieved from the current environment
:type default: Object
:param default: The default value to be returned in case
no value is found for the provided name.
:type cast: Type
:param cast: The cast type to be used to cast the value
of the requested environment variable.
:type expand: bool
:param expand: If the variable should be expanded as a file
object and stored in a temporary storage, for this situation
the resulting object should be a string with the file path.
:rtype: Object
:return: The value of the requested environment variable
properly casted into the target value.
"""
if not name in config.CONFIGS: return default
value = config.CONFIGS.get(name, default)
if expand: value = self.expand(value)
cast = config.CASTS.get(cast, cast)
if cast and not value == None: value = cast(value)
return value
def expand(self, value, encoding = "utf-8", force = False):
"""
Expands the provided string/bytes value into a file in the
current file system so that it may be correctly used by interfaces
that require certain values to be file system based.
In case the force value is provided the the file is created even
for situations where the provided value is invalid/unset.
:type value: String
:param value: The string/bytes based value that is going to be
expanded into a proper file system based (temporary) file.
:type encoding: String
:param encoding: The encoding that is going to be used to convert
the value into a bytes based one in case the provided value is not
bytes compliant (and must be converted).
:type force: bool
:param force: If the expansion operation should be performed even
for situations where the value is considered invalid/unset.
:rtype: String
:return: The path to the temporary file that has just been generated
for the expansion of the provided value.
"""
if not value and not force: return value
is_bytes = legacy.is_bytes(value)
if not is_bytes: value = value.encode(encoding)
value = value.replace(b"\\n", b"\n")
fd, file_path = tempfile.mkstemp()
file = open(file_path, "wb")
try: file.write(value)
except: os.close(fd); file.close()
return file_path
def get_protocols(self):
"""
Retrieves the complete set of protocols (as ALPN strings) that are
going to be handled by the current protocol infra-structure.
:rtype: List
:return: The list containing the complete set of protocols handled
by the current infra-structure.
:see: https://tools.ietf.org/html/rfc7301
"""
return None
def get_adapter(self, name = "memory", *args, **kwargs):
"""
Retrieves an instance of a storage adapter described
by the provided name, note that the dynamic (extra)
arguments are going to be used in the construction of
the adapter instance.
:type name: String
:param name: The name of the adapter to be retrieved
this should be equivalent to the adapter class name.
:rtype: Adapter
:return: An instance (properly configured) of the
requested adapter (defined by the name argument).
"""
name_f = name.title() + "Adapter"
adapter_c = getattr(netius.adapters, name_f)
adapter = adapter_c(*args, **kwargs)
return adapter
def get_auth(self, name = "memory", *args, **kwargs):
"""
Gathers the proper authentication handler that is being
requested with the provided name. The retrieved auth
is a static class that should be used from its interface
based on class based methods.
The state of theses authentication (handlers) is based
on the "global" state of the environment (no instances).
:type name: String
:param name: The name of the authentication (handler)
class that should be retrieved.
:rtype: Auth
:return: An authentication based class that may be used
for the interaction of authentication methods.
"""
name_f = name.title() + "Auth"
auth_c = getattr(netius.auth, name_f)
return auth_c
def _pending(self, _socket):
"""
Tries to perform the pending operations in the socket
and, these operations are set in the pending variable
of the socket structure.
The method returns if there are still pending operations
after this method tick.
:type _socket: Socket
:param _socket: The socket object to be checked for
pending operations and that is going to be used in the
performing of these operations.
:rtype: bool
:return: If there are still pending operations to be
performed in the provided socket.
"""
# verifies if the pending attribute exists in the socket
# and that the value is valid, in case it's not there's
# no pending operation (method call) to be performed, and
# as such must return immediately with no pending value
if not hasattr(_socket, "_pending") or\
not _socket._pending: return False
# calls the pending callback method and verifies if the
# pending value still persists in the socket if that the
# case returns the is pending value to the caller method
_socket._pending(_socket)
is_pending = not _socket._pending == None
return is_pending
    def _delays(self):
        """
        Calls the complete set of elements that are considered to
        be part of the delayed set of methods to be called.
        These methods are expected to be run before a poll call so
        that they are run outside the handling.
        The calling of the delayed methods takes into account a
        series of assumptions including the loop identifier in order
        to avoid loops in the delayed calls/insertions.
        Note that the ``_delayed`` and ``_delayed_o`` heaps are kept
        in lock-step (one entry popped/pushed from each per item).
        """
        # runs the merge delay lists operation, so that delay operations
        # inserts from different threads may be used and processed under
        # the current execution (as expected)
        self.delay_m()
        # in case there's no delayed items to be called returns immediately
        # otherwise creates a copy of the delayed list and removes all
        # of the elements from the current list in instance
        if not self._delayed: return
        # retrieves the value for the current timestamp, to be used in
        # comparisons against the target timestamps of the callables
        current = time.time()
        # creates the lists that will hold all the values that are not
        # yet ready to be called in this iteration, the value in this
        # list will be added back to the heap at the end of the iteration
        pendings = []
        pendings_o = []
        # iterates over all the delayed callable tuples to try to find
        # (and call) the ones that are meant to be executed in the past
        # (have a target timestamp with a value less than the current)
        while self._delayed:
            # "pops" the current item from the delayed list to be used
            # in the execution of the current iteration cycle, note that
            # the paired (ordered) heap is popped together to keep both
            # structures synchronized
            callable_t = heapq.heappop(self._delayed)
            callable_o = heapq.heappop(self._delayed_o)
            # unpacks the current callable tuple in iteration into a
            # target (timestamp value) and a method to be called in
            # case the target timestamp is valid (in the past)
            target, method, lid = callable_t
            # tests if the current target is valid (less than or
            # equals to the current time value) and in case it's
            # not restores the value to the heap and breaks the loop
            is_valid = target <= current
            if not is_valid:
                pendings.append(callable_t)
                pendings_o.append(callable_o)
                break
            # in case the loop id present in the delayed call tuple is
            # the same as the current iteration identifier then the
            # call must be done in the next iteration cycle, this
            # verification avoids loops in calls, note that this verification
            # is only required for target zero calls referring the delayed
            # calls to be executed immediately (on next loop)
            if target == 0 and self._lid == lid:
                pendings.append(callable_t)
                pendings_o.append(callable_o)
                continue
            # calls the callback method as the delayed operation is
            # now meant to be run, this is an operation that may change
            # the current list of delayed object (causing cycles) and so
            # must be implemented with the proper precautions
            method()
        # iterates over all the pending callable tuple values and adds
        # them back to the delayed heap list so that they are called
        # latter on (not ready to be called now)
        for pending, pending_o in zip(pendings, pendings_o):
            heapq.heappush(self._delayed, pending)
            heapq.heappush(self._delayed_o, pending_o)
def _generate(self, hashed = True):
"""
Generates a random unique identifier that may be used
to uniquely identify a certain object or operation.
This method must be used carefully to avoid any unwanted
behavior resulting from value collisions.
:type hashed: bool
:param hashed: If the identifier should be hashed into
and hexadecimal string instead of an uuid based identifier.
:rtype: String
:return: The random unique identifier generated and that
may be used to identify objects or operations.
"""
identifier = str(uuid.uuid4())
identifier = identifier.upper()
if not hashed: return identifier
identifier = legacy.bytes(identifier)
hash = hashlib.sha256(identifier)
indetifier = hash.hexdigest()
identifier = identifier.upper()
return indetifier
def _socket_keepalive(self, _socket):
is_inet = _socket.family in (socket.AF_INET, socket.AF_INET6)
is_inet and hasattr(_socket, "TCP_KEEPIDLE") and\
self.socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE, #@UndefinedVariable
KEEPALIVE_TIMEOUT
)
is_inet and hasattr(_socket, "TCP_KEEPINTVL") and\
self.socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPINTVL, #@UndefinedVariable
KEEPALIVE_INTERVAL
)
is_inet and hasattr(_socket, "TCP_KEEPCNT") and\
self.socket.setsockopt(
socket.IPPROTO_TCP,
socket.TCP_KEEPCNT, #@UndefinedVariable
KEEPALIVE_COUNT
)
hasattr(_socket, "SO_REUSEPORT") and\
self.socket.setsockopt(
socket.SOL_SOCKET,
socket.SO_REUSEPORT, #@UndefinedVariable
1
)
    def _ssl_init(self, strict = True, env = True):
        """
        Initializes the ssl infra-structure of the instance, creating
        both the default ssl context and the map of hostname based
        contexts (for SNI resolution).
        :type strict: bool
        :param strict: If the default context should be unset when no
        servername callback registration is possible.
        :type env: bool
        :param env: If the environment variables (SSL_SECURE and
        SSL_CONTEXTS) should be taken into account.
        """
        # initializes the values of both the "main" context for ssl
        # and the map that associated an hostname and a context, both
        # are going to be used (if possible) at runtime for proper
        # resolution of both key and certificated files
        self._ssl_context = None
        self._ssl_contexts = dict()
        # verifies if the current ssl module contains a reference to
        # the ssl context class symbol if not, the control flow is
        # returned to the caller method as it's not possible to created
        # any kind of context information for ssl
        has_context = hasattr(ssl, "SSLContext")
        if not has_context: return
        # retrieves the reference to the environment variables that are going
        # to be used in the construction of the various ssl contexts, note that
        # the secure variable is extremely important to ensure that a proper and
        # secure ssl connection is established with the peer
        secure = self.get_env("SSL_SECURE", True, cast = bool) if env else False
        contexts = self.get_env("SSL_CONTEXTS", {}, cast = dict) if env else {}
        # creates the main/default ssl context setting the default key
        # and certificate information in such context, then verifies
        # if the callback registration method is defined and if it is
        # defined registers a callback for when the hostname information
        # is available, so that proper concrete context may be set, note
        # that in case the strict mode is enabled (default) the context
        # is unset for situation where no callback registration is possible
        self._ssl_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        self._ssl_ctx_base(self._ssl_context, secure = secure)
        self._ssl_certs(self._ssl_context)
        has_callback = hasattr(self._ssl_context, "set_servername_callback")
        if has_callback: self._ssl_context.set_servername_callback(self._ssl_callback)
        elif strict: self._ssl_context = None
        # retrieves the reference to the map containing the various key
        # and certificate paths for the various defined host names and
        # uses it to create the complete set of ssl context objects
        for hostname, values in legacy.iteritems(contexts):
            context = self._ssl_ctx(values, secure = secure)
            self._ssl_contexts[hostname] = (context, values)
def _ssl_destroy(self):
self._ssl_context = None
self._ssl_contexts = dict()
def _ssl_callback(self, socket, hostname, context):
context, values = self._ssl_contexts.get(hostname, (context, None))
self._ssl_ctx_base(context)
socket.context = context
if not values: return
ssl_host = values.get("ssl_host", None)
if not ssl_host: return
connection = self.connections_m.get(socket, None)
if not connection: return
connection.ssl_host = ssl_host
def _ssl_ctx(self, values, context = None, secure = True):
context = context or ssl.SSLContext(ssl.PROTOCOL_SSLv23)
self._ssl_ctx_base(context, secure = secure)
key_file = values.get("key_file", None)
cer_file = values.get("cer_file", None)
ca_file = values.get("ca_file", None)
ca_root = values.get("ca_root", True)
ssl_verify = values.get("ssl_verify", False)
cert_reqs = ssl.CERT_REQUIRED if ssl_verify else ssl.CERT_NONE
self._ssl_certs(
context,
key_file = key_file,
cer_file = cer_file,
ca_file = ca_file,
ca_root = ca_root,
verify_mode = cert_reqs
)
return context
def _ssl_ctx_base(self, context, secure = True):
if hasattr(context, "set_alpn_protocols"):
protocols = self.get_protocols()
protocols and context.set_alpn_protocols(protocols)
if secure and hasattr(ssl, "OP_NO_SSLv2"):
context.options |= ssl.OP_NO_SSLv2
if secure and hasattr(ssl, "OP_NO_SSLv3"):
context.options |= ssl.OP_NO_SSLv3
if secure and hasattr(ssl, "OP_SINGLE_DH_USE"):
context.options |= ssl.OP_SINGLE_DH_USE
if secure and hasattr(ssl, "OP_SINGLE_ECDH_USE"):
context.options |= ssl.OP_SINGLE_ECDH_USE
if secure and hasattr(ssl, "OP_CIPHER_SERVER_PREFERENCE"):
context.options |= ssl.OP_CIPHER_SERVER_PREFERENCE
if secure and hasattr(context, "set_ecdh_curve"):
context.set_ecdh_curve("prime256v1")
if secure and SSL_DH_PATH and hasattr(context, "load_dh_params"):
context.load_dh_params(SSL_DH_PATH)
def _ssl_certs(
self,
context,
key_file = None,
cer_file = None,
ca_file = None,
ca_root = False,
verify_mode = ssl.CERT_NONE,
check_hostname = False
):
dir_path = os.path.dirname(__file__)
root_path = os.path.join(dir_path, "../")
root_path = os.path.normpath(root_path)
base_path = os.path.join(root_path, "base")
extras_path = os.path.join(base_path, "extras")
key_file = key_file or os.path.join(extras_path, "net.key")
cer_file = cer_file or os.path.join(extras_path, "net.cer")
context.load_cert_chain(cer_file, keyfile = key_file)
context.verify_mode = verify_mode
if hasattr(context, "check_hostname"): context.check_hostname = check_hostname
if ca_file: context.load_verify_locations(cafile = ca_file)
if ca_root and hasattr(context, "load_default_certs"):
context.load_default_certs(purpose = ssl.Purpose.SERVER_AUTH)
if ca_root and SSL_CA_PATH:
context.load_verify_locations(cafile = SSL_CA_PATH)
def _ssl_upgrade(
self,
_socket,
key_file = None,
cer_file = None,
ca_file = None,
ca_root = True,
server = True,
ssl_verify = False
):
socket_ssl = self._ssl_wrap(
_socket,
key_file = key_file,
cer_file = cer_file,
ca_file = ca_file,
ca_root = ca_root,
server = server,
ssl_verify = ssl_verify
)
return socket_ssl
    def _ssl_wrap(
        self,
        _socket,
        key_file = None,
        cer_file = None,
        ca_file = None,
        ca_root = True,
        server = True,
        ssl_verify = False
    ):
        """
        Wraps the provided socket into an ssl secured one, either
        using the instance's ssl context (when available) or the
        module level wrap operation as a fallback.
        :type _socket: Socket
        :param _socket: The plain socket to be wrapped/secured.
        :type key_file: String
        :param key_file: Path to the private key file (defaults to
        the bundled extras key when unset).
        :type cer_file: String
        :param cer_file: Path to the certificate file (defaults to
        the bundled extras certificate when unset).
        :type ca_file: String
        :param ca_file: Optional path to a certificate authority file.
        :type ca_root: bool
        :param ca_root: If the root (system wide) certificate
        authorities should also be loaded.
        :type server: bool
        :param server: If the socket should be wrapped in server mode.
        :type ssl_verify: bool
        :param ssl_verify: If peer certificate verification should be
        required for the wrapped socket.
        :rtype: Socket
        :return: The ssl wrapped version of the provided socket.
        """
        # resolves the default key and certificate file paths from
        # the bundled extras directory (relative to this file)
        dir_path = os.path.dirname(__file__)
        root_path = os.path.join(dir_path, "../")
        root_path = os.path.normpath(root_path)
        base_path = os.path.join(root_path, "base")
        extras_path = os.path.join(base_path, "extras")
        key_file = key_file or os.path.join(extras_path, "net.key")
        cer_file = cer_file or os.path.join(extras_path, "net.cer")
        cert_reqs = ssl.CERT_REQUIRED if ssl_verify else ssl.CERT_NONE
        # fallback path used when no ssl context is available, note
        # that ssl.wrap_socket is deprecated in newer Python versions
        # (removed in 3.12) -- NOTE(review): consider migrating this
        # fallback to an explicit SSLContext based approach
        if not self._ssl_context: return ssl.wrap_socket(
            _socket,
            keyfile = key_file,
            certfile = cer_file,
            server_side = server,
            cert_reqs = cert_reqs,
            ca_certs = ca_file,
            ssl_version = ssl.PROTOCOL_SSLv23,
            do_handshake_on_connect = False
        )
        # re-applies the certificate/key configuration to the shared
        # context and uses it to wrap the provided socket, handshake
        # is deferred (non blocking usage)
        self._ssl_certs(
            self._ssl_context,
            key_file = key_file,
            cer_file = cer_file,
            ca_file = ca_file,
            ca_root = ca_root,
            verify_mode = cert_reqs
        )
        socket_ssl = self._ssl_context.wrap_socket(
            _socket,
            server_side = server,
            do_handshake_on_connect = False
        )
        return socket_ssl
def _ssl_handshake(self, _socket):
try:
_socket.do_handshake()
_socket._pending = None
except ssl.SSLError as error:
error_v = error.args[0] if error.args else None
if error_v in SSL_VALID_ERRORS:
_socket._pending = self._ssl_handshake
else: raise
def _level(self, level):
"""
Converts the provided logging level value into the best
representation of it, so that it may be used to update
a logger's level of representation.
This method takes into account the current interpreter
version so that no problem occur.
:type level: String/int
:param level: The level value that is meant to be converted
into the best representation possible.
:rtype: int
:return: The best representation of the level so that it may
be used freely for the setting of logging levels under the
current running interpreter.
"""
level_t = type(level)
if level_t == int: return level
if level == None: return level
if level == "SILENT": return log.SILENT
if hasattr(logging, "_checkLevel"):
return logging._checkLevel(level)
return logging.getLevelName(level)
def _format_delta(self, time_delta, count = 2):
days = time_delta.days
hours, remainder = divmod(time_delta.seconds, 3600)
minutes, seconds = divmod(remainder, 60)
delta_s = ""
if days > 0:
delta_s += "%dd " % days
count -= 1
if count == 0: return delta_s.strip()
if hours > 0:
delta_s += "%dh " % hours
count -= 1
if count == 0: return delta_s.strip()
if minutes > 0:
delta_s += "%dm " % minutes
count -= 1
if count == 0: return delta_s.strip()
delta_s += "%ds" % seconds
return delta_s.strip()
class DiagBase(AbstractBase):
    """
    Diagnostics oriented version of the abstract base class
    that counts the read, write and error operations so that
    they may be exposed via the information dictionary.
    """
    def __init__(self, *args, **kwargs):
        AbstractBase.__init__(self, *args, **kwargs)
        # initializes the diagnostics counters for the
        # read, write and error operations
        self.reads_c = 0
        self.writes_c = 0
        self.errors_c = 0
    def reads(self, *args, **kwargs):
        # runs the base operation and increments the counter
        AbstractBase.reads(self, *args, **kwargs)
        self.reads_c += 1
    def writes(self, *args, **kwargs):
        # runs the base operation and increments the counter
        AbstractBase.writes(self, *args, **kwargs)
        self.writes_c += 1
    def errors(self, *args, **kwargs):
        # runs the base operation and increments the counter
        AbstractBase.errors(self, *args, **kwargs)
        self.errors_c += 1
    def info_dict(self, full = False):
        # extends the base information map with the complete
        # set of diagnostics counters
        info = AbstractBase.info_dict(self, full = full)
        info.update(
            reads_c = self.reads_c,
            writes_c = self.writes_c,
            errors_c = self.errors_c
        )
        return info
class BaseThread(threading.Thread):
    """
    The top level thread class that is meant to encapsulate
    a running base object and run it in a new context.
    This base thread may be used to run a network loop allowing
    a main thread to continue with execution logic.
    """
    def __init__(self, owner = None, daemon = False, *args, **kwargs):
        threading.Thread.__init__(self, *args, **kwargs)
        # stores the owner (base object) reference and propagates
        # the daemon flag to the underlying thread structure
        self.owner = owner
        self.daemon = daemon
    def run(self):
        threading.Thread.run(self)
        # when no owner is set there's nothing to be run, otherwise
        # starts it and drops the reference afterwards (avoids
        # keeping the owner alive once the loop is done)
        if not self.owner: return
        self.owner.start()
        self.owner = None
def get_main():
    # the main instance is registered at the class level by the
    # currently running (main) abstract base instance
    return AbstractBase._MAIN
def get_loop():
    # the running loop is considered to be the main instance
    return get_main()
def get_poll():
    # resolves the main instance and returns its associated
    # polling mechanism (invalid when no main instance exists)
    main = get_main()
    return main.poll if main else None
def ensure(coroutine, args = None, kwargs = None, thread = False):
    """
    Ensures the execution of the provided coroutine under the
    context of the currently running event loop.
    :type coroutine: Coroutine
    :param coroutine: The coroutine to be scheduled for execution.
    :type args: List
    :param args: The positional arguments for the coroutine call.
    :type kwargs: Dictionary
    :param kwargs: The keyword arguments for the coroutine call.
    :type thread: bool
    :param thread: If the execution should occur under the context
    of the thread pool.
    :return: The value resulting from the ensure operation of the
    underlying event loop.
    """
    # avoids the mutable default argument pitfall by building a
    # fresh sequence/map per call when none is provided
    if args is None: args = []
    if kwargs is None: kwargs = {}
    loop = get_loop()
    return loop.ensure(
        coroutine,
        args = args,
        kwargs = kwargs,
        thread = thread
    )
def ensure_pool(coroutine, args = None, kwargs = None):
    """
    Ensures the execution of the provided coroutine under the
    context of the thread pool (thread flag enforced).
    """
    return ensure(
        coroutine,
        args = args,
        kwargs = kwargs,
        thread = True
    )
# determines if diagnostics mode is enabled for the current
# execution and selects the concrete base class accordingly
is_diag = config.conf("DIAG", False, cast = bool)
Base = DiagBase if is_diag else AbstractBase
|
# -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import api, models, fields
class MrpProduction(models.Model):
    """Extends ``mrp.production`` to compute component (product)
    lines from the *raw products* configured on the attribute
    values of the product being manufactured.
    """
    _inherit = "mrp.production"
    def get_new_components_info(self, product_id, loc_id, loc_dest_id,
                                uom_id, uos_id, qty, workorder):
        """Build the values dictionary for one component line.

        Pre-fills the values through the ``stock.move`` onchange
        helpers and then overrides product, UoM/UoS, quantity and
        work order.
        @param product_id: id of the component product.
        @param loc_id: id of the source location.
        @param loc_dest_id: id of the destination location.
        @param uom_id: id of the unit of measure.
        @param uos_id: id of the unit of sale (may be falsy).
        @param qty: quantity to consume (in ``uom_id`` units).
        @param workorder: id of the work order line (or falsy).
        @return: dictionary of values for the component line.
        """
        move_obj = self.env['stock.move']
        ul_move = move_obj.onchange_product_id(
            prod_id=product_id,
            loc_id=loc_id,
            loc_dest_id=loc_dest_id)
        ul_move['value'].update({
            'product_id': product_id,
            'product_uom': uom_id,
            'product_uos': uos_id,
            'product_qty': qty,
            'work_order': workorder,
            # the UoS quantity is recomputed from the UoM quantity
            # through the standard onchange helper
            'product_uos_qty': move_obj.onchange_quantity(
                product_id, qty, uom_id,
                uos_id)['value']['product_uos_qty']})
        return ul_move['value']
    def get_raw_products_data(self):
        """Collect component line values for every attribute value of
        the produced product that defines a raw product.
        @return: list of values dictionaries (one per raw product).
        """
        res = []
        # NOTE(review): only the first work order line is linked to
        # the generated components -- confirm this is intended
        workorder =\
            self.workcenter_lines and self.workcenter_lines[0].id
        for attr_value in self.product_id.attribute_value_ids:
            if attr_value.raw_product:
                raw_product = attr_value.raw_product
                value = self.get_new_components_info(
                    raw_product.id,
                    raw_product.property_stock_production.id,
                    raw_product.property_stock_inventory.id,
                    raw_product.uom_id.id,
                    raw_product.uos_id.id,
                    # raw quantity scaled by the production quantity
                    self.product_qty * attr_value.raw_qty,
                    workorder)
                res.append(value)
        return res
    @api.one
    def action_compute(self, properties=None):
        """Extend the standard computation to append the raw product
        lines to the computed ``product_lines``.
        """
        result = super(MrpProduction, self).action_compute(
            properties=properties)
        res = self.get_raw_products_data()
        self.write({'product_lines': map(lambda x: (0, 0, x), res)})
        return result
    # NOTE(review): field re-declared with no attributes, presumably
    # only to attach the onchange below -- confirm no attribute loss
    product_id = fields.Many2one()
    raw_products = fields.One2many('mrp.production.product.line',
                                   'raw_production', string='Raw Products')
    @api.one
    @api.onchange('product_id')
    def onchange_bring_raw_products(self):
        # refreshes the raw products preview when the product changes
        self.raw_products = self.get_raw_products_data()
class MrpProductionProductLine(models.Model):
    """Extends the production product line with a link back to the
    production that generated it as a raw product line.
    """
    _inherit = 'mrp.production.product.line'
    # inverse of ``mrp.production.raw_products``
    raw_production = fields.Many2one('mrp.production', string='Production')
[IMP] raw material replaced by phantom BoM data
# -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import api, models, fields
class MrpProduction(models.Model):
    """Extends ``mrp.production`` to compute component (product)
    lines from the *raw products* configured on the attribute
    values of the produced product, expanding phantom BoMs when
    they are available for a raw product.
    """
    _inherit = "mrp.production"
    def get_new_components_info(self, product_id, loc_id, loc_dest_id,
                                uom_id, uos_id, qty, workorder):
        """Build the values dictionary for one component line.

        Pre-fills the values through the ``stock.move`` onchange
        helpers and then overrides product, UoM/UoS, quantity and
        work order.
        @param product_id: id of the component product.
        @param loc_id: id of the source location.
        @param loc_dest_id: id of the destination location.
        @param uom_id: id of the unit of measure.
        @param uos_id: id of the unit of sale (may be falsy).
        @param qty: quantity to consume (in ``uom_id`` units).
        @param workorder: id of the work order line (or falsy).
        @return: dictionary of values for the component line.
        """
        move_obj = self.env['stock.move']
        ul_move = move_obj.onchange_product_id(
            prod_id=product_id,
            loc_id=loc_id,
            loc_dest_id=loc_dest_id)
        ul_move['value'].update({
            'product_id': product_id,
            'product_uom': uom_id,
            'product_uos': uos_id,
            'product_qty': qty,
            'work_order': workorder,
            # the UoS quantity is recomputed from the UoM quantity
            # through the standard onchange helper
            'product_uos_qty': move_obj.onchange_quantity(
                product_id, qty, uom_id,
                uos_id)['value']['product_uos_qty']})
        return ul_move['value']
    def get_raw_products_data(self):
        """Collect component line values for each attribute value with
        a raw product; when the raw product has a phantom BoM, the BoM
        is exploded and one line is added per resulting component.
        @return: list of values dictionaries.
        """
        res = []
        # NOTE(review): only the first work order line is linked to
        # the generated components -- confirm this is intended
        workorder =\
            self.workcenter_lines and self.workcenter_lines[0].id
        for attr_value in self.product_id.attribute_value_ids:
            raw_product = attr_value.raw_product
            if raw_product:
                bom_obj = self.env['mrp.bom']
                # searches for a phantom BoM matching the raw product
                bom_id = bom_obj._bom_phantom_find(
                    product_id=raw_product.id)
                qty = self.product_qty * attr_value.raw_qty
                if not bom_id:
                    # no phantom BoM: add the raw product itself
                    value = self.get_new_components_info(
                        raw_product.id,
                        raw_product.property_stock_production.id,
                        raw_product.property_stock_inventory.id,
                        raw_product.uom_id.id,
                        raw_product.uos_id.id,
                        qty,
                        workorder)
                    res.append(value)
                else:
                    # explodes the phantom BoM and adds one line per
                    # resulting component
                    result, result1 = bom_obj._bom_explode(
                        bom_obj.browse(bom_id), raw_product.id,
                        self.product_qty * attr_value.raw_qty)
                    for line in result:
                        product = self.env['product.product'].browse(
                            line['product_id'])
                        # NOTE(review): the exploded quantity is
                        # multiplied by ``qty`` again here -- confirm
                        # this double scaling is intended
                        value = self.get_new_components_info(
                            line['product_id'],
                            product.property_stock_production.id,
                            product.property_stock_inventory.id,
                            product.uom_id.id,
                            product.uos_id.id,
                            line['product_qty'] * qty,
                            workorder)
                        res.append(value)
        return res
    @api.one
    def action_compute(self, properties=None):
        """Extend the standard computation to append the raw product
        lines to the computed ``product_lines``.
        """
        result = super(MrpProduction, self).action_compute(
            properties=properties)
        res = self.get_raw_products_data()
        self.write({'product_lines': map(lambda x: (0, 0, x), res)})
        return result
    # NOTE(review): field re-declared with no attributes, presumably
    # only to attach the onchange below -- confirm no attribute loss
    product_id = fields.Many2one()
    raw_products = fields.One2many('mrp.production.product.line',
                                   'raw_production', string='Raw Products')
    @api.one
    @api.onchange('product_id')
    def onchange_bring_raw_products(self):
        # refreshes the raw products preview when the product changes
        self.raw_products = self.get_raw_products_data()
class MrpProductionProductLine(models.Model):
    """Extends the production product line with a link back to the
    production that generated it as a raw product line.
    """
    _inherit = 'mrp.production.product.line'
    # inverse of ``mrp.production.raw_products``
    raw_production = fields.Many2one('mrp.production', string='Production')
class MrpBom(models.Model):
    """Extends ``mrp.bom`` with a lookup helper restricted to
    phantom type BoMs.
    """
    _inherit = 'mrp.bom'
    def _bom_phantom_find(
            self, product_tmpl_id=None, product_id=None, properties=None):
        """ Finds a *phantom* BoM for a particular product/template.
        @param product_tmpl_id: id of the product template (optional,
            derived from ``product_id`` when missing).
        @param product_id: id of the product variant (optional).
        @param properties: list of property ids used to select among
            candidate BoMs.
        @return: False or the id of the matching BoM.
        """
        if properties is None:
            properties = []
        # builds the search domain: a variant specific BoM is
        # preferred, falling back to a template level one
        if product_id:
            if not product_tmpl_id:
                product_tmpl_id = self.env['product.product'].browse(
                    product_id).product_tmpl_id.id
            domain = [
                '|', ('product_id', '=', product_id),
                '&', ('product_id', '=', False),
                ('product_tmpl_id', '=', product_tmpl_id)
            ]
        elif product_tmpl_id:
            domain = [('product_id', '=', False),
                      ('product_tmpl_id', '=', product_tmpl_id)]
        else:
            # neither product nor template, makes no sense to search
            return False
        # restricts the search to phantom BoMs that are currently
        # valid (within the optional start/stop date window)
        domain += [('type', '=', 'phantom')]
        domain = domain + ['|', ('date_start', '=', False),
                           ('date_start', '<=', fields.Datetime.now()),
                           '|', ('date_stop', '=', False),
                           ('date_stop', '>=', fields.Datetime.now())]
        # order to prioritize bom with product_id over the one without
        bom_ids = self.search(domain, order='sequence, product_id')
        # Search a BoM which has all properties specified, or if you can not
        # find one, you could pass a BoM without any properties with the
        # smallest sequence
        bom_empty_prop = False
        for bom in bom_ids:
            if not (set(map(int, bom.property_ids or [])) -
                    set(properties or [])):
                if not properties or bom.property_ids:
                    return bom.id
                elif not bom_empty_prop:
                    bom_empty_prop = bom.id
        return bom_empty_prop
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functionality for loading events from a record file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensorboard.compat import tf
from tensorboard.compat import _pywrap_tensorflow
from tensorboard.compat.proto import event_pb2
from tensorboard.util import platform_util
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
class RawEventFileLoader(object):
    """An iterator that yields Event protos as serialized bytestrings."""

    def __init__(self, file_path):
        if file_path is None:
            raise ValueError("A file path is required")
        file_path = platform_util.readahead_file_path(file_path)
        logger.debug("Opening a record reader pointing at %s", file_path)
        with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
            self._reader = _pywrap_tensorflow.PyRecordReader_New(
                tf.compat.as_bytes(file_path), 0, tf.compat.as_bytes(""), status
            )
        # Store it for logging purposes.
        self._file_path = file_path
        if not self._reader:
            raise IOError(
                "Failed to open a record reader pointing to %s" % file_path
            )

    def Load(self):
        """Loads all new events from disk as raw serialized proto bytestrings.

        Calling Load multiple times in a row will not 'drop' events as long as the
        return value is not iterated over.

        Yields:
          All event proto bytestrings in the file that have not been yielded yet.
        """
        logger.debug("Loading events from %s", self._file_path)
        # GetNext() expects a status argument on TF <= 1.7.
        # `inspect.getargspec` was removed in Python 3.11; prefer
        # `inspect.getfullargspec` (available since Python 3.0) when present.
        if hasattr(inspect, "getfullargspec"):
            get_next_args = inspect.getfullargspec(self._reader.GetNext).args
        else:
            get_next_args = inspect.getargspec(  # pylint: disable=deprecated-method
                self._reader.GetNext
            ).args
        # First argument is self
        legacy_get_next = len(get_next_args) > 1
        while True:
            try:
                if legacy_get_next:
                    with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
                        self._reader.GetNext(status)
                else:
                    self._reader.GetNext()
            except (tf.errors.DataLossError, tf.errors.OutOfRangeError) as e:
                logger.debug("Cannot read more events: %s", e)
                # We ignore partial read exceptions, because a record may be truncated.
                # PyRecordReader holds the offset prior to the failed read, so retrying
                # will succeed.
                break
            yield self._reader.record()
        logger.debug("No more events in %s", self._file_path)
class EventFileLoader(RawEventFileLoader):
    """An iterator that yields parsed Event protos."""

    def Load(self):
        """Loads all new events from disk, parsed into Event protos.

        Calling Load multiple times in a row will not 'drop' events as long as the
        return value is not iterated over.

        Yields:
          All events in the file that have not been yielded yet.
        """
        for serialized in super(EventFileLoader, self).Load():
            yield event_pb2.Event.FromString(serialized)
class TimestampedEventFileLoader(EventFileLoader):
    """An iterator that yields (UNIX timestamp float, Event proto) pairs."""

    def Load(self):
        """Loads all new events along with their wall time values.

        Calling Load multiple times in a row will not 'drop' events as long as the
        return value is not iterated over.

        Yields:
          Pairs of (UNIX timestamp float, Event proto) for all events in the file
          that have not been yielded yet.
        """
        for parsed_event in super(TimestampedEventFileLoader, self).Load():
            yield (parsed_event.wall_time, parsed_event)
When available, use inspect.getfullargspec instead of inspect.getargspec. (#3116)
* When available, use inspect.getfullargspec instead of
inspect.getargspec.
The former is only available in Python3, in which getargspec was
deprecated. This allows to get rid of a deprecation warning.
Both return a namedtuple with an equivalent "args" attribute, so the
functionality is unchanged.
* Changes so that black --check succeeds.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functionality for loading events from a record file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
from tensorboard.compat import tf
from tensorboard.compat import _pywrap_tensorflow
from tensorboard.compat.proto import event_pb2
from tensorboard.util import platform_util
from tensorboard.util import tb_logging
logger = tb_logging.get_logger()
class RawEventFileLoader(object):
    """An iterator that yields Event protos as serialized bytestrings."""

    def __init__(self, file_path):
        # A missing path is a programming error, fail fast.
        if file_path is None:
            raise ValueError("A file path is required")
        resolved_path = platform_util.readahead_file_path(file_path)
        logger.debug("Opening a record reader pointing at %s", resolved_path)
        with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
            self._reader = _pywrap_tensorflow.PyRecordReader_New(
                tf.compat.as_bytes(resolved_path), 0, tf.compat.as_bytes(""), status
            )
        # Kept around solely so log messages can mention the file.
        self._file_path = resolved_path
        if not self._reader:
            raise IOError(
                "Failed to open a record reader pointing to %s" % resolved_path
            )

    def Load(self):
        """Loads all new events from disk as raw serialized proto bytestrings.

        Calling Load multiple times in a row will not 'drop' events as long as the
        return value is not iterated over.

        Yields:
          All event proto bytestrings in the file that have not been yielded yet.
        """
        logger.debug("Loading events from %s", self._file_path)
        # getargspec is deprecated in Python3, so favour getfullargspec
        # when the running interpreter provides it.
        try:
            argspec_fn = inspect.getfullargspec
        except AttributeError:
            argspec_fn = inspect.getargspec  # pylint: disable=deprecated-method
        # GetNext() expects a status argument on TF <= 1.7; the first
        # entry of the argument list is always self.
        legacy_get_next = len(argspec_fn(self._reader.GetNext).args) > 1
        keep_reading = True
        while keep_reading:
            try:
                if legacy_get_next:
                    with tf.compat.v1.errors.raise_exception_on_not_ok_status() as status:
                        self._reader.GetNext(status)
                else:
                    self._reader.GetNext()
            except (tf.errors.DataLossError, tf.errors.OutOfRangeError) as e:
                logger.debug("Cannot read more events: %s", e)
                # We ignore partial read exceptions, because a record may be truncated.
                # PyRecordReader holds the offset prior to the failed read, so retrying
                # will succeed.
                keep_reading = False
            else:
                yield self._reader.record()
        logger.debug("No more events in %s", self._file_path)
class EventFileLoader(RawEventFileLoader):
    """An iterator that yields parsed Event protos."""

    def Load(self):
        """Loads all new events from disk, parsed into Event protos.

        Calling Load multiple times in a row will not 'drop' events as long as the
        return value is not iterated over.

        Yields:
          All events in the file that have not been yielded yet.
        """
        for raw_record in super(EventFileLoader, self).Load():
            yield event_pb2.Event.FromString(raw_record)
class TimestampedEventFileLoader(EventFileLoader):
    """An iterator that yields (UNIX timestamp float, Event proto) pairs."""

    def Load(self):
        """Loads all new events along with their wall time values.

        Calling Load multiple times in a row will not 'drop' events as long as the
        return value is not iterated over.

        Yields:
          Pairs of (UNIX timestamp float, Event proto) for all events in the file
          that have not been yielded yet.
        """
        for evt in super(TimestampedEventFileLoader, self).Load():
            yield (evt.wall_time, evt)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2018 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2018 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import sys
import time
import socket
from . import config
from . import errors
from . import legacy
from . import transport
from . import asynchronous
# Resolves the asyncio module only when the runtime qualifies (is_neo);
# otherwise falls back to plain object bases so the classes below can
# still be defined without asyncio support.
asyncio = asynchronous.get_asyncio() if asynchronous.is_neo() else None
BaseLoop = asyncio.AbstractEventLoop if asyncio else object
BaseTransport = asyncio.BaseTransport if asyncio else object
class CompatLoop(BaseLoop):
    """
    Top level compatibility class that adds compatibility support
    for the asyncio event loop strategy.

    This is required to be able to access netius event loop on a
    asyncio like manner.

    :see: https://docs.python.org/3/library/asyncio-eventloop.html
    """

    def __init__(self, loop):
        # wraps the concrete netius loop and creates the default task
        # factory, executor and exception handler for the adapter
        self._loop = loop
        self._task_factory = asynchronous.Task
        self._executor = asynchronous.ThreadPoolExecutor(loop)
        self._handler = self._default_handler

    def __getattr__(self, name):
        # forwards any unknown attribute access to the wrapped netius
        # loop, keeping its native API reachable through this adapter
        if hasattr(self._loop, name):
            return getattr(self._loop, name)
        raise AttributeError("'%s' not found" % name)

    def time(self):
        # NOTE(review): wall-clock time is used here while asyncio
        # normally relies on a monotonic reference — confirm intended
        return time.time()

    def call_soon(self, callback, *args):
        # schedules the callback for immediate execution on the loop
        return self._call_delay(callback, args, immediately = True)

    def call_soon_threadsafe(self, callback, *args):
        # same as call_soon but requests the thread safe (safe = True)
        # variant of the underlying delay operation
        return self._call_delay(callback, args, immediately = True, safe = True)

    def call_at(self, when, callback, *args):
        # converts the absolute time reference into a relative delay
        # before delegating to the delay based scheduling
        delay = when - self.time()
        return self._call_delay(callback, args, timeout = delay)

    def call_later(self, delay, callback, *args):
        """
        Calls the provided callback with the provided parameters after
        the defined delay (in seconds), should ensure proper sleep operation.

        :type delay: float
        :param delay: The delay in seconds after which the callback is going
        to be called with the provided arguments.
        :type callback: Function
        :param callback: The function to be called after the provided delay.
        :rtype: Handle
        :return: The handle object to the operation, that may be used to cancel it.
        """

        return self._call_delay(callback, args, timeout = delay)

    def create_future(self):
        # builds a future object bound to the wrapped netius loop
        return self._loop.build_future()

    def create_task(self, coroutine):
        # ensures the execution of the coroutine and wraps the
        # resulting future into an (asyncio compatible) task
        future = self._loop.ensure(coroutine)
        task = self._task_factory(future)
        return task

    def create_connection(self, *args, **kwargs):
        coroutine = self._create_connection(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def create_datagram_endpoint(self, *args, **kwargs):
        coroutine = self._create_datagram_endpoint(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def getaddrinfo(self, *args, **kwargs):
        coroutine = self._getaddrinfo(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def getnameinfo(self, *args, **kwargs):
        coroutine = self._getnameinfo(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def run_until_complete(self, future):
        # registers the future as the current task while the coroutine
        # runs to completion, restoring the previous state at the end
        self._set_current_task(future)
        try: return self._loop.run_coroutine(future)
        finally: self._unset_current_task()

    def run_forever(self):
        return self._loop.run_forever()

    def run_in_executor(self, *args, **kwargs):
        coroutine = self._run_in_executor(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def stop(self):
        self._loop.stop()

    def close(self):
        self._loop.close()

    def get_exception_handler(self):
        return self._handler

    def set_exception_handler(self, handler):
        self._handler = handler

    def default_exception_handler(self, context):
        return self._default_handler(context)

    def call_exception_handler(self, context):
        # an unset handler silently ignores the exception context
        if not self._handler: return
        return self._handler(context)

    def get_debug(self):
        return self._loop.is_debug()

    def set_debug(self, enabled):
        # no-op, the debug mode of the underlying netius loop is not
        # controllable through this compatibility layer
        pass

    def set_default_executor(self, executor):
        self._executor = executor

    def get_task_factory(self):
        return self._task_factory

    def set_task_factory(self, factory):
        self._task_factory = factory

    def is_running(self):
        return self._loop.is_running()

    def is_closed(self):
        return self._loop.is_stopped()

    def _getaddrinfo(
        self,
        host,
        port,
        family = 0,
        type = 0,
        proto = 0,
        flags = 0
    ):
        # NOTE(review): the address resolution itself runs
        # synchronously (blocking); only the result delivery is
        # deferred to the loop — confirm this is intended
        future = self.create_future()
        result = socket.getaddrinfo(
            host,
            port,
            family,
            type,
            proto,
            flags = flags
        )
        self._loop.delay(lambda: future.set_result(result), immediately = True)
        yield future

    def _getnameinfo(self, sockaddr, flags = 0):
        raise errors.NotImplemented("Missing implementation")

    def _run_in_executor(self, executor, func, *args):
        # falls back to the default executor when none is provided
        executor = executor or self._executor
        future = executor.submit(func, *args)
        yield future

    def _create_connection(
        self,
        protocol_factory,
        host = None,
        port = None,
        ssl = None,
        family = 0,
        proto = 0,
        flags = 0,
        sock = None,
        local_addr = None,
        server_hostname = None,
        *args,
        **kwargs
    ):
        # NOTE(review): proto defaults to SOCK_STREAM, which is a
        # socket type rather than a protocol — confirm the naming
        family = family or socket.AF_INET
        proto = proto or socket.SOCK_STREAM
        future = self.create_future()
        # resolves the future with the (transport, protocol) pair once
        # the underlying netius connection has been established
        def connect(connection):
            protocol = protocol_factory()
            transport = CompatTransport(self, connection)
            transport._set_compat(protocol)
            future.set_result((transport, protocol))
        connection = self._loop.connect(
            host,
            port,
            ssl = ssl,
            family = family
        )
        connection.bind("connect", connect)
        yield future

    def _create_datagram_endpoint(
        self,
        protocol_factory,
        local_addr = None,
        remote_addr = None,
        family = 0,
        proto = 0,
        flags = 0,
        reuse_address = None,
        reuse_port = None,
        allow_broadcast = None,
        sock = None,
        *args,
        **kwargs
    ):
        family = family or socket.AF_INET
        proto = proto or socket.SOCK_DGRAM
        future = self.create_future()
        # datagram "connections" have no handshake, so the resolution
        # callback is simply scheduled to run on the next loop tick
        def connect(connection):
            protocol = protocol_factory()
            _transport = transport.TransportDatagram(connection)
            _transport._set_compat(protocol)
            future.set_result((_transport, protocol))
        connection = self._loop.datagram(
            family = family,
            type = proto,
            remote_host = remote_addr[0] if remote_addr else None,
            remote_port = remote_addr[1] if remote_addr else None
        )
        self._loop.delay(lambda: connect(connection))
        yield future

    def _set_current_task(self, task):
        # NOTE(review): touches the private asyncio
        # Task._current_tasks registry, which may break across asyncio
        # versions — confirm the supported version range
        asyncio = asynchronous.get_asyncio()
        if not asyncio: return
        asyncio.Task._current_tasks[self] = task

    def _unset_current_task(self):
        asyncio = asynchronous.get_asyncio()
        if not asyncio: return
        asyncio.Task._current_tasks.pop(self, None)

    def _call_delay(
        self,
        callback,
        args,
        timeout = None,
        immediately = False,
        verify = False,
        safe = False
    ):
        # creates the callable to be called after the timeout, note the
        # closure around the "normal" arguments (allows proper propagation)
        callable = lambda: callback(*args)

        # schedules the delay call of the created callable according to
        # the provided set of options expected by the delay operation the
        # callback tuple is returned so that a proper handle may be created
        callable_t = self._loop.delay(
            callable,
            timeout = timeout,
            immediately = immediately,
            verify = verify,
            safe = safe
        )

        # creates the handle to control the operation and then returns the
        # object to the caller method, allowing operation cancellation
        handle = asynchronous.Handle(callable_t = callable_t)
        return handle

    def _sleep(self, timeout, future = None):
        # verifies if a future variable is meant to be re-used
        # or if instead a new one should be created for the new
        # sleep operation to be executed
        future = future or self.create_future()

        # creates the callable that is going to be used to set
        # the final value of the future variable
        callable = lambda: future.set_result(timeout)

        # delays the execution of the callable so that it is executed
        # after the requested amount of timeout, note that the resolution
        # of the event loop will condition the precision of the timeout
        # NOTE(review): assumes the future exposes a _loop attribute
        # pointing back at this loop — confirm netius futures set it
        future._loop.call_later(timeout, callable)
        return future

    def _default_handler(self, context):
        # prints the message (when present) followed by the remaining
        # context items to the standard error stream
        message = context.pop("message", None)
        sys.stderr.write("%s\n" % message)
        for key, value in legacy.iteritems(context):
            sys.stderr.write("%s: %s\n" % (key, value))

    @property
    def _thread_id(self):
        # identifier of the thread where the wrapped loop is running
        return self._loop.tid
class CompatTransport(BaseTransport):
    """
    Decorator class to be used to add the functionality of a
    transport layer as defined by the asyncio.

    Allows adding the functionality to an internal netius
    (or equivalent) object, this is considered to be the adaptor
    from the internal loop implementation and the expected
    transport layer from asyncio.

    :see: https://docs.python.org/3/library/asyncio-protocol.html
    """

    def __init__(self, loop, connection):
        self._loop = loop
        self._connection = connection
        self._protocol = None

    def close(self):
        self._connection.close()

    def abort(self):
        # an orderly close is performed, mirroring close()
        self._connection.close()

    def write(self, data):
        self._connection.send(data)

    def get_extra_info(self, name, default = None):
        # only the underlying socket is exposed as extra information,
        # any other name resolves to the provided default value
        if name == "socket": return self._connection.socket
        else: return default

    def set_protocol(self, protocol):
        # mark = False avoids re-running connection_made() on a
        # protocol that is being swapped in at runtime
        self._set_protocol(protocol, mark = False)

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._connection.is_closed()

    def _on_data(self, connection, data):
        # forwards incoming payloads to the asyncio-style protocol
        self._protocol.data_received(data)

    def _on_close(self, connection):
        # identity comparison with None (PEP 8) is used instead of the
        # equality test, avoiding any custom __eq__ interference
        if self._protocol is not None:
            self._protocol.eof_received()
        self._cleanup()

    def _set_compat(self, protocol):
        self._set_binds()
        self._set_protocol(protocol)

    def _set_binds(self):
        self._connection.bind("data", self._on_data)
        self._connection.bind("close", self._on_close)

    def _set_protocol(self, protocol, mark = True):
        self._protocol = protocol
        if mark: self._protocol.connection_made(self)

    def _cleanup(self):
        # schedules the connection lost notification and drops the
        # loop reference to break the reference cycle
        self._loop.call_soon(self._call_connection_lost, None)
        self._loop = None

    def _call_connection_lost(self, context):
        if self._protocol is not None:
            self._protocol.connection_lost(context)
def is_compat():
    """
    Checks whether the netius event loop should run under the
    asyncio compatibility mode, in which it emulates the behaviour
    of the asyncio event loop so that third party (non netius)
    protocol classes may be used with it.

    :rtype: bool
    :return: If the netius infra-structure should run under
    the compatibility mode.
    """

    compat_mode = config.conf("COMPAT", False, cast = bool)
    asyncio_mode = is_asyncio()
    if not (compat_mode or asyncio_mode): return False
    return asynchronous.is_neo()
def is_asyncio():
    """
    Checks if the asyncio mode of execution (external event
    loop) is required under the current runtime, in which case
    the asyncio event loop is used instead of the netius one.

    :rtype: bool
    :return: If the asyncio event loop model is enabled and
    proper library support available.
    """

    enabled = config.conf("ASYNCIO", False, cast = bool)
    if not enabled: return False
    return asynchronous.is_asynclib()
Initial create_server support.
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2018 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2018 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import sys
import time
import socket
from . import config
from . import errors
from . import legacy
from . import transport
from . import asynchronous
# Resolves the asyncio module only when the runtime qualifies (is_neo);
# otherwise falls back to plain object bases so the classes below can
# still be defined without asyncio support.
asyncio = asynchronous.get_asyncio() if asynchronous.is_neo() else None
BaseLoop = asyncio.AbstractEventLoop if asyncio else object
BaseTransport = asyncio.BaseTransport if asyncio else object
class CompatLoop(BaseLoop):
    """
    Top level compatibility class that adds compatibility support
    for the asyncio event loop strategy.

    This is required to be able to access netius event loop on a
    asyncio like manner.

    :see: https://docs.python.org/3/library/asyncio-eventloop.html
    """

    def __init__(self, loop):
        # wraps the concrete netius loop and creates the default task
        # factory, executor and exception handler for the adapter
        self._loop = loop
        self._task_factory = asynchronous.Task
        self._executor = asynchronous.ThreadPoolExecutor(loop)
        self._handler = self._default_handler

    def __getattr__(self, name):
        # forwards any unknown attribute access to the wrapped netius
        # loop, keeping its native API reachable through this adapter
        if hasattr(self._loop, name):
            return getattr(self._loop, name)
        raise AttributeError("'%s' not found" % name)

    def time(self):
        # NOTE(review): wall-clock time is used here while asyncio
        # normally relies on a monotonic reference — confirm intended
        return time.time()

    def call_soon(self, callback, *args):
        # schedules the callback for immediate execution on the loop
        return self._call_delay(callback, args, immediately = True)

    def call_soon_threadsafe(self, callback, *args):
        # same as call_soon but requests the thread safe (safe = True)
        # variant of the underlying delay operation
        return self._call_delay(callback, args, immediately = True, safe = True)

    def call_at(self, when, callback, *args):
        # converts the absolute time reference into a relative delay
        # before delegating to the delay based scheduling
        delay = when - self.time()
        return self._call_delay(callback, args, timeout = delay)

    def call_later(self, delay, callback, *args):
        """
        Calls the provided callback with the provided parameters after
        the defined delay (in seconds), should ensure proper sleep operation.

        :type delay: float
        :param delay: The delay in seconds after which the callback is going
        to be called with the provided arguments.
        :type callback: Function
        :param callback: The function to be called after the provided delay.
        :rtype: Handle
        :return: The handle object to the operation, that may be used to cancel it.
        """

        return self._call_delay(callback, args, timeout = delay)

    def create_future(self):
        # builds a future object bound to the wrapped netius loop
        return self._loop.build_future()

    def create_task(self, coroutine):
        # ensures the execution of the coroutine and wraps the
        # resulting future into an (asyncio compatible) task
        future = self._loop.ensure(coroutine)
        task = self._task_factory(future)
        return task

    def create_server(self, *args, **kwargs):
        coroutine = self._create_server(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def create_connection(self, *args, **kwargs):
        coroutine = self._create_connection(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def create_datagram_endpoint(self, *args, **kwargs):
        coroutine = self._create_datagram_endpoint(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def getaddrinfo(self, *args, **kwargs):
        coroutine = self._getaddrinfo(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def getnameinfo(self, *args, **kwargs):
        coroutine = self._getnameinfo(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def run_until_complete(self, future):
        # registers the future as the current task while the coroutine
        # runs to completion, restoring the previous state at the end
        self._set_current_task(future)
        try: return self._loop.run_coroutine(future)
        finally: self._unset_current_task()

    def run_forever(self):
        return self._loop.run_forever()

    def run_in_executor(self, *args, **kwargs):
        coroutine = self._run_in_executor(*args, **kwargs)
        return asynchronous.coroutine_return(coroutine)

    def stop(self):
        self._loop.stop()

    def close(self):
        self._loop.close()

    def get_exception_handler(self):
        return self._handler

    def set_exception_handler(self, handler):
        self._handler = handler

    def default_exception_handler(self, context):
        return self._default_handler(context)

    def call_exception_handler(self, context):
        # an unset handler silently ignores the exception context
        if not self._handler: return
        return self._handler(context)

    def get_debug(self):
        return self._loop.is_debug()

    def set_debug(self, enabled):
        # no-op, the debug mode of the underlying netius loop is not
        # controllable through this compatibility layer
        pass

    def set_default_executor(self, executor):
        self._executor = executor

    def get_task_factory(self):
        return self._task_factory

    def set_task_factory(self, factory):
        self._task_factory = factory

    def is_running(self):
        return self._loop.is_running()

    def is_closed(self):
        return self._loop.is_stopped()

    def _getaddrinfo(
        self,
        host,
        port,
        family = 0,
        type = 0,
        proto = 0,
        flags = 0
    ):
        # NOTE(review): the address resolution itself runs
        # synchronously (blocking); only the result delivery is
        # deferred to the loop — confirm this is intended
        future = self.create_future()
        result = socket.getaddrinfo(
            host,
            port,
            family,
            type,
            proto,
            flags = flags
        )
        self._loop.delay(lambda: future.set_result(result), immediately = True)
        yield future

    def _getnameinfo(self, sockaddr, flags = 0):
        raise errors.NotImplemented("Missing implementation")

    def _run_in_executor(self, executor, func, *args):
        # falls back to the default executor when none is provided
        executor = executor or self._executor
        future = executor.submit(func, *args)
        yield future

    def _create_server(
        self,
        protocol_factory,
        host = None,
        port = None,
        ssl = None,
        family = 0,
        proto = 0,
        flags = 0,
        sock = None,
        local_addr = None,
        server_hostname = None,
        *args,
        **kwargs
    ):
        family = family or socket.AF_INET
        proto = proto or socket.SOCK_STREAM
        future = self.create_future()
        # NOTE(review): the future resolves with a (transport, protocol)
        # pair instead of an asyncio Server object, and the "connect"
        # event is bound on the server handle — confirm semantics
        def connect(connection):
            protocol = protocol_factory()
            transport = CompatTransport(self, connection)
            transport._set_compat(protocol)
            future.set_result((transport, protocol))
        connection = self._loop.server(
            host,
            port,
            ssl = ssl,
            family = family
        )
        connection.bind("connect", connect)
        yield future

    def _create_connection(
        self,
        protocol_factory,
        host = None,
        port = None,
        ssl = None,
        family = 0,
        proto = 0,
        flags = 0,
        sock = None,
        local_addr = None,
        server_hostname = None,
        *args,
        **kwargs
    ):
        # NOTE(review): proto defaults to SOCK_STREAM, which is a
        # socket type rather than a protocol — confirm the naming
        family = family or socket.AF_INET
        proto = proto or socket.SOCK_STREAM
        future = self.create_future()
        # resolves the future with the (transport, protocol) pair once
        # the underlying netius connection has been established
        def connect(connection):
            protocol = protocol_factory()
            transport = CompatTransport(self, connection)
            transport._set_compat(protocol)
            future.set_result((transport, protocol))
        connection = self._loop.connect(
            host,
            port,
            ssl = ssl,
            family = family
        )
        connection.bind("connect", connect)
        yield future

    def _create_datagram_endpoint(
        self,
        protocol_factory,
        local_addr = None,
        remote_addr = None,
        family = 0,
        proto = 0,
        flags = 0,
        reuse_address = None,
        reuse_port = None,
        allow_broadcast = None,
        sock = None,
        *args,
        **kwargs
    ):
        family = family or socket.AF_INET
        proto = proto or socket.SOCK_DGRAM
        future = self.create_future()
        # datagram "connections" have no handshake, so the resolution
        # callback is simply scheduled to run on the next loop tick
        def connect(connection):
            protocol = protocol_factory()
            _transport = transport.TransportDatagram(connection)
            _transport._set_compat(protocol)
            future.set_result((_transport, protocol))
        connection = self._loop.datagram(
            family = family,
            type = proto,
            remote_host = remote_addr[0] if remote_addr else None,
            remote_port = remote_addr[1] if remote_addr else None
        )
        self._loop.delay(lambda: connect(connection))
        yield future

    def _set_current_task(self, task):
        # NOTE(review): touches the private asyncio
        # Task._current_tasks registry, which may break across asyncio
        # versions — confirm the supported version range
        asyncio = asynchronous.get_asyncio()
        if not asyncio: return
        asyncio.Task._current_tasks[self] = task

    def _unset_current_task(self):
        asyncio = asynchronous.get_asyncio()
        if not asyncio: return
        asyncio.Task._current_tasks.pop(self, None)

    def _call_delay(
        self,
        callback,
        args,
        timeout = None,
        immediately = False,
        verify = False,
        safe = False
    ):
        # creates the callable to be called after the timeout, note the
        # closure around the "normal" arguments (allows proper propagation)
        callable = lambda: callback(*args)

        # schedules the delay call of the created callable according to
        # the provided set of options expected by the delay operation the
        # callback tuple is returned so that a proper handle may be created
        callable_t = self._loop.delay(
            callable,
            timeout = timeout,
            immediately = immediately,
            verify = verify,
            safe = safe
        )

        # creates the handle to control the operation and then returns the
        # object to the caller method, allowing operation cancellation
        handle = asynchronous.Handle(callable_t = callable_t)
        return handle

    def _sleep(self, timeout, future = None):
        # verifies if a future variable is meant to be re-used
        # or if instead a new one should be created for the new
        # sleep operation to be executed
        future = future or self.create_future()

        # creates the callable that is going to be used to set
        # the final value of the future variable
        callable = lambda: future.set_result(timeout)

        # delays the execution of the callable so that it is executed
        # after the requested amount of timeout, note that the resolution
        # of the event loop will condition the precision of the timeout
        # NOTE(review): assumes the future exposes a _loop attribute
        # pointing back at this loop — confirm netius futures set it
        future._loop.call_later(timeout, callable)
        return future

    def _default_handler(self, context):
        # prints the message (when present) followed by the remaining
        # context items to the standard error stream
        message = context.pop("message", None)
        sys.stderr.write("%s\n" % message)
        for key, value in legacy.iteritems(context):
            sys.stderr.write("%s: %s\n" % (key, value))

    @property
    def _thread_id(self):
        # identifier of the thread where the wrapped loop is running
        return self._loop.tid
class CompatTransport(BaseTransport):
    """
    Decorator class to be used to add the functionality of a
    transport layer as defined by the asyncio.

    Allows adding the functionality to an internal netius
    (or equivalent) object, this is considered to be the adaptor
    from the internal loop implementation and the expected
    transport layer from asyncio.

    :see: https://docs.python.org/3/library/asyncio-protocol.html
    """

    def __init__(self, loop, connection):
        self._loop = loop
        self._connection = connection
        self._protocol = None

    def close(self):
        self._connection.close()

    def abort(self):
        # an orderly close is performed, mirroring close()
        self._connection.close()

    def write(self, data):
        self._connection.send(data)

    def get_extra_info(self, name, default = None):
        # only the underlying socket is exposed as extra information,
        # any other name resolves to the provided default value
        if name == "socket": return self._connection.socket
        else: return default

    def set_protocol(self, protocol):
        # mark = False avoids re-running connection_made() on a
        # protocol that is being swapped in at runtime
        self._set_protocol(protocol, mark = False)

    def get_protocol(self):
        return self._protocol

    def is_closing(self):
        return self._connection.is_closed()

    def _on_data(self, connection, data):
        # forwards incoming payloads to the asyncio-style protocol
        self._protocol.data_received(data)

    def _on_close(self, connection):
        # identity comparison with None (PEP 8) is used instead of the
        # equality test, avoiding any custom __eq__ interference
        if self._protocol is not None:
            self._protocol.eof_received()
        self._cleanup()

    def _set_compat(self, protocol):
        self._set_binds()
        self._set_protocol(protocol)

    def _set_binds(self):
        self._connection.bind("data", self._on_data)
        self._connection.bind("close", self._on_close)

    def _set_protocol(self, protocol, mark = True):
        self._protocol = protocol
        if mark: self._protocol.connection_made(self)

    def _cleanup(self):
        # schedules the connection lost notification and drops the
        # loop reference to break the reference cycle
        self._loop.call_soon(self._call_connection_lost, None)
        self._loop = None

    def _call_connection_lost(self, context):
        if self._protocol is not None:
            self._protocol.connection_lost(context)
def is_compat():
    """
    Checks whether the netius event loop should run under the
    asyncio compatibility mode, in which it emulates the behaviour
    of the asyncio event loop so that third party (non netius)
    protocol classes may be used with it.

    :rtype: bool
    :return: If the netius infra-structure should run under
    the compatibility mode.
    """

    compat_mode = config.conf("COMPAT", False, cast = bool)
    asyncio_mode = is_asyncio()
    if not (compat_mode or asyncio_mode): return False
    return asynchronous.is_neo()
def is_asyncio():
    """
    Checks if the asyncio mode of execution (external event
    loop) is required under the current runtime, in which case
    the asyncio event loop is used instead of the netius one.

    :rtype: bool
    :return: If the asyncio event loop model is enabled and
    proper library support available.
    """

    enabled = config.conf("ASYNCIO", False, cast = bool)
    if not enabled: return False
    return asynchronous.is_asynclib()
|
"""html2markdown converts an html string to markdown while preserving unsupported markup."""
# TODO:
# escape all characters (in _escapeCharacters. cf. https://daringfireball.net/projects/markdown/syntax#backslash)
# implement standard <table> (i.e. without attributes)
import bs4
from bs4 import BeautifulSoup
import re
# Tags that html2markdown knows how to convert; a listed tag is still
# left unprocessed when it carries unsupported attributes.
_supportedTags = (
    # NOTE: will be ignored if they have unsupported attributes (cf. _supportedAttributes)
    'blockquote',
    'p',
    'a',
    'h1','h2','h3','h4','h5','h6',
    'strong','b',
    'em','i',
    'ul','ol','li',
    'br',
    'img',
    'pre','code',
    'hr'
)
# Whitelist of allowed attributes, expressed as "<tag> <attribute>"
# pairs; any other attribute disqualifies its tag from conversion.
_supportedAttributes = (
    'a href',
    'a title',
    'img alt',
    'img src',
    'img title',
)
# Tags treated as inline for layout purposes.
_inlineTags = (
    # these can be mixed with markdown (when unprocessed)
    # block tags will be surrounded by newlines and be unprocessed inside
    # (unless supported tag + supported attribute[s])
    'span',
    'strong','b',
    'em','i',
    'a',
    'img',
    'code'
)
def _supportedAttrs(tag):
    '''checks that every attribute present on the tag belongs to
    the whitelist declared in _supportedAttributes'''
    allowed = [entry.split(' ')[1] for entry in _supportedAttributes if entry.split(' ')[0] == tag.name]
    return all(attr in allowed for attr in tag.attrs)
def _recursivelyValid(tag):
    '''determines whether the tag and its entire subtree may be
    converted to markdown'''
    # not all tags require this property
    # requires: <blockquote><p style="...">asdf</p></blockquote>
    # does not: <div><p style="...">asdf</p></div>
    for child in tag.find_all(recursive = False):
        if not _recursivelyValid(child):
            return False
    name = tag.name
    if name == '[document]' or name in _inlineTags:
        return True
    if name not in _supportedTags:
        return False
    return _supportedAttrs(tag)
def _escapeCharacters(tag):
    '''non-recursively escape underlines and asterisks
    in the tag'''
    # exact type check on purpose: NavigableString subclasses
    # (e.g. comments) must be left untouched
    for node in tag.contents:
        if type(node) != bs4.element.NavigableString:
            continue
        node.replace_with(node.replace('_', '\\_').replace('*', '\\*'))
def _breakRemNewlines(tag):
    '''non-recursively break spaces and remove newlines in the tag'''
    # exact type check on purpose: NavigableString subclasses
    # (e.g. comments) must be left untouched
    for node in tag.contents:
        if type(node) != bs4.element.NavigableString:
            continue
        node.replace_with(re.sub(r' {2,}', ' ', node).replace('\n', ''))
def _markdownify(tag, _listType=None, _blockQuote=False, _listIndex=1):
    '''recursively converts a tag into markdown

    Args:
        tag: BeautifulSoup tag (or the whole document) to convert in place.
        _listType: 'ul' or 'ol' while converting an <li>, else None.
        _blockQuote: True while inside a <blockquote>; tightens <p> spacing.
        _listIndex: 1-based position of the current <li> within an <ol>.
    '''
    children = tag.find_all(recursive=False)
    if tag.name == '[document]':
        for child in children:
            _markdownify(child)
        return
    if tag.name not in _supportedTags or not _supportedAttrs(tag):
        # Unsupported markup is preserved verbatim; block-level tags get
        # separated from surrounding markdown by blank lines.
        if tag.name not in _inlineTags:
            tag.insert_before('\n\n')
            tag.insert_after('\n\n')
        else:
            _escapeCharacters(tag)
            for child in children:
                _markdownify(child)
        return
    if tag.name not in ('pre', 'code'):
        # Inside code, underscores/asterisks and whitespace are literal.
        _escapeCharacters(tag)
        _breakRemNewlines(tag)
    if tag.name == 'p':
        if tag.string != None:
            if tag.string.strip() == u'':
                # Keep empty paragraphs visible via a non-breaking space.
                tag.string = u'\xa0'
                tag.unwrap()
                return
        if not _blockQuote:
            tag.insert_before('\n\n')
            tag.insert_after('\n\n')
        else:
            tag.insert_before('\n')
            tag.insert_after('\n')
        tag.unwrap()
        for child in children:
            _markdownify(child)
    elif tag.name == 'br':
        tag.string = ' \n'
        tag.unwrap()
    elif tag.name == 'img':
        alt = ''
        title = ''
        if tag.has_attr('alt'):
            alt = tag['alt']
        if tag.has_attr('title') and tag['title']:
            title = ' "%s"' % tag['title']
        # Fixed: the format string was empty ('' % (...)), which raises
        # TypeError at runtime; emit proper markdown image syntax instead.
        tag.string = '![%s](%s%s)' % (alt, tag['src'], title)
        tag.unwrap()
    elif tag.name == 'hr':
        tag.string = '\n---\n'
        tag.unwrap()
    elif tag.name == 'pre':
        tag.insert_before('\n\n')
        tag.insert_after('\n\n')
        if tag.code:
            if not _supportedAttrs(tag.code):
                return
            for child in tag.code.find_all(recursive=False):
                # Only <br>-separated plain content qualifies as a code block.
                if child.name != 'br':
                    return
            # code block
            for br in tag.code.find_all('br'):
                br.string = '\n'
                br.unwrap()
            tag.code.unwrap()
            lines = unicode(tag).strip().split('\n')
            lines[0] = lines[0][5:]     # drop leading '<pre>'
            lines[-1] = lines[-1][:-6]  # drop trailing '</pre>'
            if not lines[-1]:
                lines.pop()
            for i, line in enumerate(lines):
                line = line.replace(u'\xa0', ' ')
                lines[i] = ' %s' % line
            tag.replace_with(BeautifulSoup('\n'.join(lines), 'html.parser'))
        return
    elif tag.name == 'code':
        # inline code; only valid when it has no child tags
        if children:
            return
        tag.insert_before('`` ')
        tag.insert_after(' ``')
        tag.unwrap()
    elif _recursivelyValid(tag):
        if tag.name == 'blockquote':
            # ! FIXME: hack
            tag.insert_before('<<<BLOCKQUOTE: ')
            tag.insert_after('>>>')
            tag.unwrap()
            for child in children:
                _markdownify(child, _blockQuote=True)
            return
        elif tag.name == 'a':
            # process children first
            for child in children:
                _markdownify(child)
            if tag.string != tag['href'] or tag.has_attr('title'):
                title = ''
                if tag.has_attr('title') and tag['title']:
                    title = ' "%s"' % tag['title']
                tag.string = '[%s](%s%s)' % (BeautifulSoup(unicode(tag), 'html.parser').string,
                                             tag['href'],
                                             title)
            else:
                # ! FIXME: hack
                tag.string = '<<<FLOATING LINK: %s>>>' % tag.string
            tag.unwrap()
            return
        elif tag.name in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
            # De-duplicated six identical branches: heading level is the
            # digit in the tag name.
            tag.insert_before('\n\n%s ' % ('#' * int(tag.name[1])))
            tag.insert_after('\n\n')
            tag.unwrap()
        elif tag.name in ('ul', 'ol'):
            tag.insert_before('\n\n')
            tag.insert_after('\n\n')
            tag.unwrap()
            for i, child in enumerate(children):
                _markdownify(child, _listType=tag.name, _listIndex=i+1)
            return
        elif tag.name == 'li':
            if not _listType:
                # <li> outside of list; ignore
                return
            if _listType == 'ul':
                tag.insert_before('* ')
            else:
                tag.insert_before('%d. ' % _listIndex)
            for child in children:
                _markdownify(child)
            for c in tag.contents:
                if type(c) != bs4.element.NavigableString:
                    continue
                # indent continuation lines of the list item
                c.replace_with('\n '.join(c.split('\n')))
            tag.insert_after('\n')
            tag.unwrap()
            return
        elif tag.name in ('strong','b'):
            tag.insert_before('__')
            tag.insert_after('__')
            tag.unwrap()
        elif tag.name in ('em','i'):
            tag.insert_before('_')
            tag.insert_after('_')
            tag.unwrap()
    for child in children:
        _markdownify(child)
def convert(html):
    """Convert an HTML string to markdown while preserving unsupported markup."""
    soup = BeautifulSoup(html, 'html.parser')
    _markdownify(soup)
    text = unicode(soup).replace(u'\xa0', ' ')
    text = re.sub(r'\n{3,}', r'\n\n', text)
    # ! FIXME: hack
    text = re.sub(r'<<<FLOATING LINK: (.+)>>>', r'<\1>', text)
    # ! FIXME: hack
    pieces = []
    for piece in re.split(r'(<<<BLOCKQUOTE: .*?>>>)', text, flags=re.DOTALL):
        if piece.startswith('<<<BLOCKQUOTE:'):
            piece = '> ' + piece[len('<<<BLOCKQUOTE:') : -len('>>>')]
            piece = piece.replace('\n', '\n> ')
        pieces.append(piece)
    return ''.join(pieces).strip('\n')
Workaround (WA) to provide Python 3.x compatibility
"""html2markdown converts an html string to markdown while preserving unsupported markup."""
# TODO:
# escape all characters (in _escapeCharacters. cf. https://daringfireball.net/projects/markdown/syntax#backslash)
# implement standard <table> (i.e. without attributes)
import bs4
from bs4 import BeautifulSoup
import re
import sys
# Python 3 has no `unicode` builtin; alias it to `str` so the code below
# runs unchanged on both major versions.
if sys.version_info.major > 2:
    unicode = str
# Tags that get converted to markdown; anything else is passed through verbatim.
_supportedTags = (
    # NOTE: will be ignored if they have unsupported attributes (cf. _supportedAttributes)
    'blockquote',
    'p',
    'a',
    'h1','h2','h3','h4','h5','h6',
    'strong','b',
    'em','i',
    'ul','ol','li',
    'br',
    'img',
    'pre','code',
    'hr'
)
# '<tag name> <attribute name>' pairs allowed on converted tags.
_supportedAttributes = (
    'a href',
    'a title',
    'img alt',
    'img src',
    'img title',
)
_inlineTags = (
    # these can be mixed with markdown (when unprocessed)
    # block tags will be surrounded by newlines and be unprocessed inside
    # (unless supported tag + supported attribute[s])
    'span',
    'strong','b',
    'em','i',
    'a',
    'img',
    'code'
)
def _supportedAttrs(tag):
    """Return True if every attribute of *tag* is whitelisted in _supportedAttributes."""
    sAttrs = [attr.split(' ')[1] for attr in _supportedAttributes if attr.split(' ')[0]==tag.name]
    for attr in tag.attrs:
        if attr not in sAttrs:
            return False
    return True
def _recursivelyValid(tag):
    """Return True if *tag* and every descendant can be fully converted."""
    # not all tags require this property
    # requires: <blockquote><p style="...">asdf</p></blockquote>
    # does not: <div><p style="...">asdf</p></div>
    children = tag.find_all(recursive = False)
    for child in children:
        if not _recursivelyValid(child):
            return False
    if tag.name == '[document]':
        return True
    elif tag.name in _inlineTags:
        return True
    elif tag.name not in _supportedTags:
        return False
    if not _supportedAttrs(tag):
        return False
    return True
def _escapeCharacters(tag):
    '''non-recursively escape underlines and asterisks
    in the tag'''
    for i,c in enumerate(tag.contents):
        # Exact type check: NavigableString subclasses (e.g. comments)
        # must stay untouched.
        if type(c) != bs4.element.NavigableString:
            continue
        c.replace_with(c.replace('_','\\_').replace('*','\\*'))
def _breakRemNewlines(tag):
    '''non-recursively break spaces and remove newlines in the tag'''
    for i,c in enumerate(tag.contents):
        # Exact type check on purpose; see _escapeCharacters.
        if type(c) != bs4.element.NavigableString:
            continue
        c.replace_with(re.sub(r' {2,}', ' ', c).replace('\n',''))
def _markdownify(tag, _listType=None, _blockQuote=False, _listIndex=1):
    '''recursively converts a tag into markdown

    Args:
        tag: BeautifulSoup tag (or the whole document) to convert in place.
        _listType: 'ul' or 'ol' while converting an <li>, else None.
        _blockQuote: True while inside a <blockquote>; tightens <p> spacing.
        _listIndex: 1-based position of the current <li> within an <ol>.
    '''
    children = tag.find_all(recursive=False)
    if tag.name == '[document]':
        for child in children:
            _markdownify(child)
        return
    if tag.name not in _supportedTags or not _supportedAttrs(tag):
        # Unsupported markup is preserved verbatim; block-level tags get
        # separated from surrounding markdown by blank lines.
        if tag.name not in _inlineTags:
            tag.insert_before('\n\n')
            tag.insert_after('\n\n')
        else:
            _escapeCharacters(tag)
            for child in children:
                _markdownify(child)
        return
    if tag.name not in ('pre', 'code'):
        # Inside code, underscores/asterisks and whitespace are literal.
        _escapeCharacters(tag)
        _breakRemNewlines(tag)
    if tag.name == 'p':
        if tag.string != None:
            if tag.string.strip() == u'':
                # Keep empty paragraphs visible via a non-breaking space.
                tag.string = u'\xa0'
                tag.unwrap()
                return
        if not _blockQuote:
            tag.insert_before('\n\n')
            tag.insert_after('\n\n')
        else:
            tag.insert_before('\n')
            tag.insert_after('\n')
        tag.unwrap()
        for child in children:
            _markdownify(child)
    elif tag.name == 'br':
        tag.string = ' \n'
        tag.unwrap()
    elif tag.name == 'img':
        alt = ''
        title = ''
        if tag.has_attr('alt'):
            alt = tag['alt']
        if tag.has_attr('title') and tag['title']:
            title = ' "%s"' % tag['title']
        # Fixed: the format string was empty ('' % (...)), which raises
        # TypeError at runtime; emit proper markdown image syntax instead.
        tag.string = '![%s](%s%s)' % (alt, tag['src'], title)
        tag.unwrap()
    elif tag.name == 'hr':
        tag.string = '\n---\n'
        tag.unwrap()
    elif tag.name == 'pre':
        tag.insert_before('\n\n')
        tag.insert_after('\n\n')
        if tag.code:
            if not _supportedAttrs(tag.code):
                return
            for child in tag.code.find_all(recursive=False):
                # Only <br>-separated plain content qualifies as a code block.
                if child.name != 'br':
                    return
            # code block
            for br in tag.code.find_all('br'):
                br.string = '\n'
                br.unwrap()
            tag.code.unwrap()
            lines = unicode(tag).strip().split('\n')
            lines[0] = lines[0][5:]     # drop leading '<pre>'
            lines[-1] = lines[-1][:-6]  # drop trailing '</pre>'
            if not lines[-1]:
                lines.pop()
            for i, line in enumerate(lines):
                line = line.replace(u'\xa0', ' ')
                lines[i] = ' %s' % line
            tag.replace_with(BeautifulSoup('\n'.join(lines), 'html.parser'))
        return
    elif tag.name == 'code':
        # inline code; only valid when it has no child tags
        if children:
            return
        tag.insert_before('`` ')
        tag.insert_after(' ``')
        tag.unwrap()
    elif _recursivelyValid(tag):
        if tag.name == 'blockquote':
            # ! FIXME: hack
            tag.insert_before('<<<BLOCKQUOTE: ')
            tag.insert_after('>>>')
            tag.unwrap()
            for child in children:
                _markdownify(child, _blockQuote=True)
            return
        elif tag.name == 'a':
            # process children first
            for child in children:
                _markdownify(child)
            if tag.string != tag['href'] or tag.has_attr('title'):
                title = ''
                if tag.has_attr('title') and tag['title']:
                    title = ' "%s"' % tag['title']
                tag.string = '[%s](%s%s)' % (BeautifulSoup(unicode(tag), 'html.parser').string,
                                             tag['href'],
                                             title)
            else:
                # ! FIXME: hack
                tag.string = '<<<FLOATING LINK: %s>>>' % tag.string
            tag.unwrap()
            return
        elif tag.name in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'):
            # De-duplicated six identical branches: heading level is the
            # digit in the tag name.
            tag.insert_before('\n\n%s ' % ('#' * int(tag.name[1])))
            tag.insert_after('\n\n')
            tag.unwrap()
        elif tag.name in ('ul', 'ol'):
            tag.insert_before('\n\n')
            tag.insert_after('\n\n')
            tag.unwrap()
            for i, child in enumerate(children):
                _markdownify(child, _listType=tag.name, _listIndex=i+1)
            return
        elif tag.name == 'li':
            if not _listType:
                # <li> outside of list; ignore
                return
            if _listType == 'ul':
                tag.insert_before('* ')
            else:
                tag.insert_before('%d. ' % _listIndex)
            for child in children:
                _markdownify(child)
            for c in tag.contents:
                if type(c) != bs4.element.NavigableString:
                    continue
                # indent continuation lines of the list item
                c.replace_with('\n '.join(c.split('\n')))
            tag.insert_after('\n')
            tag.unwrap()
            return
        elif tag.name in ('strong','b'):
            tag.insert_before('__')
            tag.insert_after('__')
            tag.unwrap()
        elif tag.name in ('em','i'):
            tag.insert_before('_')
            tag.insert_after('_')
            tag.unwrap()
    for child in children:
        _markdownify(child)
def convert(html):
    """converts an html string to markdown while preserving unsupported markup."""
    bs = BeautifulSoup(html, 'html.parser')
    _markdownify(bs)
    ret = unicode(bs).replace(u'\xa0', ' ')
    # Collapse runs of blank lines left behind by the tag conversion.
    ret = re.sub(r'\n{3,}', r'\n\n', ret)
    # ! FIXME: hack
    ret = re.sub(r'<<<FLOATING LINK: (.+)>>>', r'<\1>', ret)
    # ! FIXME: hack
    sp = re.split(r'(<<<BLOCKQUOTE: .*?>>>)', ret, flags=re.DOTALL)
    for i,e in enumerate(sp):
        if e[:len('<<<BLOCKQUOTE:')] == '<<<BLOCKQUOTE:':
            # Strip the placeholder markers and prefix every line with '> '.
            sp[i] = '> ' + e[len('<<<BLOCKQUOTE:') : -len('>>>')]
            sp[i] = sp[i].replace('\n', '\n> ')
    ret = ''.join(sp)
    return ret.strip('\n')
import base64
import datetime
import logging
import math
import os
import signal
import time
import urllib.parse
import psycopg2
from psycopg2 import errorcodes as postgres_errors
from ctf_gameserver.lib.args import get_arg_parser_with_db
from ctf_gameserver.lib import daemon
from ctf_gameserver.lib.checkresult import CheckResult
from ctf_gameserver.lib.exceptions import DBDataError
import ctf_gameserver.lib.flag as flag_lib
from . import database
from .supervisor import RunnerSupervisor
from .supervisor import ACTION_FLAG, ACTION_LOAD, ACTION_STORE, ACTION_RESULT
def main():
    """
    Entry point of the Checker Master daemon.

    Parses command-line options, configures logging, verifies database
    permissions and then runs the MasterLoop until SIGTERM.

    Returns:
        A BSD-style exit code (os.EX_*) suitable for sys.exit().
    """
    arg_parser = get_arg_parser_with_db('CTF Gameserver Checker Master')
    arg_parser.add_argument('--ippattern', type=str, required=True,
                            help='(Old-style) Python formatstring for building the IP to connect to')
    arg_parser.add_argument('--flagsecret', type=str, required=True,
                            help='Base64 string used as secret in flag generation')

    group = arg_parser.add_argument_group('statedb', 'Checker state database')
    group.add_argument('--statedbhost', type=str, help='Hostname of the database. If unspecified, the '
                                                       'default Unix socket will be used.')
    group.add_argument('--statedbname', type=str, required=True,
                       help='Name of the used database')
    group.add_argument('--statedbuser', type=str, required=True,
                       help='User name for database access')
    group.add_argument('--statedbpassword', type=str,
                       help='Password for database access if needed')

    group = arg_parser.add_argument_group('check', 'Check parameters')
    group.add_argument('--service', type=str, required=True,
                       help='Slug of the service')
    group.add_argument('--checkerscript', type=str, required=True,
                       help='Path of the Checker Script')
    # Fixed typo in the user-facing help text ('excute' -> 'execute').
    group.add_argument('--sudouser', type=str, help='User to execute the Checker Scripts as, will be '
                                                    'passed to `sudo -u`')
    group.add_argument('--maxcheckduration', type=float, required=True,
                       help='Maximum duration of a Checker Script run in seconds')
    group.add_argument('--checkercount', type=int, required=True,
                       help='Number of Checker Masters running for this service')
    group.add_argument('--interval', type=float, required=True,
                       help='Time between launching batches of Checker Scripts in seconds')

    group = arg_parser.add_argument_group('logging', 'Checker Script logging')
    group.add_argument('--journald', action='store_true', help='Log Checker Script messages to journald')
    group.add_argument('--gelf-server', help='Log Checker Script messages to the specified GELF (Graylog) '
                                             'server ("<host>:<port>")')

    args = arg_parser.parse_args()

    logging.basicConfig(format='[%(levelname)s] %(message)s [%(name)s]')
    numeric_loglevel = getattr(logging, args.loglevel.upper())
    logging.getLogger().setLevel(numeric_loglevel)

    if args.interval < 3:
        logging.error('`--interval` must be at least 3 seconds')
        return os.EX_USAGE

    logging_params = {}

    if args.journald:
        try:
            # pylint: disable=import-outside-toplevel,unused-import,import-error
            from systemd.journal import JournalHandler
        except ImportError:
            logging.error('systemd module is required for journald logging')
            return os.EX_USAGE
        logging_params['journald'] = True

    if args.gelf_server is not None:
        try:
            # pylint: disable=import-outside-toplevel,unused-import,import-error
            import graypy
        except ImportError:
            logging.error('graypy module is required for GELF logging')
            return os.EX_USAGE

        # Use pseudo URL for splitting, see https://stackoverflow.com/a/53172593
        gelf_server = urllib.parse.urlsplit('//' + args.gelf_server)
        gelf_host = gelf_server.hostname
        gelf_port = gelf_server.port
        # Fixed: the original tested `gelf_server is None` (never true for an
        # urlsplit result), so a missing port slipped through unnoticed.
        if gelf_host is None or gelf_port is None:
            logging.error('GELF server needs to be specified as "<host>:<port>"')
            return os.EX_USAGE
        logging_params['gelf'] = {'host': gelf_host, 'port': gelf_port}

    flag_secret = base64.b64decode(args.flagsecret)

    game_db_conn, state_db_conn = database.connect_to_dbs(args.dbhost, args.dbname, args.dbuser,
                                                          args.dbpassword, args.statedbhost,
                                                          args.statedbname, args.statedbuser,
                                                          args.statedbpassword)
    if game_db_conn is None or state_db_conn is None:
        return os.EX_UNAVAILABLE

    # Check database grants
    try:
        try:
            database.get_control_info(game_db_conn, prohibit_changes=True)
        except DBDataError as e:
            logging.warning('Invalid database state: %s', e)

        try:
            service_id = database.get_service_attributes(game_db_conn, args.service,
                                                         prohibit_changes=True)['id']
        except DBDataError as e:
            logging.warning('Invalid database state: %s', e)
            service_id = 1337    # Use dummy value for subsequent grant checks

        try:
            database.get_current_tick(game_db_conn, prohibit_changes=True)
        except DBDataError as e:
            logging.warning('Invalid database state: %s', e)

        database.get_task_count(game_db_conn, service_id, prohibit_changes=True)
        database.get_new_tasks(game_db_conn, service_id, 1, prohibit_changes=True)
        database.commit_result(game_db_conn, service_id, 1, 0, 0, prohibit_changes=True, fake_team_id=1)
        database.load_state(state_db_conn, service_id, 1, 'identifier', prohibit_changes=True)
        database.store_state(state_db_conn, service_id, 1, 'identifier', 'data', prohibit_changes=True)
    except psycopg2.ProgrammingError as e:
        if e.pgcode == postgres_errors.INSUFFICIENT_PRIVILEGE:
            # Log full exception because only the backtrace will tell which kind of permission is missing
            logging.exception('Missing database permissions:')
            return os.EX_NOPERM
        else:
            raise

    daemon.notify('READY=1')

    # Retry until the database contains a valid contest configuration.
    while True:
        try:
            master_loop = MasterLoop(game_db_conn, state_db_conn, args.service, args.checkerscript,
                                     args.sudouser, args.maxcheckduration, args.checkercount,
                                     args.interval, args.ippattern, flag_secret, logging_params)
            break
        except DBDataError as e:
            logging.warning('Waiting for valid database state: %s', e)
            time.sleep(60)

    # Graceful shutdown to prevent loss of check results
    def sigterm_handler(_, __):
        logging.info('Shutting down, waiting for %d Checker Scripts to finish',
                     master_loop.get_running_script_count())
        master_loop.shutting_down = True
    signal.signal(signal.SIGTERM, sigterm_handler)

    while True:
        try:
            master_loop.step()
            if master_loop.shutting_down and master_loop.get_running_script_count() == 0:
                break
        except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
            logging.error('Database error: %s', e)
            while True:
                logging.info('Trying to reconnect to databases')
                game_db_conn, state_db_conn = database.connect_to_dbs(args.dbhost, args.dbname,
                                                                      args.dbuser, args.dbpassword,
                                                                      args.statedbhost, args.statedbname,
                                                                      args.statedbuser,
                                                                      args.statedbpassword)
                if game_db_conn is None or state_db_conn is None:
                    logging.warning('Could not reconnect to databases, waiting to try again')
                    time.sleep(60)
                else:
                    master_loop.game_db_conn = game_db_conn
                    master_loop.state_db_conn = state_db_conn
                    break
        except:    # noqa, pylint: disable=bare-except
            # Deliberate catch-all: the daemon must keep running through
            # unexpected errors; they are logged with full backtrace.
            logging.exception('Error in main loop:')

    logging.info('Closing database connections')
    game_db_conn.close()
    state_db_conn.close()
    return os.EX_OK
class MasterLoop:
    """
    Main loop of the Checker Master: launches batches of Checker Scripts
    through a RunnerSupervisor and answers their flag/load/store/result
    requests using the game and state databases.
    """

    def __init__(self, game_db_conn, state_db_conn, service_slug, checker_script, sudo_user,
                 max_check_duration, checker_count, interval, ip_pattern, flag_secret, logging_params):
        self.game_db_conn = game_db_conn
        self.state_db_conn = state_db_conn
        self.checker_script = checker_script
        self.sudo_user = sudo_user
        self.max_check_duration = max_check_duration
        self.checker_count = checker_count
        self.interval = interval
        self.ip_pattern = ip_pattern
        self.flag_secret = flag_secret
        self.logging_params = logging_params

        # May raise DBDataError while the database is not in a valid state
        # yet; main() retries construction in that case.
        self.refresh_control_info()
        self.service = database.get_service_attributes(self.game_db_conn, service_slug)
        self.service['slug'] = service_slug

        self.supervisor = RunnerSupervisor()
        # Last tick seen by launch_tasks(); -1 forces a parameter update on
        # the first launch.
        self.known_tick = -1
        # Trigger launch of tasks in first step()
        self.last_launch = get_monotonic_time() - self.interval
        self.tasks_per_launch = None
        self.shutting_down = False

    def refresh_control_info(self):
        """Re-read contest timing parameters from the game database."""
        control_info = database.get_control_info(self.game_db_conn)
        self.contest_start = control_info['contest_start']
        self.tick_duration = datetime.timedelta(seconds=control_info['tick_duration'])
        self.flag_valid_ticks = control_info['valid_ticks']

    def step(self):
        """
        Handles a request from the supervisor, kills overdue tasks and launches new ones.
        Only processes one request at a time to make sure that launch_tasks() gets called regularly and
        long-running tasks get killed, at the cost of accumulating a backlog of messages.
        Returns:
            A boolean indicating whether a request was handled.
        """
        req = self.supervisor.get_request()
        if req is not None:
            resp = None
            send_resp = True
            try:
                if req['action'] == ACTION_FLAG:
                    resp = self.handle_flag_request(req['info'], req['param'])
                elif req['action'] == ACTION_LOAD:
                    resp = self.handle_load_request(req['info'], req['param'])
                elif req['action'] == ACTION_STORE:
                    # Store and result requests have no response payload.
                    self.handle_store_request(req['info'], req['param'])
                elif req['action'] == ACTION_RESULT:
                    self.handle_result_request(req['info'], req['param'])
                else:
                    logging.error('Unknown action received from Checker Script for team %d (net number %d) '
                                  'in tick %d: %s', req['info']['_team_id'], req['info']['team'],
                                  req['info']['tick'], req['action'])
                    # We can't signal an error to the Checker Script (which might be waiting for a response),
                    # so our only option is to kill it
                    self.supervisor.terminate_runner(req['runner_id'])
                    send_resp = False
            except:    # noqa, pylint: disable=bare-except
                logging.exception('Checker Script communication error for team %d (net number %d) in tick '
                                  '%d:', req['info']['_team_id'], req['info']['team'], req['info']['tick'])
                self.supervisor.terminate_runner(req['runner_id'])
            else:
                if send_resp:
                    req['send'].send(resp)
        if not self.shutting_down:
            # Launch new tasks and catch up missed intervals
            while get_monotonic_time() - self.last_launch >= self.interval:
                self.last_launch += self.interval
                self.launch_tasks()
        return req is not None

    def handle_flag_request(self, task_info, params):
        """Generate and return the flag for the requesting script's team/tick."""
        try:
            tick = int(params['tick'])
        except (KeyError, ValueError):
            return None

        try:
            payload = base64.b64decode(params['payload'])
        except KeyError:
            payload = None
        if payload == b'':
            payload = None

        # We need current value for self.contest_start which might have changed
        self.refresh_control_info()
        expiration = self.contest_start + (self.flag_valid_ticks + tick) * self.tick_duration

        return flag_lib.generate(task_info['team'], self.service['id'], self.flag_secret, payload,
                                 expiration.timestamp())

    def handle_load_request(self, task_info, param):
        """Return the stored state blob for this service/team under key `param`."""
        return database.load_state(self.state_db_conn, self.service['id'], task_info['team'], param)

    def handle_store_request(self, task_info, params):
        """Persist a state blob for this service/team under `params['key']`."""
        database.store_state(self.state_db_conn, self.service['id'], task_info['team'], params['key'],
                             params['data'])

    def handle_result_request(self, task_info, param):
        """Validate a check result reported by a script and commit it to the game DB."""
        try:
            result = int(param)
        except ValueError:
            logging.error('Invalid result from Checker Script for team %d (net number %d) in tick %d: %s',
                          task_info['_team_id'], task_info['team'], task_info['tick'], param)
            return

        try:
            check_result = CheckResult(result)
        except ValueError:
            logging.error('Invalid result from Checker Script for team %d (net number %d) in tick %d: %d',
                          task_info['_team_id'], task_info['team'], task_info['tick'], result)
            return

        logging.info('Result from Checker Script for team %d (net number %d) in tick %d: %s',
                     task_info['_team_id'], task_info['team'], task_info['tick'], check_result)
        database.commit_result(self.game_db_conn, self.service['id'], task_info['team'], task_info['tick'],
                               result)

    def launch_tasks(self):
        """Start Checker Scripts for the next batch of pending team checks."""
        current_tick = database.get_current_tick(self.game_db_conn)

        if current_tick < 0:
            # Competition not running yet
            return

        if current_tick != self.known_tick:
            self.supervisor.terminate_runners()
            self.update_launch_params()
            self.known_tick = current_tick

        tasks = database.get_new_tasks(self.game_db_conn, self.service['id'], self.tasks_per_launch)
        # NOTE(review): the tick may advance between get_current_tick() and
        # get_new_tasks(), in which case task_info['tick'] (current_tick) can
        # disagree with task['tick'] — confirm against the follow-up commit
        # "Resolve race condition when tick changes while launching tasks".
        for task in tasks:
            ip = self.ip_pattern % task['team_net_no']
            runner_args = [self.checker_script, ip, str(task['team_net_no']), str(task['tick'])]
            # Information in task_info should be somewhat human-readable, because it also ends up in Checker
            # Script logs
            task_info = {'service': self.service['slug'],
                         'team': task['team_net_no'],
                         '_team_id': task['team_id'],
                         'tick': current_tick}
            logging.info('Starting Checker Script for team %d (net number %d) in tick %d', task['team_id'],
                         task['team_net_no'], current_tick)
            self.supervisor.start_runner(runner_args, self.sudo_user, task_info, self.logging_params)

    def update_launch_params(self):
        """
        Determines the number of Checker tasks to start per launch.
        Our goal here is to balance the load over a tick with some smearing (to make Checker fingerprinting
        more difficult), while also ensuring that all teams get checked in every tick.
        This simple implementation distributes the start of tasks evenly across the available time with some
        safety margin at the end.
        """
        total_tasks = database.get_task_count(self.game_db_conn, self.service['id'])
        # Each of the `checker_count` masters handles its share of the teams.
        local_tasks = math.ceil(total_tasks / self.checker_count)

        margin_seconds = self.tick_duration.total_seconds() / 6
        launch_timeframe = self.tick_duration.total_seconds() - self.max_check_duration - margin_seconds
        if launch_timeframe < 0:
            raise ValueError('Maximum Checker Script duration too long for tick')

        intervals_per_timeframe = max(math.floor(launch_timeframe / self.interval), 1)
        self.tasks_per_launch = math.ceil(local_tasks / intervals_per_timeframe)

    def get_running_script_count(self):
        """Return the number of Checker Script processes currently running."""
        return len(self.supervisor.processes)
def get_monotonic_time():
    """
    Wrapper around time.monotonic() to enable mocking in test cases. Globally mocking time.monotonic()
    breaks library code (e.g. multiprocessing in RunnerSupervisor).
    """
    return time.monotonic()
Checker: Resolve race condition when tick changes while launching tasks
Co-authored-by: Simon Ruderich <088e16a1019277b15d58faf0541e11910eb756f6@ruderich.org>
import base64
import datetime
import logging
import math
import os
import signal
import time
import urllib.parse
import psycopg2
from psycopg2 import errorcodes as postgres_errors
from ctf_gameserver.lib.args import get_arg_parser_with_db
from ctf_gameserver.lib import daemon
from ctf_gameserver.lib.checkresult import CheckResult
from ctf_gameserver.lib.exceptions import DBDataError
import ctf_gameserver.lib.flag as flag_lib
from . import database
from .supervisor import RunnerSupervisor
from .supervisor import ACTION_FLAG, ACTION_LOAD, ACTION_STORE, ACTION_RESULT
def main():
    """
    Entry point of the Checker Master daemon.

    Parses command-line options, configures logging, verifies database
    permissions and then runs the MasterLoop until SIGTERM.

    Returns:
        A BSD-style exit code (os.EX_*) suitable for sys.exit().
    """
    arg_parser = get_arg_parser_with_db('CTF Gameserver Checker Master')
    arg_parser.add_argument('--ippattern', type=str, required=True,
                            help='(Old-style) Python formatstring for building the IP to connect to')
    arg_parser.add_argument('--flagsecret', type=str, required=True,
                            help='Base64 string used as secret in flag generation')

    group = arg_parser.add_argument_group('statedb', 'Checker state database')
    group.add_argument('--statedbhost', type=str, help='Hostname of the database. If unspecified, the '
                                                       'default Unix socket will be used.')
    group.add_argument('--statedbname', type=str, required=True,
                       help='Name of the used database')
    group.add_argument('--statedbuser', type=str, required=True,
                       help='User name for database access')
    group.add_argument('--statedbpassword', type=str,
                       help='Password for database access if needed')

    group = arg_parser.add_argument_group('check', 'Check parameters')
    group.add_argument('--service', type=str, required=True,
                       help='Slug of the service')
    group.add_argument('--checkerscript', type=str, required=True,
                       help='Path of the Checker Script')
    # Fixed typo in the user-facing help text ('excute' -> 'execute').
    group.add_argument('--sudouser', type=str, help='User to execute the Checker Scripts as, will be '
                                                    'passed to `sudo -u`')
    group.add_argument('--maxcheckduration', type=float, required=True,
                       help='Maximum duration of a Checker Script run in seconds')
    group.add_argument('--checkercount', type=int, required=True,
                       help='Number of Checker Masters running for this service')
    group.add_argument('--interval', type=float, required=True,
                       help='Time between launching batches of Checker Scripts in seconds')

    group = arg_parser.add_argument_group('logging', 'Checker Script logging')
    group.add_argument('--journald', action='store_true', help='Log Checker Script messages to journald')
    group.add_argument('--gelf-server', help='Log Checker Script messages to the specified GELF (Graylog) '
                                             'server ("<host>:<port>")')

    args = arg_parser.parse_args()

    logging.basicConfig(format='[%(levelname)s] %(message)s [%(name)s]')
    numeric_loglevel = getattr(logging, args.loglevel.upper())
    logging.getLogger().setLevel(numeric_loglevel)

    if args.interval < 3:
        logging.error('`--interval` must be at least 3 seconds')
        return os.EX_USAGE

    logging_params = {}

    if args.journald:
        try:
            # pylint: disable=import-outside-toplevel,unused-import,import-error
            from systemd.journal import JournalHandler
        except ImportError:
            logging.error('systemd module is required for journald logging')
            return os.EX_USAGE
        logging_params['journald'] = True

    if args.gelf_server is not None:
        try:
            # pylint: disable=import-outside-toplevel,unused-import,import-error
            import graypy
        except ImportError:
            logging.error('graypy module is required for GELF logging')
            return os.EX_USAGE

        # Use pseudo URL for splitting, see https://stackoverflow.com/a/53172593
        gelf_server = urllib.parse.urlsplit('//' + args.gelf_server)
        gelf_host = gelf_server.hostname
        gelf_port = gelf_server.port
        # Fixed: the original tested `gelf_server is None` (never true for an
        # urlsplit result), so a missing port slipped through unnoticed.
        if gelf_host is None or gelf_port is None:
            logging.error('GELF server needs to be specified as "<host>:<port>"')
            return os.EX_USAGE
        logging_params['gelf'] = {'host': gelf_host, 'port': gelf_port}

    flag_secret = base64.b64decode(args.flagsecret)

    game_db_conn, state_db_conn = database.connect_to_dbs(args.dbhost, args.dbname, args.dbuser,
                                                          args.dbpassword, args.statedbhost,
                                                          args.statedbname, args.statedbuser,
                                                          args.statedbpassword)
    if game_db_conn is None or state_db_conn is None:
        return os.EX_UNAVAILABLE

    # Check database grants
    try:
        try:
            database.get_control_info(game_db_conn, prohibit_changes=True)
        except DBDataError as e:
            logging.warning('Invalid database state: %s', e)

        try:
            service_id = database.get_service_attributes(game_db_conn, args.service,
                                                         prohibit_changes=True)['id']
        except DBDataError as e:
            logging.warning('Invalid database state: %s', e)
            service_id = 1337    # Use dummy value for subsequent grant checks

        try:
            database.get_current_tick(game_db_conn, prohibit_changes=True)
        except DBDataError as e:
            logging.warning('Invalid database state: %s', e)

        database.get_task_count(game_db_conn, service_id, prohibit_changes=True)
        database.get_new_tasks(game_db_conn, service_id, 1, prohibit_changes=True)
        database.commit_result(game_db_conn, service_id, 1, 0, 0, prohibit_changes=True, fake_team_id=1)
        database.load_state(state_db_conn, service_id, 1, 'identifier', prohibit_changes=True)
        database.store_state(state_db_conn, service_id, 1, 'identifier', 'data', prohibit_changes=True)
    except psycopg2.ProgrammingError as e:
        if e.pgcode == postgres_errors.INSUFFICIENT_PRIVILEGE:
            # Log full exception because only the backtrace will tell which kind of permission is missing
            logging.exception('Missing database permissions:')
            return os.EX_NOPERM
        else:
            raise

    daemon.notify('READY=1')

    # Retry until the database contains a valid contest configuration.
    while True:
        try:
            master_loop = MasterLoop(game_db_conn, state_db_conn, args.service, args.checkerscript,
                                     args.sudouser, args.maxcheckduration, args.checkercount,
                                     args.interval, args.ippattern, flag_secret, logging_params)
            break
        except DBDataError as e:
            logging.warning('Waiting for valid database state: %s', e)
            time.sleep(60)

    # Graceful shutdown to prevent loss of check results
    def sigterm_handler(_, __):
        logging.info('Shutting down, waiting for %d Checker Scripts to finish',
                     master_loop.get_running_script_count())
        master_loop.shutting_down = True
    signal.signal(signal.SIGTERM, sigterm_handler)

    while True:
        try:
            master_loop.step()
            if master_loop.shutting_down and master_loop.get_running_script_count() == 0:
                break
        except (psycopg2.OperationalError, psycopg2.InterfaceError) as e:
            logging.error('Database error: %s', e)
            while True:
                logging.info('Trying to reconnect to databases')
                game_db_conn, state_db_conn = database.connect_to_dbs(args.dbhost, args.dbname,
                                                                      args.dbuser, args.dbpassword,
                                                                      args.statedbhost, args.statedbname,
                                                                      args.statedbuser,
                                                                      args.statedbpassword)
                if game_db_conn is None or state_db_conn is None:
                    logging.warning('Could not reconnect to databases, waiting to try again')
                    time.sleep(60)
                else:
                    master_loop.game_db_conn = game_db_conn
                    master_loop.state_db_conn = state_db_conn
                    break
        except:    # noqa, pylint: disable=bare-except
            # Deliberate catch-all: the daemon must keep running through
            # unexpected errors; they are logged with full backtrace.
            logging.exception('Error in main loop:')

    logging.info('Closing database connections')
    game_db_conn.close()
    state_db_conn.close()
    return os.EX_OK
class MasterLoop:
    def __init__(self, game_db_conn, state_db_conn, service_slug, checker_script, sudo_user,
                 max_check_duration, checker_count, interval, ip_pattern, flag_secret, logging_params):
        self.game_db_conn = game_db_conn
        self.state_db_conn = state_db_conn
        self.checker_script = checker_script
        self.sudo_user = sudo_user
        self.max_check_duration = max_check_duration
        self.checker_count = checker_count
        self.interval = interval
        self.ip_pattern = ip_pattern
        self.flag_secret = flag_secret
        self.logging_params = logging_params

        # May raise DBDataError while the database is not in a valid state
        # yet; main() retries construction in that case.
        self.refresh_control_info()
        self.service = database.get_service_attributes(self.game_db_conn, service_slug)
        self.service['slug'] = service_slug

        self.supervisor = RunnerSupervisor()
        # Last tick seen when launching tasks; -1 forces a parameter update
        # on the first launch.
        self.known_tick = -1
        # Trigger launch of tasks in first step()
        self.last_launch = get_monotonic_time() - self.interval
        self.tasks_per_launch = None
        self.shutting_down = False
def refresh_control_info(self):
control_info = database.get_control_info(self.game_db_conn)
self.contest_start = control_info['contest_start']
self.tick_duration = datetime.timedelta(seconds=control_info['tick_duration'])
self.flag_valid_ticks = control_info['valid_ticks']
def step(self):
"""
Handles a request from the supervisor, kills overdue tasks and launches new ones.
Only processes one request at a time to make sure that launch_tasks() gets called regularly and
long-running tasks get killed, at the cost of accumulating a backlog of messages.
Returns:
A boolean indicating whether a request was handled.
"""
req = self.supervisor.get_request()
if req is not None:
resp = None
send_resp = True
try:
if req['action'] == ACTION_FLAG:
resp = self.handle_flag_request(req['info'], req['param'])
elif req['action'] == ACTION_LOAD:
resp = self.handle_load_request(req['info'], req['param'])
elif req['action'] == ACTION_STORE:
self.handle_store_request(req['info'], req['param'])
elif req['action'] == ACTION_RESULT:
self.handle_result_request(req['info'], req['param'])
else:
logging.error('Unknown action received from Checker Script for team %d (net number %d) '
'in tick %d: %s', req['info']['_team_id'], req['info']['team'],
req['info']['tick'], req['action'])
# We can't signal an error to the Checker Script (which might be waiting for a response),
# so our only option is to kill it
self.supervisor.terminate_runner(req['runner_id'])
send_resp = False
except: # noqa, pylint: disable=bare-except
logging.exception('Checker Script communication error for team %d (net number %d) in tick '
'%d:', req['info']['_team_id'], req['info']['team'], req['info']['tick'])
self.supervisor.terminate_runner(req['runner_id'])
else:
if send_resp:
req['send'].send(resp)
if not self.shutting_down:
# Launch new tasks and catch up missed intervals
while get_monotonic_time() - self.last_launch >= self.interval:
self.last_launch += self.interval
self.launch_tasks()
return req is not None
def handle_flag_request(self, task_info, params):
try:
tick = int(params['tick'])
except (KeyError, ValueError):
return None
try:
payload = base64.b64decode(params['payload'])
except KeyError:
payload = None
if payload == b'':
payload = None
# We need current value for self.contest_start which might have changed
self.refresh_control_info()
expiration = self.contest_start + (self.flag_valid_ticks + tick) * self.tick_duration
return flag_lib.generate(task_info['team'], self.service['id'], self.flag_secret, payload,
expiration.timestamp())
def handle_load_request(self, task_info, param):
return database.load_state(self.state_db_conn, self.service['id'], task_info['team'], param)
def handle_store_request(self, task_info, params):
database.store_state(self.state_db_conn, self.service['id'], task_info['team'], params['key'],
params['data'])
def handle_result_request(self, task_info, param):
try:
result = int(param)
except ValueError:
logging.error('Invalid result from Checker Script for team %d (net number %d) in tick %d: %s',
task_info['_team_id'], task_info['team'], task_info['tick'], param)
return
try:
check_result = CheckResult(result)
except ValueError:
logging.error('Invalid result from Checker Script for team %d (net number %d) in tick %d: %d',
task_info['_team_id'], task_info['team'], task_info['tick'], result)
return
logging.info('Result from Checker Script for team %d (net number %d) in tick %d: %s',
task_info['_team_id'], task_info['team'], task_info['tick'], check_result)
database.commit_result(self.game_db_conn, self.service['id'], task_info['team'], task_info['tick'],
result)
def launch_tasks(self):
def change_tick(new_tick):
self.supervisor.terminate_runners()
self.update_launch_params()
self.known_tick = new_tick
current_tick = database.get_current_tick(self.game_db_conn)
if current_tick < 0:
# Competition not running yet
return
if current_tick != self.known_tick:
change_tick(current_tick)
tasks = database.get_new_tasks(self.game_db_conn, self.service['id'], self.tasks_per_launch)
# The current tick might have changed since calling `database.get_current_tick()`, so terminate the
# old Runners; `database.get_new_tasks()` only returns tasks for one single tick
if len(tasks) > 0 and tasks[0]['tick'] != current_tick:
current_tick = tasks[0]['tick']
change_tick(current_tick)
for task in tasks:
ip = self.ip_pattern % task['team_net_no']
runner_args = [self.checker_script, ip, str(task['team_net_no']), str(task['tick'])]
# Information in task_info should be somewhat human-readable, because it also ends up in Checker
# Script logs
task_info = {'service': self.service['slug'],
'team': task['team_net_no'],
'_team_id': task['team_id'],
'tick': task['tick']}
logging.info('Starting Checker Script for team %d (net number %d) in tick %d', task['team_id'],
task['team_net_no'], task['tick'])
self.supervisor.start_runner(runner_args, self.sudo_user, task_info, self.logging_params)
def update_launch_params(self):
"""
Determines the number of Checker tasks to start per launch.
Our goal here is to balance the load over a tick with some smearing (to make Checker fingerprinting
more difficult), while also ensuring that all teams get checked in every tick.
This simple implementation distributes the start of tasks evenly across the available time with some
safety margin at the end.
"""
total_tasks = database.get_task_count(self.game_db_conn, self.service['id'])
local_tasks = math.ceil(total_tasks / self.checker_count)
margin_seconds = self.tick_duration.total_seconds() / 6
launch_timeframe = self.tick_duration.total_seconds() - self.max_check_duration - margin_seconds
if launch_timeframe < 0:
raise ValueError('Maximum Checker Script duration too long for tick')
intervals_per_timeframe = max(math.floor(launch_timeframe / self.interval), 1)
self.tasks_per_launch = math.ceil(local_tasks / intervals_per_timeframe)
def get_running_script_count(self):
return len(self.supervisor.processes)
def get_monotonic_time():
    """
    Wrapper around time.monotonic() to enable mocking in test cases. Globally mocking
    time.monotonic() breaks library code (e.g. multiprocessing in RunnerSupervisor).

    Returns:
        A monotonic clock reading in (fractional) seconds as a float; only
        differences between readings are meaningful.
    """
    return time.monotonic()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2017 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2017 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import sys
import json
from . import legacy
# Default file names / lookup locations for the JSON based configuration.
FILE_NAME = "netius.json"
""" The default name of the file that is going to be
used for the loading of configuration values from json """

FILE_TEMPLATE = "netius.%s.json"
""" The template to be used in the construction of the
domain specific configuration file paths """

HOME_FILE = "~/.home"
""" The location of the file that may be used to "redirect"
the home directory contents to a different directory """

# Maps a target type to a casting callable; each cast is a no-op when the
# value already has the requested type.
CASTS = {
    bool : lambda v: v if type(v) == bool else v == "1",
    list : lambda v: v if type(v) == list else v.split(";"),
    tuple : lambda v: v if type(v) == tuple else tuple(v.split(";"))
}
""" The map containing the various cast method
operation associated with the various data types,
they provide a different type of casting strategy """

ENV_ENCODINGS = (
    "utf-8",
    sys.getdefaultencoding(),
    sys.getfilesystemencoding()
)
""" The sequence of encodings that are going to
be used to try to decode possible byte based strings
for the various environment variable values """

# Mutable module-level state: the configuration map, the registry of loaded
# files and the resolved home directories (lazily populated).
CONFIGS = {}
""" The map that contains the key value association
for all the currently set global configurations """

CONFIG_F = []
""" The list of files that have been used for the loading
of the configuration through this session, every time a
loading of configuration from a file occurs the same path
is added to this global list """

HOMES = []
""" Global reference to the paths to the directory considered
to be the home on in terms of configuration, this value should
be set on the initial loading of the ".home" file """
def conf(name, default = None, cast = None):
    """
    Retrieves the configuration value for the provided value
    defaulting to the provided default value in case no value
    is found for the provided name.

    An optional cast operation may be performed on the value
    in case it's requested.

    :type name: String
    :param name: The name of the configuration value to be
    retrieved.
    :type default: Object
    :param default: The default value to be retrieved in case
    no value was found for the provided name.
    :type cast: Type/String
    :param cast: The cast operation to be performed in the
    resolved value (optional).
    :rtype: Object
    :return: The value for the configuration with the requested
    name or the default value if no value was found.
    """

    is_string = type(cast) in legacy.STRINGS
    if is_string:
        # __builtins__ is a dict when this module is imported, but a module
        # object when running under __main__; support both so that string
        # based cast resolution does not depend on the import context
        builtins_map = __builtins__ if isinstance(__builtins__, dict) else vars(__builtins__)
        cast = builtins_map.get(cast, None)
    cast = CASTS.get(cast, cast)
    value = CONFIGS.get(name, default)
    # identity test instead of equality avoids surprises with values whose
    # __eq__ compares equal to None
    if cast and value is not None: value = cast(value)
    return value
def conf_prefix(prefix):
    """ Returns a map with only the currently set configuration
    values whose name starts with the provided prefix. """

    return dict(
        (name, value) for name, value in CONFIGS.items()
        if name.startswith(prefix)
    )
def conf_suffix(suffix):
    """ Returns a map with only the currently set configuration
    values whose name ends with the provided suffix. """

    return dict(
        (name, value) for name, value in CONFIGS.items()
        if name.endswith(suffix)
    )
def conf_s(name, value):
    """ Sets (or replaces) the configuration value for the provided
    name in the global configuration map. """

    CONFIGS[name] = value
def conf_d():
    """ Returns the global map with all the currently set
    configuration values (a live reference, not a copy). """
    return CONFIGS
def load(names = (FILE_NAME,), path = None, encoding = "utf-8"):
    """ Loads configuration from the candidate locations (each home
    directory and its .config sub-directory, the Python installation
    prefix and the explicitly provided path, in that order) and then
    from the environment variables. """

    candidates = []
    for home in get_homes():
        candidates.append(os.path.join(home))
        candidates.append(os.path.join(home, ".config"))
    candidates.append(sys.prefix)
    candidates.append(path)
    for candidate in candidates:
        for name in names:
            load_file(name = name, path = candidate, encoding = encoding)
    load_env()
def load_file(name = FILE_NAME, path = None, encoding = "utf-8"):
    """ Loads a single JSON configuration file, resolving its include
    directives, and merges the resulting values into the global
    configuration map. Missing or empty files are silently ignored. """

    # resolve the absolute and normalized path of the target file
    if path:
        file_path = os.path.join(os.path.normpath(path), name)
    else:
        file_path = name
    file_path = os.path.normpath(os.path.abspath(file_path))
    base_path = os.path.dirname(file_path)

    if not os.path.exists(file_path): return

    # (re-)register the file at the end of the loaded files registry
    if file_path in CONFIG_F: CONFIG_F.remove(file_path)
    CONFIG_F.append(file_path)

    with open(file_path, "rb") as file:
        data = file.read()
    if not data: return

    data_j = json.loads(data.decode(encoding))
    _load_includes(base_path, data_j, encoding = encoding)
    for key, value in data_j.items():
        CONFIGS[key] = value
def load_env():
    """
    Loads the complete set of environment variables into the global
    configuration map, decoding byte based values when possible.
    """
    config = dict(os.environ)
    homes = get_homes()
    for home in homes:
        _load_includes(home, config)
    for key, value in legacy.iteritems(config):
        # store the raw value first so the key is always present, even if
        # none of the candidate encodings below succeeds
        CONFIGS[key] = value
        is_bytes = legacy.is_bytes(value)
        if not is_bytes: continue
        # try the candidate encodings in order, keeping the first
        # successful decoding of the byte based value
        for encoding in ENV_ENCODINGS:
            try: value = value.decode(encoding)
            except UnicodeDecodeError: pass
            else: break
        CONFIGS[key] = value
def get_homes(
    file_path = HOME_FILE,
    default = "~",
    encoding = "utf-8",
    force_default = False
):
    """
    Resolves the sequence of "home" directories used for configuration
    loading, caching the result in the global HOMES list.

    The $HOMES environment variable (";" separated) takes precedence;
    otherwise the optional redirection file (by default "~/.home") is
    read, with the expanded default path used as fallback.

    :type file_path: String
    :param file_path: The path of the redirection file to be read.
    :type default: String
    :param default: The path used when no other source is available.
    :type encoding: String
    :param encoding: NOTE(review): currently unused — the redirection
    file is always decoded as utf-8 below; confirm intended usage.
    :type force_default: bool
    :param force_default: If the default path should be kept even when
    the redirection file exists.
    :rtype: List
    :return: The list of home directory paths.
    """

    global HOMES
    # return the cached value once it has been computed
    if HOMES: return HOMES

    # the HOMES environment variable takes precedence over everything else
    HOMES = os.environ.get("HOMES", None)
    HOMES = HOMES.split(";") if HOMES else HOMES
    if not HOMES == None: return HOMES

    # fall back to the expanded and normalized default path
    default = os.path.expanduser(default)
    default = os.path.abspath(default)
    default = os.path.normpath(default)
    HOMES = [default]

    file_path = os.path.expanduser(file_path)
    file_path = os.path.normpath(file_path)
    exists = os.path.exists(file_path)
    if not exists: return HOMES

    # the redirection file replaces the default unless explicitly kept
    if not force_default: del HOMES[:]

    file = open(file_path, "rb")
    try: data = file.read()
    finally: file.close()

    data = data.decode("utf-8")
    data = data.strip()
    paths = data.split()

    # each whitespace separated entry in the file is an extra home path
    for path in paths:
        path = path.strip()
        if not path: continue
        path = os.path.expanduser(path)
        path = os.path.abspath(path)
        path = os.path.normpath(path)
        HOMES.append(path)

    return HOMES
def _load_includes(base_path, config, encoding = "utf-8"):
    """
    Loads the include files referenced by the provided configuration
    under any of the supported directive aliases.

    The directive keys are popped (removed) from the configuration so
    that they are not propagated as "normal" configuration values
    (previously `get` was used, leaking the directives into CONFIGS).

    :type base_path: String
    :param base_path: The base directory used to resolve the include files.
    :type config: Dictionary
    :param config: The configuration map possibly containing directives;
    mutated in place (directive keys are removed).
    :type encoding: String
    :param encoding: The encoding used to read the include files.
    """

    includes = ()
    for alias in ("$import", "$include", "$IMPORT", "$INCLUDE"):
        includes = config.pop(alias, includes)
    # a string value is interpreted as a ";" separated list of files
    if legacy.is_string(includes):
        includes = includes.split(";")
    for include in includes:
        load_file(
            name = include,
            path = base_path,
            encoding = encoding
        )
def _is_devel():
    """
    Simple debug/development level detection mechanism to be
    used at load time to determine if the system is running
    under a development (debug) environment.

    This function should not be used at runtime as there are
    other (more powerful) mechanisms to archive the same
    type of results.

    :rtype: bool
    :return: If the current environment is running under a
    development type level of traceability.
    """

    level = conf("LEVEL", "INFO")
    return level in ("DEBUG",)
load()
new pop in include configuration
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Hive Netius System
# Copyright (c) 2008-2017 Hive Solutions Lda.
#
# This file is part of Hive Netius System.
#
# Hive Netius System is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by the Apache
# Foundation, either version 2.0 of the License, or (at your option) any
# later version.
#
# Hive Netius System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License along with
# Hive Netius System. If not, see <http://www.apache.org/licenses/>.
__author__ = "João Magalhães <joamag@hive.pt>"
""" The author(s) of the module """
__version__ = "1.0.0"
""" The version of the module """
__revision__ = "$LastChangedRevision$"
""" The revision number of the module """
__date__ = "$LastChangedDate$"
""" The last change date of the module """
__copyright__ = "Copyright (c) 2008-2017 Hive Solutions Lda."
""" The copyright for the module """
__license__ = "Apache License, Version 2.0"
""" The license for the module """
import os
import sys
import json
from . import legacy
# Default file names / lookup locations for the JSON based configuration.
FILE_NAME = "netius.json"
""" The default name of the file that is going to be
used for the loading of configuration values from json """

FILE_TEMPLATE = "netius.%s.json"
""" The template to be used in the construction of the
domain specific configuration file paths """

HOME_FILE = "~/.home"
""" The location of the file that may be used to "redirect"
the home directory contents to a different directory """

# Maps a target type to a casting callable; each cast is a no-op when the
# value already has the requested type.
CASTS = {
    bool : lambda v: v if type(v) == bool else v == "1",
    list : lambda v: v if type(v) == list else v.split(";"),
    tuple : lambda v: v if type(v) == tuple else tuple(v.split(";"))
}
""" The map containing the various cast method
operation associated with the various data types,
they provide a different type of casting strategy """

ENV_ENCODINGS = (
    "utf-8",
    sys.getdefaultencoding(),
    sys.getfilesystemencoding()
)
""" The sequence of encodings that are going to
be used to try to decode possible byte based strings
for the various environment variable values """

# Mutable module-level state: the configuration map, the registry of loaded
# files and the resolved home directories (lazily populated).
CONFIGS = {}
""" The map that contains the key value association
for all the currently set global configurations """

CONFIG_F = []
""" The list of files that have been used for the loading
of the configuration through this session, every time a
loading of configuration from a file occurs the same path
is added to this global list """

HOMES = []
""" Global reference to the paths to the directory considered
to be the home on in terms of configuration, this value should
be set on the initial loading of the ".home" file """
def conf(name, default = None, cast = None):
    """
    Retrieves the configuration value for the provided value
    defaulting to the provided default value in case no value
    is found for the provided name.

    An optional cast operation may be performed on the value
    in case it's requested.

    :type name: String
    :param name: The name of the configuration value to be
    retrieved.
    :type default: Object
    :param default: The default value to be retrieved in case
    no value was found for the provided name.
    :type cast: Type/String
    :param cast: The cast operation to be performed in the
    resolved value (optional).
    :rtype: Object
    :return: The value for the configuration with the requested
    name or the default value if no value was found.
    """

    is_string = type(cast) in legacy.STRINGS
    if is_string:
        # __builtins__ is a dict when this module is imported, but a module
        # object when running under __main__; support both so that string
        # based cast resolution does not depend on the import context
        builtins_map = __builtins__ if isinstance(__builtins__, dict) else vars(__builtins__)
        cast = builtins_map.get(cast, None)
    cast = CASTS.get(cast, cast)
    value = CONFIGS.get(name, default)
    # identity test instead of equality avoids surprises with values whose
    # __eq__ compares equal to None
    if cast and value is not None: value = cast(value)
    return value
def conf_prefix(prefix):
    """ Returns a map with only the currently set configuration
    values whose name starts with the provided prefix. """

    return dict(
        (name, value) for name, value in CONFIGS.items()
        if name.startswith(prefix)
    )
def conf_suffix(suffix):
    """ Returns a map with only the currently set configuration
    values whose name ends with the provided suffix. """

    return dict(
        (name, value) for name, value in CONFIGS.items()
        if name.endswith(suffix)
    )
def conf_s(name, value):
    """ Sets (or replaces) the configuration value for the provided
    name in the global configuration map. """

    CONFIGS[name] = value
def conf_d():
    """ Returns the global map with all the currently set
    configuration values (a live reference, not a copy). """
    return CONFIGS
def load(names = (FILE_NAME,), path = None, encoding = "utf-8"):
    """ Loads configuration from the candidate locations (each home
    directory and its .config sub-directory, the Python installation
    prefix and the explicitly provided path, in that order) and then
    from the environment variables. """

    candidates = []
    for home in get_homes():
        candidates.append(os.path.join(home))
        candidates.append(os.path.join(home, ".config"))
    candidates.append(sys.prefix)
    candidates.append(path)
    for candidate in candidates:
        for name in names:
            load_file(name = name, path = candidate, encoding = encoding)
    load_env()
def load_file(name = FILE_NAME, path = None, encoding = "utf-8"):
    """ Loads a single JSON configuration file, resolving its include
    directives, and merges the resulting values into the global
    configuration map. Missing or empty files are silently ignored. """

    # resolve the absolute and normalized path of the target file
    if path:
        file_path = os.path.join(os.path.normpath(path), name)
    else:
        file_path = name
    file_path = os.path.normpath(os.path.abspath(file_path))
    base_path = os.path.dirname(file_path)

    if not os.path.exists(file_path): return

    # (re-)register the file at the end of the loaded files registry
    if file_path in CONFIG_F: CONFIG_F.remove(file_path)
    CONFIG_F.append(file_path)

    with open(file_path, "rb") as file:
        data = file.read()
    if not data: return

    data_j = json.loads(data.decode(encoding))
    _load_includes(base_path, data_j, encoding = encoding)
    for key, value in data_j.items():
        CONFIGS[key] = value
def load_env():
    """
    Loads the complete set of environment variables into the global
    configuration map, decoding byte based values when possible.
    """
    config = dict(os.environ)
    homes = get_homes()
    for home in homes:
        _load_includes(home, config)
    for key, value in legacy.iteritems(config):
        # store the raw value first so the key is always present, even if
        # none of the candidate encodings below succeeds
        CONFIGS[key] = value
        is_bytes = legacy.is_bytes(value)
        if not is_bytes: continue
        # try the candidate encodings in order, keeping the first
        # successful decoding of the byte based value
        for encoding in ENV_ENCODINGS:
            try: value = value.decode(encoding)
            except UnicodeDecodeError: pass
            else: break
        CONFIGS[key] = value
def get_homes(
    file_path = HOME_FILE,
    default = "~",
    encoding = "utf-8",
    force_default = False
):
    """
    Resolves the sequence of "home" directories used for configuration
    loading, caching the result in the global HOMES list.

    The $HOMES environment variable (";" separated) takes precedence;
    otherwise the optional redirection file (by default "~/.home") is
    read, with the expanded default path used as fallback.

    :type file_path: String
    :param file_path: The path of the redirection file to be read.
    :type default: String
    :param default: The path used when no other source is available.
    :type encoding: String
    :param encoding: NOTE(review): currently unused — the redirection
    file is always decoded as utf-8 below; confirm intended usage.
    :type force_default: bool
    :param force_default: If the default path should be kept even when
    the redirection file exists.
    :rtype: List
    :return: The list of home directory paths.
    """

    global HOMES
    # return the cached value once it has been computed
    if HOMES: return HOMES

    # the HOMES environment variable takes precedence over everything else
    HOMES = os.environ.get("HOMES", None)
    HOMES = HOMES.split(";") if HOMES else HOMES
    if not HOMES == None: return HOMES

    # fall back to the expanded and normalized default path
    default = os.path.expanduser(default)
    default = os.path.abspath(default)
    default = os.path.normpath(default)
    HOMES = [default]

    file_path = os.path.expanduser(file_path)
    file_path = os.path.normpath(file_path)
    exists = os.path.exists(file_path)
    if not exists: return HOMES

    # the redirection file replaces the default unless explicitly kept
    if not force_default: del HOMES[:]

    file = open(file_path, "rb")
    try: data = file.read()
    finally: file.close()

    data = data.decode("utf-8")
    data = data.strip()
    paths = data.split()

    # each whitespace separated entry in the file is an extra home path
    for path in paths:
        path = path.strip()
        if not path: continue
        path = os.path.expanduser(path)
        path = os.path.abspath(path)
        path = os.path.normpath(path)
        HOMES.append(path)

    return HOMES
def _load_includes(base_path, config, encoding = "utf-8"):
    """
    Loads the include files referenced by the provided configuration
    under any of the supported directive aliases.

    The directive keys are popped (removed) from the configuration so
    that they are not propagated as "normal" configuration values.

    :type base_path: String
    :param base_path: The base directory used to resolve the include files.
    :type config: Dictionary
    :param config: The configuration map possibly containing directives;
    mutated in place (directive keys are removed).
    :type encoding: String
    :param encoding: The encoding used to read the include files.
    """
    includes = ()
    for alias in ("$import", "$include", "$IMPORT", "$INCLUDE"):
        includes = config.pop(alias, includes)
    # a string value is interpreted as a ";" separated list of files
    if legacy.is_string(includes):
        includes = includes.split(";")
    for include in includes:
        load_file(
            name = include,
            path = base_path,
            encoding = encoding
        )
def _is_devel():
    """
    Simple debug/development level detection mechanism to be
    used at load time to determine if the system is running
    under a development (debug) environment.

    This function should not be used at runtime as there are
    other (more powerful) mechanisms to archive the same
    type of results.

    :rtype: bool
    :return: If the current environment is running under a
    development type level of traceability.
    """

    level = conf("LEVEL", "INFO")
    return level in ("DEBUG",)
load()
|
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2015 MUJIN Inc.
# Mujin controller client for bin picking task
import os
# logging
import logging
log = logging.getLogger(__name__)
# mujin imports
from . import controllerclientbase
class BinpickingControllerClient(controllerclientbase.ControllerClientBase):
"""mujin controller client for bin picking task
"""
tasktype = 'binpicking'
sceneparams = {}
    def __init__(self, controllerurl, controllerusername, controllerpassword, robotControllerUri, scenepk, robotname, robotspeed, regionname, targetname, toolname, envclearance, binpickingzmqport=None, binpickingheartbeatport=None, binpickingheartbeattimeout=None, usewebapi=True, initializezmq=False, ctx=None):
        """logs into the mujin controller, initializes binpicking task, and sets up parameters
        :param controllerurl: url of the mujin controller, e.g. http://controller14
        :param controllerusername: username of the mujin controller, e.g. testuser
        :param controllerpassword: password of the mujin controller
        :param robotControllerUri: URI of the robot controller, e.g. tcp://192.168.13.201:7000?densowavearmgroup=5
        :param binpickingzmqport: port of the binpicking task's zmq server, e.g. 7110
        :param binpickingheartbeatport: port of the binpicking task's zmq server's heartbeat publisher, e.g. 7111
        :param binpickingheartbeattimeout: seconds until reinitializing binpicking task's zmq server if no hearbeat is received, e.g. 7
        :param scenepk: pk of the bin picking task scene, e.g. irex2013.mujin.dae
        :param robotname: name of the robot, e.g. VP-5243I
        :param robotspeed: speed of the robot, e.g. 0.4
        :param regionname: name of the bin, e.g. container1
        :param targetname: name of the target, e.g. plasticnut-center
        :param toolname: name of the manipulator, e.g. 2BaseZ
        :param envclearance: environment clearance in milimeter, e.g. 20
        :param usewebapi: whether to use webapi for controller commands
        :param initializezmq: forwarded to the base client's initialization
        :param ctx: forwarded to the base client (presumably a zmq context; confirm against base class)
        """
        super(BinpickingControllerClient, self).__init__(controllerurl, controllerusername, controllerpassword, binpickingzmqport, binpickingheartbeatport, binpickingheartbeattimeout, self.tasktype, scenepk, initializezmq, usewebapi, ctx)

        # robot controller
        self.robotControllerUri = robotControllerUri

        # bin picking task defaults used by the command wrappers below
        self.scenepk = scenepk
        self.robotname = robotname
        self.robotspeed = robotspeed
        self.regionname = regionname
        self.targetname = targetname
        self.toolname = toolname
        self.envclearance = envclearance

        sceneuri = controllerclientbase.GetURIFromPrimaryKey(self.scenepk)
        # for now (HACK) need to set the correct scenefilename. newer version of mujin controller need only scenepk, so remove scenefilename eventually
        mujinpath = os.path.join(os.environ.get('MUJIN_MEDIA_ROOT_DIR', '/var/www/media/u'), controllerusername)
        scenefilename = controllerclientbase.GetFilenameFromURI(sceneuri, mujinpath)[1]
        self.sceneparams = {'scenetype': 'mujincollada', 'sceneuri':sceneuri, 'scenefilename': scenefilename, 'scale': [1.0, 1.0, 1.0]} # TODO: set scenetype according to the scene
def ReloadModule(self, timeout=None, **kwargs):
return self.ExecuteCommand({'command': 'ReloadModule', 'sceneparams': self.sceneparams, 'tasktype': self.tasktype}, timeout=timeout, **kwargs)
#########################
# robot commands
#########################
def ExecuteRobotCommand(self, taskparameters, robotspeed=None, usewebapi=None, timeout=None):
"""wrapper to ExecuteCommand with robot info set up in taskparameters
executes a command on the task.
:return: a dictionary that contains:
- robottype: robot type,string
- currentjointvalues: current joint values, DOF floats
- elapsedtime: elapsed time in seconds, float
- numpoints: the number of points, int
- error: optional error info, dictionary
- desc: error message, string
- type: error type, string
- errorcode: error code, string
"""
robotname = self.robotname
robotControllerUri = self.robotControllerUri
taskparameters['robot'] = robotname
taskparameters['robotControllerUri'] = robotControllerUri
if taskparameters.get('speed', None) is None:
# taskparameters does not have robotspeed, so set the global speed
if robotspeed is None:
taskparameters['robotspeed'] = self.robotspeed
else:
taskparameters['robotspeed'] = robotspeed
return self.ExecuteCommand(taskparameters, usewebapi, timeout=timeout)
def ExecuteTrajectory(self, trajectoryxml, robotspeed=None, timeout=None, **kwargs):
"""Executes a trajectory on the robot from a serialized Mujin Trajectory XML file.
"""
taskparameters = {'command': 'ExecuteTrajectory',
'trajectory': trajectoryxml,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def MoveJoints(self, jointvalues, jointindices=None, robotspeed=None, execute=1, startvalues=None, densowavearmgroup=None, usewebapi=False, timeout=None, **kwargs):
"""moves the robot to desired joint angles specified in jointvalues
:param jointvalues: list of joint values
:param jointindices: list of corresponding joint indices, default is range(len(jointvalues))
:param robotspeed: value in [0,1] of the percentage of robot speed to move at
:param envclearance: environment clearance in milimeter
"""
if jointindices is None:
jointindices = range(len(jointvalues))
log.warn('no jointindices specified, moving joints with default jointindices: ', jointindices)
taskparameters = {'command': 'MoveJoints',
'goaljoints': list(jointvalues),
'jointindices': list(jointindices),
'envclearance': self.envclearance,
'execute': execute,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
if startvalues is not None:
taskparameters['startvalues'] = list(startvalues)
if densowavearmgroup is not None:
taskparameters['densowavearmgroup'] = densowavearmgroup
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, usewebapi=usewebapi, timeout=timeout)
def UnchuckGripper(self, toolname=None, targetname=None, robotspeed=None, timeout=None):
"""unchucks the manipulator and releases the target
:param toolname: name of the manipulator, default is self.toolname
:param targetname: name of the target, default is self.targetname
"""
if toolname is None:
toolname = self.toolname
if targetname is None:
targetname = self.targetname
taskparameters = {'command': 'UnchuckGripper',
'toolname': toolname,
'targetname': targetname,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def ChuckGripper(self, toolname=None, robotspeed=None, timeout=None):
"""chucks the manipulator
:param toolname: name of the manipulator, default is self.toolname
"""
if toolname is None:
toolname = self.toolname
taskparameters = {'command': 'ChuckGripper',
'toolname': toolname,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def GetJointValues(self, timeout=None, **kwargs):
"""gets the current robot joint values
:return: current joint values in a json dictionary with
- currentjointvalues: [0,0,0,0,0,0]
"""
taskparameters = {'command': 'GetJointValues',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
# NOTE: if you are using densowave robot, you always need to set "densowavearmgroup"
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def GetManipulatorTransformInRobotFrame(self, timeout=None):
"""gets the transform of the manipulator in robot frame
:return: current transform of the manipulator in robot frame in a json dictionary, e.g. {'translation': [100,200,300], 'rotationmat': [[1,0,0],[0,1,0],[0,0,1]], 'quaternion': [1,0,0,0]}
"""
taskparameters = {'command': 'GetManipTransformToRobot',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def PickAndPlace(self, goaltype, goals, targetnamepattern=None, approachoffset=30, departoffsetdir=[0, 0, 50], destdepartoffsetdir=[0, 0, 30], deletetarget=0, debuglevel=4, movetodestination=1, freeinc=[0.08], worksteplength=None, densowavearmgroup=5, regionname=None, cameranames=None, envclearance=15, toolname=None, robotspeed=0.5, timeout=None, **kwargs):
"""picks up an object with the targetnamepattern and places it down at one of the goals. First computes the entire plan from robot moving to a grasp and then moving to its destination, then runs it on the real robot. Task finishes once the real robot is at the destination.
:param desttargetname: The destination target name where the destination goal ikparams come from
:param destikparamnames: A list of lists of ikparam names for the destinations of the target. Only destikparamnames[0] is looked at and tells the system to place the part in any of the ikparams in destikparamnames[0]
:param targetnamepattern: regular expression describing the name of the object, default is '%s_\d+'%(self.targetname). See https://docs.python.org/2/library/re.html
:param approachoffset: distance in milimeter to move straight to the grasp point, e.g. 30 mm
:param departoffsetdir: the direction and distance in mm to move the part in global frame (usually along negative gravity) after it is grasped, e.g. [0,0,50]
:param destdepartoffsetdir: the direction and distance in mm to move away from the object after it is placed, e.g. [0,0,30]. Depending on leaveoffsetintool parameter, this can in the global coordinate system or tool coordinate system.
:param leaveoffsetintool: If 1, destdepartoffsetdir is in the tool coordinate system. If 0, destdepartoffsetdir is in the global coordinate system. By default this is 0.
:param deletetarget: whether to delete target after pick and place is done
:param toolname: name of the manipulator
:param regionname: name of the region of the objects
:param cameranames: the names of the cameras to avoid occlusions with the robot, list of strings
:param envclearance: environment clearance in milimeter
Low level planning parameters:
:param debuglevel: sets debug level of the task
:param movetodestination: planning parameter
:param freeinc: planning parameter
:param worksteplength: planning parameter
:param densowavearmgroup: planning parameter
:param graspsetname: the name of the grasp set belong to the target objects to use for the target. Grasp sets are a list of ikparams
Manual Destination Specification (deprecated)
:param goaltype: type of the goal, e.g. translationdirection5d or transform6d
:param goals: flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
"""
if worksteplength is None:
worksteplength = 0.01
if toolname is None:
toolname = self.toolname
if targetnamepattern is None:
targetnamepattern = '%s_\d+' % (self.targetname)
if regionname is None:
regionname = self.regionname
if robotspeed is None:
robotspeed = self.robotspeed
taskparameters = {'command': 'PickAndPlace',
'toolname': toolname,
'goaltype': goaltype,
'envclearance': envclearance,
'movetodestination': movetodestination,
'goals': goals,
'approachoffset': approachoffset,
'departoffsetdir': departoffsetdir,
'destdepartoffsetdir': destdepartoffsetdir,
'freeinc': freeinc,
'worksteplength': worksteplength,
'targetnamepattern': targetnamepattern,
'containername': regionname,
'deletetarget': deletetarget,
'robotspeed': robotspeed,
'debuglevel': debuglevel,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def StartPickAndPlaceThread(self, goaltype=None, goals=None, targetnamepattern=None, approachoffset=30, departoffsetdir=[0, 0, 50], destdepartoffsetdir=[0, 0, 30], deletetarget=0, debuglevel=4, movetodestination=1, worksteplength=None, regionname=None, envclearance=15, toolname=None, robotspeed=None, timeout=None, **kwargs):
"""Start a background loop to continuously pick up objects with the targetnamepattern and place them down at the goals. The loop will check new objects arriving in and move the robot as soon as it finds a feasible grasp. The thread can be quit with StopPickPlaceThread.
:param desttargetname: The destination target name where the destination goal ikparams come from
:param destikparamnames: A list of lists of ikparam names for the ordered destinations of the target. destikparamnames[0] is where the first picked up part goes, desttargetname[1] is where the second picked up target goes.
:param cycledests: When finished cycling through all destikparamnames, will delete all the targets and start from the first index again doing this for cycledests times. By default it is 1.
:param targetnamepattern: regular expression describing the name of the object, default is '%s_\d+'%(self.targetname). See https://docs.python.org/2/library/re.html
:param approachoffset: distance in milimeter to move straight to the grasp point, e.g. 30 mm
:param departoffsetdir: the direction and distance in mm to move the part in global frame (usually along negative gravity) after it is grasped, e.g. [0,0,50]
:param destdepartoffsetdir: the direction and distance in mm to move away from the object after it is placed, e.g. [0,0,30]. Depending on leaveoffsetintool parameter, this can in the global coordinate system or tool coordinate system.
:param leaveoffsetintool: If 1, destdepartoffsetdir is in the tool coordinate system. If 0, destdepartoffsetdir is in the global coordinate system. By default this is 0.
:param deletetarget: whether to delete target after pick and place is done
:param toolname: name of the manipulator
:param regionname: name of the region of the objects
:param cameranames: the names of the cameras to avoid occlusions with the robot, list of strings
:param envclearance: environment clearance in milimeter
Low level planning parameters:
:param debuglevel: sets debug level of the task
:param movetodestination: planning parameter
:param worksteplength: planning parameter
:param densowavearmgroup: robot parameters
:param graspsetname: the name of the grasp set belong to the target objects to use for the target. Grasp sets are a list of ikparams
:param goaltype: type of the goal, e.g. translationdirection5d
:param goals: flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
:param useworkspaceplanner: If 1 is set, will try the workspace planner for moving the hand straight. If 2 is set, will try the RRT for moving straight. Can set 3 for trying both.
"""
if worksteplength is None:
worksteplength = 0.01
if toolname is None:
toolname = self.toolname
if targetnamepattern is None:
targetnamepattern = '%s_\d+' % (self.targetname)
if regionname is None:
regionname = self.regionname
if robotspeed is None:
robotspeed = self.robotspeed
taskparameters = {'command': 'StartPickAndPlaceThread',
'toolname': toolname,
'envclearance': envclearance,
'movetodestination': movetodestination,
'approachoffset': approachoffset,
'departoffsetdir': departoffsetdir,
'destdepartoffsetdir': destdepartoffsetdir,
'worksteplength': worksteplength,
'targetnamepattern': targetnamepattern,
'containername': regionname,
'deletetarget': deletetarget,
'robotspeed': robotspeed,
'debuglevel': debuglevel,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
if goals is not None:
taskparameters['orderedgoals'] = goals
taskparameters['goaltype'] = goaltype
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def StopPickPlaceThread(self, timeout=None, **kwargs):
"""stops the pick and place thread started with StartPickAndPlaceThread
"""
taskparameters = {'command': 'StopPickPlaceThread',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype}
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def GetPickPlaceStatus(self, timeout=None, **kwargs):
"""gets the status of the pick and place thread
:return: status (0: not running, 1: no error, 2: error) of the pick and place thread in a json dictionary, e.g. {'status': 2, 'error': 'an error happened'}
"""
taskparameters = {'command': 'GetPickPlaceStatus',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def MoveToHandPosition(self, goaltype, goals, toolname=None, envclearance=None, closegripper=0, robotspeed=None, timeout=None):
"""Computes the inverse kinematics and moves the manipulator to any one of the goals specified.
:param goaltype: type of the goal, e.g. translationdirection5d
:param goals: flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
:param toolname: name of the manipulator, default is self.toolname
:param envclearance: clearance in milimeter, default is self.envclearance
:param closegripper: whether to close gripper once the goal is reached, default is 0
"""
if toolname is None:
toolname = self.toolname
if envclearance is None:
envclearance = self.envclearance
taskparameters = {'command': 'MoveToHandPosition',
'goaltype': goaltype,
'goals': goals,
'toolname': toolname,
'envclearance': envclearance,
'closegripper': closegripper,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def ComputeIK(self, timeout=None, **kwargs):
"""
:param toolname: tool name, string
:param limit: number of solutions to return, int
:param iktype: grasp (but basically the just the ikparam), string
:param quaternion: grasp (but basically the just the ikparam) quaternion in world cooordinates, float array
:param translation: grasp (but basically the just the ikparam) translation in world cooordinates in mm, float array
:param direction: grasp (but basically the just the ikparam) direction in world cooordinates, float array
:param angle: grasp (but basically the just the ikparam) angle in world cooordinates, float
:param freeincvalue: float, the discretization of the free joints of the robot when computing ik.
:param filteroptions: OpenRAVE IkFilterOptions bitmask. By default this is 1, which means all collisions are checked, int
:param preshape: If the tool has fingers after the end effector, specify their values. The gripper DOFs come from **gripper_dof_pks** field from the tool., float array
:return: A dictionary of:
- solutions: array of IK solutions (each of which is an array of DOF values), sorted by minimum travel distance and truncated to match the limit
"""
taskparameters = {'command': 'ComputeIK',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
if 'toolname' not in taskparameters:
taskparameters['toolname'] = self.toolname
if 'envclearance' not in taskparameters:
taskparameters['envclearance'] = self.envclearance
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def ComputeIKFromParameters(self, timeout=None, **kwargs):
"""
:param toolname: tool name, string
:param limit: number of solutions to return, int
:param ikparamnames: the ikparameter names, also contains information about the grasp like the preshape
:param targetname: the target object name that the ikparamnames belong to
:param freeincvalue: float, the discretization of the free joints of the robot when computing ik.
:param filteroptions: OpenRAVE IkFilterOptions bitmask. By default this is 1, which means all collisions are checked, int
:return: A dictionary of:
- solutions: array of IK solutions (each of which is an array of DOF values), sorted by minimum travel distance and truncated to match the limit
"""
taskparameters = {'command': 'ComputeIKFromParameters',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
if 'toolname' not in taskparameters:
taskparameters['toolname'] = self.toolname
if 'envclearance' not in taskparameters:
taskparameters['envclearance'] = self.envclearance
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def InitializePartsWithPhysics(self, timeout=None, **kwargs):
"""Start a physics simulation where the parts drop down into the bin. The method returns as soon as the physics is initialized, user has to wait for the "duration" or call StopPhysicsThread command.
:param targeturi: the target uri to initialize the scene with
:param numtargets: the number of targets to create
:param regionname: the container name to drop the targets into
:param duration: the duration in seconds to continue the physics until it is stopped.
:param basename: The basename to give to all the new target names. Numbers are suffixed at the end, like basename+'0134'. If not specified, will use a basename derived from the targeturi.
:param deleteprevious: if True, will delete all the previous targets in the scene. By default this is True.
"""
taskparameters = {'command': 'InitializePartsWithPhysics',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
if 'containername' not in taskparameters:
taskparameters['containername'] = self.regionname
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def StopPhysicsThread(self, timeout=None, **kwargs):
"""stops the physics simulation started with InitializePartsWithPhysics
"""
taskparameters = {'command': 'StopPhysicsThread',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def JitterPartUntilValidGrasp(self, timeout=None, **kwargs):
"""Select a part that wasn't able to be grasped and jitter its location such that a grasp set is found for it that will take it to the destination.
:param toolname: name of the manipulator
:param targetname: The target to try to grasp.
:param graspsetname: the name of the grasp set belong to the target objects to use for the target. Grasp sets are a list of ikparams.
:param approachoffset: The approach distance for simulating full grasp.
:param departoffsetdir: The depart distance for simulating full grasp.
:param destdepartoffsetdir: the direction and distance in mm to move away from the object after it is placed, e.g. [0,0,30]. Depending on leaveoffsetintool parameter, this can in the global coordinate system or tool coordinate system.
:param leaveoffsetintool: If 1, destdepartoffsetdir is in the tool coordinate system. If 0, destdepartoffsetdir is in the global coordinate system. By default this is 0.
:param desttargetname: The destination target name where the destination goal ikparams come from. If no name is specified, then robot won't consider putting the target into the destination when it searches for grasps.
:param destikparamnames: A list of lists of ikparam names for the ordered destinations of the target. destikparamnames[0] is where the first picked up part goes, desttargetname[1] is where the second picked up target goes.
:param jitterdist: Amount to jitter the target object translation by
:param jitterangle: Amount to jitter the target object's orientation angle
:param jitteriters: Number of times to try jittering before giving up.
:return: If failed, an empty dictionary. If succeeded, a dictionary with the following keys:
- translation: the new translation of the target part
- quaternion: the new quaternion of the target part
- jointvalues: robot joint values that are grasping the part (fingers are at their preshape).
- graspname: the grasp name used for jointvalues. If empty, then no grasp was found.
- destikname: the name of the destination ikparam that was chosen with the grasp
- destjointvalues: robot joint values at one of the specified destinations (fingers are at their final positions).
- desttranslation: the new translation of the target part
- destquaternion: the new quaternion of the target part
"""
taskparameters = {'command': 'JitterPartUntilValidGrasp',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype}
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
####################
# scene commands
####################
def IsRobotOccludingBody(self, bodyname, cameraname, timeout=None):
"""returns if the robot is occluding body in the view of the specified camera
:param bodyname: name of the object
:param cameraname: name of the camera
:return: the occlusion state in a json dictionary, e.g. {'occluded': 0}
"""
taskparameters = {'command': 'IsRobotOccludingBody',
'robotname': self.robotname,
'bodyname': bodyname,
'cameraname': cameraname,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetPickedPositions(self, unit='m', timeout=None):
"""returns the poses and the timestamps of the picked objects
:param robotname: name of the robot
:param unit: unit of the translation
:return: the positions and the timestamps of the picked objects in a json dictionary, info of each object has the format of quaternion (w,x,y,z) followed by x,y,z translation (in mm) followed by timestamp in milisecond e.g. {'positions': [[1,0,0,0,100,200,300,1389774818.8366449],[1,0,0,0,200,200,300,1389774828.8366449]]}
"""
taskparameters = {'command': 'GetPickedPositions',
'robotname': self.robotname,
'unit': unit,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteCommand(taskparameters, timeout=timeout)
def UpdateObjects(self, envstate, targetname=None, unit="m", timeout=None):
"""updates objects in the scene with the envstate
:param envstate: a list of dictionaries for each instance object in world frame. quaternion is specified in w,x,y,z order. e.g. [{'name': 'target_0', 'translation_': [1,2,3], 'quat_': [1,0,0,0]}, {'name': 'target_1', 'translation_': [2,2,3], 'quat_': [1,0,0,0]}]
:param unit: unit of envstate
"""
if targetname is None:
targetname = self.targetname
taskparameters = {'command': 'UpdateObjects',
'objectname': targetname,
'object_uri': u'mujin:/%s.mujin.dae' % (targetname),
'robot': self.robotname,
'envstate': envstate,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
'unit': unit,
}
return self.ExecuteCommand(taskparameters, timeout=timeout)
def Grab(self, targetname, toolname=None, timeout=None):
"""grabs an object with tool
:param targetname: name of the object
:param robotname: name of the robot
:param toolname: name of the manipulator, default is self.toolname
"""
if toolname is None:
toolname = self.toolname
taskparameters = {'command': 'Grab',
'targetname': targetname,
'robotname': self.robotname,
'toolname': toolname,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetGrabbed(self, timeout=None):
"""gets the names of the grabbed objects
:return: names of the grabbed object in a json dictionary, e.g. {'names': ['target_0']}
"""
taskparameters = {'command': 'GetGrabbed',
'robotname': self.robotname,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetTransform(self, targetname, unit='mm', timeout=None):
"""gets the transform of an object
:param targetname: name of the object
:param unit: unit of the result translation
:return: transform of the object in a json dictionary, e.g. {'translation': [100,200,300], 'rotationmat': [[1,0,0],[0,1,0],[0,0,1]], 'quaternion': [1,0,0,0]}
"""
taskparameters = {'command': 'GetTransform',
'targetname': targetname,
'unit': unit,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteCommand(taskparameters, timeout=timeout)
def SetTransform(self, targetname, translation, unit='mm', rotationmat=None, quaternion=None, timeout=None):
"""sets the transform of an object
:param targetname: name of the object
:param translation: list of x,y,z value of the object in milimeter
:param unit: unit of translation
:param rotationmat: list specifying the rotation matrix in row major format, e.g. [1,0,0,0,1,0,0,0,1]
:param quaternion: list specifying the quaternion in w,x,y,z format, e.g. [1,0,0,0]
"""
taskparameters = {'command': 'SetTransform',
'targetname': targetname,
'unit': unit,
'translation': translation,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
if rotationmat is not None:
taskparameters['rotationmat'] = rotationmat
if quaternion is not None:
taskparameters['quaternion'] = quaternion
if rotationmat is None and quaternion is None:
taskparameters['quaternion'] = [1, 0, 0, 0]
log.warn('no rotation is specified, using identity quaternion ', taskparameters['quaternion'])
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetAABB(self, targetname, unit='mm', timeout=None):
"""Gets the axis aligned bounding box of object
:param targetname: name of the object
:param unit: unit of the AABB
:return: AABB of the object, e.g. {'pos': [1000,400,100], 'extents': [100,200,50]}
"""
taskparameters = {'command': 'GetAABB',
'targetname': targetname,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
'unit': unit,
}
return self.ExecuteCommand(taskparameters, timeout=timeout)
def RemoveObjectsWithPrefix(self, prefix, timeout=None):
"""removes objects with prefix
"""
taskparameters = {'command': 'RemoveObjectsWithPrefix',
'prefix': prefix,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteCommand(taskparameters, timeout=timeout)
def SaveScene(self, timeout=None, **kwargs):
"""saves the current scene to file
:param filename: e.g. /tmp/testscene.mujin.dae, if not specified, it will be saved with an auto-generated filename
:param preserveexternalrefs: If True, any bodies currently that are being externally referenced from the environment will be saved as external references.
:param externalref: If '*', then will save each of the objects as externally referencing their original filename. Otherwise will force saving specific bodies as external references
:param saveclone: If 1, will save the scenes for all the cloned environments
:return: the actual filename the scene is saved to in a json dictionary, e.g. {'filename': '2013-11-01-17-10-00-UTC.dae'}
"""
taskparameters = {'command': 'SaveScene',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetTrajectoryLog(self, timeout=None, **kwargs):
"""Gets the recent trajectories executed on the binpicking server. The internal server keeps trajectories around for 10 minutes before clearing them.
:param startindex: int, start of the trajectory to get. If negative, will start counting from the end. For example, -1 is the last element, -2 is the second to last element.
:param num: int, number of trajectories from startindex to return. If 0 will return all the trajectories starting from startindex
:param includejointvalues: bool, If True will include timedjointvalues, if False will just give back the trajectories. Defautl is False
:return:
total: 10
trajectories: [
{
"timestarted": 12345215
"name": "movingtodest",
"numpoints": 100,
"duration": 0.8,
"timedjointvalues": [0, 0, 0, .....]
},
{ ... }
]
Where timedjointvalues is a list joint values and the trajectory time. For a 3DOF robot sampled at 0.008s, this is
[J1, J2, J3, 0, J1, J2, J3, 0.008, J1, J2, J3, 0.016, ...]
"""
taskparameters = {'command': 'GetTrajectoryLog',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetPickAndPlaceLog(self, timeout=None, **kwargs):
"""Gets the recent pick-and-place log executed on the binpicking server. The internal server keeps the log around until the next Pick-and-place command is executed.
:param startindex: int, start of the trajectory to get. If negative, will start counting from the end. For example, -1 is the last element, -2 is the second to last element.
:param num: int, number of trajectories from startindex to return. If 0 will return all the trajectories starting from startindex
:return:
total: 10
messages: [
{
"message":"message1",
"type":"",
"level":0,
"data": {
"jointvalues":[0,0,0,0,0,0]
}
},
]
"""
taskparameters = {'command': 'GetPickAndPlaceLog',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
return self.ExecuteCommand(taskparameters, timeout=timeout)
def MoveRobotOutOfCameraOcclusion(self, regionname=None, robotspeed=None, toolname=None, timeout=None, **kwargs):
"""moves the robot out of camera occlusion and deletes targets if it was in occlusion.
:param toolname: name of the tool to move when avoiding
:param cameranames: the names of the cameras to avoid occlusions with the robot, list of strings
"""
if regionname is None:
regionname = self.regionname
if toolname is None:
toolname = self.toolname
taskparameters = {'command': 'MoveRobotOutOfCameraOcclusion',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
'containername': regionname,
'toolname': toolname
}
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
#######################
# unsupported commands
#######################
def UnchuckManipulator(self, *args, **kwargs):
log.warn('deprecated')
return self.UnchuckGripper(*args, **kwargs)
def ChuckManipulator(self, *args, **kwargs):
log.warn('deprecated')
return self.ChuckGripper(*args, **kwargs)
def __StartBackgroundTask(self, taskname, robotspeed=None, timeout=None):
"""starts a background task (need testing)
:param taskname: name of the background task
"""
taskparameters = {'command': 'ExecuteBackgroundTask',
'taskname': taskname,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def __StopBackgroundTask(self, timeout=None):
"""stops the background task (need testing)
assumes that only one background task is running
"""
taskparameters = {'command': 'StopBackgroundTask',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def __PickAndMove(self, goaltype, armjointvaluesgoals, destinationgoals=None, targetnames=None, movetodestination=0, deletetarget=1, startvalues=None, toolname=None, envclearance=20, regionname=None, robotspeed=None, timeout=None):
"""deprecated
"""
if toolname is None:
toolname = self.toolname
taskparameters = {'command': 'PickAndMove',
'toolname': toolname,
'goaltype': goaltype,
'envclearance': envclearance,
'movetodestination': movetodestination,
'deletetarget': deletetarget,
'armjointvaluesgoals': list(armjointvaluesgoals),
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
if regionname is not None:
taskparameters['boxname'] = regionname # TODO: update backend
if destinationgoals is not None:
taskparameters['goals'] = destinationgoals
if targetnames is not None:
taskparameters['targetnames'] = targetnames
if startvalues is not None:
taskparameters['startvalues'] = list(startvalues)
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
added function prototypes to binpicking
# -*- coding: utf-8 -*-
# Copyright (C) 2013-2015 MUJIN Inc.
# Mujin controller client for bin picking task
import os
# logging
import logging
log = logging.getLogger(__name__)
# mujin imports
from . import controllerclientbase
class BinpickingControllerClient(controllerclientbase.ControllerClientBase):
"""mujin controller client for bin picking task
"""
tasktype = 'binpicking'
sceneparams = {}
    def __init__(self, controllerurl, controllerusername, controllerpassword, robotControllerUri, scenepk, robotname, robotspeed, regionname, targetname, toolname, envclearance, binpickingzmqport=None, binpickingheartbeatport=None, binpickingheartbeattimeout=None, usewebapi=True, initializezmq=False, ctx=None):
        """logs into the mujin controller, initializes binpicking task, and sets up parameters
        :param controllerurl: url of the mujin controller, e.g. http://controller14
        :param controllerusername: username of the mujin controller, e.g. testuser
        :param controllerpassword: password of the mujin controller
        :param robotControllerUri: URI of the robot controller, e.g. tcp://192.168.13.201:7000?densowavearmgroup=5
        :param binpickingzmqport: port of the binpicking task's zmq server, e.g. 7110
        :param binpickingheartbeatport: port of the binpicking task's zmq server's heartbeat publisher, e.g. 7111
        :param binpickingheartbeattimeout: seconds until reinitializing binpicking task's zmq server if no hearbeat is received, e.g. 7
        :param scenepk: pk of the bin picking task scene, e.g. irex2013.mujin.dae
        :param robotname: name of the robot, e.g. VP-5243I
        :param robotspeed: speed of the robot, e.g. 0.4
        :param regionname: name of the bin, e.g. container1
        :param targetname: name of the target, e.g. plasticnut-center
        :param toolname: name of the manipulator, e.g. 2BaseZ
        :param envclearance: environment clearance in milimeter, e.g. 20
        :param usewebapi: whether to use webapi for controller commands
        :param initializezmq: whether to initialize the task's zmq server on login
        :param ctx: optional existing zmq context to reuse
        """
        # base class handles login and task/zmq setup
        super(BinpickingControllerClient, self).__init__(controllerurl, controllerusername, controllerpassword, binpickingzmqport, binpickingheartbeatport, binpickingheartbeattimeout, self.tasktype, scenepk, initializezmq, usewebapi, ctx)
        # robot controller
        self.robotControllerUri = robotControllerUri
        # bin picking task defaults; individual commands may override them per call
        self.scenepk = scenepk
        self.robotname = robotname
        self.robotspeed = robotspeed
        self.regionname = regionname
        self.targetname = targetname
        self.toolname = toolname
        self.envclearance = envclearance
        sceneuri = controllerclientbase.GetURIFromPrimaryKey(self.scenepk)
        # for now (HACK) need to set the correct scenefilename. newer version of mujin controller need only scenepk, so remove scenefilename eventually
        mujinpath = os.path.join(os.environ.get('MUJIN_MEDIA_ROOT_DIR', '/var/www/media/u'), controllerusername)
        scenefilename = controllerclientbase.GetFilenameFromURI(sceneuri, mujinpath)[1]
        # sceneparams is sent along with every task command
        self.sceneparams = {'scenetype': 'mujincollada', 'sceneuri':sceneuri, 'scenefilename': scenefilename, 'scale': [1.0, 1.0, 1.0]} # TODO: set scenetype according to the scene
def ReloadModule(self, timeout=None, **kwargs):
return self.ExecuteCommand({'command': 'ReloadModule', 'sceneparams': self.sceneparams, 'tasktype': self.tasktype}, timeout=timeout, **kwargs)
#########################
# robot commands
#########################
def ExecuteRobotCommand(self, taskparameters, robotspeed=None, usewebapi=None, timeout=None):
"""wrapper to ExecuteCommand with robot info set up in taskparameters
executes a command on the task.
:return: a dictionary that contains:
- robottype: robot type,string
- currentjointvalues: current joint values, DOF floats
- elapsedtime: elapsed time in seconds, float
- numpoints: the number of points, int
- error: optional error info, dictionary
- desc: error message, string
- type: error type, string
- errorcode: error code, string
"""
robotname = self.robotname
robotControllerUri = self.robotControllerUri
taskparameters['robot'] = robotname
taskparameters['robotControllerUri'] = robotControllerUri
if taskparameters.get('speed', None) is None:
# taskparameters does not have robotspeed, so set the global speed
if robotspeed is None:
taskparameters['robotspeed'] = self.robotspeed
else:
taskparameters['robotspeed'] = robotspeed
return self.ExecuteCommand(taskparameters, usewebapi, timeout=timeout)
def ExecuteTrajectory(self, trajectoryxml, robotspeed=None, timeout=None, **kwargs):
"""Executes a trajectory on the robot from a serialized Mujin Trajectory XML file.
"""
taskparameters = {'command': 'ExecuteTrajectory',
'trajectory': trajectoryxml,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def MoveJoints(self, jointvalues, jointindices=None, robotspeed=None, execute=1, startvalues=None, timeout=None, **kwargs):
"""moves the robot to desired joint angles specified in jointvalues
:param jointvalues: list of joint values
:param jointindices: list of corresponding joint indices, default is range(len(jointvalues))
:param robotspeed: value in [0,1] of the percentage of robot speed to move at
:param envclearance: environment clearance in milimeter
"""
if jointindices is None:
jointindices = range(len(jointvalues))
log.warn('no jointindices specified, moving joints with default jointindices: ', jointindices)
taskparameters = {'command': 'MoveJoints',
'goaljoints': list(jointvalues),
'jointindices': list(jointindices),
'envclearance': self.envclearance,
'execute': execute,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
if startvalues is not None:
taskparameters['startvalues'] = list(startvalues)
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def UnchuckGripper(self, toolname=None, targetname=None, robotspeed=None, timeout=None):
"""unchucks the manipulator and releases the target
:param toolname: name of the manipulator, default is self.toolname
:param targetname: name of the target, default is self.targetname
"""
if toolname is None:
toolname = self.toolname
if targetname is None:
targetname = self.targetname
taskparameters = {'command': 'UnchuckGripper',
'toolname': toolname,
'targetname': targetname,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def ChuckGripper(self, toolname=None, robotspeed=None, timeout=None):
"""chucks the manipulator
:param toolname: name of the manipulator, default is self.toolname
"""
if toolname is None:
toolname = self.toolname
taskparameters = {'command': 'ChuckGripper',
'toolname': toolname,
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def GetJointValues(self, timeout=None, **kwargs):
"""gets the current robot joint values
:return: current joint values in a json dictionary with
- currentjointvalues: [0,0,0,0,0,0]
"""
taskparameters = {'command': 'GetJointValues',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
# NOTE: if you are using densowave robot, you always need to set "densowavearmgroup"
taskparameters.update(kwargs)
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def GetManipulatorTransformInRobotFrame(self, timeout=None):
"""gets the transform of the manipulator in robot frame
:return: current transform of the manipulator in robot frame in a json dictionary, e.g. {'translation': [100,200,300], 'rotationmat': [[1,0,0],[0,1,0],[0,0,1]], 'quaternion': [1,0,0,0]}
"""
taskparameters = {'command': 'GetManipTransformToRobot',
'sceneparams': self.sceneparams,
'tasktype': self.tasktype,
}
return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def PickAndPlace(self, goaltype, goals, targetnamepattern=None, approachoffset=30, departoffsetdir=None, destdepartoffsetdir=None, deletetarget=0, debuglevel=4, movetodestination=1, freeinc=None, worksteplength=None, densowavearmgroup=5, regionname=None, cameranames=None, envclearance=15, toolname=None, robotspeed=0.5, timeout=None, **kwargs):
    r"""picks up an object with the targetnamepattern and places it down at one of the goals. First computes the entire plan from robot moving to a grasp and then moving to its destination, then runs it on the real robot. Task finishes once the real robot is at the destination.

    :param desttargetname: The destination target name where the destination goal ikparams come from
    :param destikparamnames: A list of lists of ikparam names for the destinations of the target. Only destikparamnames[0] is looked at and tells the system to place the part in any of the ikparams in destikparamnames[0]
    :param targetnamepattern: regular expression describing the name of the object, default is '%s_\d+'%(self.targetname). See https://docs.python.org/2/library/re.html
    :param approachoffset: distance in milimeter to move straight to the grasp point, e.g. 30 mm
    :param departoffsetdir: the direction and distance in mm to move the part in global frame (usually along negative gravity) after it is grasped, default [0,0,50]
    :param destdepartoffsetdir: the direction and distance in mm to move away from the object after it is placed, default [0,0,30]. Depending on leaveoffsetintool parameter, this can be in the global coordinate system or tool coordinate system.
    :param leaveoffsetintool: If 1, destdepartoffsetdir is in the tool coordinate system. If 0, destdepartoffsetdir is in the global coordinate system. By default this is 0.
    :param deletetarget: whether to delete target after pick and place is done
    :param toolname: name of the manipulator
    :param regionname: name of the region of the objects
    :param cameranames: the names of the cameras to avoid occlusions with the robot, list of strings
    :param envclearance: environment clearance in milimeter
    Low level planning parameters:
    :param debuglevel: sets debug level of the task
    :param movetodestination: planning parameter
    :param freeinc: planning parameter, default [0.08]
    :param worksteplength: planning parameter, default 0.01
    :param densowavearmgroup: planning parameter
    :param graspsetname: the name of the grasp set belong to the target objects to use for the target. Grasp sets are a list of ikparams
    Manual Destination Specification (deprecated)
    :param goaltype: type of the goal, e.g. translationdirection5d or transform6d
    :param goals: flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
    """
    # Bug fix: the list defaults were mutable default arguments shared across
    # calls; resolve them per call instead so callers cannot corrupt them.
    if departoffsetdir is None:
        departoffsetdir = [0, 0, 50]
    if destdepartoffsetdir is None:
        destdepartoffsetdir = [0, 0, 30]
    if freeinc is None:
        freeinc = [0.08]
    if worksteplength is None:
        worksteplength = 0.01
    if toolname is None:
        toolname = self.toolname
    if targetnamepattern is None:
        targetnamepattern = r'%s_\d+' % (self.targetname)
    if regionname is None:
        regionname = self.regionname
    if robotspeed is None:
        robotspeed = self.robotspeed
    # NOTE(review): densowavearmgroup and cameranames are accepted but never
    # added to taskparameters here; pass them through **kwargs if the server
    # needs them -- confirm against the backend protocol.
    taskparameters = {'command': 'PickAndPlace',
                      'toolname': toolname,
                      'goaltype': goaltype,
                      'envclearance': envclearance,
                      'movetodestination': movetodestination,
                      'goals': goals,
                      'approachoffset': approachoffset,
                      'departoffsetdir': departoffsetdir,
                      'destdepartoffsetdir': destdepartoffsetdir,
                      'freeinc': freeinc,
                      'worksteplength': worksteplength,
                      'targetnamepattern': targetnamepattern,
                      'containername': regionname,
                      'deletetarget': deletetarget,
                      'robotspeed': robotspeed,
                      'debuglevel': debuglevel,
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    taskparameters.update(kwargs)
    return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def StartPickAndPlaceThread(self, goaltype=None, goals=None, targetnamepattern=None, approachoffset=30, departoffsetdir=None, destdepartoffsetdir=None, deletetarget=0, debuglevel=4, movetodestination=1, worksteplength=None, regionname=None, envclearance=15, toolname=None, robotspeed=None, timeout=None, **kwargs):
    r"""Start a background loop to continuously pick up objects with the targetnamepattern and place them down at the goals. The loop will check new objects arriving in and move the robot as soon as it finds a feasible grasp. The thread can be quit with StopPickPlaceThread.

    :param desttargetname: The destination target name where the destination goal ikparams come from
    :param destikparamnames: A list of lists of ikparam names for the ordered destinations of the target. destikparamnames[0] is where the first picked up part goes, desttargetname[1] is where the second picked up target goes.
    :param cycledests: When finished cycling through all destikparamnames, will delete all the targets and start from the first index again doing this for cycledests times. By default it is 1.
    :param targetnamepattern: regular expression describing the name of the object, default is '%s_\d+'%(self.targetname). See https://docs.python.org/2/library/re.html
    :param approachoffset: distance in milimeter to move straight to the grasp point, e.g. 30 mm
    :param departoffsetdir: the direction and distance in mm to move the part in global frame (usually along negative gravity) after it is grasped, default [0,0,50]
    :param destdepartoffsetdir: the direction and distance in mm to move away from the object after it is placed, default [0,0,30]. Depending on leaveoffsetintool parameter, this can be in the global coordinate system or tool coordinate system.
    :param leaveoffsetintool: If 1, destdepartoffsetdir is in the tool coordinate system. If 0, destdepartoffsetdir is in the global coordinate system. By default this is 0.
    :param deletetarget: whether to delete target after pick and place is done
    :param toolname: name of the manipulator
    :param regionname: name of the region of the objects
    :param cameranames: the names of the cameras to avoid occlusions with the robot, list of strings
    :param envclearance: environment clearance in milimeter
    Low level planning parameters:
    :param debuglevel: sets debug level of the task
    :param movetodestination: planning parameter
    :param worksteplength: planning parameter, default 0.01
    :param densowavearmgroup: robot parameters
    :param graspsetname: the name of the grasp set belong to the target objects to use for the target. Grasp sets are a list of ikparams
    :param goaltype: type of the goal, e.g. translationdirection5d
    :param goals: flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
    :param useworkspaceplanner: If 1 is set, will try the workspace planner for moving the hand straight. If 2 is set, will try the RRT for moving straight. Can set 3 for trying both.
    """
    # Bug fix: departoffsetdir/destdepartoffsetdir were mutable default
    # arguments shared across calls; resolve them per call instead.
    if departoffsetdir is None:
        departoffsetdir = [0, 0, 50]
    if destdepartoffsetdir is None:
        destdepartoffsetdir = [0, 0, 30]
    if worksteplength is None:
        worksteplength = 0.01
    if toolname is None:
        toolname = self.toolname
    if targetnamepattern is None:
        targetnamepattern = r'%s_\d+' % (self.targetname)
    if regionname is None:
        regionname = self.regionname
    if robotspeed is None:
        robotspeed = self.robotspeed
    taskparameters = {'command': 'StartPickAndPlaceThread',
                      'toolname': toolname,
                      'envclearance': envclearance,
                      'movetodestination': movetodestination,
                      'approachoffset': approachoffset,
                      'departoffsetdir': departoffsetdir,
                      'destdepartoffsetdir': destdepartoffsetdir,
                      'worksteplength': worksteplength,
                      'targetnamepattern': targetnamepattern,
                      'containername': regionname,
                      'deletetarget': deletetarget,
                      'robotspeed': robotspeed,
                      'debuglevel': debuglevel,
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    # Manual goal specification is optional for the threaded variant.
    if goals is not None:
        taskparameters['orderedgoals'] = goals
        taskparameters['goaltype'] = goaltype
    taskparameters.update(kwargs)
    return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def StopPickPlaceThread(self, timeout=None, **kwargs):
    """Stop the background loop started with StartPickAndPlaceThread."""
    taskparameters = dict(command='StopPickPlaceThread',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def GetPickPlaceStatus(self, timeout=None, **kwargs):
    """Query the status of the pick and place thread.

    :return: status (0: not running, 1: no error, 2: error) of the pick and place thread in a json dictionary, e.g. {'status': 2, 'error': 'an error happened'}
    """
    taskparameters = dict(command='GetPickPlaceStatus',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def MoveToHandPosition(self, goaltype, goals, toolname=None, envclearance=None, closegripper=0, robotspeed=None, timeout=None):
    """Compute inverse kinematics and move the manipulator to any one of the specified goals.

    :param goaltype: type of the goal, e.g. translationdirection5d
    :param goals: flat list of goals, e.g. two 5d ik goals: [380,450,50,0,0,1, 380,450,50,0,0,-1]
    :param toolname: name of the manipulator, default is self.toolname
    :param envclearance: clearance in milimeter, default is self.envclearance
    :param closegripper: whether to close gripper once the goal is reached, default is 0
    """
    taskparameters = {
        'command': 'MoveToHandPosition',
        'goaltype': goaltype,
        'goals': goals,
        'toolname': self.toolname if toolname is None else toolname,
        'envclearance': self.envclearance if envclearance is None else envclearance,
        'closegripper': closegripper,
        'sceneparams': self.sceneparams,
        'tasktype': self.tasktype,
    }
    return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def ComputeIK(self, timeout=None, **kwargs):
    """Compute IK solutions for a given ikparam.

    :param toolname: tool name, string; defaults to self.toolname
    :param limit: number of solutions to return, int
    :param iktype: grasp (but basically the just the ikparam), string
    :param quaternion: grasp (but basically the just the ikparam) quaternion in world cooordinates, float array
    :param translation: grasp (but basically the just the ikparam) translation in world cooordinates in mm, float array
    :param direction: grasp (but basically the just the ikparam) direction in world cooordinates, float array
    :param angle: grasp (but basically the just the ikparam) angle in world cooordinates, float
    :param freeincvalue: float, the discretization of the free joints of the robot when computing ik.
    :param filteroptions: OpenRAVE IkFilterOptions bitmask. By default this is 1, which means all collisions are checked, int
    :param preshape: If the tool has fingers after the end effector, specify their values. The gripper DOFs come from **gripper_dof_pks** field from the tool., float array
    :return: A dictionary of:
             - solutions: array of IK solutions (each of which is an array of DOF values), sorted by minimum travel distance and truncated to match the limit
    """
    taskparameters = dict(command='ComputeIK',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    # Fill in the instance defaults only when the caller did not supply them.
    taskparameters.setdefault('toolname', self.toolname)
    taskparameters.setdefault('envclearance', self.envclearance)
    return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def ComputeIKFromParameters(self, timeout=None, **kwargs):
    """Compute IK solutions from named ikparams on a target.

    :param toolname: tool name, string; defaults to self.toolname
    :param limit: number of solutions to return, int
    :param ikparamnames: the ikparameter names, also contains information about the grasp like the preshape
    :param targetname: the target object name that the ikparamnames belong to
    :param freeincvalue: float, the discretization of the free joints of the robot when computing ik.
    :param filteroptions: OpenRAVE IkFilterOptions bitmask. By default this is 1, which means all collisions are checked, int
    :return: A dictionary of:
             - solutions: array of IK solutions (each of which is an array of DOF values), sorted by minimum travel distance and truncated to match the limit
    """
    taskparameters = dict(command='ComputeIKFromParameters',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    # Fill in the instance defaults only when the caller did not supply them.
    taskparameters.setdefault('toolname', self.toolname)
    taskparameters.setdefault('envclearance', self.envclearance)
    return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def InitializePartsWithPhysics(self, timeout=None, **kwargs):
    """Start a physics simulation where the parts drop down into the bin. The method returns as soon as the physics is initialized, user has to wait for the "duration" or call StopPhysicsThread command.

    :param targeturi: the target uri to initialize the scene with
    :param numtargets: the number of targets to create
    :param regionname: the container name to drop the targets into; defaults to self.regionname
    :param duration: the duration in seconds to continue the physics until it is stopped.
    :param basename: The basename to give to all the new target names. Numbers are suffixed at the end, like basename+'0134'. If not specified, will use a basename derived from the targeturi.
    :param deleteprevious: if True, will delete all the previous targets in the scene. By default this is True.
    """
    taskparameters = dict(command='InitializePartsWithPhysics',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    taskparameters.setdefault('containername', self.regionname)
    return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def StopPhysicsThread(self, timeout=None, **kwargs):
    """Stop the physics simulation started with InitializePartsWithPhysics."""
    taskparameters = dict(command='StopPhysicsThread',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def JitterPartUntilValidGrasp(self, timeout=None, **kwargs):
    """Select a part that wasn't able to be grasped and jitter its location such that a grasp set is found for it that will take it to the destination.

    :param toolname: name of the manipulator
    :param targetname: The target to try to grasp.
    :param graspsetname: the name of the grasp set belong to the target objects to use for the target. Grasp sets are a list of ikparams.
    :param approachoffset: The approach distance for simulating full grasp.
    :param departoffsetdir: The depart distance for simulating full grasp.
    :param destdepartoffsetdir: the direction and distance in mm to move away from the object after it is placed, e.g. [0,0,30]. Depending on leaveoffsetintool parameter, this can in the global coordinate system or tool coordinate system.
    :param leaveoffsetintool: If 1, destdepartoffsetdir is in the tool coordinate system. If 0, destdepartoffsetdir is in the global coordinate system. By default this is 0.
    :param desttargetname: The destination target name where the destination goal ikparams come from. If no name is specified, then robot won't consider putting the target into the destination when it searches for grasps.
    :param destikparamnames: A list of lists of ikparam names for the ordered destinations of the target. destikparamnames[0] is where the first picked up part goes, desttargetname[1] is where the second picked up target goes.
    :param jitterdist: Amount to jitter the target object translation by
    :param jitterangle: Amount to jitter the target object's orientation angle
    :param jitteriters: Number of times to try jittering before giving up.
    :return: If failed, an empty dictionary. If succeeded, a dictionary with the following keys:
             - translation: the new translation of the target part
             - quaternion: the new quaternion of the target part
             - jointvalues: robot joint values that are grasping the part (fingers are at their preshape).
             - graspname: the grasp name used for jointvalues. If empty, then no grasp was found.
             - destikname: the name of the destination ikparam that was chosen with the grasp
             - destjointvalues: robot joint values at one of the specified destinations (fingers are at their final positions).
             - desttranslation: the new translation of the target part
             - destquaternion: the new quaternion of the target part
    """
    taskparameters = dict(command='JitterPartUntilValidGrasp',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
####################
# scene commands
####################
def IsRobotOccludingBody(self, bodyname, cameraname, timeout=None):
    """Check whether the robot occludes a body in the view of the given camera.

    :param bodyname: name of the object
    :param cameraname: name of the camera
    :return: the occlusion state in a json dictionary, e.g. {'occluded': 0}
    """
    taskparameters = {
        'command': 'IsRobotOccludingBody',
        'robotname': self.robotname,
        'bodyname': bodyname,
        'cameraname': cameraname,
        'sceneparams': self.sceneparams,
        'tasktype': self.tasktype,
    }
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetPickedPositions(self, unit='m', timeout=None):
    """Return the poses and the timestamps of the picked objects.

    :param unit: unit of the translation
    :return: the positions and the timestamps of the picked objects in a json dictionary, info of each object has the format of quaternion (w,x,y,z) followed by x,y,z translation (in mm) followed by timestamp in milisecond e.g. {'positions': [[1,0,0,0,100,200,300,1389774818.8366449],[1,0,0,0,200,200,300,1389774828.8366449]]}
    """
    taskparameters = {
        'command': 'GetPickedPositions',
        'robotname': self.robotname,
        'unit': unit,
        'sceneparams': self.sceneparams,
        'tasktype': self.tasktype,
    }
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def UpdateObjects(self, envstate, targetname=None, unit="m", timeout=None):
    """Update objects in the scene with the given environment state.

    :param envstate: a list of dictionaries for each instance object in world frame. quaternion is specified in w,x,y,z order. e.g. [{'name': 'target_0', 'translation_': [1,2,3], 'quat_': [1,0,0,0]}, {'name': 'target_1', 'translation_': [2,2,3], 'quat_': [1,0,0,0]}]
    :param targetname: object name, default is self.targetname
    :param unit: unit of envstate
    """
    objectname = self.targetname if targetname is None else targetname
    taskparameters = {
        'command': 'UpdateObjects',
        'objectname': objectname,
        'object_uri': u'mujin:/%s.mujin.dae' % (objectname),
        'robot': self.robotname,
        'envstate': envstate,
        'sceneparams': self.sceneparams,
        'tasktype': self.tasktype,
        'unit': unit,
    }
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def Grab(self, targetname, toolname=None, timeout=None):
    """Grab an object with the tool.

    :param targetname: name of the object
    :param toolname: name of the manipulator, default is self.toolname
    """
    taskparameters = {
        'command': 'Grab',
        'targetname': targetname,
        'robotname': self.robotname,
        'toolname': self.toolname if toolname is None else toolname,
        'sceneparams': self.sceneparams,
        'tasktype': self.tasktype,
    }
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetGrabbed(self, timeout=None):
    """Return the names of the objects currently grabbed by the robot.

    :return: names of the grabbed object in a json dictionary, e.g. {'names': ['target_0']}
    """
    taskparameters = dict(command='GetGrabbed',
                          robotname=self.robotname,
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetTransform(self, targetname, unit='mm', timeout=None):
    """Return the transform of an object.

    :param targetname: name of the object
    :param unit: unit of the result translation
    :return: transform of the object in a json dictionary, e.g. {'translation': [100,200,300], 'rotationmat': [[1,0,0],[0,1,0],[0,0,1]], 'quaternion': [1,0,0,0]}
    """
    taskparameters = dict(command='GetTransform',
                          targetname=targetname,
                          unit=unit,
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def SetTransform(self, targetname, translation, unit='mm', rotationmat=None, quaternion=None, timeout=None):
    """sets the transform of an object

    :param targetname: name of the object
    :param translation: list of x,y,z value of the object in milimeter
    :param unit: unit of translation
    :param rotationmat: list specifying the rotation matrix in row major format, e.g. [1,0,0,0,1,0,0,0,1]
    :param quaternion: list specifying the quaternion in w,x,y,z format, e.g. [1,0,0,0]
    """
    taskparameters = {'command': 'SetTransform',
                      'targetname': targetname,
                      'unit': unit,
                      'translation': translation,
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    if rotationmat is not None:
        taskparameters['rotationmat'] = rotationmat
    if quaternion is not None:
        taskparameters['quaternion'] = quaternion
    if rotationmat is None and quaternion is None:
        taskparameters['quaternion'] = [1, 0, 0, 0]
        # Bug fix: the message previously had no %-placeholder but a second
        # positional argument was passed, which makes the logging module
        # raise a string-formatting error instead of emitting the warning.
        log.warn('no rotation is specified, using identity quaternion %s', taskparameters['quaternion'])
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetAABB(self, targetname, unit='mm', timeout=None):
    """Return the axis-aligned bounding box of an object.

    :param targetname: name of the object
    :param unit: unit of the AABB
    :return: AABB of the object, e.g. {'pos': [1000,400,100], 'extents': [100,200,50]}
    """
    taskparameters = dict(command='GetAABB',
                          targetname=targetname,
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype,
                          unit=unit)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def RemoveObjectsWithPrefix(self, prefix, timeout=None):
    """Remove from the scene every object whose name starts with prefix."""
    taskparameters = dict(command='RemoveObjectsWithPrefix',
                          prefix=prefix,
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def SaveScene(self, timeout=None, **kwargs):
    """Save the current scene to a file.

    :param filename: e.g. /tmp/testscene.mujin.dae, if not specified, it will be saved with an auto-generated filename
    :param preserveexternalrefs: If True, any bodies currently that are being externally referenced from the environment will be saved as external references.
    :param externalref: If '*', then will save each of the objects as externally referencing their original filename. Otherwise will force saving specific bodies as external references
    :param saveclone: If 1, will save the scenes for all the cloned environments
    :return: the actual filename the scene is saved to in a json dictionary, e.g. {'filename': '2013-11-01-17-10-00-UTC.dae'}
    """
    taskparameters = dict(command='SaveScene',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetTrajectoryLog(self, timeout=None, **kwargs):
    """Gets the recent trajectories executed on the binpicking server. The internal server keeps trajectories around for 10 minutes before clearing them.

    :param startindex: int, start of the trajectory to get. If negative, will start counting from the end. For example, -1 is the last element, -2 is the second to last element.
    :param num: int, number of trajectories from startindex to return. If 0 will return all the trajectories starting from startindex
    :param includejointvalues: bool, If True will include timedjointvalues, if False will just give back the trajectories. Defautl is False
    :return:
        total: 10
        trajectories: [
        {
        "timestarted": 12345215
        "name": "movingtodest",
        "numpoints": 100,
        "duration": 0.8,
        "timedjointvalues": [0, 0, 0, .....]
        },
        { ... }
        ]
        Where timedjointvalues is a list joint values and the trajectory time. For a 3DOF robot sampled at 0.008s, this is
        [J1, J2, J3, 0, J1, J2, J3, 0.008, J1, J2, J3, 0.016, ...]
    """
    taskparameters = dict(command='GetTrajectoryLog',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetPickAndPlaceLog(self, timeout=None, **kwargs):
    """Gets the recent pick-and-place log executed on the binpicking server. The internal server keeps the log around until the next Pick-and-place command is executed.

    :param startindex: int, start of the trajectory to get. If negative, will start counting from the end. For example, -1 is the last element, -2 is the second to last element.
    :param num: int, number of trajectories from startindex to return. If 0 will return all the trajectories starting from startindex
    :return:
        total: 10
        messages: [
        {
        "message":"message1",
        "type":"",
        "level":0,
        "data": {
        "jointvalues":[0,0,0,0,0,0]
        }
        },
        ]
    """
    taskparameters = dict(command='GetPickAndPlaceLog',
                          sceneparams=self.sceneparams,
                          tasktype=self.tasktype)
    taskparameters.update(kwargs)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def MoveRobotOutOfCameraOcclusion(self, regionname=None, robotspeed=None, toolname=None, timeout=None, **kwargs):
    """Move the robot out of camera occlusion; targets that were occluded are deleted.

    :param regionname: name of the container region, default is self.regionname
    :param toolname: name of the tool to move when avoiding, default is self.toolname
    :param cameranames: the names of the cameras to avoid occlusions with the robot, list of strings
    """
    taskparameters = {
        'command': 'MoveRobotOutOfCameraOcclusion',
        'sceneparams': self.sceneparams,
        'tasktype': self.tasktype,
        'containername': self.regionname if regionname is None else regionname,
        'toolname': self.toolname if toolname is None else toolname,
    }
    taskparameters.update(kwargs)
    return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def GetRobotBridgePLCStatus(self, timeout=None, **kwargs):
    """Sends the 'GetRobotBridgePLCStatus' command to the task server.

    Extra keyword arguments are merged into the task parameters and may
    override the defaults below.
    """
    taskparameters = {'command': 'GetRobotBridgePLCStatus',
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    taskparameters.update(kwargs)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def GetRobotBridgePLCIOState(self, timeout=None, **kwargs):
    """Sends the 'GetRobotBridgePLCIOState' command to the task server.

    Extra keyword arguments are merged into the task parameters and may
    override the defaults below.
    """
    taskparameters = {'command': 'GetRobotBridgePLCIOState',
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    taskparameters.update(kwargs)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def PausePickPlace(self, timeout=None, **kwargs):
    """Sends the 'PausePickPlace' command to the task server.

    Extra keyword arguments are merged into the task parameters and may
    override the defaults below.
    """
    taskparameters = {'command': 'PausePickPlace',
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    taskparameters.update(kwargs)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
def ResumePickPlace(self, timeout=None, **kwargs):
    """Sends the 'ResumePickPlace' command to the task server.

    Extra keyword arguments are merged into the task parameters and may
    override the defaults below.
    """
    taskparameters = {'command': 'ResumePickPlace',
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    taskparameters.update(kwargs)
    return self.ExecuteCommand(taskparameters, timeout=timeout)
#######################
# unsupported commands
#######################
def UnchuckManipulator(self, *args, **kwargs):
    """Deprecated alias; forwards all arguments to UnchuckGripper."""
    log.warn('deprecated')
    return self.UnchuckGripper(*args, **kwargs)
def ChuckManipulator(self, *args, **kwargs):
    """Deprecated alias; forwards all arguments to ChuckGripper."""
    log.warn('deprecated')
    return self.ChuckGripper(*args, **kwargs)
def __StartBackgroundTask(self, taskname, robotspeed=None, timeout=None):
    """starts a background task (need testing)

    :param taskname: name of the background task
    :param robotspeed: optional speed forwarded to the robot command
    :param timeout: optional timeout forwarded to the robot command
    """
    taskparameters = {'command': 'ExecuteBackgroundTask',
                      'taskname': taskname,
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
def __StopBackgroundTask(self, timeout=None):
    """stops the background task (need testing)

    assumes that only one background task is running
    """
    taskparameters = {'command': 'StopBackgroundTask',
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    return self.ExecuteRobotCommand(taskparameters, timeout=timeout)
def __PickAndMove(self, goaltype, armjointvaluesgoals, destinationgoals=None, targetnames=None, movetodestination=0, deletetarget=1, startvalues=None, toolname=None, envclearance=20, regionname=None, robotspeed=None, timeout=None):
    """deprecated

    Sends a 'PickAndMove' command built from the given goals; optional
    parameters are only added to the task when the caller supplies them.
    """
    if toolname is None:
        toolname = self.toolname
    taskparameters = {'command': 'PickAndMove',
                      'toolname': toolname,
                      'goaltype': goaltype,
                      'envclearance': envclearance,
                      'movetodestination': movetodestination,
                      'deletetarget': deletetarget,
                      'armjointvaluesgoals': list(armjointvaluesgoals),
                      'sceneparams': self.sceneparams,
                      'tasktype': self.tasktype,
                      }
    if regionname is not None:
        taskparameters['boxname'] = regionname  # TODO: update backend
    if destinationgoals is not None:
        taskparameters['goals'] = destinationgoals
    if targetnames is not None:
        taskparameters['targetnames'] = targetnames
    if startvalues is not None:
        taskparameters['startvalues'] = list(startvalues)
    return self.ExecuteRobotCommand(taskparameters, robotspeed=robotspeed, timeout=timeout)
|
import requests
from bs4 import BeautifulSoup
import time
def is_number(s):
    """
    Return True when s parses as a float, False otherwise.
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
def cg_accumulate(year, dep, degree_choice, sg_cg_choice, user_cg, line_num = 0):
# List of departments with Integrated M.Sc. (5 year courses)
msc_dep_list = ["GG", "EX", "MA", "CY", "HS", "PH"]
grades = ["EX", "A", "B", "C", "D", "P", "F", "X"]
msc_dep = False
dep_rank = 1
num_grades = [0, 0, 0, 0, 0, 0, 0]
print ""
fname = "Output.txt"
roll_count = 10000
if degree_choice == "2":
roll_count = 30000
if dep in msc_dep_list:
roll_count = 20000
msc_dep = True
student_count = 0
flag = False
cg_total = 0.00
sg_total = 0.00
bad_count = 0
sg_list = []
while True:
roll_count += 1
student_count += 1
rollno = str(year) + str(dep) + str(roll_count)
url_to_scrape = 'https://erp.iitkgp.ernet.in/StudentPerformance/view_performance.jsp?rollno=' + rollno
name_flag = False
flag = False
try:
r = requests.get(url_to_scrape)
except Exception:
print "ConnectionError on :" + str(roll_count)
print "Retrying...."
student_count -= 1
roll_count -= 1
continue
soup = BeautifulSoup(r.text, "html.parser")
with open(fname, "w") as text_file:
text_file.write("{}".format(soup))
with open(fname) as f:
content = f.readlines()
if sg_cg_choice == "5":
# if student_count > 6:
# break
if len(content) < 40:
flag = True
bad_count += 1
student_count -= 1
elif len(content) < line_num:
student_count -= 1
# elif len(content) < line_num or content.find("Backlog") != -1 or content.find("Deregistered") != -1:
# student_count -= 1
# if content.find("Backlog") != -1 or content.find("Deregistered") != -1:
# print "Backlog / Deregistration. Skipping"
else:
bad_count = 0
name_line = content[19]
idx = 24
while(name_line[idx]!='<'):
idx += 1
name = name_line[24:idx]
if content[line_num - 3].find(sub_name) != -1:
grade_line = content[line_num]
grade = grade_line[19:20]
else:
index = 0
for line in content:
if line.find(sub_name) != -1:
grade_line = content[index + 3]
grade = grade_line[19:20]
break
index += 1
if grade == "E":
grade = "EX"
if grade in grades:
print "Grade : " + str(grade) + " Name : " + str(name)
if grade == "EX":
num_grades[0] += 1
elif grade == "A":
num_grades[1] += 1
elif grade == "B":
num_grades[2] += 1
elif grade == "C":
num_grades[3] += 1
elif grade == "D":
num_grades[4] += 1
elif grade == "P":
num_grades[5] += 1
elif grade == "F":
num_grades[6] += 1
elif grade == "X":
num_grades[7] += 1
else:
student_count -= 1
else:
for line in content:
if len(content) < 40:
flag = True
bad_count += 1
student_count -= 1
break
bad_count = 0
if line.find("Name") != -1 and not name_flag:
idx = 24
while(line[idx]!='<'):
idx += 1
name = line[24:idx]
name_flag = True
if sg_cg_choice == "1" or sg_cg_choice == "4":
if line.find("CGPA") != -1:
if line[4] != "<" and is_number(line[31:35]):
if sg_cg_choice == "4":
if user_cg < float(line[31:35]):
dep_rank += 1
else:
print "Roll Num : " + str(rollno) + " CG : " + str(line[31:35]) + " Name : " + str(name)
cg_total += float(line[31:35])
break
elif sg_cg_choice == "2":
if line.find("SGPA") != -1 and is_number(line[25:29]):
print "Roll Num : " + str(rollno) + " SGPA in most recent semester : " + str(line[25:29]) + " Name : " + str(name)
sg_total += float(line[25:29])
break
elif sg_cg_choice == "3":
if line.find("SGPA") != -1 and is_number(line[25:29]):
sg_list.append(str(line[25:29]))
if sg_cg_choice == "3" and not flag:
print "Roll Num : " + str(rollno) + " SGPA list : " + str(sg_list) + " Name : " + str(name)
del sg_list[:]
if flag and bad_count >= 5 and (degree_choice != "3" or roll_count > 30000 or msc_dep):
break
# Will not be executed for MSc Integrated Courses
if flag and bad_count >= 5 and not msc_dep:
roll_count = 30000
print "Making transition to dual degree students..."
continue
student_count -= 1
print ""
print "________________________________________"
print "Number of Students : " + str(student_count)
if sg_cg_choice == "1":
print "Total CG : " + str(cg_total)
print "Average CG : " + str(cg_total / student_count)
elif sg_cg_choice == "2":
print "Total SG : " + str(sg_total)
print "Average SG : " + str(sg_total / student_count)
elif sg_cg_choice == "4":
print "Your Department Rank is : " + str(dep_rank)
if dep_rank < student_count/2:
print "Good going. You are in the top half of your department"
else:
print "A bit more hard work can see you in the top half of your department"
elif sg_cg_choice == "5":
print "Grade List : "
print "EX : " + str(num_grades[0]) + " A : " + str(num_grades[1]) + " B : " + str(num_grades[2]) + " C : " + str(num_grades[3])
print " D : " + str(num_grades[4]) + " P : " + str(num_grades[5]) + " F : " + str(num_grades[6]) + " Dereg : " + str(num_grades[7])
if float(sum(num_grades[0:2])) / sum(num_grades) > 0.45:
print "Looks like a scoring subject to me."
elif float(sum(num_grades[0:2])) / sum(num_grades) < 0.30:
print "Looks like a gloomy subject to me."
if num_grades[7] >= 5:
print "Beware! Too much threat of Deregistration in this one!"
print "________________________________________"
def find_cg(roll_num):
url_to_scrape = 'https://erp.iitkgp.ernet.in/StudentPerformance/view_performance.jsp?rollno=' + str(roll_num)
fname = "Output.txt"
try:
r = requests.get(url_to_scrape)
soup = BeautifulSoup(r.text, "html.parser")
with open(fname, "w") as text_file:
text_file.write("{}".format(soup))
with open(fname) as f:
content = f.readlines()
if len(content) < 40:
print "Invalid Roll Number!"
return -1
else:
for line in content:
if line.find("CGPA") != -1:
if line[4] != "<" and is_number(line[31:35]):
return float(line[31:35])
except Exception:
print "ConnectionError on you roll number. Please check your connection and try again!"
key = raw_input("The program will now exit. Press enter")
exit(0)
def find_subject_grade_line(year, dep, sub_name, msc_dep_bool):
    """
    Finds grade distribution in a particular subject obtained by previous batches
    """
    # Returns the line index (within the scraped page) of the grade row for
    # sub_name, or -1 when the subject is missing or still ongoing.  The
    # index is calibrated against the first student of the batch whose page
    # shows no "Backlog"/"Deregistered" rows (those shift the layout).
    fname = "Output.txt"
    grades = ["EX", "A", "B", "C", "D", "P", "F", "X"]
    # Roll numbers are <year><dep><5 digits>: 2xxxx = Integrated M.Sc.,
    # 1xxxx = 4-year B.Tech.
    if msc_dep_bool:
        roll_count = 20001
    else:
        roll_count = 10001
    subj_found_flag = False
    grade_found_flag = False
    while True:
        rollno = str(year) + str(dep) + str(roll_count)
        url_to_scrape = 'https://erp.iitkgp.ernet.in/StudentPerformance/view_performance.jsp?rollno=' + rollno
        try:
            r = requests.get(url_to_scrape)
            soup = BeautifulSoup(r.text, "html.parser")
            # Round-trip the page through a file, then read it line by line.
            with open(fname, "w") as text_file:
                text_file.write("{}".format(soup))
            with open(fname) as f:
                content = f.readlines()
            if len(content) < 40:
                # Short page => this roll number does not exist; try the next.
                roll_count += 1
                continue
            bad_flag = False
            for line in content:
                if line.find("Backlog") != -1 or line.find("Deregistered") != -1:
                    bad_flag = True
                    break
            if bad_flag:
                # Backlog/deregistration entries distort the page layout;
                # calibrate on a different student instead.
                roll_count += 1
            else:
                index = 0
                grade_line_index = 0
                for line in content:
                    if line.find(sub_name) != -1:
                        subj_found_flag = True
                        # The grade sits 3 lines below the subject name; only
                        # the single character at columns 19:20 is read, so
                        # "EX" appears here as "E".
                        grade_line = content[index+3]
                        grade = grade_line[19:20]
                        if grade not in grades and grade != "E":
                            print "This Subject is an ongoing subject for the mentioned batch number"
                            print grade
                            print "System will now exit"
                            #exit(0)
                            # this can be replaced by return
                            return -1
                        else:
                            grade_found_flag = True
                            grade_line_index = index + 3
                            break
                    index += 1
                if not grade_found_flag or (subj_found_flag == False):
                    print "Subject not found! System will now exit"
                    #exit(0)
                    return -1
                    # this can be replaced by return
                else:
                    return grade_line_index
        except Exception:
            # Treat any failure (network, parse, indexing) as transient
            # and retry the same roll number indefinitely.
            print "ConnectionError. Retrying..."
            continue
# --- Interactive entry point -------------------------------------------------
# Collects year / department / report type from the user, then dispatches to
# cg_accumulate (choices 1-4) or find_subject_grade_line + cg_accumulate (5).
print "*** Welcome to CG Accumulator ***"
departments = ["AE", "AG", "AR", "BT", "CE", "CH", "CS", "CY", "EC", "EE", "EX", "GG", "HS", "IE", "IM", "MA", "ME", "MF", "MI", "MT", "NA", "PH", "QD"]
years = ["12","13","14","15"]
user_cg = 0.00
# Input loop: repeats until a consistent (year, dep, choice) triple is read.
while True:
    print ""
    year = raw_input("Enter year (Available Choices : 12, 13, 14, 15) : ")
    if year not in years:
        print "Please enter a valid year choice"
        continue
    print ""
    dep = raw_input("Enter Department : ")
    while dep not in departments:
        print "Please enter a valid department!"
        print "P.S. Department name should be capitalised. Eg. \"CS\" and not \"cs\""
        dep = raw_input("Enter Valid Department again : ")
    print ""
    sg_cg_choice = raw_input("Do you want CG list (enter '1') \n or Most recent SG list (enter '2') \n or Entire SG history (enter '3') \n or Know your D.R. (enter '4') \n or Find previous year grades in a particular subject (enter '5')? : ")
    while sg_cg_choice not in ["1", "2", "3", "4", "5"]:
        print "Please enter a valid choice!"
        sg_cg_choice = raw_input("Enter valid choice again : ")
    if sg_cg_choice == "4" or sg_cg_choice == "5":
        # Rank and subject-grade reports always scan every degree type.
        degree_choice = "3"
        if sg_cg_choice == "4":
            # Validate the roll number by actually fetching its CG.
            roll_num = raw_input("Enter last 5 digits of your roll number : ")
            while len(roll_num) != 5 or find_cg(year + dep + roll_num) == -1:
                print "Please enter valid last 5 digits"
                roll_num = raw_input("Enter valid last 5 digits of your roll number again: ")
            user_cg = find_cg(year + dep + roll_num)
        elif sg_cg_choice == "5":
            # NOTE(review): sub_name is a module-level global read later by
            # cg_accumulate for choice "5".
            sub_name = raw_input("Enter subject name in capital letters : ")
            #line_num = find_subject_grade_line(year, dep, sub_name, False)
    else:
        print ""
        degree_choice = raw_input("Enter choice : '1' for 4 years only, '2' for 5 years only, '3' for both : ")
        while degree_choice not in ["1", "2", "3"]:
            print "Please enter a valid choice!"
            degree_choice = raw_input("Enter valid choice again : ")
    print ""
    break
# year = "14"
# dep = "CS"
# sg_cg_choice = "5"
# sub_name = "CHEMISTRY"
# line_num = find_subject_grade_line(year, dep, sub_name, False)
# print line_num
if sg_cg_choice != "5":
    print ""
    print "Please wait while results are being accumulated, this may take a few minutes...."
    print "Meanwhile, minimize this screen and think about what you are doing with your life."
    print ""
    var = cg_accumulate(year, dep, degree_choice,sg_cg_choice, user_cg)
    print ""
elif sg_cg_choice == "5":
    # Choice 5 first calibrates the grade-row index on one clean student page.
    msc_dep_list = ["GG", "EX", "MA", "CY", "HS", "PH"]
    if dep in msc_dep_list:
        msc_dep_bool = True
    else:
        msc_dep_bool = False
    line_num = find_subject_grade_line(year, dep, sub_name, msc_dep_bool)
    if line_num != -1:
        var = cg_accumulate(year, dep, degree_choice, sg_cg_choice, 0.00, line_num)
key = raw_input("Press Enter to exit")
Minor bug fix
import requests
from bs4 import BeautifulSoup
import time
def is_number(s):
    """
    Return True when the given value can be interpreted as a float,
    False otherwise.  Only ValueError is treated as "not a number";
    other errors (e.g. an unconvertible type) propagate unchanged.
    """
    try:
        float(s)
    except ValueError:
        return False
    return True
def cg_accumulate(year, dep, degree_choice, sg_cg_choice, user_cg, line_num = 0):
    """
    Scrape the ERP performance page of every student in a batch and print
    per-student results plus a summary.

    year          -- two-digit admission year string, e.g. "14"
    dep           -- two-letter department code, e.g. "CS"
    degree_choice -- "1" 4-year only, "2" 5-year only, "3" both
    sg_cg_choice  -- "1" CG list, "2" latest SG, "3" SG history,
                     "4" department rank vs user_cg, "5" subject grades
    user_cg       -- the caller's CGPA (used only for choice "4")
    line_num      -- page line index of the subject grade row (choice "5")

    NOTE(review): choice "5" reads the module-level global `sub_name`
    set by the interactive prompt -- confirm before reusing elsewhere.
    """
    # List of departments with Integrated M.Sc. (5 year courses)
    msc_dep_list = ["GG", "EX", "MA", "CY", "HS", "PH"]
    grades = ["EX", "A", "B", "C", "D", "P", "F", "X"]
    msc_dep = False
    dep_rank = 1
    # Grade histogram in the order EX, A, B, C, D, P, F, X (deregistered).
    num_grades = [0, 0, 0, 0, 0, 0, 0, 0]
    print ""
    fname = "Output.txt"
    # Roll numbers are <year><dep><5 digits>: 1xxxx = 4-year B.Tech,
    # 2xxxx = Integrated M.Sc., 3xxxx = 5-year dual degree.
    roll_count = 10000
    if degree_choice == "2":
        roll_count = 30000
    if dep in msc_dep_list:
        roll_count = 20000
        msc_dep = True
    student_count = 0
    flag = False          # True while the current roll number had no valid page
    cg_total = 0.00
    sg_total = 0.00
    bad_count = 0         # consecutive non-existent roll numbers seen so far
    sg_list = []
    while True:
        roll_count += 1
        student_count += 1
        rollno = str(year) + str(dep) + str(roll_count)
        url_to_scrape = 'https://erp.iitkgp.ernet.in/StudentPerformance/view_performance.jsp?rollno=' + rollno
        name_flag = False
        flag = False
        try:
            r = requests.get(url_to_scrape)
        except Exception:
            # Network hiccup: undo the counters and retry the same roll number.
            print "ConnectionError on :" + str(roll_count)
            print "Retrying...."
            student_count -= 1
            roll_count -= 1
            continue
        soup = BeautifulSoup(r.text, "html.parser")
        # Round-trip the page through a file, then scan it line by line.
        with open(fname, "w") as text_file:
            text_file.write("{}".format(soup))
        with open(fname) as f:
            content = f.readlines()
        if sg_cg_choice == "5":
            # if student_count > 6:
            #     break
            if len(content) < 40:
                # Short page => this roll number does not exist.
                flag = True
                bad_count += 1
                student_count -= 1
            elif len(content) < line_num:
                student_count -= 1
            # elif len(content) < line_num or content.find("Backlog") != -1 or content.find("Deregistered") != -1:
            #     student_count -= 1
            #     if content.find("Backlog") != -1 or content.find("Deregistered") != -1:
            #         print "Backlog / Deregistration. Skipping"
            else:
                bad_count = 0
                # Student name lives on page line 19 between column 24 and
                # the next '<' character.
                name_line = content[19]
                idx = 24
                while(name_line[idx]!='<'):
                    idx += 1
                name = name_line[24:idx]
                # VULNERABILITY: reads the module-level global `sub_name`
                # and trusts the pre-calibrated line_num offset.
                if content[line_num - 3].find(sub_name) != -1: # VULNERABILITY
                    grade_line = content[line_num]
                    grade = grade_line[19:20]
                else:
                    # Layout shifted for this student; rescan for the subject.
                    index = 0
                    for line in content:
                        if line.find(sub_name) != -1:
                            grade_line = content[index + 3]
                            grade = grade_line[19:20]
                            break
                        index += 1
                # NOTE(review): if the subject is absent from this page,
                # `grade` keeps its value from a previous iteration (or is
                # undefined on the first one) -- confirm this is acceptable.
                if grade == "E":
                    # Only one character is read, so "EX" arrives as "E".
                    grade = "EX"
                if grade in grades:
                    print "Grade : " + str(grade) + "  Name : " + str(name)
                    if grade == "EX":
                        num_grades[0] += 1
                    elif grade == "A":
                        num_grades[1] += 1
                    elif grade == "B":
                        num_grades[2] += 1
                    elif grade == "C":
                        num_grades[3] += 1
                    elif grade == "D":
                        num_grades[4] += 1
                    elif grade == "P":
                        num_grades[5] += 1
                    elif grade == "F":
                        num_grades[6] += 1
                    elif grade == "X":
                        num_grades[7] += 1
                else:
                    student_count -= 1
        else:
            # Choices 1-4: scan the page for the name plus CGPA/SGPA rows.
            for line in content:
                if len(content) < 40:
                    flag = True
                    bad_count += 1
                    student_count -= 1
                    break
                bad_count = 0
                if line.find("Name") != -1 and not name_flag:
                    idx = 24
                    while(line[idx]!='<'):
                        idx += 1
                    name = line[24:idx]
                    name_flag = True
                if sg_cg_choice == "1" or sg_cg_choice == "4":
                    if line.find("CGPA") != -1:
                        if line[4] != "<" and is_number(line[31:35]):
                            if sg_cg_choice == "4":
                                # Rank = 1 + number of students with higher CG.
                                if user_cg < float(line[31:35]):
                                    dep_rank += 1
                            else:
                                print "Roll Num : " + str(rollno) + " CG : " + str(line[31:35]) + " Name : " + str(name)
                                cg_total += float(line[31:35])
                            break
                elif sg_cg_choice == "2":
                    # The first SGPA row on the page is the most recent semester.
                    if line.find("SGPA") != -1 and is_number(line[25:29]):
                        print "Roll Num : " + str(rollno) + " SGPA in most recent semester : " + str(line[25:29]) + " Name : " + str(name)
                        sg_total += float(line[25:29])
                        break
                elif sg_cg_choice == "3":
                    # Collect every SGPA row (full semester history).
                    if line.find("SGPA") != -1 and is_number(line[25:29]):
                        sg_list.append(str(line[25:29]))
            if sg_cg_choice == "3" and not flag:
                print "Roll Num : " + str(rollno) + " SGPA list : " + str(sg_list) + " Name : " + str(name)
                del sg_list[:]
        # Five consecutive missing pages => end of this roll-number band.
        if flag and bad_count >= 5 and (degree_choice != "3" or roll_count > 30000 or msc_dep):
            break
        # Will not be executed for MSc Integrated Courses
        if flag and bad_count >= 5 and not msc_dep:
            roll_count = 30000
            print "Making transition to dual degree students..."
            continue
    # The final loop iteration was a miss, so drop its increment.
    student_count -= 1
    print ""
    print "________________________________________"
    print "Number of Students : " + str(student_count)
    # NOTE(review): the averages below divide by student_count and the grade
    # ratios divide by sum(num_grades) -- both raise ZeroDivisionError when
    # no student page was found; confirm whether that case needs guarding.
    if sg_cg_choice == "1":
        print "Total CG : " + str(cg_total)
        print "Average CG : " + str(cg_total / student_count)
    elif sg_cg_choice == "2":
        print "Total SG : " + str(sg_total)
        print "Average SG : " + str(sg_total / student_count)
    elif sg_cg_choice == "4":
        print "Your Department Rank is : " + str(dep_rank)
        if dep_rank < student_count/2:
            print "Good going. You are in the top half of your department"
        else:
            print "A bit more hard work can see you in the top half of your department"
    elif sg_cg_choice == "5":
        print "Grade List : "
        print "EX : " + str(num_grades[0]) + "  A : " + str(num_grades[1]) + "  B : " + str(num_grades[2]) + "  C : " + str(num_grades[3])
        print "  D : " + str(num_grades[4]) + "  P : " + str(num_grades[5]) + "  F : " + str(num_grades[6]) + "  Dereg : " + str(num_grades[7])
        if float(sum(num_grades[0:2])) / sum(num_grades) > 0.45:
            print "Looks like a scoring subject to me."
        elif float(sum(num_grades[0:2])) / sum(num_grades) < 0.30:
            print "Looks like a gloomy subject to me."
        if num_grades[7] >= 5:
            print "Beware! Too much threat of Deregistration in this one!"
    print "________________________________________"
def find_cg(roll_num):
url_to_scrape = 'https://erp.iitkgp.ernet.in/StudentPerformance/view_performance.jsp?rollno=' + str(roll_num)
fname = "Output.txt"
try:
r = requests.get(url_to_scrape)
soup = BeautifulSoup(r.text, "html.parser")
with open(fname, "w") as text_file:
text_file.write("{}".format(soup))
with open(fname) as f:
content = f.readlines()
if len(content) < 40:
print "Invalid Roll Number!"
return -1
else:
for line in content:
if line.find("CGPA") != -1:
if line[4] != "<" and is_number(line[31:35]):
return float(line[31:35])
except Exception:
print "ConnectionError on you roll number. Please check your connection and try again!"
key = raw_input("The program will now exit. Press enter")
exit(0)
def find_subject_grade_line(year, dep, sub_name, msc_dep_bool):
    """
    Finds grade distribution in a particular subject obtained by previous batches
    """
    # Returns the line index (within the scraped page) of the grade row for
    # sub_name, or -1 when the subject is missing or still ongoing.  The
    # index is calibrated against the first student of the batch whose page
    # shows no "Backlog"/"Deregistered" rows (those shift the layout).
    fname = "Output.txt"
    grades = ["EX", "A", "B", "C", "D", "P", "F", "X"]
    # Roll numbers are <year><dep><5 digits>: 2xxxx = Integrated M.Sc.,
    # 1xxxx = 4-year B.Tech.
    if msc_dep_bool:
        roll_count = 20001
    else:
        roll_count = 10001
    subj_found_flag = False
    grade_found_flag = False
    while True:
        rollno = str(year) + str(dep) + str(roll_count)
        url_to_scrape = 'https://erp.iitkgp.ernet.in/StudentPerformance/view_performance.jsp?rollno=' + rollno
        try:
            r = requests.get(url_to_scrape)
            soup = BeautifulSoup(r.text, "html.parser")
            # Round-trip the page through a file, then read it line by line.
            with open(fname, "w") as text_file:
                text_file.write("{}".format(soup))
            with open(fname) as f:
                content = f.readlines()
            if len(content) < 40:
                # Short page => this roll number does not exist; try the next.
                roll_count += 1
                continue
            bad_flag = False
            for line in content:
                if line.find("Backlog") != -1 or line.find("Deregistered") != -1:
                    bad_flag = True
                    break
            if bad_flag:
                # Backlog/deregistration entries distort the page layout;
                # calibrate on a different student instead.
                roll_count += 1
            else:
                index = 0
                grade_line_index = 0
                for line in content:
                    if line.find(sub_name) != -1:
                        subj_found_flag = True
                        # The grade sits 3 lines below the subject name; only
                        # the single character at columns 19:20 is read, so
                        # "EX" appears here as "E".
                        grade_line = content[index+3]
                        grade = grade_line[19:20]
                        if grade not in grades and grade != "E":
                            print "This Subject is an ongoing subject for the mentioned batch number"
                            print grade
                            print "System will now exit"
                            #exit(0)
                            # this can be replaced by return
                            return -1
                        else:
                            grade_found_flag = True
                            grade_line_index = index + 3
                            break
                    index += 1
                if not grade_found_flag or (subj_found_flag == False):
                    print "Subject not found! System will now exit"
                    #exit(0)
                    return -1
                    # this can be replaced by return
                else:
                    return grade_line_index
        except Exception:
            # Treat any failure (network, parse, indexing) as transient
            # and retry the same roll number indefinitely.
            print "ConnectionError. Retrying..."
            continue
# --- Interactive entry point -------------------------------------------------
# Collects year / department / report type from the user, then dispatches to
# cg_accumulate (choices 1-4) or find_subject_grade_line + cg_accumulate (5).
print "*** Welcome to CG Accumulator ***"
departments = ["AE", "AG", "AR", "BT", "CE", "CH", "CS", "CY", "EC", "EE", "EX", "GG", "HS", "IE", "IM", "MA", "ME", "MF", "MI", "MT", "NA", "PH", "QD"]
years = ["12","13","14","15"]
user_cg = 0.00
# Input loop: repeats until a consistent (year, dep, choice) triple is read.
while True:
    print ""
    year = raw_input("Enter year (Available Choices : 12, 13, 14, 15) : ")
    if year not in years:
        print "Please enter a valid year choice"
        continue
    print ""
    dep = raw_input("Enter Department : ")
    while dep not in departments:
        print "Please enter a valid department!"
        print "P.S. Department name should be capitalised. Eg. \"CS\" and not \"cs\""
        dep = raw_input("Enter Valid Department again : ")
    print ""
    sg_cg_choice = raw_input("Do you want CG list (enter '1') \n or Most recent SG list (enter '2') \n or Entire SG history (enter '3') \n or Know your D.R. (enter '4') \n or Find previous year grades in a particular subject (enter '5')? : ")
    while sg_cg_choice not in ["1", "2", "3", "4", "5"]:
        print "Please enter a valid choice!"
        sg_cg_choice = raw_input("Enter valid choice again : ")
    if sg_cg_choice == "4" or sg_cg_choice == "5":
        # Rank and subject-grade reports always scan every degree type.
        degree_choice = "3"
        if sg_cg_choice == "4":
            # Validate the roll number by actually fetching its CG.
            roll_num = raw_input("Enter last 5 digits of your roll number : ")
            while len(roll_num) != 5 or find_cg(year + dep + roll_num) == -1:
                print "Please enter valid last 5 digits"
                roll_num = raw_input("Enter valid last 5 digits of your roll number again: ")
            user_cg = find_cg(year + dep + roll_num)
        elif sg_cg_choice == "5":
            # NOTE(review): sub_name is a module-level global read later by
            # cg_accumulate for choice "5".
            sub_name = raw_input("Enter subject name in capital letters : ")
            #line_num = find_subject_grade_line(year, dep, sub_name, False)
    else:
        print ""
        degree_choice = raw_input("Enter choice : '1' for 4 years only, '2' for 5 years only, '3' for both : ")
        while degree_choice not in ["1", "2", "3"]:
            print "Please enter a valid choice!"
            degree_choice = raw_input("Enter valid choice again : ")
    print ""
    break
# year = "14"
# dep = "CS"
# sg_cg_choice = "5"
# sub_name = "CHEMISTRY"
# line_num = find_subject_grade_line(year, dep, sub_name, False)
# print line_num
if sg_cg_choice != "5":
    print ""
    print "Please wait while results are being accumulated, this may take a few minutes...."
    print "Meanwhile, minimize this screen and think about what you are doing with your life."
    print ""
    var = cg_accumulate(year, dep, degree_choice,sg_cg_choice, user_cg)
    print ""
elif sg_cg_choice == "5":
    # Choice 5 first calibrates the grade-row index on one clean student page.
    msc_dep_list = ["GG", "EX", "MA", "CY", "HS", "PH"]
    if dep in msc_dep_list:
        msc_dep_bool = True
    else:
        msc_dep_bool = False
    line_num = find_subject_grade_line(year, dep, sub_name, msc_dep_bool)
    if line_num != -1:
        var = cg_accumulate(year, dep, degree_choice, sg_cg_choice, 0.00, line_num)
key = raw_input("Press Enter to exit")
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_builtins module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrappers
from tensorflow.python.autograph.operators import data_structures
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class TestBase(object):
  """Minimal base class; its method is overridden in the super() tests below."""

  def plus_twenty(self, x):
    """Return x increased by twenty."""
    result = x + 20
    return result
class PyBuiltinsTest(test.TestCase):
def test_abs(self):
self.assertEqual(py_builtins.abs_(-1), 1)
with self.cached_session() as sess:
t = py_builtins.abs_(constant_op.constant(-1))
self.assertEqual(self.evaluate(t), 1)
t = py_builtins.abs_(constant_op.constant([-1, 2, -3]))
self.assertAllEqual(self.evaluate(t), [1, 2, 3])
def test_abs_dataset(self):
dataset = dataset_ops.DatasetV2.from_tensor_slices([-1, 2, 3])
dataset = py_builtins.abs_(dataset)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), 1)
self.assertAllEqual(self.evaluate(iterator.get_next()), 2)
self.assertAllEqual(self.evaluate(iterator.get_next()), 3)
def test_abs_dataset_zipped(self):
dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([-1, 2, 3])
dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([1, -2, 3])
dataset = dataset_ops.DatasetV2.zip((dataset_1, dataset_2))
dataset = py_builtins.abs_(dataset)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), (1, 1))
self.assertAllEqual(self.evaluate(iterator.get_next()), (2, 2))
self.assertAllEqual(self.evaluate(iterator.get_next()), (3, 3))
def test_abs_dataset_mixed(self):
dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([-1, 2, 3])
dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([1, -2, 3])
dataset_3 = dataset_ops.DatasetV2.from_tensor_slices([-1, -2, -3])
dataset_4 = dataset_ops.DatasetV2.zip((dataset_1, dataset_2))
dataset = dataset_ops.DatasetV2.zip((dataset_3, dataset_4))
dataset = py_builtins.abs_(dataset)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
for i in range(1, 4):
actual = self.evaluate(iterator.get_next())
self.assertAllEqual(actual[0], i)
self.assertAllEqual(actual[1], (i, i))
def test_float(self):
self.assertEqual(py_builtins.float_(10), 10.0)
self.assertEqual(py_builtins.float_('10.0'), 10.0)
with self.cached_session() as sess:
t = py_builtins.float_(constant_op.constant(1, dtype=dtypes.int64))
self.assertEqual(self.evaluate(t), 1.0)
st = py_builtins.float_(constant_op.constant('1.0'))
self.assertEqual(self.evaluate(st), 1.0)
def test_int(self):
self.assertEqual(py_builtins.int_(10.0), 10)
self.assertEqual(py_builtins.int_('11', 2), 3)
with self.cached_session() as sess:
t = py_builtins.int_(constant_op.constant(1, dtype=dtypes.float64))
self.assertEqual(self.evaluate(t), 1)
st = py_builtins.int_(constant_op.constant('1'))
self.assertEqual(self.evaluate(st), 1)
st = py_builtins.int_(constant_op.constant('1'), 10)
self.assertEqual(self.evaluate(st), 1)
def test_int_unsupported_base(self):
t = constant_op.constant(1, dtype=dtypes.float64)
with self.assertRaises(NotImplementedError):
py_builtins.int_(t, 2)
def test_len(self):
self.assertEqual(py_builtins.len_([1, 2, 3]), 3)
with self.cached_session() as sess:
t = py_builtins.len_(constant_op.constant([[1], [2], [3]]))
self.assertEqual(t, 3)
ta = py_builtins.len_(tensor_array_ops.TensorArray(dtypes.int32, size=5))
self.assertEqual(self.evaluate(ta), 5)
tl = py_builtins.len_(data_structures.tf_tensor_list_new([3, 4, 5]))
self.assertEqual(self.evaluate(tl), 3)
def test_len_scalar(self):
with self.assertRaises(ValueError):
py_builtins.len_(constant_op.constant(1))
@test_util.run_deprecated_v1
def test_len_dynamic_shape(self):
with self.cached_session() as sess:
p = array_ops.placeholder(dtype=dtypes.int32, shape=None)
t = py_builtins.len_(p)
self.assertEqual(sess.run(t, {p: [1, 2, 3]}), 3)
with self.assertRaises(errors_impl.InvalidArgumentError):
t = py_builtins.len_(p)
sess.run(t, {p: 1})
@test_util.run_deprecated_v1
def test_print_tensors(self):
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
with self.cached_session() as sess:
sess.run(py_builtins.print_(constant_op.constant('test message'), 1))
self.assertEqual(out_capturer.getvalue(), 'test message 1\n')
finally:
sys.stdout = sys.__stdout__
@test_util.run_deprecated_v1
def test_print_complex(self):
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
with self.cached_session() as sess:
sess.run(
py_builtins.print_(constant_op.constant('test message'), [1, 2]))
self.assertEqual(out_capturer.getvalue(), 'test message [1, 2]\n')
finally:
sys.stdout = sys.__stdout__
def test_range(self):
self.assertListEqual(list(py_builtins.range_(3)), [0, 1, 2])
self.assertListEqual(list(py_builtins.range_(1, 3)), [1, 2])
self.assertListEqual(list(py_builtins.range_(2, 0, -1)), [2, 1])
def test_range_tensor(self):
with self.cached_session() as sess:
r = py_builtins.range_(constant_op.constant(3))
self.assertAllEqual(self.evaluate(r), [0, 1, 2])
r = py_builtins.range_(1, constant_op.constant(3))
self.assertAllEqual(self.evaluate(r), [1, 2])
r = py_builtins.range_(2, 0, constant_op.constant(-1))
self.assertAllEqual(self.evaluate(r), [2, 1])
def test_range_tensor_empty_range(self):
with self.session() as sess:
r = py_builtins.range_(constant_op.constant(-3))
self.assertAllEqual(self.evaluate(r), [])
r = py_builtins.range_(5, constant_op.constant(2))
self.assertAllEqual(self.evaluate(r), [])
def test_enumerate(self):
self.assertListEqual(
list(py_builtins.enumerate_([3, 2, 1])), [(0, 3), (1, 2), (2, 1)])
self.assertListEqual(
list(py_builtins.enumerate_([3, 2, 1], 5)), [(5, 3), (6, 2), (7, 1)])
self.assertListEqual(list(py_builtins.enumerate_([-8], -3)), [(-3, -8)])
def test_enumerate_dataset(self):
dataset = dataset_ops.DatasetV2.from_tensor_slices(['a', 'c'])
start = constant_op.constant(20, dtype=dtypes.int64)
dataset = py_builtins.enumerate_(dataset, start)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), (20, b'a'))
self.assertAllEqual(self.evaluate(iterator.get_next()), (21, b'c'))
def test_zip(self):
self.assertListEqual(
list(py_builtins.zip_([3, 2, 1], [1, 2, 3])), [(3, 1), (2, 2), (1, 3)])
self.assertListEqual(
list(py_builtins.zip_([4, 5, 6], [-1, -2])), [(4, -1), (5, -2)])
def test_zip_dataset(self):
ds1 = dataset_ops.DatasetV2.from_tensor_slices([-11, -12, 4])
ds2 = dataset_ops.DatasetV2.from_tensor_slices([-21, -22, 5])
ds3 = py_builtins.zip_(ds1, ds2)
iterator = dataset_ops.make_one_shot_iterator(ds3)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), (-11, -21))
self.assertAllEqual(self.evaluate(iterator.get_next()), (-12, -22))
self.assertAllEqual(self.evaluate(iterator.get_next()), (4, 5))
def test_map(self):
def increment(x):
return x + 1
add_list = lambda x, y: x + y
self.assertListEqual(
list(py_builtins.map_(increment, [4, 5, 6])), [5, 6, 7])
self.assertListEqual(
list(py_builtins.map_(add_list, [3, 2, 1], [-1, -2, -3])), [2, 0, -2])
def test_map_dataset(self):
def increment(x):
return x + 1
ds1 = dataset_ops.DatasetV2.from_tensor_slices([4, 5, 6])
ds2 = py_builtins.map_(increment, ds1)
iterator = dataset_ops.make_one_shot_iterator(ds2)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), 5)
self.assertAllEqual(self.evaluate(iterator.get_next()), 6)
self.assertAllEqual(self.evaluate(iterator.get_next()), 7)
def test_map_multiple_datasets(self):
add_list = lambda x, y: x + y
ds1 = dataset_ops.DatasetV2.from_tensor_slices([-11, -12, 4])
ds2 = dataset_ops.DatasetV2.from_tensor_slices([-21, -22, 5])
ds3 = py_builtins.map_(add_list, ds1, ds2)
iterator = dataset_ops.make_one_shot_iterator(ds3)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), -32)
self.assertAllEqual(self.evaluate(iterator.get_next()), -34)
self.assertAllEqual(self.evaluate(iterator.get_next()), 9)
def test_next_normal(self):
iterator = iter([1, 2, 3])
self.assertEqual(py_builtins.next_(iterator), 1)
self.assertEqual(py_builtins.next_(iterator), 2)
self.assertEqual(py_builtins.next_(iterator), 3)
with self.assertRaises(StopIteration):
py_builtins.next_(iterator)
self.assertEqual(py_builtins.next_(iterator, 4), 4)
def test_next_tf_iterator(self):
# graph-mode iterators are only supported inside tf.function.
@def_function.function(autograph=False)
def test_fn(go_out_of_range, with_default):
iterator = iter(dataset_ops.Dataset.range(3))
retval = (
py_builtins.next_(iterator),
py_builtins.next_(iterator),
py_builtins.next_(iterator),
)
if go_out_of_range:
if with_default:
retval += (
py_builtins.next_(iterator,
constant_op.constant(-3, dtype=dtypes.int64)),
py_builtins.next_(iterator,
constant_op.constant(-4, dtype=dtypes.int64)),
)
else:
py_builtins.next_(iterator)
return retval
self.assertAllEqual(
self.evaluate(test_fn(go_out_of_range=False, with_default=None)),
(0, 1, 2))
self.assertAllEqual(
self.evaluate(test_fn(go_out_of_range=True, with_default=True)),
(0, 1, 2, -3, -4))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(test_fn(go_out_of_range=True, with_default=False))
def test_next_tf_iterator_error_checking(self):
# graph-mode iterators are only supported inside tf.function.
@def_function.function(autograph=False)
def test_fn():
iterator = iter(dataset_ops.Dataset.range(1))
py_builtins.next_(iterator)
py_builtins.next_(iterator, constant_op.constant(-3))
# Dataset.range defaults to int64,
with self.assertRaisesRegex(TypeError, 'default.*int64'):
self.evaluate(test_fn())
def test_next_tf_iterator_error_checking_structures(self):
# graph-mode iterators are only supported inside tf.function.
@def_function.function(autograph=False)
def test_fn(default_val):
ds = dataset_ops.Dataset.range(1)
ds = ds.map(lambda i: {'a': i + 1, 'b': i + 10})
iterator = iter(ds)
py_builtins.next_(iterator)
py_builtins.next_(iterator, default_val)
default = {
'a': constant_op.constant(3, dtype=dtypes.int64),
}
with self.assertRaisesRegex(TypeError, 'same element structure'):
test_fn(default)
default = {
'a': constant_op.constant(3.0),
'b': [constant_op.constant(30), constant_op.constant(300)]
}
with self.assertRaisesRegex(TypeError, 'same element structure'):
test_fn(default)
default = {
'a': constant_op.constant(3.0),
'b': constant_op.constant(30, dtype=dtypes.int64),
}
with self.assertRaisesRegex(TypeError, 'float32'):
test_fn(default)
def _basic_function_scope(self):
return function_wrappers.FunctionScope(
'test_function_name',
'test_scope', # Note: this must match the name in the `with` statement.
converter.ConversionOptions())
def test_eval_in_original_context(self):
def test_fn():
l = 1 # pylint:disable=unused-variable
with self._basic_function_scope() as test_scope:
return py_builtins.eval_in_original_context(eval, ('l',), test_scope)
self.assertEqual(test_fn(), 1)
def test_eval_in_original_context_inner_function(self):
def test_fn():
l = 1 # pylint:disable=unused-variable
with self._basic_function_scope() as test_scope:
def inner_fn():
# Note: a user function without a top-level function scope should
# never be found in user code; it's only possible in generated code.
l = 2 # pylint:disable=unused-variable
return py_builtins.eval_in_original_context(eval, ('l',), test_scope)
return inner_fn()
self.assertEqual(test_fn(), 2)
def test_super_in_original_context_unary_call(self):
test_case_self = self
class TestSubclass(TestBase):
def plus_twenty(self, x):
test_case_self.fail('This should never be called.')
def test_method(self):
with test_case_self._basic_function_scope() as test_scope:
test_base_unbound = py_builtins.super_in_original_context(
super, (TestSubclass,), test_scope)
test_base = test_base_unbound.__get__(self, TestSubclass)
return test_base.plus_twenty(1)
tc = TestSubclass()
self.assertEqual(tc.test_method(), 21)
def test_super_in_original_context_binary_call(self):
test_case_self = self
class TestSubclass(TestBase):
def plus_twenty(self, x):
test_case_self.fail('This should never be called.')
def test_method(self):
with test_case_self._basic_function_scope() as test_scope:
test_base = py_builtins.super_in_original_context(
super, (TestSubclass, self), test_scope)
return test_base.plus_twenty(1)
tc = TestSubclass()
self.assertEqual(tc.test_method(), 21)
def test_filter(self):
self.assertListEqual(
list(py_builtins.filter_(lambda x: x == 'b', ['a', 'b', 'c'])), ['b'])
self.assertListEqual(
list(py_builtins.filter_(lambda x: x < 3, [3, 2, 1])), [2, 1])
def test_filter_dataset(self):
dataset = dataset_ops.DatasetV2.from_tensor_slices([3, 2, 1])
dataset = py_builtins.filter_(lambda x: x < 3, dataset)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), 2)
self.assertAllEqual(self.evaluate(iterator.get_next()), 1)
def test_any(self):
self.assertEqual(py_builtins.any_([False, True, False]), True)
self.assertEqual(py_builtins.any_([False, False, False]), False)
def test_any_dataset(self):
dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([False, False, False])
self.assertEqual(self.evaluate(py_builtins.any_(dataset_1)), True)
self.assertEqual(self.evaluate(py_builtins.any_(dataset_2)), False)
dataset_3 = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2])
with self.assertRaises(ValueError):
py_builtins.any_(dataset_3)
dataset_4 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_zipped = dataset_ops.DatasetV2.zip((dataset_4, dataset_4))
with self.assertRaises(ValueError):
py_builtins.any_(dataset_zipped)
dataset_mixed = dataset_ops.DatasetV2.zip((dataset_3, dataset_4))
with self.assertRaises(ValueError):
py_builtins.any_(dataset_mixed)
def test_all(self):
self.assertEqual(py_builtins.all_([False, True, False]), False)
self.assertEqual(py_builtins.all_([True, True, True]), True)
def test_all_dataset(self):
dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([True, True, True])
self.assertEqual(self.evaluate(py_builtins.all_(dataset_1)), False)
self.assertEqual(self.evaluate(py_builtins.all_(dataset_2)), True)
dataset_3 = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2])
with self.assertRaises(ValueError):
py_builtins.all_(dataset_3)
dataset_4 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_zipped = dataset_ops.DatasetV2.zip((dataset_4, dataset_4))
with self.assertRaises(ValueError):
py_builtins.all_(dataset_zipped)
dataset_mixed = dataset_ops.DatasetV2.zip((dataset_3, dataset_4))
with self.assertRaises(ValueError):
py_builtins.all_(dataset_mixed)
def test_sorted(self):
self.assertListEqual(py_builtins.sorted_([2, 3, 1]), [1, 2, 3])
self.assertListEqual(
py_builtins.sorted_([2, 3, 1], key=lambda x: -x), [3, 2, 1])
self.assertListEqual(
py_builtins.sorted_([2, 3, 1], reverse=True), [3, 2, 1])
self.assertListEqual(
py_builtins.sorted_([2, 3, 1], key=lambda x: -x, reverse=True),
[1, 2, 3])
self.assertAllEqual(
py_builtins.sorted_([[4, 3], [2, 1]], key=lambda x: sum(x)),
[[2, 1], [4, 3]])
def test_sorted_tensor(self):
  """sorted_ on tensors: 1-D works directly; 2-D requires a per-row scalar key."""
  iterable_1 = constant_op.constant([2, 3, 1])
  self.assertListEqual(
      list(self.evaluate(py_builtins.sorted_(iterable_1))), [1, 2, 3])
  self.assertListEqual(
      list(self.evaluate(py_builtins.sorted_(iterable_1, key=lambda x: -x))),
      [3, 2, 1])
  self.assertListEqual(
      list(self.evaluate(py_builtins.sorted_(iterable_1, reverse=True))),
      [3, 2, 1])
  # Negated key combined with reverse=True restores ascending order.
  self.assertListEqual(
      list(
          self.evaluate(
              py_builtins.sorted_(iterable_1, key=lambda x: -x,
                                  reverse=True))), [1, 2, 3])
  # 2-D tensors cannot be sorted without a key that reduces each row to a
  # scalar; both the keyless and element-wise-key forms must raise.
  iterable_2 = constant_op.constant([[4, 3], [2, 1]])
  with self.assertRaises(ValueError):
    py_builtins.sorted_(iterable_2)
  with self.assertRaises(ValueError):
    py_builtins.sorted_(iterable_2, key=lambda x: -x)
  # A reducing key (row sum) makes 2-D sorting well-defined.
  self.assertAllEqual(
      list(
          self.evaluate(
              py_builtins.sorted_(
                  iterable_2, key=lambda x: math_ops.reduce_sum(x)))),
      [[2, 1], [4, 3]])
if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  test.main()
Add test case for builtin len() support of tf.data.Dataset in autograph
Signed-off-by: Yong Tang <765086fe2e0c1f980161f127fec596800f327f62@outlook.com>
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_builtins module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import six
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.core import function_wrappers
from tensorflow.python.autograph.operators import data_structures
from tensorflow.python.autograph.operators import py_builtins
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.platform import test
class TestBase(object):
  """Minimal base class used to exercise super() resolution in the tests."""

  def plus_twenty(self, x):
    """Return x + 20; overridden by test subclasses to detect dispatch."""
    return x + 20
class PyBuiltinsTest(test.TestCase):
def test_abs(self):
self.assertEqual(py_builtins.abs_(-1), 1)
with self.cached_session() as sess:
t = py_builtins.abs_(constant_op.constant(-1))
self.assertEqual(self.evaluate(t), 1)
t = py_builtins.abs_(constant_op.constant([-1, 2, -3]))
self.assertAllEqual(self.evaluate(t), [1, 2, 3])
def test_abs_dataset(self):
dataset = dataset_ops.DatasetV2.from_tensor_slices([-1, 2, 3])
dataset = py_builtins.abs_(dataset)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), 1)
self.assertAllEqual(self.evaluate(iterator.get_next()), 2)
self.assertAllEqual(self.evaluate(iterator.get_next()), 3)
def test_abs_dataset_zipped(self):
dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([-1, 2, 3])
dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([1, -2, 3])
dataset = dataset_ops.DatasetV2.zip((dataset_1, dataset_2))
dataset = py_builtins.abs_(dataset)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), (1, 1))
self.assertAllEqual(self.evaluate(iterator.get_next()), (2, 2))
self.assertAllEqual(self.evaluate(iterator.get_next()), (3, 3))
def test_abs_dataset_mixed(self):
dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([-1, 2, 3])
dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([1, -2, 3])
dataset_3 = dataset_ops.DatasetV2.from_tensor_slices([-1, -2, -3])
dataset_4 = dataset_ops.DatasetV2.zip((dataset_1, dataset_2))
dataset = dataset_ops.DatasetV2.zip((dataset_3, dataset_4))
dataset = py_builtins.abs_(dataset)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
for i in range(1, 4):
actual = self.evaluate(iterator.get_next())
self.assertAllEqual(actual[0], i)
self.assertAllEqual(actual[1], (i, i))
def test_float(self):
self.assertEqual(py_builtins.float_(10), 10.0)
self.assertEqual(py_builtins.float_('10.0'), 10.0)
with self.cached_session() as sess:
t = py_builtins.float_(constant_op.constant(1, dtype=dtypes.int64))
self.assertEqual(self.evaluate(t), 1.0)
st = py_builtins.float_(constant_op.constant('1.0'))
self.assertEqual(self.evaluate(st), 1.0)
def test_int(self):
self.assertEqual(py_builtins.int_(10.0), 10)
self.assertEqual(py_builtins.int_('11', 2), 3)
with self.cached_session() as sess:
t = py_builtins.int_(constant_op.constant(1, dtype=dtypes.float64))
self.assertEqual(self.evaluate(t), 1)
st = py_builtins.int_(constant_op.constant('1'))
self.assertEqual(self.evaluate(st), 1)
st = py_builtins.int_(constant_op.constant('1'), 10)
self.assertEqual(self.evaluate(st), 1)
def test_int_unsupported_base(self):
t = constant_op.constant(1, dtype=dtypes.float64)
with self.assertRaises(NotImplementedError):
py_builtins.int_(t, 2)
def test_len(self):
self.assertEqual(py_builtins.len_([1, 2, 3]), 3)
with self.cached_session() as sess:
t = py_builtins.len_(constant_op.constant([[1], [2], [3]]))
self.assertEqual(t, 3)
ta = py_builtins.len_(tensor_array_ops.TensorArray(dtypes.int32, size=5))
self.assertEqual(self.evaluate(ta), 5)
tl = py_builtins.len_(data_structures.tf_tensor_list_new([3, 4, 5]))
self.assertEqual(self.evaluate(tl), 3)
def test_len_dataset(self):
dataset = dataset_ops.DatasetV2.from_tensor_slices([3, 2, 1])
self.assertEqual(py_builtins.len_(dataset), 3)
with self.cached_session() as sess:
t = py_builtins.len_(dataset)
self.assertAllEqual(self.evaluate(t), 3)
def test_len_scalar(self):
with self.assertRaises(ValueError):
py_builtins.len_(constant_op.constant(1))
@test_util.run_deprecated_v1
def test_len_dynamic_shape(self):
with self.cached_session() as sess:
p = array_ops.placeholder(dtype=dtypes.int32, shape=None)
t = py_builtins.len_(p)
self.assertEqual(sess.run(t, {p: [1, 2, 3]}), 3)
with self.assertRaises(errors_impl.InvalidArgumentError):
t = py_builtins.len_(p)
sess.run(t, {p: 1})
@test_util.run_deprecated_v1
def test_print_tensors(self):
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
with self.cached_session() as sess:
sess.run(py_builtins.print_(constant_op.constant('test message'), 1))
self.assertEqual(out_capturer.getvalue(), 'test message 1\n')
finally:
sys.stdout = sys.__stdout__
@test_util.run_deprecated_v1
def test_print_complex(self):
try:
out_capturer = six.StringIO()
sys.stdout = out_capturer
with self.cached_session() as sess:
sess.run(
py_builtins.print_(constant_op.constant('test message'), [1, 2]))
self.assertEqual(out_capturer.getvalue(), 'test message [1, 2]\n')
finally:
sys.stdout = sys.__stdout__
def test_range(self):
self.assertListEqual(list(py_builtins.range_(3)), [0, 1, 2])
self.assertListEqual(list(py_builtins.range_(1, 3)), [1, 2])
self.assertListEqual(list(py_builtins.range_(2, 0, -1)), [2, 1])
def test_range_tensor(self):
with self.cached_session() as sess:
r = py_builtins.range_(constant_op.constant(3))
self.assertAllEqual(self.evaluate(r), [0, 1, 2])
r = py_builtins.range_(1, constant_op.constant(3))
self.assertAllEqual(self.evaluate(r), [1, 2])
r = py_builtins.range_(2, 0, constant_op.constant(-1))
self.assertAllEqual(self.evaluate(r), [2, 1])
def test_range_tensor_empty_range(self):
with self.session() as sess:
r = py_builtins.range_(constant_op.constant(-3))
self.assertAllEqual(self.evaluate(r), [])
r = py_builtins.range_(5, constant_op.constant(2))
self.assertAllEqual(self.evaluate(r), [])
def test_enumerate(self):
self.assertListEqual(
list(py_builtins.enumerate_([3, 2, 1])), [(0, 3), (1, 2), (2, 1)])
self.assertListEqual(
list(py_builtins.enumerate_([3, 2, 1], 5)), [(5, 3), (6, 2), (7, 1)])
self.assertListEqual(list(py_builtins.enumerate_([-8], -3)), [(-3, -8)])
def test_enumerate_dataset(self):
dataset = dataset_ops.DatasetV2.from_tensor_slices(['a', 'c'])
start = constant_op.constant(20, dtype=dtypes.int64)
dataset = py_builtins.enumerate_(dataset, start)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), (20, b'a'))
self.assertAllEqual(self.evaluate(iterator.get_next()), (21, b'c'))
def test_zip(self):
self.assertListEqual(
list(py_builtins.zip_([3, 2, 1], [1, 2, 3])), [(3, 1), (2, 2), (1, 3)])
self.assertListEqual(
list(py_builtins.zip_([4, 5, 6], [-1, -2])), [(4, -1), (5, -2)])
def test_zip_dataset(self):
ds1 = dataset_ops.DatasetV2.from_tensor_slices([-11, -12, 4])
ds2 = dataset_ops.DatasetV2.from_tensor_slices([-21, -22, 5])
ds3 = py_builtins.zip_(ds1, ds2)
iterator = dataset_ops.make_one_shot_iterator(ds3)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), (-11, -21))
self.assertAllEqual(self.evaluate(iterator.get_next()), (-12, -22))
self.assertAllEqual(self.evaluate(iterator.get_next()), (4, 5))
def test_map(self):
def increment(x):
return x + 1
add_list = lambda x, y: x + y
self.assertListEqual(
list(py_builtins.map_(increment, [4, 5, 6])), [5, 6, 7])
self.assertListEqual(
list(py_builtins.map_(add_list, [3, 2, 1], [-1, -2, -3])), [2, 0, -2])
def test_map_dataset(self):
def increment(x):
return x + 1
ds1 = dataset_ops.DatasetV2.from_tensor_slices([4, 5, 6])
ds2 = py_builtins.map_(increment, ds1)
iterator = dataset_ops.make_one_shot_iterator(ds2)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), 5)
self.assertAllEqual(self.evaluate(iterator.get_next()), 6)
self.assertAllEqual(self.evaluate(iterator.get_next()), 7)
def test_map_multiple_datasets(self):
add_list = lambda x, y: x + y
ds1 = dataset_ops.DatasetV2.from_tensor_slices([-11, -12, 4])
ds2 = dataset_ops.DatasetV2.from_tensor_slices([-21, -22, 5])
ds3 = py_builtins.map_(add_list, ds1, ds2)
iterator = dataset_ops.make_one_shot_iterator(ds3)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), -32)
self.assertAllEqual(self.evaluate(iterator.get_next()), -34)
self.assertAllEqual(self.evaluate(iterator.get_next()), 9)
def test_next_normal(self):
iterator = iter([1, 2, 3])
self.assertEqual(py_builtins.next_(iterator), 1)
self.assertEqual(py_builtins.next_(iterator), 2)
self.assertEqual(py_builtins.next_(iterator), 3)
with self.assertRaises(StopIteration):
py_builtins.next_(iterator)
self.assertEqual(py_builtins.next_(iterator, 4), 4)
def test_next_tf_iterator(self):
# graph-mode iterators are only supported inside tf.function.
@def_function.function(autograph=False)
def test_fn(go_out_of_range, with_default):
iterator = iter(dataset_ops.Dataset.range(3))
retval = (
py_builtins.next_(iterator),
py_builtins.next_(iterator),
py_builtins.next_(iterator),
)
if go_out_of_range:
if with_default:
retval += (
py_builtins.next_(iterator,
constant_op.constant(-3, dtype=dtypes.int64)),
py_builtins.next_(iterator,
constant_op.constant(-4, dtype=dtypes.int64)),
)
else:
py_builtins.next_(iterator)
return retval
self.assertAllEqual(
self.evaluate(test_fn(go_out_of_range=False, with_default=None)),
(0, 1, 2))
self.assertAllEqual(
self.evaluate(test_fn(go_out_of_range=True, with_default=True)),
(0, 1, 2, -3, -4))
with self.assertRaises(errors_impl.OutOfRangeError):
self.evaluate(test_fn(go_out_of_range=True, with_default=False))
def test_next_tf_iterator_error_checking(self):
# graph-mode iterators are only supported inside tf.function.
@def_function.function(autograph=False)
def test_fn():
iterator = iter(dataset_ops.Dataset.range(1))
py_builtins.next_(iterator)
py_builtins.next_(iterator, constant_op.constant(-3))
# Dataset.range defaults to int64,
with self.assertRaisesRegex(TypeError, 'default.*int64'):
self.evaluate(test_fn())
def test_next_tf_iterator_error_checking_structures(self):
# graph-mode iterators are only supported inside tf.function.
@def_function.function(autograph=False)
def test_fn(default_val):
ds = dataset_ops.Dataset.range(1)
ds = ds.map(lambda i: {'a': i + 1, 'b': i + 10})
iterator = iter(ds)
py_builtins.next_(iterator)
py_builtins.next_(iterator, default_val)
default = {
'a': constant_op.constant(3, dtype=dtypes.int64),
}
with self.assertRaisesRegex(TypeError, 'same element structure'):
test_fn(default)
default = {
'a': constant_op.constant(3.0),
'b': [constant_op.constant(30), constant_op.constant(300)]
}
with self.assertRaisesRegex(TypeError, 'same element structure'):
test_fn(default)
default = {
'a': constant_op.constant(3.0),
'b': constant_op.constant(30, dtype=dtypes.int64),
}
with self.assertRaisesRegex(TypeError, 'float32'):
test_fn(default)
def _basic_function_scope(self):
return function_wrappers.FunctionScope(
'test_function_name',
'test_scope', # Note: this must match the name in the `with` statement.
converter.ConversionOptions())
def test_eval_in_original_context(self):
def test_fn():
l = 1 # pylint:disable=unused-variable
with self._basic_function_scope() as test_scope:
return py_builtins.eval_in_original_context(eval, ('l',), test_scope)
self.assertEqual(test_fn(), 1)
def test_eval_in_original_context_inner_function(self):
def test_fn():
l = 1 # pylint:disable=unused-variable
with self._basic_function_scope() as test_scope:
def inner_fn():
# Note: a user function without a top-level function scope should
# never be found in user code; it's only possible in generated code.
l = 2 # pylint:disable=unused-variable
return py_builtins.eval_in_original_context(eval, ('l',), test_scope)
return inner_fn()
self.assertEqual(test_fn(), 2)
def test_super_in_original_context_unary_call(self):
test_case_self = self
class TestSubclass(TestBase):
def plus_twenty(self, x):
test_case_self.fail('This should never be called.')
def test_method(self):
with test_case_self._basic_function_scope() as test_scope:
test_base_unbound = py_builtins.super_in_original_context(
super, (TestSubclass,), test_scope)
test_base = test_base_unbound.__get__(self, TestSubclass)
return test_base.plus_twenty(1)
tc = TestSubclass()
self.assertEqual(tc.test_method(), 21)
def test_super_in_original_context_binary_call(self):
test_case_self = self
class TestSubclass(TestBase):
def plus_twenty(self, x):
test_case_self.fail('This should never be called.')
def test_method(self):
with test_case_self._basic_function_scope() as test_scope:
test_base = py_builtins.super_in_original_context(
super, (TestSubclass, self), test_scope)
return test_base.plus_twenty(1)
tc = TestSubclass()
self.assertEqual(tc.test_method(), 21)
def test_filter(self):
self.assertListEqual(
list(py_builtins.filter_(lambda x: x == 'b', ['a', 'b', 'c'])), ['b'])
self.assertListEqual(
list(py_builtins.filter_(lambda x: x < 3, [3, 2, 1])), [2, 1])
def test_filter_dataset(self):
dataset = dataset_ops.DatasetV2.from_tensor_slices([3, 2, 1])
dataset = py_builtins.filter_(lambda x: x < 3, dataset)
iterator = dataset_ops.make_one_shot_iterator(dataset)
with self.cached_session() as sess:
self.assertAllEqual(self.evaluate(iterator.get_next()), 2)
self.assertAllEqual(self.evaluate(iterator.get_next()), 1)
def test_any(self):
self.assertEqual(py_builtins.any_([False, True, False]), True)
self.assertEqual(py_builtins.any_([False, False, False]), False)
def test_any_dataset(self):
dataset_1 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_2 = dataset_ops.DatasetV2.from_tensor_slices([False, False, False])
self.assertEqual(self.evaluate(py_builtins.any_(dataset_1)), True)
self.assertEqual(self.evaluate(py_builtins.any_(dataset_2)), False)
dataset_3 = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2])
with self.assertRaises(ValueError):
py_builtins.any_(dataset_3)
dataset_4 = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
dataset_zipped = dataset_ops.DatasetV2.zip((dataset_4, dataset_4))
with self.assertRaises(ValueError):
py_builtins.any_(dataset_zipped)
dataset_mixed = dataset_ops.DatasetV2.zip((dataset_3, dataset_4))
with self.assertRaises(ValueError):
py_builtins.any_(dataset_mixed)
def test_all(self):
  """Verifies that all_ matches the semantics of builtin all() on lists."""
  self.assertEqual(py_builtins.all_([True, True, True]), True)
  self.assertEqual(py_builtins.all_([False, True, False]), False)
def test_all_dataset(self):
  """all_ reduces a boolean dataset; non-boolean or structured datasets raise."""
  ds_mixed_bools = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
  ds_all_true = dataset_ops.DatasetV2.from_tensor_slices([True, True, True])
  self.assertEqual(self.evaluate(py_builtins.all_(ds_mixed_bools)), False)
  self.assertEqual(self.evaluate(py_builtins.all_(ds_all_true)), True)
  # Non-boolean element dtype is rejected.
  ds_ints = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2])
  with self.assertRaises(ValueError):
    py_builtins.all_(ds_ints)
  # Structured (zipped) datasets are rejected, even when all components
  # are boolean.
  ds_bools = dataset_ops.DatasetV2.from_tensor_slices([False, True, False])
  ds_bool_pairs = dataset_ops.DatasetV2.zip((ds_bools, ds_bools))
  with self.assertRaises(ValueError):
    py_builtins.all_(ds_bool_pairs)
  # Mixed-dtype structures are rejected as well.
  ds_int_bool = dataset_ops.DatasetV2.zip((ds_ints, ds_bools))
  with self.assertRaises(ValueError):
    py_builtins.all_(ds_int_bool)
def test_sorted(self):
  """sorted_ mirrors builtin sorted(), including key= and reverse=."""
  data = [2, 3, 1]
  self.assertListEqual(py_builtins.sorted_(data), [1, 2, 3])
  self.assertListEqual(py_builtins.sorted_(data, key=lambda v: -v), [3, 2, 1])
  self.assertListEqual(py_builtins.sorted_(data, reverse=True), [3, 2, 1])
  # key= and reverse= compose: negated key plus reverse is ascending order.
  self.assertListEqual(
      py_builtins.sorted_(data, key=lambda v: -v, reverse=True), [1, 2, 3])
  # Nested lists sort by the provided scalar key.
  self.assertAllEqual(
      py_builtins.sorted_([[4, 3], [2, 1]], key=lambda row: sum(row)),
      [[2, 1], [4, 3]])
def test_sorted_tensor(self):
  """sorted_ on tensors: 1-D works directly; 2-D requires a per-row scalar key."""
  iterable_1 = constant_op.constant([2, 3, 1])
  self.assertListEqual(
      list(self.evaluate(py_builtins.sorted_(iterable_1))), [1, 2, 3])
  self.assertListEqual(
      list(self.evaluate(py_builtins.sorted_(iterable_1, key=lambda x: -x))),
      [3, 2, 1])
  self.assertListEqual(
      list(self.evaluate(py_builtins.sorted_(iterable_1, reverse=True))),
      [3, 2, 1])
  # Negated key combined with reverse=True restores ascending order.
  self.assertListEqual(
      list(
          self.evaluate(
              py_builtins.sorted_(iterable_1, key=lambda x: -x,
                                  reverse=True))), [1, 2, 3])
  # 2-D tensors cannot be sorted without a key that reduces each row to a
  # scalar; both the keyless and element-wise-key forms must raise.
  iterable_2 = constant_op.constant([[4, 3], [2, 1]])
  with self.assertRaises(ValueError):
    py_builtins.sorted_(iterable_2)
  with self.assertRaises(ValueError):
    py_builtins.sorted_(iterable_2, key=lambda x: -x)
  # A reducing key (row sum) makes 2-D sorting well-defined.
  self.assertAllEqual(
      list(
          self.evaluate(
              py_builtins.sorted_(
                  iterable_2, key=lambda x: math_ops.reduce_sum(x)))),
      [[2, 1], [4, 3]])
if __name__ == '__main__':
  # Standard TensorFlow test entry point.
  test.main()
|
__author__ = 'jonathan'
import logging
import time
import uuid
from sqlalchemy.sql.expression import BinaryExpression
from sqlalchemy.util._collections import KeyedTuple
from lib.rome.core.dataformat import get_decoder
from lib.rome.core.lazy import LazyValue
from lib.rome.core.utils import get_objects, is_novabase
from tuples import default_panda_building_tuples as simple_building_tuples
from tuples import sql_panda_building_tuples as building_tuples
# Optional file logging: queries are mirrored to /opt/logs/rome.log when
# that path is writable; otherwise the module silently falls back to the
# standard logger only.
file_logger_enabled = False
try:
    file_logger = logging.getLogger('rome_file_logger')
    hdlr = logging.FileHandler('/opt/logs/rome.log')
    formatter = logging.Formatter('%(message)s')
    hdlr.setFormatter(formatter)
    file_logger.addHandler(hdlr)
    file_logger.setLevel(logging.INFO)
    file_logger_enabled = True
except Exception:
    # Best-effort setup: a missing or unwritable log directory must not
    # prevent the module from importing. Was a bare `except:`, which also
    # swallowed SystemExit/KeyboardInterrupt — narrowed to Exception.
    pass
def all_selectable_are_functions(models):
    """Return True when every visible (non-hidden) selectable is a function."""
    visible = [m for m in models if not m.is_hidden]
    return all(m._is_function for m in visible)
def has_attribute(obj, key):
    """Return True if obj exposes key: dict membership for dicts, hasattr otherwise."""
    # `type(obj) is dict` (not isinstance) is intentional here to match the
    # sibling accessors: dict subclasses take the attribute path.
    return key in obj if type(obj) is dict else hasattr(obj, key)
def set_attribute(obj, key, value):
    """Assign value under key on obj, treating dicts as key/value stores."""
    if type(obj) is dict:
        obj[key] = value
    else:
        setattr(obj, key, value)
def get_attribute(obj, key, default=None):
    """Read key from obj (dict lookup or attribute), falling back to default."""
    if type(obj) is dict:
        return obj.get(key, default)
    return getattr(obj, key, default)
def find_table_name(model):
    """This function return the name of the given model as a String. If the
    model cannot be identified, it returns "none".
    :param model: a model object candidate
    :return: the table name or "none" if the object cannot be identified
    """
    # Plain declarative model class.
    if has_attribute(model, "__tablename__"):
        return model.__tablename__
    # Table-like wrapper exposing a `table` object.
    if has_attribute(model, "table"):
        return model.table.name
    # Mapper-like object exposing the mapped class.
    if has_attribute(model, "class_"):
        return model.class_.__tablename__
    # Clause list: only the first clause is inspected (the loop returns on
    # its first iteration).
    if has_attribute(model, "clauses"):
        for clause in model.clauses:
            return find_table_name(clause)
    return "none"
def extract_models(l):
    """Return the non-function selectables of l, keeping only the first
    selectable seen for each distinct model (order preserved)."""
    seen_models = set()
    unique = []
    for sel in l:
        if sel._is_function:
            continue
        if sel._model in seen_models:
            continue
        seen_models.add(sel._model)
        unique.append(sel)
    return unique
def extract_sub_row(row, selectables):
    """Adapt a row result to the expectation of sqlalchemy.
    :param row: a list of python objects
    :param selectables: a list entity class
    :return: the response follows what is required by sqlalchemy (if len(model)==1, a single object is fine, in
    the other case, a KeyTuple where each sub object is associated with it's entity name
    """
    if len(selectables) > 1:
        # Multi-entity row: rebuild a KeyedTuple keyed by table name so the
        # caller can address each sub-object by its entity label.
        labels = []
        for selectable in selectables:
            labels += [find_table_name(selectable._model)]
        product = []
        for label in labels:
            product = product + [get_attribute(row, label)]
        return KeyedTuple(product, labels=labels)
    else:
        # Single entity: return the bare object rather than a 1-tuple.
        model_name = find_table_name(selectables[0]._model)
        return get_attribute(row, model_name)
def intersect(b1, b2):
    """Return the elements of b1 that also appear in b2, in b1's order
    (duplicates in b1 are kept)."""
    common = []
    for item in b1:
        if item in b2:
            common.append(item)
    return common
def flatten(lis):
    """Given a list, possibly nested to any level, return it flattened.

    Only ``list`` values are descended into; tuples, sets and other
    iterables are kept as atoms. Uses ``isinstance`` rather than the
    original ``type(item) == type([])`` so list subclasses are flattened
    too (a backward-compatible generalization).
    """
    flat = []
    for item in lis:
        if isinstance(item, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
# def flatten(l):
# return [item for sublist in l for item in sublist]
def extract_table_data(term):
    """Parse "table.column" out of term's string form.

    Returns {"table": ..., "column": ...} or None when there is no dot.
    With more than one dot, only the first two segments are used (matching
    the original indexing behaviour).
    """
    text = str(term)
    if "." not in text:
        return None
    parts = text.split(".")
    return {"table": parts[0], "column": parts[1]}
def extract_joining_criterion(exp):
    """Extract [left, right] table/column pairs from a join expression.

    BooleanExpressions recurse into each sub-expression (yielding a nested
    list), BinaryExpressions yield a single pair, and anything else yields
    an empty list. Callers are expected to flatten the result.
    """
    # Imported here to avoid a circular import at module load time.
    from lib.rome.core.expression.expression import BooleanExpression
    if type(exp) is BooleanExpression:
        return map(lambda x:extract_joining_criterion(x), exp.exps)
    elif type(exp) is BinaryExpression:
        return [[extract_table_data(exp.left)] + [extract_table_data(exp.right)]]
    else:
        return []
def extract_joining_criterion_from_relationship(rel, local_table):
    """Build the [local, remote] join-criterion pair for a relationship."""
    return [
        {"table": local_table, "column": rel.local_fk_field},
        {"table": rel.remote_object_tablename, "column": rel.remote_object_field},
    ]
def wrap_with_lazy_value(value, only_if_necessary=True, request_uuid=None):
    """Wrap a stored value in a LazyValue proxy for deferred decoding."""
    if value is None:
        return None
    # Primitives need no lazy wrapper; "unicode" in the list marks this as
    # Python-2 code.
    if only_if_necessary and type(value).__name__ in ["int", "str", "float", "unicode"]:
        return value
    # Dicts carrying a "timezone" key are decoded eagerly — presumably
    # serialized datetimes; confirm against the dataformat decoder.
    elif type(value) is dict and "timezone" in value:
        decoder = get_decoder()
        return decoder.desimplify(value)
    else:
        return LazyValue(value, request_uuid)
# def wrap_with_lazy_value(value, only_if_necessary=True, request_uuid=None):
# return LazyValue(value, request_uuid)
def construct_rows(models, criterions, hints, session=None, request_uuid=None):
    """This function constructs the rows that corresponds to the current orm.
    :return: a list of row, according to sqlalchemy expectation

    Each phase below is timed (part*_starttime) and the durations are
    logged as a JSON line at the end.
    """
    current_milli_time = lambda: int(round(time.time() * 1000))
    part1_starttime = current_milli_time()
    # Reuse the caller's request id when provided so that nested calls can
    # share decoding caches.
    if request_uuid is None:
        request_uuid = uuid.uuid1()
    else:
        request_uuid = request_uuid
    labels = []
    columns = set([])
    rows = []
    # Distinct, non-function selectables to join over.
    model_set = extract_models(models)
    """ Get the fields of the join result """
    for selectable in model_set:
        labels += [find_table_name(selectable._model)]
        if selectable._attributes == "*":
            # "*" means every mapped attribute; fall back to the mapper's
            # class when the model itself has no _sa_class_manager.
            try:
                selected_attributes = selectable._model._sa_class_manager
            except:
                selected_attributes = selectable._model.class_._sa_class_manager
                pass
        else:
            selected_attributes = [selectable._attributes]
        for field in selected_attributes:
            attribute = None
            if has_attribute(models, "class_"):
                attribute = selectable._model.class_._sa_class_manager[field].__str__()
            elif has_attribute(models, "_sa_class_manager"):
                attribute = selectable._model._sa_class_manager[field].__str__()
            if attribute is not None:
                columns.add(attribute)
    part2_starttime = current_milli_time()
    """ Loading objects (from database) """
    list_results = []
    for selectable in model_set:
        tablename = find_table_name(selectable._model)
        authorized_secondary_indexes = get_attribute(selectable._model, "_secondary_indexes", [])
        # tablename = selectable._model.__tablename__
        # authorized_secondary_indexes = SECONDARY_INDEXES[tablename] if tablename in SECONDARY_INDEXES else []
        # Only hints on the primary key or a declared secondary index can
        # be pushed down to the object store.
        selected_hints = filter(lambda x: x.table_name == tablename and (x.attribute == "id" or x.attribute in authorized_secondary_indexes), hints)
        reduced_hints = map(lambda x:(x.attribute, x.value), selected_hints)
        objects = get_objects(tablename, request_uuid=request_uuid, skip_loading=False, hints=reduced_hints)
        list_results += [objects]
    part3_starttime = current_milli_time()
    """ Building tuples """
    # Multi-table queries go through the SQL-style join builder; a single
    # table uses the simple builder.
    if len(list_results) > 1:
        tuples = building_tuples(list_results, labels, criterions, hints)
    else:
        tuples = simple_building_tuples(list_results, labels, criterions, hints)
    part4_starttime = current_milli_time()
    """ Filtering tuples (cartesian product) """
    indexed_rows = {}
    for product in tuples:
        if len(product) > 0:
            row = KeyedTuple(product, labels=labels)
            # De-duplicate rows by their string representation.
            row_index_key = "%s" % (str(row))
            if row_index_key in indexed_rows:
                continue
            # NOTE(review): criterion evaluation is commented out below, so
            # every unique tuple currently passes — the tuple builders are
            # presumably expected to filter; confirm before re-enabling.
            all_criterions_satisfied = True
            # for criterion in criterions:
            #     if not criterion.is_joining_expression and not criterion.evaluate(row):
            #         all_criterions_satisfied = False
            #         break
            if all_criterions_satisfied:
                indexed_rows[row_index_key] = True
                rows += [extract_sub_row(row, model_set)]
    part5_starttime = current_milli_time()
    deconverter = get_decoder(request_uuid=request_uuid)
    """ Reordering tuples (+ selecting attributes) """
    final_rows = []
    showable_selection = [x for x in models if (not x.is_hidden) or x._is_function]
    part6_starttime = current_milli_time()
    """ Selecting attributes """
    if all_selectable_are_functions(models):
        # Aggregate-only query (e.g. count): apply each function to the
        # whole row set and return a single result row.
        final_row = []
        for selection in showable_selection:
            value = selection._function._function(rows)
            final_row += [value]
        final_row = map(lambda x: deconverter.desimplify(x), final_row)
        return [final_row]
    else:
        for row in rows:
            final_row = []
            for selection in showable_selection:
                if selection._is_function:
                    value = selection._function._function(rows)
                    final_row += [value]
                else:
                    current_table_name = find_table_name(selection._model)
                    key = current_table_name
                    # Multi-entity rows expose sub-objects under their
                    # table name; single-entity rows are the object itself.
                    if not is_novabase(row) and has_attribute(row, key):
                        value = get_attribute(row, key)
                    else:
                        value = row
                    if value is not None:
                        if selection._attributes != "*":
                            final_row += [get_attribute(value, selection._attributes)]
                        else:
                            final_row += [value]
            # Defer decoding of non-primitive values via LazyValue proxies.
            final_row = map(lambda x: wrap_with_lazy_value(x, request_uuid=request_uuid), final_row)
            # Single selectable: rows are bare objects, not 1-lists.
            if len(showable_selection) == 1:
                final_rows += final_row
            else:
                final_rows += [final_row]
    part7_starttime = current_milli_time()
    # Per-phase timing report, emitted as a JSON line.
    query_information = """{"building_query": %s, "loading_objects": %s, "building_tuples": %s, "filtering_tuples": %s, "reordering_columns": %s, "selecting_attributes": %s, "description": "%s", "timestamp": %i}""" % (
        part2_starttime - part1_starttime,
        part3_starttime - part2_starttime,
        part4_starttime - part3_starttime,
        part5_starttime - part4_starttime,
        part6_starttime - part5_starttime,
        part7_starttime - part6_starttime,
        """{\\"models\\": \\"%s\\", \\"criterions\\": \\"%s\\"}""" % (models, criterions),
        current_milli_time()
    )
    logging.info(query_information)
    if file_logger_enabled:
        file_logger.info(query_information)
    return final_rows
Add alternative join.
__author__ = 'jonathan'
import logging
import time
import uuid
from sqlalchemy.sql.expression import BinaryExpression
from sqlalchemy.util._collections import KeyedTuple
from lib.rome.core.dataformat import get_decoder
from lib.rome.core.lazy import LazyValue
from lib.rome.core.utils import get_objects, is_novabase
from tuples import default_panda_building_tuples as simple_building_tuples
from tuples import sql_panda_building_tuples as building_tuples
# Optional file logging: queries are mirrored to /opt/logs/rome.log when
# that path is writable; otherwise the module silently falls back to the
# standard logger only.
file_logger_enabled = False
try:
    file_logger = logging.getLogger('rome_file_logger')
    hdlr = logging.FileHandler('/opt/logs/rome.log')
    formatter = logging.Formatter('%(message)s')
    hdlr.setFormatter(formatter)
    file_logger.addHandler(hdlr)
    file_logger.setLevel(logging.INFO)
    file_logger_enabled = True
except Exception:
    # Best-effort setup: a missing or unwritable log directory must not
    # prevent the module from importing. Was a bare `except:`, which also
    # swallowed SystemExit/KeyboardInterrupt — narrowed to Exception.
    pass
def all_selectable_are_functions(models):
    """Return True when every visible (non-hidden) selectable is a function."""
    visible = [m for m in models if not m.is_hidden]
    return all(m._is_function for m in visible)
def has_attribute(obj, key):
    """Return True if obj exposes key: dict membership for dicts, hasattr otherwise."""
    # `type(obj) is dict` (not isinstance) is intentional here to match the
    # sibling accessors: dict subclasses take the attribute path.
    return key in obj if type(obj) is dict else hasattr(obj, key)
def set_attribute(obj, key, value):
    """Assign value under key on obj, treating dicts as key/value stores."""
    if type(obj) is dict:
        obj[key] = value
    else:
        setattr(obj, key, value)
def get_attribute(obj, key, default=None):
    """Read key from obj (dict lookup or attribute), falling back to default."""
    if type(obj) is dict:
        return obj.get(key, default)
    return getattr(obj, key, default)
def find_table_name(model):
    """This function return the name of the given model as a String. If the
    model cannot be identified, it returns "none".
    :param model: a model object candidate
    :return: the table name or "none" if the object cannot be identified
    """
    # Plain declarative model class.
    if has_attribute(model, "__tablename__"):
        return model.__tablename__
    # Table-like wrapper exposing a `table` object.
    if has_attribute(model, "table"):
        return model.table.name
    # Mapper-like object exposing the mapped class.
    if has_attribute(model, "class_"):
        return model.class_.__tablename__
    # Clause list: only the first clause is inspected (the loop returns on
    # its first iteration).
    if has_attribute(model, "clauses"):
        for clause in model.clauses:
            return find_table_name(clause)
    return "none"
def extract_models(l):
    """Return the non-function selectables of l, keeping only the first
    selectable seen for each distinct model (order preserved)."""
    seen_models = set()
    unique = []
    for sel in l:
        if sel._is_function:
            continue
        if sel._model in seen_models:
            continue
        seen_models.add(sel._model)
        unique.append(sel)
    return unique
def extract_sub_row(row, selectables):
    """Adapt a row result to the expectation of sqlalchemy.
    :param row: a list of python objects
    :param selectables: a list entity class
    :return: the response follows what is required by sqlalchemy (if len(model)==1, a single object is fine, in
    the other case, a KeyTuple where each sub object is associated with it's entity name
    """
    if len(selectables) > 1:
        # Multi-entity row: rebuild a KeyedTuple keyed by table name so the
        # caller can address each sub-object by its entity label.
        labels = []
        for selectable in selectables:
            labels += [find_table_name(selectable._model)]
        product = []
        for label in labels:
            product = product + [get_attribute(row, label)]
        return KeyedTuple(product, labels=labels)
    else:
        # Single entity: return the bare object rather than a 1-tuple.
        model_name = find_table_name(selectables[0]._model)
        return get_attribute(row, model_name)
def intersect(b1, b2):
    """Return the elements of b1 that also appear in b2, in b1's order
    (duplicates in b1 are kept)."""
    common = []
    for item in b1:
        if item in b2:
            common.append(item)
    return common
def flatten(lis):
    """Given a list, possibly nested to any level, return it flattened.

    Only ``list`` values are descended into; tuples, sets and other
    iterables are kept as atoms. Uses ``isinstance`` rather than the
    original ``type(item) == type([])`` so list subclasses are flattened
    too (a backward-compatible generalization).
    """
    flat = []
    for item in lis:
        if isinstance(item, list):
            flat.extend(flatten(item))
        else:
            flat.append(item)
    return flat
# def flatten(l):
# return [item for sublist in l for item in sublist]
def extract_table_data(term):
    """Parse "table.column" out of term's string form.

    Returns {"table": ..., "column": ...} or None when there is no dot.
    With more than one dot, only the first two segments are used (matching
    the original indexing behaviour).
    """
    text = str(term)
    if "." not in text:
        return None
    parts = text.split(".")
    return {"table": parts[0], "column": parts[1]}
def extract_joining_criterion(exp):
    """Extract [left, right] table/column pairs from a join expression.

    BooleanExpressions recurse into each sub-expression (yielding a nested
    list), BinaryExpressions yield a single pair, and anything else yields
    an empty list. Callers are expected to flatten the result.
    """
    # Imported here to avoid a circular import at module load time.
    from lib.rome.core.expression.expression import BooleanExpression
    if type(exp) is BooleanExpression:
        return map(lambda x:extract_joining_criterion(x), exp.exps)
    elif type(exp) is BinaryExpression:
        return [[extract_table_data(exp.left)] + [extract_table_data(exp.right)]]
    else:
        return []
def extract_joining_criterion_from_relationship(rel, local_table):
    """Build the [local, remote] join-criterion pair for a relationship."""
    return [
        {"table": local_table, "column": rel.local_fk_field},
        {"table": rel.remote_object_tablename, "column": rel.remote_object_field},
    ]
def wrap_with_lazy_value(value, only_if_necessary=True, request_uuid=None):
    """Wrap a stored value in a LazyValue proxy for deferred decoding."""
    if value is None:
        return None
    # Primitives need no lazy wrapper; "unicode" in the list marks this as
    # Python-2 code.
    if only_if_necessary and type(value).__name__ in ["int", "str", "float", "unicode"]:
        return value
    # Dicts carrying a "timezone" key are decoded eagerly — presumably
    # serialized datetimes; confirm against the dataformat decoder.
    elif type(value) is dict and "timezone" in value:
        decoder = get_decoder()
        return decoder.desimplify(value)
    else:
        return LazyValue(value, request_uuid)
# def wrap_with_lazy_value(value, only_if_necessary=True, request_uuid=None):
# return LazyValue(value, request_uuid)
def construct_rows(models, criterions, hints, session=None, request_uuid=None):
    """This function constructs the rows that corresponds to the current orm.

    Args:
        models: the selectables (models, attributes or functions) of the query.
        criterions: filtering/joining criteria extracted from the query.
        hints: per-table (attribute, value) hints used to narrow object loading.
        session: unused in this function; kept for interface compatibility.
        request_uuid: identifier shared across sub-requests; generated when None.
    :return: a list of row, according to sqlalchemy expectation
    """
    # Millisecond timestamps taken between phases for the profiling log below.
    current_milli_time = lambda: int(round(time.time() * 1000))
    part1_starttime = current_milli_time()
    if request_uuid is None:
        request_uuid = uuid.uuid1()
    else:
        request_uuid = request_uuid  # NOTE(review): no-op branch.
    labels = []
    columns = set([])
    rows = []
    model_set = extract_models(models)
    """ Get the fields of the join result """
    for selectable in model_set:
        labels += [find_table_name(selectable._model)]
        if selectable._attributes == "*":
            # All attributes: take them from the SQLAlchemy class manager.
            try:
                selected_attributes = selectable._model._sa_class_manager
            except:
                # Fall back when _model wraps the mapped class indirectly.
                selected_attributes = selectable._model.class_._sa_class_manager
                pass
        else:
            selected_attributes = [selectable._attributes]
        for field in selected_attributes:
            # `field` is presumably an attribute name from the class manager
            # mapping — TODO confirm.
            attribute = None
            if has_attribute(models, "class_"):
                attribute = selectable._model.class_._sa_class_manager[field].__str__()
            elif has_attribute(models, "_sa_class_manager"):
                attribute = selectable._model._sa_class_manager[field].__str__()
            if attribute is not None:
                columns.add(attribute)
    part2_starttime = current_milli_time()
    """ Loading objects (from database) """
    list_results = []
    for selectable in model_set:
        tablename = find_table_name(selectable._model)
        authorized_secondary_indexes = get_attribute(selectable._model, "_secondary_indexes", [])
        # tablename = selectable._model.__tablename__
        # authorized_secondary_indexes = SECONDARY_INDEXES[tablename] if tablename in SECONDARY_INDEXES else []
        # Only hints on "id" or a declared secondary index can be pushed down.
        selected_hints = filter(lambda x: x.table_name == tablename and (x.attribute == "id" or x.attribute in authorized_secondary_indexes), hints)
        reduced_hints = map(lambda x:(x.attribute, x.value), selected_hints)
        objects = get_objects(tablename, request_uuid=request_uuid, skip_loading=False, hints=reduced_hints)
        list_results += [objects]
    part3_starttime = current_milli_time()
    """ Building tuples """
    # Try the optimized join first; fall back to the simple cartesian product
    # builder if it fails for any reason.
    try:
        tuples = building_tuples(list_results, labels, criterions, hints)
    except:
        tuples = simple_building_tuples(list_results, labels, criterions, hints)
    part4_starttime = current_milli_time()
    """ Filtering tuples (cartesian product) """
    # Deduplicate candidate rows via their string representation.
    indexed_rows = {}
    for product in tuples:
        if len(product) > 0:
            row = KeyedTuple(product, labels=labels)
            row_index_key = "%s" % (str(row))
            if row_index_key in indexed_rows:
                continue
            all_criterions_satisfied = True
            # for criterion in criterions:
            #     if not criterion.is_joining_expression and not criterion.evaluate(row):
            #         all_criterions_satisfied = False
            #         break
            if all_criterions_satisfied:
                indexed_rows[row_index_key] = True
                rows += [extract_sub_row(row, model_set)]
    part5_starttime = current_milli_time()
    deconverter = get_decoder(request_uuid=request_uuid)
    """ Reordering tuples (+ selecting attributes) """
    final_rows = []
    showable_selection = [x for x in models if (not x.is_hidden) or x._is_function]
    part6_starttime = current_milli_time()
    """ Selecting attributes """
    if all_selectable_are_functions(models):
        # Pure aggregate query: one result row with one value per function.
        final_row = []
        for selection in showable_selection:
            value = selection._function._function(rows)
            final_row += [value]
        final_row = map(lambda x: deconverter.desimplify(x), final_row)
        return [final_row]
    else:
        for row in rows:
            final_row = []
            for selection in showable_selection:
                if selection._is_function:
                    value = selection._function._function(rows)
                    final_row += [value]
                else:
                    current_table_name = find_table_name(selection._model)
                    key = current_table_name
                    if not is_novabase(row) and has_attribute(row, key):
                        value = get_attribute(row, key)
                    else:
                        value = row
                    if value is not None:
                        if selection._attributes != "*":
                            # A single attribute was selected: project it.
                            final_row += [get_attribute(value, selection._attributes)]
                        else:
                            final_row += [value]
            # Defer heavy deserialization until the value is actually used.
            final_row = map(lambda x: wrap_with_lazy_value(x, request_uuid=request_uuid), final_row)
            if len(showable_selection) == 1:
                # Single selectable: flatten to a list of scalars.
                final_rows += final_row
            else:
                final_rows += [final_row]
    part7_starttime = current_milli_time()
    # JSON-formatted per-phase timing report for profiling.
    query_information = """{"building_query": %s, "loading_objects": %s, "building_tuples": %s, "filtering_tuples": %s, "reordering_columns": %s, "selecting_attributes": %s, "description": "%s", "timestamp": %i}""" % (
        part2_starttime - part1_starttime,
        part3_starttime - part2_starttime,
        part4_starttime - part3_starttime,
        part5_starttime - part4_starttime,
        part6_starttime - part5_starttime,
        part7_starttime - part6_starttime,
        """{\\"models\\": \\"%s\\", \\"criterions\\": \\"%s\\"}""" % (models, criterions),
        current_milli_time()
    )
    logging.info(query_information)
    if file_logger_enabled:
        file_logger.info(query_information)
    return final_rows
import json
import os
import sys
from abc import ABC, ABCMeta, abstractmethod
from collections import OrderedDict, deque
from datetime import datetime
import defusedxml.ElementTree as ET
from defusedxml import minidom
from ..types import Serializable, SimpleSerializable
_apis_by_name = {}
class MetaAPI(ABCMeta):
    """
    Meta Class for APIs. It creates the BoundAPIQuery helpers.
    """
    def __new__(mcs, name, bases, attrs):
        # Create the class normally first; helper classes are attached after.
        cls = super(MetaAPI, mcs).__new__(mcs, name, bases, attrs)
        # Only create the helpers on subclasses of API
        # (the base API class itself lives in this module).
        if mcs.__module__ != attrs['__module__']:
            # Imported here to avoid a circular import at module load time.
            from ..queries.base import Query, BoundAPIQuery
            # Per-API Query and Parser base classes bound to this API class.
            cls.Query = type('Query', (BoundAPIQuery, ), {'__module__': attrs['__module__'], 'API': cls})
            cls.Parser = type('Parser', (Parser, ), {'__module__': attrs['__module__'], 'API': cls})
            cls._supported_queries = {}
            # Breadth-first walk over every unbound Query subclass; for each,
            # synthesize a '<Name>Base' class combining it with this API's Query.
            base_queries = deque(q for q in Query.__subclasses__() if not issubclass(q, BoundAPIQuery))
            while base_queries:
                base_query = base_queries.popleft()
                base_queries.extend(q for q in base_query.__subclasses__() if not issubclass(q, BoundAPIQuery))
                setattr(cls, base_query.__name__+'Base', type(base_query.__name__+'Base', (cls.Query, base_query, ),
                                                              {'__module__': attrs['__module__']}))
        return cls

    @property
    def supported_queries(cls):
        # Read-only view of the Model classes this API can query.
        return frozenset(cls._supported_queries)
class API(SimpleSerializable, metaclass=MetaAPI):
    """
    An API subclass is a collection of Query implementations used by different networks.
    The instance of an API has a name and is usually a network.
    To start a query on an API instance use its properties:
    >>> api.stops.where(name='Essen')
    This may raise a NotImplementedError if the API does not implement this Query.
    """
    def __init__(self, name):
        if self.__class__ == API:
            raise TypeError('Only API subclasses can be initialized.')
        if name in _apis_by_name:
            raise TypeError('Duplicate API name: %s' % name)
        self.name = name
        # Register globally so _simple_unserialize can resolve the name later.
        _apis_by_name[name] = self

    @classmethod
    def _get_serialized_type_name(cls):
        return 'api'

    def _simple_serialize(self):
        return self.name

    @classmethod
    def _simple_unserialize(cls, data):
        if data is None:
            return None
        try:
            return _apis_by_name[data]
        except KeyError:
            # Was a bare `except:`, which also masked unrelated errors
            # (e.g. TypeError for unhashable data) as "does not exist".
            raise ValueError('API %s does not exist!' % data)

    # Each query property raises until _register_query installs a real one.
    @property
    def geopoints(self):
        raise NotImplementedError('Querying geopoints is not supported by this API.')

    @property
    def platforms(self):
        raise NotImplementedError('Querying platforms is not supported by this API.')

    @property
    def locations(self):
        raise NotImplementedError('Querying locations is not supported by this API.')

    @property
    def addresses(self):
        raise NotImplementedError('Querying addresses is not supported by this API.')

    @property
    def addressables(self):
        raise NotImplementedError('Querying addressables is not supported by this API.')

    @property
    def stops(self):
        raise NotImplementedError('Querying stops is not supported by this API.')

    @property
    def pois(self):
        raise NotImplementedError('Querying POIs is not supported by this API.')

    @property
    def trips(self):
        raise NotImplementedError('Querying trips is not supported by this API.')

    @classmethod
    def _register_query(cls, query_cls):
        """Register a query implementation and expose it as a plural property."""
        if query_cls.Model in cls._supported_queries:
            raise TypeError('Duplicate %sQuery on %s API.' % (query_cls.Model.__name__, query_cls.API.__name__))
        cls._supported_queries[query_cls.Model] = query_cls
        setattr(cls, query_cls.Model.__name__.lower()+'s', property(lambda self: query_cls(self)))
class ParserError(Exception):
    """
    Raised when a parser cannot make sense of its data.

    Attributes:
        parser: the Parser instance in which the error occurred.
        pretty_data: the parser's data rendered as a string.
    """
    def __init__(self, parser, message):
        self.parser = parser
        self.message = message
        self.pretty_data = parser.printable_data()
        # With CHOO_DEBUG set, embed the offending data in the message itself.
        if os.environ.get('CHOO_DEBUG'):
            message = '%s\n%s' % (message, self.pretty_data)
        super().__init__(message)
class Parser(Serializable, ABC):
    """
    A object that parses data, usually into model attributes.
    Only subclasses of this class (XMLParser, JSONParser) may be used directly.
    The data attribute contains the data.
    The api attribute contains the API instance which supplied the data.
    The time attribute contains the time of the data as datetime.
    If you want to implement a parser that describes a Model attributes, start similar to this:
    >>> class MyStopParser(Stop.XMLParser):
    >>>     @parser_property
    ...     def name(self, data, **kwargs):
    ...         pass  # Do your parsing
    Your parser may also inherit from another one of your parsers instead.
    Model attributes that are not implemented by your parser automatically will return None.
    """
    API = None

    def __init__(self, parent, data, api=None, time=None, **kwargs):
        """
        Initialise the parser.
        parent has to be an object from which the api and time attributes can be taken.
        data is the parser's data.
        Any additional keyword arguments will be forwarded to all parser_property and cached_property methods.
        """
        if self.API is None:
            raise TypeError('Use the API.Parser mixin. Example: class MyStop(EFA.Parser, Stop.XMLParser):')
        self.api = api if api else parent.api
        self.time = time if time else parent.time
        self.data = data
        self._kwargs = kwargs

    @abstractmethod
    def printable_data(self, pretty=True):
        """
        Get the parsers data as string.
        if pretty is True, the data is made easy-readable for humans (e.g. by indenting)
        """
        pass

    @classmethod
    @abstractmethod
    def _parse_raw_data(cls, data):
        pass

    @classmethod
    def parse(cls, api, time, data, **kwargs):
        """Parse raw data supplied by the given API at the given time."""
        result = cls(None, cls._parse_raw_data(data), api=api, time=time, **kwargs)
        if not isinstance(api, cls.API):
            raise TypeError('Wrong API for this parser. Expected %s subclass, not %s.' % (repr(cls.API), repr(api)))
        return result

    @classmethod
    def _get_serialized_type_name(cls):
        from ..models import Model
        if issubclass(cls, Model) and cls.API is not None:
            return (cls.Model.__name__.lower()+'.parser.'+cls.__module__).replace('.choo.apis.', '.')+cls.__name__

    def _serialize(self):
        # FIX: Serializable kwargs must be serialized recursively; otherwise
        # they cannot survive a serialize/unserialize round trip.
        kwargs = {name: (value.serialize() if isinstance(value, Serializable) else value)
                  for name, value in self._kwargs.items()}
        return OrderedDict((
            ('api', self.api.serialize()),
            ('time', self.time.isoformat()),
            ('data', self.printable_data(pretty=False).decode()),
            ('kwargs', kwargs),
        ))

    @classmethod
    def _unserialize(cls, data):
        # FIX: restore serialized kwargs (dicts tagged with '@type') back into
        # objects before re-parsing; build a new dict, do not mutate `data`.
        kwargs = {name: (Serializable.unserialize(value)
                         if isinstance(value, dict) and '@type' in value else value)
                  for name, value in data['kwargs'].items()}
        return cls.parse(API.unserialize(data['api']), datetime.strptime(data['time'], '%Y-%m-%dT%H:%M:%S'),
                         data['data'], **kwargs)

    def __setattr__(self, name, value):
        # Parser properties are read-only once computed.
        if name in getattr(self, '_nonproxy_fields', ()):
            raise TypeError('Cannot set a parser property')
        super().__setattr__(name, value)

    def __delattr__(self, name):
        if name in getattr(self, '_nonproxy_fields', ()):
            raise TypeError('Cannot delete a parser property')
        super().__delattr__(name)
class XMLParser(Parser):
    """
    A Parser that parses XML using defusedxml.ElementTree.
    data has to be a defusedxml.ElementTree.Element instance, e.g. ElementTree.fromstring(…).
    """
    def printable_data(self, pretty=True):
        # NOTE(review): ET.tostring(..., 'utf-8') returns bytes while
        # toprettyxml returns str, so the return type depends on `pretty`.
        # Parser._serialize relies on the bytes variant (it calls .decode()).
        string = ET.tostring(self.data, 'utf-8')
        if pretty:
            string = minidom.parseString(string).toprettyxml(indent=' ')
        return string

    @classmethod
    def _parse_raw_data(cls, data):
        # defusedxml rejects dangerous constructs (entity expansion etc.).
        return ET.fromstring(data)
class JSONParser(Parser):
    """
    A Parser whose data is a JSON-compatible structure.
    data has to be json serializable, e.g. json.loads(…).
    """
    def printable_data(self, pretty=True):
        indentation = 2 if pretty else None
        return json.dumps(self.data, indent=indentation)

    @classmethod
    def _parse_raw_data(cls, data):
        return json.loads(data)
class parser_property(object):
    """
    A decorator to create a parser property that describes a model attribute.
    The name of the property has to be a the name of a field of the given model.
    The underlying method gets called with the parser's data as a additional positional argument
    and all additional keyword arguments that the parser was initialized with. It will only be
    called once as it's return value will be cached.
    If an exception is raised from your method, debug info will be added to its message.
    Example:
    >>> class MyStopParser(Stop.XMLParser):
    >>>     @parser_property
    ...     def name(self, data, **kwargs):
    ...         pass  # Do your parsing
    """
    def __init__(self, func, name=None):
        self.func = func
        self.name = name or func.__name__

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: return the descriptor.
            return self
        field = obj.Model._fields[self.name]
        try:
            # Cache in the instance dict; since this class defines no __set__
            # it is a non-data descriptor, so the cached entry shadows the
            # descriptor on subsequent lookups.
            value = obj.__dict__[self.name] = self.func(obj, obj.data, **obj._kwargs)
        except Exception as e:
            # Re-raise the same exception type with debug info appended,
            # preserving the original traceback.
            raise type(e)(str(e) +
                          '\n\n### CHOO DEBUG INFO:\n%s' % obj.printable_data()).with_traceback(sys.exc_info()[2])
        if not field.validate(value):
            raise TypeError('Invalid type for attribute %s.' % self.name)
        return value
def cached_property(func):
    """
    A decorator to create an internal parser property that does not correspond to a field of the given model.
    The name of the property should start with an underscore.
    This decorator is similar to parser_property, but it only caches the result and adds the method arguments.
    There is no exception handling.
    """
    def wrapped_func(self):
        # property() is a data descriptor, so an instance __dict__ entry can
        # never shadow it: we must check the cache explicitly here, otherwise
        # the value is recomputed on every single access.
        if func.__name__ in self.__dict__:
            return self.__dict__[func.__name__]
        value = func(self, self.data, **self._kwargs)
        self.__dict__[func.__name__] = value
        return value
    return property(wrapped_func)
Fix Parser serialization: recursively (un)serialize Serializable keyword arguments.
import json
import os
import sys
from abc import ABC, ABCMeta, abstractmethod
from collections import OrderedDict, deque
from datetime import datetime
import defusedxml.ElementTree as ET
from defusedxml import minidom
from ..types import Serializable, SimpleSerializable
_apis_by_name = {}
class MetaAPI(ABCMeta):
    """
    Meta Class for APIs. It creates the BoundAPIQuery helpers.
    """
    def __new__(mcs, name, bases, attrs):
        # Create the class normally first; helper classes are attached after.
        cls = super(MetaAPI, mcs).__new__(mcs, name, bases, attrs)
        # Only create the helpers on subclasses of API
        # (the base API class itself lives in this module).
        if mcs.__module__ != attrs['__module__']:
            # Imported here to avoid a circular import at module load time.
            from ..queries.base import Query, BoundAPIQuery
            # Per-API Query and Parser base classes bound to this API class.
            cls.Query = type('Query', (BoundAPIQuery, ), {'__module__': attrs['__module__'], 'API': cls})
            cls.Parser = type('Parser', (Parser, ), {'__module__': attrs['__module__'], 'API': cls})
            cls._supported_queries = {}
            # Breadth-first walk over every unbound Query subclass; for each,
            # synthesize a '<Name>Base' class combining it with this API's Query.
            base_queries = deque(q for q in Query.__subclasses__() if not issubclass(q, BoundAPIQuery))
            while base_queries:
                base_query = base_queries.popleft()
                base_queries.extend(q for q in base_query.__subclasses__() if not issubclass(q, BoundAPIQuery))
                setattr(cls, base_query.__name__+'Base', type(base_query.__name__+'Base', (cls.Query, base_query, ),
                                                              {'__module__': attrs['__module__']}))
        return cls

    @property
    def supported_queries(cls):
        # Read-only view of the Model classes this API can query.
        return frozenset(cls._supported_queries)
class API(SimpleSerializable, metaclass=MetaAPI):
    """
    An API subclass is a collection of Query implementations used by different networks.
    The instance of an API has a name and is usually a network.
    To start a query on an API instance use its properties:
    >>> api.stops.where(name='Essen')
    This may raise a NotImplementedError if the API does not implement this Query.
    """
    def __init__(self, name):
        if self.__class__ == API:
            raise TypeError('Only API subclasses can be initialized.')
        if name in _apis_by_name:
            raise TypeError('Duplicate API name: %s' % name)
        self.name = name
        # Register globally so _simple_unserialize can resolve the name later.
        _apis_by_name[name] = self

    @classmethod
    def _get_serialized_type_name(cls):
        return 'api'

    def _simple_serialize(self):
        return self.name

    @classmethod
    def _simple_unserialize(cls, data):
        if data is None:
            return None
        try:
            return _apis_by_name[data]
        except KeyError:
            # Was a bare `except:`, which also masked unrelated errors
            # (e.g. TypeError for unhashable data) as "does not exist".
            raise ValueError('API %s does not exist!' % data)

    # Each query property raises until _register_query installs a real one.
    @property
    def geopoints(self):
        raise NotImplementedError('Querying geopoints is not supported by this API.')

    @property
    def platforms(self):
        raise NotImplementedError('Querying platforms is not supported by this API.')

    @property
    def locations(self):
        raise NotImplementedError('Querying locations is not supported by this API.')

    @property
    def addresses(self):
        raise NotImplementedError('Querying addresses is not supported by this API.')

    @property
    def addressables(self):
        raise NotImplementedError('Querying addressables is not supported by this API.')

    @property
    def stops(self):
        raise NotImplementedError('Querying stops is not supported by this API.')

    @property
    def pois(self):
        raise NotImplementedError('Querying POIs is not supported by this API.')

    @property
    def trips(self):
        raise NotImplementedError('Querying trips is not supported by this API.')

    @classmethod
    def _register_query(cls, query_cls):
        """Register a query implementation and expose it as a plural property."""
        if query_cls.Model in cls._supported_queries:
            raise TypeError('Duplicate %sQuery on %s API.' % (query_cls.Model.__name__, query_cls.API.__name__))
        cls._supported_queries[query_cls.Model] = query_cls
        setattr(cls, query_cls.Model.__name__.lower()+'s', property(lambda self: query_cls(self)))
class ParserError(Exception):
    """
    Raised when a parser cannot make sense of its data.

    Attributes:
        parser: the Parser instance in which the error occurred.
        pretty_data: the parser's data rendered as a string.
    """
    def __init__(self, parser, message):
        self.parser = parser
        self.message = message
        self.pretty_data = parser.printable_data()
        # With CHOO_DEBUG set, embed the offending data in the message itself.
        if os.environ.get('CHOO_DEBUG'):
            message = '%s\n%s' % (message, self.pretty_data)
        super().__init__(message)
class Parser(Serializable, ABC):
    """
    A object that parses data, usually into model attributes.
    Only subclasses of this class (XMLParser, JSONParser) may be used directly.
    The data attribute contains the data.
    The api attribute contains the API instance which supplied the data.
    The time attribute contains the time of the data as datetime.
    If you want to implement a parser that describes a Model attributes, start similar to this:
    >>> class MyStopParser(Stop.XMLParser):
    >>>     @parser_property
    ...     def name(self, data, **kwargs):
    ...         pass  # Do your parsing
    Your parser may also inherit from another one of your parsers instead.
    Model attributes that are not implemented by your parser automatically will return None.
    """
    API = None

    def __init__(self, parent, data, api=None, time=None, **kwargs):
        """
        Initialise the parser.
        parent has to be an object from which the api and time attributes can be taken.
        data is the parser's data.
        Any additional keyword arguments will be forwarded to all parser_property and cached_property methods.
        """
        if self.API is None:
            raise TypeError('Use the API.Parser mixin. Example: class MyStop(EFA.Parser, Stop.XMLParser):')
        self.api = api if api else parent.api
        self.time = time if time else parent.time
        self.data = data
        self._kwargs = kwargs

    @abstractmethod
    def printable_data(self, pretty=True):
        """
        Get the parsers data as string.
        if pretty is True, the data is made easy-readable for humans (e.g. by indenting)
        """
        pass

    @classmethod
    @abstractmethod
    def _parse_raw_data(cls, data):
        pass

    @classmethod
    def parse(cls, api, time, data, **kwargs):
        """Parse raw data supplied by the given API at the given time."""
        result = cls(None, cls._parse_raw_data(data), api=api, time=time, **kwargs)
        if not isinstance(api, cls.API):
            raise TypeError('Wrong API for this parser. Expected %s subclass, not %s.' % (repr(cls.API), repr(api)))
        return result

    @classmethod
    def _get_serialized_type_name(cls):
        from ..models import Model
        if issubclass(cls, Model) and cls.API is not None:
            return (cls.Model.__name__.lower()+'.parser.'+cls.__module__).replace('.choo.apis.', '.')+cls.__name__

    def _serialize(self):
        # Build the serialized kwargs once, instead of storing self._kwargs in
        # the result first and immediately overwriting it (redundant work in
        # the previous version).
        kwargs = {name: (value.serialize() if isinstance(value, Serializable) else value)
                  for name, value in self._kwargs.items()}
        return OrderedDict((
            ('api', self.api.serialize()),
            ('time', self.time.isoformat()),
            ('data', self.printable_data(pretty=False).decode()),
            ('kwargs', kwargs),
        ))

    @classmethod
    def _unserialize(cls, data):
        # Build a fresh kwargs dict: the previous version mutated the caller's
        # data['kwargs'] in place while restoring '@type'-tagged values.
        kwargs = {name: (Serializable.unserialize(value)
                         if isinstance(value, dict) and '@type' in value else value)
                  for name, value in data['kwargs'].items()}
        return cls.parse(API.unserialize(data['api']), datetime.strptime(data['time'], '%Y-%m-%dT%H:%M:%S'),
                         data['data'], **kwargs)

    def __setattr__(self, name, value):
        # Parser properties are read-only once computed.
        if name in getattr(self, '_nonproxy_fields', ()):
            raise TypeError('Cannot set a parser property')
        super().__setattr__(name, value)

    def __delattr__(self, name):
        if name in getattr(self, '_nonproxy_fields', ()):
            raise TypeError('Cannot delete a parser property')
        super().__delattr__(name)
class XMLParser(Parser):
    """
    A Parser that parses XML using defusedxml.ElementTree.
    data has to be a defusedxml.ElementTree.Element instance, e.g. ElementTree.fromstring(…).
    """
    def printable_data(self, pretty=True):
        # NOTE(review): ET.tostring(..., 'utf-8') returns bytes while
        # toprettyxml returns str, so the return type depends on `pretty`.
        # Parser._serialize relies on the bytes variant (it calls .decode()).
        string = ET.tostring(self.data, 'utf-8')
        if pretty:
            string = minidom.parseString(string).toprettyxml(indent=' ')
        return string

    @classmethod
    def _parse_raw_data(cls, data):
        # defusedxml rejects dangerous constructs (entity expansion etc.).
        return ET.fromstring(data)
class JSONParser(Parser):
    """
    A Parser whose data is a JSON-compatible structure.
    data has to be json serializable, e.g. json.loads(…).
    """
    def printable_data(self, pretty=True):
        indentation = 2 if pretty else None
        return json.dumps(self.data, indent=indentation)

    @classmethod
    def _parse_raw_data(cls, data):
        return json.loads(data)
class parser_property(object):
    """
    A decorator to create a parser property that describes a model attribute.
    The name of the property has to be a the name of a field of the given model.
    The underlying method gets called with the parser's data as a additional positional argument
    and all additional keyword arguments that the parser was initialized with. It will only be
    called once as it's return value will be cached.
    If an exception is raised from your method, debug info will be added to its message.
    Example:
    >>> class MyStopParser(Stop.XMLParser):
    >>>     @parser_property
    ...     def name(self, data, **kwargs):
    ...         pass  # Do your parsing
    """
    def __init__(self, func, name=None):
        self.func = func
        self.name = name or func.__name__

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: return the descriptor.
            return self
        field = obj.Model._fields[self.name]
        try:
            # Cache in the instance dict; since this class defines no __set__
            # it is a non-data descriptor, so the cached entry shadows the
            # descriptor on subsequent lookups.
            value = obj.__dict__[self.name] = self.func(obj, obj.data, **obj._kwargs)
        except Exception as e:
            # Re-raise the same exception type with debug info appended,
            # preserving the original traceback.
            raise type(e)(str(e) +
                          '\n\n### CHOO DEBUG INFO:\n%s' % obj.printable_data()).with_traceback(sys.exc_info()[2])
        if not field.validate(value):
            raise TypeError('Invalid type for attribute %s.' % self.name)
        return value
def cached_property(func):
    """
    A decorator to create an internal parser property that does not correspond to a field of the given model.
    The name of the property should start with an underscore.
    This decorator is similar to parser_property, but it only caches the result and adds the method arguments.
    There is no exception handling.
    """
    def wrapped_func(self):
        # property() is a data descriptor, so an instance __dict__ entry can
        # never shadow it: we must check the cache explicitly here, otherwise
        # the value is recomputed on every single access.
        if func.__name__ in self.__dict__:
            return self.__dict__[func.__name__]
        value = func(self, self.data, **self._kwargs)
        self.__dict__[func.__name__] = value
        return value
    return property(wrapped_func)
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NL Classifier task."""
import dataclasses
from tensorflow_lite_support.python.task.core import base_options as base_options_module
from tensorflow_lite_support.python.task.processor.proto import classifications_pb2
from tensorflow_lite_support.python.task.processor.proto import classification_options_pb2
from tensorflow_lite_support.python.task.text.pybinds import _pywrap_nl_classifier
_CppNLClassifier = _pywrap_nl_classifier.NLClassifier
_BaseOptions = base_options_module.BaseOptions
_ClassificationOptions = classification_options_pb2.ClassificationOptions
@dataclasses.dataclass
class NLClassifierOptions:
  """Options for the NL classifier task.

  Attributes:
    base_options: Base options for the NL classifier task.
    classification_options: Classification options for the NL classifier task.
  """
  base_options: _BaseOptions
  # default_factory gives every instance its own options object instead of
  # sharing one mutable class-level default across all instances.
  classification_options: _ClassificationOptions = dataclasses.field(
      default_factory=_ClassificationOptions)
class NLClassifier(object):
  """Class that performs NL classification on text."""

  def __init__(self, options: NLClassifierOptions,
               cpp_classifier: _CppNLClassifier) -> None:
    """Initializes the `NLClassifier` object."""
    # Creates the object of C++ NLClassifier class.
    self._options = options
    self._classifier = cpp_classifier

  @classmethod
  def create_from_file(cls, file_path: str) -> "NLClassifier":
    """Creates the `NLClassifier` object from a TensorFlow Lite model.

    Args:
      file_path: Path to the model.

    Returns:
      `NLClassifier` object that's created from the model file.

    Raises:
      ValueError: If failed to create `NLClassifier` object from the provided
        file such as invalid file.
      RuntimeError: If other types of error occurred.
    """
    base_options = _BaseOptions(file_name=file_path)
    options = NLClassifierOptions(base_options=base_options)
    return cls.create_from_options(options)

  @classmethod
  def create_from_options(cls, options: NLClassifierOptions) -> "NLClassifier":
    """Creates the `NLClassifier` object from NL classifier options.

    Args:
      options: Options for the NL classifier task.

    Returns:
      `NLClassifier` object that's created from `options`.

    Raises:
      ValueError: If failed to create `NLClassifier` object from
        `NLClassifierOptions` such as missing the model or if any of the
        classification options is invalid.
      RuntimeError: If other types of error occurred.
    """
    # Validate the classification options eagerly, before touching the model.
    classification_options = options.classification_options
    if classification_options.max_results == 0:
      raise ValueError("Invalid `max_results` option: value must be != 0")
    if classification_options.category_name_allowlist is not None and \
        classification_options.category_name_denylist is not None:
      if len(classification_options.category_name_allowlist) > 0 and \
          len(classification_options.category_name_denylist) > 0:
        raise ValueError(
            "`class_name_allowlist` and `class_name_denylist` are mutually "
            "exclusive options.")
    classifier = _CppNLClassifier.create_from_options(
        options.base_options.to_pb2())
    return cls(options, classifier)

  def classify(self, text: str) -> classifications_pb2.ClassificationResult:
    """Performs actual NL classification on the provided text.

    Args:
      text: the input text, used to extract the feature vectors.

    Returns:
      classification result.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If failed to perform NL classification.
    """
    classification_result = self._classifier.classify(text)
    # Convert the C++ result into its Python proto wrapper before filtering.
    classification_result = classifications_pb2.ClassificationResult.\
      create_from_pb2(classification_result)
    return self._postprocess(classification_result)

  def _postprocess(self, result: classifications_pb2.ClassificationResult):
    """Post-process the classification output based on classification options.

    Args:
      result: the raw classification result.

    Returns:
      The filtered classification result.
    """
    classification_options = self.options.classification_options
    # NOTE(review): assumes result.classifications is non-empty — confirm
    # with the C++ NLClassifier contract.
    # Sort in descending order (higher score is better).
    categories = result.classifications[0].categories
    categories = sorted(
        categories, key=lambda category: category.score, reverse=True)
    # Filter out classification in deny list
    filtered_results = categories
    if classification_options.category_name_denylist is not None:
      filtered_results = list(
          filter(
              lambda category: category.category_name not in classification_options.
              category_name_denylist, filtered_results))
    # Keep only classification in allow list
    if classification_options.category_name_allowlist is not None:
      filtered_results = list(
          filter(
              lambda category: category.category_name in classification_options.
              category_name_allowlist, filtered_results))
    # Filter out classification in score threshold
    if classification_options.score_threshold is not None:
      filtered_results = list(
          filter(
              lambda category: category.score >= classification_options.
              score_threshold, filtered_results))
    # Only return maximum of max_results classification.
    if classification_options.max_results is not None:
      if classification_options.max_results > 0:
        result_count = min(len(filtered_results),
                           classification_options.max_results)
        filtered_results = filtered_results[:result_count]
    result.classifications[0].categories = filtered_results
    return result

  @property
  def options(self) -> NLClassifierOptions:
    # Options used to create this classifier (read-only).
    return self._options
Updated docstrings: corrected the error descriptions in `classify` and removed the inaccurate Raises section from `_postprocess`.
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""NL Classifier task."""
import dataclasses
from tensorflow_lite_support.python.task.core import base_options as base_options_module
from tensorflow_lite_support.python.task.processor.proto import classifications_pb2
from tensorflow_lite_support.python.task.processor.proto import classification_options_pb2
from tensorflow_lite_support.python.task.text.pybinds import _pywrap_nl_classifier
_CppNLClassifier = _pywrap_nl_classifier.NLClassifier
_BaseOptions = base_options_module.BaseOptions
_ClassificationOptions = classification_options_pb2.ClassificationOptions
@dataclasses.dataclass
class NLClassifierOptions:
  """Options for the NL classifier task.

  Attributes:
    base_options: Base options for the NL classifier task.
    classification_options: Classification options for the NL classifier task.
  """
  base_options: _BaseOptions
  # default_factory gives every instance its own options object instead of
  # sharing one mutable class-level default across all instances.
  classification_options: _ClassificationOptions = dataclasses.field(
      default_factory=_ClassificationOptions)
class NLClassifier(object):
"""Class that performs NL classification on text."""
def __init__(self, options: NLClassifierOptions,
cpp_classifier: _CppNLClassifier) -> None:
"""Initializes the `NLClassifier` object."""
# Creates the object of C++ NLClassifier class.
self._options = options
self._classifier = cpp_classifier
@classmethod
def create_from_file(cls, file_path: str) -> "NLClassifier":
"""Creates the `NLClassifier` object from a TensorFlow Lite model.
Args:
file_path: Path to the model.
Returns:
`NLClassifier` object that's created from the model file.
Raises:
ValueError: If failed to create `NLClassifier` object from the provided
file such as invalid file.
RuntimeError: If other types of error occurred.
"""
base_options = _BaseOptions(file_name=file_path)
options = NLClassifierOptions(base_options=base_options)
return cls.create_from_options(options)
@classmethod
def create_from_options(cls, options: NLClassifierOptions) -> "NLClassifier":
"""Creates the `NLClassifier` object from NL classifier options.
Args:
options: Options for the NL classifier task.
Returns:
`NLClassifier` object that's created from `options`.
Raises:
ValueError: If failed to create `NLClassifier` object from
`NLClassifierOptions` such as missing the model or if any of the
classification options is invalid.
RuntimeError: If other types of error occurred.
"""
classification_options = options.classification_options
if classification_options.max_results == 0:
raise ValueError("Invalid `max_results` option: value must be != 0")
if classification_options.category_name_allowlist is not None and \
classification_options.category_name_denylist is not None:
if len(classification_options.category_name_allowlist) > 0 and \
len(classification_options.category_name_denylist) > 0:
raise ValueError(
"`class_name_allowlist` and `class_name_denylist` are mutually "
"exclusive options.")
classifier = _CppNLClassifier.create_from_options(
options.base_options.to_pb2())
return cls(options, classifier)
def classify(self, text: str) -> classifications_pb2.ClassificationResult:
"""Performs actual NL classification on the provided text.
Args:
text: the input text, used to extract the feature vectors.
Returns:
classification result.
Raises:
ValueError: If any of the input arguments is invalid.
RuntimeError: If failed to perform NL classification.
"""
classification_result = self._classifier.classify(text)
classification_result = classifications_pb2.ClassificationResult.\
create_from_pb2(classification_result)
return self._postprocess(classification_result)
def _postprocess(self, result: classifications_pb2.ClassificationResult):
  """Post-process the classification output based on classification options.

  Applies, in order: score-descending sort, denylist filter, allowlist
  filter, score threshold, and max_results truncation.

  Args:
    result: the raw classification result.

  Returns:
    The filtered classification result.
  """
  opts = self.options.classification_options
  # NOTE(review): assumes result.classifications is non-empty — confirm
  # with the underlying classifier's contract.
  ranked = sorted(
      result.classifications[0].categories,
      key=lambda category: category.score,
      reverse=True)
  # Drop categories on the deny list.
  if opts.category_name_denylist is not None:
    ranked = [c for c in ranked
              if c.category_name not in opts.category_name_denylist]
  # Keep only categories on the allow list.
  if opts.category_name_allowlist is not None:
    ranked = [c for c in ranked
              if c.category_name in opts.category_name_allowlist]
  # Enforce the minimum score.
  if opts.score_threshold is not None:
    ranked = [c for c in ranked if c.score >= opts.score_threshold]
  # Truncate to at most max_results entries (non-positive values mean "all").
  if opts.max_results is not None and opts.max_results > 0:
    ranked = ranked[:opts.max_results]
  result.classifications[0].categories = ranked
  return result
@property
def options(self) -> NLClassifierOptions:
  """The options used to configure this `NLClassifier` instance."""
  return self._options
|
#!/usr/bin/env python
import datetime
# Open the output report at import time.  The filename is built from the
# current timestamp: str(datetime.datetime.today()) with ':', '.', ' ' and
# '-' removed, truncated to the first 14 digits (YYYYMMDDHHMMSS), followed
# by 'report.html'.  NOTE: generate() below writes to (and closes) this
# module-level handle, so generate() can only be called once per import.
report = open(str(datetime.datetime.today()).replace(':','').replace('.','').replace(' ','').replace('-','')[0:14]+'report.html','w')
#this method expects a two-dimensional list containing the data as lst, and a one-dimensional list containing titles as titlelst
def generate(lst, titlelst):
    """Write an HTML table to the module-level `report` file and close it.

    `lst` is a two-dimensional list holding the table data (one inner list
    per row); `titlelst` is a one-dimensional list of column headings.
    """
    report.write('<table border="1">')
    report.write('<tr>\n')
    # Heading row.
    for heading in titlelst:
        report.write('<th>' + heading + '</th>\n')
    report.write('</tr>\n')
    # One <tr> per data row, one <td> per cell.
    for row in lst:
        report.write('<tr>\n')
        for cell in row:
            report.write('<td>' + str(cell) + '</td>\n')
        report.write('</tr>\n')
    report.write('</table>')
    report.close()
#for testing purposes
#generate([[0,1],[2,3]],['cats','dogs'])
Report the number of times something happened
#!/usr/bin/env python
import datetime
#this method expects a two-dimensional list containing the data as lst, and a one-dimensional list containing titles as titlelst
def generate(lst, titlelst, count=None):
    """Write an HTML table report to a timestamped file.

    Args:
        lst: two-dimensional list with the table data (one inner list per row).
        titlelst: one-dimensional list of column headings.
        count: optional list, parallel to lst, with the number of times each
            row's event happened; when non-empty, an extra cell per row is
            written under the (always present) "Amount" heading.

    The report is written to '<YYYYMMDDHHMMSS>report.html' in the current
    working directory and the file is closed before returning.
    """
    # BUG FIX: the mutable default argument (count=[]) is replaced with the
    # None sentinel; behavior is unchanged for every caller.
    if count is None:
        count = []
    # Timestamp prefix: strip ':', '.', ' ' and '-' from the current datetime
    # and keep the first 14 digits (year month day hour minute second).
    stamp = str(datetime.datetime.today()).translate(
        str.maketrans('', '', ':. -'))[0:14]
    # 'with' guarantees the file is closed even if a write raises.
    with open(stamp + 'report.html', 'w') as report:
        report.write('<table border="1">')
        report.write('<tr>\n')
        # Heading row (the "Amount" column heading is always written,
        # matching the original behavior).
        for title in titlelst:
            report.write('<th>' + title + '</th>\n')
        report.write('<th>Amount</th>\n')
        report.write('</tr>\n')
        # One <tr> per data row; append the count cell when counts are given.
        for i, row in enumerate(lst):
            report.write('<tr>\n')
            for cell in row:
                report.write('<td>' + str(cell) + '</td>\n')
            if count:
                report.write('<td>' + str(count[i]) + '</td>\n')
            report.write('</tr>\n')
        report.write('</table>')
#for testing purposes
#generate([[0,1],[2,3]],['cats','dogs'], [10, 12])
|
#! /usr/bin/env python
"""
This standalone python script can be used to convert the force-fields in MSI
format (.FRC files, a.k.a. "BIOSYM", "DISCOVERY" format)
...into MOLTEMPLATE/LAMMPS format (.LT format).
Once converted into moltemplate (.LT) format, users can use these files with
MOLTEMPLATE to prepare LAMMPS simulations of molecules using these force fields
(without needing any additional software such as msi2lmp).
There are several examples of MSI files in the "tools/msi2lmp/frc_files/"
directory which is distributed with LAMMPS.
Limitations:
Currently (2017-2) this script ignores the "template" information in .FRC files.
When defining a new type of molecule, the user must carefully choose the
complete atom type for each type of atom in the molecule. In other words,
MOLTEMPLATE will not attempt to determine (from local context) whether
a carbon atom somewhere in your molecule happens to be an SP3 carbon
(ie. "c4" in the COMPASS force-field), or an aromatic carbon ("c3a"),
or something else (for example). This information is typically contained
in the "templates" section of these files, and this script currently ignores
that information. Instead, the user must determine which type of carbon atom
it is manually, for all of the carbon atoms in that kind of molecule.
(This only needs to be done once per molecule definition.
Once a type of molecule is defined, it can be copied indefinitely.)
"""
__author__ = 'Andrew Jewett'
__version__ = '0.1.20'
__date__ = '2017-10-03'
import sys
import os
from collections import defaultdict, OrderedDict
from operator import itemgetter
from math import *
# Name of this script as invoked (basename only), used in messages below.
g_program_name = __file__.split('/')[-1]
# Usage/help text shown to the user.
# BUG FIX: corrected user-facing typos "comass.lt" -> "compass.lt" and
# "HBONDTYLE" -> "HBONDSTYLE".
doc_msg = \
    "Typical Usage:\n\n" + \
    " " + g_program_name + " -name COMPASS < compass_published.frc > compass.lt\n\n" + \
    " where \"compass_published.frc\" is a force-field file in MSI format.\n" + \
    " \"compass.lt\" is the corresponding file converted to moltemplate format\n" + \
    " and \"COMPASS\" is the name that future moltemplate users will use to refer\n" + \
    " to this force-field (optional).\n" + \
    "Optional Arguments\n" + \
    " -name FORCEFIELDNAME # Give the force-field a name\n" + \
    " -file FILE_NAME # Read force field parameters from a file\n" + \
    " -url URL # Read force field parameters from a file on the web\n" + \
    " -atoms \"QUOTED LIST\" # Restrict output to a subset of atom types\n" + \
    " Sometimes an FRC file contains multiple versions. In that case,\n" + \
    " you can select between them using these optional arguments:\n" + \
    " -pair-style \"PAIRSTYLE ARGS\" # LAMMPS pair style and cutoff arg(s)\n" + \
    " -bond-style BONDSTYLE # desired LAMMPS bond style (default: \"class2\")\n" + \
    " -angle-style ANGLESTYLE # desired LAMMPS angle style\n" + \
    " -dihedral-style DIHEDRALSTYLE # desired LAMMPS dihedral style\n" + \
    " -improper-style IMPROPERSTYLE # desired LAMMPS improper style\n" + \
    " -hbond-style \"HBONDSTYLE ARGS\" # LAMMPS hydrogen-bond style and args\n"
# " -auto # Consider auto_equivalences in the .frc file \n"+\
class InputError(Exception):
    """Raised when the caller provides a faulty input file or argument.

    Carries a plain message string (``err_msg``) used for error reporting.
    """

    def __init__(self, err_msg):
        # Store the message directly; __str__ and __repr__ both expose it.
        self.err_msg = err_msg

    def __str__(self):
        return self.err_msg

    def __repr__(self):
        return self.err_msg
def NSplitQuotedString(string,
                       nmax,
                       quotes,
                       delimiters=' \t\r\f\n',
                       escape='\\',
                       comment_char='#'):
    """
    Split a quoted & commented string into at most "nmax" tokens (if nmax>0),
    where each token is separated by one or more delimiter characters
    in the original string, and quoted substrings are not split.
    This function returns a list of strings.  Once the string is split nmax
    times, any remaining text will be appended to the last entry of the list.
    Comments (text after an unescaped, unquoted character from comment_char)
    are stripped from the string before splitting begins.
    """
    tokens = []
    token = ''
    reading_token = True
    escaped_state = False
    quote_state = None  # the currently-open quote character, or None
    for c in string:
        # Unescaped comment character outside quotes: flush and stop here.
        if (c in comment_char) and (not escaped_state) and (quote_state == None):
            if len(token) > 0:
                tokens.append(token)
            return tokens
        # Unescaped delimiter outside quotes: finish the current token --
        # unless nmax-1 tokens were already emitted, in which case all
        # remaining text (delimiters included) accumulates in the last token.
        elif (c in delimiters) and (not escaped_state) and (quote_state == None):
            if reading_token:
                if (nmax == 0) or (len(tokens) < nmax-1):
                    if len(token) > 0:
                        tokens.append(token)
                    token = ''
                    reading_token = False
                else:
                    token += c
        elif c in escape:
            if escaped_state:
                # An escaped escape character is kept as a literal.
                token += c
                reading_token = True
                escaped_state = False
            else:
                escaped_state = True
                # and leave c (the '\' character) out of token
        # Unescaped quote character: open/close quote_state (only the same
        # character that opened the quote closes it); quotes are kept in
        # the token text.
        elif (c in quotes) and (not escaped_state):
            if (quote_state != None):
                if (c == quote_state):
                    quote_state = None
            else:
                quote_state = c
            token += c
            reading_token = True
        else:
            # Translate the common backslash escape sequences; any other
            # escaped character is copied through verbatim.
            if (c == 'n') and (escaped_state == True):
                c = '\n'
            elif (c == 't') and (escaped_state == True):
                c = '\t'
            elif (c == 'r') and (escaped_state == True):
                c = '\r'
            elif (c == 'f') and (escaped_state == True):
                c = '\f'
            token += c
            reading_token = True
            escaped_state = False
    # Flush whatever is left in the final token.
    if len(token) > 0:
        tokens.append(token)
    return tokens
def SplitQuotedString(string,
                      quotes='\'\"',
                      delimiters=' \t\r\f\n',
                      escape='\\',
                      comment_char='#'):
    """Split `string` into tokens, honoring quotes, escapes, and comments.

    Thin wrapper around NSplitQuotedString() with nmax=0, i.e. no limit on
    the number of tokens.
    """
    return NSplitQuotedString(string,
                              0,
                              quotes=quotes,
                              delimiters=delimiters,
                              escape=escape,
                              comment_char=comment_char)
def RemoveOuterQuotes(text, quotes='\"\''):
    """Strip one matching pair of surrounding quote characters, if present."""
    has_quote_pair = ((len(text) >= 2) and
                      (text[0] in quotes) and
                      (text[-1] == text[0]))
    return text[1:-1] if has_quote_pair else text
def SortByEnds(l_orig):
    """
    Return a copy of l_orig, reversed when its first element exceeds its
    last.  (Convenient one-line macro for canonicalizing list order.)
    """
    seq = list(l_orig)
    return seq[::-1] if seq[0] > seq[-1] else seq
#def Repl(tokens, a, b):
# return [(b if x==a else x) for x in tokens]
def EncodeAName(s):
    """Sanitize an MSI atom-type name for use in moltemplate.

    A leading '*' marks the name as a wildcard (possibly followed by a
    priority integer, e.g. "*7"); such names are collapsed to 'X' and the
    integer is discarded.  Elsewhere in the name, '*' and quote characters
    are legal parts of MSI atom names but confuse moltemplate ('*' is
    reserved for wildcards there), so they are replaced with the
    placeholder strings 'star', 'prime', and 'dblpr' -- strings unused in
    any force field seen so far.  (Escaping as '\\*' does not work:
    ttree_lex.MatchesAll() would still treat it as a wildcard.)
    """
    if s[:1] == '*':
        # Wildcard name such as "*" or "*7": replace with 'X'.
        return 'X'
    for old, new in (('*', 'star'), ('\'', 'prime'), ('"', 'dblpr')):
        s = s.replace(old, new)
    return s
def DetermineAutoPriority(anames):
    """Extract the priority integer from a list of atom-name strings.

    Scans through the strings in `anames` looking for patterns of the form
    "*n", where n is an integer.  (These patterns are used by MSI software
    when using "auto_equivalences" to look up force field parameters for
    bonded interactions.)

    Returns n (as a float) if such a pattern appears, or 0.0 if none does.

    Raises:
        InputError: if two "*n" entries carry different integers.
    """
    n = None
    # BUG FIX: the original loop indexed the list itself (anames[:1],
    # anames[1:]) instead of each element, so "*n" patterns were never
    # detected and the function always returned 0.0.
    for aname in anames:
        # A bare '*' (wildcard with no integer) carries no priority; skip it.
        if aname[:1] == '*' and aname[1:] != '':
            if n == None:
                n = float(aname[1:])
            elif n != float(aname[1:]):
                raise InputError('Error: Inconsistent priority integers in the following interaction:\n'
                                 ' ' + ' '.join(anames) + '\n')
    if n == None:
        return 0.0
    else:
        return n
#def DeterminePriority(is_auto,
# anames,
# version):
# """
# Determine the priority of an interaction from
# 1) whether or not it is an "auto" interaction
# 2) what is the force-field "version" (a number)
# 3) what are the names of the atoms (for auto_equivalences only,
# some atom "names" are wildcards followed by integers. use the integer)
# """
#
# if is_auto:
# n = DetermineAutoPriority(anames)
# return (is_auto, n)
# else:
# return (is_auto, -version)
def DetermineNumericPriority(is_auto, anames, version):
    """
    Determine the priority of an interaction from
    1) what is the force-field "version" (a number), and
    2) what are the names of the atoms (for auto_equivalences only,
       some atom "names" are wildcards followed by integers; the integer
       is used as the priority).
    Lower return values indicate higher priority.
    """
    # Auto interactions derive their priority from "*n" atom-name patterns;
    # ordinary interactions use the negated force-field version, so newer
    # (larger) versions produce lower (= higher-priority) values.
    return DetermineAutoPriority(anames) if is_auto else -float(version)
def IsAutoInteraction(interaction_name):
    """Return True when the encoded interaction name carries the 'auto'
    prefix (i.e. it came from an "_auto" section of the FRC file)."""
    return interaction_name.startswith('auto')
def IsAutoAtom(atom_name):
    """Return True for auto_equivalence atom names (they end in '_')."""
    return atom_name.endswith('_')
def EncodeInteractionName(anames,
                          is_auto = False):
    """Encode a list of atom names as one comma-delimited string.

    If the names come from an "_auto" section of the FRC file -- either
    stated explicitly via is_auto, or inferred from a trailing '_' on any
    of the atom names -- the result is prefixed with 'auto' followed by
    the priority returned by DetermineAutoPriority().
    """
    if is_auto == False:
        # Infer auto-ness from the atom names themselves: any name ending
        # in '_' marks the whole interaction as "auto".
        is_auto = any(IsAutoAtom(s) for s in anames)
    if is_auto:
        priority = DetermineAutoPriority(anames)
        return 'auto' + str(priority) + ','.join(anames)
    return ','.join(anames)
def ExtractANames(interaction_name):
    """Recover the list of atom names from an encoded interaction name."""
    # NOTE(review): [5:] strips the 4-character 'auto' prefix plus one more
    # character; EncodeInteractionName() appends str(priority), which can be
    # longer than one character (e.g. '0.0') -- verify the encoding matches.
    if IsAutoInteraction(interaction_name):
        interaction_name = interaction_name[5:]
    return interaction_name.split(',')
def OOPImproperNameSort(aorig):
    """Canonically order the 4 atom names of an out-of-plane improper.

    The two outer atoms (indices 0 and 3) are swapped when necessary so the
    lexicographically smaller name comes first; the middle atoms keep their
    positions.  Returns the (possibly reordered) name list together with
    the permutation that produced it.
    """
    assert(len(aorig) == 4)
    # BUG FIX: under Python 3, map() returns an iterator, which is not
    # subscriptable (atom_names[0] raised TypeError).  Materialize it as a
    # list first, mirroring Class2ImproperNameSort().
    atom_names = [a for a in map(EncodeAName, aorig)]
    if atom_names[0] < atom_names[3]:
        return (atom_names, [0,1,2,3])
    else:
        return ([atom_names[3],
                 atom_names[1],
                 atom_names[2],
                 atom_names[0]],
                [3,1,2,0])
def Class2ImproperNameSort(aorig):
    """
    Takes a list of 4 atom-name strings for a class2 "improper"
    ("wilson-out-of-plane") interaction, assuming the second atom is the
    central ("hub") atom.  The three remaining (outer) atom names are
    sorted, and any "*" characters are encoded via EncodeAName().
    The reordered name list is returned to the caller together with the
    permutation that produced it.
    """
    assert(len(aorig) == 4)
    names = [EncodeAName(a) for a in aorig]
    # Sort the outer atoms (indices 0, 2, 3) by name, remembering where
    # each one came from.
    outer = sorted(zip([names[0], names[2], names[3]], [0, 2, 3]))
    ordered = [outer[0][0], names[1], outer[1][0], outer[2][0]]
    perm = [outer[0][1], 1, outer[1][1], outer[2][1]]
    return (ordered, perm)
def Parity(p):
    """Return True when permutation `p` is even, False when it is odd.

    Counts the cycles of the permutation: a permutation is even exactly
    when (length - number_of_cycles) is even.  (credit: "Weeble")
    """
    perm = list(p)
    n = len(perm)
    visited = [False] * n
    num_cycles = 0
    for start in range(n):
        if visited[start]:
            continue
        num_cycles += 1
        # Walk this cycle, marking every element on it.
        j = start
        while not visited[j]:
            visited[j] = True
            j = perm[j]
    return (n - num_cycles) % 2 == 0
def ImCrossTermID(atom_names):
    """
    Return a canonical comma-joined ID for an improper cross-term.

    The 4 atom names correspond to a pair of angles between atoms 3,2,1
    and 3,2,4.  Swapping the first and last atoms (#1 and #4) leaves that
    pair of angles unchanged, so the two outer atoms are emitted in
    lexicographic order to eliminate the ambiguity.
    """
    a1, a4 = atom_names[0], atom_names[3]
    if a1 > a4:
        a1, a4 = a4, a1
    return ','.join((a1, atom_names[1], atom_names[2], a4))
def DoAtomsMatchPattern(anames, pattern):
    """
    Check whether the list of atom names "anames" matches "pattern".
    (Both arguments are lists of strings, but some of the strings in
    pattern may contain wildcard characters ('*') followed by "priority"
    numbers.  Matches with lower priority numbers are given preference
    whenever multiple distinct matches are found.)
    Returns the priority recorded at the last compared position (0 for an
    exact non-wildcard match there), or None if any position fails to match.
    (Note: This function does not check patterns in reverse order.)
    """
    assert(len(anames) == len(pattern))
    is_match = True
    for want, have in zip(pattern, anames):
        if (want == have) or (want[0] == '*'):
            # Wildcard entries carry their priority after the '*';
            # an exact match counts as priority 0.
            priority = int(want[1:]) if want[0] == '*' else 0
        else:
            is_match = False
    return priority if is_match else None
def LookupBondLength(a1, a2,
                     atom2equiv_bond,
                     bond2r0,
                     atom2auto_bond,
                     bond2r0_auto):
    """
    Try to find bond parameters between atoms whose original
    atom names (without equivalences) are a1 and a2.
    Then return both the equilibrium bond length for that bond,
    as well as the equivalent atom names used to lookup that bond.
    (These could be stored in either atom2equiv_bond or atom2auto_bond.)
    If a match was not found, return None.
    """
    return_val = None
    # First try the ordinary "equivalences": canonicalize the pair order
    # and look the encoded name up directly.
    anames = (atom2equiv_bond[a1], atom2equiv_bond[a2])
    bond_name = EncodeInteractionName(SortByEnds(anames))
    if bond_name in bond2r0:
        return_val = (bond2r0[bond_name], [anames[0], anames[1]])
    # If no bond between these atoms is defined,
    # check the bonds in the _auto section(s)
    # This is a lot messier.
    elif ((a1 in atom2auto_bond) and (a2 in atom2auto_bond)):
        anames = [atom2auto_bond[a1], atom2auto_bond[a2]]
        # Because _auto interactions can contain wildcards,
        # there can be multiple entries in bond2r0_auto[]
        # for the same list of atom names, and we have to
        # consider all of them, and pick the one with the
        # most priority (ie. whose priority number is lowest).
        # (Note: The MSI file format uses low priority numbers
        #  to indicate high priority.  Somewhat confusing.)
        # NOTE(review): the keys of bond2r0_auto must be 2-tuples of
        # pattern strings (they are unpacked below) -- confirm with callers.
        HUGE_VAL = 2000000000
        best_priority = HUGE_VAL
        pattern = ['','']
        for (pattern[0],pattern[1]), r0 in bond2r0_auto.items():
            priority = DoAtomsMatchPattern(anames, pattern)
            if (priority != None) and (priority < best_priority):
                best_priority = priority
                return_val = (r0, [anames[0], anames[1]])
            # NOTE(review): anames.reverse() is never undone, so the
            # forward/reverse roles alternate across successive dictionary
            # entries -- verify this is intended.
            anames.reverse() # now check of the atoms in reverse order match
            priority = DoAtomsMatchPattern(anames, pattern)
            if (priority != None) and (priority < best_priority):
                best_priority = priority
                return_val = (r0, [anames[1], anames[0]]) #preserve atom order
    return return_val
def LookupRestAngle(a1, a2, a3,
                    atom2equiv_angle,
                    angle2theta0_or,
                    atom2auto_angle,
                    angle2theta0_auto_or):
    """
    Try to find angle parameters between atoms whose original atom
    names (without equivalences) are a1, a2, and a3.  Then return
    both the equilibrium rest angle for that 3body interaction
    as well as the equivalent atom names used to look it up.  (These
    could be stored in either atom2equiv_angle or atom2auto_angle.)
    If a match was not found, return None.
    """
    return_val = None
    # First try the ordinary "equivalences": canonicalize end-atom order
    # and look the encoded name up directly.
    anames = (atom2equiv_angle[a1], atom2equiv_angle[a2], atom2equiv_angle[a3])
    angle_name = EncodeInteractionName(SortByEnds(anames))
    if angle_name in angle2theta0_or:
        return_val = (angle2theta0_or[angle_name], [anames[0], anames[1], anames[2]])
    # If no angle between these atoms is defined,
    # check the angles in the _auto section(s)
    # This is a lot messier.
    # NOTE(review): atom2auto_angle is indexed [0]/[1]/[2] here, i.e. one
    # lookup table per position (end, center, end) -- unlike the single
    # dict used for bonds.  Confirm with callers.
    elif ((a1 in atom2auto_angle[0]) and
          (a2 in atom2auto_angle[1]) and
          (a3 in atom2auto_angle[2])):
        anames = [atom2auto_angle[0][a1],
                  atom2auto_angle[1][a2],
                  atom2auto_angle[2][a3]]
        # Because _auto interactions can contain wildcards,
        # there can be multiple entries in angle2theta0_auto_or[]
        # for the same list of atom names, and we have to
        # consider all of them, and pick the one with the
        # most priority (ie. whose priority number is lowest).
        # (Note: The MSI file format uses low priority numbers
        #  to indicate high priority.  Somewhat confusing.)
        # NOTE(review): the keys of angle2theta0_auto_or must be 3-tuples
        # of pattern strings (they are unpacked below).
        HUGE_VAL = 2000000000
        best_priority = HUGE_VAL
        pattern = ['','','']
        for (pattern[0],pattern[1],pattern[2]), theta0 in angle2theta0_auto_or.items():
            priority = DoAtomsMatchPattern(anames, pattern)
            if (priority != None) and (priority < best_priority):
                best_priority = priority
                return_val = (theta0, [anames[0], anames[1], anames[2]])
            # NOTE(review): as in LookupBondLength(), this reverse() is not
            # undone, so atom order alternates across dictionary entries.
            anames.reverse() # now check of the atoms in reverse order match
            priority = DoAtomsMatchPattern(anames, pattern)
            if (priority != None) and (priority < best_priority):
                best_priority = priority
                return_val = (theta0, [anames[2], anames[1], anames[0]]) #preserve atom order
    return return_val
def Equivalences2ffids(lines_equivalences,
                       atom_types,
                       atom2equiv_pair,
                       atom2equiv_bond,
                       atom2equiv_angle,
                       atom2equiv_dihedral,
                       atom2equiv_improper):
    """
    This function reads a list of lines containing "equivalences" from an
    MSI-formatted .FRC file, filling the atom2equiv_* dictionaries
    (columns: version, reference, type, nonbond, bond, angle, torsion, oop).
    Then, for each atom type, it generates a long string which includes the
    original atom type name as well as all of the equivalences it belongs to.
    Later on, when it is time to generate angles, dihedrals, or impropers,
    moltemplate will search for patterns contained in these strings to decide
    which type of interaction to generate.
    This function returns a dictionary that converts the original atom type
    name into these strings.
    """
    for line in lines_equivalences:
        # Strip trailing comments beginning with '!' or '>' (whichever
        # appears first on the line).
        # BUG FIX: the original only honored '>' when a '!' also appeared
        # EARLIER on the same line (the `ic2 < ic1` test can never succeed
        # when ic1 == -1); now either marker alone truncates the line.
        markers = [ic for ic in (line.find('!'), line.find('>')) if ic != -1]
        if markers:
            line = line[:min(markers)]
        else:
            line = line.rstrip('\n')
        tokens = line.strip().split()
        # Robustness: skip lines that are empty after comment stripping
        # (previously these raised IndexError below).
        if not tokens:
            continue
        atype = EncodeAName(tokens[2])
        atom2equiv_pair[atype] = EncodeAName(tokens[3])
        atom2equiv_bond[atype] = EncodeAName(tokens[4])
        atom2equiv_angle[atype] = EncodeAName(tokens[5])
        atom2equiv_dihedral[atype] = EncodeAName(tokens[6])
        atom2equiv_improper[atype] = EncodeAName(tokens[7])
    # Concatenate each atom type with all of its (tagged) equivalences.
    atom2ffid = OrderedDict()
    for atom in atom_types:
        atom2ffid[atom] = (atom +
                           ',p'+atom2equiv_pair.get(atom,'') +
                           ',b'+atom2equiv_bond.get(atom,'') +
                           ',a'+atom2equiv_angle.get(atom,'') +
                           ',d'+atom2equiv_dihedral.get(atom,'') +
                           ',i'+atom2equiv_improper.get(atom,''))
    return atom2ffid
def AutoEquivalences2ffids(lines_equivalences,
                           lines_auto_equivalences,
                           atom_types,
                           atom2equiv_pair,
                           atom2equiv_bond,
                           atom2equiv_angle,
                           atom2equiv_dihedral,
                           atom2equiv_improper,
                           atom2auto_pair,
                           atom2auto_bondincr,
                           atom2auto_bond,
                           atom2auto_angleend,
                           atom2auto_anglecenter,
                           atom2auto_dihedralend,
                           atom2auto_dihedralcenter,
                           atom2auto_improperend,
                           atom2auto_impropercenter):
    """
    This function is a variant of Equivalences2ffids() which also considers
    "auto_equivalences".
    This function returns a dictionary that converts the original atom type
    name into a string that includes that atom's "equivalences",
    as well as its "auto_equivalences".
    moltemplate will search for patterns contained in these strings to decide
    which type of interaction to generate.
    """
    # Process the ordinary "equivalences" first (fills the atom2equiv_*
    # dictionaries as a side effect; its return value is rebuilt below
    # with the extra auto columns appended).
    Equivalences2ffids(lines_equivalences,
                       atom_types,
                       atom2equiv_pair,
                       atom2equiv_bond,
                       atom2equiv_angle,
                       atom2equiv_dihedral,
                       atom2equiv_improper)
    # ------ The following lines are for processing "auto_equivalences" -----
    #
    # What is the difference between "equivalences" and "auto_equivalences"?
    #
    # equivalences:
    #   Here is an excerpt from the Discover manual describing "equivalences":
    #   "Chemically distinct atoms often differ in some, but not all,
    #   of their forcefield parameters.  For example, the bond parameters
    #   for the C-C bonds in ethene and in benzene are quite different,
    #   but the nonbond parameters for the carbon atoms are essentially
    #   the same.  Rather than duplicating the nonbond parameters in the
    #   forcefield parameter file, the Discover program uses atom type
    #   equivalences to simplify the problem.  In the example, the phenyl
    #   carbon atom type is equivalent to the pure sp2 carbons of ethene
    #   insofar as the nonbond parameters are concerned.  The Discover
    #   program recognizes five types of equivalences for each atom
    #   type: nonbond, bond, angle, torsion, and out-of-plane.
    #   Cross terms such as bond-bond terms have the same equivalences
    #   (insofar as atom types are concerned) as the diagonal term of
    #   the topology of all the atoms defining the internal coordinates.
    #   For the bond-bond term, this means that the atom type
    #   equivalences for angles would be used"
    #
    # auto_equivalences:
    #   Are similar to equivalences, but apparently with lower priority.
    #   In addition, it seems that, when looking up some of the class2 terms
    #   in the interaction according to atom type using "auto_equivalences"
    #   a distinction is made between end atoms and central atoms.
    #   The parameters for these interactions are also stored in different
    #   tables in the .frc file, with different comments/tags.
    #   (for example, "cff91_auto" as opposed to "cff91")
    #   An excerpt from the Discover manual is somewhat vague:
    #   "A forcefield may include automatic parameters for use when
    #   better-quality explicit parameters are not defined for a
    #   particular bond, angle, torsion, or out-of-plane interaction.
    #   These parameters are intended as temporary patches, to allow
    #   you to begin calculations immediately."
    for line in lines_auto_equivalences:
        # Strip trailing comments beginning with '!' or '>' (whichever
        # appears first on the line).
        # BUG FIX: the original only honored '>' when a '!' also appeared
        # EARLIER on the same line; now either marker alone truncates it.
        markers = [ic for ic in (line.find('!'), line.find('>')) if ic != -1]
        if markers:
            line = line[:min(markers)]
        else:
            line = line.rstrip('\n')
        tokens = line.strip().split()
        # Robustness: skip lines that are empty after comment stripping
        # (previously these raised IndexError below).
        if not tokens:
            continue
        atype = EncodeAName(tokens[2])
        atom2auto_pair[atype] = EncodeAName(tokens[3])
        atom2auto_bondincr[atype] = EncodeAName(tokens[4])
        atom2auto_bond[atype] = EncodeAName(tokens[5])
        atom2auto_angleend[atype] = EncodeAName(tokens[6])
        atom2auto_anglecenter[atype] = EncodeAName(tokens[7])
        atom2auto_dihedralend[atype] = EncodeAName(tokens[8])
        atom2auto_dihedralcenter[atype] = EncodeAName(tokens[9])
        atom2auto_improperend[atype] = EncodeAName(tokens[10])
        atom2auto_impropercenter[atype] = EncodeAName(tokens[11])
    # Concatenate each atom type with all of its (tagged) equivalences
    # and auto_equivalences.
    atom2ffid = OrderedDict()
    for atom in atom_types:
        atom2ffid[atom] = (atom +
                           ',p'+atom2equiv_pair.get(atom,'') +
                           ',b'+atom2equiv_bond.get(atom,'') +
                           ',a'+atom2equiv_angle.get(atom,'') +
                           ',d'+atom2equiv_dihedral.get(atom,'') +
                           ',i'+atom2equiv_improper.get(atom,'') +
                           ',ap'+atom2auto_pair.get(atom,'') +
                           ',aq'+atom2auto_bondincr.get(atom,'') +
                           ',ab'+atom2auto_bond.get(atom,'') +
                           ',aae'+atom2auto_angleend.get(atom,'') +
                           ',aac'+atom2auto_anglecenter.get(atom,'') +
                           ',ade'+atom2auto_dihedralend.get(atom,'') +
                           ',adc'+atom2auto_dihedralcenter.get(atom,'') +
                           ',aie'+atom2auto_improperend.get(atom,'') +
                           ',aic'+atom2auto_impropercenter.get(atom,'') +
                           ''
                           )
    return atom2ffid
def main():
try:
sys.stderr.write(g_program_name + ", version " +
__version__ + ", " + __date__ + "\n")
if sys.version < '2.6':
raise InputError('Error: Using python ' + sys.version + '\n' +
' Alas, your version of python is too old.\n'
' You must upgrade to a newer version of python (2.6 or later).')
if sys.version < '2.7':
from ordereddict import OrderedDict
else:
from collections import OrderedDict
if sys.version > '3':
import io
else:
import cStringIO
# defaults:
ffname = 'BIOSYM_MSI_FORCE_FIELD'
type_subset = set([])
filename_in = ''
#file_in = sys.stdin
file_in = open('compass_published.frc','r') #CONTINUEHERE
include_auto_equivalences = False
#pair_style_name = 'lj/class2/coul/long'
#pair_style_params = "10.0 10.0"
pair_style2docs = {}
pair_style2args = defaultdict(str)
pair_style2docs['lj/cut/coul/long'] = 'http://lammps.sandia.gov/doc/pair_lj.html'
pair_style2args['lj/cut/coul/long'] = '10.0'
pair_style2docs['lj/class2/coul/long'] = 'http://lammps.sandia.gov/doc/pair_class2.html'
pair_style2args['lj/class2/coul/long'] = '10.0'
pair_style2docs['lj/class2/coul/cut'] = 'http://lammps.sandia.gov/doc/pair_class2.html'
pair_style2args['lj/class2/coul/cut'] = '10.0'
bond_style2docs = {}
#bond_style2args = defaultdict(str)
bond_style2docs['harmonic'] = 'http://lammps.sandia.gov/doc/bond_harmonic.html'
bond_style2docs['class2'] = 'http://lammps.sandia.gov/doc/bond_class2.html'
bond_style2docs['morse'] = 'http://lammps.sandia.gov/doc/bond_morse.html'
bond_symmetry_subgraph = '' # default
angle_style2docs = {}
#angle_style2args = defaultdict(str)
angle_style2docs['harmonic'] = 'http://lammps.sandia.gov/doc/angle_harmonic.html'
angle_style2docs['class2'] = 'http://lammps.sandia.gov/doc/angle_class2.html'
angle_symmetry_subgraph = '' # default
dihedral_style2docs = {}
#dihedral_style2args = defaultdict(str)
dihedral_style2docs['charmm'] = 'http://lammps.sandia.gov/doc/dihedral_charmm.html'
dihedral_style2docs['class2'] = 'http://lammps.sandia.gov/doc/dihedral_class2.html'
dihedral_symmetry_subgraph = '' # default
improper_style2docs = {}
#improper_style2args = defaultdict(str)
improper_style2docs['cvff'] = 'http://lammps.sandia.gov/doc/improper_cvff.html'
improper_style2docs['class2'] = 'http://lammps.sandia.gov/doc/improper_class2.html'
improper_symmetry_subgraph = 'cenJsortIKL'
pair_mixing_style = 'sixthpower tail yes'
special_bonds_command = 'special_bonds lj/coul 0.0 0.0 1.0 dihedral yes'
# Thanks to Paul Saxe for is suggestions
# http://lammps.sandia.gov/threads/msg11270.html
kspace_style = 'kspace_style pppm 0.0001'
pair_styles_selected = set([])
#pair_style_link = 'http://lammps.sandia.gov/doc/pair_class2.html'
pair_style_args = {}
pair_cutoff = '10.0'
#pair_style_command = " pair_style hybrid " + \
# pair_style_name + " " + pair_style_args + "\n"
bond_styles_selected = set([])
#bond_style_link = bond_style2docs[bond_style_name]
#bond_style_args = ''
angle_styles_selected = set([])
#angle_style_link = angle_style2docs[angle_style_name]
#angle_style_args = ''
dihedral_styles_selected = set([])
#dihedral_style_link = dihedral_style2docs[dihedral_style_name]
#dihedral_style_args = ''
improper_styles_selected = set([])
#improper_style_link = improper_style2docs[improper_style_name]
#improper_style_args = ''
hbond_style_name = ''
hbond_style_link = ''
hbond_style_args = ''
lines_templates = []
lines_references = defaultdict(list)
lines_warnings = []
argv = [arg for arg in sys.argv]
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-atoms':
if i + 1 >= len(argv):
raise InputError('Error: the \"' + argv[i] + '\" argument should be followed by a quoted string\n'
' which contains a space-delimited list of of a subset of atom types\n'
' you want to use from the original force-field.\n'
' Make sure you enclose the entire list in quotes.\n')
type_subset = set(argv[i + 1].strip('\"\'').strip().split())
del argv[i:i + 2]
elif argv[i] == '-name':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by the name of the force-field\n')
ffname = argv[i + 1]
del argv[i:i + 2]
elif argv[i] in ('-file', '-in-file'):
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by the name of a force-field file\n')
filename_in = argv[i + 1]
try:
file_in = open(filename_in, 'r')
except IOError:
sys.stderr.write('Error: Unable to open file\n'
' \"' + filename_in + '\"\n'
' for reading.\n')
sys.exit(1)
del argv[i:i + 2]
elif argv[i] == '-pair-cutoff':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by a number'
' (the distance cutoff for non-bonded (pair) interactions)\n')
pair_style_cutoff = argv[i+1]
del argv[i:i + 2]
elif argv[i] == '-pair-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by either \"lj/class2/coul/cut\" or \"lj/class2/coul/long\"\n')
pair_style_list = argv[i + 1].split(',')
for pair_style in pair_style_list:
if pair_style == '9-6':
pair_style = 'lj/class2/coul/long'
elif pair_style in ('12-6', 'lj', 'LJ'):
pair_style = 'lj/cut/coul/long'
if pair_style.find('lj/class2/coul/long') == 0:
kspace_style = 'kspace_style pppm 0.0001'
elif pair_style.find('lj/cut/coul/long') == 0:
kspace_style = 'kspace_style pppm 0.0001'
elif pair_style.find('lj/class2/coul/cut') == 0:
pass
#kspace_style = ''
elif pair_style.find('lj/cut') == 0:
pass
#kspace_style = ''
else:
raise InputError('Error: ' + argv[i] + ' ' + pair_style_name + ' not supported.\n'
' The following pair_styles are supported:\n'
' lj/class2/coul/cut\n'
' lj/class2/coul/long\n'
' lj/cut\n'
' lj/cut/coul/long\n')
pair_styles_selected.add(pair_style)
del argv[i:i + 2]
# '-bond-style': select the LAMMPS bond_style(s) (comma separated).
elif argv[i] == '-bond-style':
    if i + 1 >= len(argv):
        raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
                         ' a compatible bond_style.\n')
    bond_styles = argv[i + 1].split(',')
    for bond_style in bond_styles:
        bond_styles_selected.add(bond_style)
    #bond_style2args[bond_style] = argv[i + 1].split()[1:]
    #if bond_style_name.find('harmonic') == 0:
    #    pass
    #    #bond_style_link = 'http://lammps.sandia.gov/doc/bond_harmonic.html'
    #elif bond_style_name.find('morse') == 0:
    #    pass
    #    #bond_style_link = 'http://lammps.sandia.gov/doc/bond_morse.html'
    #elif bond_style_name.find('class2') == 0:
    #    pass
    #    #bond_style_link = 'http://lammps.sandia.gov/doc/bond_class2.html'
    #else:
    #    raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
    #                     ' \"harmonic\", \"class2\", or \"morse\".\n')
    del argv[i:i + 2]
# '-angle-style': select the LAMMPS angle_style(s) (comma separated).
elif argv[i] == '-angle-style':
    if i + 1 >= len(argv):
        raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
                         ' a compatible angle_style.\n')
    angle_styles = argv[i + 1].split(',')
    for angle_style in angle_styles:
        angle_styles_selected.add(angle_style)
    #if angle_style_name.find('harmonic') == 0:
    #    pass
    #    #angle_style_link = 'http://lammps.sandia.gov/doc/angle_harmonic.html'
    #elif angle_style_name.find('class2') == 0:
    #    pass
    #    #angle_style_link = 'http://lammps.sandia.gov/doc/angle_class2.html'
    #else:
    #    raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
    #                     ' \"harmonic\" or \"class2\"\n')
    del argv[i:i + 2]
# '-dihedral-style': select the LAMMPS dihedral_style(s) (comma separated).
elif argv[i] == '-dihedral-style':
    if i + 1 >= len(argv):
        raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
                         ' a compatible dihedral_style.\n')
    dihedral_styles = argv[i + 1].split(',')
    for dihedral_style in dihedral_styles:
        dihedral_styles_selected.add(dihedral_style)
    #if dihedral_style_name.find('charmm') == 0:
    #    pass
    #    #dihedral_style_link = 'http://lammps.sandia.gov/doc/dihedral_charmm.html'
    #elif dihedral_style_name.find('class2') == 0:
    #    pass
    #    #dihedral_style_link = 'http://lammps.sandia.gov/doc/dihedral_class2.html'
    #else:
    #    raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
    #                     ' \"harmonic\" or \"class2\"\n')
    del argv[i:i + 2]
elif argv[i] == '-improper-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
' a compatible impropoer_style.\n')
improper_styles = argv[i + 1].split(',')
for improper_style in improper_styles:
improper_styles_selected.add(improper_style)
#if impropoer_style_name.find('harmonic') == 0:
# pass
# #impropoer_style_link = 'http://lammps.sandia.gov/doc/impropoer_harmonic.html'
#elif impropoer_style_name.find('class2') == 0:
# pass
# #impropoer_style_link = 'http://lammps.sandia.gov/doc/impropoer_class2.html'
#else:
# raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
# ' \"harmonic\" or \"class2\"\n')
del argv[i:i + 2]
elif argv[i] == '-hbond-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' ' + hbond_style_name + '\n'
' should be followed by a compatible pair_style.\n')
hbond_style_name = argv[i + 1]
hbond_style_link = 'http://lammps.sandia.gov/doc/pair_hbond_dreiding.html'
if hbond_style_name.find('none') == 0:
hbond_style_name = ''
hbond_style_args = ''
elif hbond_style_name.find('hbond/dreiding/lj') == 0:
n = len('hbond/dreiding/lj')
hbond_style_args = hbond_style_name[n+1:]
hbond_style_name = hbond_style_name[:n]
elif hbond_style_name.find('hbond/dreiding/morse') == 0:
n = len('hbond/dreiding/morse')
hbond_style_args = hbond_style_name[n+1:]
hbond_style_name = hbond_style_name[:n]
else:
raise InputError('Error: ' + argv[i] + ' flag should be followed by either\n'
' \"hbond/dreiding/lj\" or \"hbond/dreiding/morse"\n')
del argv[i:i + 2]
elif argv[i] in ('-url', '-in-url'):
import urllib2
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by the name of a\n'
' file containing force-field information in msi/frc format.\n')
url = argv[i + 1]
try:
request = urllib2.Request(url)
file_in = urllib2.urlopen(request)
except urllib2.URLError:
sys.stdout.write("Error: Unable to open link:\n" + url + "\n")
sys.exit(1)
del argv[i:i + 2]
elif argv[i] == '-auto':
include_auto_equivalences = True
del argv[i:i + 1]
elif argv[i] in ('-help', '--help', '-?', '--?'):
sys.stderr.write(doc_msg)
sys.exit(0)
del argv[i:i + 1]
else:
i += 1
# Any arguments still present (besides the program name) were not
# consumed by the flag loop above and are therefore unrecognized.
if len(argv) != 1:
    # (typo fix: "Unrecongized" -> "Unrecognized")
    raise InputError('Error: Unrecognized arguments: ' + ' '.join(argv[1:]) +
                     '\n\n' + doc_msg)
# Default styles: when the user requested nothing explicitly, fall back
# to the class2 bonded styles and the class2 (9-6) pair style.
if not bond_styles_selected:
    bond_styles_selected.add('class2')
if not angle_styles_selected:
    angle_styles_selected.add('class2')
if not dihedral_styles_selected:
    dihedral_styles_selected.add('class2')
if not improper_styles_selected:
    improper_styles_selected.add('class2')
if not pair_styles_selected:
    pair_styles_selected.add('lj/class2/coul/long')
#sys.stderr.write("Reading parameter file...\n")
lines = file_in.readlines()
atom2charge = OrderedDict() # lookup charge from atom type
atom2mass = OrderedDict() # lookup mass from atom type
# equivalences lookup
atom2ffid = OrderedDict() # lookup "force-field-ID" a string containing
# equivalences to lookup bonded interactions
atom2equiv_pair = OrderedDict() # lookup the equivalent symbol used for
# looking up pair interactions
atom2equiv_bond = OrderedDict()
atom2equiv_angle = OrderedDict()
atom2equiv_dihedral = OrderedDict()
atom2equiv_improper = OrderedDict()
# inverse equivalences lookup
equiv_pair2atom = defaultdict(set)
equiv_bond2atom = defaultdict(set)
equiv_angle2atom = defaultdict(set)
equiv_dihedral2atom = defaultdict(set)
equiv_improper2atom = defaultdict(set)
# auto equivalences lookup
atom2auto_pair = OrderedDict()
atom2auto_bondincr = OrderedDict()
atom2auto_bond = OrderedDict()
atom2auto_angleend = OrderedDict()
atom2auto_anglecenter = OrderedDict()
atom2auto_dihedralend = OrderedDict()
atom2auto_dihedralcenter = OrderedDict()
atom2auto_improperend = OrderedDict()
atom2auto_impropercenter = OrderedDict()
# inverse auto equivalences lookup
auto_pair2atom = defaultdict(set)
auto_bondincr2atom = defaultdict(set)
auto_bond2atom = defaultdict(set)
auto_angleend2atom = defaultdict(set)
auto_anglecenter2atom = defaultdict(set)
auto_dihedralend2atom = defaultdict(set)
auto_dihedralcenter2atom = defaultdict(set)
auto_improperend2atom = defaultdict(set)
auto_impropercenter2atom = defaultdict(set)
atom2element = OrderedDict() # Optional:
# which element (eg 'C', 'O') ? (Note this
# is different from atom type: 'C1', 'Oh')
atom2numbonds = OrderedDict() # Optional: how many bonds emanate from
atom2descr = OrderedDict() # Optional: a brief description
atom2ver = OrderedDict() # atoms introduced in different versions of ff
atom2ref = OrderedDict() # reference to paper where atom introduced
lines_equivalences = [] # equivalences for force-field lookup
lines_auto_equivalences = [] # auto_equivalences have lower priority
pair2params = OrderedDict()
pair2style = OrderedDict()
pair_styles = set([])
pair2ver = OrderedDict()
pair2ref = OrderedDict()
bond2chargepair = OrderedDict() # a.k.a "bond increments"
charge_pair_priority = OrderedDict() # priority in case multiple entries
# exist for the same pair of atoms
charge_pair_ver = OrderedDict() # which version of the force field?
charge_pair_ref = OrderedDict() # paper introducing this chargepair
bond2params = OrderedDict() # store a tuple with the 2-body bond
# interaction type, and its parameters
# for every type of bond
bond2priority = OrderedDict() # What is the priority of this interaction?
bond2style = OrderedDict() # What LAMMPS bond style (formula)
# is used for a given interaction?
bond_styles = set([]) # Contains all bond styles used.
bond2ver = OrderedDict()
bond2ref = OrderedDict()
bond2r0 = OrderedDict()
bond2r0_auto = OrderedDict()
angle2params = OrderedDict() # store a tuple with the 3-body angle
# interaction type, and its parameters
# for every type of angle
angle2params_or = OrderedDict()
# http://lammps.sandia.gov/doc/angle_class2.html
#angle2class2_a = OrderedDict() # params for the "a" class2 terms
angle2class2_bb = OrderedDict() # params for the "bb" class2 terms
angle2class2_bb_or = OrderedDict()
angle2class2_ba = OrderedDict() # params for the "ba" class2 terms
angle2class2_ba_or = OrderedDict()
angle2priority = OrderedDict() # What is the priority of this interaction?
angle2priority_or = OrderedDict()
angle2style = OrderedDict() # What LAMMPS angle style (formula)
# is used for a given interaction?
angle2style_or = OrderedDict()
angle_styles = set([]) # Contains all angle styles used.
angle2ref = OrderedDict()
angle2ver = OrderedDict()
angle2ref_or = OrderedDict()
angle2ver_or = OrderedDict()
angle2ver_bb = OrderedDict()
angle2ver_bb_or = OrderedDict()
angle2ref_bb = OrderedDict()
angle2ref_bb_or = OrderedDict()
angle2ver_ba = OrderedDict()
angle2ver_ba_or = OrderedDict()
angle2ref_ba = OrderedDict()
angle2ref_ba_or = OrderedDict()
angle2theta0_or = OrderedDict()
angle2theta0_auto_or = OrderedDict()
# http://lammps.sandia.gov/doc/dihedral_class2.html
dihedral2params = OrderedDict() # store a tuple with the 4-body dihedral
# interaction type, and its parameters
# for every type of dihedral
dihedral2params_or = OrderedDict()
#dihedral2class2_d = OrderedDict() # params for the "d" class2 term
dihedral2class2_mbt = OrderedDict() # params for the "mbt" class2 term
dihedral2class2_mbt_or = OrderedDict()
dihedral2class2_ebt = OrderedDict() # params for the "ebt" class2 term
dihedral2class2_ebt_or = OrderedDict()
#dihedral2sym_ebt = OrderedDict()
dihedral2class2_at = OrderedDict() # params for the "at" class2 term
dihedral2class2_at_or = OrderedDict()
#dihedral2sym_at = OrderedDict()
dihedral2class2_aat = OrderedDict() # params for the "aat" class2 term
dihedral2class2_aat_or = OrderedDict()
#dihedral2sym_aat = OrderedDict()
dihedral2class2_bb13 = OrderedDict() # params for the "bb13" class2 term
dihedral2class2_bb13_or = OrderedDict()
#dihedral2sym_bb13 = OrderedDict()
dihedral2priority = OrderedDict() # What is the priority of this interaction?
dihedral2priority_or = OrderedDict()
dihedral2style = OrderedDict() # What LAMMPS dihedral style (formula)
# is used for a given interaction?
dihedral2style_or = OrderedDict()
dihedral_styles = set([]) # Contains all dihedral styles used.
dihedral2ref = OrderedDict()
dihedral2ver = OrderedDict()
dihedral2ver_or = OrderedDict()
dihedral2ref_or = OrderedDict()
dihedral2ver_mbt = OrderedDict()
dihedral2ver_mbt_or = OrderedDict()
dihedral2ref_mbt = OrderedDict()
dihedral2ref_mbt_or = OrderedDict()
dihedral2ver_ebt = OrderedDict()
dihedral2ver_ebt_or = OrderedDict()
dihedral2ref_ebt = OrderedDict()
dihedral2ref_ebt_or = OrderedDict()
dihedral2ver_at = OrderedDict()
dihedral2ver_at_or = OrderedDict()
dihedral2ref_at = OrderedDict()
dihedral2ref_at_or = OrderedDict()
dihedral2ver_aat = OrderedDict()
dihedral2ver_aat_or = OrderedDict()
dihedral2ref_aat = OrderedDict()
dihedral2ref_aat_or = OrderedDict()
dihedral2ver_bb13 = OrderedDict()
dihedral2ver_bb13_or = OrderedDict()
dihedral2ref_bb13 = OrderedDict()
dihedral2ref_bb13_or = OrderedDict()
# http://lammps.sandia.gov/doc/improper_class2.html
improper2params = OrderedDict() # store a tuple with the 4-body improper
# interaction type, and its parameters
# for every type of imporpoer
improper2params_or = OrderedDict()
improper2class2_aa = OrderedDict() # params for the "aa" class2 term
improper2class2_aa_or = OrderedDict()
improper2cross = defaultdict(dict)
# improper2cross[imp_name][atoms] stores the
# coefficient (K) for the angle-angle ("aa")
# improper interactions between a pair of
# neighboring 3-body angles (in the .FRC file).
# "imp_name" is the name of the improper interaction
# (which is a concatination of the central atom and
# the 3 surrounding leaf atoms (which are sorted))
# "atoms" indicates, for that K value, the list of
# leaf atoms for that K value as they appear in the
# corresponding line of the .frc file (however the
# and last atom names are swapped if the first
# atom name is lexicographically > the last, to
# eliminate redundancy and ambiguity.)
improper2sym = defaultdict(set)
# improper2sym[imp_name] indicates which subset of
# leaf atoms (from 0 to 2) are equivalent and can
# tolerate having their order rearranged without
# effecting the energy. Later on this will be used
# to reduce the number of improper interactions that
# will be generated by moltemplate.
improper2priority = OrderedDict() # What is the priority of this interaction?
improper2priority_or = OrderedDict()
improper2style = OrderedDict() # What LAMMPS improper style (formula)
# is used for a given interaction?
improper2style_or = OrderedDict()
improper_styles = set([]) # Contains all improper styles used.
improper2ver = OrderedDict()
improper2ver_or = OrderedDict()
improper2ref = OrderedDict()
improper2ref_or = OrderedDict()
improper2ver_aa = OrderedDict()
improper2ver_aa_or = OrderedDict()
improper2ref_aa = OrderedDict()
improper2ref_aa_or = OrderedDict()
# Warn users if force field contains terms which cannot yet
# be simulated with LAMMPS (as of 2017-2-07)
display_OOP_OOP_warning = False
display_torsion_torsion_1_warning = False
"""
--- these next few lines of code appear to be unnecessary.
--- I'll probably delete this code in a later version
hbond2params = OrderedDict() # lookup hbond parameters and atom types
hbond2donors = OrderedDict() # according to the identifier in the 2nd
hbond2acceptors = OrderedDict() # column of the "#hbond_definition"
hbond2hydrogens = OrderedDict() # section of an .frc file.
"""
# Section headers we know how to handle (or deliberately ignore) in an
# MSI .frc file; anything else in a '#...' line is treated as an error.
allowed_section_names = set(['#define',
                             # sections used in all MSI force-fields
                             '#atom_types',
                             '#equivalence',
                             '#auto_equivalence',
                             '#nonbond(9-6)',
                             '#nonbond(12-6)',
                             '#quadratic_bond',
                             '#quartic_bond',
                             '#morse_bond',
                             '#quadratic_angle',
                             '#quartic_angle',
                             '#bond-bond',
                             '#bond-angle',
                             '#torsion_1',
                             '#torsion_3',
                             '#middle_bond-torsion_3',
                             '#end_bond-torsion_3',
                             '#angle-torsion_3',
                             '#angle-angle-torsion_1',#(class2 dihedral)
                             '#bond-bond_1_3', #(a class2 dihedral term)
                             '#out_of_plane',
                             '#wilson_out_of_plane',
                             '#angle-angle', #(a class2 improper term)
                             '#out_of_plane-out_of_plane', # UNSUPPORTED
                             '#torsion-torsion_1', # UNSUPPORTED
                             '#bond_increments',
                             '#hbond_definition', # irrelevant?
                             '#templates',
                             '#reference',
                             '#end'
                             ])
# Column indices inside the '#atom_types' section (determined later from
# the '!Ver ...' header comment); -1 means "not found yet".
icol_type = icol_mass = icol_elem = icol_nbonds = icol_comment = icol_ver = icol_ref = -1
# Parser state shared by all passes: the current section header and
# whether that section contains (lower priority) "auto" equivalences.
section_name = ''
section_is_auto = False
sys.stderr.write("parsing file pass1: look for atom types and equivalences...")
# Pass 1: scan every line for atom-type definitions and (auto)equivalence
# entries.  Equivalence lines are only collected here; they are decoded
# after this loop once the full set of atom types is known.
for iline in range(0, len(lines)):
    line = lines[iline]
    # Debug trace of every input line (goes to stderr).
    sys.stderr.write('line=\"' + line.strip() + '\"\n')
    tokens = SplitQuotedString(line.strip(),
                               quotes='',
                               comment_char='>')
    #sys.stderr.write('tokens = ' + str(tokens) + '\n')
    # Skip '!' comment lines, except the '!Ver' column-header comment.
    if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
        continue
    if line.lstrip(' ').find('#') == 0:
        #sys.stderr.write('allowed_section_names = ' +
        #                 str(allowed_section_names) + '\n')
        if tokens[0] in allowed_section_names:
            section_name = tokens[0]
            section_is_auto = tokens[-1].endswith('_auto')
            tokens_after_section_name = tokens[1:]
            sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
            continue
        elif not tokens[0] in ('#version',
                               '#define'):
            raise InputError('Error: Line# '+str(iline) +'\n'
                             ' Unrecognized section name:\n'
                             ' \"' + tokens[0] + '\"\n')
    elif (len(tokens) == 8) and (section_name == '#equivalence'):
        if line.lstrip().find('!') == 0:
            continue
        lines_equivalences.append(line)
    elif (len(tokens) == 12) and (section_name == '#auto_equivalence'):
        if line.lstrip().find('!') == 0:
            continue
        lines_auto_equivalences.append(line)
    elif (len(tokens) > 0) and (section_name == '#atom_types'):
        # Different FRC files put this information in different
        # columns. Column order is stored in the !Ver comment line:
        if line.lstrip().find('!Ver') == 0:
            tokens = line.strip().split()
            # NOTE(review): this reuses the name 'i' from the argument
            # parsing loop above; harmless here because argument parsing
            # has finished, but worth renaming eventually.
            for i in range(0, len(tokens)):
                if tokens[i].lower() == 'type':
                    icol_type = i
                elif tokens[i].lower() == 'mass':
                    icol_mass = i
                elif tokens[i].lower() == 'element':
                    icol_elem = i
                elif tokens[i].lower() == 'connections':
                    icol_nbonds = i
                elif tokens[i].lower() == 'comment':
                    icol_comment = i
                elif tokens[i].lower() == '!ver': #(version of ff)
                    icol_ver = i
                elif tokens[i].lower() == 'ref':
                    icol_ref = i
            assert(icol_ver == 0)
            if -1 in (icol_type, icol_mass):
                raise InputError('Error: Invalid #atom_types section.\n'
                                 ' The meaning of each column cannot be determined.\n'
                                 ' This file needs a valid "!Ver..." comment.\n')
            if icol_comment == -1:
                icol_comment = max(icol_type, icol_mass,
                                   icol_elem, icol_nbonds) + 1
            sys.stderr.write('icol_ver = '+str(icol_ver)+'\n')
            sys.stderr.write('icol_ref = '+str(icol_ref)+'\n')
            sys.stderr.write('icol_mass = '+str(icol_mass)+'\n')
            sys.stderr.write('icol_nelem = '+str(icol_elem)+'\n')
            sys.stderr.write('icol_nbonds = '+str(icol_nbonds)+'\n')
            sys.stderr.write('icol_comment = '+str(icol_comment)+'\n')
            continue
        tokens = map(RemoveOuterQuotes,
                     NSplitQuotedString(line.strip(),
                                        icol_comment+1,
                                        quotes='',
                                        comment_char='>'))
        # (map() is lazy on python 3; materialize so len()/indexing work)
        tokens = list(tokens)
        if (len(tokens) > 4):
            # 'type_subset' (defined elsewhere) optionally restricts
            # conversion to a subset of the atom types in the file.
            if ((len(type_subset) == 0) or (tokens[1] in type_subset)):
                aname = EncodeAName(tokens[icol_type])
                atom2mass[aname] = str(max(float(tokens[icol_mass]), 1.0e-06))
                # Some atoms in cvff.prm have zero mass. Unfortunately this
                # causes LAMMPS to crash, even if these atoms are never used,
                # so I give the mass a non-zero value instead.
                if icol_elem != -1:
                    atom2element[aname] = tokens[icol_elem]
                if icol_nbonds != -1:
                    atom2numbonds[aname] = int(tokens[icol_nbonds])
                atom2descr[aname] = tokens[icol_comment]
                atom2ver[aname] = tokens[icol_ver]
                atom2ref[aname] = tokens[icol_ref]
        elif len(tokens) > 0:
            raise InputError('Error: Invalid atom line: (line#'+str(iline)+')\n' +
                             '\"'+line.strip()+'\"')
# All atom types discovered during pass 1 (keys of atom2mass).
atom_types = [x for x in atom2mass]
# Now construct the lookup tables and inverse tables
# we will need to understand the remainder of the file:
if not include_auto_equivalences:
    atom2ffid = Equivalences2ffids(lines_equivalences,
                                   atom_types,
                                   atom2equiv_pair,
                                   atom2equiv_bond,
                                   atom2equiv_angle,
                                   atom2equiv_dihedral,
                                   atom2equiv_improper)
else:
    atom2ffid = AutoEquivalences2ffids(lines_equivalences,
                                       lines_auto_equivalences,
                                       atom_types,
                                       atom2equiv_pair,
                                       atom2equiv_bond,
                                       atom2equiv_angle,
                                       atom2equiv_dihedral,
                                       atom2equiv_improper,
                                       atom2auto_pair,
                                       atom2auto_bondincr,
                                       atom2auto_bond,
                                       atom2auto_angleend,
                                       atom2auto_anglecenter,
                                       atom2auto_dihedralend,
                                       atom2auto_dihedralcenter,
                                       atom2auto_improperend,
                                       atom2auto_impropercenter)
# Build the inverse tables: equivalence-symbol -> set of atom types.
for a,e in atom2equiv_pair.items():
    equiv_pair2atom[e].add(a)
for a,e in atom2equiv_bond.items():
    equiv_bond2atom[e].add(a)
for a,e in atom2equiv_angle.items():
    equiv_angle2atom[e].add(a)
for a,e in atom2equiv_dihedral.items():
    equiv_dihedral2atom[e].add(a)
for a,e in atom2equiv_improper.items():
    equiv_improper2atom[e].add(a)
# the inverse lookup for '*' matches all atom types
# (the wildcard is stored under the encoded name 'X')
for a in atom_types:
    #equiv_pair2atom['*'].add(EncodeAName(a))
    equiv_pair2atom['X'].add(EncodeAName(a))
    #equiv_bond2atom['*'].add(EncodeAName(a))
    equiv_bond2atom['X'].add(EncodeAName(a))
    #equiv_angle2atom['*'].add(EncodeAName(a))
    equiv_angle2atom['X'].add(EncodeAName(a))
    #equiv_dihedral2atom['*'].add(EncodeAName(a))
    equiv_dihedral2atom['X'].add(EncodeAName(a))
    #equiv_improper2atom['*'].add(EncodeAName(a))
    equiv_improper2atom['X'].add(EncodeAName(a))
# Same construction for the (lower priority) auto equivalences.
for a,e in atom2auto_pair.items():
    auto_pair2atom[e].add(a)
for a,e in atom2auto_bondincr.items():
    auto_bondincr2atom[e].add(a)
for a,e in atom2auto_bond.items():
    auto_bond2atom[e].add(a)
for a,e in atom2auto_angleend.items():
    auto_angleend2atom[e].add(a)
    #auto_angle[0][e].add(a)
    #auto_angle[2][e].add(a)
for a,e in atom2auto_anglecenter.items():
    auto_anglecenter2atom[e].add(a)
    #auto_angle[1][e].add(a)
for a,e in atom2auto_dihedralend.items():
    auto_dihedralend2atom[e].add(a)
    #auto_dihedral2atom[0][e].add(a)
    #auto_dihedral2atom[3][e].add(a)
for a,e in atom2auto_dihedralcenter.items():
    auto_dihedralcenter2atom[e].add(a)
    #auto_dihedral2atom[1][e].add(a)
    #auto_dihedral2atom[2][e].add(a)
for a,e in atom2auto_improperend.items():
    auto_improperend2atom[e].add(a)
for a,e in atom2auto_impropercenter.items():
    auto_impropercenter2atom[e].add(a)
# the inverse lookup for '*' matches all atom types
for a in atom_types:
    #auto_pair2atom['*'].add(EncodeAName(a))
    auto_pair2atom['X'].add(EncodeAName(a))
    #auto_bondincr2atom['*'].add(EncodeAName(a))
    auto_bondincr2atom['X'].add(EncodeAName(a))
    #auto_bond2atom['*'].add(EncodeAName(a))
    auto_bond2atom['X'].add(EncodeAName(a))
    #auto_angleend2atom['*'].add(EncodeAName(a))
    auto_angleend2atom['X'].add(EncodeAName(a))
    #auto_anglecenter2atom['*'].add(EncodeAName(a))
    auto_anglecenter2atom['X'].add(EncodeAName(a))
    #auto_dihedralend2atom['*'].add(EncodeAName(a))
    auto_dihedralend2atom['X'].add(EncodeAName(a))
    #auto_dihedralcenter2atom['*'].add(EncodeAName(a))
    auto_dihedralcenter2atom['X'].add(EncodeAName(a))
    #auto_improperend2atom['*'].add(EncodeAName(a))
    auto_improperend2atom['X'].add(EncodeAName(a))
    #auto_impropercenter2atom['*'].add(EncodeAName(a))
    auto_impropercenter2atom['X'].add(EncodeAName(a))
sys.stderr.write("parsing file pass2: look for bonds, bond_increments and nonbonded (pair) interactions...")
# Pass 2: re-scan the file for non-bonded (pair) parameters, charge
# "bond increments", and the 2-body bond parameters.
for iline in range(0, len(lines)):
    line = lines[iline]
    # Debug trace of every input line (goes to stderr).
    sys.stderr.write('line=\"' + line.strip() + '\"\n')
    tokens = SplitQuotedString(line.strip(),
                               quotes='',
                               comment_char='>')
    #sys.stderr.write('tokens = ' + str(tokens) + '\n')
    if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
        continue
    if line.lstrip(' ').find('#') == 0:
        #sys.stderr.write('allowed_section_names = ' +
        #                 str(allowed_section_names) + '\n')
        if (tokens[0] in allowed_section_names):
            section_name = tokens[0]
            section_is_auto = tokens[-1].endswith('_auto')
            tokens_after_section_name = tokens[1:]
            sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
            continue
        elif (not tokens[0] in ('#version','#define')):
            raise InputError('Error: Line# '+str(iline) +'\n'
                             ' Unrecognized section name:\n'
                             ' \"' + tokens[0] + '\"\n')
    elif ((len(tokens) > 4) and (section_name == '#nonbond(12-6)')
          and (pair_styles_selected & set(['lj','lj/cut','lj/cut/coul/long',
                                           'lj/cut/coul/cut','lj/cut/coul/debye',
                                           'lj/cut/coul/dsf','lj/cut/coul/msm',
                                           '12-6','nonbond(12-6)']))):
        if line.lstrip().find('!') == 0:
            continue
        atom_name = EncodeAName(tokens[2])
        pair2ver[atom_name] = tokens[0]
        pair2ref[atom_name] = tokens[1]
        # Convert the A,B (r^-12, r^-6) coefficients into
        # epsilon/sigma Lennard-Jones parameters.
        A = float(tokens[3])
        B = float(tokens[4])
        epsilon = B*B/(4*A)
        sigma = pow(B/A, 1.0/6)
        if sigma == 0.0:
            sigma = 1.0 #(non-zero to avoid nan error later)
        pair_styles.add('lj/cut/coul/long')
        # NOTE(review): 'pair_style_args' and 'pair_cutoff' are defined
        # outside this excerpt; the '-pair-cutoff' flag above stores into
        # 'pair_style_cutoff' -- verify these names are consistent.
        pair_style_args['lj/cut/coul/long'] = pair_cutoff
        pair2style[atom_name] = 'lj/cut/coul/long'
        pair2params[atom_name] = (str(epsilon)+' '+str(sigma))
        pair_mixing_style = 'geometric tail yes'
        #if pair_style_name.find('lj/cut') == 0:
        #    pair2params[atom_name] = (str(epsilon)+' '+str(sigma))
        #    pair_mixing_style = 'geometric tail yes'
    elif ((len(tokens) > 4) and (section_name == '#nonbond(9-6)')
          and (pair_styles_selected &
               set(['class2', '9-6', 'nonbond(9-6)',
                    'lj/class2/coul/long']))):
        if line.lstrip().find('!') == 0:
            continue
        atom_name = EncodeAName(tokens[2])
        pair2ver[atom_name] = tokens[0]
        pair2ref[atom_name] = tokens[1]
        # The 9-6 section already stores sigma and epsilon directly.
        sigma = tokens[3]
        epsilon = tokens[4]
        pair_styles.add('lj/class2/coul/long')
        pair_style_args['lj/class2/coul/long'] = pair_cutoff
        pair2style[atom_name] = 'lj/class2/coul/long'
        pair2params[atom_name] = (epsilon+' '+sigma)
        pair_mixing_style = 'sixthpower tail yes'
        #if pair_style_name.find('lj/class2') == 0:
        #    pair2params[atom_name] = (epsilon+' '+sigma)
        #    pair_mixing_style = 'sixthpower tail yes'
    elif (len(tokens) == 6) and (section_name == '#bond_increments'):
        if line.lstrip().find('!') == 0:
            continue
        aorig = [a for a in map(EncodeAName, tokens[2:4])]
        delta_q = tokens[4:6]
        atom_names = [a for a in aorig]
        # swap the order of the atoms?
        # (canonical order: first atom name <= last atom name; the two
        #  charge increments must be swapped along with the atoms)
        order_reversed = aorig[0] > aorig[-1]
        if order_reversed:
            delta_q.reverse()
            atom_names.reverse()
        bond_name = EncodeInteractionName(atom_names, section_is_auto)
        charge_pair_ver[bond_name] = tokens[0]
        charge_pair_ref[bond_name] = tokens[1]
        charge_pair_priority[bond_name] = \
            (0,
             section_is_auto,
             DetermineNumericPriority(section_is_auto,
                                      tokens[2:4],
                                      float(charge_pair_ver[bond_name])))
        bond2chargepair[bond_name] = (delta_q[0] + ' ' + delta_q[1])
    elif ((len(tokens) > 5) and (section_name == '#quadratic_bond')
          and (bond_styles_selected & set(['harmonic','quadratic','quadratic_bond']))):
        if line.lstrip().find('!') == 0:
            continue
        bond_styles.add('harmonic')
        atom_names = SortByEnds(map(EncodeAName, tokens[2:4]))
        bond_name = EncodeInteractionName(atom_names, section_is_auto)
        bond2ver[bond_name] = tokens[0]
        bond2ref[bond_name] = tokens[1]
        bond2priority[bond_name] = \
            (0,
             section_is_auto,
             DetermineNumericPriority(section_is_auto,
                                      tokens[2:4],
                                      float(bond2ver[bond_name])))
        r0 = tokens[4]
        k = tokens[5]
        # Equilibrium bond lengths are stored separately; auto entries
        # are keyed by the atom-name pair instead of the encoded name.
        if not section_is_auto:
            bond2r0[bond_name] = r0
            sys.stderr.write('bond2r0['+bond_name+'] = ' + str(r0) + '\n')
        else:
            bond2r0_auto[(atom_names[0], atom_names[1])] = r0
            sys.stderr.write('bond2r0_auto['+str(atom_names)+'] = ' + str(r0) + '\n')
        bond2style[bond_name] = 'harmonic'
        bond2params[bond_name] = (k+' '+r0)
    elif ((len(tokens) > 6) and (section_name == '#morse_bond')
          and (bond_styles_selected & set(['morse','morse_bond']))):
        if line.lstrip().find('!') == 0:
            continue
        bond_styles.add('morse')
        atom_names = SortByEnds(map(EncodeAName, tokens[2:4]))
        bond_name = EncodeInteractionName(atom_names, section_is_auto)
        bond2ver[bond_name] = tokens[0]
        bond2ref[bond_name] = tokens[1]
        bond2priority[bond_name] = \
            (0,
             section_is_auto,
             DetermineNumericPriority(section_is_auto,
                                      tokens[2:4],
                                      float(bond2ver[bond_name])))
        r0 = tokens[4]
        D = tokens[5]
        alpha = tokens[6]
        sys.stderr.write('DEBUG: morse: atom_names = '+str(atom_names)+'\n')
        if not section_is_auto:
            bond2r0[bond_name] = r0
            sys.stderr.write('bond2r0['+bond_name+'] = ' + str(r0) + '\n')
        else:
            bond2r0_auto[(atom_names[0], atom_names[1])] = r0
            sys.stderr.write('bond2r0_auto['+str(atom_names)+'] = ' + str(r0) + '\n')
        bond2style[bond_name] = 'morse'
        bond2params[bond_name] = (D+' '+alpha+' '+r0)
    elif ((len(tokens) > 7) and (section_name == '#quartic_bond')
          and (bond_styles_selected & set(['class2','quartic','quartic_bond']))):
        if line.lstrip().find('!') == 0:
            continue
        bond_styles.add('class2')
        atom_names = SortByEnds(map(EncodeAName, tokens[2:4]))
        bond_name = EncodeInteractionName(atom_names, section_is_auto)
        bond2ver[bond_name] = tokens[0]
        bond2ref[bond_name] = tokens[1]
        bond2priority[bond_name] = \
            (0,
             section_is_auto,
             DetermineNumericPriority(section_is_auto,
                                      tokens[2:4],
                                      float(bond2ver[bond_name])))
        r0 = tokens[4]
        if not section_is_auto:
            bond2r0[bond_name] = r0
            sys.stderr.write('bond2r0['+bond_name+'] = ' + str(r0) + '\n')
        else:
            bond2r0_auto[(atom_names[0], atom_names[1])] = r0
            sys.stderr.write('bond2r0_auto['+str(atom_names)+'] = ' + str(r0) + '\n')
        K2 = tokens[5]
        K3 = tokens[6]
        K4 = tokens[7]
        bond2style[bond_name] = 'class2'
        bond2params[bond_name] = (r0+' '+K2+' '+K3+' '+K4)
sys.stderr.write("parsing file pass3: look for (3-body) angle interactions...")
# Pass 3: re-scan the file for 3-body angle parameters (harmonic and
# class2/quartic angles, plus the class2 bond-bond and bond-angle
# cross terms).
for iline in range(0, len(lines)):
    line = lines[iline]
    # Debug trace of every input line (goes to stderr).
    sys.stderr.write('line=\"' + line.strip() + '\"\n')
    tokens = SplitQuotedString(line.strip(),
                               quotes='',
                               comment_char='>')
    #sys.stderr.write('tokens = ' + str(tokens) + '\n')
    if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
        continue
    if line.lstrip(' ').find('#') == 0:
        #sys.stderr.write('allowed_section_names = ' +
        #                 str(allowed_section_names) + '\n')
        if (tokens[0] in allowed_section_names):
            section_name = tokens[0]
            section_is_auto = tokens[-1].endswith('_auto')
            tokens_after_section_name = tokens[1:]
            sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
            continue
        elif (not tokens[0] in ('#version','#define')):
            raise InputError('Error: Line# '+str(iline) +'\n'
                             ' Unrecognized section name:\n'
                             ' \"' + tokens[0] + '\"\n')
    elif (len(tokens) > 6) and (section_name == '#quadratic_angle'):
        if line.lstrip().find('!') == 0:
            continue
        atom_names = SortByEnds(map(EncodeAName, tokens[2:5]))
        angle_name = EncodeInteractionName(atom_names, section_is_auto)
        angle2ver[angle_name] = tokens[0]
        angle2ref[angle_name] = tokens[1]
        angle2priority_or[angle_name] = \
            DetermineNumericPriority(section_is_auto,
                                     tokens[2:5],
                                     float(angle2ver[angle_name]))
        angle2priority[angle_name] = \
            (0,
             section_is_auto,
             angle2priority_or[angle_name])
        theta0 = tokens[5]
        k = tokens[6]
        # Equilibrium angles are stored separately; auto entries are
        # keyed by the atom-name triplet instead of the encoded name.
        if not section_is_auto:
            angle2theta0_or[angle_name] = theta0
            sys.stderr.write('angle2theta0_or['+angle_name+'] = ' + str(theta0) + '\n')
        else:
            angle2theta0_auto_or[(atom_names[0], atom_names[1], atom_names[2])] = theta0
            sys.stderr.write('angle2theta0_auto_or['+str(atom_names)+'] = ' + str(theta0) + '\n')
        if (angle_styles_selected & set(['harmonic',
                                         'quadratic',
                                         'quadratic_angle'])):
            angle_styles.add('harmonic')
            angle2style[angle_name] = 'harmonic'
            angle2params[angle_name] = (k+' '+theta0)
        elif (angle_styles_selected & set(['class2',
                                           'quartic',
                                           'quartic_angle'])):
            # Then this is a special case of the class2 angle where
            # the (theta-theta0)^3 and (theta-theta0)^4 terms = 0
            angle_styles.add('class2')
            angle2style_or[angle_name] = 'class2'
            angle2params_or[angle_name] = (theta0+' '+k+' 0 0')
    elif ((len(tokens) > 8) and (section_name == '#quartic_angle')
          and (angle_styles_selected & set(['class2','quartic','quartic_angle']))):
        if line.lstrip().find('!') == 0:
            continue
        angle_styles.add('class2')
        atom_names = SortByEnds(map(EncodeAName, tokens[2:5]))
        ang_name_orig = EncodeInteractionName(atom_names, section_is_auto)
        version = tokens[0]
        reference = tokens[1]
        angle2ver_or[ang_name_orig] = version
        angle2ref_or[ang_name_orig] = reference
        angle2priority_or[ang_name_orig] = \
            DetermineNumericPriority(section_is_auto,
                                     tokens[2:5],
                                     float(angle2ver_or[ang_name_orig]))
        #angle2priority[ang_name_orig] = \
        #    (0,
        #     section_is_auto,
        #     angle2priority_or[ang_name_orig])
        theta0 = tokens[5]
        if not section_is_auto:
            angle2theta0_or[ang_name_orig] = theta0
            sys.stderr.write('angle2theta0_or['+ang_name_orig+'] = ' + str(theta0) + '\n')
        else:
            angle2theta0_auto_or[(atom_names[0], atom_names[1], atom_names[2])] = theta0
            sys.stderr.write('angle2theta0_auto_or['+str(atom_names)+'] = ' + str(theta0) + '\n')
        K2 = tokens[6]
        K3 = tokens[7]
        K4 = tokens[8]
        angle2style_or[ang_name_orig] = 'class2'
        angle2params_or[ang_name_orig] = [theta0, K2, K3, K4]
        # Class2 cross terms default to zero until (if) the
        # #bond-bond / #bond-angle sections provide real values.
        if not ang_name_orig in angle2class2_bb_or:
            angle2class2_bb_or[ang_name_orig] = '0.0' # default value
            angle2ver_bb_or[ang_name_orig] = version # default value
            angle2ref_bb_or[ang_name_orig] = reference # default value
        if not ang_name_orig in angle2class2_ba_or:
            angle2class2_ba_or[ang_name_orig] = ['0.0', '0.0'] # default value
            angle2ver_ba_or[ang_name_orig] = version # default value
            angle2ref_ba_or[ang_name_orig] = reference # default value
    elif ((len(tokens) > 5) and
          (section_name in ('#bond-bond', '#bond-angle')) and
          (angle_styles_selected &
           set(['class2', 'quartic', 'quartic_angle']))):
        if line.lstrip().find('!') == 0:
            continue
        version = tokens[0]
        reference = tokens[1]
        if line.lstrip().find('!') == 0:
            continue
        aorig = [a for a in map(EncodeAName, tokens[2:5])]
        atom_names = SortByEnds(aorig)
        ang_name_orig = EncodeInteractionName(atom_names, section_is_auto)
        # One or two K coefficients; when only one is present it applies
        # to both bonds of the angle.
        K = ['', '']
        K[0] = tokens[5]
        K[1] = K[0]
        if len(tokens) > 6:
            K[1] = tokens[6]
        # Keep the coefficients consistent with the canonical
        # (sorted) atom order.
        order_reversed = aorig[0] > aorig[-1]
        if order_reversed:
            K.reverse()
        if (section_name == '#bond-bond'):
            angle2class2_bb_or[ang_name_orig] = K[0]
            angle2ver_bb_or[ang_name_orig] = version
            angle2ref_bb_or[ang_name_orig] = reference
        elif (section_name == '#bond-angle'):
            angle2class2_ba_or[ang_name_orig] = [k for k in K]
            angle2ver_ba_or[ang_name_orig] = version
            angle2ref_ba_or[ang_name_orig] = reference
        # If the quartic-angle entry has not been seen yet, register a
        # zeroed placeholder so later lookups succeed.
        if not ang_name_orig in angle2params_or:
            angle2params_or[ang_name_orig] = ['0.0', '0.0', '0.0', '0.0'] # default value
            angle2ver_or[ang_name_orig] = version
            angle2ref_or[ang_name_orig] = reference
            angle2priority_or[ang_name_orig] = 0.0
sys.stderr.write("parsing file pass4: look for dihedrals(torsions) and impropers(out_of_plane)...")
for iline in range(0, len(lines)):
line = lines[iline]
sys.stderr.write('line=\"' + line.strip() + '\"\n')
tokens = SplitQuotedString(line.strip(),
quotes='',
comment_char='>')
#sys.stderr.write('tokens = ' + str(tokens) + '\n')
if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
continue
if line.lstrip(' ').find('#') == 0:
#sys.stderr.write('allowed_section_names = ' +
# str(allowed_section_names) + '\n')
if (tokens[0] in allowed_section_names):
section_name = tokens[0]
section_is_auto = tokens[-1].endswith('_auto')
tokens_after_section_name = tokens[1:]
sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
continue
elif (not tokens[0] in ('#version','#define')):
raise InputError('Error: Line# '+str(iline) +'\n'
' Unrecognized section name:\n'
' \"' + tokens[0] + '\"\n')
elif (len(tokens) > 8) and (section_name == '#torsion_1'):
if line.lstrip().find('!') == 0:
continue
atom_names = SortByEnds(map(EncodeAName, tokens[2:6]))
dihedral_name = EncodeInteractionName(atom_names, section_is_auto)
dihedral2ver[dihedral_name] = tokens[0]
dihedral2ref[dihedral_name] = tokens[1]
dihedral2priority_or[dihedral_name] = \
DetermineNumericPriority(section_is_auto,
tokens[2:6],
float(dihedral2ver[dihedral_name]))
dihedral2priority[dihedral_name] = \
(0,
section_is_auto,
dihedral2priority_or[dihedral_name])
K = tokens[6]
n = tokens[7]
d = tokens[8]
w = '0.0' #ignore: this is only used by the CHARMM force field
if (dihedral_styles_selected & set(['charmm','torsion_1'])):
dihedral_styles.add('charmm')
dihedral2style[dihedral_name] = 'charmm'
#dihedral2params_or[dihedral_name] = [K,n,d,w]
dihedral2params[dihedral_name] = (K+' '+n+' '+d+' '+w)
elif (dihedral_styles_selected & set(['class2','torsion_3'])):
# Then this is a special case of the class2 angle
# lacking the higher terms in the Fourier series
dihedral_styles.add('class2')
dihedral2style[dihedral_name] = 'class2'
dihedral2params_or[dihedral_name] = [K,d,0,0,0,0]
#= (K+' '+d+' '+
# '0 0 '+'0 0')
elif ((len(tokens) > 7) and (section_name == '#torsion_3')
and (dihedral_styles_selected & set(['class2','torsion_3']))):
if line.lstrip().find('!') == 0:
continue
dihedral_styles.add('class2')
atom_names = SortByEnds(map(EncodeAName, tokens[2:6]))
dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
version = tokens[0]
reference = tokens[1]
dihedral2priority_or[dih_name_orig] = \
DetermineNumericPriority(section_is_auto,
tokens[2:6],
float(version))
#dihedral2priority[dih_name_orig] = \
# (0,
# section_is_auto,
# dihedral2priority_or[dih_name_orig])
V1 = tokens[6]
phi0_1 = tokens[7]
V2 = phi0_2 = V3 = phi0_3 = '0.0'
if len(tokens) > 9:
V2 = tokens[8]
phi0_2 = tokens[9]
if len(tokens) > 11:
V3 = tokens[10]
phi0_3 = tokens[11]
dihedral2style_or[dih_name_orig] = 'class2'
dihedral2ver_or[dih_name_orig] = version
dihedral2ref_or[dih_name_orig] = reference
dihedral2params_or[dih_name_orig] = [V1, phi0_1, V2, phi0_2, V3, phi0_3]
# default values for cross terms:
if not dih_name_orig in dihedral2class2_mbt_or:
dihedral2class2_mbt_or[dih_name_orig] = ['0.0','0.0','0.0'] # default value
dihedral2ver_mbt_or[dih_name_orig] = version
dihedral2ref_mbt_or[dih_name_orig] = reference
if not dih_name_orig in dihedral2class2_ebt_or:
dihedral2class2_ebt_or[dih_name_orig] = [['0.0','0.0','0.0'],['0.0','0.0','0.0']] # default value
dihedral2ver_ebt_or[dih_name_orig] = version
dihedral2ref_ebt_or[dih_name_orig] = reference
if not dih_name_orig in dihedral2class2_bb13_or:
dihedral2class2_bb13_or[dih_name_orig] = '0.0' # default value
dihedral2ver_bb13_or[dih_name_orig] = version
dihedral2ref_bb13_or[dih_name_orig] = reference
if not dih_name_orig in dihedral2class2_at_or:
dihedral2class2_at_or[dih_name_orig] = [['0.0','0.0','0.0'],['0.0','0.0','0.0']] # default value
dihedral2ver_at_or[dih_name_orig] = version
dihedral2ref_at_or[dih_name_orig] = reference
if not dih_name_orig in dihedral2class2_aat_or:
dihedral2class2_aat_or[dih_name_orig] = '0.0' # default value
dihedral2ver_aat_or[dih_name_orig] = version
dihedral2ref_aat_or[dih_name_orig] = reference
elif ((len(tokens) > 6) and (section_name == '#middle_bond-torsion_3')
and (dihedral_styles_selected & set(['class2','torsion_3']))):
if line.lstrip().find('!') == 0:
continue
dihedral_styles.add('class2')
version = tokens[0]
reference = tokens[1]
if line.lstrip().find('!') == 0:
continue
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names = SortByEnds(aorig)
Fmbt = [tokens[6], '0.0', '0.0']
if len(tokens) > 7:
Fmbt[1] = tokens[7]
if len(tokens) > 8:
Fmbt[2] = tokens[8]
dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
#sys.stderr.write('DEBUG: (a2,a3) = '+str((a2,a3))+', '
# ' (b1,b2) = '+str(batoms)+'\n')
dihedral2style[dih_name_orig] = 'class2'
dihedral2class2_mbt_or[dih_name_orig] = [F for F in Fmbt]
dihedral2ver_mbt_or[dih_name_orig] = version
dihedral2ref_mbt_or[dih_name_orig] = reference
if not dih_name_orig in dihedral2params_or:
dihedral2params_or[dih_name_orig] = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0']
dihedral2ver_or[dih_name_orig] = version
dihedral2ref_or[dih_name_orig] = reference
dihedral2priority_or[dih_name_orig] = 0.0
elif ((len(tokens) > 6) and
(section_name in ('#end_bond-torsion_3',
'#bond-bond_1_3')) and
(dihedral_styles_selected &
set(['class2', 'torsion_3']))):
if line.lstrip().find('!') == 0:
continue
dihedral_styles.add('class2')
version = tokens[0]
reference = tokens[1]
if line.lstrip().find('!') == 0:
continue
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names = SortByEnds(aorig)
dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
dihedral2style[dih_name_orig] = 'class2'
if section_name == '#end_bond-torsion_3':
Febt = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
Febt[0][0] = tokens[6]
if len(tokens) > 7:
Febt[0][1] = tokens[7]
if len(tokens) > 8:
Febt[0][2] = tokens[8]
Febt[1][0] = Febt[0][0]
Febt[1][1] = Febt[0][1]
Febt[1][2] = Febt[0][2]
if len(tokens) > 9:
Febt[1][0] = tokens[9]
if len(tokens) > 10:
Febt[1][1] = tokens[10]
if len(tokens) > 11:
Febt[1][2] = tokens[11]
order_reversed = aorig[0] > aorig[-1]
if order_reversed:
Febt.reverse()
dihedral2class2_ebt_or[dih_name_orig] = [ [F_ij for F_ij in F_i] for F_i in Febt] #deep copy of Febt[][]
dihedral2ver_ebt_or[dih_name_orig] = version
dihedral2ref_ebt_or[dih_name_orig] = reference
elif section_name == '#bond-bond_1_3':
Kbb13 = tokens[6]
#dihedral2ver_bb13[dih_name_orig] = version
dihedral2class2_bb13_or[dih_name_orig] = Kbb13
dihedral2ver_bb13_or[dih_name_orig] = version
dihedral2ref_bb13_or[dih_name_orig] = reference
else:
assert(False)
if not dih_name_orig in dihedral2params_or:
dihedral2params_or[dih_name_orig] = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0']
dihedral2ver_or[dih_name_orig] = version
dihedral2ref_or[dih_name_orig] = reference
dihedral2priority_or[dih_name_orig] = 0.0
elif ((len(tokens) > 6) and
(section_name in ('#angle-torsion_3',
'#angle-angle-torsion_1')) and
(dihedral_styles_selected &
set(['class2', 'torsion_3']))):
if line.lstrip().find('!') == 0:
continue
dihedral_styles.add('class2')
version = tokens[0]
reference = tokens[1]
if line.lstrip().find('!') == 0:
continue
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names = SortByEnds(aorig)
dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
dihedral2style[dih_name_orig] = 'class2'
if section_name == '#angle-torsion_3':
Fat = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
Fat[0][0] = tokens[6]
if len(tokens) > 7:
Fat[0][1] = tokens[7]
if len(tokens) > 8:
Fat[0][2] = tokens[8]
Fat[1][0] = Fat[0][0]
Fat[1][1] = Fat[0][1]
Fat[1][2] = Fat[0][2]
if len(tokens) > 9:
Fat[1][0] = tokens[9]
if len(tokens) > 10:
Fat[1][1] = tokens[10]
if len(tokens) > 11:
Fat[1][2] = tokens[11]
order_reversed = aorig[0] > aorig[-1]
if order_reversed:
Fat.reverse()
Fat[0].reverse()
Fat[1].reverse()
dihedral2class2_at_or[dih_name_orig] = [ [F_ij for F_ij in F_i] for F_i in Fat] #deep copy of Fat
dihedral2ver_at_or[dih_name_orig] = version
dihedral2ref_at_or[dih_name_orig] = reference
elif section_name == '#angle-angle-torsion_1':
Kaat = tokens[6]
dihedral2class2_aat_or[dih_name_orig] = Kaat
dihedral2ver_aat_or[dih_name_orig] = version
dihedral2ref_aat_or[dih_name_orig] = reference
else:
assert(False)
if not dih_name_orig in dihedral2params_or:
dihedral2params_or[dih_name_orig] = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0'] # default value
dihedral2ver_or[dih_name_orig] = version
dihedral2ref_or[dih_name_orig] = reference
dihedral2priority_or[dih_name_orig] = 0.0
elif ((len(tokens) > 8) and (section_name == '#out_of_plane')
and (improper_styles_selected & set(['cvff','out_of_plane']))):
if line.lstrip().find('!') == 0:
continue
improper_styles.add('cvff')
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names,_ignore = OOPImproperNameSort(tokens[2:6])
improper_name = EncodeInteractionName(atom_names, section_is_auto)
improper2ver[improper_name] = tokens[0]
improper2ref[improper_name] = tokens[1]
improper2priority[improper_name] = \
DetermineNumericPriority(section_is_auto,
tokens[2:6],
float(improper2ver[improper_name]))
improper2priority[improper_name] = \
(0,
section_is_auto,
improper2priority_or[improper_name])
K = tokens[6]
n = tokens[7]
chi0 = tokens[8]
improper2style[improper_name] = 'cvff'
improper2params[improper_name] = (Kchi+' '+n+' '+chi0)
improper_symmetry_subgraph = 'cenJswapIL'
#if improper_style_name == 'cvff':
# improper2params[improper_name] = (Kchi+' '+n+' '+chi0)
# improper_symmetry_subgraph = 'cenJswapIL'
elif ((len(tokens) > 7) and (section_name == '#wilson_out_of_plane')
and (improper_styles_selected and set(['class2','wilson_out_of_plane']))):
if line.lstrip().find('!') == 0:
continue
improper_styles.add('class2')
#improper_symmetry_subgraph = 'dihedrals_nosym' (<--no)
improper_symmetry_subgraph = 'cenJsortIKL'
sys.stderr.write('tokens = ' + str(tokens) + '\n')
version = tokens[0]
reference = tokens[1]
aorig = [a for a in map(EncodeAName, tokens[2:6])]
# To avoid redundancy, it is necessary to order the atoms
# in the interaction so that two equivalent ways of ordering
# the atoms in an improper interaction do not get misinterpreted
# as two different types of improper interactions. So we sort
# the 3 "leaf" atoms surrounding the central "hub" by name.
atom_names, permutation = Class2ImproperNameSort(tokens[2:6])
# This will effect the formula for the energy.
# (specifically the "chi0" parameter)
# When we lookup the various cross-term interactions for that
# same improper interaction, we will be sure to sort them
# in the same way to make sure those interactions are
# associated with the same improper interaction.
imp_name_orig = EncodeInteractionName(atom_names, section_is_auto)
improper2ver_or[imp_name_orig] = version
improper2ref_or[imp_name_orig] = reference
improper2priority_or[imp_name_orig] = \
DetermineNumericPriority(section_is_auto,
tokens[2:6],
float(improper2ver_or[imp_name_orig]))
#improper2priority[imp_name_orig] = \
# (0,
# section_is_auto,
# improper2priority_or[imp_name_orig])
K = tokens[6]
chi0 = tokens[7]
if Parity(permutation) != 0:
# Each time the order of a pair of atoms is swapped in
# the interaction, all 3 of the "X" (chi) angles change sign
# The formula for the ordinary term in the improper
# interaction is Ei = K*((Xijkl + Xkjli + Xljik)/3 - chi0)^2
# This formula is invariant if we change the sign of all
# Xijkl, Xkjli, Xljik, chi0
# Hence, we can account for a change in atom order by
# changing the sign of the "chi0" parameter.
# We calculate the "Parity" of the permutation (ie whether
# the permutation has an even or odd number of swaps)
# and multiply chi0 by -1 for each swap.
# It's not clear if this is necessary since in practice
# the "chi0" parameter is usually zero.
chi0 = str(-1.0*float(chi0)) # same as ('-' + chi0)
improper2style_or[imp_name_orig] = 'class2'
improper2params_or[imp_name_orig] = [K, chi0]
#improper2params[imp_name_orig] = K + ' ' + chi0
# default values for cross terms:
if not imp_name_orig in improper2class2_aa_or:
improper2class2_aa_or[imp_name_orig] = '0.0' #(default)
improper2ver_aa_or[imp_name_orig] = version
improper2ref_aa_or[imp_name_orig] = reference
# Initially, set all of the angle-angle cross terms to zero
# Start with the first cross term between aorig[0],aorig[1],aorig[2] & aorig[2],aorig[1],aorig[3]
improper2cross[imp_name_orig][ImCrossTermID([aorig[0],aorig[1],aorig[2],aorig[3]])] = '0.0'
# ...then cyclically permute the 3 "leaf" atoms (aorig[0], aorig[2], aorig[3]) around the "hub" atom (aorig[1])
improper2cross[imp_name_orig][ImCrossTermID([aorig[2],aorig[1],aorig[3],aorig[0]])] = '0.0'
improper2cross[imp_name_orig][ImCrossTermID([aorig[3],aorig[1],aorig[0],aorig[2]])] = '0.0'
elif ((len(tokens) > 6) and (section_name == '#angle-angle')
and (improper_styles_selected and set(['class2','wilson_out_of_plane']))):
if line.lstrip().find('!') == 0:
continue
improper_styles.add('class2')
version = tokens[0]
reference = tokens[1]
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names, permutation = Class2ImproperNameSort(tokens[2:6])
imp_name_orig = EncodeInteractionName(atom_names, section_is_auto)
improper2ver_aa_or[imp_name_orig] = version
improper2ref_aa_or[imp_name_orig] = reference
K = tokens[6]
improper2style_or[imp_name_orig] = 'class2'
if not imp_name_orig in improper2params_or:
improper2params_or[imp_name_orig] = ['0.0', '0.0']
improper2ver_or[imp_name_orig] = version
improper2ref_or[imp_name_orig] = reference
improper2priority_or[imp_name_orig] = 0.0
if not imp_name_orig in improper2cross:
# then initialize all of the cross terms to zero
improper2cross[imp_name_orig][ImCrossTermID([aorig[0],aorig[1],aorig[2],aorig[3]])] = '0.0'
# ...then cyclically permute the 3 "leaf" atoms (aorig[0], aorig[2], aorig[3]) around the "hub" atom (aorig[1])
improper2cross[imp_name_orig][ImCrossTermID([aorig[2],aorig[1],aorig[3],aorig[0]])] = '0.0'
improper2cross[imp_name_orig][ImCrossTermID([aorig[3],aorig[1],aorig[0],aorig[2]])] = '0.0'
#improper2class2_aa_or[imp_name_orig] = K (not needed)
improper2cross[imp_name_orig][ImCrossTermID(aorig)] = K
elif (len(tokens) > 0) and (section_name == '#out_of_plane-out_of_plane'):
if line.lstrip().find('!') == 0:
continue
display_OOP_OOP_warning = True
elif (len(tokens) > 0) and (section_name == '#torsion-torsion_1'):
if line.lstrip().find('!') == 0:
continue
display_torsion_torsion_1_warning = True
elif section_name == '#templates':
#if line.lstrip().find('!') == 0:
# continue
lines_templates.append(line)
elif section_name == '#reference':
if line.lstrip().find('!') == 0:
continue
if len(tokens_after_section_name) > 0:
ref_number = int(tokens_after_section_name[0])
if len(line.strip()) > 0:
lines_references[ref_number].append(line)
"""
--- these next few lines of code appear to be unnecessary.
--- I'll probably delete this code in a later version
elif (len(tokens) > 3) and (section_name == '#hbond_definition'):
hbondID = tokens[1]
if tokens[2] == 'distance':
hbond2distance[hbondID] = tokens[3]
if tokens[2] == 'angle':
hbond2angle[hbondID] = tokens[3]
if tokens[2] == 'donors':
hbond2donors[hbondID] = map(EncodeAName, tokens[2:])
if tokens[2] == 'acceptors':
hbond2acceptors[hbondID] = map(EncodeAname(),tokens[2:])
"""
if display_OOP_OOP_warning:
lines_warnings.append('###########################################################\n'
'# WARNING\n'
'# ALL \"out-of-plane_out-of_plane\" INTERACTIONS ARE IGNORED.\n'
'# CHECK THAT THESE TERMS ARE NEGLEGIBLY SMALL.\n'
'# \"out-of-plane_out-of_plane\" interactions are not yet supported in LAMMPS\n'
'# (...as of 2017-2-07) There is no way that moltemplate can produce\n'
'# LAMMPS compatible parameter files for these interactions.\n'
'###########################################################\n')
if display_torsion_torsion_1_warning:
lines_warnings.append('###########################################################\n'
'# WARNING\n'
'# ALL \"torsion_torsion_1\" INTERACTIONS ARE IGNORED.\n'
'# CHECK THAT THESE TERMS ARE NEGLEGIBLY SMALL.\n'
'# \"torsion_torsion_1\" interactions are not yet supported in LAMMPS\n'
'# (...as of 2017-2-07) There is no way that moltemplate can produce\n'
'# LAMMPS compatible parameter files for these interactions.\n'
'###########################################################\n')
sys.stderr.write(' done.\n'
'building lookup tables...')
"""
--- these next few lines of code appear to be unnecessary.
--- I'll probably delete them eventually
if len(hbond2params) > 0:
sys.stdout.write('\n\n write_once("In Settings") {\n')
if hbond_style == 'hbond/dreiding/lj':
for hbondID, angle in hbond2angle:
hbond2params[hbondID] = hbond2distance[hbondID]+' '+hbond2angle[hbondID] ##<--this is not correct
for hbondID, params in hbond2params:
for donor in hbond2donors[hbondID]:
for acceptor in hbond2acceptors[hbondID]:
for hydrogen in hbond2hydrogens[hbondID]:
sys.stdout.write('pair_coeff @atom:'+donor+' @atom:'+acceptor+' '+hbond_style+' @atom:'+hydrogen+' i '+params+'\n')
sys.stdout.write(' } # (DREIDING style H-bond parameters)\n\n\n')
"""
sys.stderr.write(" done.\n")
sys.stderr.write("Trying all combinations of atom types...")
##################### POST-PROCESSING ########################
for ang_name_orig in angle2params_or:
is_auto = (ang_name_orig.find('auto_') == 0)
atom_names = ExtractANames(ang_name_orig)
num_angles = 0
atom_combos = [set([]), set([]), set([])]
#*#atom_priorities = [{}, {}, {}]
#*#atom_priorities[i][atom_name] = priority of i'th atom in interaction
# We must consider every possible combination of atom types
# which satisfy BOTH angle_equivalences and bond_equivalences.
# ...AND we must consider BOTH regular AND auto equivalences.
# For each combination generate a separate @angle interaction.
# (I fear this will make the resulting .LT file large.)
# Use different auto equivalence lookup tables for different
# atoms in the interaction. (ie the "center" and "end" atoms)
auto_angle2atom = [auto_angleend2atom,
auto_anglecenter2atom,
auto_angleend2atom]
for i in range(0, 3):
angle_atom_name = atom_names[i]
sys.stderr.write('DEBUG: angle_atom_name = '+angle_atom_name+'\n')
if not section_is_auto:
assert(angle_atom_name[-1] != '_')
# assume regular equivalences when looking up atom types
sys.stderr.write('DEBUG: equiv_angle2atom['+angle_atom_name+'] = '+
str(equiv_angle2atom[angle_atom_name])+'\n')
for a in equiv_angle2atom[angle_atom_name]:
atom_combos[i].add(a)
else:
assert((angle_atom_name[-1] == '_') or (ange_atom_name[0] == '*'))
# assume "auto" equivalences when looking up atom types
sys.stderr.write('DEBUG: auto_angle2atom['+str(i)+']['+angle_atom_name+'] = \n'
' '+str(equiv_angle2atom[i][angle_atom_name])+'\n')
for a in auto_angle2atom[i][angle_atom_name]:
atom_combos[i].add(a)
found_at_least_one = False
#*#for a1, a1priority in atom_priorities[0].items():
#*# for a2, a2priority in atom_priorities[1].items():
#*# for a3, a3priority in atom_priorities[2].items():
for a1 in atom_combos[0]:
for a2 in atom_combos[1]:
#sys.stderr.write('atom2auto_bond = '+str(atom2auto_bond)+'\n')
bond_data1 = LookupBondLength(a1, a2,
atom2equiv_bond,
bond2r0,
atom2auto_bond,
bond2r0_auto)
if bond_data1 == None: # Save time by continuing only if a
continue # bond was defined between a1 and a2
for a3 in atom_combos[2]:
bond_data2 = LookupBondLength(a2, a3,
atom2equiv_bond,
bond2r0,
atom2auto_bond,
bond2r0_auto)
if bond_data2 == None:
continue
#bond lengths:
r0s = [0.0, 0.0]
#equivalent atom names used to lookup the bonds:
batoms = [['', ''], ['', '']]
r0s[0], batoms[0] = bond_data1
r0s[1], batoms[1] = bond_data2
found_at_least_one = True
ang_name_full = ang_name_orig + ',' + \
EncodeInteractionName(batoms[0] + batoms[1],
section_is_auto)
#sys.stderr.write('DEBUG: (a1,a2,a3) = '+str((a1,a2,a3))+', '
# ' (b11,b12,b21,b22) = '+str(batoms)+'\n')
angle2ref_or[ang_name_full] = reference
angle2style_or[ang_name_full] = 'class2'
theta0_K_params = angle2params_or[ang_name_orig]
angle2params[ang_name_full] = ' '.join(theta0_K_params)
if ang_name_orig in angle2class2_bb_or:
Kbb = angle2class2_bb_or[ang_name_orig]
assert(ang_name_orig in angle2ver_bb_or)
assert(ang_name_orig in angle2ref_bb_or)
else: #(use default values)
Kbb = '0.0'
angle2class2_bb_or[ang_name_orig] = Kbb
angle2ver_bb_or[ang_name_orig] = angle2ver_or[ang_name_orig]
angle2ref_bb_or[ang_name_orig] = angle2ref_or[ang_name_orig]
angle2class2_bb[ang_name_full] = (Kbb+' '+r0s[0]+' '+r0s[1])
angle2priority_bb = \
DetermineNumericPriority(is_auto,
batoms[0] + batoms[1],
float(angle2ver_bb_or[ang_name_orig]))
angle2ver_bb[ang_name_full] = angle2ver_bb_or[ang_name_orig]
angle2ref_bb[ang_name_full] = angle2ref_bb_or[ang_name_orig]
if ang_name_orig in angle2class2_ba_or:
Kba = angle2class2_ba_or[ang_name_orig]
assert(ang_name_orig in angle2ver_ba_or)
assert(ang_name_orig in angle2ref_ba_or)
else: #(use default values)
Kba = ['0.0', '0.0']
angle2class2_ba_or[ang_name_orig] = Kba
angle2ver_ba_or[ang_name_orig] = angle2ver_or[ang_name_orig]
angle2ref_ba_or[ang_name_orig] = angle2ref_or[ang_name_orig]
angle2class2_ba[ang_name_full] = (Kba[0]+' '+Kba[1]+' '+r0s[0]+' '+r0s[1])
angle2sym_ba = (r0s[0] == r0s[1])
angle2priority_ba = \
DetermineNumericPriority(is_auto,
batoms[0] + batoms[1],
angle2ver_ba_or[ang_name_orig])
angle2ver_ba[ang_name_full] = angle2ver_ba_or[ang_name_orig]
angle2ref_ba[ang_name_full] = angle2ref_ba_or[ang_name_orig]
version = max((angle2ver_or[ang_name_orig],
angle2ver_bb_or[ang_name_orig],
angle2ver_ba_or[ang_name_orig]))
angle2ver[ang_name_full] = version
angle2ref[ang_name_full] = angle2ref_or[ang_name_orig]
angle2style[ang_name_full] = 'class2'
angle2priority[ang_name_full] = \
(1,
is_auto,
angle2priority_or[ang_name_orig],
angle2priority_bb,
angle2priority_ba)
if num_angles < len(angle2params):
sys.stderr.write('DEBUG: '+section_name[1:]+' r0 ('+ang_name_full+') = ('+r0s[0]+', '+r0s[1]+')\n')
sys.stderr.write('DEBUG: len(angle2class2_bb) = '+str(len(angle2class2_bb))+'\n')
sys.stderr.write('DEBUG: '+section_name[1:]+' r0 ('+ang_name_full+') = ('+r0s[0]+', '+r0s[1]+')\n')
#sys.stderr.write('DEBUG: len(angle2class2_ba) = '+str(len(angle2class2_ba))+'\n')
num_angles = len(angle2params)
if ((not angle2sym_ba)
and
(atom_names[0] == atom_names[2])):
raise InputError('Error: Unsupported angle interaction: \"@angle:'+str(ang_name_orig)+'\"\n'
' This interaction has symmetric atom names:\n'
', '.join(atom_names)+'\n'
' and yet it lacks symmetry in the corresponding force field parameters.\n'
' (If this is not a mistake in the .frc file, then explain\n'
' why to andrew so he can fix this.)\n')
if not found_at_least_one:
#raise InputError('Error: Undefined bonds for bond-bond interactions:\n'
# ' '+str(atom_names)+'\n')
lines_warnings.append('# WARNING: Undefied bond length for angle interaction: ' +
' '.join(atom_names)+'\n')
#sys.stderr.write('bond_names = ' + str(bond_names) + '\n')
############ POST-PROCESSING DIHEDRALS ###########
for dih_name_orig in dihedral2params_or:
#assert(dih_name_orig in dihedral2class2_mbt_or)
#assert(dih_name_orig in dihedral2class2_ebt_or)
#assert(dih_name_orig in dihedral2class2_bb13_or)
#assert(dih_name_orig in dihedral2class2_at_or)
#assert(dih_name_orig in dihedral2class2_aat_or)
is_auto = (dih_name_orig.find('auto_') == 0)
atom_names = ExtractANames(dih_name_orig)
num_dihedrals = 0
atom_combos = [set([]), set([]), set([]), set([])]
#*#atom_priorities = [{}, {}, {}, {}]
#*#atom_priorities[i][atom_name] = priority of i'th atom in interaction
# We must consider every possible combination of atom types
# which satisfy all three:
# dihedral_equivalences
# bond_equivalences
# angle_equivalences
# ...AND we must consider BOTH regular AND auto equivalences.
# For each combination generate a separate @dihedral interaction.
# (I fear this will make the resulting .LT file large.)
# Use different auto equivalence lookup tables for different
# atoms in the interaction. (ie the "center" and "end" atoms)
auto_dihedral2atom = [auto_dihedralend2atom,
auto_dihedralcenter2atom,
auto_dihedralcenter2atom,
auto_dihedralend2atom]
for i in range(0, 4):
dihedral_atom_name = atom_names[i]
sys.stderr.write('DEBUG: dihedral_atom_name = '+dihedral_atom_name+'\n')
if not is_auto:
assert(dihedral_atom_name[-1] != '_')
# assume regular equivalences when looking up atom types
sys.stderr.write('DEBUG: equiv_dihedral2atom['+dihedral_atom_name+'] = '+
str(equiv_dihedral2atom[dihedral_atom_name])+'\n')
for a in equiv_dihedral2atom[dihedral_atom_name]:
atom_combos[i].add(a)
else:
assert((dihedral_atom_name[-1] == '_') or (ange_atom_name[0] == '*'))
# assume "auto" equivalences when looking up atom types
sys.stderr.write('DEBUG: auto_dihedral2atom['+str(i)+']['+dihedral_atom_name+'] = \n'
' '+str(equiv_dihedral2atom[i][dihedral_atom_name])+'\n')
for a in auto_dihedral2atom[i][dihedral_atom_name]:
atom_combos[i].add(a)
found_at_least_one = False
#*#for a1, a1priority in atom_priorities[0].items():
#*# for a2, a2priority in atom_priorities[1].items():
#*# for a3, a3priority in atom_priorities[2].items():
#*# for a4, a3priority in atom_priorities[3].items():
for a1 in atom_combos[0]:
for a2 in atom_combos[1]:
#sys.stderr.write('atom2auto_bond = '+str(atom2auto_bond)+'\n')
bond_data12 = LookupBondLength(a1, a2,
atom2equiv_bond,
bond2r0,
atom2auto_bond,
bond2r0_auto)
if bond_data12 == None:
# Save time by only continuing if a bond was
# found between a1 and a2
continue
for a3 in atom_combos[2]:
bond_data23 = LookupBondLength(a2, a3,
atom2equiv_bond,
bond2r0,
atom2auto_bond,
bond2r0_auto)
if bond_data23 == None:
# Save time by only continuing if a bond was
# found between a2 and a3
continue
angle_data123 = LookupRestAngle(a1, a2, a3,
atom2equiv_angle,
angle2theta0_or,
[atom2auto_angleend,
atom2auto_anglecenter,
atom2auto_anglecenter],
angle2theta0_auto_or)
if angle_data123 == None:
# Save time by only continuing if an angle was
# found between a1, a2, a3
continue
for a4 in atom_combos[3]:
bond_data34 = LookupBondLength(a3, a4,
atom2equiv_bond,
bond2r0,
atom2auto_bond,
bond2r0_auto)
if bond_data34 == None:
# Save time by only continuing if a bond was
# found between a3 and a4
continue
#rest bond lengths:
r0s = [0.0, 0.0, 0,0]
#equivalent atom names used to lookup the bonds:
batoms = [['', ''], ['', ''], ['','']]
r0s[0], batoms[0] = bond_data12
r0s[1], batoms[1] = bond_data23
r0s[2], batoms[2] = bond_data34
angle_data234 = LookupRestAngle(a2, a3, a4,
atom2equiv_angle,
angle2theta0_or,
[atom2auto_angleend,
atom2auto_anglecenter,
atom2auto_anglecenter],
angle2theta0_auto_or)
if angle_data234 == None:
# Save time by only continuing if an angle was
# found between a2, a3, a4
continue
#rest angles:
theta0s = [0.0, 0.0]
#equivalent atom names used to lookup angles:
aatoms = [['', '',''], ['', '','']]
theta0s[0], aatoms[0] = angle_data123
theta0s[1], aatoms[1] = angle_data234
found_at_least_one = True
order_reversed = aorig[0] > aorig[-1]
if order_reversed:
theta0s.reverse()
aatoms.reverse()
aatoms[0].reverse()
aatoms[1].reverse()
#if is_auto:
dih_name_full = dih_name_orig + ',' + \
EncodeInteractionName(batoms[0] + batoms[1] + batoms[2] +
aatoms[0] + aatoms[1],
False)
#else:
# assert(batoms[0][1] == batoms[1][0])
# assert(batoms[1][1] == batoms[2][0])
# assert(aatoms[0][1] == aatoms[1][0])
# assert(aatoms[0][2] == aatoms[1][1])
# dih_name_full = dih_name_orig + ',' + \
# EncodeInteractionName([batoms[0][0], batoms[0][1]
# batoms[2][0], batoms[2][1],
# aatoms[0][0], aatoms[0][1],
# aatoms[0][2], aatoms[1][0]],
# False)
found_at_least_one = True
########### Fourier terms ###########
#if dih_name_orig in dihedral2param_or:
V_phi0_params = dihedral2params_or[dih_name_orig]
dihedral2params[dih_name_full] = ' '.join(V_phi0_params)
#else:
# dihedral2params[dih_name_full] = '0.0 0.0 0.0 0.0 0.0 0.0'
########### "mbt", "ebt", and "aat" terms ###########
# "mbt" terms:
if dih_name_orig in dihedral2class2_mbt_or:
Fmbt = dihedral2class2_mbt_or[dih_name_orig]
else:
Fmbt = ['0.0', '0.0', '0.0']
dihedral2class2_mbt_or[dih_name_orig] = Fmbt
dihedral2ver_mbt_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_mbt_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2class2_mbt[dih_name_full] = \
(Fmbt[0]+' '+Fmbt[1]+' '+Fmbt[2]+' '+r0s[1])
dihedral2priority_mbt = \
DetermineNumericPriority(is_auto,
batoms[1],
float(dihedral2ver_mbt_or[dih_name_orig]))
dihedral2ver_mbt[dih_name_full] = dihedral2ver_mbt_or[dih_name_orig]
dihedral2ref_mbt[dih_name_full] = dihedral2ref_mbt_or[dih_name_orig]
# "ebt" terms:
if dih_name_orig in dihedral2class2_ebt_or:
Febt = dihedral2class2_ebt_or[dih_name_orig]
dihedral2sym_ebt = ((Febt[0][0] == Febt[1][0]) and
(Febt[0][1] == Febt[1][1]) and
(Febt[0][2] == Febt[1][2]))
#and (r0s[0] == r0s[2]))
else:
Febt = [['0.0','0.0','0.0'], ['0.0','0.0','0.0']]
dihedral2class2_ebt_or[dih_name_orig] = Febt
dihedral2ver_ebt_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_ebt_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2sym_ebt = True
dihedral2class2_ebt[dih_name_full]= (Febt[0][0] + ' ' +
Febt[0][1] + ' ' +
Febt[0][2] + ' ' +
Febt[1][0] + ' ' +
Febt[1][1] + ' ' +
Febt[1][2] + ' ' +
r0s[0]+' '+r0s[2])
dihedral2priority_ebt = \
DetermineNumericPriority(is_auto,
batoms[0] + batoms[2],
float(dihedral2ver_ebt_or[dih_name_orig]))
dihedral2ver_ebt[dih_name_full] = dihedral2ver_ebt_or[dih_name_orig]
dihedral2ref_ebt[dih_name_full] = dihedral2ref_ebt_or[dih_name_orig]
#(Note: large atom_priority number <==> low priority
# Only one of the atom priority numbers should be > 0)
# "bb13" terms:
if dih_name_orig in dihedral2class2_bb13_or:
Kbb13 = dihedral2class2_bb13_or[dih_name_orig]
#dihedral2sym_bb13 = (r0s[0] == r0s[2])
dihedral2sym_bb13 = True
else:
Kbb13 = '0.0'
dihedral2class2_bb13_or[dih_name_orig] = Kbb13
dihedral2ver_bb13_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_bb13_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2sym_bb13 = True
dihedral2class2_bb13[dih_name_full] = (Kbb13+' '+r0s[0]+' '+r0s[2])
dihedral2priority_bb13 = \
DetermineNumericPriority(is_auto,
batoms[0] + batoms[2],
float(dihedral2ver_bb13_or[dih_name_orig]))
dihedral2ver_bb13[dih_name_full] = dihedral2ver_bb13_or[dih_name_orig]
dihedral2ref_bb13[dih_name_full] = dihedral2ref_bb13_or[dih_name_orig]
########### "at" and "aat" terms ###########
# "at" terms:
if dih_name_orig in dihedral2class2_at_or:
Fat = dihedral2class2_at_or[dih_name_orig]
dihedral2sym_at = ((Fat[0][0] == Fat[1][0]) and
(Fat[0][1] == Fat[1][1]) and
(Fat[0][2] == Fat[1][2]))
#and (theta0[0] == theta0[1]))
else:
Fat = [['0.0','0.0','0.0'], ['0.0','0.0','0.0']]
dihedral2class2_at_or[dih_name_orig] = Fat
dihedral2ver_at_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_at_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2sym_at = True
dihedral2class2_at[dih_name_full] = \
(Fat[0][0] + ' ' +
Fat[0][1] + ' ' +
Fat[0][2] + ' ' +
Fat[1][0] + ' ' +
Fat[1][1] + ' ' +
Fat[1][2] + ' ' +
theta0s[0] + ' ' +
theta0s[1])
# --- angle-torsion ("at") cross term: record priority/version/reference ---
dihedral2priority_at = \
DetermineNumericPriority(is_auto,
aatoms[0] + aatoms[1],
float(dihedral2ver_at_or[dih_name_orig]))
dihedral2ver_at[dih_name_full] = dihedral2ver_at_or[dih_name_orig]
dihedral2ref_at[dih_name_full] = dihedral2ref_at_or[dih_name_orig]
# "aat" terms:
# (angle-angle-torsion cross term.  If the .frc file supplied no "aat"
#  entry for this dihedral, fall back to a zero coefficient so that a
#  complete set of class2 coefficients can still be written out.)
if dih_name_orig in dihedral2class2_aat_or:
Kaat = dihedral2class2_aat_or[dih_name_orig]
#dihedral2sym_aat = (theta0[0] == theta0[1])
dihedral2sym_aat = True
else:
Kaat = '0.0'
dihedral2class2_aat_or[dih_name_orig] = Kaat
dihedral2ver_aat_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_aat_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2sym_aat = True
dihedral2class2_aat[dih_name_full] = \
(Kaat+' '+theta0s[0]+' '+theta0s[1])
dihedral2priority_aat = \
DetermineNumericPriority(is_auto,
aatoms[0] + aatoms[1],
float(dihedral2ver_aat_or[dih_name_orig]))
dihedral2ver_aat[dih_name_full] = dihedral2ver_aat_or[dih_name_orig]
dihedral2ref_aat[dih_name_full] = dihedral2ref_aat_or[dih_name_orig]
# A new fully-qualified dihedral name was just added to dihedral2params
# during this pass; print its rest geometry for debugging.
if len(dihedral2params) > num_dihedrals:
sys.stderr.write('DEBUG: dihedral['+dih_name_full+']:\n'
'(r12,r23,r34) = ('
+r0s[0]+','+r0s[1]+','+r0s[2]+') \n'
'(theta123,theta234) = ('
+theta0s[0]+','+theta0s[1]+') \n')
sys.stderr.write('DEBUG: num_dihedrals = len(dihedral2params) = '
+str(len(dihedral2params))+'\n')
# The version of the combined interaction is the newest version among
# all of its class2 sub-terms (main, mbt, ebt, bb13, at, aat).
version = max((dihedral2ver_or[dih_name_orig],
dihedral2ver_mbt_or[dih_name_orig],
dihedral2ver_ebt_or[dih_name_orig],
dihedral2ver_bb13_or[dih_name_orig],
dihedral2ver_at_or[dih_name_orig],
dihedral2ver_aat_or[dih_name_orig]))
dihedral2style[dih_name_full] = 'class2'
dihedral2ver[dih_name_full] = version
dihedral2ref[dih_name_full] = dihedral2ref_or[dih_name_orig]
# Priority tuple: (1, is_auto, main-term priority, then the priority of
# each cross term).  These tuples are compared lexicographically when
# interactions are sorted later during output.
dihedral2priority[dih_name_full] = \
(1,
is_auto,
dihedral2priority_or[dih_name_orig],
dihedral2priority_mbt,
dihedral2priority_ebt,
dihedral2priority_bb13,
dihedral2priority_at,
dihedral2priority_aat)
num_dihedrals = len(dihedral2params)
# If the cross-term parameters are asymmetric even though the atom names
# are palindromic (1<->4, 2<->3), the interaction cannot be represented;
# give up with an explanatory error.
if ((not (dihedral2sym_ebt and
#dihedral2sym_mbt and
# (note: symmetry doesn't make sense for mbt)
dihedral2sym_at and
dihedral2sym_aat and
dihedral2sym_bb13))
and
((atom_names[0] == atom_names[3]) and
(atom_names[1] == atom_names[2]))):
raise InputError('Error: Unsupported dihedral interaction: \"@dihedral:'+str(dih_name_orig)+'\"\n'
' This interaction has symmetric atom names:\n'+
', '.join(atom_names)+'\n'+
' and yet it lacks symmetry in the corresponding force field parameters.\n'+
' (If this is not a mistake in the .frc file, then explain\n'+
' why to andrew so he can fix this.)\n')
#sys.stderr.write('DEBUG: number of interactions = '+str(len(dihedral2class2_bb))+'\n')
# No (r0, theta0) data could be found for any atom-type combination
# matching this dihedral: emit a warning comment instead of failing.
if not found_at_least_one:
#raise InputError('Error: Undefined bonds for bond-bond interactions:\n'
#                 '       '+str(atom_names)+'\n')
lines_warnings.append('# WARNING: Undefined bond length (r0) or rest angle (theta0) for\n'+
'# the dihedral interaction between: ' +
' '.join(atom_names)+'\n')
#sys.stderr.write('bond_names = ' + str(bond_names) + '\n')
############ POST-PROCESSING IMPROPERS ###########
# For every improper (out-of-plane) interaction that has angle-angle
# ("aa") cross terms, enumerate the atom-type combinations it applies to
# and emit a fully-qualified @improper entry for each combination.
for imp_name_orig in improper2cross:
assert(imp_name_orig in improper2params_or)
#assert(imp_name_orig in improper2class2_aa_or)
# BUGFIX: this originally tested dih_name_orig, a stale variable left over
# from the dihedral loop above; the key of *this* loop is imp_name_orig
# (the corrected value matches the recomputation done further below with
# IsAutoInteraction(imp_name_orig)).
is_auto = (imp_name_orig.find('auto_') == 0)
atom_names = ExtractANames(imp_name_orig)
num_impropers = 0
# atom_combos[i] collects every concrete atom type that may occupy slot i
# of this improper (slot 1 is the central "hub" atom).
atom_combos = [set([]), set([]), set([]), set([])]
#*#atom_priorities = [{}, {}, {}, {}]
#*#atom_priorities[i][atom_name] = priority of i'th atom in interaction
# We must consider every possible combination of atom types
# which satisfy both:
# improper_equivalences
# angle_equivalences
# ...AND we must consider BOTH regular AND auto equivalences.
# For each combination generate a separate @improper interaction.
# (I fear this will make the resulting .LT file large.)
# Use different auto equivalence lookup tables for different
# atoms in the interaction. (ie the "center" and "end" atoms)
auto_improper2atom = [atom2auto_improperend,
atom2auto_impropercenter,
atom2auto_improperend,
atom2auto_improperend]
for i in range(0, 4):
improper_atom_name = atom_names[i]
sys.stderr.write('DEBUG: improper_atom_name = '+improper_atom_name+'\n')
if not is_auto:
# Regular (non-auto) interactions never end in '_'.
assert(improper_atom_name[-1] != '_')
# assume regular equivalences when looking up atom types
sys.stderr.write('DEBUG: equiv_improper2atom['+improper_atom_name+'] = '+
str(equiv_improper2atom[improper_atom_name])+'\n')
for a in equiv_improper2atom[improper_atom_name]:
atom_combos[i].add(a)
else:
# BUGFIX: 'angle_atom_name' was a stale name copied from the angle-processing
# section (it is undefined in this scope and would raise NameError whenever
# the first clause is False); the variable being validated here is
# improper_atom_name.
assert((improper_atom_name[-1] == '_') or (improper_atom_name[0] == '*'))
# assume "auto" equivalences when looking up atom types
# BUGFIX: the debug message previously printed equiv_improper2atom[i][...],
# but equiv_improper2atom is keyed by atom *name* (indexing it with the
# integer i raises KeyError).  The loop below actually reads
# auto_improper2atom[i], so print that table instead.
sys.stderr.write('DEBUG: auto_improper2atom['+str(i)+']['+improper_atom_name+'] = \n'
' '+str(auto_improper2atom[i][improper_atom_name])+'\n')
for a in auto_improper2atom[i][improper_atom_name]:
atom_combos[i].add(a)
is_auto = IsAutoInteraction(imp_name_orig) # is this an "auto" interaction?
atom_names = ExtractANames(imp_name_orig) # names of all 4 atoms
lnames = [atom_names[0], atom_names[2], atom_names[3]] # names of "leaf" atoms
#M1 = improper2cross[imp_name_orig][ 2 ]
#M2 = improper2cross[imp_name_orig][ 0 ]
#M3 = improper2cross[imp_name_orig][ 3 ]
#try:
# M1/M2/M3 are the angle-angle coupling coefficients; each lookup key
# names the pair of angles sharing one leaf atom (order matches the
# LAMMPS improper_style class2 Eaa formula quoted further below).
M1 = improper2cross[imp_name_orig][ImCrossTermID([atom_names[0],
atom_names[1],
atom_names[2],
atom_names[3]])]
#except KeyError:
# M1 = '0.0'
#try:
M2 = improper2cross[imp_name_orig][ImCrossTermID([atom_names[2],
atom_names[1],
atom_names[0],
atom_names[3]])]
#except KeyError:
# M2 = '0.0'
#try:
M3 = improper2cross[imp_name_orig][ImCrossTermID([atom_names[0],
atom_names[1],
atom_names[3],
atom_names[2]])]
#except KeyError:
# M3 = '0.0'
# ###### Symmetry: ######
# Unfortunately, it's time to wade into the messy issue of symmetry.
# We desire a way to detect whether an improper interaction
# between 4 atoms is invariant with respect to atom reordering
# of the 3 peripheral "leaf" atoms which surround the central atom.
# In principle, any rearrangement of atoms would require a separate
# class2 improper interaction. However, in some cases, when the
# parameters for these rearrangements are symmetric, we can detect
# that and warn moltemplate that it is not necessary to generate new
# improper interactions for every conceivable permutation of these
# atoms. Figuring out when it is safe to do that is a headache.
# (...but it's necessary. Otherwise each junction in the molecule
# will generate 3*2*1=6 improper interactions which are usually
# redundant. This will slow down the simulation significantly
# and may make it difficult to compare the resulting LAMMPS
# input files with those generated by other tools like msi2lmp.)
#
# To make this easier, I store the parameters in arrays which
# are arranged in a more symmetric way
M = [0.0, 0.0, 0.0]
theta0 = [0.0, 0.0, 0.0]
# noti3[i] = the sorted tuple of integers from the
# set {0,1,2} which remain after deleting i
noti3 = ((1,2), (0,2), (0,1))
i_neigh = [ ([0,2,3][ noti3[i][0] ], # neighbor leaves of ith leaf
[0,2,3][ noti3[i][1] ]) for i in range(0,3)]
for i in range(0, 3):
# You will notice the pattern "[0,2,3][i]" appears often in the
# code below because for class 2 force-fields, the second atom
# (with index 1) is the central atom ("hub" atom), and the three
# that surround it ("leaf" atoms) have indices 0,2,3. I want
# to skip over the central atoms and loop over the leaf atoms
imTermID = ImCrossTermID([atom_names[ i_neigh[i][0] ],
atom_names[ 1 ],
atom_names[ [0,2,3][i] ],
atom_names[ i_neigh[i][1] ]])
M[i] = float(improper2cross[imp_name_orig][imTermID])
##i_leaf = [0,2,3][i]
##M[i] = float(improper2cross[imp_name_orig][ i_leaf ])
#angle_name_l = SortByEnds([atom_names[i_neigh[i][0]],
# atom_names[ 1 ],
# atom_names[i_neigh[i][1]]])
#angle_name = EncodeInteractionName(angle_name_l, is_auto)
#theta0[i] = float(angle2theta0_or[angle_name])
# If the two coupling coefficients flanking leaf i agree, then the
# two neighboring leaves may be swapped without changing the energy.
for i in range(0, 3):
if (M[ noti3[i][0] ] == M[ noti3[i][1] ]):
#and (theta0[ noti3[i][0] ] == theta0[ noti3[i][1] ])):
# Then it is safe to swap the order of these two atoms in
# the list of atoms when looking up force-field parameters
improper2sym[imp_name_orig].add(i_neigh[i][0])
improper2sym[imp_name_orig].add(i_neigh[i][1])
# Later, I can use these to decide whether or not I need to
# change the default script with symmetry rules. (I'm hoping
# that "cenJsortIKL.py" should work in most cases.)
# CONTINUEHERE: FIGURE OUT WHETHER TO WORRY ABOUT improper2sym
else:
if atom_names[i_neigh[i][0]] == atom_names[i_neigh[i][1]]:
raise InputError('Error: Unsupported improper interaction: \"@improper:'+str(imp_name_orig)+'\"\n'
' This interaction has matching atom aliases:\n'
' (@atom:'+str(atom_names[i_neigh[i][0]])+
', @atom:'+str(atom_names[i_neigh[i][1]])+')\n'
' and yet it lacks symmetry in the corresponding force field parameters.\n'
' (If this is not a mistake in the .frc file, then ask andrew to\n'
' fix this limitation.)\n')
# Enumerate every (a1,a2,a3,a4) atom-type combination and, whenever the
# three required rest angles exist, emit a fully-qualified improper entry.
found_at_least_one = False
#*#for a1, a1priority in atom_priorities[0].items():
#*# for a2, a2priority in atom_priorities[1].items():
#*# for a3, a3priority in atom_priorities[2].items():
#*# for a4, a3priority in atom_priorities[3].items():
for a1 in atom_combos[0]:
for a2 in atom_combos[1]:
for a3 in atom_combos[2]:
for a4 in atom_combos[3]:
# Collect information from the different terms in a class2 improper:
# http://lammps.sandia.gov/doc/improper_class2.html
# Loop over the neighbors of the central atom in each improper
# interaction and collect all the Mi and Ti parameters. Collect
# them in the order they appear in the formula for the Eaa
# term as it appears in the documentation for improper_style class2:
#
# http://lammps.sandia.gov/doc/improper_class2.html
#
# Eaa = M1 (Tijk - T0)(Tkjl - T2) + #common leaf node: k (index 2)
# M2 (Tijk - T0)(Tijl - T1) + #common leaf node: i (index 0)
# M3 (Tijl - T1)(Tkjl - T2) #common leaf node: l (index 3)
# (I'm trying to match the variable names used in this web page
# I wish the author had chosen the M1,M2,M3, T1,T2,T3 order in more
# symmetric way, or at least in a way that makes more sense to me.)
theta0s = ['0.0', '0.0', '0.0']
aatoms = [['', '',''], ['', '',''], ['', '', '']]
#angle_name_l = SortByEnds([atom_names[0], atom_names[1], atom_names[2]])
#angle_name = EncodeInteractionName(angle_name_l, is_auto)
#theta01 = angle2theta0_or[angle_name]
angle_data = LookupRestAngle(a1, a2, a3,
atom2equiv_angle,
angle2theta0_or,
[atom2auto_improperend,
atom2auto_impropercenter,
atom2auto_improperend],
angle2theta0_auto_or)
if angle_data == None:
# Save time by only continuing if an angle was
# found between a1, a2, a3
continue
theta0s[0], aatoms[0] = angle_data
#angle_name_l = SortByEnds(aatoms[0])
#angle_name = EncodeInteractionName(angle_name_l[0], is_auto)
#theta02 = angle2theta0_or[angle_name]
angle_data = LookupRestAngle(a1, a2, a4,
atom2equiv_angle,
angle2theta0_or,
[atom2auto_improperend,
atom2auto_impropercenter,
atom2auto_improperend],
angle2theta0_auto_or)
if angle_data == None:
# Save time by only continuing if an angle was
# found between a1, a2, a4
continue
theta0s[1], aatoms[1] = angle_data
#angle_name_l = SortByEnds(aatoms[1])
#angle_name = EncodeInteractionName(angle_name_l, is_auto)
#theta03 = angle2theta0_or[angle_name]
angle_data = LookupRestAngle(a3, a2, a4,
atom2equiv_angle,
angle2theta0_or,
[atom2auto_improperend,
atom2auto_impropercenter,
atom2auto_improperend],
angle2theta0_auto_or)
if angle_data == None:
# Save time by only continuing if an angle was
# found between a3, a2, a4
continue
theta0s[2], aatoms[2] = angle_data
# The following asserts checks that the two theta0s
# are defined whenever the corresponding M is defined.
# (Note: The order is LAMMPS-implementation specific.
# See http://lammps.sandia.gov/doc/improper_class2.html)
assert((float(theta0s[0]) != 0) or (float(M1) == 0))
assert((float(theta0s[2]) != 0) or (float(M1) == 0))
assert((float(theta0s[0]) != 0) or (float(M2) == 0))
assert((float(theta0s[1]) != 0) or (float(M2) == 0))
assert((float(theta0s[1]) != 0) or (float(M3) == 0))
assert((float(theta0s[2]) != 0) or (float(M3) == 0))
#angle_name_l = SortByEnds(aatoms[2])
#angle_name = EncodeInteractionName(angle_name_l, is_auto)
# The fully-qualified name appends the three angle atom triplets to
# the original improper name.
imp_name_full = imp_name_orig + ',' + \
EncodeInteractionName(aatoms[0] + aatoms[1] + aatoms[2],
False)
#if imp_name_orig in improper2params_or[imp_name_orig]:
improper2params[imp_name_full] = ' '.join(improper2params_or[imp_name_orig])
#else:
# improper2params[imp_name_full] = '0.0 0.0'
#if imp_name_orig in improper2cross:
improper2class2_aa[imp_name_full] = \
(str(M1)+' '+str(M2)+' '+str(M3)+' '+
str(theta0s[0])+' '+str(theta0s[1])+' '+str(theta0s[2]))
#else:
# improper2class2_aa[imp_name_full] = '0.0 0.0 0.0 0.0 0.0 0.0'
# improper2ver_aa_or[imp_name_orig] = improper2ver_or[imp_name_orig]
# improper2ref_aa_or[imp_name_orig] = improper2ref_or[imp_name_orig]
improper2priority_aa = \
DetermineNumericPriority(is_auto,
aatoms[0] + aatoms[1] + aatoms[2],
float(improper2ver_aa_or[imp_name_orig]))
improper2ver_aa[imp_name_full] = improper2ver_aa_or[imp_name_orig]
improper2ref_aa[imp_name_full] = improper2ref_aa_or[imp_name_orig]
# Overall version = newest among the main and "aa" sub-terms.
version = max((improper2ver_or[imp_name_orig],
improper2ver_aa_or[imp_name_orig]))
improper2style[imp_name_full] = 'class2'
improper2ref[imp_name_full] = improper2ref_or[imp_name_orig]
improper2ver[imp_name_full] = version
# Lexicographically-compared priority tuple (see dihedral analog above).
improper2priority[imp_name_full] = \
(1,
is_auto,
improper2priority_or[imp_name_orig],
improper2priority_aa)
sys.stderr.write("done\n")
sys.stderr.write("Converting to moltemplate format...\n")
##################### BEGIN WRITING FILE #####################
# Everything below writes the generated moltemplate (.LT) file to stdout.
sys.stdout.write("# This file was generated automatically using:\n")
sys.stdout.write("# " + g_program_name + " " + " ".join(sys.argv[1:]) + "\n")
sys.stdout.write("\n\n")
sys.stdout.write(ffname + " {\n\n")
# --- "Data Masses" section: one line per atom type ---
sys.stdout.write("\n"
" # AtomType Mass # \"Description\" (version, reference)\n\n")
sys.stdout.write(" write_once(\"Data Masses\") {\n")
for atype in atom2mass:
sys.stdout.write(" @atom:" + atype + " " + str(atom2mass[atype]))
sys.stdout.write(" # ")
if atype in atom2element:
sys.stdout.write(atom2element[atype] + ", ")
#sys.stdout.write(atom2descr[atype])
sys.stdout.write("\"" + atom2descr[atype] + "\"")
sys.stdout.write(" (")
if atype in atom2numbonds:
sys.stdout.write("nbonds="+str(atom2numbonds[atype])+", ")
sys.stdout.write("ver=" + atom2ver[atype] +
", ref=" + atom2ref[atype])
sys.stdout.write(")\n")
sys.stdout.write(" } #(end of atom masses)\n\n\n")
# --- "replace" commands mapping short @atom names to fully-qualified ones ---
sys.stdout.write(" # ---------- EQUIVALENCE CATEGORIES for bonded interaction lookup ----------\n"
" # Each type of atom has a separate ID used for looking up bond parameters\n"
" # and a separate ID for looking up 3-body angle interaction parameters\n"
" # and a separate ID for looking up 4-body dihedral interaction parameters\n"
" # and a separate ID for looking up 4-body improper interaction parameters\n"
#" # (This is because there are several different types of sp3 carbon atoms\n"
#" # which have the same torsional properties when within an alkane molecule,\n"
#" # for example. If they share the same dihedral-ID, then this frees us\n"
#" # from being forced define separate dihedral interaction parameters\n"
#" # for all of them.)\n"
" # The complete @atom type name includes ALL of these ID numbers. There's\n"
" # no need to force the end-user to type the complete name of each atom.\n"
" # The \"replace\" command used below informs moltemplate that the short\n"
" # @atom names we have been using abovee are equivalent to the complete\n"
" # @atom names used below:\n\n")
for atype in atom2ffid:
#ffid = atype + "_ffid" + atom2ffid[atype]
sys.stdout.write(" replace{ @atom:" + atype +
" @atom:" + atom2ffid[atype] + " }\n")
sys.stdout.write("\n\n\n\n")
# --- non-bonded pair_coeff commands ---
sys.stdout.write(" # --------------- Non-Bonded Interactions: ---------------------\n"
" # Syntax:\n"
" # pair_coeff AtomType1 AtomType2 pair_style_name parameters...\n\n")
sys.stdout.write(" write_once(\"In Settings\") {\n")
for atype in pair2params:
assert(atype in pair2style)
if include_auto_equivalences:
assert(atype in atom2auto_pair)
# Auto equivalences use the long ap/aq/ab/... wildcard fields;
# regular equivalences (else branch below) use p/b/a/d/i fields.
if include_auto_equivalences:
sys.stdout.write(' pair_coeff @atom:*,ap' + atom2auto_pair[atype] +
',aq*,ab*,aae*,aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap' + atom2auto_pair[atype] +
',aq*,ab*,aae*,aac*,ade*,adc*,aie*,aic* ' +
pair2style[atype] + ' ' +
pair2params[atype] +
' # (ver=' + pair2ver[atype] +
', ref=' +pair2ref[atype] + ')\n')
else:
continue
else:
assert(atype in atom2equiv_pair)
sys.stdout.write(' pair_coeff ' +
'@atom:*,p' + atom2equiv_pair[atype] + ',b*,a*,d*,i* ' +
'@atom:*,p' + atom2equiv_pair[atype] + ',b*,a*,d*,i* ' +
pair2style[atype] + ' ' +
pair2params[atype] +
' # (ver=' + pair2ver[atype] +
', ref=' +pair2ref[atype] + ')\n')
sys.stdout.write(" } #(end of pair_coeffs)\n\n\n\n")
################# Print Charge By Bond Interactions ##################
# Sort bond names by priority (highest first) so more specific rules win.
charge_pair_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(charge_pair_priority.items())],
key=itemgetter(1),
reverse=True)]
if len(charge_pair_priority) > 0:
sys.stdout.write(" # ---------- Charge By Bond (a.k.a. \"bond equivalences\") ----------\n")
# Print rules for generating (2-body) "bond" interactions:
sys.stdout.write('\n\n\n'
' write_once("Data Charge By Bond") {\n')
for bond_name in charge_pair_priority_high_to_low:
# 'X' is the wildcard atom name in the force field; translate to '*'.
anames = ['*' if x=='X' else x
for x in ExtractANames(bond_name)]
# Did the user ask us to include "auto" interactions?
if IsAutoInteraction(bond_name):
if include_auto_equivalences:
sys.stdout.write(' @atom:*,ap*,aq' + anames[0] +
',ab*,aae*,aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap*,aq' + anames[1] +
',ab*,aae*,aac*,ade*,adc*,aie*,aic*' +
' ' + bond2chargepair[bond_name] +
" # (ver=" + charge_pair_ver[bond_name] +
", ref=" + charge_pair_ref[bond_name] + ")\n")
else:
continue
else:
sys.stdout.write(' @atom:*,p*,b' + anames[0] + ',a*,d*,i* ' +
' @atom:*,p*,b' + anames[1] + ',a*,d*,i* ' +
' ' + bond2chargepair[bond_name] +
" # (ver=" + charge_pair_ver[bond_name] +
", ref=" + charge_pair_ref[bond_name] + ")\n")
sys.stdout.write(' } #(end of Charge by Bond (bond equivalences))\n\n'
'\n\n\n\n')
################# Print 2-body Bond Interactions ##################
# Sort bond names by priority (highest first).
bond_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(bond2priority.items())],
key=itemgetter(1),
reverse=True)]
if len(bond2priority) > 0:
sys.stdout.write(" # --------------- Bond Interactions: ---------------------\n")
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (2-body) "bond" interactions: --\n'
' # BondType AtomType1 AtomType2\n')
sys.stdout.write('\n'
' write_once("Data Bonds By Type')
if bond_symmetry_subgraph != '':
sys.stdout.write(' ('+bond_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for bond_name in bond_names_priority_high_to_low:
# Skip styles the user did not select on the command line.
if not (bond2style[bond_name] in
bond_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(bond_name)]
# Did the user ask us to include "auto" interactions?
if IsAutoInteraction(bond_name):
if include_auto_equivalences:
sys.stdout.write(' @bond:' + bond_name + ' ' +
' @atom:*,ap*,aq*,ab' + anames[0] +
',aae*,aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap*,aq*,ab' + anames[1] +
',aae*,aac*,ade*,adc*,aie*,aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @bond:' + bond_name + ' ' +
' @atom:*,b' + anames[0] + ',a*,d*,i* ' +
' @atom:*,b' + anames[1] + ',a*,d*,i* ' +
'\n')
sys.stdout.write(' } # end of "Data Bonds By Type" section\n'
'\n')
# Print the force-field parameters for these bond interactions:
sys.stdout.write('\n\n'
' # ------------ Bond Parameters: ----------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for bond_style in bond_styles:
if not (bond_style in bond_styles_selected):
continue
sys.stdout.write(' # '+bond_style2docs[bond_style]+'\n')
sys.stdout.write('\n'
' # Syntax: \n'
' # bond_coeff BondTypeName BondStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for bond_name in bond_names_priority_high_to_low:
if not (bond2style[bond_name] in
bond_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
if (IsAutoInteraction(bond_name) and
(not include_auto_equivalences)):
continue
sys.stdout.write(' bond_coeff @bond:'+bond_name+' '+
bond2style[bond_name] + ' ' +
bond2params[bond_name] +
" # (ver=" + bond2ver[bond_name] +
", ref=" +bond2ref[bond_name] + ")\n")
sys.stdout.write(' } # end of bond_coeff commands\n'
'\n\n')
################# Print 3-body Angle Interactions ##################
# Sort angle names by priority (highest first).
ang_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(angle2priority.items())],
key=itemgetter(1),
reverse=True)]
if len(angle2priority) > 0:
sys.stdout.write(" # --------------- Angle Interactions: ---------------------\n")
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (3-body) "angle" interactions: --\n'
' # AngleType AtomType1 AtomType2 AtomType3 [BondType1 BondType2]\n')
sys.stdout.write('\n'
' write_once("Data Angles By Type')
if angle_symmetry_subgraph != '':
sys.stdout.write(' ('+angle_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for angle_name in ang_names_priority_high_to_low:
if not (angle2style[angle_name] in
angle_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(angle_name)]
#if (len(anames) == 3) and angle2style[angle_name] == 'class2':
# continue
# anames[3:7] encode the two constituent bonds; the middle atom
# is shared, hence the assertion.
bnames = [anames[3:5], anames[5:7]]
assert(bnames[0][1] == bnames[1][0])
# (NOTE TO SELF:
# If these assertions fail, then try checking if they are
# all either the same, or '*'. If they are then just replace '*'
# everwhere that atom appears with the most restrictive name.)
# Optional: Shorten the angle name since some of the bnames are redundant:
is_auto = IsAutoInteraction(angle_name)
anm = [a for a in map(EncodeAName, anames)]
ang_name_abbr = EncodeInteractionName(anm[0:3]+
[anm[3],anm[4],anm[6]],
is_auto)
# Did the user ask us to include "auto" interactions?
if IsAutoInteraction(angle_name):
if include_auto_equivalences:
sys.stdout.write(' @angle:' + ang_name_abbr + ' ' +
' @atom:*,ap*,aq*,ab'+bnames[0][0]+',aae' + anames[0] +
',aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap*,aq*,ab'+bnames[0][1]+',aae*,aac'+anames[1] +
',ade*,adc*,aie*,aic*' +
' @atom:*,ap*,aq*,ab'+bnames[1][1]+',aae' + anames[2] +
',aac*,ade*,adc*,aie*,aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @angle:' + ang_name_abbr + ' ' +
#' @atom:*,p*,b*,a' + anames[0] + ',d*,i* ' +
#' @atom:*,p*,b*,a' + anames[1] + ',d*,i* ' +
#' @atom:*,p*,b*,a' + anames[2] + ',d*,i* ' +
#' @bond:'+bnames[0][0]+','+bnames[0][1]+' '
#' @bond:'+bnames[1][0]+','+bnames[1][1]+'\n'
' @atom:*,p*,b'+bnames[0][0]+',a'+anames[0]+',d*,i* ' +
' @atom:*,p*,b'+bnames[0][1]+',a'+anames[1]+',d*,i* ' +
' @atom:*,p*,b'+bnames[1][1]+',a'+anames[2]+',d*,i*'
'\n')
sys.stdout.write(' } # end of "Data Angles By Type" section\n'
'\n')
# Print the force-field parameters for these angle interactions.
# BUGFIX: the heading previously lacked a trailing '\n', so the next line
# ("# For an explanation...") was appended onto the same output line.
# Every sibling section (bonds, dihedrals) terminates its heading with
# '\n', so this was clearly an omission.
sys.stdout.write('\n\n'
' # ------- Angle Force Field Parameters: -------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
# List the documentation URL for each selected angle style.
for angle_style in angle_styles:
if not (angle_style in angle_styles_selected):
continue
sys.stdout.write(' # '+angle_style2docs[angle_style]+'\n')
sys.stdout.write('\n'
' # Syntax: \n'
' # angle_coeff AngleTypeName AngleStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for angle_name in ang_names_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(angle_name)]
#if (len(anames) == 3) and angle2style[angle_name] == 'class2':
# continue
# Optional: Shorten the angle name since some of the anames are redundant:
# BUGFIX: recompute is_auto for *this* angle_name.  Previously the stale
# value left over from the last iteration of the preceding "Data Angles
# By Type" loop was reused here, which could mis-encode the abbreviated
# name (the first loop computes it per-name; this one must as well).
is_auto = IsAutoInteraction(angle_name)
anm = [a for a in map(EncodeAName, anames)]
ang_name_abbr = EncodeInteractionName(anm[0:3]+
#[bnames[0][0],bnames[0][1], bnames[1][1]],
[anm[3],anm[4],anm[6]],
is_auto)
if not (angle2style[angle_name] in
angle_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
if (IsAutoInteraction(angle_name) and
(not include_auto_equivalences)):
continue
sys.stdout.write(' angle_coeff @angle:'+ang_name_abbr+' '+
angle2style[angle_name] + ' ' +
angle2params[angle_name] +
" # (ver=" + angle2ver[angle_name] +
", ref=" + angle2ref[angle_name] + ")\n")
# class2 angles additionally carry bond-bond ("bb") and
# bond-angle ("ba") cross-term coefficients.
if angle_name in angle2class2_bb:
sys.stdout.write(' angle_coeff @angle:'+ang_name_abbr+' '+
angle2style[angle_name] + ' bb ' +
angle2class2_bb[angle_name] +
" # (ver=" + angle2ver_bb[angle_name] +
", ref=" + angle2ref_bb[angle_name] + ")\n")
assert(angle_name in angle2class2_ba)
sys.stdout.write(' angle_coeff @angle:'+ang_name_abbr+' '+
angle2style[angle_name] + ' ba ' +
angle2class2_ba[angle_name] +
" # (ver=" + angle2ver_ba[angle_name] +
", ref=" + angle2ref_ba[angle_name] + ")\n")
sys.stdout.write(' } # end of angle_coeff commands\n'
'\n\n')
################# Print 4-body Dihedral Interactions ##################
# Sort dihedral names by priority (highest first).
dih_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(dihedral2priority.items())],
key=itemgetter(1),
reverse=True)]
if len(dih_names_priority_high_to_low) > 0:
sys.stdout.write(' # --------------- Dihedral Interactions: ---------------------\n')
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (4-body) "dihedral" interactions: --\n'
' # DihedralType AtmType1 AtmType2 AtmType3 AtmType3 [BondType1 Bnd2 Bnd3]\n')
sys.stdout.write('\n\n'
' write_once("Data Dihedrals By Type')
if dihedral_symmetry_subgraph != '':
sys.stdout.write(' ('+dihedral_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for dihedral_name in dih_names_priority_high_to_low:
if not (dihedral2style[dihedral_name] in
dihedral_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(dihedral_name)]
#if (len(anames) == 4) and dihedral2style[dihedral_name] == 'class2':
# continue
# anames[4:10] encode the 3 constituent bonds; anames[10:16] the
# 2 constituent angles.  Shared atoms must match, hence the asserts.
bnames = [anames[4:6], anames[6:8], anames[8:10]]
assert(bnames[0][1] == bnames[1][0])
assert(bnames[1][1] == bnames[2][0])
ang_names = [anames[10:13], anames[13:16]]
assert(ang_names[0][1] == ang_names[1][0])
assert(ang_names[0][2] == ang_names[1][1])
# (NOTE TO SELF:
# If these assertions fail, then try checking if they are
# all either the same, or '*'. If they are then just replace '*'
# everwhere that atom appears with the most restrictive name.)
# Optional: Shorten the angle name since some of the bnames are redundant:
is_auto = IsAutoInteraction(dihedral_name)
anm = [a for a in map(EncodeAName, anames)]
dih_name_abbr = EncodeInteractionName(anm[0:4]+
#[bnames[0][0],bnames[0][1], bnames[1][1], bnames[2][1]]
[anm[4],anm[5],anm[7],anm[9]]+
#[ang_names[0][0],ang_names[0][1],ang_names[0][2],ang_names[1][2]]
[anm[10],anm[11],anm[12],anm[15]],
is_auto)
# Debug aid: an abbreviated name should not contain wildcards.
if dih_name_abbr.find('*') != -1:
print(dihedral_name)
# Did the user ask us to include "auto" interactions?
# (non-class2 styles use only the d/ade/adc fields; class2 styles
#  also constrain the bond (b/ab) and angle (a/aae/aac) fields)
if dihedral2style[dihedral_name] != 'class2':
if IsAutoInteraction(dihedral_name):
if include_auto_equivalences:
sys.stdout.write(' @dihedral:' + dih_name_abbr + ' ' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'
+ anames[0] +
',adc*,aie*,aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc'
+ anames[1] +
',aie*,aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc'
+ anames[2] +
',aie*,aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'
+ anames[3] +
',adc*,aie*,aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @dihedral:' + dih_name_abbr + ' ' +
' @atom:*,p*,b*,a*,d' + anames[0] + ',i* ' +
' @atom:*,p*,b*,a*,d' + anames[1] + ',i* ' +
' @atom:*,p*,b*,a*,d' + anames[2] + ',i* ' +
' @atom:*,p*,b*,a*,d' + anames[3] + ',i* ' +
'\n')
else:
if IsAutoInteraction(dihedral_name):
if include_auto_equivalences:
sys.stdout.write(' @dihedral:' + dih_name_abbr + ' ' +
' @atom:*,ap*,aq*,ab'+bnames[0][0]+',aae'+ang_names[0][0]+',aac*,ade'
+ anames[0] +
',adc*,aie*,aic*' +
' @atom:*,ap*,aq*,ab'+bnames[0][1]+',aae'+ang_names[1][0]+',aac'+ang_names[0][1]+',ade*,adc'
+ anames[1] +
',aie*,aic*' +
' @atom:*,ap*,aq*,ab'+bnames[1][0]+',aae'+ang_names[0][2]+',aac'+ang_names[1][1]+',ade*,adc'
+ anames[2] +
',aie*,aic*' +
' @atom:*,ap*,aq*,ab'+bnames[1][1]+',aae'+ang_names[1][2]+',aac*,ade'
+ anames[3] +
',adc*,aie*,aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @dihedral:' + dih_name_abbr + ' ' +
' @atom:*,p*,b'+bnames[0][0]+',a'+ang_names[0][0]+',d' + anames[0] + ',i* ' +
' @atom:*,p*,b'+bnames[0][1]+',a'+ang_names[0][1]+',d' + anames[1] + ',i* ' +
' @atom:*,p*,b'+bnames[1][0]+',a'+ang_names[1][1]+',d' + anames[2] + ',i* ' +
' @atom:*,p*,b'+bnames[1][1]+',a'+ang_names[1][2]+',d' + anames[3] + ',i* ' +
'\n')
sys.stdout.write(' } # end of "Data Dihedrals By Type" section\n'
'\n')
# Print the force-field parameters for these dihedral interactions:
sys.stdout.write('\n\n'
' # ------- Dihedral Force Field Parameters: -------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for dihedral_style in dihedral_styles:
if not (dihedral_style in dihedral_styles_selected):
continue
sys.stdout.write(' # '+dihedral_style2docs[dihedral_style]+'\n')
sys.stdout.write('\n'
' # Syntax: \n'
' # dihedral_coeff DihedralTypeName DihedralStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for dihedral_name in dih_names_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(dihedral_name)]
#if (len(anames) == 4) and dihedral2style[dihedral_name] == 'class2':
# continue
# Optional: Shorten the angle name since some of the bnames are redundant:
# BUGFIX: recompute is_auto for *this* dihedral_name.  Previously the
# stale value left over from the earlier "Data Dihedrals By Type" loop
# was reused here, which could mis-encode the abbreviated name (the
# first loop computes it per-name; this one must as well).
is_auto = IsAutoInteraction(dihedral_name)
anm = [a for a in map(EncodeAName, anames)]
dih_name_abbr = EncodeInteractionName(anm[0:4]+
#[bnames[0][0],bnames[0][1], bnames[1][1], bnames[2][1]]
[anm[4],anm[5],anm[7],anm[9]]+
#[ang_names[0][0],ang_names[0][1],ang_names[0][2],ang_names[1][2]]
[anm[10],anm[11],anm[12],anm[15]],
is_auto)
if not (dihedral2style[dihedral_name] in
dihedral_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
if (IsAutoInteraction(dihedral_name) and
(not include_auto_equivalences)):
continue
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' ' +
dihedral2params[dihedral_name] +
" # (ver=" + dihedral2ver[dihedral_name] +
", ref=" + dihedral2ref[dihedral_name] + ")\n")
# class2 dihedrals additionally carry the mbt/ebt/at/aat/bb13
# cross-term coefficient lines.
if dihedral_name in dihedral2class2_mbt:
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' mbt ' +
dihedral2class2_mbt[dihedral_name] +
" # (ver=" + dihedral2ver_mbt[dihedral_name] +
", ref=" + dihedral2ref_mbt[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_ebt)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' ebt ' +
dihedral2class2_ebt[dihedral_name] +
" # (ver=" + dihedral2ver_ebt[dihedral_name] +
", ref=" + dihedral2ref_ebt[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_at)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' at ' +
dihedral2class2_at[dihedral_name] +
" # (ver=" + dihedral2ver_at[dihedral_name] +
", ref=" + dihedral2ref_at[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_aat)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' aat ' +
dihedral2class2_aat[dihedral_name] +
" # (ver=" + dihedral2ver_aat[dihedral_name] +
", ref=" + dihedral2ref_aat[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_bb13)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' bb13 ' +
dihedral2class2_bb13[dihedral_name] +
" # (ver=" + dihedral2ver_bb13[dihedral_name] +
", ref=" + dihedral2ref_bb13[dihedral_name] + ")\n")
sys.stdout.write(' } # end of dihedral_coeff commands\n'
'\n\n')
################# Print 4-body Improper Interactions ##################
# Sort improper names by priority (highest first).
imp_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(improper2priority.items())],
key=itemgetter(1),
reverse=True)]
if len(imp_names_priority_high_to_low) > 0:
sys.stdout.write(" # --------------- Improper Interactions: ---------------------\n")
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (4-body) "improper" interactions: --\n'
' # ImproperType AtmType1 AtmType2 AtmType3 AtmType3 [BondType1 Bnd2 Bnd3]\n')
sys.stdout.write('\n'
' write_once("Data Impropers By Type')
if improper_symmetry_subgraph != '':
sys.stdout.write(' ('+improper_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for improper_name in imp_names_priority_high_to_low:
if not (improper2style[improper_name] in
improper_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(improper_name)]
#if (len(anames) == 4) and improper2style[improper_name] == 'class2':
# continue
ang_names = [anames[4:7],anames[7:10],anames[10:13]]
# atom orderings are LAMMPS implementation specific. See:
# http://lammps.sandia.gov/doc/improper_class2.html
#ang_names[0] <==> (a1, a2, a3) <==> (i, j, k)
#ang_names[1] <==> (a1, a2, a4) <==> (i, j, l)
#ang_names[2] <==> (a3, a2, a4) <==> (k, j, l)
assert(ang_names[0][1] == ang_names[1][1] == ang_names[2][1])
assert(ang_names[0][0] == ang_names[1][0])
assert(ang_names[1][2] == ang_names[2][2])
assert(ang_names[2][0] == ang_names[0][2])
# (NOTE TO SELF:
# If these assertions fail, then try checking if they are
# all either the same, or '*'. If they are then just replace '*'
# everwhere that atom appears with the most restrictive name.)
# Optional: Shorten the angle name since some of the bnames are redundant:
is_auto = IsAutoInteraction(improper_name)
anm = [a for a in map(EncodeAName, anames)]
imp_name_abbr = EncodeInteractionName(anm[0:4]+
#[ang_names[0][0],ang_names[0][1],ang_names[0][2],
# ang_names[1][2]]
[anm[4],anm[5],anm[6],
anm[9]],
is_auto)
# Did the user ask us to include "auto" interactions?
if improper2style[improper_name] != 'class2':
if IsAutoInteraction(improper_name):
if include_auto_equivalences:
sys.stdout.write(' @improper:' + imp_name_abbr +' '+
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie'
+ anames[0] + ',aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie*,aic'
+ anames[1] +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie'
+ anames[2] + ',aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie'
+ anames[3] + ',aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @improper:' + imp_name_abbr + ' ' +
' @atom:*,p*,b*,a*,d*,i' + anames[0] +
' @atom:*,p*,b*,a*,d*,i' + anames[1] +
' @atom:*,p*,b*,a*,d*,i' + anames[2] +
' @atom:*,p*,b*,a*,d*,i' + anames[3] +
'\n')
else:
if IsAutoInteraction(improper_name):
if include_auto_equivalences:
# NOTE(review): this class2 "auto" branch writes the angle
# names into the dihedral-equivalence slots (ade/adc) rather
# than the angle slots (aae/aac), and the third atom reuses
# ang_names[0][1] where ang_names[2][0] might be expected --
# verify against the intended auto-equivalence encoding.
sys.stdout.write(' @improper:' + imp_name_abbr +' ' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'+ang_names[0][0]+',adc*,aie'
+ anames[0] + ',aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac'+ang_names[0][1]+',ade*,adc*,aie*,aic'
+ anames[1] +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'+ang_names[0][1]+',adc*,aie'
+ anames[2] + ',aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'+ang_names[2][2]+',adc*,aie'
+ anames[3] + ',aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @improper:' + imp_name_abbr + ' ' +
' @atom:*,p*,b*,a'+ang_names[0][0]+',d*,i' + anames[0] +
' @atom:*,p*,b*,a'+ang_names[0][1]+',d*,i' + anames[1] +
' @atom:*,p*,b*,a'+ang_names[0][2]+',d*,i' + anames[2] +
' @atom:*,p*,b*,a'+ang_names[1][2]+',d*,i' + anames[3] +
'\n')
sys.stdout.write(' } # end of "Data Impropers By Type" section\n'
'\n')
# Print the force-field parameters for these improper interactions:
sys.stdout.write('\n\n'
' # ------- Improper Force Field Parameters: -------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for improper_style in improper_styles:
if not (improper_style in improper_styles_selected):
continue
sys.stdout.write(' # '+improper_style2docs[improper_style]+'\n')
sys.stdout.write('\n'
'# Syntax: \n'
' # improper_coeff ImproperTypeName ImproperStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for improper_name in imp_names_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(improper_name)]
#if (len(anames) == 4) and improper2style[improper_name] == 'class2':
# continue
# Optional: Shorten the angle name since some of the bnames are redundant:
is_auto = IsAutoInteraction(improper_name)
anm = [a for a in map(EncodeAName, anames)]
imp_name_abbr = EncodeInteractionName(anm[0:4]+
#[ang_names[0][0],ang_names[0][1],ang_names[0][2],
# ang_names[1][2]]
[anm[4],anm[5],anm[6],
anm[9]],
is_auto)
if not (improper2style[improper_name] in
improper_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
if (IsAutoInteraction(improper_name) and
(not include_auto_equivalences)):
continue
sys.stdout.write(' improper_coeff @improper:'+imp_name_abbr+' '+
improper2style[improper_name] + ' ' +
improper2params[improper_name] +
" # (ver=" + improper2ver[improper_name] +
", ref=" + improper2ref[improper_name] + ")\n")
if improper_name in improper2class2_aa:
sys.stdout.write(' improper_coeff @improper:'+imp_name_abbr+' '+
improper2style[improper_name] + ' aa ' +
improper2class2_aa[improper_name] +
" # (ver=" + improper2ver_aa[improper_name] +
", ref=" + improper2ref[improper_name] + ")\n")
sys.stdout.write(' } # end of improper_coeff commands\n'
'\n\n')
sys.stdout.write('\n\n\n\n'
' # -------------------- Select LAMMPS style(s) ------------------\n'
'\n')
sys.stdout.write('\n'
' # LAMMPS supports many different kinds of bonded and non-bonded\n'
' # interactions which can be selected at run time. Eventually\n'
' # we must inform LAMMPS which of them we will need. We specify\n'
' # this in the "In Init" section: \n\n')
sys.stdout.write(' write_once("In Init") {\n')
sys.stdout.write(' units real\n')
sys.stdout.write(' atom_style full\n')
if len(bond_styles) > 0:
sys.stdout.write(' bond_style hybrid')
for bond_style in bond_styles:
if not (bond_style in bond_styles_selected):
continue
sys.stdout.write(' ' + bond_style)
sys.stdout.write('\n')
for bond_style in bond_styles:
if not (bond_style in bond_styles_selected):
continue
sys.stdout.write(' # '+bond_style2docs[bond_style]+'\n')
sys.stdout.write('\n')
if len(angle_styles) > 0:
sys.stdout.write(' angle_style hybrid')
for angle_style in angle_styles:
if not (angle_style in angle_styles_selected):
continue
sys.stdout.write(' ' + angle_style)
sys.stdout.write('\n')
for angle_style in angle_styles:
if not (angle_style in angle_styles_selected):
continue
sys.stdout.write(' # '+angle_style2docs[angle_style]+'\n')
sys.stdout.write('\n')
if len(dihedral_styles) > 0:
sys.stdout.write(' dihedral_style hybrid')
for dihedral_style in dihedral_styles:
if not (dihedral_style in dihedral_styles_selected):
continue
sys.stdout.write(' ' + dihedral_style)
sys.stdout.write('\n')
for dihedral_style in dihedral_styles:
if not (dihedral_style in dihedral_styles_selected):
continue
sys.stdout.write(' # '+dihedral_style2docs[dihedral_style]+'\n')
sys.stdout.write('\n')
if len(improper_styles) > 0:
sys.stdout.write(' improper_style hybrid')
for improper_style in improper_styles:
if not (improper_style in improper_styles_selected):
continue
sys.stdout.write(' ' + improper_style)
sys.stdout.write('\n')
for improper_style in improper_styles:
if not (improper_style in improper_styles_selected):
continue
sys.stdout.write(' # '+improper_style2docs[improper_style]+'\n')
sys.stdout.write('\n')
if len(pair_styles) > 0:
sys.stdout.write(' pair_style hybrid')
for pair_style in pair_styles:
if not (pair_style in pair_styles_selected):
continue
sys.stdout.write(' ' + pair_style +
' ' + pair_style_args[pair_style])
sys.stdout.write('\n')
for pair_style in pair_styles:
sys.stdout.write(' # '+pair_style2docs[pair_style]+'\n')
sys.stdout.write('\n')
sys.stdout.write(' pair_modify mix ' + pair_mixing_style + '\n')
sys.stdout.write(' ' + special_bonds_command + '\n')
sys.stdout.write(' ' + kspace_style + '\n')
sys.stdout.write(' } #end of init parameters\n\n')
sys.stdout.write('} # ' + ffname + '\n\n')
sys.stdout.write("#\n"
"# WARNING: The following 1-2, 1-3, and 1-4 weighting parameters were ASSUMED:\n")
sys.stdout.write("# " + special_bonds_command + "\n")
sys.stdout.write("# (See http://lammps.sandia.gov/doc/special_bonds.html for details)\n")
#sys.stderr.write(' done.\n')
if len(lines_templates) > 0:
sys.stdout.write('\n\n\n\n'
'# ---- templates from the original .frc file used for atom type selection: ---\n')
for line in lines_templates:
sys.stdout.write('# '+line)
if len(lines_references) > 0:
sys.stdout.write('\n\n\n\n'
'# ---- references from the original .frc file: ----\n\n')
for ref_number,lines in sorted(lines_references.items()):
sys.stdout.write('# reference '+str(ref_number)+'\n')
for line in lines:
sys.stdout.write('# '+line)
sys.stdout.write('\n')
if len(lines_warnings) > 0:
sys.stdout.write('\n\n\n\n'
'# ---- additional warnings: ----\n')
for line in lines_warnings:
sys.stdout.write(line)
if filename_in != '':
file_in.close()
except InputError as err:
sys.stderr.write('\n\n' + str(err) + '\n')
sys.exit(1)
# Standard script entry point: invoke main() only when this file is run
# directly, not when it is imported as a module.
if __name__ == '__main__':
    main()
modifications to msifrc2lt.py
#! /usr/bin/env python
"""
This standalone python script can be used to convert the force-fields in MSI
format (.FRC files, a.k.a. "BIOSYM", "DISCOVERY" format)
...into MOLTEMPLATE/LAMMPS format (.LT format).
Once converted into moltemplate (.LT) format, users can use these files with
MOLTEMPLATE to prepare LAMMPS simulations of molecules using these force fields
(without needing any additional software such as msi2lmp).
There are several examples of MSI files in the "tools/msi2lmp/frc_files/"
directory which is distributed with LAMMPS.
Limitations:
Currently (2017-2) this script ignores the "template" information in .FRC files.
When defining a new type of molecule, the user must carefully choose the
complete atom type for each type of atom in the molecule. In other words,
MOLTEMPLATE will not attempt to determine (from local context) whether
a carbon atom somewhere in your molecule happens to be an SP3 carbon
   (i.e., "c4" in the COMPASS force-field), or an aromatic carbon ("c3a"),
or something else (for example). This information is typically contained
in the "templates" section of these files, and this script currently ignores
that information. Instead, the user must determine which type of carbon atom
it is manually, for all of the carbon atoms in that kind of molecule.
(This only needs to be done once per molecule definition.
Once a type of molecule is defined, it can be copied indefinitely.)
"""
__author__ = 'Andrew Jewett'
__version__ = '0.1.20'
__date__ = '2017-10-03'
import sys
import os
from collections import defaultdict, OrderedDict
from operator import itemgetter
from math import *
# The name this program was invoked as.  os.path.basename() handles the
# platform's native path separator, unlike splitting on '/' alone.
g_program_name = os.path.basename(__file__)

# Usage / help message printed when the command-line arguments are invalid.
# (Typo fixes: "comass.lt" -> "compass.lt", "HBONDTYLE" -> "HBONDSTYLE".)
doc_msg = (
    "Typical Usage:\n\n"
    "   " + g_program_name + " -name COMPASS < compass_published.frc > compass.lt\n\n"
    "   where \"compass_published.frc\" is a force-field file in MSI format.\n"
    "         \"compass.lt\" is the corresponding file converted to moltemplate format\n"
    "   and   \"COMPASS\" is the name that future moltemplate users will use to refer\n"
    "         to this force-field (optional).\n"
    "Optional Arguments\n"
    "   -name FORCEFIELDNAME # Give the force-field a name\n"
    "   -file FILE_NAME      # Read force field parameters from a file\n"
    "   -url URL             # Read force field parameters from a file on the web\n"
    "   -atoms \"QUOTED LIST\" # Restrict output to a subset of atom types\n"
    "  Sometimes an FRC file contains multiple versions.  In that case,\n"
    "  you can select between them using these optional arguments:\n"
    "   -pair-style \"PAIRSTYLE ARGS\"   # LAMMPS pair style and cutoff arg(s)\n"
    "   -bond-style BONDSTYLE          # desired LAMMPS bond style (default: \"class2\")\n"
    "   -angle-style ANGLESTYLE        # desired LAMMPS angle style\n"
    "   -dihedral-style DIHEDRALSTYLE  # desired LAMMPS dihedral style\n"
    "   -improper-style IMPROPERSTYLE  # desired LAMMPS improper style\n"
    "   -hbond-style \"HBONDSTYLE ARGS\" # LAMMPS hydrogen-bond style and args\n"
)
#   "   -auto              # Consider auto_equivalences in the .frc file \n"
class InputError(Exception):
    """Exception carrying a human-readable error message.

    Raised whenever the caller supplied a faulty input file or a
    faulty command-line argument.
    """

    def __init__(self, err_msg):
        # Keep the message on an attribute so callers can inspect it.
        self.err_msg = err_msg

    def __str__(self):
        return self.err_msg

    # repr() and str() deliberately produce the same text.
    __repr__ = __str__
def NSplitQuotedString(string,
                       nmax,
                       quotes,
                       delimiters=' \t\r\f\n',
                       escape='\\',
                       comment_char='#'):
    """
    Split a quoted & commented string into at most "nmax" tokens (if nmax>0),
    where each token is separated by one or more delimiter characters
    in the original string, and quoted substrings are not split.
    This function returns a list of strings.  Once the string is split nmax
    times, any remaining text will be appended to the last entry of the list.
    Comments are stripped from the string before splitting begins.

    Implemented as a single-pass character state machine.  State variables:
      token          text of the token currently being accumulated
      reading_token  True while inside a token (False between tokens)
      escaped_state  True when the previous character was an unescaped
                     escape character
      quote_state    the opening quote character while inside a quoted
                     region, otherwise None
    """
    tokens = []
    token = ''
    reading_token = True
    escaped_state = False
    quote_state = None
    for c in string:
        # An unescaped, unquoted comment character terminates the string.
        if (c in comment_char) and (not escaped_state) and (quote_state == None):
            if len(token) > 0:
                tokens.append(token)
            return tokens
        # An unescaped, unquoted delimiter ends the current token...
        elif (c in delimiters) and (not escaped_state) and (quote_state == None):
            if reading_token:
                if (nmax == 0) or (len(tokens) < nmax-1):
                    if len(token) > 0:
                        tokens.append(token)
                    token = ''
                    reading_token = False
                else:
                    # ...unless nmax-1 tokens exist already: then delimiters
                    # (and all remaining text) are kept in the final token.
                    token += c
        elif c in escape:
            if escaped_state:
                # A doubled escape character yields one literal escape char.
                token += c
                reading_token = True
                escaped_state = False
            else:
                escaped_state = True
                # and leave c (the '\' character) out of token
        elif (c in quotes) and (not escaped_state):
            # Toggle quote_state; only the matching quote char closes it.
            if (quote_state != None):
                if (c == quote_state):
                    quote_state = None
            else:
                quote_state = c
            # Note: the quote characters themselves are kept in the token.
            token += c
            reading_token = True
        else:
            # Translate the common escape sequences \n, \t, \r, \f.
            if (c == 'n') and (escaped_state == True):
                c = '\n'
            elif (c == 't') and (escaped_state == True):
                c = '\t'
            elif (c == 'r') and (escaped_state == True):
                c = '\r'
            elif (c == 'f') and (escaped_state == True):
                c = '\f'
            token += c
            reading_token = True
            escaped_state = False
    if len(token) > 0:
        tokens.append(token)
    return tokens
def SplitQuotedString(string,
                      quotes='\'\"',
                      delimiters=' \t\r\f\n',
                      escape='\\',
                      comment_char='#'):
    """
    Convenience wrapper around NSplitQuotedString() with no limit on the
    number of tokens (nmax=0).  All other arguments are forwarded unchanged.
    """
    return NSplitQuotedString(string,
                              0,
                              quotes=quotes,
                              delimiters=delimiters,
                              escape=escape,
                              comment_char=comment_char)
def RemoveOuterQuotes(text, quotes='\"\''):
if ((len(text) >= 2) and (text[0] in quotes) and (text[-1] == text[0])):
return text[1:-1]
else:
return text
def SortByEnds(l_orig):
    """
    Return a copy of the sequence l_orig (as a list), reversed whenever
    its first element compares greater than its last element.
    Convenient one-line canonicalization for symmetric interactions.
    """
    result = list(l_orig)
    if result[0] > result[-1]:
        result = result[::-1]
    return result
#def Repl(tokens, a, b):
# return [(b if x==a else x) for x in tokens]
def EncodeAName(s):
    """
    Sanitize an MSI atom-type name so moltemplate can use it safely.

    A LEADING '*' marks the name as a wildcard (possibly followed by an
    integer, e.g. "*7"); such names collapse to the single letter 'X' and
    the integer is discarded.  A '*' appearing later is literally part of
    the atom name (MSI force fields use many strange characters), so it is
    spelled out as 'star' to keep moltemplate's pattern matching from
    treating it as a wildcard.  Quote characters also confuse moltemplate
    and are spelled out as 'prime' and 'dblpr'.
    """
    if s.startswith('*'):  # e.g. "*7": wildcard; throw away the integer
        return 'X'
    # Escaping as '\*' does not work (ttree_lex.MatchesAll() still treats
    # it as a wildcard), so spell the characters out with substrings that
    # have not appeared in any force field seen so far.
    result = s
    for bad, spelled_out in (('*', 'star'),
                             ('\'', 'prime'),
                             ('"', 'dblpr')):
        result = result.replace(bad, spelled_out)
    return result
def DetermineAutoPriority(anames):
    """
    Scan the list of atom-name strings "anames" for patterns of the form
    "*n" where n is an integer.  (These patterns are used by MSI software
    when using "auto_equivalences" to look up force field parameters for
    bonded interactions.)  All such patterns in one interaction must carry
    the same integer; return it (as a float), or 0.0 if no pattern occurs.

    Raises:
        InputError: when two "*n" patterns disagree on the integer.

    Bug fix: the original compared the list slice anames[:1] with '*'
    (always False, so the function unconditionally returned 0.0); the
    check must be applied to each element of the list instead.
    """
    n = None
    for aname in anames:
        if aname[:1] == '*':
            suffix = aname[1:]
            if suffix == '':
                continue  # a bare '*' wildcard carries no priority number
            priority = float(suffix)
            if n is None:
                n = priority
            elif n != priority:
                raise InputError('Error: Inconsistent priority integers in the following interaction:\n'
                                 '       ' + ' '.join(anames) + '\n')
    if n is None:
        return 0.0
    return n
#def DeterminePriority(is_auto,
# anames,
# version):
# """
# Determine the priority of an interaction from
# 1) whether or not it is an "auto" interaction
# 2) what is the force-field "version" (a number)
# 3) what are the names of the atoms (for auto_equivalences only,
# some atom "names" are wildcards followed by integers. use the integer)
# """
#
# if is_auto:
# n = DetermineAutoPriority(anames)
# return (is_auto, n)
# else:
# return (is_auto, -version)
def DetermineNumericPriority(is_auto,
                             anames,
                             version):
    """
    Compute a numeric priority for an interaction (lower = higher priority):
    - "auto" interactions are ranked by the integer embedded in any "*n"
      wildcard atom names (delegated to DetermineAutoPriority()).
    - ordinary interactions are ranked by force-field "version": the
      version number is negated so that newer versions sort first.
    """
    if is_auto:
        return DetermineAutoPriority(anames)
    return -float(version)
def IsAutoAtom(atom_name):
    """Return True when atom_name is an "auto equivalence" name.

    Such names end with the '_' character (e.g. "c3m_"); ordinary atom
    names never do.
    """
    return atom_name.endswith('_')
#def PossibleAutoAtom(atom_name):
# """ Auto-equivalences are alternate atom names used in "auto"
# interactions. (These are low priority interactions used as a
# last resort when the interaction parameters could not be located
# by any other means). Each atom is given an alternate name which
# is used in this kind of interaction. These names typically end
# '_' followed by an optional integer. Example "auto" atom names
# are 'c3m_' and 'c=_3'. Unfortunately some ordinary atom names
# also end in an integer preceeded by a _ character. But they
# never end in a '_' character. Here we check for both."""
#
# i = atom_name.rfind('_')
# if (i == -1) or str.isdigit(atom_name[i:]):
# return True
# return False
def IsAutoInteraction(interaction_name):
    """Return True when the encoded interaction name carries the 'auto'
    prefix added by EncodeInteractionName() for auto-equivalence lookups."""
    return interaction_name.startswith('auto')
#def IsAutoInteraction(interaction_name):
# anames = ExtractAtomNames(interaction_name)
# for a in anames:
# if IsAutoAtom(a):
# return True
# if not PossibleAutoAtom(a):
# return False
# return True
def EncodeInteractionName(anames,
                          is_auto = False):
    """
    Join a list of atom names into a single comma-delimited string naming a
    bonded interaction.  "Auto" interactions (those looked up via
    auto_equivalences) are given an 'auto' prefix.

    If the caller did not mark the interaction as auto, infer it from the
    atom names: lines from an "_auto" section of the FRC file use atom
    names ending in the '_' character.

    (Cleanup: removed a redundant "is_auto = False" reassignment and an
    unused "priority" local; the DetermineAutoPriority() call is kept for
    its validation side effect.)
    """
    if not is_auto:
        for s in anames:
            if IsAutoAtom(s):
                is_auto = True
    if is_auto:
        # Validation only: raises InputError when the '*n' priority
        # integers contained in anames are inconsistent.
        DetermineAutoPriority(anames)
        return 'auto' + ','.join(anames)
    return ','.join(anames)
def ExtractANames(interaction_name):
    """Recover the list of atom names from an encoded interaction name,
    discarding the leading 'auto' tag when one is present."""
    body = interaction_name
    if IsAutoInteraction(body):
        body = body[4:]
    return body.split(',')
def OOPImproperNameSort(aorig):
    """
    Canonicalize the atom order of a 4-atom (out-of-plane) improper
    interaction.  The outer atoms (1st and 4th) are interchangeable, so
    they are swapped when needed to put the lexicographically smaller
    (encoded) name first.  Returns (atom_names, permutation), where
    permutation[i] is the original index of the atom now at position i.

    Bug fix: map() returns a lazy iterator in Python 3, which does not
    support indexing; it must be materialized into a list before
    atom_names[0] / atom_names[3] are accessed.
    """
    assert(len(aorig) == 4)
    atom_names = [a for a in map(EncodeAName, aorig)]
    if atom_names[0] < atom_names[3]:
        return (atom_names, [0, 1, 2, 3])
    else:
        return ([atom_names[3],
                 atom_names[1],
                 atom_names[2],
                 atom_names[0]],
                [3, 1, 2, 0])
def Class2ImproperNameSort(aorig):
    """
    Canonicalize a list of 4 atom names participating in an "improper"
    ("wilson-out-of-plane") interaction.  The second atom is assumed to be
    the central ("hub") atom and stays in place; the three peripheral atom
    names are sorted lexicographically.  Atom names are first encoded with
    EncodeAName() (which, among other things, replaces "*" with "X").
    Returns (sorted_names, permutation), where permutation[i] is the
    original index of the atom now at position i.
    """
    assert(len(aorig) == 4)
    names = [EncodeAName(a) for a in aorig]
    # Pair each peripheral name with its original index, then sort the
    # pairs (ties are broken by the original index).
    peripheral = sorted(zip([names[0], names[2], names[3]], [0, 2, 3]))
    sorted_names = [peripheral[0][0], names[1], peripheral[1][0], peripheral[2][0]]
    permutation = [peripheral[0][1], 1, peripheral[1][1], peripheral[2][1]]
    return (sorted_names, permutation)
def Parity(p):
    """Return True when the permutation p is even, False when it is odd.

    The parity is derived from the cycle decomposition: a permutation of
    n elements with c cycles has parity (n - c) mod 2.
    (credit: "Weeble")
    """
    perm = list(p)
    n = len(perm)
    visited = [False] * n
    num_cycles = 0
    for start in range(n):
        if visited[start]:
            continue
        num_cycles += 1
        j = start
        # Walk the cycle containing `start`, marking every member.
        while not visited[j]:
            visited[j] = True
            j = perm[j]
    return (n - num_cycles) % 2 == 0
def ImCrossTermID(atom_names):
    """
    Build a canonical comma-delimited ID for the pair of angles formed by
    atoms 3,2,1 and 3,2,4 of an improper interaction.  Swapping the first
    and last atom names describes the same pair of angles, so when the
    first name compares greater than the last, the two are exchanged to
    remove the ordering ambiguity.
    """
    if atom_names[0] <= atom_names[3]:
        ordered = (atom_names[0], atom_names[1], atom_names[2], atom_names[3])
    else:
        ordered = (atom_names[3], atom_names[1], atom_names[2], atom_names[0])
    return ','.join(ordered)
def DoAtomsMatchPattern(anames, pattern):
    """
    Check whether the list of atom names "anames" matches "pattern".
    (Both arguments are lists of strings, but some of the strings
    in pattern may contain wildcard characters ('*') followed by
    "priority" numbers.  Matches with lower priority numbers are
    given preference whenever multiple distinct matches are found.
    (Note: This function does not check patterns in reverse order.)
    Returns a priority integer when every position matches, else None.
    """
    #sys.stderr.write('DEBUG: checking whether '+str(anames)+' matches '+str(pattern)+'\n')
    assert(len(anames) == len(pattern))
    matched = True
    for d in range(0, len(pattern)):
        # A position matches when the names are identical or the pattern
        # entry is a wildcard ('*' followed by a priority integer).
        # NOTE(review): a bare '*' entry would make int('') raise
        # ValueError -- presumably patterns are always '*n'; confirm.
        if (pattern[d] == anames[d]) or (pattern[d][0] == '*'):
            if pattern[d][0] == '*':
                priority = int(pattern[d][1:])
            else:
                priority = 0
        else:
            matched = False
    if matched:
        # NOTE(review): "priority" holds only the value from the LAST
        # position examined; priorities assigned at earlier wildcard
        # positions are overwritten.  Confirm this scoring is intended.
        #sys.stderr.write('DEBUG: '+str(anames)+' matches '+str(pattern)+'\n')
        return priority
    else:
        return None
def LookupBondLength(a1, a2,
                     atom2equiv_bond,
                     bond2r0,
                     atom2auto_bond,
                     bond2r0_auto):
    """
    Try to find bond parameters between atoms whose original
    atom names (without equivalences) are a1 and a2.
    Then return both the equilibrium bond length for that bond,
    as well as the equivalent atom names used to lookup that bond.
    (These could be stored in either atom2equiv_bond or atom2auto_bond.)
    If a match was not found, return None.
    """
    return_val = None
    # First try an explicit (non-auto) bond between the equivalent names.
    anames = (atom2equiv_bond[a1], atom2equiv_bond[a2])
    bond_name = EncodeInteractionName(SortByEnds(anames))
    if bond_name in bond2r0:
        return_val = (bond2r0[bond_name], [anames[0], anames[1]])
    # If no bond between these atoms is defined,
    # check the bonds in the _auto section(s).
    # This is a lot messier.
    elif ((a1 in atom2auto_bond) and (a2 in atom2auto_bond)):
        anames = [atom2auto_bond[a1], atom2auto_bond[a2]]
        # Because _auto interactions can contain wildcards,
        # there can be multiple entries in bond2r0_auto[]
        # for the same list of atom names, and we have to
        # consider all of them, and pick the one with the
        # most priority (ie. whose priority number is lowest).
        # (Note: The MSI file format uses low priority numbers
        #  to indicate high priority.  Somewhat confusing.)
        HUGE_VAL = 2000000000
        best_priority = HUGE_VAL
        pattern = ['','']
        for (pattern[0],pattern[1]), r0 in bond2r0_auto.items():
            priority = DoAtomsMatchPattern(anames, pattern)
            if (priority != None) and (priority < best_priority):
                best_priority = priority
                return_val = (r0, [anames[0], anames[1]])
            anames.reverse() # now check if the atoms in reverse order match
            priority = DoAtomsMatchPattern(anames, pattern)
            if (priority != None) and (priority < best_priority):
                best_priority = priority
                return_val = (r0, [anames[1], anames[0]]) #preserve atom order
            # NOTE(review): anames is NOT reversed back before the next
            # pattern, so iterations alternate between original and
            # reversed order.  Both orders are still tried against every
            # pattern, so the winner should not depend on this -- confirm.
    #if return_val != None:
    #    sys.stderr.write('DEBUG: For atoms '+str((a1,a2))+' ... bond_length, batom_names = '+str(return_val)+'\n')
    return return_val
def LookupRestAngle(a1, a2, a3,
                    atom2equiv_angle,
                    angle2theta0_or,
                    atom2auto_angle,
                    angle2theta0_auto_or):
    """
    Try to find angle parameters between atoms whose original atom
    names (without equivalences) are a1, a2, and a3.  Then return
    both the equilibrium rest angle for that 3body interaction
    as well as the equivalent atom names used to look it up.  (These
    could be stored in either atom2equiv_angle or atom2auto_angle.)
    If a match was not found, return None.
    """
    return_val = None
    # First try an explicit (non-auto) angle between the equivalent names.
    anames = (atom2equiv_angle[a1], atom2equiv_angle[a2], atom2equiv_angle[a3])
    angle_name = EncodeInteractionName(SortByEnds(anames))
    if angle_name in angle2theta0_or:
        return_val = (angle2theta0_or[angle_name], [anames[0], anames[1], anames[2]])
    # If no angle between these atoms is defined,
    # check the angles in the _auto section(s).
    # This is a lot messier.
    # (atom2auto_angle is indexed by position: [0]=end, [1]=center, [2]=end.)
    elif ((a1 in atom2auto_angle[0]) and
          (a2 in atom2auto_angle[1]) and
          (a3 in atom2auto_angle[2])):
        anames = [atom2auto_angle[0][a1],
                  atom2auto_angle[1][a2],
                  atom2auto_angle[2][a3]]
        #sys.stderr.write('DEBUG: LookupRestAngle(): a1,a2,a3=('+
        #                 a1+','+a2+','+a3+'), anames='+str(anames)+'\n')
        # Because _auto interactions can contain wildcards,
        # there can be multiple entries in angle2theta0_auto_or[]
        # for the same list of atom names, and we have to
        # consider all of them, and pick the one with the
        # most priority (ie. whose priority number is lowest).
        # (Note: The MSI file format uses low priority numbers
        #  to indicate high priority.  Somewhat confusing.)
        HUGE_VAL = 2000000000
        best_priority = HUGE_VAL
        pattern = ['','','']
        for (pattern[0],pattern[1],pattern[2]), theta0 in angle2theta0_auto_or.items():
            priority = DoAtomsMatchPattern(anames, pattern)
            if (priority != None) and (priority < best_priority):
                best_priority = priority
                return_val = (theta0, [anames[0], anames[1], anames[2]])
            anames.reverse() # now check if the atoms in reverse order match
            priority = DoAtomsMatchPattern(anames, pattern)
            if (priority != None) and (priority < best_priority):
                best_priority = priority
                return_val = (theta0, [anames[2], anames[1], anames[0]]) #preserve atom order
            # NOTE(review): as in LookupBondLength, anames is left reversed
            # between iterations; both orders are still tried per pattern.
    #if return_val != None:
    #    sys.stderr.write('DEBUG: For atoms '+str((a1,a2))+' ... rest_angle, anames = '+str(return_val)+'\n')
    return return_val
def Equivalences2ffids(lines_equivalences,
                       atom_types,
                       atom2equiv_pair,
                       atom2equiv_bond,
                       atom2equiv_angle,
                       atom2equiv_dihedral,
                       atom2equiv_improper):
    """
    Read a list of lines containing "equivalences" from an MSI-formatted
    .FRC file, filling the atom2equiv_* dictionaries as a side effect.
    Then, for each atom type, generate a long string which includes the
    original atom type name as well as all of the equivalences it belongs
    to.  Later on, when it is time to generate angles, dihedrals, or
    impropers, moltemplate will search for patterns contained in these
    strings to decide which type of interaction to generate.
    Returns an OrderedDict mapping each original atom type name to that
    string.

    Bug fix: comment stripping previously ignored a '>' comment whenever
    the line contained no '!'; now text after the first of '!' or '>'
    (whichever comes first) is always discarded.
    """
    for line in lines_equivalences:
        # Strip comments: discard everything from the first '!' or '>'
        # character onward.
        ic1 = line.find('!')
        ic2 = line.find('>')
        if ic1 == -1:
            ic = ic2
        elif ic2 == -1:
            ic = ic1
        else:
            ic = min(ic1, ic2)
        if ic != -1:
            line = line[:ic]
        else:
            line = line.rstrip('\n')
        tokens = line.strip().split()
        # tokens[2:8] hold: type, pair, bond, angle, dihedral, improper.
        # (tokens[0:2] are presumably version & reference -- TODO confirm.)
        atype = EncodeAName(tokens[2])
        atom2equiv_pair[atype] = EncodeAName(tokens[3])
        atom2equiv_bond[atype] = EncodeAName(tokens[4])
        atom2equiv_angle[atype] = EncodeAName(tokens[5])
        atom2equiv_dihedral[atype] = EncodeAName(tokens[6])
        atom2equiv_improper[atype] = EncodeAName(tokens[7])
    atom2ffid = OrderedDict()
    for atom in atom_types:
        # Concatenate the atom name with its tagged equivalences
        # (p=pair, b=bond, a=angle, d=dihedral, i=improper).
        atom2ffid[atom] = (atom +
                           ',p' + atom2equiv_pair.get(atom, '') +
                           ',b' + atom2equiv_bond.get(atom, '') +
                           ',a' + atom2equiv_angle.get(atom, '') +
                           ',d' + atom2equiv_dihedral.get(atom, '') +
                           ',i' + atom2equiv_improper.get(atom, ''))
    return atom2ffid
def AutoEquivalences2ffids(lines_equivalences,
                           lines_auto_equivalences,
                           atom_types,
                           atom2equiv_pair,
                           atom2equiv_bond,
                           atom2equiv_angle,
                           atom2equiv_dihedral,
                           atom2equiv_improper,
                           atom2auto_pair,
                           atom2auto_bondincr,
                           atom2auto_bond,
                           atom2auto_angleend,
                           atom2auto_anglecenter,
                           atom2auto_dihedralend,
                           atom2auto_dihedralcenter,
                           atom2auto_improperend,
                           atom2auto_impropercenter):
    """
    Variant of Equivalences2ffids() which also considers
    "auto_equivalences".
    Returns an OrderedDict that converts each original atom type name into
    a string that includes that atom's "equivalences" as well as its
    "auto_equivalences".  moltemplate will search for patterns contained in
    these strings to decide which type of interaction to generate.

    Bug fix: comment stripping previously ignored a '>' comment whenever
    the line contained no '!'; now text after the first of '!' or '>'
    (whichever comes first) is always discarded.
    """
    # Process the ordinary equivalences first (fills the atom2equiv_*
    # dictionaries as a side effect; the returned dict is discarded
    # because the strings are rebuilt below with the auto equivalences
    # appended).
    Equivalences2ffids(lines_equivalences,
                       atom_types,
                       atom2equiv_pair,
                       atom2equiv_bond,
                       atom2equiv_angle,
                       atom2equiv_dihedral,
                       atom2equiv_improper)
    # ------ The following lines are for processing "auto_equivalences" -----
    #
    # What is the difference between "equivalences" and "auto_equivalences"?
    #
    # equivalences (from the Discover manual):
    #   "Chemically distinct atoms often differ in some, but not all,
    #    of their forcefield parameters. ... Rather than duplicating the
    #    nonbond parameters in the forcefield parameter file, the Discover
    #    program uses atom type equivalences to simplify the problem. ...
    #    The Discover program recognizes five types of equivalences for
    #    each atom type: nonbond, bond, angle, torsion, and out-of-plane.
    #    Cross terms such as bond-bond terms have the same equivalences
    #    (insofar as atom types are concerned) as the diagonal term of
    #    the topology of all the atoms defining the internal coordinates."
    #
    # auto_equivalences:
    #   Similar to equivalences, but with lower priority.  When looking up
    #   class2 terms using auto_equivalences, a distinction is made between
    #   end atoms and central atoms, and the parameters are stored in
    #   different tables of the .frc file (e.g. "cff91_auto" vs "cff91").
    #   From the Discover manual:
    #   "A forcefield may include automatic parameters for use when
    #    better-quality explicit parameters are not defined for a
    #    particular bond, angle, torsion, or out-of-plane interaction.
    #    These parameters are intended as temporary patches, to allow
    #    you to begin calculations immediately."
    for line in lines_auto_equivalences:
        # Strip comments: discard everything from the first '!' or '>'
        # character onward.
        ic1 = line.find('!')
        ic2 = line.find('>')
        if ic1 == -1:
            ic = ic2
        elif ic2 == -1:
            ic = ic1
        else:
            ic = min(ic1, ic2)
        if ic != -1:
            line = line[:ic]
        else:
            line = line.rstrip('\n')
        tokens = line.strip().split()
        # tokens[2:12] hold: type, pair, bond-increment, bond, angle-end,
        # angle-center, dihedral-end, dihedral-center, improper-end,
        # improper-center.
        atype = EncodeAName(tokens[2])
        atom2auto_pair[atype] = EncodeAName(tokens[3])
        atom2auto_bondincr[atype] = EncodeAName(tokens[4])
        atom2auto_bond[atype] = EncodeAName(tokens[5])
        atom2auto_angleend[atype] = EncodeAName(tokens[6])
        atom2auto_anglecenter[atype] = EncodeAName(tokens[7])
        atom2auto_dihedralend[atype] = EncodeAName(tokens[8])
        atom2auto_dihedralcenter[atype] = EncodeAName(tokens[9])
        atom2auto_improperend[atype] = EncodeAName(tokens[10])
        atom2auto_impropercenter[atype] = EncodeAName(tokens[11])
    atom2ffid = OrderedDict()
    for atom in atom_types:
        # Concatenate the atom name with its tagged equivalences and
        # auto-equivalences (tags prefixed with 'a' are the auto ones).
        atom2ffid[atom] = (atom +
                           ',p' + atom2equiv_pair.get(atom, '') +
                           ',b' + atom2equiv_bond.get(atom, '') +
                           ',a' + atom2equiv_angle.get(atom, '') +
                           ',d' + atom2equiv_dihedral.get(atom, '') +
                           ',i' + atom2equiv_improper.get(atom, '') +
                           ',ap' + atom2auto_pair.get(atom, '') +
                           ',aq' + atom2auto_bondincr.get(atom, '') +
                           ',ab' + atom2auto_bond.get(atom, '') +
                           ',aae' + atom2auto_angleend.get(atom, '') +
                           ',aac' + atom2auto_anglecenter.get(atom, '') +
                           ',ade' + atom2auto_dihedralend.get(atom, '') +
                           ',adc' + atom2auto_dihedralcenter.get(atom, '') +
                           ',aie' + atom2auto_improperend.get(atom, '') +
                           ',aic' + atom2auto_impropercenter.get(atom, ''))
    return atom2ffid
def main():
try:
sys.stderr.write(g_program_name + ", version " +
__version__ + ", " + __date__ + "\n")
if sys.version < '2.6':
raise InputError('Error: Using python ' + sys.version + '\n' +
' Alas, your version of python is too old.\n'
' You must upgrade to a newer version of python (2.6 or later).')
if sys.version < '2.7':
from ordereddict import OrderedDict
else:
from collections import OrderedDict
if sys.version > '3':
import io
else:
import cStringIO
# defaults:
ffname = 'BIOSYM_MSI_FORCE_FIELD'
type_subset = set([])
filename_in = ''
file_in = sys.stdin
#file_in = open('pcff_repaired.frc','r') #CONTINUEHERE
include_auto_equivalences = False
#pair_style_name = 'lj/class2/coul/long'
#pair_style_params = "10.0 10.0"
pair_style2docs = {}
pair_style2args = defaultdict(str)
pair_style2docs['lj/cut/coul/long'] = 'http://lammps.sandia.gov/doc/pair_lj.html'
pair_style2args['lj/cut/coul/long'] = '10.0'
pair_style2docs['lj/class2/coul/long'] = 'http://lammps.sandia.gov/doc/pair_class2.html'
pair_style2args['lj/class2/coul/long'] = '10.0'
pair_style2docs['lj/class2/coul/cut'] = 'http://lammps.sandia.gov/doc/pair_class2.html'
pair_style2args['lj/class2/coul/cut'] = '10.0'
bond_style2docs = {}
#bond_style2args = defaultdict(str)
bond_style2docs['harmonic'] = 'http://lammps.sandia.gov/doc/bond_harmonic.html'
bond_style2docs['class2'] = 'http://lammps.sandia.gov/doc/bond_class2.html'
bond_style2docs['morse'] = 'http://lammps.sandia.gov/doc/bond_morse.html'
bond_symmetry_subgraph = '' # default
angle_style2docs = {}
#angle_style2args = defaultdict(str)
angle_style2docs['harmonic'] = 'http://lammps.sandia.gov/doc/angle_harmonic.html'
angle_style2docs['class2'] = 'http://lammps.sandia.gov/doc/angle_class2.html'
angle_symmetry_subgraph = '' # default
dihedral_style2docs = {}
#dihedral_style2args = defaultdict(str)
dihedral_style2docs['charmm'] = 'http://lammps.sandia.gov/doc/dihedral_charmm.html'
dihedral_style2docs['class2'] = 'http://lammps.sandia.gov/doc/dihedral_class2.html'
dihedral_symmetry_subgraph = '' # default
improper_style2docs = {}
#improper_style2args = defaultdict(str)
improper_style2docs['cvff'] = 'http://lammps.sandia.gov/doc/improper_cvff.html'
improper_style2docs['class2'] = 'http://lammps.sandia.gov/doc/improper_class2.html'
improper_symmetry_subgraph = 'cenJsortIKL'
pair_mixing_style = 'sixthpower tail yes'
special_bonds_command = 'special_bonds lj/coul 0.0 0.0 1.0 dihedral yes'
# Thanks to Paul Saxe for is suggestions
# http://lammps.sandia.gov/threads/msg11270.html
kspace_style = 'kspace_style pppm 0.0001'
pair_styles_selected = set([])
#pair_style_link = 'http://lammps.sandia.gov/doc/pair_class2.html'
pair_style_args = {}
pair_cutoff = '10.0'
#pair_style_command = " pair_style hybrid " + \
# pair_style_name + " " + pair_style_args + "\n"
bond_styles_selected = set([])
#bond_style_link = bond_style2docs[bond_style_name]
#bond_style_args = ''
angle_styles_selected = set([])
#angle_style_link = angle_style2docs[angle_style_name]
#angle_style_args = ''
dihedral_styles_selected = set([])
#dihedral_style_link = dihedral_style2docs[dihedral_style_name]
#dihedral_style_args = ''
improper_styles_selected = set([])
#improper_style_link = improper_style2docs[improper_style_name]
#improper_style_args = ''
hbond_style_name = ''
hbond_style_link = ''
hbond_style_args = ''
lines_templates = []
lines_references = defaultdict(list)
lines_warnings = []
argv = [arg for arg in sys.argv]
i = 1
while i < len(argv):
#sys.stderr.write('argv['+str(i)+'] = \"'+argv[i]+'\"\n')
if argv[i] == '-atoms':
if i + 1 >= len(argv):
raise InputError('Error: the \"' + argv[i] + '\" argument should be followed by a quoted string\n'
' which contains a space-delimited list of of a subset of atom types\n'
' you want to use from the original force-field.\n'
' Make sure you enclose the entire list in quotes.\n')
type_subset = set(argv[i + 1].strip('\"\'').strip().split())
del argv[i:i + 2]
elif argv[i] == '-name':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by the name of the force-field\n')
ffname = argv[i + 1]
del argv[i:i + 2]
elif argv[i] in ('-file', '-in-file'):
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by the name of a force-field file\n')
filename_in = argv[i + 1]
try:
file_in = open(filename_in, 'r')
except IOError:
sys.stderr.write('Error: Unable to open file\n'
' \"' + filename_in + '\"\n'
' for reading.\n')
sys.exit(1)
del argv[i:i + 2]
elif argv[i] == '-pair-cutoff':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by a number'
' (the distance cutoff for non-bonded (pair) interactions)\n')
pair_style_cutoff = argv[i+1]
del argv[i:i + 2]
elif argv[i] == '-pair-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by either \"lj/class2/coul/cut\" or \"lj/class2/coul/long\"\n')
pair_style_list = argv[i + 1].split(',')
for pair_style in pair_style_list:
if pair_style == '9-6':
pair_style = 'lj/class2/coul/long'
elif pair_style in ('12-6', 'lj', 'LJ'):
pair_style = 'lj/cut/coul/long'
if pair_style.find('lj/class2/coul/long') == 0:
kspace_style = 'kspace_style pppm 0.0001'
elif pair_style.find('lj/cut/coul/long') == 0:
kspace_style = 'kspace_style pppm 0.0001'
elif pair_style.find('lj/class2/coul/cut') == 0:
pass
#kspace_style = ''
elif pair_style.find('lj/cut') == 0:
pass
#kspace_style = ''
else:
raise InputError('Error: ' + argv[i] + ' ' + pair_style_name + ' not supported.\n'
' The following pair_styles are supported:\n'
' lj/class2/coul/cut\n'
' lj/class2/coul/long\n'
' lj/cut\n'
' lj/cut/coul/long\n')
pair_styles_selected.add(pair_style)
del argv[i:i + 2]
elif argv[i] == '-bond-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
' a compatible bond_style.\n')
bond_styles = argv[i + 1].split(',')
for bond_style in bond_styles:
bond_styles_selected.add(bond_style)
#bond_style2args[bond_style] = argv[i + 1].split()[1:]
#if bond_style_name.find('harmonic') == 0:
# pass
# #bond_style_link = 'http://lammps.sandia.gov/doc/bond_harmonic.html'
#elif bond_style_name.find('morse') == 0:
# pass
# #bond_style_link = 'http://lammps.sandia.gov/doc/bond_morse.html'
#elif bond_style_name.find('class2') == 0:
# pass
# #bond_style_link = 'http://lammps.sandia.gov/doc/bond_class2.html'
#else:
# raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
# ' \"harmonic\", \"class2\", or \"morse\".\n')
del argv[i:i + 2]
elif argv[i] == '-angle-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
' a compatible angle_style.\n')
angle_styles = argv[i + 1].split(',')
for angle_style in angle_styles:
angle_styles_selected.add(angle_style)
#if angle_style_name.find('harmonic') == 0:
# pass
# #angle_style_link = 'http://lammps.sandia.gov/doc/angle_harmonic.html'
#elif angle_style_name.find('class2') == 0:
# pass
# #angle_style_link = 'http://lammps.sandia.gov/doc/angle_class2.html'
#else:
# raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
# ' \"harmonic\" or \"class2\"\n')
del argv[i:i + 2]
elif argv[i] == '-dihedral-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
' a compatible dihedral_style.\n')
dihedral_styles = argv[i + 1].split(',')
for dihedral_style in dihedral_styles:
dihedral_styles_selected.add(dihedral_style)
#if dihedral_style_name.find('charmm') == 0:
# pass
# #dihedral_style_link = 'http://lammps.sandia.gov/doc/dihedral_charmm.html'
#elif dihedral_style_name.find('class2') == 0:
# pass
# #dihedral_style_link = 'http://lammps.sandia.gov/doc/dihedral_class2.html'
#else:
# raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
# ' \"harmonic\" or \"class2\"\n')
del argv[i:i + 2]
elif argv[i] == '-improper-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by\n'
' a compatible impropoer_style.\n')
improper_styles = argv[i + 1].split(',')
for improper_style in improper_styles:
improper_styles_selected.add(improper_style)
#if impropoer_style_name.find('harmonic') == 0:
# pass
# #impropoer_style_link = 'http://lammps.sandia.gov/doc/impropoer_harmonic.html'
#elif impropoer_style_name.find('class2') == 0:
# pass
# #impropoer_style_link = 'http://lammps.sandia.gov/doc/impropoer_class2.html'
#else:
# raise InputError('Error: ' + argv[i] + ' must be followed by either:\n'
# ' \"harmonic\" or \"class2\"\n')
del argv[i:i + 2]
elif argv[i] == '-hbond-style':
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' ' + hbond_style_name + '\n'
' should be followed by a compatible pair_style.\n')
hbond_style_name = argv[i + 1]
hbond_style_link = 'http://lammps.sandia.gov/doc/pair_hbond_dreiding.html'
if hbond_style_name.find('none') == 0:
hbond_style_name = ''
hbond_style_args = ''
elif hbond_style_name.find('hbond/dreiding/lj') == 0:
n = len('hbond/dreiding/lj')
hbond_style_args = hbond_style_name[n+1:]
hbond_style_name = hbond_style_name[:n]
elif hbond_style_name.find('hbond/dreiding/morse') == 0:
n = len('hbond/dreiding/morse')
hbond_style_args = hbond_style_name[n+1:]
hbond_style_name = hbond_style_name[:n]
else:
raise InputError('Error: ' + argv[i] + ' flag should be followed by either\n'
' \"hbond/dreiding/lj\" or \"hbond/dreiding/morse"\n')
del argv[i:i + 2]
elif argv[i] in ('-url', '-in-url'):
import urllib2
if i + 1 >= len(argv):
raise InputError('Error: ' + argv[i] + ' flag should be followed by the name of a\n'
' file containing force-field information in msi/frc format.\n')
url = argv[i + 1]
try:
request = urllib2.Request(url)
file_in = urllib2.urlopen(request)
except urllib2.URLError:
sys.stdout.write("Error: Unable to open link:\n" + url + "\n")
sys.exit(1)
del argv[i:i + 2]
elif argv[i] == '-auto':
include_auto_equivalences = True
del argv[i:i + 1]
elif argv[i] in ('-help', '--help', '-?', '--?'):
sys.stderr.write(doc_msg)
sys.exit(0)
del argv[i:i + 1]
else:
i += 1
if len(argv) != 1:
raise InputError('Error: Unrecongized arguments: ' + ' '.join(argv[1:]) +
'\n\n' + doc_msg)
# Default styles:
if len(bond_styles_selected) == 0:
bond_styles_selected.add('class2')
if len(angle_styles_selected) == 0:
angle_styles_selected.add('class2')
if len(dihedral_styles_selected) == 0:
dihedral_styles_selected.add('class2')
if len(improper_styles_selected) == 0:
improper_styles_selected.add('class2')
if len(pair_styles_selected) == 0:
pair_styles_selected.add('lj/class2/coul/long')
#sys.stderr.write("Reading parameter file...\n")
lines = file_in.readlines()
atom2charge = OrderedDict() # lookup charge from atom type
atom2mass = OrderedDict() # lookup mass from atom type
# equivalences lookup
atom2ffid = OrderedDict() # lookup "force-field-ID" a string containing
# equivalences to lookup bonded interactions
atom2equiv_pair = OrderedDict() # lookup the equivalent symbol used for
# looking up pair interactions
atom2equiv_bond = OrderedDict()
atom2equiv_angle = OrderedDict()
atom2equiv_dihedral = OrderedDict()
atom2equiv_improper = OrderedDict()
# inverse equivalences lookup
equiv_pair2atom = defaultdict(set)
equiv_bond2atom = defaultdict(set)
equiv_angle2atom = defaultdict(set)
equiv_dihedral2atom = defaultdict(set)
equiv_improper2atom = defaultdict(set)
# auto equivalences lookup
atom2auto_pair = OrderedDict()
atom2auto_bondincr = OrderedDict()
atom2auto_bond = OrderedDict()
atom2auto_angleend = OrderedDict()
atom2auto_anglecenter = OrderedDict()
atom2auto_dihedralend = OrderedDict()
atom2auto_dihedralcenter = OrderedDict()
atom2auto_improperend = OrderedDict()
atom2auto_impropercenter = OrderedDict()
# inverse auto equivalences lookup
auto_pair2atom = defaultdict(set)
auto_bondincr2atom = defaultdict(set)
auto_bond2atom = defaultdict(set)
auto_angleend2atom = defaultdict(set)
auto_anglecenter2atom = defaultdict(set)
auto_dihedralend2atom = defaultdict(set)
auto_dihedralcenter2atom = defaultdict(set)
auto_improperend2atom = defaultdict(set)
auto_impropercenter2atom = defaultdict(set)
atom2element = OrderedDict() # Optional:
# which element (eg 'C', 'O') ? (Note this
# is different from atom type: 'C1', 'Oh')
atom2numbonds = OrderedDict() # Optional: how many bonds emanate from
atom2descr = OrderedDict() # Optional: a brief description
atom2ver = OrderedDict() # atoms introduced in different versions of ff
atom2ref = OrderedDict() # reference to paper where atom introduced
lines_equivalences = [] # equivalences for force-field lookup
lines_auto_equivalences = [] # auto_equivalences have lower priority
pair2params = OrderedDict()
pair2style = OrderedDict()
pair_styles = set([])
pair2ver = OrderedDict()
pair2ref = OrderedDict()
bond2chargepair = OrderedDict() # a.k.a "bond increments"
charge_pair_priority = OrderedDict() # priority in case multiple entries
# exist for the same pair of atoms
charge_pair_ver = OrderedDict() # which version of the force field?
charge_pair_ref = OrderedDict() # paper introducing this chargepair
bond2params = OrderedDict() # store a tuple with the 2-body bond
# interaction type, and its parameters
# for every type of bond
bond2priority = OrderedDict() # What is the priority of this interaction?
bond2style = OrderedDict() # What LAMMPS bond style (formula)
# is used for a given interaction?
bond_styles = set([]) # Contains all bond styles used.
bond2ver = OrderedDict()
bond2ref = OrderedDict()
bond2r0 = OrderedDict()
bond2r0_auto = OrderedDict()
angle2params = OrderedDict() # store a tuple with the 3-body angle
# interaction type, and its parameters
# for every type of angle
angle2params_or = OrderedDict()
# http://lammps.sandia.gov/doc/angle_class2.html
#angle2class2_a = OrderedDict() # params for the "a" class2 terms
angle2class2_bb = OrderedDict() # params for the "bb" class2 terms
angle2class2_bb_or = OrderedDict()
angle2class2_ba = OrderedDict() # params for the "ba" class2 terms
angle2class2_ba_or = OrderedDict()
angle2priority = OrderedDict() # What is the priority of this interaction?
angle2priority_or = OrderedDict()
angle2style = OrderedDict() # What LAMMPS angle style (formula)
# is used for a given interaction?
angle2style_or = OrderedDict()
angle_styles = set([]) # Contains all angle styles used.
angle2ref = OrderedDict()
angle2ver = OrderedDict()
angle2ref_or = OrderedDict()
angle2ver_or = OrderedDict()
angle2ver_bb = OrderedDict()
angle2ver_bb_or = OrderedDict()
angle2ref_bb = OrderedDict()
angle2ref_bb_or = OrderedDict()
angle2ver_ba = OrderedDict()
angle2ver_ba_or = OrderedDict()
angle2ref_ba = OrderedDict()
angle2ref_ba_or = OrderedDict()
angle2theta0_or = OrderedDict()
angle2theta0_auto_or = OrderedDict()
# http://lammps.sandia.gov/doc/dihedral_class2.html
dihedral2params = OrderedDict() # store a tuple with the 4-body dihedral
# interaction type, and its parameters
# for every type of dihedral
dihedral2params_or = OrderedDict()
#dihedral2class2_d = OrderedDict() # params for the "d" class2 term
dihedral2class2_mbt = OrderedDict() # params for the "mbt" class2 term
dihedral2class2_mbt_or = OrderedDict()
dihedral2class2_ebt = OrderedDict() # params for the "ebt" class2 term
dihedral2class2_ebt_or = OrderedDict()
#dihedral2sym_ebt = OrderedDict()
dihedral2class2_at = OrderedDict() # params for the "at" class2 term
dihedral2class2_at_or = OrderedDict()
#dihedral2sym_at = OrderedDict()
dihedral2class2_aat = OrderedDict() # params for the "aat" class2 term
dihedral2class2_aat_or = OrderedDict()
#dihedral2sym_aat = OrderedDict()
dihedral2class2_bb13 = OrderedDict() # params for the "bb13" class2 term
dihedral2class2_bb13_or = OrderedDict()
#dihedral2sym_bb13 = OrderedDict()
dihedral2priority = OrderedDict() # What is the priority of this interaction?
dihedral2priority_or = OrderedDict()
dihedral2style = OrderedDict() # What LAMMPS dihedral style (formula)
# is used for a given interaction?
dihedral2style_or = OrderedDict()
dihedral_styles = set([]) # Contains all dihedral styles used.
dihedral2ref = OrderedDict()
dihedral2ver = OrderedDict()
dihedral2ver_or = OrderedDict()
dihedral2ref_or = OrderedDict()
dihedral2ver_mbt = OrderedDict()
dihedral2ver_mbt_or = OrderedDict()
dihedral2ref_mbt = OrderedDict()
dihedral2ref_mbt_or = OrderedDict()
dihedral2ver_ebt = OrderedDict()
dihedral2ver_ebt_or = OrderedDict()
dihedral2ref_ebt = OrderedDict()
dihedral2ref_ebt_or = OrderedDict()
dihedral2ver_at = OrderedDict()
dihedral2ver_at_or = OrderedDict()
dihedral2ref_at = OrderedDict()
dihedral2ref_at_or = OrderedDict()
dihedral2ver_aat = OrderedDict()
dihedral2ver_aat_or = OrderedDict()
dihedral2ref_aat = OrderedDict()
dihedral2ref_aat_or = OrderedDict()
dihedral2ver_bb13 = OrderedDict()
dihedral2ver_bb13_or = OrderedDict()
dihedral2ref_bb13 = OrderedDict()
dihedral2ref_bb13_or = OrderedDict()
# http://lammps.sandia.gov/doc/improper_class2.html
improper2params = OrderedDict() # store a tuple with the 4-body improper
# interaction type, and its parameters
# for every type of imporpoer
improper2params_or = OrderedDict()
improper2class2_aa = OrderedDict() # params for the "aa" class2 term
improper2class2_aa_or = OrderedDict()
improper2cross = defaultdict(dict)
# improper2cross[imp_name][atoms] stores the
# coefficient (K) for the angle-angle ("aa")
# improper interactions between a pair of
# neighboring 3-body angles (in the .FRC file).
# "imp_name" is the name of the improper interaction
# (which is a concatination of the central atom and
# the 3 surrounding leaf atoms (which are sorted))
# "atoms" indicates, for that K value, the list of
# leaf atoms for that K value as they appear in the
# corresponding line of the .frc file (however the
# and last atom names are swapped if the first
# atom name is lexicographically > the last, to
# eliminate redundancy and ambiguity.)
improper2sym = defaultdict(set)
# improper2sym[imp_name] indicates which subset of
# leaf atoms (from 0 to 2) are equivalent and can
# tolerate having their order rearranged without
# effecting the energy. Later on this will be used
# to reduce the number of improper interactions that
# will be generated by moltemplate.
improper2priority = OrderedDict() # What is the priority of this interaction?
improper2priority_or = OrderedDict()
improper2style = OrderedDict() # What LAMMPS improper style (formula)
# is used for a given interaction?
improper2style_or = OrderedDict()
improper_styles = set([]) # Contains all improper styles used.
improper2ver = OrderedDict()
improper2ver_or = OrderedDict()
improper2ref = OrderedDict()
improper2ref_or = OrderedDict()
improper2ver_aa = OrderedDict()
improper2ver_aa_or = OrderedDict()
improper2ref_aa = OrderedDict()
improper2ref_aa_or = OrderedDict()
# Warn users if force field contains terms which cannot yet
# be simulated with LAMMPS (as of 2017-2-07)
display_OOP_OOP_warning = False
display_torsion_torsion_1_warning = False
"""
--- these next few lines of code appear to be unnecessary.
--- I'll probably delete this code in a later version
hbond2params = OrderedDict() # lookup hbond parameters and atom types
hbond2donors = OrderedDict() # according to the identifier in the 2nd
hbond2acceptors = OrderedDict() # column of the "#hbond_definition"
hbond2hydrogens = OrderedDict() # section of an .frc file.
"""
allowed_section_names = set(['#define',
# sections used in all MSI force-fields
'#atom_types',
'#equivalence',
'#auto_equivalence',
'#nonbond(9-6)',
'#nonbond(12-6)',
'#quadratic_bond',
'#quartic_bond',
'#morse_bond',
'#quadratic_angle',
'#quartic_angle',
'#bond-bond',
'#bond-angle',
'#torsion_1',
'#torsion_3',
'#middle_bond-torsion_3',
'#end_bond-torsion_3',
'#angle-torsion_3',
'#angle-angle-torsion_1',#(class2 dihedral)
'#bond-bond_1_3', #(a class2 dihedral term)
'#out_of_plane',
'#wilson_out_of_plane',
'#angle-angle', #(a class2 improper term)
'#out_of_plane-out_of_plane', # UNSUPPORTED
'#torsion-torsion_1', # UNSUPPORTED
'#bond_increments',
'#hbond_definition', # irrelevant?
'#templates',
'#reference',
'#end'
])
icol_type = icol_mass = icol_elem = icol_nbonds = icol_comment = icol_ver = icol_ref = -1
section_name = ''
section_is_auto = False
sys.stderr.write("parsing file pass1: look for atom types and equivalences...")
for iline in range(0, len(lines)):
line = lines[iline]
sys.stderr.write('line=\"' + line.strip() + '\"\n')
tokens = SplitQuotedString(line.strip(),
quotes='',
comment_char='>')
#sys.stderr.write('tokens = ' + str(tokens) + '\n')
if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
continue
if line.lstrip(' ').find('#') == 0:
#sys.stderr.write('allowed_section_names = ' +
# str(allowed_section_names) + '\n')
if tokens[0] in allowed_section_names:
section_name = tokens[0]
section_is_auto = tokens[-1].endswith('_auto')
tokens_after_section_name = tokens[1:]
sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
continue
elif not tokens[0] in ('#version',
'#define'):
raise InputError('Error: Line# '+str(iline) +'\n'
' Unrecognized section name:\n'
' \"' + tokens[0] + '\"\n')
elif (len(tokens) == 8) and (section_name == '#equivalence'):
if line.lstrip().find('!') == 0:
continue
lines_equivalences.append(line)
elif (len(tokens) == 12) and (section_name == '#auto_equivalence'):
if line.lstrip().find('!') == 0:
continue
lines_auto_equivalences.append(line)
elif (len(tokens) > 0) and (section_name == '#atom_types'):
# Different FRC files put this information in different
# columns. Column order is stored in the !Ver comment line:
if line.lstrip().find('!Ver') == 0:
tokens = line.strip().split()
for i in range(0, len(tokens)):
if tokens[i].lower() == 'type':
icol_type = i
elif tokens[i].lower() == 'mass':
icol_mass = i
elif tokens[i].lower() == 'element':
icol_elem = i
elif tokens[i].lower() == 'connections':
icol_nbonds = i
elif tokens[i].lower() == 'comment':
icol_comment = i
elif tokens[i].lower() == '!ver': #(version of ff)
icol_ver = i
elif tokens[i].lower() == 'ref':
icol_ref = i
assert(icol_ver == 0)
if -1 in (icol_type, icol_mass):
raise InputError('Error: Invalid #atom_types section.\n'
' The meaning of each column cannot be determined.\n'
' This file needs a valid "!Ver..." comment.\n')
if icol_comment == -1:
icol_comment = max(icol_type, icol_mass,
icol_elem, icol_nbonds) + 1
sys.stderr.write('icol_ver = '+str(icol_ver)+'\n')
sys.stderr.write('icol_ref = '+str(icol_ref)+'\n')
sys.stderr.write('icol_mass = '+str(icol_mass)+'\n')
sys.stderr.write('icol_nelem = '+str(icol_elem)+'\n')
sys.stderr.write('icol_nbonds = '+str(icol_nbonds)+'\n')
sys.stderr.write('icol_comment = '+str(icol_comment)+'\n')
continue
tokens = map(RemoveOuterQuotes,
NSplitQuotedString(line.strip(),
icol_comment+1,
quotes='',
comment_char='>'))
tokens = list(tokens)
if (len(tokens) > 4):
if ((len(type_subset) == 0) or (tokens[1] in type_subset)):
aname = EncodeAName(tokens[icol_type])
atom2mass[aname] = str(max(float(tokens[icol_mass]), 1.0e-06))
# Some atoms in cvff.prm have zero mass. Unfortunately this
# causes LAMMPS to crash, even if these atoms are never used,
# so I give the mass a non-zero value instead.
if icol_elem != -1:
atom2element[aname] = tokens[icol_elem]
if icol_nbonds != -1:
atom2numbonds[aname] = int(tokens[icol_nbonds])
atom2descr[aname] = tokens[icol_comment]
atom2ver[aname] = tokens[icol_ver]
atom2ref[aname] = tokens[icol_ref]
elif len(tokens) > 0:
raise InputError('Error: Invalid atom line: (line#'+str(iline)+')\n' +
'\"'+line.strip()+'\"')
atom_types = [x for x in atom2mass]
# Now construct the lookup tables and inverse tables
# we will need to understand the remainder of the file:
if not include_auto_equivalences:
atom2ffid = Equivalences2ffids(lines_equivalences,
atom_types,
atom2equiv_pair,
atom2equiv_bond,
atom2equiv_angle,
atom2equiv_dihedral,
atom2equiv_improper)
else:
atom2ffid = AutoEquivalences2ffids(lines_equivalences,
lines_auto_equivalences,
atom_types,
atom2equiv_pair,
atom2equiv_bond,
atom2equiv_angle,
atom2equiv_dihedral,
atom2equiv_improper,
atom2auto_pair,
atom2auto_bondincr,
atom2auto_bond,
atom2auto_angleend,
atom2auto_anglecenter,
atom2auto_dihedralend,
atom2auto_dihedralcenter,
atom2auto_improperend,
atom2auto_impropercenter)
for a,e in atom2equiv_pair.items():
equiv_pair2atom[e].add(a)
for a,e in atom2equiv_bond.items():
equiv_bond2atom[e].add(a)
for a,e in atom2equiv_angle.items():
equiv_angle2atom[e].add(a)
for a,e in atom2equiv_dihedral.items():
equiv_dihedral2atom[e].add(a)
for a,e in atom2equiv_improper.items():
equiv_improper2atom[e].add(a)
# the inverse lookup for '*' matches all atom types
for a in atom_types:
#equiv_pair2atom['*'].add(EncodeAName(a))
equiv_pair2atom['X'].add(EncodeAName(a))
#equiv_bond2atom['*'].add(EncodeAName(a))
equiv_bond2atom['X'].add(EncodeAName(a))
#equiv_angle2atom['*'].add(EncodeAName(a))
equiv_angle2atom['X'].add(EncodeAName(a))
#equiv_dihedral2atom['*'].add(EncodeAName(a))
equiv_dihedral2atom['X'].add(EncodeAName(a))
#equiv_improper2atom['*'].add(EncodeAName(a))
equiv_improper2atom['X'].add(EncodeAName(a))
for a,e in atom2auto_pair.items():
auto_pair2atom[e].add(a)
for a,e in atom2auto_bondincr.items():
auto_bondincr2atom[e].add(a)
for a,e in atom2auto_bond.items():
auto_bond2atom[e].add(a)
for a,e in atom2auto_angleend.items():
auto_angleend2atom[e].add(a)
#auto_angle[0][e].add(a)
#auto_angle[2][e].add(a)
for a,e in atom2auto_anglecenter.items():
auto_anglecenter2atom[e].add(a)
#auto_angle[1][e].add(a)
for a,e in atom2auto_dihedralend.items():
auto_dihedralend2atom[e].add(a)
#auto_dihedral2atom[0][e].add(a)
#auto_dihedral2atom[3][e].add(a)
for a,e in atom2auto_dihedralcenter.items():
auto_dihedralcenter2atom[e].add(a)
#auto_dihedral2atom[1][e].add(a)
#auto_dihedral2atom[2][e].add(a)
for a,e in atom2auto_improperend.items():
auto_improperend2atom[e].add(a)
for a,e in atom2auto_impropercenter.items():
auto_impropercenter2atom[e].add(a)
# the inverse lookup for '*' matches all atom types
for a in atom_types:
#auto_pair2atom['*'].add(EncodeAName(a))
auto_pair2atom['X'].add(EncodeAName(a))
#auto_bondincr2atom['*'].add(EncodeAName(a))
auto_bondincr2atom['X'].add(EncodeAName(a))
#auto_bond2atom['*'].add(EncodeAName(a))
auto_bond2atom['X'].add(EncodeAName(a))
#auto_angleend2atom['*'].add(EncodeAName(a))
auto_angleend2atom['X'].add(EncodeAName(a))
#auto_anglecenter2atom['*'].add(EncodeAName(a))
auto_anglecenter2atom['X'].add(EncodeAName(a))
#auto_dihedralend2atom['*'].add(EncodeAName(a))
auto_dihedralend2atom['X'].add(EncodeAName(a))
#auto_dihedralcenter2atom['*'].add(EncodeAName(a))
auto_dihedralcenter2atom['X'].add(EncodeAName(a))
#auto_improperend2atom['*'].add(EncodeAName(a))
auto_improperend2atom['X'].add(EncodeAName(a))
#auto_impropercenter2atom['*'].add(EncodeAName(a))
auto_impropercenter2atom['X'].add(EncodeAName(a))
sys.stderr.write("parsing file pass2: look for bonds, bond_increments and nonbonded (pair) interactions...")
for iline in range(0, len(lines)):
line = lines[iline]
sys.stderr.write('line=\"' + line.strip() + '\"\n')
tokens = SplitQuotedString(line.strip(),
quotes='',
comment_char='>')
#sys.stderr.write('tokens = ' + str(tokens) + '\n')
if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
continue
if line.lstrip(' ').find('#') == 0:
#sys.stderr.write('allowed_section_names = ' +
# str(allowed_section_names) + '\n')
if (tokens[0] in allowed_section_names):
section_name = tokens[0]
section_is_auto = tokens[-1].endswith('_auto')
tokens_after_section_name = tokens[1:]
sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
continue
elif (not tokens[0] in ('#version','#define')):
raise InputError('Error: Line# '+str(iline) +'\n'
' Unrecognized section name:\n'
' \"' + tokens[0] + '\"\n')
elif ((len(tokens) > 4) and (section_name == '#nonbond(12-6)')
and (pair_styles_selected & set(['lj','lj/cut','lj/cut/coul/long',
'lj/cut/coul/cut','lj/cut/coul/debye',
'lj/cut/coul/dsf','lj/cut/coul/msm',
'12-6','nonbond(12-6)']))):
if line.lstrip().find('!') == 0:
continue
atom_name = EncodeAName(tokens[2])
pair2ver[atom_name] = tokens[0]
pair2ref[atom_name] = tokens[1]
A = float(tokens[3])
B = float(tokens[4])
epsilon = B*B/(4*A)
sigma = pow(B/A, 1.0/6)
if sigma == 0.0:
sigma = 1.0 #(non-zero to avoid nan error later)
pair_styles.add('lj/cut/coul/long')
pair_style_args['lj/cut/coul/long'] = pair_cutoff
pair2style[atom_name] = 'lj/cut/coul/long'
pair2params[atom_name] = (str(epsilon)+' '+str(sigma))
pair_mixing_style = 'geometric tail yes'
#if pair_style_name.find('lj/cut') == 0:
# pair2params[atom_name] = (str(epsilon)+' '+str(sigma))
# pair_mixing_style = 'geometric tail yes'
elif ((len(tokens) > 4) and (section_name == '#nonbond(9-6)')
and (pair_styles_selected &
set(['class2', '9-6', 'nonbond(9-6)',
'lj/class2/coul/long']))):
if line.lstrip().find('!') == 0:
continue
atom_name = EncodeAName(tokens[2])
pair2ver[atom_name] = tokens[0]
pair2ref[atom_name] = tokens[1]
sigma = tokens[3]
epsilon = tokens[4]
pair_styles.add('lj/class2/coul/long')
pair_style_args['lj/class2/coul/long'] = pair_cutoff
pair2style[atom_name] = 'lj/class2/coul/long'
pair2params[atom_name] = (epsilon+' '+sigma)
pair_mixing_style = 'sixthpower tail yes'
#if pair_style_name.find('lj/class2') == 0:
# pair2params[atom_name] = (epsilon+' '+sigma)
# pair_mixing_style = 'sixthpower tail yes'
elif (len(tokens) == 6) and (section_name == '#bond_increments'):
if line.lstrip().find('!') == 0:
continue
aorig = [a for a in map(EncodeAName, tokens[2:4])]
delta_q = tokens[4:6]
atom_names = [a for a in aorig]
# swap the order of the atoms?
order_reversed = aorig[0] > aorig[-1]
if order_reversed:
delta_q.reverse()
atom_names.reverse()
bond_name = EncodeInteractionName(atom_names, section_is_auto)
charge_pair_ver[bond_name] = tokens[0]
charge_pair_ref[bond_name] = tokens[1]
charge_pair_priority[bond_name] = \
(0,
section_is_auto,
DetermineNumericPriority(section_is_auto,
tokens[2:4],
float(charge_pair_ver[bond_name])))
bond2chargepair[bond_name] = (delta_q[0] + ' ' + delta_q[1])
elif ((len(tokens) > 5) and (section_name == '#quadratic_bond')
and (bond_styles_selected & set(['harmonic','quadratic','quadratic_bond']))):
if line.lstrip().find('!') == 0:
continue
bond_styles.add('harmonic')
atom_names = SortByEnds(map(EncodeAName, tokens[2:4]))
bond_name = EncodeInteractionName(atom_names, section_is_auto)
bond2ver[bond_name] = tokens[0]
bond2ref[bond_name] = tokens[1]
bond2priority[bond_name] = \
(0,
section_is_auto,
DetermineNumericPriority(section_is_auto,
tokens[2:4],
float(bond2ver[bond_name])))
r0 = tokens[4]
k = tokens[5]
if not section_is_auto:
bond2r0[bond_name] = r0
sys.stderr.write('bond2r0['+bond_name+'] = ' + str(r0) + '\n')
else:
bond2r0_auto[(atom_names[0], atom_names[1])] = r0
sys.stderr.write('bond2r0_auto['+str(atom_names)+'] = ' + str(r0) + '\n')
bond2style[bond_name] = 'harmonic'
bond2params[bond_name] = (k+' '+r0)
elif ((len(tokens) > 6) and (section_name == '#morse_bond')
and (bond_styles_selected & set(['morse','morse_bond']))):
if line.lstrip().find('!') == 0:
continue
bond_styles.add('morse')
atom_names = SortByEnds(map(EncodeAName, tokens[2:4]))
bond_name = EncodeInteractionName(atom_names, section_is_auto)
bond2ver[bond_name] = tokens[0]
bond2ref[bond_name] = tokens[1]
bond2priority[bond_name] = \
(0,
section_is_auto,
DetermineNumericPriority(section_is_auto,
tokens[2:4],
float(bond2ver[bond_name])))
r0 = tokens[4]
D = tokens[5]
alpha = tokens[6]
sys.stderr.write('DEBUG: morse: atom_names = '+str(atom_names)+'\n')
if not section_is_auto:
bond2r0[bond_name] = r0
sys.stderr.write('bond2r0['+bond_name+'] = ' + str(r0) + '\n')
else:
bond2r0_auto[(atom_names[0], atom_names[1])] = r0
sys.stderr.write('bond2r0_auto['+str(atom_names)+'] = ' + str(r0) + '\n')
bond2style[bond_name] = 'morse'
bond2params[bond_name] = (D+' '+alpha+' '+r0)
elif ((len(tokens) > 7) and (section_name == '#quartic_bond')
and (bond_styles_selected & set(['class2','quartic','quartic_bond']))):
if line.lstrip().find('!') == 0:
continue
bond_styles.add('class2')
atom_names = SortByEnds(map(EncodeAName, tokens[2:4]))
bond_name = EncodeInteractionName(atom_names, section_is_auto)
bond2ver[bond_name] = tokens[0]
bond2ref[bond_name] = tokens[1]
bond2priority[bond_name] = \
(0,
section_is_auto,
DetermineNumericPriority(section_is_auto,
tokens[2:4],
float(bond2ver[bond_name])))
r0 = tokens[4]
if not section_is_auto:
bond2r0[bond_name] = r0
sys.stderr.write('bond2r0['+bond_name+'] = ' + str(r0) + '\n')
else:
bond2r0_auto[(atom_names[0], atom_names[1])] = r0
sys.stderr.write('bond2r0_auto['+str(atom_names)+'] = ' + str(r0) + '\n')
K2 = tokens[5]
K3 = tokens[6]
K4 = tokens[7]
bond2style[bond_name] = 'class2'
bond2params[bond_name] = (r0+' '+K2+' '+K3+' '+K4)
# Pass 3 over the file: collect 3-body (angle) interactions and their
# class2 bond-bond / bond-angle cross terms.
sys.stderr.write("parsing file pass3: look for (3-body) angle interactions...")
for iline in range(0, len(lines)):
    line = lines[iline]
    sys.stderr.write('line=\"' + line.strip() + '\"\n')
    tokens = SplitQuotedString(line.strip(),
                               quotes='',
                               comment_char='>')
    #sys.stderr.write('tokens = ' + str(tokens) + '\n')
    # Skip "!" comment lines, except the "!Ver" column-header line.
    if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
        continue
    # Lines beginning with '#' start a new section of the .frc file.
    if line.lstrip(' ').find('#') == 0:
        #sys.stderr.write('allowed_section_names = ' +
        #                 str(allowed_section_names) + '\n')
        if (tokens[0] in allowed_section_names):
            section_name = tokens[0]
            # "_auto" sections use auto-equivalence atom names.
            section_is_auto = tokens[-1].endswith('_auto')
            tokens_after_section_name = tokens[1:]
            sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
            continue
        elif (not tokens[0] in ('#version','#define')):
            raise InputError('Error: Line# '+str(iline) +'\n'
                             ' Unrecognized section name:\n'
                             ' \"' + tokens[0] + '\"\n')
elif (len(tokens) > 6) and (section_name == '#quadratic_angle'):
if line.lstrip().find('!') == 0:
continue
atom_names = SortByEnds(map(EncodeAName, tokens[2:5]))
angle_name = EncodeInteractionName(atom_names, section_is_auto)
angle2ver[angle_name] = tokens[0]
angle2ref[angle_name] = tokens[1]
angle2priority_or[angle_name] = \
DetermineNumericPriority(section_is_auto,
tokens[2:5],
float(angle2ver[angle_name]))
angle2priority[angle_name] = \
(0,
section_is_auto,
angle2priority_or[angle_name])
theta0 = tokens[5]
k = tokens[6]
if not section_is_auto:
angle2theta0_or[angle_name] = theta0
sys.stderr.write('angle2theta0_or['+angle_name+'] = ' + str(theta0) + '\n')
else:
angle2theta0_auto_or[(atom_names[0], atom_names[1], atom_names[2])] = theta0
sys.stderr.write('angle2theta0_auto_or['+str(atom_names)+'] = ' + str(theta0) + '\n')
if (angle_styles_selected & set(['harmonic',
'quadratic',
'quadratic_angle'])):
angle_styles.add('harmonic')
angle2style[angle_name] = 'harmonic'
angle2params[angle_name] = (k+' '+theta0)
elif (angle_styles_selected & set(['class2',
'quartic',
'quartic_angle'])):
# Then this is a special case of the class2 angle where
# the (theta-theta0)^3 and (theta-theta0)^4 terms = 0
angle_styles.add('class2')
angle2style_or[angle_name] = 'class2'
angle2params_or[angle_name] = (theta0+' '+k+' 0 0')
# "#quartic_angle" section: full class2 angle
# E = K2(t-t0)^2 + K3(t-t0)^3 + K4(t-t0)^4 (LAMMPS angle_style class2).
elif ((len(tokens) > 8) and (section_name == '#quartic_angle')
      and (angle_styles_selected & set(['class2','quartic','quartic_angle']))):
    if line.lstrip().find('!') == 0:
        continue
    angle_styles.add('class2')
    atom_names = SortByEnds(map(EncodeAName, tokens[2:5]))
    ang_name_orig = EncodeInteractionName(atom_names, section_is_auto)
    version = tokens[0]
    reference = tokens[1]
    angle2ver_or[ang_name_orig] = version
    angle2ref_or[ang_name_orig] = reference
    angle2priority_or[ang_name_orig] = \
        DetermineNumericPriority(section_is_auto,
                                 tokens[2:5],
                                 float(angle2ver_or[ang_name_orig]))
    #angle2priority[ang_name_orig] = \
    #    (0,
    #     section_is_auto,
    #     angle2priority_or[ang_name_orig])
    theta0 = tokens[5]
    # Rest angle stored for later lookup by dihedral cross terms.
    if not section_is_auto:
        angle2theta0_or[ang_name_orig] = theta0
        sys.stderr.write('angle2theta0_or['+ang_name_orig+'] = ' + str(theta0) + '\n')
    else:
        angle2theta0_auto_or[(atom_names[0], atom_names[1], atom_names[2])] = theta0
        sys.stderr.write('angle2theta0_auto_or['+str(atom_names)+'] = ' + str(theta0) + '\n')
    K2 = tokens[6]
    K3 = tokens[7]
    K4 = tokens[8]
    angle2style_or[ang_name_orig] = 'class2'
    angle2params_or[ang_name_orig] = [theta0, K2, K3, K4]
    # Cross terms default to zero; they may be overwritten later by the
    # '#bond-bond' and '#bond-angle' sections.
    if not ang_name_orig in angle2class2_bb_or:
        angle2class2_bb_or[ang_name_orig] = '0.0' # default value
        angle2ver_bb_or[ang_name_orig] = version # default value
        angle2ref_bb_or[ang_name_orig] = reference # default value
    if not ang_name_orig in angle2class2_ba_or:
        angle2class2_ba_or[ang_name_orig] = ['0.0', '0.0'] # default value
        angle2ver_ba_or[ang_name_orig] = version # default value
        angle2ref_ba_or[ang_name_orig] = reference # default value
# "#bond-bond" / "#bond-angle" sections: class2 angle cross terms.
elif ((len(tokens) > 5) and
      (section_name in ('#bond-bond', '#bond-angle')) and
      (angle_styles_selected &
       set(['class2', 'quartic', 'quartic_angle']))):
    if line.lstrip().find('!') == 0:
        continue
    version = tokens[0]
    reference = tokens[1]
    # NOTE(review): this '!' check is a redundant duplicate of the one
    # three lines above; it can never trigger here.
    if line.lstrip().find('!') == 0:
        continue
    aorig = [a for a in map(EncodeAName, tokens[2:5])]
    atom_names = SortByEnds(aorig)
    ang_name_orig = EncodeInteractionName(atom_names, section_is_auto)
    # K may have one value (applies to both bonds) or two values.
    K = ['', '']
    K[0] = tokens[5]
    K[1] = K[0]
    if len(tokens) > 6:
        K[1] = tokens[6]
    # If SortByEnds() flipped the atom order, flip the two K values too
    # so each K stays associated with the correct bond.
    order_reversed = aorig[0] > aorig[-1]
    if order_reversed:
        K.reverse()
    if (section_name == '#bond-bond'):
        angle2class2_bb_or[ang_name_orig] = K[0]
        angle2ver_bb_or[ang_name_orig] = version
        angle2ref_bb_or[ang_name_orig] = reference
    elif (section_name == '#bond-angle'):
        angle2class2_ba_or[ang_name_orig] = [k for k in K]
        angle2ver_ba_or[ang_name_orig] = version
        angle2ref_ba_or[ang_name_orig] = reference
    # If no '#quartic_angle' entry was seen for this triple, give it a
    # zeroed main angle term so post-processing finds an entry.
    if not ang_name_orig in angle2params_or:
        angle2params_or[ang_name_orig] = ['0.0', '0.0', '0.0', '0.0'] # default value
        angle2ver_or[ang_name_orig] = version
        angle2ref_or[ang_name_orig] = reference
        angle2priority_or[ang_name_orig] = 0.0
# Pass 4 over the file: collect 4-body dihedral (torsion) and improper
# (out-of-plane) interactions, including all class2 cross terms.
sys.stderr.write("parsing file pass4: look for dihedrals(torsions) and impropers(out_of_plane)...")
for iline in range(0, len(lines)):
    line = lines[iline]
    sys.stderr.write('line=\"' + line.strip() + '\"\n')
    tokens = SplitQuotedString(line.strip(),
                               quotes='',
                               comment_char='>')
    #sys.stderr.write('tokens = ' + str(tokens) + '\n')
    # Skip "!" comment lines, except the "!Ver" column-header line.
    if line.lstrip().find('!') == 0 and tokens[0] != '!Ver':
        continue
    # Lines beginning with '#' start a new section of the .frc file.
    if line.lstrip(' ').find('#') == 0:
        #sys.stderr.write('allowed_section_names = ' +
        #                 str(allowed_section_names) + '\n')
        if (tokens[0] in allowed_section_names):
            section_name = tokens[0]
            # "_auto" sections use auto-equivalence atom names.
            section_is_auto = tokens[-1].endswith('_auto')
            tokens_after_section_name = tokens[1:]
            sys.stderr.write(' encountered section \"'+tokens[0]+'\"\n')
            continue
        elif (not tokens[0] in ('#version','#define')):
            raise InputError('Error: Line# '+str(iline) +'\n'
                             ' Unrecognized section name:\n'
                             ' \"' + tokens[0] + '\"\n')
elif (len(tokens) > 8) and (section_name == '#torsion_1'):
if line.lstrip().find('!') == 0:
continue
atom_names = SortByEnds(map(EncodeAName, tokens[2:6]))
dihedral_name = EncodeInteractionName(atom_names, section_is_auto)
dihedral2ver[dihedral_name] = tokens[0]
dihedral2ref[dihedral_name] = tokens[1]
dihedral2priority_or[dihedral_name] = \
DetermineNumericPriority(section_is_auto,
tokens[2:6],
float(dihedral2ver[dihedral_name]))
dihedral2priority[dihedral_name] = \
(0,
section_is_auto,
dihedral2priority_or[dihedral_name])
K = tokens[6]
n = tokens[7]
d = tokens[8]
w = '0.0' #ignore: this is only used by the CHARMM force field
if (dihedral_styles_selected & set(['charmm','torsion_1'])):
dihedral_styles.add('charmm')
dihedral2style[dihedral_name] = 'charmm'
#dihedral2params_or[dihedral_name] = [K,n,d,w]
dihedral2params[dihedral_name] = (K+' '+n+' '+d+' '+w)
elif (dihedral_styles_selected & set(['class2','torsion_3'])):
# Then this is a special case of the class2 angle
# lacking the higher terms in the Fourier series
dihedral_styles.add('class2')
dihedral2style[dihedral_name] = 'class2'
dihedral2params_or[dihedral_name] = [K,d,0,0,0,0]
#= (K+' '+d+' '+
# '0 0 '+'0 0')
# "#torsion_3" section: 3-term Fourier torsion (LAMMPS dihedral_style
# class2): E = sum_i Vi*(1 - cos(i*phi - phi0_i)), i = 1..3.
elif ((len(tokens) > 7) and (section_name == '#torsion_3')
      and (dihedral_styles_selected & set(['class2','torsion_3']))):
    if line.lstrip().find('!') == 0:
        continue
    dihedral_styles.add('class2')
    atom_names = SortByEnds(map(EncodeAName, tokens[2:6]))
    dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
    version = tokens[0]
    reference = tokens[1]
    dihedral2priority_or[dih_name_orig] = \
        DetermineNumericPriority(section_is_auto,
                                 tokens[2:6],
                                 float(version))
    #dihedral2priority[dih_name_orig] = \
    #    (0,
    #     section_is_auto,
    #     dihedral2priority_or[dih_name_orig])
    # The 2nd and 3rd Fourier terms are optional in the .frc file.
    V1 = tokens[6]
    phi0_1 = tokens[7]
    V2 = phi0_2 = V3 = phi0_3 = '0.0'
    if len(tokens) > 9:
        V2 = tokens[8]
        phi0_2 = tokens[9]
    if len(tokens) > 11:
        V3 = tokens[10]
        phi0_3 = tokens[11]
    dihedral2style_or[dih_name_orig] = 'class2'
    dihedral2ver_or[dih_name_orig] = version
    dihedral2ref_or[dih_name_orig] = reference
    dihedral2params_or[dih_name_orig] = [V1, phi0_1, V2, phi0_2, V3, phi0_3]
    # default values for cross terms:
    if not dih_name_orig in dihedral2class2_mbt_or:
        dihedral2class2_mbt_or[dih_name_orig] = ['0.0','0.0','0.0'] # default value
        dihedral2ver_mbt_or[dih_name_orig] = version
        dihedral2ref_mbt_or[dih_name_orig] = reference
    if not dih_name_orig in dihedral2class2_ebt_or:
        dihedral2class2_ebt_or[dih_name_orig] = [['0.0','0.0','0.0'],['0.0','0.0','0.0']] # default value
        dihedral2ver_ebt_or[dih_name_orig] = version
        dihedral2ref_ebt_or[dih_name_orig] = reference
    if not dih_name_orig in dihedral2class2_bb13_or:
        dihedral2class2_bb13_or[dih_name_orig] = '0.0' # default value
        dihedral2ver_bb13_or[dih_name_orig] = version
        dihedral2ref_bb13_or[dih_name_orig] = reference
    if not dih_name_orig in dihedral2class2_at_or:
        dihedral2class2_at_or[dih_name_orig] = [['0.0','0.0','0.0'],['0.0','0.0','0.0']] # default value
        dihedral2ver_at_or[dih_name_orig] = version
        dihedral2ref_at_or[dih_name_orig] = reference
    if not dih_name_orig in dihedral2class2_aat_or:
        dihedral2class2_aat_or[dih_name_orig] = '0.0' # default value
        dihedral2ver_aat_or[dih_name_orig] = version
        dihedral2ref_aat_or[dih_name_orig] = reference
# "#middle_bond-torsion_3" section: class2 "mbt" cross term coupling the
# central bond length to the torsion angle.
elif ((len(tokens) > 6) and (section_name == '#middle_bond-torsion_3')
      and (dihedral_styles_selected & set(['class2','torsion_3']))):
    if line.lstrip().find('!') == 0:
        continue
    dihedral_styles.add('class2')
    version = tokens[0]
    reference = tokens[1]
    # NOTE(review): redundant duplicate of the '!' check above.
    if line.lstrip().find('!') == 0:
        continue
    aorig = [a for a in map(EncodeAName, tokens[2:6])]
    atom_names = SortByEnds(aorig)
    # 2nd and 3rd coefficients are optional.
    Fmbt = [tokens[6], '0.0', '0.0']
    if len(tokens) > 7:
        Fmbt[1] = tokens[7]
    if len(tokens) > 8:
        Fmbt[2] = tokens[8]
    dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
    #sys.stderr.write('DEBUG: (a2,a3) = '+str((a2,a3))+', '
    #                 ' (b1,b2) = '+str(batoms)+'\n')
    dihedral2style[dih_name_orig] = 'class2'
    dihedral2class2_mbt_or[dih_name_orig] = [F for F in Fmbt]
    dihedral2ver_mbt_or[dih_name_orig] = version
    dihedral2ref_mbt_or[dih_name_orig] = reference
    # If no '#torsion_3' entry was seen yet, give this dihedral a zeroed
    # main Fourier term so post-processing finds an entry.
    if not dih_name_orig in dihedral2params_or:
        dihedral2params_or[dih_name_orig] = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0']
        dihedral2ver_or[dih_name_orig] = version
        dihedral2ref_or[dih_name_orig] = reference
        dihedral2priority_or[dih_name_orig] = 0.0
elif ((len(tokens) > 6) and
(section_name in ('#end_bond-torsion_3',
'#bond-bond_1_3')) and
(dihedral_styles_selected &
set(['class2', 'torsion_3']))):
if line.lstrip().find('!') == 0:
continue
dihedral_styles.add('class2')
version = tokens[0]
reference = tokens[1]
if line.lstrip().find('!') == 0:
continue
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names = SortByEnds(aorig)
dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
dihedral2style[dih_name_orig] = 'class2'
if section_name == '#end_bond-torsion_3':
Febt = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
Febt[0][0] = tokens[6]
if len(tokens) > 7:
Febt[0][1] = tokens[7]
if len(tokens) > 8:
Febt[0][2] = tokens[8]
Febt[1][0] = Febt[0][0]
Febt[1][1] = Febt[0][1]
Febt[1][2] = Febt[0][2]
if len(tokens) > 9:
Febt[1][0] = tokens[9]
if len(tokens) > 10:
Febt[1][1] = tokens[10]
if len(tokens) > 11:
Febt[1][2] = tokens[11]
order_reversed = aorig[0] > aorig[-1]
if order_reversed:
Febt.reverse()
dihedral2class2_ebt_or[dih_name_orig] = [ [F_ij for F_ij in F_i] for F_i in Febt] #deep copy of Febt[][]
dihedral2ver_ebt_or[dih_name_orig] = version
dihedral2ref_ebt_or[dih_name_orig] = reference
elif section_name == '#bond-bond_1_3':
Kbb13 = tokens[6]
#dihedral2ver_bb13[dih_name_orig] = version
dihedral2class2_bb13_or[dih_name_orig] = Kbb13
dihedral2ver_bb13_or[dih_name_orig] = version
dihedral2ref_bb13_or[dih_name_orig] = reference
else:
assert(False)
if not dih_name_orig in dihedral2params_or:
dihedral2params_or[dih_name_orig] = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0']
dihedral2ver_or[dih_name_orig] = version
dihedral2ref_or[dih_name_orig] = reference
dihedral2priority_or[dih_name_orig] = 0.0
elif ((len(tokens) > 6) and
(section_name in ('#angle-torsion_3',
'#angle-angle-torsion_1')) and
(dihedral_styles_selected &
set(['class2', 'torsion_3']))):
if line.lstrip().find('!') == 0:
continue
dihedral_styles.add('class2')
version = tokens[0]
reference = tokens[1]
if line.lstrip().find('!') == 0:
continue
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names = SortByEnds(aorig)
dih_name_orig = EncodeInteractionName(atom_names, section_is_auto)
dihedral2style[dih_name_orig] = 'class2'
if section_name == '#angle-torsion_3':
Fat = [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]
Fat[0][0] = tokens[6]
if len(tokens) > 7:
Fat[0][1] = tokens[7]
if len(tokens) > 8:
Fat[0][2] = tokens[8]
Fat[1][0] = Fat[0][0]
Fat[1][1] = Fat[0][1]
Fat[1][2] = Fat[0][2]
if len(tokens) > 9:
Fat[1][0] = tokens[9]
if len(tokens) > 10:
Fat[1][1] = tokens[10]
if len(tokens) > 11:
Fat[1][2] = tokens[11]
order_reversed = aorig[0] > aorig[-1]
if order_reversed:
Fat.reverse()
Fat[0].reverse()
Fat[1].reverse()
dihedral2class2_at_or[dih_name_orig] = [ [F_ij for F_ij in F_i] for F_i in Fat] #deep copy of Fat
dihedral2ver_at_or[dih_name_orig] = version
dihedral2ref_at_or[dih_name_orig] = reference
elif section_name == '#angle-angle-torsion_1':
Kaat = tokens[6]
dihedral2class2_aat_or[dih_name_orig] = Kaat
dihedral2ver_aat_or[dih_name_orig] = version
dihedral2ref_aat_or[dih_name_orig] = reference
else:
assert(False)
if not dih_name_orig in dihedral2params_or:
dihedral2params_or[dih_name_orig] = ['0.0', '0.0', '0.0', '0.0', '0.0', '0.0'] # default value
dihedral2ver_or[dih_name_orig] = version
dihedral2ref_or[dih_name_orig] = reference
dihedral2priority_or[dih_name_orig] = 0.0
elif ((len(tokens) > 8) and (section_name == '#out_of_plane')
and (improper_styles_selected & set(['cvff','out_of_plane']))):
if line.lstrip().find('!') == 0:
continue
improper_styles.add('cvff')
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names,_ignore = OOPImproperNameSort(tokens[2:6])
improper_name = EncodeInteractionName(atom_names, section_is_auto)
improper2ver[improper_name] = tokens[0]
improper2ref[improper_name] = tokens[1]
improper2priority[improper_name] = \
DetermineNumericPriority(section_is_auto,
tokens[2:6],
float(improper2ver[improper_name]))
improper2priority[improper_name] = \
(0,
section_is_auto,
improper2priority_or[improper_name])
K = tokens[6]
n = tokens[7]
chi0 = tokens[8]
improper2style[improper_name] = 'cvff'
improper2params[improper_name] = (Kchi+' '+n+' '+chi0)
improper_symmetry_subgraph = 'cenJswapIL'
#if improper_style_name == 'cvff':
# improper2params[improper_name] = (Kchi+' '+n+' '+chi0)
# improper_symmetry_subgraph = 'cenJswapIL'
elif ((len(tokens) > 7) and (section_name == '#wilson_out_of_plane')
and (improper_styles_selected and set(['class2','wilson_out_of_plane']))):
if line.lstrip().find('!') == 0:
continue
improper_styles.add('class2')
#improper_symmetry_subgraph = 'dihedrals_nosym' (<--no)
improper_symmetry_subgraph = 'cenJsortIKL'
sys.stderr.write('tokens = ' + str(tokens) + '\n')
version = tokens[0]
reference = tokens[1]
aorig = [a for a in map(EncodeAName, tokens[2:6])]
# To avoid redundancy, it is necessary to order the atoms
# in the interaction so that two equivalent ways of ordering
# the atoms in an improper interaction do not get misinterpreted
# as two different types of improper interactions. So we sort
# the 3 "leaf" atoms surrounding the central "hub" by name.
atom_names, permutation = Class2ImproperNameSort(tokens[2:6])
# This will effect the formula for the energy.
# (specifically the "chi0" parameter)
# When we lookup the various cross-term interactions for that
# same improper interaction, we will be sure to sort them
# in the same way to make sure those interactions are
# associated with the same improper interaction.
imp_name_orig = EncodeInteractionName(atom_names, section_is_auto)
improper2ver_or[imp_name_orig] = version
improper2ref_or[imp_name_orig] = reference
improper2priority_or[imp_name_orig] = \
DetermineNumericPriority(section_is_auto,
tokens[2:6],
float(improper2ver_or[imp_name_orig]))
#improper2priority[imp_name_orig] = \
# (0,
# section_is_auto,
# improper2priority_or[imp_name_orig])
K = tokens[6]
chi0 = tokens[7]
if Parity(permutation) != 0:
# Each time the order of a pair of atoms is swapped in
# the interaction, all 3 of the "X" (chi) angles change sign
# The formula for the ordinary term in the improper
# interaction is Ei = K*((Xijkl + Xkjli + Xljik)/3 - chi0)^2
# This formula is invariant if we change the sign of all
# Xijkl, Xkjli, Xljik, chi0
# Hence, we can account for a change in atom order by
# changing the sign of the "chi0" parameter.
# We calculate the "Parity" of the permutation (ie whether
# the permutation has an even or odd number of swaps)
# and multiply chi0 by -1 for each swap.
# It's not clear if this is necessary since in practice
# the "chi0" parameter is usually zero.
chi0 = str(-1.0*float(chi0)) # same as ('-' + chi0)
improper2style_or[imp_name_orig] = 'class2'
improper2params_or[imp_name_orig] = [K, chi0]
#improper2params[imp_name_orig] = K + ' ' + chi0
# default values for cross terms:
if not imp_name_orig in improper2class2_aa_or:
improper2class2_aa_or[imp_name_orig] = '0.0' #(default)
improper2ver_aa_or[imp_name_orig] = version
improper2ref_aa_or[imp_name_orig] = reference
# Initially, set all of the angle-angle cross terms to zero
# Start with the first cross term between aorig[0],aorig[1],aorig[2] & aorig[2],aorig[1],aorig[3]
improper2cross[imp_name_orig][ImCrossTermID([aorig[0],aorig[1],aorig[2],aorig[3]])] = '0.0'
# ...then cyclically permute the 3 "leaf" atoms (aorig[0], aorig[2], aorig[3]) around the "hub" atom (aorig[1])
improper2cross[imp_name_orig][ImCrossTermID([aorig[2],aorig[1],aorig[3],aorig[0]])] = '0.0'
improper2cross[imp_name_orig][ImCrossTermID([aorig[3],aorig[1],aorig[0],aorig[2]])] = '0.0'
elif ((len(tokens) > 6) and (section_name == '#angle-angle')
and (improper_styles_selected and set(['class2','wilson_out_of_plane']))):
if line.lstrip().find('!') == 0:
continue
improper_styles.add('class2')
version = tokens[0]
reference = tokens[1]
aorig = [a for a in map(EncodeAName, tokens[2:6])]
atom_names, permutation = Class2ImproperNameSort(tokens[2:6])
imp_name_orig = EncodeInteractionName(atom_names, section_is_auto)
improper2ver_aa_or[imp_name_orig] = version
improper2ref_aa_or[imp_name_orig] = reference
K = tokens[6]
improper2style_or[imp_name_orig] = 'class2'
if not imp_name_orig in improper2params_or:
improper2params_or[imp_name_orig] = ['0.0', '0.0']
improper2ver_or[imp_name_orig] = version
improper2ref_or[imp_name_orig] = reference
improper2priority_or[imp_name_orig] = 0.0
if not imp_name_orig in improper2cross:
# then initialize all of the cross terms to zero
improper2cross[imp_name_orig][ImCrossTermID([aorig[0],aorig[1],aorig[2],aorig[3]])] = '0.0'
# ...then cyclically permute the 3 "leaf" atoms (aorig[0], aorig[2], aorig[3]) around the "hub" atom (aorig[1])
improper2cross[imp_name_orig][ImCrossTermID([aorig[2],aorig[1],aorig[3],aorig[0]])] = '0.0'
improper2cross[imp_name_orig][ImCrossTermID([aorig[3],aorig[1],aorig[0],aorig[2]])] = '0.0'
#improper2class2_aa_or[imp_name_orig] = K (not needed)
improper2cross[imp_name_orig][ImCrossTermID(aorig)] = K
# Remaining sections: unsupported cross terms (flag a warning),
# molecule templates, and literature references.
elif (len(tokens) > 0) and (section_name == '#out_of_plane-out_of_plane'):
    if line.lstrip().find('!') == 0:
        continue
    # Not supported by LAMMPS; warn the user after parsing.
    display_OOP_OOP_warning = True
elif (len(tokens) > 0) and (section_name == '#torsion-torsion_1'):
    if line.lstrip().find('!') == 0:
        continue
    # Not supported by LAMMPS; warn the user after parsing.
    display_torsion_torsion_1_warning = True
elif section_name == '#templates':
    #if line.lstrip().find('!') == 0:
    #    continue
    lines_templates.append(line)
elif section_name == '#reference':
    if line.lstrip().find('!') == 0:
        continue
    if len(tokens_after_section_name) > 0:
        ref_number = int(tokens_after_section_name[0])
    if len(line.strip()) > 0:
        lines_references[ref_number].append(line)
"""
--- these next few lines of code appear to be unnecessary.
--- I'll probably delete this code in a later version
elif (len(tokens) > 3) and (section_name == '#hbond_definition'):
hbondID = tokens[1]
if tokens[2] == 'distance':
hbond2distance[hbondID] = tokens[3]
if tokens[2] == 'angle':
hbond2angle[hbondID] = tokens[3]
if tokens[2] == 'donors':
hbond2donors[hbondID] = map(EncodeAName, tokens[2:])
if tokens[2] == 'acceptors':
hbond2acceptors[hbondID] = map(EncodeAname(),tokens[2:])
"""
if display_OOP_OOP_warning:
    lines_warnings.append('###########################################################\n'
                          '# WARNING\n'
                          '# ALL \"out-of-plane_out-of_plane\" INTERACTIONS ARE IGNORED.\n'
                          '# CHECK THAT THESE TERMS ARE NEGLEGIBLY SMALL.\n'
                          '# \"out-of-plane_out-of_plane\" interactions are not yet supported in LAMMPS\n'
                          '# (...as of 2017-2-07) There is no way that moltemplate can produce\n'
                          '# LAMMPS compatible parameter files for these interactions.\n'
                          '###########################################################\n')
if display_torsion_torsion_1_warning:
    lines_warnings.append('###########################################################\n'
                          '# WARNING\n'
                          '# ALL \"torsion_torsion_1\" INTERACTIONS ARE IGNORED.\n'
                          '# CHECK THAT THESE TERMS ARE NEGLEGIBLY SMALL.\n'
                          '# \"torsion_torsion_1\" interactions are not yet supported in LAMMPS\n'
                          '# (...as of 2017-2-07) There is no way that moltemplate can produce\n'
                          '# LAMMPS compatible parameter files for these interactions.\n'
                          '###########################################################\n')
sys.stderr.write(' done.\n'
                 'building lookup tables...')
"""
--- these next few lines of code appear to be unnecessary.
--- I'll probably delete them eventually
if len(hbond2params) > 0:
sys.stdout.write('\n\n write_once("In Settings") {\n')
if hbond_style == 'hbond/dreiding/lj':
for hbondID, angle in hbond2angle:
hbond2params[hbondID] = hbond2distance[hbondID]+' '+hbond2angle[hbondID] ##<--this is not correct
for hbondID, params in hbond2params:
for donor in hbond2donors[hbondID]:
for acceptor in hbond2acceptors[hbondID]:
for hydrogen in hbond2hydrogens[hbondID]:
sys.stdout.write('pair_coeff @atom:'+donor+' @atom:'+acceptor+' '+hbond_style+' @atom:'+hydrogen+' i '+params+'\n')
sys.stdout.write(' } # (DREIDING style H-bond parameters)\n\n\n')
"""
sys.stderr.write(" done.\n")
sys.stderr.write("Trying all combinations of atom types...")
##################### POST-PROCESSING ########################
# For every parsed angle type, enumerate all concrete atom-type
# combinations consistent with the angle AND bond equivalence tables,
# and emit one fully-specified @angle entry per combination.
for ang_name_orig in angle2params_or:
    is_auto = (ang_name_orig.find('auto_') == 0)
    atom_names = ExtractANames(ang_name_orig)
    num_angles = 0
    atom_combos = [set([]), set([]), set([])]
    #*#atom_priorities = [{}, {}, {}]
    #*#atom_priorities[i][atom_name] = priority of i'th atom in interaction
    # We must consider every possible combination of atom types
    # which satisfy BOTH angle_equivalences and bond_equivalences.
    # ...AND we must consider BOTH regular AND auto equivalences.
    # For each combination generate a separate @angle interaction.
    # (I fear this will make the resulting .LT file large.)
    # Use different auto equivalence lookup tables for different
    # atoms in the interaction. (ie the "center" and "end" atoms)
    auto_angle2atom = [auto_angleend2atom,
                       auto_anglecenter2atom,
                       auto_angleend2atom]
# Collect, for each of the 3 positions in the angle, every concrete
# atom type equivalent to the force-field atom name at that position.
for i in range(0, 3):
    angle_atom_name = atom_names[i]
    sys.stderr.write('DEBUG: angle_atom_name = '+angle_atom_name+'\n')
    # BUGFIX: this test used the stale loop variable "section_is_auto"
    # left over from the file-parsing loops above; the enclosing
    # post-processing loop computes "is_auto" from ang_name_orig for
    # exactly this purpose (the parallel dihedral loop already uses it).
    if not is_auto:
        assert(angle_atom_name[-1] != '_')
        # assume regular equivalences when looking up atom types
        sys.stderr.write('DEBUG: equiv_angle2atom['+angle_atom_name+'] = '+
                         str(equiv_angle2atom[angle_atom_name])+'\n')
        for a in equiv_angle2atom[angle_atom_name]:
            atom_combos[i].add(a)
    else:
        # BUGFIX: was the misspelled name "ange_atom_name" (NameError).
        assert((angle_atom_name[-1] == '_') or (angle_atom_name[0] == '*'))
        # assume "auto" equivalences when looking up atom types
        # BUGFIX: the debug message indexed equiv_angle2atom[i] (a dict
        # keyed by atom NAME, so an int index raises KeyError); the
        # per-position auto table is auto_angle2atom[i], which is what
        # the loop below actually iterates.
        sys.stderr.write('DEBUG: auto_angle2atom['+str(i)+']['+angle_atom_name+'] = \n'
                         ' '+str(auto_angle2atom[i][angle_atom_name])+'\n')
        for a in auto_angle2atom[i][angle_atom_name]:
            atom_combos[i].add(a)
# Try every (a1,a2,a3) combination; keep only combinations for which
# both bonds (a1-a2, a2-a3) have a known rest length.
found_at_least_one = False
#*#for a1, a1priority in atom_priorities[0].items():
#*#  for a2, a2priority in atom_priorities[1].items():
#*#    for a3, a3priority in atom_priorities[2].items():
for a1 in atom_combos[0]:
    for a2 in atom_combos[1]:
        #sys.stderr.write('atom2auto_bond = '+str(atom2auto_bond)+'\n')
        bond_data1 = LookupBondLength(a1, a2,
                                      atom2equiv_bond,
                                      bond2r0,
                                      atom2auto_bond,
                                      bond2r0_auto)
        if bond_data1 == None: # Save time by continuing only if a
            continue           # bond was defined between a1 and a2
        for a3 in atom_combos[2]:
            bond_data2 = LookupBondLength(a2, a3,
                                          atom2equiv_bond,
                                          bond2r0,
                                          atom2auto_bond,
                                          bond2r0_auto)
            if bond_data2 == None:
                continue
            #bond lengths:
            r0s = [0.0, 0.0]
            #equivalent atom names used to lookup the bonds:
            batoms = [['', ''], ['', '']]
            r0s[0], batoms[0] = bond_data1
            r0s[1], batoms[1] = bond_data2
            found_at_least_one = True
            # NOTE(review): "section_is_auto" here is a stale leftover
            # from the parsing loops; "is_auto" (computed for this
            # ang_name_orig) is probably intended -- TODO confirm.
            ang_name_full = ang_name_orig + ',' + \
                EncodeInteractionName(batoms[0] + batoms[1],
                                      section_is_auto)
            #sys.stderr.write('DEBUG: (a1,a2,a3) = '+str((a1,a2,a3))+', '
            #                 ' (b11,b12,b21,b22) = '+str(batoms)+'\n')
            # NOTE(review): "reference" is also left over from the last
            # parsed line rather than looked up for this angle type.
            angle2ref_or[ang_name_full] = reference
            angle2style_or[ang_name_full] = 'class2'
            theta0_K_params = angle2params_or[ang_name_orig]
            angle2params[ang_name_full] = ' '.join(theta0_K_params)
            # bond-bond ("bb") cross term (default 0.0 if absent):
            if ang_name_orig in angle2class2_bb_or:
                Kbb = angle2class2_bb_or[ang_name_orig]
                assert(ang_name_orig in angle2ver_bb_or)
                assert(ang_name_orig in angle2ref_bb_or)
            else: #(use default values)
                Kbb = '0.0'
                angle2class2_bb_or[ang_name_orig] = Kbb
                angle2ver_bb_or[ang_name_orig] = angle2ver_or[ang_name_orig]
                angle2ref_bb_or[ang_name_orig] = angle2ref_or[ang_name_orig]
            angle2class2_bb[ang_name_full] = (Kbb+' '+r0s[0]+' '+r0s[1])
            angle2priority_bb = \
                DetermineNumericPriority(is_auto,
                                         batoms[0] + batoms[1],
                                         float(angle2ver_bb_or[ang_name_orig]))
            angle2ver_bb[ang_name_full] = angle2ver_bb_or[ang_name_orig]
            angle2ref_bb[ang_name_full] = angle2ref_bb_or[ang_name_orig]
            # bond-angle ("ba") cross term (default 0.0 if absent):
            if ang_name_orig in angle2class2_ba_or:
                Kba = angle2class2_ba_or[ang_name_orig]
                assert(ang_name_orig in angle2ver_ba_or)
                assert(ang_name_orig in angle2ref_ba_or)
            else: #(use default values)
                Kba = ['0.0', '0.0']
                angle2class2_ba_or[ang_name_orig] = Kba
                angle2ver_ba_or[ang_name_orig] = angle2ver_or[ang_name_orig]
                angle2ref_ba_or[ang_name_orig] = angle2ref_or[ang_name_orig]
            angle2class2_ba[ang_name_full] = (Kba[0]+' '+Kba[1]+' '+r0s[0]+' '+r0s[1])
            angle2sym_ba = (Kba[0] == Kba[1])
            angle2priority_ba = \
                DetermineNumericPriority(is_auto,
                                         batoms[0] + batoms[1],
                                         angle2ver_ba_or[ang_name_orig])
            angle2ver_ba[ang_name_full] = angle2ver_ba_or[ang_name_orig]
            angle2ref_ba[ang_name_full] = angle2ref_ba_or[ang_name_orig]
            # The emitted entry's version is the newest of its parts.
            version = max((angle2ver_or[ang_name_orig],
                           angle2ver_bb_or[ang_name_orig],
                           angle2ver_ba_or[ang_name_orig]))
            angle2ver[ang_name_full] = version
            angle2ref[ang_name_full] = angle2ref_or[ang_name_orig]
            angle2style[ang_name_full] = 'class2'
            angle2priority[ang_name_full] = \
                (1,
                 is_auto,
                 angle2priority_or[ang_name_orig],
                 angle2priority_bb,
                 angle2priority_ba)
            # Emit the debug trace only when a new entry was added.
            if num_angles < len(angle2params):
                sys.stderr.write('DEBUG: '+section_name[1:]+' r0 ('+ang_name_full+') = ('+r0s[0]+', '+r0s[1]+')\n')
                sys.stderr.write('DEBUG: len(angle2class2_bb) = '+str(len(angle2class2_bb))+'\n')
                sys.stderr.write('DEBUG: '+section_name[1:]+' r0 ('+ang_name_full+') = ('+r0s[0]+', '+r0s[1]+')\n')
                #sys.stderr.write('DEBUG: len(angle2class2_ba) = '+str(len(angle2class2_ba))+'\n')
                num_angles = len(angle2params)
            # NOTE(review): in the raise below, the literal ending in
            # ':\n' and the literal ', ' are adjacent, so Python's
            # implicit string concatenation makes .join() use the WHOLE
            # combined text as the separator -- confirm this message
            # formats as intended.
            if ((not angle2sym_ba)
                and
                (atom_names[0] == atom_names[2])):
                raise InputError('Error: Unsupported angle interaction: \"@angle:'+str(ang_name_orig)+'\"\n'
                                 ' This interaction has symmetric atom names:\n'
                                 ', '.join(atom_names)+'\n'
                                 ' and yet it lacks symmetry in the corresponding force field parameters.\n'
                                 ' (If this is not a mistake in the .frc file, then explain\n'
                                 ' why to andrew so he can fix this.)\n')
# No concrete atom-type combination had defined bond lengths for this
# angle type; record a warning instead of failing.
if not found_at_least_one:
    #raise InputError('Error: Undefined bonds for bond-bond interactions:\n'
    #                 ' '+str(atom_names)+'\n')
    # BUGFIX: corrected the typo "Undefied" in the warning message.
    lines_warnings.append('# WARNING: Undefined bond length for angle interaction: ' +
                          ' '.join(atom_names)+'\n')
#sys.stderr.write('bond_names = ' + str(bond_names) + '\n')
############ POST-PROCESSING DIHEDRALS ###########
# For every parsed dihedral type, enumerate all concrete atom-type
# combinations consistent with the dihedral, bond AND angle equivalence
# tables, and emit one fully-specified @dihedral entry per combination.
for dih_name_orig in dihedral2params_or:
    #assert(dih_name_orig in dihedral2class2_mbt_or)
    #assert(dih_name_orig in dihedral2class2_ebt_or)
    #assert(dih_name_orig in dihedral2class2_bb13_or)
    #assert(dih_name_orig in dihedral2class2_at_or)
    #assert(dih_name_orig in dihedral2class2_aat_or)
    is_auto = (dih_name_orig.find('auto_') == 0)
    atom_names = ExtractANames(dih_name_orig)
    num_dihedrals = 0
    atom_combos = [set([]), set([]), set([]), set([])]
    #*#atom_priorities = [{}, {}, {}, {}]
    #*#atom_priorities[i][atom_name] = priority of i'th atom in interaction
    # We must consider every possible combination of atom types
    # which satisfy all three:
    #    dihedral_equivalences
    #    bond_equivalences
    #    angle_equivalences
    # ...AND we must consider BOTH regular AND auto equivalences.
    # For each combination generate a separate @dihedral interaction.
    # (I fear this will make the resulting .LT file large.)
    # Use different auto equivalence lookup tables for different
    # atoms in the interaction. (ie the "center" and "end" atoms)
    auto_dihedral2atom = [auto_dihedralend2atom,
                          auto_dihedralcenter2atom,
                          auto_dihedralcenter2atom,
                          auto_dihedralend2atom]
# Collect, for each of the 4 positions in the dihedral, every concrete
# atom type equivalent to the force-field atom name at that position.
for i in range(0, 4):
    dihedral_atom_name = atom_names[i]
    sys.stderr.write('DEBUG: dihedral_atom_name = '+dihedral_atom_name+'\n')
    if not is_auto:
        assert(dihedral_atom_name[-1] != '_')
        # assume regular equivalences when looking up atom types
        sys.stderr.write('DEBUG: equiv_dihedral2atom['+dihedral_atom_name+'] = '+
                         str(equiv_dihedral2atom[dihedral_atom_name])+'\n')
        for a in equiv_dihedral2atom[dihedral_atom_name]:
            atom_combos[i].add(a)
    else:
        # BUGFIX: was the misspelled name "ange_atom_name" (NameError,
        # copy-pasted from the angle post-processing loop).
        assert((dihedral_atom_name[-1] == '_') or (dihedral_atom_name[0] == '*'))
        # assume "auto" equivalences when looking up atom types
        # BUGFIX: the debug message indexed equiv_dihedral2atom[i] (a
        # dict keyed by atom NAME, so an int index raises KeyError);
        # the per-position auto table is auto_dihedral2atom[i], which is
        # what the loop below actually iterates.
        sys.stderr.write('DEBUG: auto_dihedral2atom['+str(i)+']['+dihedral_atom_name+'] = \n'
                         ' '+str(auto_dihedral2atom[i][dihedral_atom_name])+'\n')
        for a in auto_dihedral2atom[i][dihedral_atom_name]:
            atom_combos[i].add(a)
# Try every (a1,a2,a3,a4) combination; prune early whenever a required
# bond (a1-a2, a2-a3, a3-a4) or angle (a1-a2-a3) is undefined.
found_at_least_one = False
#*#for a1, a1priority in atom_priorities[0].items():
#*#  for a2, a2priority in atom_priorities[1].items():
#*#    for a3, a3priority in atom_priorities[2].items():
#*#      for a4, a3priority in atom_priorities[3].items():
for a1 in atom_combos[0]:
    for a2 in atom_combos[1]:
        #sys.stderr.write('atom2auto_bond = '+str(atom2auto_bond)+'\n')
        bond_data12 = LookupBondLength(a1, a2,
                                       atom2equiv_bond,
                                       bond2r0,
                                       atom2auto_bond,
                                       bond2r0_auto)
        if bond_data12 == None:
            # Save time by only continuing if a bond was
            # found between a1 and a2
            continue
        for a3 in atom_combos[2]:
            bond_data23 = LookupBondLength(a2, a3,
                                           atom2equiv_bond,
                                           bond2r0,
                                           atom2auto_bond,
                                           bond2r0_auto)
            if bond_data23 == None:
                # Save time by only continuing if a bond was
                # found between a2 and a3
                continue
            angle_data123 = LookupRestAngle(a1, a2, a3,
                                            atom2equiv_angle,
                                            angle2theta0_or,
                                            [atom2auto_angleend,
                                             atom2auto_anglecenter,
                                             atom2auto_anglecenter],
                                            angle2theta0_auto_or)
            if angle_data123 == None:
                # Save time by only continuing if an angle was
                # found between a1, a2, a3
                continue
            for a4 in atom_combos[3]:
                bond_data34 = LookupBondLength(a3, a4,
                                               atom2equiv_bond,
                                               bond2r0,
                                               atom2auto_bond,
                                               bond2r0_auto)
                if bond_data34 == None:
                    # Save time by only continuing if a bond was
                    # found between a3 and a4
                    continue
#rest bond lengths:
# BUGFIX: this list was written as [0.0, 0.0, 0,0] -- a comma typo
# producing FOUR elements (0.0, 0.0, 0, 0) instead of three rest
# lengths.  All three slots are overwritten below, but the stray 4th
# element would survive into any later len()/iteration of r0s.
r0s = [0.0, 0.0, 0.0]
#equivalent atom names used to lookup the bonds:
batoms = [['', ''], ['', ''], ['', '']]
r0s[0], batoms[0] = bond_data12
r0s[1], batoms[1] = bond_data23
r0s[2], batoms[2] = bond_data34
# Look up the second rest angle (a2-a3-a4); skip combos lacking it.
angle_data234 = LookupRestAngle(a2, a3, a4,
                                atom2equiv_angle,
                                angle2theta0_or,
                                [atom2auto_angleend,
                                 atom2auto_anglecenter,
                                 atom2auto_anglecenter],
                                angle2theta0_auto_or)
if angle_data234 == None:
    # Save time by only continuing if an angle was
    # found between a2, a3, a4
    continue
#rest angles:
theta0s = [0.0, 0.0]
#equivalent atom names used to lookup angles:
aatoms = [['', '',''], ['', '','']]
theta0s[0], aatoms[0] = angle_data123
theta0s[1], aatoms[1] = angle_data234
found_at_least_one = True
# NOTE(review): "aorig" here is a stale leftover from the file-parsing
# loops above; it is not derived from the current (a1,a2,a3,a4).
# Confirm whether this reversal test should compare the current atom
# names instead.
order_reversed = aorig[0] > aorig[-1]
if order_reversed:
    theta0s.reverse()
    aatoms.reverse()
    aatoms[0].reverse()
    aatoms[1].reverse()
#if is_auto:
dih_name_full = dih_name_orig + ',' + \
    EncodeInteractionName(batoms[0] + batoms[1] + batoms[2] +
                          aatoms[0] + aatoms[1],
                          False)
#else:
#    assert(batoms[0][1] == batoms[1][0])
#    assert(batoms[1][1] == batoms[2][0])
#    assert(aatoms[0][1] == aatoms[1][0])
#    assert(aatoms[0][2] == aatoms[1][1])
#    dih_name_full = dih_name_orig + ',' + \
#        EncodeInteractionName([batoms[0][0], batoms[0][1]
#                               batoms[2][0], batoms[2][1],
#                               aatoms[0][0], aatoms[0][1],
#                               aatoms[0][2], aatoms[1][0]],
#                              False)
found_at_least_one = True
########### Fourier terms ###########
#if dih_name_orig in dihedral2param_or:
V_phi0_params = dihedral2params_or[dih_name_orig]
dihedral2params[dih_name_full] = ' '.join(V_phi0_params)
#else:
#    dihedral2params[dih_name_full] = '0.0 0.0 0.0 0.0 0.0 0.0'
########### "mbt", "ebt", and "aat" terms ###########
# "mbt" terms:
if dih_name_orig in dihedral2class2_mbt_or:
    Fmbt = dihedral2class2_mbt_or[dih_name_orig]
else:
    # default: zero coefficients, inheriting version/reference
    Fmbt = ['0.0', '0.0', '0.0']
    dihedral2class2_mbt_or[dih_name_orig] = Fmbt
    dihedral2ver_mbt_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
    dihedral2ref_mbt_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
# mbt parameter string is (F1 F2 F3 r2) where r2 is the middle bond.
dihedral2class2_mbt[dih_name_full] = \
    (Fmbt[0]+' '+Fmbt[1]+' '+Fmbt[2]+' '+r0s[1])
dihedral2priority_mbt = \
    DetermineNumericPriority(is_auto,
                             batoms[1],
                             float(dihedral2ver_mbt_or[dih_name_orig]))
dihedral2ver_mbt[dih_name_full] = dihedral2ver_mbt_or[dih_name_orig]
dihedral2ref_mbt[dih_name_full] = dihedral2ref_mbt_or[dih_name_orig]
# "ebt" terms:
if dih_name_orig in dihedral2class2_ebt_or:
    Febt = dihedral2class2_ebt_or[dih_name_orig]
    # symmetric if both end bonds share identical coefficients
    dihedral2sym_ebt = ((Febt[0][0] == Febt[1][0]) and
                        (Febt[0][1] == Febt[1][1]) and
                        (Febt[0][2] == Febt[1][2]))
    #and (r0s[0] == r0s[2]))
else:
    Febt = [['0.0','0.0','0.0'], ['0.0','0.0','0.0']]
    dihedral2class2_ebt_or[dih_name_orig] = Febt
    dihedral2ver_ebt_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
    dihedral2ref_ebt_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
    dihedral2sym_ebt = True
# ebt parameter string is (B1 B2 B3 C1 C2 C3 r1 r3).
dihedral2class2_ebt[dih_name_full]= (Febt[0][0] + ' ' +
                                     Febt[0][1] + ' ' +
                                     Febt[0][2] + ' ' +
                                     Febt[1][0] + ' ' +
                                     Febt[1][1] + ' ' +
                                     Febt[1][2] + ' ' +
                                     r0s[0]+' '+r0s[2])
dihedral2priority_ebt = \
DetermineNumericPriority(is_auto,
batoms[0] + batoms[2],
float(dihedral2ver_ebt_or[dih_name_orig]))
dihedral2ver_ebt[dih_name_full] = dihedral2ver_ebt_or[dih_name_orig]
dihedral2ref_ebt[dih_name_full] = dihedral2ref_ebt_or[dih_name_orig]
#(Note: large atom_priority number <==> low priority
# Only one of the atom priority numbers should be > 0)
# "bb13" terms:
if dih_name_orig in dihedral2class2_bb13_or:
Kbb13 = dihedral2class2_bb13_or[dih_name_orig]
#dihedral2sym_bb13 = (r0s[0] == r0s[2])
dihedral2sym_bb13 = True
else:
Kbb13 = '0.0'
dihedral2class2_bb13_or[dih_name_orig] = Kbb13
dihedral2ver_bb13_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_bb13_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2sym_bb13 = True
dihedral2class2_bb13[dih_name_full] = (Kbb13+' '+r0s[0]+' '+r0s[2])
dihedral2priority_bb13 = \
DetermineNumericPriority(is_auto,
batoms[0] + batoms[2],
float(dihedral2ver_bb13_or[dih_name_orig]))
dihedral2ver_bb13[dih_name_full] = dihedral2ver_bb13_or[dih_name_orig]
dihedral2ref_bb13[dih_name_full] = dihedral2ref_bb13_or[dih_name_orig]
########### "at" and "aat" terms ###########
# "at" terms:
if dih_name_orig in dihedral2class2_at_or:
Fat = dihedral2class2_at_or[dih_name_orig]
dihedral2sym_at = ((Fat[0][0] == Fat[1][0]) and
(Fat[0][1] == Fat[1][1]) and
(Fat[0][2] == Fat[1][2]))
#and (theta0[0] == theta0[1]))
else:
Fat = [['0.0','0.0','0.0'], ['0.0','0.0','0.0']]
dihedral2class2_at_or[dih_name_orig] = Fat
dihedral2ver_at_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_at_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2sym_at = True
dihedral2class2_at[dih_name_full] = \
(Fat[0][0] + ' ' +
Fat[0][1] + ' ' +
Fat[0][2] + ' ' +
Fat[1][0] + ' ' +
Fat[1][1] + ' ' +
Fat[1][2] + ' ' +
theta0s[0] + ' ' +
theta0s[1])
dihedral2priority_at = \
DetermineNumericPriority(is_auto,
aatoms[0] + aatoms[1],
float(dihedral2ver_at_or[dih_name_orig]))
dihedral2ver_at[dih_name_full] = dihedral2ver_at_or[dih_name_orig]
dihedral2ref_at[dih_name_full] = dihedral2ref_at_or[dih_name_orig]
# "aat" terms:
if dih_name_orig in dihedral2class2_aat_or:
Kaat = dihedral2class2_aat_or[dih_name_orig]
#dihedral2sym_aat = (theta0[0] == theta0[1])
dihedral2sym_aat = True
else:
Kaat = '0.0'
dihedral2class2_aat_or[dih_name_orig] = Kaat
dihedral2ver_aat_or[dih_name_orig] = dihedral2ver_or[dih_name_orig]
dihedral2ref_aat_or[dih_name_orig] = dihedral2ref_or[dih_name_orig]
dihedral2sym_aat = True
dihedral2class2_aat[dih_name_full] = \
(Kaat+' '+theta0s[0]+' '+theta0s[1])
dihedral2priority_aat = \
DetermineNumericPriority(is_auto,
aatoms[0] + aatoms[1],
float(dihedral2ver_aat_or[dih_name_orig]))
dihedral2ver_aat[dih_name_full] = dihedral2ver_aat_or[dih_name_orig]
dihedral2ref_aat[dih_name_full] = dihedral2ref_aat_or[dih_name_orig]
if len(dihedral2params) > num_dihedrals:
sys.stderr.write('DEBUG: dihedral['+dih_name_full+']:\n'
'(r12,r23,r34) = ('
+r0s[0]+','+r0s[1]+','+r0s[2]+') \n'
'(theta123,theta234) = ('
+theta0s[0]+','+theta0s[1]+') \n')
sys.stderr.write('DEBUG: num_dihedrals = len(dihedral2params) = '
+str(len(dihedral2params))+'\n')
version = max((dihedral2ver_or[dih_name_orig],
dihedral2ver_mbt_or[dih_name_orig],
dihedral2ver_ebt_or[dih_name_orig],
dihedral2ver_bb13_or[dih_name_orig],
dihedral2ver_at_or[dih_name_orig],
dihedral2ver_aat_or[dih_name_orig]))
dihedral2style[dih_name_full] = 'class2'
dihedral2ver[dih_name_full] = version
dihedral2ref[dih_name_full] = dihedral2ref_or[dih_name_orig]
dihedral2priority[dih_name_full] = \
(1,
is_auto,
dihedral2priority_or[dih_name_orig],
dihedral2priority_mbt,
dihedral2priority_ebt,
dihedral2priority_bb13,
dihedral2priority_at,
dihedral2priority_aat)
num_dihedrals = len(dihedral2params)
if ((not (dihedral2sym_ebt and
#dihedral2sym_mbt and
# (note: symmetry doesn't make sense for mbt)
dihedral2sym_at and
dihedral2sym_aat and
dihedral2sym_bb13))
and
((atom_names[0] == atom_names[3]) and
(atom_names[1] == atom_names[2]))):
raise InputError('Error: Unsupported dihedral interaction: \"@dihedral:'+str(dih_name_orig)+'\"\n'
' This interaction has symmetric atom names:\n'+
', '.join(atom_names)+'\n'+
' and yet it lacks symmetry in the corresponding force field parameters.\n'+
' (If this is not a mistake in the .frc file, then explain\n'+
' why to andrew so he can fix this.)\n')
#sys.stderr.write('DEBUG: number of interactions = '+str(len(dihedral2class2_bb))+'\n')
if not found_at_least_one:
#raise InputError('Error: Undefined bonds for bond-bond interactions:\n'
# ' '+str(atom_names)+'\n')
lines_warnings.append('# WARNING: Undefined bond length (r0) or rest angle (theta0) for\n'+
'# the dihedral interaction between: ' +
' '.join(atom_names)+'\n')
#sys.stderr.write('bond_names = ' + str(bond_names) + '\n')
############ POST-PROCESSING IMPROPERS ###########
for imp_name_orig in improper2cross:
assert(imp_name_orig in improper2params_or)
#assert(imp_name_orig in improper2class2_aa_or)
is_auto = (imp_name_orig.find('auto') == 0)
atom_names = ExtractANames(imp_name_orig)
num_impropers = 0
atom_combos = [set([]), set([]), set([]), set([])]
#*#atom_priorities = [{}, {}, {}, {}]
#*#atom_priorities[i][atom_name] = priority of i'th atom in interaction
# We must consider every possible combination of atom types
# which satisfy both:
# improper_equivalences
# angle_equivalences
# ...AND we must consider BOTH regular AND auto equivalences.
# For each combination generate a separate @improper interaction.
# (I fear this will make the resulting .LT file large.)
# Use different auto equivalence lookup tables for different
# atoms in the interaction. (ie the "center" and "end" atoms)
auto_improper2atom = [auto_improperend2atom,
auto_impropercenter2atom,
auto_improperend2atom,
auto_improperend2atom]
for i in range(0, 4):
improper_atom_name = atom_names[i]
sys.stderr.write('DEBUG: improper_atom_name = '+improper_atom_name+'\n')
if not is_auto:
assert(improper_atom_name[-1] != '_')
# assume regular equivalences when looking up atom types
sys.stderr.write('DEBUG: equiv_improper2atom['+improper_atom_name+'] = '+
str(equiv_improper2atom[improper_atom_name])+'\n')
for a in equiv_improper2atom[improper_atom_name]:
atom_combos[i].add(a)
else:
assert((improper_atom_name[-1] == '_') or (improper_atom_name[0] == 'X'))
# assume "auto" equivalences when looking up atom types
sys.stderr.write('DEBUG: auto_improper2atom['+str(i)+']['+improper_atom_name+'] = \n'
' '+str(auto_improper2atom[i][improper_atom_name])+'\n')
for a in auto_improper2atom[i][improper_atom_name]:
atom_combos[i].add(a)
is_auto = IsAutoInteraction(imp_name_orig) # is this an "auto" interaction?
atom_names = ExtractANames(imp_name_orig) # names of all 4 atoms
lnames = [atom_names[0], atom_names[2], atom_names[3]] # names of "leaf" atoms
#M1 = improper2cross[imp_name_orig][ 2 ]
#M2 = improper2cross[imp_name_orig][ 0 ]
#M3 = improper2cross[imp_name_orig][ 3 ]
#try:
M1 = improper2cross[imp_name_orig][ImCrossTermID([atom_names[0],
atom_names[1],
atom_names[2],
atom_names[3]])]
#except KeyError:
# M1 = '0.0'
#try:
M2 = improper2cross[imp_name_orig][ImCrossTermID([atom_names[2],
atom_names[1],
atom_names[0],
atom_names[3]])]
#except KeyError:
# M2 = '0.0'
#try:
M3 = improper2cross[imp_name_orig][ImCrossTermID([atom_names[0],
atom_names[1],
atom_names[3],
atom_names[2]])]
#except KeyError:
# M3 = '0.0'
# ###### Symmetry: ######
# Unfortunately, it's time to wade into the messy issue of symmetry.
# We desire a way to detect whether an improper interaction
# between 4 atoms is invariant with respect to atom reordering
# of the 3 peripheral "leaf" atoms which surround the central atom.
# In principle, any rearrangement of atoms would require a separate
# class2 improper interaction. However, in some cases, when the
# parameters for these rearrangements are symmetric, we can detect
# that and warn moltemplate that it is not necessary to generate new
# improper interactions for every conceivable permutation of these
# atoms. Figuring out when it is safe to do that is a headache.
# (...but it's necessary. Otherwise each junction in the molecule
# will generate 3*2*1=6 improper interactions which are usually
# redundant. This will slow down the simulation significantly
# and may make it difficult to compare the resulting LAMMPS
# input files with those generated by other tools like msi2lmp.)
#
# To make this easier, I store the parameters in arrays which
# are arranged in a more symmetric way
M = [0.0, 0.0, 0.0]
theta0 = [0.0, 0.0, 0.0]
# noti3[i] = the sorted tuple of integers from the
# set {0,1,2} which remain after deleting i
noti3 = ((1,2), (0,2), (0,1))
i_neigh = [ ([0,2,3][ noti3[i][0] ], # neighbor leaves of ith leaf
[0,2,3][ noti3[i][1] ]) for i in range(0,3)]
for i in range(0, 3):
# You will notice the pattern "[0,2,3][i]" appears often in the
# code below because for class 2 force-fields, the second atom
# (with index 1) is the central atom ("hub" atom), and the three
# that surround it ("leaf" atoms) have indices 0,2,3. I want
# to skip over the central atoms and loop over the leaf atoms
imTermID = ImCrossTermID([atom_names[ i_neigh[i][0] ],
atom_names[ 1 ],
atom_names[ [0,2,3][i] ],
atom_names[ i_neigh[i][1] ]])
M[i] = float(improper2cross[imp_name_orig][imTermID])
##i_leaf = [0,2,3][i]
##M[i] = float(improper2cross[imp_name_orig][ i_leaf ])
#angle_name_l = SortByEnds([atom_names[i_neigh[i][0]],
# atom_names[ 1 ],
# atom_names[i_neigh[i][1]]])
#angle_name = EncodeInteractionName(angle_name_l, is_auto)
#theta0[i] = float(angle2theta0_or[angle_name])
for i in range(0, 3):
if (M[ noti3[i][0] ] == M[ noti3[i][1] ]):
#and (theta0[ noti3[i][0] ] == theta0[ noti3[i][1] ])):
# Then it is safe to swap the order of these two atoms in
# the list of atoms when looking up force-field parameters
improper2sym[imp_name_orig].add(i_neigh[i][0])
improper2sym[imp_name_orig].add(i_neigh[i][1])
# Later, I can use these to decide whether or not I need to
# change the default script with symmetry rules. (I'm hoping
# that "cenJsortIKL.py" should work in most cases.)
# CONTINUEHERE: FIGURE OUT WHETHER TO WORRY ABOUT improper2sym
else:
if atom_names[i_neigh[i][0]] == atom_names[i_neigh[i][1]]:
raise InputError('Error: Unsupported improper interaction: \"@improper:'+str(imp_name_orig)+'\"\n'
' This interaction has matching atom aliases:\n'
' (@atom:'+str(atom_names[i_neigh[i][0]])+
', @atom:'+str(atom_names[i_neigh[i][1]])+')\n'
' and yet it lacks symmetry in the corresponding force field parameters.\n'
' (If this is not a mistake in the .frc file, then ask andrew to\n'
' fix this limitation.)\n')
found_at_least_one = False
#*#for a1, a1priority in atom_priorities[0].items():
#*# for a2, a2priority in atom_priorities[1].items():
#*# for a3, a3priority in atom_priorities[2].items():
#*# for a4, a3priority in atom_priorities[3].items():
for a1 in sorted(list(atom_combos[0])):
for a2 in sorted(list(atom_combos[1])):
sys.stderr.write('DEBUG: improper '+imp_name_orig+' substitutions: '+a1+','+a2+',...\n')
for a3 in sorted(list(atom_combos[2])):
#(Note: sorting "atom_combos" makes it faster and easier
# to follow the loop's progress. This nested loop can be very slow.)
theta0s = ['0.0', '0.0', '0.0']
aatoms = [['', '',''], ['', '',''], ['', '', '']]
# Collect information from the different terms in a class2 improper:
# http://lammps.sandia.gov/doc/improper_class2.html
# Loop over the neighbors of the central atom in each improper
# interaction and collect all the Mi and Ti parameters. Collect
# them in the order they appear in the formula for the Eaa
# term as it appears in the documentation for improper_style class2:
#
# http://lammps.sandia.gov/doc/improper_class2.html
#
# Eaa = M1 (Tijk - T0)(Tkjl - T2) + #common leaf node: k (index 2)
# M2 (Tijk - T0)(Tijl - T1) + #common leaf node: i (index 0)
# M3 (Tijl - T1)(Tkjl - T2) #common leaf node: l (index 3)
# (I'm trying to match the variable names used in this web page
# I wish the author had chosen the M1,M2,M3, T1,T2,T3 order in more
# symmetric way, or at least in a way that makes more sense to me.)
#angle_name_l = SortByEnds([atom_names[0], atom_names[1], atom_names[2]])
#angle_name = EncodeInteractionName(angle_name_l, is_auto)
#theta01 = angle2theta0_or[angle_name]
angle_data = LookupRestAngle(a1, a2, a3,
atom2equiv_angle,
angle2theta0_or,
[atom2auto_improperend,
atom2auto_impropercenter,
atom2auto_improperend],
angle2theta0_auto_or)
if angle_data == None:
# Save time by only continuing if an angle was
# found between a1, a2, a3
continue
theta0s[0], aatoms[0] = angle_data
for a4 in sorted(list(atom_combos[3])):
theta0s[1] = theta0s[2] = '0.0'
aatoms[1] = aatoms[2] = ['', '','']
#angle_name_l = SortByEnds(aatoms[0])
#angle_name = EncodeInteractionName(angle_name_l[0], is_auto)
#theta02 = angle2theta0_or[angle_name]
angle_data = LookupRestAngle(a1, a2, a4,
atom2equiv_angle,
angle2theta0_or,
[atom2auto_improperend,
atom2auto_impropercenter,
atom2auto_improperend],
angle2theta0_auto_or)
if angle_data == None:
# Save time by only continuing if an angle was
# found between a1, a2, a4
continue
theta0s[1], aatoms[1] = angle_data
#angle_name_l = SortByEnds(aatoms[1])
#angle_name = EncodeInteractionName(angle_name_l, is_auto)
#theta03 = angle2theta0_or[angle_name]
angle_data = LookupRestAngle(a3, a2, a4,
atom2equiv_angle,
angle2theta0_or,
[atom2auto_improperend,
atom2auto_impropercenter,
atom2auto_improperend],
angle2theta0_auto_or)
                        if angle_data == None:
                            # Save time by only continuing if an angle was
                            # found between a3, a2, a4
                            continue
theta0s[2], aatoms[2] = angle_data
# The following asserts checks that the two theta0s
# are defined whenever the corresponding M is defined.
# (Note: The order is LAMMPS-implementation specific.
# See http://lammps.sandia.gov/doc/improper_class2.html)
assert((float(theta0s[0]) != 0) or (float(M1) == 0))
assert((float(theta0s[2]) != 0) or (float(M1) == 0))
assert((float(theta0s[0]) != 0) or (float(M2) == 0))
assert((float(theta0s[1]) != 0) or (float(M2) == 0))
assert((float(theta0s[1]) != 0) or (float(M3) == 0))
assert((float(theta0s[2]) != 0) or (float(M3) == 0))
#angle_name_l = SortByEnds(aatoms[2])
#angle_name = EncodeInteractionName(angle_name_l, is_auto)
imp_name_full = imp_name_orig + ',' + \
EncodeInteractionName(aatoms[0] + aatoms[1] + aatoms[2],
False)
#if imp_name_orig in improper2params_or[imp_name_orig]:
improper2params[imp_name_full] = ' '.join(improper2params_or[imp_name_orig])
#else:
# improper2params[imp_name_full] = '0.0 0.0'
#if imp_name_orig in improper2cross:
improper2class2_aa[imp_name_full] = \
(str(M1)+' '+str(M2)+' '+str(M3)+' '+
str(theta0s[0])+' '+str(theta0s[1])+' '+str(theta0s[2]))
#else:
# improper2class2_aa[imp_name_full] = '0.0 0.0 0.0 0.0 0.0 0.0'
# improper2ver_aa_or[imp_name_orig] = improper2ver_or[imp_name_orig]
# improper2ref_aa_or[imp_name_orig] = improper2ref_or[imp_name_orig]
improper2priority_aa = \
DetermineNumericPriority(is_auto,
aatoms[0] + aatoms[1] + aatoms[2],
float(improper2ver_aa_or[imp_name_orig]))
improper2ver_aa[imp_name_full] = improper2ver_aa_or[imp_name_orig]
improper2ref_aa[imp_name_full] = improper2ref_aa_or[imp_name_orig]
version = max((improper2ver_or[imp_name_orig],
improper2ver_aa_or[imp_name_orig]))
improper2style[imp_name_full] = 'class2'
improper2ref[imp_name_full] = improper2ref_or[imp_name_orig]
improper2ver[imp_name_full] = version
improper2priority[imp_name_full] = \
(1,
is_auto,
improper2priority_or[imp_name_orig],
improper2priority_aa)
if len(improper2params) > num_impropers:
sys.stderr.write('DEBUG: improper['+imp_name_full+']:\n'
'theta0 = ('
+theta0s[0]+','+theta0s[1]+','+theta0s[2]+')\n')
sys.stderr.write('DEBUG: num_impropers = len(improper2params) = '
+str(len(improper2params))+'\n')
num_impropers = len(improper2params)
sys.stderr.write("done\n")
sys.stderr.write("Converting to moltemplate format...\n")
##################### BEGIN WRITING FILE #####################
sys.stdout.write("# This file was generated automatically using:\n")
sys.stdout.write("# " + g_program_name + " " + " ".join(sys.argv[1:]) + "\n")
sys.stdout.write("\n\n")
sys.stdout.write(ffname + " {\n\n")
sys.stdout.write("\n"
" # AtomType Mass # \"Description\" (version, reference)\n\n")
sys.stdout.write(" write_once(\"Data Masses\") {\n")
for atype in atom2mass:
sys.stdout.write(" @atom:" + atype + " " + str(atom2mass[atype]))
sys.stdout.write(" # ")
if atype in atom2element:
sys.stdout.write(atom2element[atype] + ", ")
#sys.stdout.write(atom2descr[atype])
sys.stdout.write("\"" + atom2descr[atype] + "\"")
sys.stdout.write(" (")
if atype in atom2numbonds:
sys.stdout.write("nbonds="+str(atom2numbonds[atype])+", ")
sys.stdout.write("ver=" + atom2ver[atype] +
", ref=" + atom2ref[atype])
sys.stdout.write(")\n")
sys.stdout.write(" } #(end of atom masses)\n\n\n")
sys.stdout.write(" # ---------- EQUIVALENCE CATEGORIES for bonded interaction lookup ----------\n"
" # Each type of atom has a separate ID used for looking up bond parameters\n"
" # and a separate ID for looking up 3-body angle interaction parameters\n"
" # and a separate ID for looking up 4-body dihedral interaction parameters\n"
" # and a separate ID for looking up 4-body improper interaction parameters\n"
#" # (This is because there are several different types of sp3 carbon atoms\n"
#" # which have the same torsional properties when within an alkane molecule,\n"
#" # for example. If they share the same dihedral-ID, then this frees us\n"
#" # from being forced define separate dihedral interaction parameters\n"
#" # for all of them.)\n"
" # The complete @atom type name includes ALL of these ID numbers. There's\n"
" # no need to force the end-user to type the complete name of each atom.\n"
" # The \"replace\" command used below informs moltemplate that the short\n"
" # @atom names we have been using abovee are equivalent to the complete\n"
" # @atom names used below:\n\n")
for atype in atom2ffid:
#ffid = atype + "_ffid" + atom2ffid[atype]
sys.stdout.write(" replace{ @atom:" + atype +
" @atom:" + atom2ffid[atype] + " }\n")
sys.stdout.write("\n\n\n\n")
sys.stdout.write(" # --------------- Non-Bonded Interactions: ---------------------\n"
" # Syntax:\n"
" # pair_coeff AtomType1 AtomType2 pair_style_name parameters...\n\n")
sys.stdout.write(" write_once(\"In Settings\") {\n")
for atype in pair2params:
assert(atype in pair2style)
if IsAutoInteraction(bond_name):
assert(atype in atom2auto_pair)
if include_auto_equivalences:
sys.stdout.write(' pair_coeff @atom:*,ap' + atom2auto_pair[atype] +
',aq*,ab*,aae*,aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap' + atom2auto_pair[atype] +
',aq*,ab*,aae*,aac*,ade*,adc*,aie*,aic* ' +
pair2style[atype] + ' ' +
pair2params[atype] +
' # (ver=' + pair2ver[atype] +
', ref=' +pair2ref[atype] + ')\n')
else:
continue
else:
assert(atype in atom2equiv_pair)
sys.stdout.write(' pair_coeff ' +
'@atom:*,p' + atom2equiv_pair[atype] + ',b*,a*,d*,i* ' +
'@atom:*,p' + atom2equiv_pair[atype] + ',b*,a*,d*,i* ' +
pair2style[atype] + ' ' +
pair2params[atype] +
' # (ver=' + pair2ver[atype] +
', ref=' +pair2ref[atype] + ')\n')
sys.stdout.write(" } #(end of pair_coeffs)\n\n\n\n")
################# Print Charge By Bond Interactions ##################
charge_pair_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(charge_pair_priority.items())],
key=itemgetter(1),
reverse=True)]
if len(charge_pair_priority) > 0:
sys.stdout.write(" # ---------- Charge By Bond (a.k.a. \"bond equivalences\") ----------\n")
# Print rules for generating (2-body) "bond" interactions:
sys.stdout.write('\n\n\n'
' write_once("Data Charge By Bond") {\n')
for bond_name in charge_pair_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(bond_name)]
# Did the user ask us to include "auto" interactions?
if IsAutoInteraction(bond_name):
if include_auto_equivalences:
sys.stdout.write(' @atom:*,ap*,aq' + anames[0] +
',ab*,aae*,aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap*,aq' + anames[1] +
',ab*,aae*,aac*,ade*,adc*,aie*,aic*' +
' ' + bond2chargepair[bond_name] +
" # (ver=" + charge_pair_ver[bond_name] +
", ref=" + charge_pair_ref[bond_name] + ")\n")
else:
continue
else:
sys.stdout.write(' @atom:*,p*,b' + anames[0] + ',a*,d*,i* ' +
' @atom:*,p*,b' + anames[1] + ',a*,d*,i* ' +
' ' + bond2chargepair[bond_name] +
" # (ver=" + charge_pair_ver[bond_name] +
", ref=" + charge_pair_ref[bond_name] + ")\n")
sys.stdout.write(' } #(end of Charge by Bond (bond equivalences))\n\n'
'\n\n\n\n')
################# Print 2-body Bond Interactions ##################
bond_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(bond2priority.items())],
key=itemgetter(1),
reverse=True)]
if len(bond2priority) > 0:
sys.stdout.write(" # --------------- Bond Interactions: ---------------------\n")
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (2-body) "bond" interactions: --\n'
' # BondType AtomType1 AtomType2\n')
sys.stdout.write('\n'
' write_once("Data Bonds By Type')
if bond_symmetry_subgraph != '':
sys.stdout.write(' ('+bond_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for bond_name in bond_names_priority_high_to_low:
if not (bond2style[bond_name] in
bond_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(bond_name)]
# Did the user ask us to include "auto" interactions?
if IsAutoInteraction(bond_name):
if include_auto_equivalences:
sys.stdout.write(' @bond:' + bond_name + ' ' +
' @atom:*,ap*,aq*,ab' + anames[0] +
',aae*,aac*,ade*,adc*,aie*,aic*' +
' @atom:*,ap*,aq*,ab' + anames[1] +
',aae*,aac*,ade*,adc*,aie*,aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @bond:' + bond_name + ' ' +
' @atom:*,b' + anames[0] + ',a*,d*,i* ' +
' @atom:*,b' + anames[1] + ',a*,d*,i* ' +
'\n')
sys.stdout.write(' } # end of "Data Bonds By Type" section\n'
'\n')
# Print the force-field parameters for these bond interactions:
sys.stdout.write('\n\n'
' # ------------ Bond Parameters: ----------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for bond_style in bond_styles:
if not (bond_style in bond_styles_selected):
continue
sys.stdout.write(' # '+bond_style2docs[bond_style]+'\n')
sys.stdout.write('\n'
' # Syntax: \n'
' # bond_coeff BondTypeName BondStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for bond_name in bond_names_priority_high_to_low:
if not (bond2style[bond_name] in
bond_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
if (IsAutoInteraction(bond_name) and
(not include_auto_equivalences)):
continue
sys.stdout.write(' bond_coeff @bond:'+bond_name+' '+
bond2style[bond_name] + ' ' +
bond2params[bond_name] +
" # (ver=" + bond2ver[bond_name] +
", ref=" +bond2ref[bond_name] + ")\n")
sys.stdout.write(' } # end of bond_coeff commands\n'
'\n\n')
################# Print 3-body Angle Interactions ##################
ang_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(angle2priority.items())],
key=itemgetter(1),
reverse=True)]
if len(angle2priority) > 0:
sys.stdout.write(" # --------------- Angle Interactions: ---------------------\n")
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (3-body) "angle" interactions: --\n'
' # AngleType AtomType1 AtomType2 AtomType3 [BondType1 BondType2]\n')
sys.stdout.write('\n'
' write_once("Data Angles By Type')
if angle_symmetry_subgraph != '':
sys.stdout.write(' ('+angle_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for angle_name in ang_names_priority_high_to_low:
if not (angle2style[angle_name] in
angle_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(angle_name)]
#if (len(anames) == 3) and angle2style[angle_name] == 'class2':
# continue
bnames = [[a for a in map(EncodeAName, anames[3:5])],
[a for a in map(EncodeAName, anames[5:7])]]
#anm = [a for a in map(EncodeAName, anames)]
angle_is_auto = IsAutoInteraction(angle_name):
bond_is_auto1 = IsAutoInteraction(bnames[0][0]+','+bnames[0][1])
bond_is_auto2 = IsAutoInteraction(bnames[1][0]+','+bnames[1][1])
# Can we ignore "auto" interactions?
# (If so, life here is much easier)
if ((not include_auto_equivalences) or
(not (angle_is_auto or bond_is_auto1 or bond_is_auto2))):
assert(bnames[0][1] == bnames[1][0])
# Optional: Shorten the angle name since some of the bnames are redundant:
ang_name_abbr = EncodeInteractionName(anm[0:3]+
#[anm[3],anm[4],anm[6]],
[bnames[0][0],bnames[0][1],bnames[1][1]],
angle_is_auto)
sys.stdout.write(' @angle:' + ang_name_abbr + ' ' +
' @atom:*,p*,b'+bnames[0][0]+',a'+anames[0]+',d*,i* ' +
' @atom:*,p*,b'+bnames[0][1]+',a'+anames[1]+',d*,i* ' +
' @atom:*,p*,b'+bnames[1][1]+',a'+anames[2]+',d*,i*'
'\n')
else:
# Consider "auto" interactions and "auto" atom equivalences
ang_name_abbr = angle_name #(full name)
sys.stdout.write(' @angle:' + ang_name_abbr + ' ')
bshared = 'b*' #(default. overidden below)
abshared = 'ab*' #(default. overidden below)
if angle_is_auto:
a1 = a2 = a3 = 'a*'
aa1 = 'aae' + anames[0] + ',aac*'
aa2 = 'aae*,aac*' + anames[1]
aa3 = 'aae' + anames[2] + ',aac*'
else:
a1 = 'a' + anames[0]
a2 = 'a' + anames[1]
a3 = 'a' + anames[2]
aa1 = aa2 = aa3 = 'aae*,aac*'
if not bond_is_auto1:
b11 = 'b' + bnames[0][0] #(bond atom equivalent name)
b12 = 'b' + bnames[0][1] #(bond atom equivalent name)
bshared = 'b' + bnames[0][1] #(bond atom equivalent name)
ab11 = ab12 = 'ab*'
else:
b11 = b12 = 'b*'
ab11 = 'ab' + bnames[0][0] #(auto bond atom name)
ab12 = 'ab' + bnames[0][1] #(auto bond atom name)
abshared = 'ab' + bnames[0][1] #(auto bond atom name)
# print atom 1 information:
sys.stdout.write(' @atom:*,p*,'+b11+a1+',d*,i*,' +
'ap*,aq*,'+ab11+aa11+
',ade*,adc*,aie*,aic*')
if not bond_is_auto2:
b21 = 'b' + bnames[1][0] #(bond atom equivalent name)
b22 = 'b' + bnames[1][1] #(bond atom equivalent name)
bshared = 'b' + bnames[1][0]
ab21 = ab22 = 'ab*'
else:
b21 = b22 = 'b*'
ab21 = 'ab' + bnames[1][0] #(auto bond atom name)
ab22 = 'ab' + bnames[1][1] #(auto bond atom name)
abshared = 'ab' + bnames[1][0]
# print atom 2 information:
sys.stdout.write(' @atom:*,p*,'+bshared+a2+',d*,i*,' +
'ap*,aq*,'+abshared+aa2+
',ade*,adc*,aie*,aic*')
# print atom 3 information:
sys.stdout.write(' @atom:*,p*,'+b21+a3+',d*,i*,' +
'ap*,aq*,'+ab21+aa22+
',ade*,adc*,aie*,aic*')
sys.stdout.write('\n')
sys.stdout.write(' } # end of "Data Angles By Type" section\n'
'\n')
# Print the force-field parameters for these angle interactions:
sys.stdout.write('\n\n'
' # ------- Angle Force Field Parameters: -------')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for angle_style in angle_styles:
if not (angle_style in angle_styles_selected):
continue
sys.stdout.write(' # '+angle_style2docs[angle_style]+'\n')
sys.stdout.write('\n'
' # Syntax: \n'
' # angle_coeff AngleTypeName AngleStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for angle_name in ang_names_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(angle_name)]
#if (len(anames) == 3) and angle2style[angle_name] == 'class2':
# continue
# Optional: Shorten the angle name since some of the anames are redundant:
anm = [a for a in map(EncodeAName, anames)]
ang_name_abbr = EncodeInteractionName(anm[0:3]+
#[bnames[0][0],bnames[0][1], bnames[1][1]],
[anm[3],anm[4],anm[6]],
is_auto)
if not (angle2style[angle_name] in
angle_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
if (IsAutoInteraction(angle_name) and
(not include_auto_equivalences)):
continue
sys.stdout.write(' angle_coeff @angle:'+ang_name_abbr+' '+
angle2style[angle_name] + ' ' +
angle2params[angle_name] +
" # (ver=" + angle2ver[angle_name] +
", ref=" + angle2ref[angle_name] + ")\n")
if angle_name in angle2class2_bb:
sys.stdout.write(' angle_coeff @angle:'+ang_name_abbr+' '+
angle2style[angle_name] + ' bb ' +
angle2class2_bb[angle_name] +
" # (ver=" + angle2ver_bb[angle_name] +
", ref=" + angle2ref_bb[angle_name] + ")\n")
assert(angle_name in angle2class2_ba)
sys.stdout.write(' angle_coeff @angle:'+ang_name_abbr+' '+
angle2style[angle_name] + ' ba ' +
angle2class2_ba[angle_name] +
" # (ver=" + angle2ver_ba[angle_name] +
", ref=" + angle2ref_ba[angle_name] + ")\n")
sys.stdout.write(' } # end of angle_coeff commands\n'
'\n\n')
################# Print 4-body Dihedral Interactions ##################
dih_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(dihedral2priority.items())],
key=itemgetter(1),
reverse=True)]
if len(dih_names_priority_high_to_low) > 0:
sys.stdout.write(' # --------------- Dihedral Interactions: ---------------------\n')
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (4-body) "dihedral" interactions: --\n'
' # DihedralType AtmType1 AtmType2 AtmType3 AtmType3 [BondType1 Bnd2 Bnd3]\n')
sys.stdout.write('\n\n'
' write_once("Data Dihedrals By Type')
if dihedral_symmetry_subgraph != '':
sys.stdout.write(' ('+dihedral_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for dihedral_name in dih_names_priority_high_to_low:
if not (dihedral2style[dihedral_name] in
dihedral_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(dihedral_name)]
#if (len(anames) == 4) and dihedral2style[dihedral_name] == 'class2':
# continue
bnames = [anames[4:6], anames[6:8], anames[8:10]]
assert(bnames[0][1] == bnames[1][0])
assert(bnames[1][1] == bnames[2][0])
ang_names = [anames[10:13], anames[13:16]]
assert(ang_names[0][1] == ang_names[1][0])
assert(ang_names[0][2] == ang_names[1][1])
# (NOTE TO SELF:
# If these assertions fail, then try checking if they are
# all either the same, or '*'. If they are then just replace '*'
# everwhere that atom appears with the most restrictive name.)
# Optional: Shorten the angle name since some of the bnames are redundant:
is_auto = IsAutoInteraction(dihedral_name)
anm = [a for a in map(EncodeAName, anames)]
dih_name_abbr = EncodeInteractionName(anm[0:4]+
#[bnames[0][0],bnames[0][1], bnames[1][1], bnames[2][1]]
[anm[4],anm[5],anm[7],anm[9]]+
#[ang_names[0][0],ang_names[0][1],ang_names[0][2],ang_names[1][2]]
[anm[10],anm[11],anm[12],anm[15]],
is_auto)
if dih_name_abbr.find('*') != -1:
print(dihedral_name)
# Did the user ask us to include "auto" interactions?
if dihedral2style[dihedral_name] != 'class2':
if IsAutoInteraction(dihedral_name):
if include_auto_equivalences:
sys.stdout.write(' @dihedral:' + dih_name_abbr + ' ' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'
+ anames[0] +
',adc*,aie*,aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc'
+ anames[1] +
',aie*,aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc'
+ anames[2] +
',aie*,aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'
+ anames[3] +
',adc*,aie*,aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @dihedral:' + dih_name_abbr + ' ' +
' @atom:*,p*,b*,a*,d' + anames[0] + ',i* ' +
' @atom:*,p*,b*,a*,d' + anames[1] + ',i* ' +
' @atom:*,p*,b*,a*,d' + anames[2] + ',i* ' +
' @atom:*,p*,b*,a*,d' + anames[3] + ',i* ' +
'\n')
else:
if IsAutoInteraction(dihedral_name):
if include_auto_equivalences:
sys.stdout.write(' @dihedral:' + dih_name_abbr + ' ' +
' @atom:*,ap*,aq*,ab'+bnames[0][0]+',aae'+ang_names[0][0]+',aac*,ade'
+ anames[0] +
',adc*,aie*,aic*' +
' @atom:*,ap*,aq*,ab'+bnames[0][1]+',aae'+ang_names[1][0]+',aac'+ang_names[0][1]+',ade*,adc'
+ anames[1] +
',aie*,aic*' +
' @atom:*,ap*,aq*,ab'+bnames[1][0]+',aae'+ang_names[0][2]+',aac'+ang_names[1][1]+',ade*,adc'
+ anames[2] +
',aie*,aic*' +
' @atom:*,ap*,aq*,ab'+bnames[1][1]+',aae'+ang_names[1][2]+',aac*,ade'
+ anames[3] +
',adc*,aie*,aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @dihedral:' + dih_name_abbr + ' ' +
' @atom:*,p*,b'+bnames[0][0]+',a'+ang_names[0][0]+',d' + anames[0] + ',i* ' +
' @atom:*,p*,b'+bnames[0][1]+',a'+ang_names[0][1]+',d' + anames[1] + ',i* ' +
' @atom:*,p*,b'+bnames[1][0]+',a'+ang_names[1][1]+',d' + anames[2] + ',i* ' +
' @atom:*,p*,b'+bnames[1][1]+',a'+ang_names[1][2]+',d' + anames[3] + ',i* ' +
'\n')
sys.stdout.write(' } # end of "Data Dihedrals By Type" section\n'
'\n')
# Print the force-field parameters for these dihedral interactions:
sys.stdout.write('\n\n'
' # ------- Dihedral Force Field Parameters: -------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for dihedral_style in dihedral_styles:
if not (dihedral_style in dihedral_styles_selected):
continue
sys.stdout.write(' # '+dihedral_style2docs[dihedral_style]+'\n')
sys.stdout.write('\n'
' # Syntax: \n'
' # dihedral_coeff DihedralTypeName DihedralStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for dihedral_name in dih_names_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(dihedral_name)]
#if (len(anames) == 4) and dihedral2style[dihedral_name] == 'class2':
# continue
# Optional: Shorten the angle name since some of the bnames are redundant:
anm = [a for a in map(EncodeAName, anames)]
dih_name_abbr = EncodeInteractionName(anm[0:4]+
#[bnames[0][0],bnames[0][1], bnames[1][1], bnames[2][1]]
[anm[4],anm[5],anm[7],anm[9]]+
#[ang_names[0][0],ang_names[0][1],ang_names[0][2],ang_names[1][2]]
[anm[10],anm[11],anm[12],anm[15]],
is_auto)
if not (dihedral2style[dihedral_name] in
dihedral_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
if (IsAutoInteraction(dihedral_name) and
(not include_auto_equivalences)):
continue
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' ' +
dihedral2params[dihedral_name] +
" # (ver=" + dihedral2ver[dihedral_name] +
", ref=" + dihedral2ref[dihedral_name] + ")\n")
if dihedral_name in dihedral2class2_mbt:
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' mbt ' +
dihedral2class2_mbt[dihedral_name] +
" # (ver=" + dihedral2ver_mbt[dihedral_name] +
", ref=" + dihedral2ref_mbt[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_ebt)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' ebt ' +
dihedral2class2_ebt[dihedral_name] +
" # (ver=" + dihedral2ver_ebt[dihedral_name] +
", ref=" + dihedral2ref_ebt[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_at)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' at ' +
dihedral2class2_at[dihedral_name] +
" # (ver=" + dihedral2ver_at[dihedral_name] +
", ref=" + dihedral2ref_at[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_aat)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' aat ' +
dihedral2class2_aat[dihedral_name] +
" # (ver=" + dihedral2ver_aat[dihedral_name] +
", ref=" + dihedral2ref_aat[dihedral_name] + ")\n")
assert(dihedral_name in dihedral2class2_bb13)
sys.stdout.write(' dihedral_coeff @dihedral:'+dih_name_abbr+' '+
dihedral2style[dihedral_name] + ' bb13 ' +
dihedral2class2_bb13[dihedral_name] +
" # (ver=" + dihedral2ver_bb13[dihedral_name] +
", ref=" + dihedral2ref_bb13[dihedral_name] + ")\n")
sys.stdout.write(' } # end of dihedral_coeff commands\n'
'\n\n')
################# Print 4-body Improper Interactions ##################
imp_names_priority_high_to_low = [x[0] for x in
sorted([x for x in reversed(improper2priority.items())],
key=itemgetter(1),
reverse=True)]
if len(imp_names_priority_high_to_low) > 0:
sys.stdout.write(" # --------------- Improper Interactions: ---------------------\n")
sys.stdout.write('\n'
'\n'
' # -- Rules for generating (4-body) "improper" interactions: --\n'
' # ImproperType AtmType1 AtmType2 AtmType3 AtmType3 [BondType1 Bnd2 Bnd3]\n')
sys.stdout.write('\n'
' write_once("Data Impropers By Type')
if improper_symmetry_subgraph != '':
sys.stdout.write(' ('+improper_symmetry_subgraph+')')
sys.stdout.write('") {\n')
for improper_name in imp_names_priority_high_to_low:
if not (improper2style[improper_name] in
improper_styles_selected):
continue
anames = ['*' if x=='X' else x
for x in ExtractANames(improper_name)]
#if (len(anames) == 4) and improper2style[improper_name] == 'class2':
# continue
ang_names = [anames[4:7],anames[7:10],anames[10:13]]
# atom orderings are LAMMPS implementation specific. See:
# http://lammps.sandia.gov/doc/improper_class2.html
#ang_names[0] <==> (a1, a2, a3) <==> (i, j, k)
#ang_names[1] <==> (a1, a2, a4) <==> (i, j, l)
#ang_names[2] <==> (a3, a2, a4) <==> (k, j, l)
assert(ang_names[0][1] == ang_names[1][1] == ang_names[2][1])
assert(ang_names[0][0] == ang_names[1][0])
assert(ang_names[1][2] == ang_names[2][2])
assert(ang_names[2][0] == ang_names[0][2])
# (NOTE TO SELF:
# If these assertions fail, then try checking if they are
# all either the same, or '*'. If they are then just replace '*'
# everwhere that atom appears with the most restrictive name.)
# Optional: Shorten the angle name since some of the bnames are redundant:
is_auto = IsAutoInteraction(improper_name)
anm = [a for a in map(EncodeAName, anames)]
imp_name_abbr = EncodeInteractionName(anm[0:4]+
#[ang_names[0][0],ang_names[0][1],ang_names[0][2],
# ang_names[1][2]]
[anm[4],anm[5],anm[6],
anm[9]],
is_auto)
# Did the user ask us to include "auto" interactions?
if improper2style[improper_name] != 'class2':
if IsAutoInteraction(improper_name):
if include_auto_equivalences:
sys.stdout.write(' @improper:' + imp_name_abbr +' '+
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie'
+ anames[0] + ',aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie*,aic'
+ anames[1] +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie'
+ anames[2] + ',aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade*,adc*,aie'
+ anames[3] + ',aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @improper:' + imp_name_abbr + ' ' +
' @atom:*,p*,b*,a*,d*,i' + anames[0] +
' @atom:*,p*,b*,a*,d*,i' + anames[1] +
' @atom:*,p*,b*,a*,d*,i' + anames[2] +
' @atom:*,p*,b*,a*,d*,i' + anames[3] +
'\n')
else:
if IsAutoInteraction(improper_name):
if include_auto_equivalences:
sys.stdout.write(' @improper:' + imp_name_abbr +' ' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'+ang_names[0][0]+',adc*,aie'
+ anames[0] + ',aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac'+ang_names[0][1]+',ade*,adc*,aie*,aic'
+ anames[1] +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'+ang_names[0][1]+',adc*,aie'
+ anames[2] + ',aic*' +
' @atom:*,ap*,aq*,ab*,aae*,aac*,ade'+ang_names[2][2]+',adc*,aie'
+ anames[3] + ',aic*' +
'\n')
else:
continue
else:
sys.stdout.write(' @improper:' + imp_name_abbr + ' ' +
' @atom:*,p*,b*,a'+ang_names[0][0]+',d*,i' + anames[0] +
' @atom:*,p*,b*,a'+ang_names[0][1]+',d*,i' + anames[1] +
' @atom:*,p*,b*,a'+ang_names[0][2]+',d*,i' + anames[2] +
' @atom:*,p*,b*,a'+ang_names[1][2]+',d*,i' + anames[3] +
'\n')
sys.stdout.write(' } # end of "Data Impropers By Type" section\n'
'\n')
# Print the force-field parameters for these improper interactions:
sys.stdout.write('\n\n'
' # ------- Improper Force Field Parameters: -------\n')
sys.stdout.write(' # For an explanation of these parameters, visit:\n')
for improper_style in improper_styles:
if not (improper_style in improper_styles_selected):
continue
sys.stdout.write(' # '+improper_style2docs[improper_style]+'\n')
sys.stdout.write('\n'
'# Syntax: \n'
' # improper_coeff ImproperTypeName ImproperStyle parameters...\n\n')
sys.stdout.write('\n'
' write_once("In Settings") {\n')
for improper_name in imp_names_priority_high_to_low:
anames = ['*' if x=='X' else x
for x in ExtractANames(improper_name)]
#if (len(anames) == 4) and improper2style[improper_name] == 'class2':
# continue
# Optional: Shorten the angle name since some of the bnames are redundant:
is_auto = IsAutoInteraction(improper_name)
anm = [a for a in map(EncodeAName, anames)]
imp_name_abbr = EncodeInteractionName(anm[0:4]+
#[ang_names[0][0],ang_names[0][1],ang_names[0][2],
# ang_names[1][2]]
[anm[4],anm[5],anm[6],
anm[9]],
is_auto)
if not (improper2style[improper_name] in
improper_styles_selected):
continue
# Did the user ask us to include "auto" interactions?
if (IsAutoInteraction(improper_name) and
(not include_auto_equivalences)):
continue
sys.stdout.write(' improper_coeff @improper:'+imp_name_abbr+' '+
improper2style[improper_name] + ' ' +
improper2params[improper_name] +
" # (ver=" + improper2ver[improper_name] +
", ref=" + improper2ref[improper_name] + ")\n")
if improper_name in improper2class2_aa:
sys.stdout.write(' improper_coeff @improper:'+imp_name_abbr+' '+
improper2style[improper_name] + ' aa ' +
improper2class2_aa[improper_name] +
" # (ver=" + improper2ver_aa[improper_name] +
", ref=" + improper2ref[improper_name] + ")\n")
sys.stdout.write(' } # end of improper_coeff commands\n'
'\n\n')
sys.stdout.write('\n\n\n\n'
' # -------------------- Select LAMMPS style(s) ------------------\n'
'\n')
sys.stdout.write('\n'
' # LAMMPS supports many different kinds of bonded and non-bonded\n'
' # interactions which can be selected at run time. Eventually\n'
' # we must inform LAMMPS which of them we will need. We specify\n'
' # this in the "In Init" section: \n\n')
sys.stdout.write(' write_once("In Init") {\n')
sys.stdout.write(' units real\n')
sys.stdout.write(' atom_style full\n')
if len(bond_styles) > 0:
sys.stdout.write(' bond_style hybrid')
for bond_style in bond_styles:
if not (bond_style in bond_styles_selected):
continue
sys.stdout.write(' ' + bond_style)
sys.stdout.write('\n')
for bond_style in bond_styles:
if not (bond_style in bond_styles_selected):
continue
sys.stdout.write(' # '+bond_style2docs[bond_style]+'\n')
sys.stdout.write('\n')
if len(angle_styles) > 0:
sys.stdout.write(' angle_style hybrid')
for angle_style in angle_styles:
if not (angle_style in angle_styles_selected):
continue
sys.stdout.write(' ' + angle_style)
sys.stdout.write('\n')
for angle_style in angle_styles:
if not (angle_style in angle_styles_selected):
continue
sys.stdout.write(' # '+angle_style2docs[angle_style]+'\n')
sys.stdout.write('\n')
if len(dihedral_styles) > 0:
sys.stdout.write(' dihedral_style hybrid')
for dihedral_style in dihedral_styles:
if not (dihedral_style in dihedral_styles_selected):
continue
sys.stdout.write(' ' + dihedral_style)
sys.stdout.write('\n')
for dihedral_style in dihedral_styles:
if not (dihedral_style in dihedral_styles_selected):
continue
sys.stdout.write(' # '+dihedral_style2docs[dihedral_style]+'\n')
sys.stdout.write('\n')
if len(improper_styles) > 0:
sys.stdout.write(' improper_style hybrid')
for improper_style in improper_styles:
if not (improper_style in improper_styles_selected):
continue
sys.stdout.write(' ' + improper_style)
sys.stdout.write('\n')
for improper_style in improper_styles:
if not (improper_style in improper_styles_selected):
continue
sys.stdout.write(' # '+improper_style2docs[improper_style]+'\n')
sys.stdout.write('\n')
if len(pair_styles) > 0:
sys.stdout.write(' pair_style hybrid')
for pair_style in pair_styles:
if not (pair_style in pair_styles_selected):
continue
sys.stdout.write(' ' + pair_style +
' ' + pair_style_args[pair_style])
sys.stdout.write('\n')
for pair_style in pair_styles:
sys.stdout.write(' # '+pair_style2docs[pair_style]+'\n')
sys.stdout.write('\n')
sys.stdout.write(' pair_modify mix ' + pair_mixing_style + '\n')
sys.stdout.write(' ' + special_bonds_command + '\n')
sys.stdout.write(' ' + kspace_style + '\n')
sys.stdout.write(' } #end of init parameters\n\n')
sys.stdout.write('} # ' + ffname + '\n\n')
sys.stdout.write("#\n"
"# WARNING: The following 1-2, 1-3, and 1-4 weighting parameters were ASSUMED:\n")
sys.stdout.write("# " + special_bonds_command + "\n")
sys.stdout.write("# (See http://lammps.sandia.gov/doc/special_bonds.html for details)\n")
#sys.stderr.write(' done.\n')
if len(lines_templates) > 0:
sys.stdout.write('\n\n\n\n'
'# ---- templates from the original .frc file used for atom type selection: ---\n')
for line in lines_templates:
sys.stdout.write('# '+line)
if len(lines_references) > 0:
sys.stdout.write('\n\n\n\n'
'# ---- references from the original .frc file: ----\n\n')
for ref_number,lines in sorted(lines_references.items()):
sys.stdout.write('# reference '+str(ref_number)+'\n')
for line in lines:
sys.stdout.write('# '+line)
sys.stdout.write('\n')
if len(lines_warnings) > 0:
sys.stdout.write('\n\n\n\n'
'# ---- additional warnings: ----\n')
for line in lines_warnings:
sys.stdout.write(line)
if filename_in != '':
file_in.close()
except InputError as err:
sys.stderr.write('\n\n' + str(err) + '\n')
sys.exit(1)
# Script entry point: run main() only when executed directly, not on import.
if __name__ == '__main__':
    main()
|
# Copyright (c) 2015 Red Hat, Inc.
# Copyright (c) 2015 SUSE Linux Products GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.l2 import base
class TestOVSAgent(base.OVSAgentTestFramework):
    """Functional tests for the ML2 Open vSwitch L2 agent."""

    def test_port_creation_and_deletion(self):
        self.setup_agent_and_ports(
            port_dicts=self.create_test_ports())
        self.wait_until_ports_state(self.ports, up=True)

        for port in self.ports:
            self.agent.int_br.delete_port(port['vif_name'])

        self.wait_until_ports_state(self.ports, up=False)

    def _check_datapath_type(self, expected, default=False):
        """Start an agent and assert both bridges use the given datapath.

        When ``default`` is True the configuration is left untouched so
        the built-in default datapath type is exercised; otherwise the
        'datapath_type' option is overridden before the agent starts.
        """
        if not default:
            self.config.set_override('datapath_type',
                                     expected,
                                     "OVS")
        agent = self.create_agent()
        self.start_agent(agent)
        for br_name in (agent.int_br.br_name, agent.tun_br.br_name):
            actual = self.ovs.db_get_val('Bridge', br_name, 'datapath_type')
            self.assertEqual(expected, actual)

    def test_datapath_type_system(self):
        # 'system' is the default datapath; no override applied,
        # matching the original test's behavior.
        self._check_datapath_type(constants.OVS_DATAPATH_SYSTEM,
                                  default=True)

    def test_datapath_type_netdev(self):
        self._check_datapath_type(constants.OVS_DATAPATH_NETDEV)

    def test_resync_devices_set_up_after_exception(self):
        self.setup_agent_and_ports(
            port_dicts=self.create_test_ports(),
            trigger_resync=True)
        self.wait_until_ports_state(self.ports, up=True)

    def test_port_vlan_tags(self):
        self.setup_agent_and_ports(
            port_dicts=self.create_test_ports(),
            trigger_resync=True)
        self.wait_until_ports_state(self.ports, up=True)
        self.assert_vlan_tags(self.ports, self.agent)

    def test_assert_bridges_ports_vxlan(self):
        agent = self.create_agent()
        self.assertTrue(self.ovs.bridge_exists(self.br_int))
        self.assertTrue(self.ovs.bridge_exists(self.br_tun))
        self.assert_bridge_ports()
        self.assert_patch_ports(agent)

    def test_assert_bridges_ports_no_tunnel(self):
        self.create_agent(create_tunnels=False)
        self.assertTrue(self.ovs.bridge_exists(self.br_int))
        self.assertFalse(self.ovs.bridge_exists(self.br_tun))

    def test_assert_pings_during_br_int_setup_not_lost(self):
        """Recreating the integration bridge must not drop traffic."""
        self.setup_agent_and_ports(port_dicts=self.create_test_ports(),
                                   create_tunnels=False)
        self.wait_until_ports_state(self.ports, up=True)
        ips = [port['fixed_ips'][0]['ip_address'] for port in self.ports]
        with net_helpers.async_ping(self.namespace, ips) as running:
            while running():
                self.agent.setup_integration_br()
                time.sleep(0.25)
Remove duplicated code in two test cases
Change-Id: Icc36af93a6d648ba46388be270f9c7b6082d0ef1
# Copyright (c) 2015 Red Hat, Inc.
# Copyright (c) 2015 SUSE Linux Products GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.tests.common import net_helpers
from neutron.tests.functional.agent.l2 import base
class TestOVSAgent(base.OVSAgentTestFramework):
    """Functional checks for the Open vSwitch L2 agent."""

    def test_port_creation_and_deletion(self):
        """Ports come up after setup and go down once deleted."""
        self.setup_agent_and_ports(
            port_dicts=self.create_test_ports())
        self.wait_until_ports_state(self.ports, up=True)
        for p in self.ports:
            self.agent.int_br.delete_port(p['vif_name'])
        self.wait_until_ports_state(self.ports, up=False)

    def _check_datapath_type_netdev(self, expected, default=False):
        """Verify both agent bridges report the expected datapath_type.

        Unless ``default`` is set, the 'datapath_type' option is
        overridden before the agent is created and started.
        """
        if not default:
            self.config.set_override('datapath_type', expected, "OVS")
        agent = self.create_agent()
        self.start_agent(agent)
        for bridge in (agent.int_br, agent.tun_br):
            self.assertEqual(
                expected,
                self.ovs.db_get_val('Bridge', bridge.br_name,
                                    'datapath_type'))

    def test_datapath_type_netdev(self):
        self._check_datapath_type_netdev(
            constants.OVS_DATAPATH_NETDEV)

    def test_datapath_type_system(self):
        self._check_datapath_type_netdev(
            constants.OVS_DATAPATH_SYSTEM)

    def test_datapath_type_default(self):
        self._check_datapath_type_netdev(
            constants.OVS_DATAPATH_SYSTEM, default=True)

    def test_resync_devices_set_up_after_exception(self):
        self.setup_agent_and_ports(
            port_dicts=self.create_test_ports(),
            trigger_resync=True)
        self.wait_until_ports_state(self.ports, up=True)

    def test_port_vlan_tags(self):
        self.setup_agent_and_ports(
            port_dicts=self.create_test_ports(),
            trigger_resync=True)
        self.wait_until_ports_state(self.ports, up=True)
        self.assert_vlan_tags(self.ports, self.agent)

    def test_assert_bridges_ports_vxlan(self):
        agent = self.create_agent()
        for bridge in (self.br_int, self.br_tun):
            self.assertTrue(self.ovs.bridge_exists(bridge))
        self.assert_bridge_ports()
        self.assert_patch_ports(agent)

    def test_assert_bridges_ports_no_tunnel(self):
        self.create_agent(create_tunnels=False)
        self.assertTrue(self.ovs.bridge_exists(self.br_int))
        self.assertFalse(self.ovs.bridge_exists(self.br_tun))

    def test_assert_pings_during_br_int_setup_not_lost(self):
        """Recreating the integration bridge must not drop traffic."""
        self.setup_agent_and_ports(port_dicts=self.create_test_ports(),
                                   create_tunnels=False)
        self.wait_until_ports_state(self.ports, up=True)
        addresses = [p['fixed_ips'][0]['ip_address'] for p in self.ports]
        with net_helpers.async_ping(self.namespace, addresses) as running:
            while running():
                self.agent.setup_integration_br()
                time.sleep(0.25)
|
# Splitting out marginal effects to see if they can be generalized
import numpy as np
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly, resettable_cache
#### margeff helper functions ####
#NOTE: todo marginal effects for group 2
# group 2 oprobit, ologit, gologit, mlogit, biprobit
def _check_margeff_args(at, method):
"""
Checks valid options for margeff
"""
if at not in ['overall','mean','median','zero','all']:
raise ValueError("%s not a valid option for `at`." % at)
if method not in ['dydx','eyex','dyex','eydx']:
raise ValueError("method is not understood. Got %s" % method)
def _check_discrete_args(at, method):
"""
Checks the arguments for margeff if the exogenous variables are discrete.
"""
if method in ['dyex','eyex']:
raise ValueError("%s not allowed for discrete variables" % method)
if at in ['median', 'zero']:
raise ValueError("%s not allowed for discrete variables" % at)
def _get_const_index(exog):
"""
Returns a boolean array of non-constant column indices in exog and
an scalar array of where the constant is or None
"""
effects_idx = exog.var(0) != 0
if np.any(~effects_idx):
const_idx = np.where(~effects_idx)[0]
else:
const_idx = None
return effects_idx, const_idx
def _isdummy(X):
"""
Given an array X, returns the column indices for the dummy variables.
Parameters
----------
X : array-like
A 1d or 2d array of numbers
Examples
--------
>>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
>>> X[:,1:3] = np.random.randn(15,2)
>>> ind = _isdummy(X)
>>> ind
array([ True, False, False, True, True], dtype=bool)
"""
X = np.asarray(X)
if X.ndim > 1:
ind = np.zeros(X.shape[1]).astype(bool)
max = (np.max(X, axis=0) == 1)
min = (np.min(X, axis=0) == 0)
remainder = np.all(X % 1. == 0, axis=0)
ind = min & max & remainder
if X.ndim == 1:
ind = np.asarray([ind])
return np.where(ind)[0]
def _get_dummy_index(X, const_idx):
    """Locate dummy columns of X, shifted to effects-array indexing.

    Returns the (possibly None) index array and a flag saying whether
    any dummy variables were found.
    """
    dummy_ind = _isdummy(X)
    # The effects array has no constant column, so indices past the
    # constant are shifted down by one.
    if const_idx is not None:
        dummy_ind[dummy_ind > const_idx] -= 1
    if dummy_ind.size == 0:
        # nothing to do downstream; None is passed to the std-err function
        return None, False
    return dummy_ind, True
def _iscount(X):
    """
    Given an array X, returns the column indices for count variables.

    A count column holds non-negative integer values, is not constant,
    and is not a 0/1 dummy (dummies are handled separately).

    Parameters
    ----------
    X : array-like
        A 1d or 2d array of numbers

    Returns
    -------
    ndarray of int
        Column indices of the count variables (previously the docstring
        incorrectly showed a boolean array as the return value).

    Examples
    --------
    >>> X = np.random.randint(0, 10, size=(15,5)).astype(float)
    >>> X[:,1:3] = np.random.randn(15,2)
    >>> ind = _iscount(X)
    """
    X = np.asarray(X)
    # integer-valued, non-constant, non-negative columns
    remainder = np.logical_and(np.logical_and(np.all(X % 1. == 0, axis=0),
                               X.var(0) != 0), np.all(X >= 0, axis=0))
    dummy = _isdummy(X)
    remainder = np.where(remainder)[0].tolist()
    # dummy columns satisfy the test above as well; exclude them
    for idx in dummy:
        remainder.remove(idx)
    return np.array(remainder)
def _get_count_index(X, const_idx):
    """Locate count columns of X, shifted to effects-array indexing.

    Returns the (possibly None) index array and a flag saying whether
    any count variables were found.
    """
    count_ind = _iscount(X)
    # The effects array omits the constant column, so shift indices
    # that fall past it down by one.
    if const_idx is not None:
        count_ind[count_ind > const_idx] -= 1
    if count_ind.size == 0:
        # nothing for the standard-error routine to do
        return None, False
    return count_ind, True
def _get_margeff_exog(exog, at, atexog, ind):
if atexog is not None: # user supplied
if isinstance(atexog, dict):
# assumes values are singular or of len(exog)
for key in atexog:
exog[:,key] = atexog[key]
elif isinstance(atexog, np.ndarray): #TODO: handle DataFrames
if atexog.ndim == 1:
k_vars = len(atexog)
else:
K_vars = atexog.shape[1]
try:
assert k_vars == exog.shape[1]
except:
raise ValueError("atexog does not have the same number "
"of variables as exog")
exog = atexog
#NOTE: we should fill in atexog after we process at
if at == 'mean':
exog = np.atleast_2d(exog.mean(0))
elif at == 'median':
exog = np.atleast_2d(np.median(exog, axis=0))
elif at == 'zero':
exog = np.zeros((1,exog.shape[1]))
exog[0,~ind] = 1
return exog
def _get_count_effects(effects, exog, count_ind, method, model, params):
"""
If there's a count variable, the predicted difference is taken by
subtracting one and adding one to exog then averaging the difference
"""
# this is the index for the effect and the index for count col in exog
for i in count_ind:
exog0 = exog.copy()
exog0[:,i] -= 1
effect0 = model.predict(params, exog0)
exog0[:,i] += 2
effect1 = model.predict(params, exog0)
#NOTE: done by analogy with dummy effects but untested bc
# stata doesn't handle both count and eydx anywhere
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = ((effect1 - effect0)/2)
return effects
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
"""
If there's a dummy variable, the predicted difference is taken at
0 and 1
"""
# this is the index for the effect and the index for dummy col in exog
for i in dummy_ind:
exog0 = exog.copy() # only copy once, can we avoid a copy?
exog0[:,i] = 0
effect0 = model.predict(params, exog0)
#fittedvalues0 = np.dot(exog0,params)
exog0[:,i] = 1
effect1 = model.predict(params, exog0)
if 'ey' in method:
effect0 = np.log(effect0)
effect1 = np.log(effect1)
effects[:, i] = (effect1 - effect0)
return effects
def _effects_at(effects, at):
if at == 'all':
effects = effects
elif at == 'overall':
effects = effects.mean(0)
else:
effects = effects[0,:]
return effects
def margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind,
                             method, J):
    r"""
    Fill in Jacobian rows for dummy regressors.

    For discrete regressors the marginal effect is

    \Delta F = F(XB) | d = 1 - F(XB) | d = 0

    The row of the Jacobian for this variable is given by

    f(XB)*X | d = 1 - f(XB)*X | d = 0

    Where F is the default prediction of the model

    Notes
    -----
    The docstring is now a raw string so the LaTeX backslashes are not
    treated as escape sequences.
    """
    for i in dummy_ind:
        exog0 = exog.copy()
        exog1 = exog.copy()
        exog0[:, i] = 0
        exog1[:, i] = 1
        dfdb0 = model._derivative_predict(params, exog0, method)
        dfdb1 = model._derivative_predict(params, exog1, method)
        dfdb = (dfdb1 - dfdb0)
        if dfdb.ndim >= 2:  # for overall
            dfdb = dfdb.mean(0)
        if J > 1:
            # BUG FIX: integer division -- K is used as a slice step, and
            # true division would make it a float on Python 3. `//` is
            # identical to the old behavior for Python 2 ints.
            K = dfdb.shape[1] // (J - 1)
            cov_margins[i::K, :] = dfdb
        else:
            cov_margins[i, :] = dfdb  # how each F changes with change in B
    return cov_margins
def margeff_cov_params_count(model, cov_margins, params, exog, count_ind,
method, J):
"""
For discrete regressors the marginal effect is
\Delta F = F(XB) | d += 1 - F(XB) | d -= 1
The row of the Jacobian for this variable is given by
(f(XB)*X | d += 1 - f(XB)*X | d -= 1) / 2
where F is the default prediction for the model.
"""
for i in count_ind:
exog0 = exog.copy()
exog0[:,i] -= 1
dfdb0 = model._derivative_predict(params, exog0, method)
exog0[:,i] += 2
dfdb1 = model._derivative_predict(params, exog0, method)
dfdb = (dfdb1 - dfdb0)
if dfdb.ndim >= 2: # for overall
dfdb = dfdb.mean(0) / 2
if J > 1:
K = dfdb.shape[1] / (J-1)
cov_margins[i::K, :] = dfdb
else:
cov_margins[i, :] = dfdb # how each F changes with change in B
return cov_margins
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
                       dummy_ind, count_ind, method, J):
    """
    Computes the variance-covariance of marginal effects by the delta method.

    Parameters
    ----------
    model : model instance
        The model that returned the fitted results. Its pdf method is used
        for computing the Jacobian of discrete variables in dummy_ind and
        count_ind
    params : array-like
        estimated model parameters
    exog : array-like
        exogenous variables at which to calculate the derivative
    cov_params : array-like
        The variance-covariance of the parameters
    at : str
       Options are:

        - 'overall', The average of the marginal effects at each
          observation.
        - 'mean', The marginal effects at the mean of each regressor.
        - 'median', The marginal effects at the median of each regressor.
        - 'zero', The marginal effects at zero for each regressor.
        - 'all', The marginal effects at each observation.

        Only overall has any effect here.
    derivative : function or array-like
        If a function, it returns the marginal effects of the model with
        respect to the exogenous variables evaluated at exog. Expected to be
        called derivative(params, exog). This will be numerically
        differentiated. Otherwise, it can be the Jacobian of the marginal
        effects with respect to the parameters.
    dummy_ind : array-like
        Indices of the columns of exog that contain dummy variables
    count_ind : array-like
        Indices of the columns of exog that contain count variables

    Notes
    -----
    For continuous regressors, the variance-covariance is given by

    Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'

    where V is the parameter variance-covariance.

    The outer Jacobians are computed via numerical differentiation if
    derivative is a function.
    """
    if callable(derivative):
        from statsmodels.tools.numdiff import approx_fprime_cs
        params = params.ravel('F')  # for Multinomial
        try:
            jacobian_mat = approx_fprime_cs(params, derivative,
                                            args=(exog, method))
        except TypeError:  # norm.cdf doesn't take complex values
            # BUG FIX: this fallback imported "statsmodels.tool.numdiff"
            # (missing 's'), which always raised ImportError, and used the
            # Python-2-only "except TypeError, err" syntax.
            from statsmodels.tools.numdiff import approx_fprime1
            jacobian_mat = approx_fprime1(params, derivative,
                                          args=(exog, method))
        if at == 'overall':
            jacobian_mat = np.mean(jacobian_mat, axis=1)
        else:
            jacobian_mat = jacobian_mat.squeeze()  # exog was 2d row vector
        if dummy_ind is not None:
            jacobian_mat = margeff_cov_params_dummy(model, jacobian_mat,
                                                    params, exog, dummy_ind,
                                                    method, J)
        if count_ind is not None:
            jacobian_mat = margeff_cov_params_count(model, jacobian_mat,
                                                    params, exog, count_ind,
                                                    method, J)
    else:
        jacobian_mat = derivative

    # NOTE: this won't go through for at == 'all'
    return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
def margeff_cov_with_se(model, params, exog, cov_params, at, derivative,
                        dummy_ind, count_ind, method, J):
    """Convenience wrapper around margeff_cov_params.

    Returns both the covariance matrix of the marginal effects and their
    standard errors (the square roots of its diagonal).
    """
    cov_me = margeff_cov_params(model, params, exog, cov_params, at,
                                derivative, dummy_ind, count_ind, method, J)
    std_errors = np.sqrt(np.diag(cov_me))
    return cov_me, std_errors
def margeff():
    # Placeholder: not yet implemented.
    pass
# Map margeff `method` codes to human-readable derivative labels,
# used as column headers in summary output.
_transform_names = dict(dydx='dy/dx',
                        eyex='d(lny)/d(lnx)',
                        dyex='dy/d(lnx)',
                        eydx='d(lny)/dx')
class Margins(object):
    """
    Mostly a do nothing class. Lays out the methods expected of a sub-class.

    This is just a sketch of what we may want out of a general margins class.
    I (SS) need to look at details of other models.
    """
    def __init__(self, results, get_margeff, derivative, dist=None,
                 margeff_args=()):
        # results: fitted results instance the margins are computed from.
        # get_margeff: callable that computes the marginal effects.
        # derivative: accepted but not stored here; presumably used by
        #     subclasses -- TODO confirm.
        # dist: distribution used for inference, if any.
        # NOTE(review): margeff_args is passed as a single positional
        # argument, whereas DiscreteMargins unpacks its args with `*` --
        # confirm which calling convention _get_margeff expects.
        self._cache = resettable_cache()
        self.results = results
        self.dist = dist
        self._get_margeff = get_margeff
        self.get_margeff(margeff_args)

    def _reset(self):
        # Invalidate all cache_readonly attributes.
        self._cache = resettable_cache()

    def get_margeff(self, *args, **kwargs):
        # Recompute the marginal effects, clearing cached statistics first.
        self._reset()
        self.margeff = self._get_margeff(*args)

    # The statistics below are the interface sub-classes must implement.
    @cache_readonly
    def tvalues(self):
        raise NotImplementedError

    @cache_readonly
    def cov_margins(self):
        raise NotImplementedError

    @cache_readonly
    def margins_se(self):
        raise NotImplementedError

    def get_frame(self):
        raise NotImplementedError

    @cache_readonly
    def pvalues(self):
        raise NotImplementedError

    def conf_int(self, alpha=.05):
        raise NotImplementedError

    def summary(self, alpha=.05):
        raise NotImplementedError
class DiscreteMargins(object):
    """Marginal effects for a discrete-choice model's results.

    After construction, holds `margeff` (the effects), `margeff_se`
    (delta-method standard errors) and `margeff_cov` (their covariance),
    plus derived z-statistics, p-values and confidence intervals.
    """
    def __init__(self, results, args=()):
        self._cache = resettable_cache()  # backing store for cache_readonly
        self.results = results
        self.get_margeff(*args)

    def _reset(self):
        # Invalidate cached derived statistics (tvalues, pvalues, ...).
        self._cache = resettable_cache()

    # NOTE: a duplicate ``get_margeff(self, *args, **kwargs)`` stub that was
    # shadowed by the full definition below (and referenced a never-set
    # ``self._get_margeff`` attribute) has been removed.

    @cache_readonly
    def tvalues(self):
        # z-statistics of the marginal effects.
        return self.margeff / self.margeff_se

    def get_frame(self, alpha=.05):
        """Return the effects and their statistics as a pandas DataFrame.

        Parameters
        ----------
        alpha : float
            Significance level for the confidence-interval columns.
        """
        from pandas import DataFrame
        names = [_transform_names[self.margeff_options['method']],
                 'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Conf. Int. Hi.']  # fixed 'Cont.' typo
        ind = self.results.model.exog.var(0) != 0  # True if not a constant
        exog_names = self.results.model.exog_names
        var_names = [name for i, name in enumerate(exog_names) if ind[i]]
        table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
                                 self.pvalues, self.conf_int(alpha)))
        return DataFrame(table, columns=names, index=var_names)

    @cache_readonly
    def pvalues(self):
        # Two-sided p-values from the standard normal distribution.
        return norm.sf(np.abs(self.tvalues)) * 2

    def conf_int(self, alpha=.05):
        """Return the (lower, upper) confidence limits for the effects."""
        me_se = self.margeff_se
        q = norm.ppf(1 - alpha / 2)
        lower = self.margeff - q * me_se
        upper = self.margeff + q * me_se
        # list(...) keeps this correct if zip returns an iterator (Py3).
        return np.asarray(list(zip(lower, upper)))

    def summary(self, alpha=.05):
        """
        Returns a summary table for marginal effects

        Parameters
        ----------
        alpha : float
            Number between 0 and 1. The confidence intervals have the
            probability 1-alpha.

        Returns
        -------
        Summary : SummaryTable
            A SummaryTable instance
        """
        results = self.results
        model = results.model
        title = model.__class__.__name__ + " Marginal Effects"
        method = self.margeff_options['method']
        top_left = [('Dep. Variable:', [model.endog_names]),
                    ('Method:', [method]),
                    ('At:', [self.margeff_options['at']]),]

        from statsmodels.iolib.summary import (Summary, summary_params,
                                               table_extend)
        exog_names = model.exog_names[:]  # copy
        smry = Summary()

        # sigh, we really need to hold on to this in _data...
        _, const_idx = _get_const_index(model.exog)
        if const_idx is not None:
            exog_names.pop(const_idx)

        J = int(getattr(model, "J", 1))
        if J > 1:
            # multi-equation (e.g. multinomial) models get per-equation names
            yname, yname_list = results._get_endog_name(model.endog_names,
                                                        None, all=True)
        else:
            yname = model.endog_names
            yname_list = [yname]

        smry.add_table_2cols(self, gleft=top_left, gright=[],
                             yname=yname, xname=exog_names, title=title)

        #NOTE: add_table_params is not general enough yet for margeff
        # could use a refactor with getattr instead of hard-coded params
        # tvalues etc.
        table = []
        conf_int = self.conf_int(alpha)
        margeff = self.margeff
        margeff_se = self.margeff_se
        tvalues = self.tvalues
        pvalues = self.pvalues
        if J > 1:
            # build one sub-table per equation, then stack them
            for eq in range(J):
                restup = (results, margeff[:,eq], margeff_se[:,eq],
                          tvalues[:,eq], pvalues[:,eq], conf_int[:,:,eq])
                tble = summary_params(restup, yname=yname_list[eq],
                                      xname=exog_names, alpha=alpha,
                                      use_t=False, skip_header=True)
                tble.title = yname_list[eq]
                # overwrite coef with method name
                header = ['', _transform_names[method], 'std err', 'z',
                          'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
                tble.insert_header_row(0, header)
                table.append(tble)

            table = table_extend(table, keep_headers=True)
        else:
            restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
            table = summary_params(restup, yname=yname, xname=exog_names,
                                   alpha=alpha, use_t=False, skip_header=True)
            header = ['', _transform_names[method], 'std err', 'z',
                      'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
            table.insert_header_row(0, header)

        smry.tables.append(table)
        return smry

    def get_margeff(self, at='overall', method='dydx', atexog=None,
                    dummy=False, count=False):
        """Get marginal effects of the fitted model.

        Parameters
        ----------
        at : str, optional
            Options are:

            - 'overall', The average of the marginal effects at each
              observation.
            - 'mean', The marginal effects at the mean of each regressor.
            - 'median', The marginal effects at the median of each regressor.
            - 'zero', The marginal effects at zero for each regressor.
            - 'all', The marginal effects at each observation.

            Note that if `exog` is specified, then marginal effects for all
            variables not specified by `exog` are calculated using the `at`
            option.
        method : str, optional
            Options are:

            - 'dydx' - dy/dx - No transformation is made and marginal effects
              are returned. This is the default.
            - 'eyex' - estimate elasticities of variables in `exog` --
              d(lny)/d(lnx)
            - 'dyex' - estimate semielasticity -- dy/d(lnx)
            - 'eydx' - estimate semielasticity -- d(lny)/dx

            Note that transformations are done after each observation is
            calculated. Semi-elasticities for binary variables are computed
            using the midpoint method. 'dyex' and 'eyex' do not make sense
            for discrete variables.
        atexog : array-like, optional
            Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the key
            as the zero-indexed column number and the value of the dictionary.
            Default is None for all independent variables less the constant.
        dummy : bool, optional
            If False, treats binary variables (if present) as continuous. This
            is the default. Else if True, treats binary variables as
            changing from 0 to 1. Note that any variable that is either 0 or 1
            is treated as binary. Each binary variable is treated separately
            for now.
        count : bool, optional
            If False, treats count variables (if present) as continuous. This
            is the default. Else if True, the marginal effect is the
            change in probabilities when each observation is increased by one.

        Returns
        -------
        effects : ndarray
            the marginal effect corresponding to the input options

        Notes
        -----
        When using after Poisson, returns the expected number of events
        per period, assuming that the model is loglinear.
        """
        self._reset()  # always reset the cache when this is called
        #TODO: if at is not all or overall, we can also put atexog values
        # in summary table head
        method = method.lower()
        at = at.lower()
        _check_margeff_args(at, method)
        self.margeff_options = dict(method=method, at=at)
        results = self.results
        model = results.model
        params = results.params
        exog = model.exog.copy()  # copy because values are changed
        effects_idx, const_idx = _get_const_index(exog)

        if dummy:
            _check_discrete_args(at, method)
            dummy_idx, dummy = _get_dummy_index(exog, const_idx)
        else:
            dummy_idx = None

        if count:
            _check_discrete_args(at, method)
            count_idx, count = _get_count_index(exog, const_idx)
        else:
            count_idx = None

        # get the exogenous variables at the requested evaluation point
        exog = _get_margeff_exog(exog, at, atexog, effects_idx)

        # get base marginal effects, handled by sub-classes
        effects = model._derivative_exog(params, exog, method,
                                         dummy_idx, count_idx)
        J = getattr(model, 'J', 1)
        effects_idx = np.tile(effects_idx, J)  # adjust for multi-equation.

        effects = _effects_at(effects, at)

        # Set standard error of the marginal effects by Delta method.
        margeff_cov, margeff_se = margeff_cov_with_se(model, params, exog,
                                                results.cov_params(), at,
                                                model._derivative_exog,
                                                dummy_idx, count_idx,
                                                method, J)

        # reshape for multi-equation
        if J > 1:
            K = model.K - np.any(~effects_idx)  # subtract constant
            self.margeff = effects[effects_idx].reshape(K, J, order='F')
            self.margeff_se = margeff_se[effects_idx].reshape(K, J, order='F')
            self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
        else:
            # don't care about the constant
            self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
            self.margeff_se = margeff_se[effects_idx]
            self.margeff = effects[effects_idx]
DOC: Update docstrings for discrete covariance calcs
#Splitting out maringal effects to see if they can be generalized
import numpy as np
from scipy.stats import norm
from statsmodels.tools.decorators import cache_readonly, resettable_cache
#### margeff helper functions ####
#NOTE: todo marginal effects for group 2
# group 2 oprobit, ologit, gologit, mlogit, biprobit
def _check_margeff_args(at, method):
    """
    Checks valid options for margeff
    """
    valid_at = ('overall', 'mean', 'median', 'zero', 'all')
    valid_methods = ('dydx', 'eyex', 'dyex', 'eydx')
    if at not in valid_at:
        raise ValueError("%s not a valid option for `at`." % at)
    if method not in valid_methods:
        raise ValueError("method is not understood. Got %s" % method)
def _check_discrete_args(at, method):
    """
    Checks the arguments for margeff if the exogenous variables are discrete.
    """
    # elasticities and the median/zero evaluation points are undefined
    # for discrete regressors
    if method in ('dyex', 'eyex'):
        raise ValueError("%s not allowed for discrete variables" % method)
    if at in ('median', 'zero'):
        raise ValueError("%s not allowed for discrete variables" % at)
def _get_const_index(exog):
    """
    Returns a boolean array of non-constant column indices in exog and
    an scalar array of where the constant is or None
    """
    non_const = exog.var(0) != 0  # constant columns have zero variance
    const_cols = np.where(~non_const)[0]
    const_idx = const_cols if const_cols.size else None
    return non_const, const_idx
def _isdummy(X):
    """
    Given an array X, returns the column indices for the dummy variables.

    A column is a dummy if its minimum is 0, its maximum is 1 and all of
    its values are integral.

    Parameters
    ----------
    X : array-like
        A 1d or 2d array of numbers.  A 1d array is treated as a single
        column.

    Returns
    -------
    ndarray
        Integer indices of the dummy columns (e.g. ``array([0, 3, 4])``).

    Examples
    --------
    >>> X = np.random.randint(0, 2, size=(15,5)).astype(float)
    >>> X[:,1:3] = np.random.randn(15,2)
    >>> _isdummy(X)
    array([0, 3, 4])
    """
    X = np.asarray(X)
    # BUG FIX: the original 1d branch referenced `ind` before assignment,
    # raising NameError; treat a 1d array as a single column instead.
    if X.ndim == 1:
        X = X[:, None]
    has_one_max = np.max(X, axis=0) == 1
    has_zero_min = np.min(X, axis=0) == 0
    integral = np.all(X % 1. == 0, axis=0)
    return np.where(has_one_max & has_zero_min & integral)[0]
def _get_dummy_index(X, const_idx):
    """Return (indices, flag) for the dummy columns of X."""
    dummy_ind = _isdummy(X)
    # adjust back for a constant because effects doesn't have one
    if const_idx is not None:
        dummy_ind[dummy_ind > const_idx] -= 1
    if dummy_ind.size:
        return dummy_ind, True
    # nothing to do; None is what the standard-error function expects
    return None, False
def _iscount(X):
    """
    Given an array X, returns the column indices for count variables.

    A count column is non-negative, integral, non-constant and not a dummy.

    Parameters
    ----------
    X : array-like
        A 1d or 2d array of numbers

    Examples
    --------
    >>> X = np.random.randint(0, 10, size=(15,5)).astype(float)
    >>> X[:,1:3] = np.random.randn(15,2)
    >>> _iscount(X)
    array([0, 3, 4])
    """
    X = np.asarray(X)
    integral = np.all(X % 1. == 0, axis=0)
    nonconstant = X.var(0) != 0
    nonnegative = np.all(X >= 0, axis=0)
    candidates = np.where(integral & nonconstant & nonnegative)[0].tolist()
    # dummies satisfy all three tests above but are handled separately
    for idx in _isdummy(X):
        candidates.remove(idx)
    return np.array(candidates)
def _get_count_index(X, const_idx):
    """Return (indices, flag) for the count columns of X."""
    count_ind = _iscount(X)
    # adjust back for a constant because effects doesn't have one
    if const_idx is not None:
        count_ind[count_ind > const_idx] -= 1
    if count_ind.size:
        return count_ind, True
    # nothing to do; None is what the standard-error function expects
    return None, False
def _get_margeff_exog(exog, at, atexog, ind):
    """Return the exog values at which to evaluate marginal effects.

    Parameters
    ----------
    exog : ndarray
        Model exogenous variables (modified in place for dict `atexog`).
    at : str
        One of 'overall', 'mean', 'median', 'zero', 'all'.
    atexog : dict, ndarray or None
        User-supplied values; a dict maps column index -> value, an array
        replaces exog wholesale.
    ind : ndarray of bool
        True for non-constant columns (used by the 'zero' option).
    """
    if atexog is not None:  # user supplied
        if isinstance(atexog, dict):
            # assumes values are singular or of len(exog)
            for key in atexog:
                exog[:, key] = atexog[key]
        elif isinstance(atexog, np.ndarray):  # TODO: handle DataFrames
            if atexog.ndim == 1:
                k_vars = len(atexog)
            else:
                # BUG FIX: was assigned to `K_vars`, so the check below
                # raised NameError (masked by a bare except) for 2d input.
                k_vars = atexog.shape[1]
            if k_vars != exog.shape[1]:
                raise ValueError("atexog does not have the same number "
                                 "of variables as exog")
            exog = atexog

    #NOTE: we should fill in atexog after we process at
    if at == 'mean':
        exog = np.atleast_2d(exog.mean(0))
    elif at == 'median':
        exog = np.atleast_2d(np.median(exog, axis=0))
    elif at == 'zero':
        exog = np.zeros((1, exog.shape[1]))
        exog[0, ~ind] = 1  # the constant column stays at 1
    return exog
def _get_count_effects(effects, exog, count_ind, method, model, params):
    """
    If there's a count variable, the predicted difference is taken by
    subtracting one and adding one to exog then averaging the difference
    """
    # `i` indexes both the effects column and the count column in exog
    for i in count_ind:
        perturbed = exog.copy()
        perturbed[:, i] -= 1
        below = model.predict(params, perturbed)
        perturbed[:, i] += 2
        above = model.predict(params, perturbed)
        #NOTE: done by analogy with dummy effects but untested bc
        # stata doesn't handle both count and eydx anywhere
        if 'ey' in method:
            below = np.log(below)
            above = np.log(above)
        effects[:, i] = (above - below) / 2
    return effects
def _get_dummy_effects(effects, exog, dummy_ind, method, model, params):
    """
    If there's a dummy variable, the predicted difference is taken at
    0 and 1
    """
    # `i` indexes both the effects column and the dummy column in exog
    for i in dummy_ind:
        scenario = exog.copy()  # one copy per dummy, reused for both levels
        scenario[:, i] = 0
        at_zero = model.predict(params, scenario)
        scenario[:, i] = 1
        at_one = model.predict(params, scenario)
        if 'ey' in method:
            at_zero = np.log(at_zero)
            at_one = np.log(at_one)
        effects[:, i] = at_one - at_zero
    return effects
def _effects_at(effects, at):
    """Reduce per-observation effects according to the `at` option."""
    if at == 'all':
        # keep one row of effects per observation
        return effects
    if at == 'overall':
        return effects.mean(0)
    # 'mean'/'median'/'zero': exog was a single row, so take it
    return effects[0, :]
def _margeff_cov_params_dummy(model, cov_margins, params, exog, dummy_ind,
                              method, J):
    r"""
    Returns the Jacobian for discrete regressors for use in margeff_cov_params.

    For discrete regressors the marginal effect is

    \Delta F = F(XB) | d = 1 - F(XB) | d = 0

    The row of the Jacobian for this variable is given by

    f(XB)*X | d = 1 - f(XB)*X | d = 0

    Where F is the default prediction of the model.
    """
    for i in dummy_ind:
        exog0 = exog.copy()
        exog1 = exog.copy()
        exog0[:,i] = 0
        exog1[:,i] = 1
        dfdb0 = model._derivative_predict(params, exog0, method)
        dfdb1 = model._derivative_predict(params, exog1, method)
        dfdb = (dfdb1 - dfdb0)
        if dfdb.ndim >= 2: # for overall
            dfdb = dfdb.mean(0)
        if J > 1:
            # Floor division so the slice step stays an int under Py3
            # (plain `/` only worked by Py2's integer-division accident).
            K = dfdb.shape[1] // (J-1)
            cov_margins[i::K, :] = dfdb
        else:
            cov_margins[i, :] = dfdb # how each F changes with change in B
    return cov_margins
def _margeff_cov_params_count(model, cov_margins, params, exog, count_ind,
                              method, J):
    r"""
    Returns the Jacobian for discrete regressors for use in margeff_cov_params.

    For discrete regressors the marginal effect is

    \Delta F = F(XB) | d += 1 - F(XB) | d -= 1

    The row of the Jacobian for this variable is given by

    (f(XB)*X | d += 1 - f(XB)*X | d -= 1) / 2

    where F is the default prediction for the model.
    """
    for i in count_ind:
        exog0 = exog.copy()
        exog0[:,i] -= 1
        dfdb0 = model._derivative_predict(params, exog0, method)
        exog0[:,i] += 2
        dfdb1 = model._derivative_predict(params, exog0, method)
        dfdb = (dfdb1 - dfdb0)
        if dfdb.ndim >= 2: # for overall
            dfdb = dfdb.mean(0) / 2
        if J > 1:
            # Floor division so the slice step stays an int under Py3
            # (plain `/` only worked by Py2's integer-division accident).
            K = dfdb.shape[1] // (J-1)
            cov_margins[i::K, :] = dfdb
        else:
            cov_margins[i, :] = dfdb # how each F changes with change in B
    return cov_margins
def margeff_cov_params(model, params, exog, cov_params, at, derivative,
                       dummy_ind, count_ind, method, J):
    """
    Computes the variance-covariance of marginal effects by the delta method.

    Parameters
    ----------
    model : model instance
        The model that returned the fitted results. Its pdf method is used
        for computing the Jacobian of discrete variables in dummy_ind and
        count_ind
    params : array-like
        estimated model parameters
    exog : array-like
        exogenous variables at which to calculate the derivative
    cov_params : array-like
        The variance-covariance of the parameters
    at : str
        Options are:

        - 'overall', The average of the marginal effects at each
          observation.
        - 'mean', The marginal effects at the mean of each regressor.
        - 'median', The marginal effects at the median of each regressor.
        - 'zero', The marginal effects at zero for each regressor.
        - 'all', The marginal effects at each observation.

        Only overall has any effect here.
    derivative : function or array-like
        If a function, it returns the marginal effects of the model with
        respect to the exogenous variables evaluated at exog. Expected to be
        called derivative(params, exog). This will be numerically
        differentiated. Otherwise, it can be the Jacobian of the marginal
        effects with respect to the parameters.
    dummy_ind : array-like
        Indices of the columns of exog that contain dummy variables
    count_ind : array-like
        Indices of the columns of exog that contain count variables

    Notes
    -----
    For continuous regressors, the variance-covariance is given by

    Asy. Var[MargEff] = [d margeff / d params] V [d margeff / d params]'

    where V is the parameter variance-covariance.

    The outer Jacobians are computed via numerical differentiation if
    derivative is a function.
    """
    if callable(derivative):
        from statsmodels.tools.numdiff import approx_fprime_cs
        params = params.ravel('F')  # for Multinomial
        try:
            jacobian_mat = approx_fprime_cs(params, derivative,
                                            args=(exog, method))
        except TypeError:  # norm.cdf doesn't take complex values
            # BUG FIX: module is ``statsmodels.tools`` (was misspelled
            # ``statsmodels.tool``), which made this fallback unreachable.
            from statsmodels.tools.numdiff import approx_fprime1
            jacobian_mat = approx_fprime1(params, derivative,
                                          args=(exog, method))
        if at == 'overall':
            jacobian_mat = np.mean(jacobian_mat, axis=1)
        else:
            jacobian_mat = jacobian_mat.squeeze()  # exog was 2d row vector
        if dummy_ind is not None:
            jacobian_mat = _margeff_cov_params_dummy(model, jacobian_mat,
                                params, exog, dummy_ind, method, J)
        if count_ind is not None:
            jacobian_mat = _margeff_cov_params_count(model, jacobian_mat,
                                params, exog, count_ind, method, J)
    else:
        jacobian_mat = derivative

    #NOTE: this won't go through for at == 'all'
    return np.dot(np.dot(jacobian_mat, cov_params), jacobian_mat.T)
def margeff_cov_with_se(model, params, exog, cov_params, at, derivative,
                        dummy_ind, count_ind, method, J):
    """
    See margeff_cov_params.

    Same function but returns both the covariance of the marginal effects
    and their standard errors.
    """
    cov_me = margeff_cov_params(model, params, exog, cov_params, at,
                                derivative, dummy_ind, count_ind, method, J)
    # Standard errors come from the covariance diagonal.
    return cov_me, np.sqrt(np.diag(cov_me))
def margeff():
    # Placeholder for a general margeff entry point; not implemented yet.
    pass
# Human-readable labels for each marginal-effects transform; used as
# column/row headers in get_frame and summary tables.
_transform_names = dict(dydx='dy/dx',
                        eyex='d(lny)/d(lnx)',
                        dyex='dy/d(lnx)',
                        eydx='d(lny)/dx')
class Margins(object):
    """
    Mostly a do nothing class. Lays out the methods expected of a sub-class.

    This is just a sketch of what we may want out of a general margins class.
    I (SS) need to look at details of other models.
    """
    def __init__(self, results, get_margeff, derivative, dist=None,
            margeff_args=()):
        # NOTE(review): `derivative` is accepted but never stored or used
        # here -- confirm whether sub-classes are expected to consume it.
        self._cache = resettable_cache()  # backing store for cache_readonly
        self.results = results
        self.dist = dist
        self._get_margeff = get_margeff  # callable that computes the effects
        self.get_margeff(margeff_args)

    def _reset(self):
        # Invalidate all cached derived statistics.
        self._cache = resettable_cache()

    def get_margeff(self, *args, **kwargs):
        # Recompute the marginal effects; kwargs are currently ignored.
        self._reset()
        self.margeff = self._get_margeff(*args)

    @cache_readonly
    def tvalues(self):
        # To be provided by sub-classes.
        raise NotImplementedError

    @cache_readonly
    def cov_margins(self):
        raise NotImplementedError

    @cache_readonly
    def margins_se(self):
        raise NotImplementedError

    def get_frame(self):
        raise NotImplementedError

    @cache_readonly
    def pvalues(self):
        raise NotImplementedError

    def conf_int(self, alpha=.05):
        raise NotImplementedError

    def summary(self, alpha=.05):
        raise NotImplementedError
class DiscreteMargins(object):
    """Marginal effects for a discrete-choice model's results.

    After construction, holds `margeff` (the effects), `margeff_se`
    (delta-method standard errors) and `margeff_cov` (their covariance),
    plus derived z-statistics, p-values and confidence intervals.
    """
    def __init__(self, results, args=()):
        self._cache = resettable_cache()  # backing store for cache_readonly
        self.results = results
        self.get_margeff(*args)

    def _reset(self):
        # Invalidate cached derived statistics (tvalues, pvalues, ...).
        self._cache = resettable_cache()

    # NOTE: a duplicate ``get_margeff(self, *args, **kwargs)`` stub that was
    # shadowed by the full definition below (and referenced a never-set
    # ``self._get_margeff`` attribute) has been removed.

    @cache_readonly
    def tvalues(self):
        # z-statistics of the marginal effects.
        return self.margeff / self.margeff_se

    def get_frame(self, alpha=.05):
        """Return the effects and their statistics as a pandas DataFrame.

        Parameters
        ----------
        alpha : float
            Significance level for the confidence-interval columns.
        """
        from pandas import DataFrame
        names = [_transform_names[self.margeff_options['method']],
                 'Std. Err.', 'z', 'Pr(>|z|)',
                 'Conf. Int. Low', 'Conf. Int. Hi.']  # fixed 'Cont.' typo
        ind = self.results.model.exog.var(0) != 0  # True if not a constant
        exog_names = self.results.model.exog_names
        var_names = [name for i, name in enumerate(exog_names) if ind[i]]
        table = np.column_stack((self.margeff, self.margeff_se, self.tvalues,
                                 self.pvalues, self.conf_int(alpha)))
        return DataFrame(table, columns=names, index=var_names)

    @cache_readonly
    def pvalues(self):
        # Two-sided p-values from the standard normal distribution.
        return norm.sf(np.abs(self.tvalues)) * 2

    def conf_int(self, alpha=.05):
        """Return the (lower, upper) confidence limits for the effects."""
        me_se = self.margeff_se
        q = norm.ppf(1 - alpha / 2)
        lower = self.margeff - q * me_se
        upper = self.margeff + q * me_se
        # list(...) keeps this correct if zip returns an iterator (Py3).
        return np.asarray(list(zip(lower, upper)))

    def summary(self, alpha=.05):
        """
        Returns a summary table for marginal effects

        Parameters
        ----------
        alpha : float
            Number between 0 and 1. The confidence intervals have the
            probability 1-alpha.

        Returns
        -------
        Summary : SummaryTable
            A SummaryTable instance
        """
        results = self.results
        model = results.model
        title = model.__class__.__name__ + " Marginal Effects"
        method = self.margeff_options['method']
        top_left = [('Dep. Variable:', [model.endog_names]),
                    ('Method:', [method]),
                    ('At:', [self.margeff_options['at']]),]

        from statsmodels.iolib.summary import (Summary, summary_params,
                                               table_extend)
        exog_names = model.exog_names[:]  # copy
        smry = Summary()

        # sigh, we really need to hold on to this in _data...
        _, const_idx = _get_const_index(model.exog)
        if const_idx is not None:
            exog_names.pop(const_idx)

        J = int(getattr(model, "J", 1))
        if J > 1:
            # multi-equation (e.g. multinomial) models get per-equation names
            yname, yname_list = results._get_endog_name(model.endog_names,
                                                        None, all=True)
        else:
            yname = model.endog_names
            yname_list = [yname]

        smry.add_table_2cols(self, gleft=top_left, gright=[],
                             yname=yname, xname=exog_names, title=title)

        #NOTE: add_table_params is not general enough yet for margeff
        # could use a refactor with getattr instead of hard-coded params
        # tvalues etc.
        table = []
        conf_int = self.conf_int(alpha)
        margeff = self.margeff
        margeff_se = self.margeff_se
        tvalues = self.tvalues
        pvalues = self.pvalues
        if J > 1:
            # build one sub-table per equation, then stack them
            for eq in range(J):
                restup = (results, margeff[:,eq], margeff_se[:,eq],
                          tvalues[:,eq], pvalues[:,eq], conf_int[:,:,eq])
                tble = summary_params(restup, yname=yname_list[eq],
                                      xname=exog_names, alpha=alpha,
                                      use_t=False, skip_header=True)
                tble.title = yname_list[eq]
                # overwrite coef with method name
                header = ['', _transform_names[method], 'std err', 'z',
                          'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
                tble.insert_header_row(0, header)
                table.append(tble)

            table = table_extend(table, keep_headers=True)
        else:
            restup = (results, margeff, margeff_se, tvalues, pvalues, conf_int)
            table = summary_params(restup, yname=yname, xname=exog_names,
                                   alpha=alpha, use_t=False, skip_header=True)
            header = ['', _transform_names[method], 'std err', 'z',
                      'P>|z|', '[%3.1f%% Conf. Int.]' % (100-alpha*100)]
            table.insert_header_row(0, header)

        smry.tables.append(table)
        return smry

    def get_margeff(self, at='overall', method='dydx', atexog=None,
                    dummy=False, count=False):
        """Get marginal effects of the fitted model.

        Parameters
        ----------
        at : str, optional
            Options are:

            - 'overall', The average of the marginal effects at each
              observation.
            - 'mean', The marginal effects at the mean of each regressor.
            - 'median', The marginal effects at the median of each regressor.
            - 'zero', The marginal effects at zero for each regressor.
            - 'all', The marginal effects at each observation.

            Note that if `exog` is specified, then marginal effects for all
            variables not specified by `exog` are calculated using the `at`
            option.
        method : str, optional
            Options are:

            - 'dydx' - dy/dx - No transformation is made and marginal effects
              are returned. This is the default.
            - 'eyex' - estimate elasticities of variables in `exog` --
              d(lny)/d(lnx)
            - 'dyex' - estimate semielasticity -- dy/d(lnx)
            - 'eydx' - estimate semielasticity -- d(lny)/dx

            Note that transformations are done after each observation is
            calculated. Semi-elasticities for binary variables are computed
            using the midpoint method. 'dyex' and 'eyex' do not make sense
            for discrete variables.
        atexog : array-like, optional
            Optionally, you can provide the exogenous variables over which to
            get the marginal effects. This should be a dictionary with the key
            as the zero-indexed column number and the value of the dictionary.
            Default is None for all independent variables less the constant.
        dummy : bool, optional
            If False, treats binary variables (if present) as continuous. This
            is the default. Else if True, treats binary variables as
            changing from 0 to 1. Note that any variable that is either 0 or 1
            is treated as binary. Each binary variable is treated separately
            for now.
        count : bool, optional
            If False, treats count variables (if present) as continuous. This
            is the default. Else if True, the marginal effect is the
            change in probabilities when each observation is increased by one.

        Returns
        -------
        effects : ndarray
            the marginal effect corresponding to the input options

        Notes
        -----
        When using after Poisson, returns the expected number of events
        per period, assuming that the model is loglinear.
        """
        self._reset()  # always reset the cache when this is called
        #TODO: if at is not all or overall, we can also put atexog values
        # in summary table head
        method = method.lower()
        at = at.lower()
        _check_margeff_args(at, method)
        self.margeff_options = dict(method=method, at=at)
        results = self.results
        model = results.model
        params = results.params
        exog = model.exog.copy()  # copy because values are changed
        effects_idx, const_idx = _get_const_index(exog)

        if dummy:
            _check_discrete_args(at, method)
            dummy_idx, dummy = _get_dummy_index(exog, const_idx)
        else:
            dummy_idx = None

        if count:
            _check_discrete_args(at, method)
            count_idx, count = _get_count_index(exog, const_idx)
        else:
            count_idx = None

        # get the exogenous variables at the requested evaluation point
        exog = _get_margeff_exog(exog, at, atexog, effects_idx)

        # get base marginal effects, handled by sub-classes
        effects = model._derivative_exog(params, exog, method,
                                         dummy_idx, count_idx)
        J = getattr(model, 'J', 1)
        effects_idx = np.tile(effects_idx, J)  # adjust for multi-equation.

        effects = _effects_at(effects, at)

        # Set standard error of the marginal effects by Delta method.
        margeff_cov, margeff_se = margeff_cov_with_se(model, params, exog,
                                                results.cov_params(), at,
                                                model._derivative_exog,
                                                dummy_idx, count_idx,
                                                method, J)

        # reshape for multi-equation
        if J > 1:
            K = model.K - np.any(~effects_idx)  # subtract constant
            self.margeff = effects[effects_idx].reshape(K, J, order='F')
            self.margeff_se = margeff_se[effects_idx].reshape(K, J, order='F')
            self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
        else:
            # don't care about the constant
            self.margeff_cov = margeff_cov[effects_idx][:, effects_idx]
            self.margeff_se = margeff_se[effects_idx]
            self.margeff = effects[effects_idx]
|
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Bridge for using cclib data in openbabel (http://openbabel.org)."""
import openbabel as ob
def makeopenbabel(atomcoords, atomnos, charge=0, mult=1):
    """Create an Open Babel molecule.

    >>> import numpy, openbabel
    >>> atomnos = numpy.array([1,8,1],"i")
    >>> coords = numpy.array([[-1.,1.,0.],[0.,0.,0.],[1.,1.,0.]])
    >>> obmol = makeopenbabel(coords, atomnos)
    >>> obconversion = openbabel.OBConversion()
    >>> formatok = obconversion.SetOutFormat("inchi")
    >>> print obconversion.WriteString(obmol).strip()
    InChI=1/H2O/h1H2
    """
    obmol = ob.OBMol()
    for atomno, xyz in zip(atomnos, atomcoords):
        obatom = ob.OBAtom()
        obatom.SetAtomicNum(int(atomno))
        # Note that .tolist() is used; list(xyz) is not equivalent!!!
        obatom.SetVector(*xyz.tolist())
        obmol.AddAtom(obatom)
    obmol.ConnectTheDots()
    obmol.PerceiveBondOrders()
    obmol.SetTotalSpinMultiplicity(mult)
    obmol.SetTotalCharge(charge)
    return obmol
if __name__ == "__main__":
    # Run the doctest embedded in makeopenbabel's docstring.
    import doctest
    doctest.testmod()
The shape of atomcoords has changed.
# This file is part of cclib (http://cclib.github.io), a library for parsing
# and interpreting the results of computational chemistry packages.
#
# Copyright (C) 2006-2014, the cclib development team
#
# The library is free software, distributed under the terms of
# the GNU Lesser General Public version 2.1 or later. You should have
# received a copy of the license along with cclib. You can also access
# the full license online at http://www.gnu.org/copyleft/lgpl.html.
"""Bridge for using cclib data in openbabel (http://openbabel.org)."""
import openbabel as ob
def makeopenbabel(atomcoords, atomnos, charge=0, mult=1):
    """Create an Open Babel molecule.

    >>> import numpy, openbabel
    >>> atomnos = numpy.array([1, 8, 1], "i")
    >>> coords = numpy.array([[-1., 1., 0.], [0., 0., 0.], [1., 1., 0.]])
    >>> obmol = makeopenbabel(coords, atomnos)
    >>> obconversion = openbabel.OBConversion()
    >>> formatok = obconversion.SetOutFormat("inchi")
    >>> print obconversion.WriteString(obmol).strip()
    InChI=1/H2O/h1H2
    """
    obmol = ob.OBMol()
    lastframe = atomcoords[-1]  # atomcoords holds one frame per geometry step
    for atomno, xyz in zip(atomnos, lastframe):
        obatom = ob.OBAtom()
        obatom.SetAtomicNum(int(atomno))
        # Note that .tolist() is used; list(xyz) is not equivalent!!!
        obatom.SetVector(*xyz.tolist())
        obmol.AddAtom(obatom)
    obmol.ConnectTheDots()
    obmol.PerceiveBondOrders()
    obmol.SetTotalSpinMultiplicity(mult)
    obmol.SetTotalCharge(charge)
    return obmol
if __name__ == "__main__":
    # Run the doctest embedded in makeopenbabel's docstring.
    import doctest
    doctest.testmod()
|
import kii
import kiiobject
class BucketAPI(object):
    # REST helper for bucket-level operations (currently only query).
    def __init__(self, context):
        # context is expected to provide url, app_id and newClient()
        self.context = context

    def query(self, bucket, condition):
        # Execute a bucket query and wrap each result row as a KiiObject.
        # Raises kii.CloudException on any non-200 response.
        client = self.context.newClient()
        client.method = "POST"
        client.url = "%s/apps/%s/%s/%s" % (self.context.url, self.context.app_id, bucket.getPath(), "query")
        print client.url  # debug output
        client.setContentType('application/vnd.kii.QueryRequest+json')
        client.setKiiHeaders(self.context, True)
        (code, body) = client.send(condition.toDict())
        print body  # debug output
        if code != 200:
            raise kii.CloudException(code, body)
        return [kiiobject.KiiObject(bucket, o["_id"], o) for o in body["results"]]
class KiiBucket(object):
    """A named bucket belonging to an owner scope (user, group or app)."""
    def __init__(self, owner, name):
        self.owner = owner
        self.name = name

    def getPath(self):
        # The owner object supplies its own path prefix.
        return "/".join([self.owner.getPath(), "buckets", self.name])
class KiiCondition(object):
    """A bucket query condition: a clause plus optional paging/ordering.

    NOTE: the 'decending'/'pagenationKey' spellings are the keys sent on
    the wire, so they are kept as-is.
    """
    def __init__(self, clause, orderBy=None, decending=None, limit=None, pagenationKey=None):
        self.clause = clause
        self.orderBy = orderBy
        self.decending = decending
        self.limit = limit
        self.pagenationKey = pagenationKey

    def toDict(self):
        # pick up all non-None fields (`is not None`, so falsy values
        # such as limit=0 or decending=False are still included)
        query = {k: self.__dict__[k]
                 for k in ("orderBy", "decending", "limit", "pagenationKey")
                 if self.__dict__[k] is not None}
        query["clause"] = self.clause.toDict()
        return {"bucketQuery": query}
class KiiClause(object):
    """A single query clause; build instances via the static factories."""
    def __init__(self, type, **data):
        self.type = type
        self.data = data

    def toDict(self):
        # Serialize the clause, recursively converting nested clauses.
        result = {"type": self.type}
        # items() instead of iteritems(): identical iteration, Py3-safe.
        result.update({k: self.toDictAll(v) for (k, v) in self.data.items()})
        return result

    def toDictAll(self, v):
        # Convert v (a clause, a sequence of clauses, or a plain value).
        if isinstance(v, KiiClause):
            return v.toDict()
        elif isinstance(v, (list, tuple)):
            if len(v) > 0 and isinstance(v[0], KiiClause):
                return [x.toDict() for x in v]
        return v

    @staticmethod
    def all():
        """
        >>> a = KiiClause.all()
        >>> a.toDict() == {'type': 'all'}
        True
        """
        return KiiClause('all')

    @staticmethod
    def equals(field, value):
        """
        >>> a = KiiClause.equals("N", "V")
        >>> a.toDict() == {'type': 'eq', 'field': 'N', 'value': 'V'}
        True
        >>> a = KiiClause.equals("N", 123)
        >>> a.toDict() == {'type': 'eq', 'field': 'N', 'value': 123}
        True
        """
        return KiiClause('eq', field=field, value=value)

    @staticmethod
    def greaterThan(field, value, included):
        """
        >>> a = KiiClause.greaterThan("f", 100, True)
        >>> a.toDict() == {'type': 'range', 'field': 'f', 'lowerLimit': 100, 'lowerIncluded': True}
        True
        """
        return KiiClause('range', field=field, lowerLimit=value, lowerIncluded=included)

    @staticmethod
    def lessThan(field, value, included):
        """
        >>> a = KiiClause.lessThan("f", 200, False)
        >>> a.toDict() == {'type': 'range', 'field': 'f', 'upperLimit': 200, 'upperIncluded': False}
        True
        """
        return KiiClause('range', field=field, upperLimit=value, upperIncluded=included)

    @staticmethod
    def range(field, lowerValue, lowerIncluded, upperValue, upperIncluded):
        """
        >>> a = KiiClause.range("f", 200, False, 500, True)
        >>> a.toDict() == {'type': 'range', 'field': 'f', 'lowerLimit': 200, 'lowerIncluded':False, 'upperLimit': 500, 'upperIncluded': True}
        True
        """
        return KiiClause('range', field=field, lowerLimit=lowerValue, lowerIncluded=lowerIncluded, upperLimit=upperValue, upperIncluded=upperIncluded)

    @staticmethod
    def inClause(field, values):
        """
        >>> a = KiiClause.inClause("f", [1,2,3,5,8,13])
        >>> a.toDict() == {'type': 'in', 'field': 'f', 'values': [1,2,3,5,8,13]}
        True
        """
        return KiiClause("in", field=field, values=values)

    @staticmethod
    def notClause(clause):
        """
        >>> a = KiiClause.notClause(KiiClause.equals("a", 100))
        >>> a.toDict() == {'type': 'not', 'clause': {'type': 'eq', 'field': 'a', 'value': 100}}
        True
        """
        return KiiClause('not', clause=clause)

    @staticmethod
    def andClause(clauses):
        """
        >>> a = KiiClause.andClause([KiiClause.equals("a", 100), KiiClause.equals("b", 200)])
        >>> a.toDict() == {'type': 'and', 'clauses': [{'type': 'eq', 'field': 'a', 'value': 100}, {'type':'eq', 'field':'b', 'value':200}]}
        True
        """
        return KiiClause('and', clauses=clauses)

    @staticmethod
    def orClause(clauses):
        """
        >>> a = KiiClause.orClause([KiiClause.equals("a", 100), KiiClause.equals("b", 200)])
        >>> a.toDict() == {'type': 'or', 'clauses': [{'type': 'eq', 'field': 'a', 'value': 100}, {'type':'eq', 'field':'b', 'value':200}]}
        True
        """
        return KiiClause('or', clauses=clauses)
def doctest():
    """Execute every doctest embedded in this module's docstrings."""
    import doctest as doctest_module
    doctest_module.testmod()
Cleaned up the implementation of KiiClause and KiiCondition.
import kii
import kiiobject
import collections
class BucketAPI(object):
    """Query API for Kii Cloud buckets."""
    def __init__(self, context):
        # context supplies app credentials, the base URL and newClient().
        self.context = context
    def query(self, bucket, condition):
        """POST a query request to the bucket and return matching objects.

        condition must be JSON-serializable by client.send (e.g. a
        KiiCondition dict). Raises kii.CloudException on non-200 responses.
        """
        client = self.context.newClient()
        client.method = "POST"
        client.url = "%s/apps/%s/%s/%s" % (self.context.url, self.context.app_id, bucket.getPath(), "query")
        print client.url  # debug output -- TODO confirm it should stay
        client.setContentType('application/vnd.kii.QueryRequest+json')
        client.setKiiHeaders(self.context, True)
        (code, body) = client.send(condition)
        print body  # debug output -- TODO confirm it should stay
        if code != 200:
            raise kii.CloudException(code, body)
        # Wrap each result dict in a KiiObject keyed by its "_id".
        return [kiiobject.KiiObject(bucket, o["_id"], o) for o in body["results"]]
class KiiBucket(object):
    """A named bucket belonging to an owner scope (app, user or group)."""
    def __init__(self, owner, name):
        # The owner must expose getPath(); its path prefixes the bucket path.
        self.owner = owner
        self.name = name
    def getPath(self):
        """Return the REST path of this bucket under its owner."""
        return '{0}/buckets/{1}'.format(self.owner.getPath(), self.name)
class KiiCondition(collections.defaultdict):
    """Dict-shaped query condition: a mandatory 'clause' plus optional knobs.

    The optional key spellings ('orderBy', 'decending', 'limit',
    'pagenationKey') are part of the wire format and kept as-is; a key is
    stored only when the corresponding argument is given.
    """
    def __init__(self, clause, orderBy=None, decending=None, limit=None, pagenationKey=None):
        self["clause"] = clause
        optional = (("orderBy", orderBy),
                    ("decending", decending),
                    ("limit", limit),
                    ("pagenationKey", pagenationKey))
        for key, value in optional:
            if value is not None:
                self[key] = value
class KiiClause(collections.defaultdict):
    """A bucket-query clause represented directly as a JSON-ready dict.

    Every instance carries a 'type' key; the static factory methods below
    build the individual clause kinds.
    """
    def __init__(self, type, **data):
        self["type"] = type
        for key, value in data.items():
            self[key] = value
    @staticmethod
    def all():
        """Clause matching every object: {'type': 'all'}."""
        return KiiClause('all')
    @staticmethod
    def equals(field, value):
        """Clause matching objects whose field equals value."""
        return KiiClause('eq', field=field, value=value)
    @staticmethod
    def greaterThan(field, value, included):
        """Lower-bounded range clause; included makes the bound inclusive."""
        return KiiClause('range', field=field,
                         lowerLimit=value, lowerIncluded=included)
    @staticmethod
    def lessThan(field, value, included):
        """Upper-bounded range clause; included makes the bound inclusive."""
        return KiiClause('range', field=field,
                         upperLimit=value, upperIncluded=included)
    @staticmethod
    def range(field, lowerValue, lowerIncluded, upperValue, upperIncluded):
        """Doubly-bounded range clause."""
        return KiiClause('range', field=field,
                         lowerLimit=lowerValue, lowerIncluded=lowerIncluded,
                         upperLimit=upperValue, upperIncluded=upperIncluded)
    @staticmethod
    def inClause(field, values):
        """Clause matching objects whose field is any of values."""
        return KiiClause("in", field=field, values=values)
    @staticmethod
    def notClause(clause):
        """Negation of another clause."""
        return KiiClause('not', clause=clause)
    @staticmethod
    def andClause(clauses):
        """Conjunction of a list of clauses."""
        return KiiClause('and', clauses=clauses)
    @staticmethod
    def orClause(clauses):
        """Disjunction of a list of clauses."""
        return KiiClause('or', clauses=clauses)
def doctest():
    """Execute every doctest embedded in this module's docstrings."""
    import doctest
    # Removed an unused local "import json"; the doctests that need json
    # import it themselves.
    doctest.testmod()
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    doctest()
|
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from inferbeddings.knowledgebase import Fact, KnowledgeBaseParser
from inferbeddings.parse import parse_clause
from inferbeddings.regularizers.util import clauses_to_equality_loss
import pytest
def test_losses():
    """Check clauses_to_equality_loss against a hand-computed equivalent.

    Builds a tiny knowledge base, creates the regularization loss for two
    clauses under the TransE model, and compares it with the loss computed
    directly from the predicate embeddings.
    """
    triples = [
        ('e1', 'p', 'e2'),
        ('e2', 'q', 'e3'),
        ('e1', 'r', 'e2'),
        ('e2', 's', 'e3')
    ]

    def fact(s, p, o):
        # Convenience: (subject, predicate, object) triple -> Fact.
        return Fact(predicate_name=p, argument_names=[s, o])
    facts = [fact(s, p, o) for s, p, o in triples]
    parser = KnowledgeBaseParser(facts)
    nb_predicates = len(parser.predicate_vocabulary)
    predicate_embedding_size = 100
    # nb_predicates + 1 rows -- presumably row 0 is reserved; confirm against
    # KnowledgeBaseParser's indexing.
    predicate_embedding_layer = tf.get_variable('predicates',
                                                shape=[nb_predicates + 1, predicate_embedding_size],
                                                initializer=tf.contrib.layers.xavier_initializer())
    # p(X, Y) :- q(Y, X) ties p to the inverse of q; r(X, Y) :- s(X, Y) ties
    # r directly to s.
    clauses = [parse_clause('p(X, Y) :- q(Y, X)'), parse_clause('r(X, Y) :- s(X, Y)')]
    loss = clauses_to_equality_loss('TransE', clauses, 'l2_sqr',
                                    predicate_embedding_layer,
                                    parser.predicate_to_index)
    init_op = tf.global_variables_initializer()
    # Fresh random embeddings on every iteration so the equality below is
    # exercised over many random draws.
    for i in range(1024):
        with tf.Session() as session:
            session.run(init_op)
            loss_value = session.run([loss])[0]
            p_idx, q_idx = parser.predicate_to_index['p'], parser.predicate_to_index['q']
            r_idx, s_idx = parser.predicate_to_index['r'], parser.predicate_to_index['s']
            predicate_embedding_layer_value = session.run([predicate_embedding_layer])[0]
            p_value, q_value = predicate_embedding_layer_value[p_idx, :], predicate_embedding_layer_value[q_idx, :]
            r_value, s_value = predicate_embedding_layer_value[r_idx, :], predicate_embedding_layer_value[s_idx, :]
            # Expected: |p + q|^2 for the inverse-relation clause plus
            # |r - s|^2 for the same-direction clause (assumption -- confirm
            # against the regularizer's TransE handling).
            estimated_loss_value = np.square(p_value + q_value).sum() + np.square(r_value - s_value).sum()
            assert loss_value > 0
            assert estimated_loss_value > 0
            # NOTE(review): the third positional argument of assert_allclose
            # is rtol, so rtol=4 tolerates 400% relative error; decimal=4 of
            # assert_almost_equal was probably intended -- TODO confirm.
            np.testing.assert_allclose(loss_value, estimated_loss_value, 4)
# Allow running this test module directly: delegate to pytest.
if __name__ == '__main__':
    pytest.main([__file__])
More robust testing.
# -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
from inferbeddings.knowledgebase import Fact, KnowledgeBaseParser
from inferbeddings.parse import parse_clause
from inferbeddings.regularizers.util import clauses_to_equality_loss
import pytest
def test_losses():
    """Check clauses_to_equality_loss against a hand-computed equivalent.

    Builds a tiny knowledge base, creates the regularization loss for two
    clauses under the TransE model, trains the embeddings briefly, and
    compares the loss with one computed directly from the embeddings.
    """
    triples = [
        ('e1', 'p', 'e2'),
        ('e2', 'q', 'e3'),
        ('e1', 'r', 'e2'),
        ('e2', 's', 'e3')
    ]

    def fact(s, p, o):
        # Convenience: (subject, predicate, object) triple -> Fact.
        return Fact(predicate_name=p, argument_names=[s, o])
    facts = [fact(s, p, o) for s, p, o in triples]
    parser = KnowledgeBaseParser(facts)
    nb_predicates = len(parser.predicate_vocabulary)
    predicate_embedding_size = 100
    # nb_predicates + 1 rows -- presumably row 0 is reserved; confirm against
    # KnowledgeBaseParser's indexing.
    predicate_embedding_layer = tf.get_variable('predicates',
                                                shape=[nb_predicates + 1, predicate_embedding_size],
                                                initializer=tf.contrib.layers.xavier_initializer())
    # p(X, Y) :- q(Y, X) ties p to the inverse of q; r(X, Y) :- s(X, Y) ties
    # r directly to s.
    clauses = [parse_clause('p(X, Y) :- q(Y, X)'), parse_clause('r(X, Y) :- s(X, Y)')]
    loss = clauses_to_equality_loss('TransE', clauses, 'l2_sqr',
                                    predicate_embedding_layer,
                                    parser.predicate_to_index)
    for i in range(32):
        # NOTE(review): a fresh optimizer + minimize op is added to the graph
        # on every outer iteration (the graph grows 32 times); hoisting them
        # above the loop would be equivalent and cheaper -- TODO confirm.
        optimizer = tf.train.AdagradOptimizer(0.1)
        minimization_step = optimizer.minimize(loss, var_list=[predicate_embedding_layer])
        init_op = tf.global_variables_initializer()
        with tf.Session() as session:
            session.run(init_op)
            # Train briefly so the comparison is made at a non-random point.
            for j in range(32):
                session.run([minimization_step])
            loss_value = session.run([loss])[0]
            p_idx, q_idx = parser.predicate_to_index['p'], parser.predicate_to_index['q']
            r_idx, s_idx = parser.predicate_to_index['r'], parser.predicate_to_index['s']
            predicate_embedding_layer_value = session.run([predicate_embedding_layer])[0]
            p_value, q_value = predicate_embedding_layer_value[p_idx, :], predicate_embedding_layer_value[q_idx, :]
            r_value, s_value = predicate_embedding_layer_value[r_idx, :], predicate_embedding_layer_value[s_idx, :]
            # Expected: |p + q|^2 for the inverse-relation clause plus
            # |r - s|^2 for the same-direction clause (assumption -- confirm
            # against the regularizer's TransE handling).
            estimated_loss_value = np.square(p_value + q_value).sum() + np.square(r_value - s_value).sum()
            assert loss_value > 0
            assert estimated_loss_value > 0
            # NOTE(review): the third positional argument of assert_allclose
            # is rtol, so rtol=4 tolerates 400% relative error; decimal=4 of
            # assert_almost_equal was probably intended -- TODO confirm.
            np.testing.assert_allclose(loss_value, estimated_loss_value, 4)
# Allow running this test module directly: delegate to pytest.
if __name__ == '__main__':
    pytest.main([__file__])
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Test import and export of objects with custom attributes."""
from flask.json import dumps
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
from ggrc.models import AccessGroup
from ggrc.models import Product
from ggrc.converters import errors
class TestCustomAttributeImportExport(TestCase):
  """Test import and export with custom attributes."""

  def setUp(self):
    """Setup stage for each test.

    Generate all required objects and custom attributes for import of csvs
    containing custom attributes. This stage also initializes an HTTP client
    that is used for sending import/export requests.
    """
    TestCase.setUp(self)
    self.generator = ObjectGenerator()
    self.create_custom_attributes()
    self.create_people()
    # Authenticate the test client before any import/export request.
    self.client.get("/login")
    self.headers = {
        'Content-Type': 'application/json',
        "X-Requested-By": "gGRC",
        "X-export-view": "blocks",
    }

  def create_custom_attributes(self):
    """Generate custom attributes needed for csv import.

    This function generates all custom attributes on Product and Access
    Group that are used in product_with_all_custom_attributes.csv and
    multi_word_object_custom_attribute_test.csv files.
    """
    gen = self.generator.generate_custom_attribute
    # One optional ("normal ...") and one mandatory ("man ...") attribute of
    # every supported attribute type.
    gen("product", attribute_type="Text", title="normal text")
    gen("product", attribute_type="Text", title="man text", mandatory=True)
    gen("product", attribute_type="Rich Text", title="normal RT")
    gen("product", attribute_type="Rich Text", title="man RT", mandatory=True)
    gen("product", attribute_type="Date", title="normal Date")
    gen("product", attribute_type="Date", title="man Date", mandatory=True)
    gen("product", attribute_type="Checkbox", title="normal CH")
    gen("product", attribute_type="Checkbox", title="man CH", mandatory=True)
    gen("product", attribute_type="Dropdown", title="normal select",
        options="a,b,c,d")
    gen("product", attribute_type="Dropdown", title="man select",
        options="e,f,g", mandatory=True)
    gen("product", attribute_type="Map:Person", title="normal person")
    gen("product", attribute_type="Map:Person", title="man person",
        mandatory=True)
    gen("access_group", attribute_type="Text",
        title="access group test custom", mandatory=True)

  def create_people(self):
    """Create people used in the csv files.

    This function should be removed and people should be added into the csv
    file as a Person block.
    """
    emails = [
        "user1@ggrc.com",
        "miha@policy.com",
        "someone.else@ggrc.com",
        "another@user.com",
    ]
    for email in emails:
      self.generator.generate_person({
          "name": email.split("@")[0].title(),
          "email": email,
      }, "gGRC Admin")

  def test_product_ca_import(self):
    """Test import of product with all custom attributes.

    This test covers all possible custom attributes with the mandatory flag
    turned off and on, and checks for all warnings that should be present.
    """
    filename = "product_with_all_custom_attributes.csv"
    response = self.import_file(filename)[0]
    # "Line N" below refers to rows of the imported csv fixture.
    expected_warnings = {
        "Line 6: man CH contains invalid data. The value will be ignored.",
        "Line 8: normal select contains invalid data. The value will be"
        " ignored.",
        "Line 10: man select contains invalid data. The value will be"
        " ignored.",
        "Line 11: normal CH contains invalid data. The value will be ignored.",
        "Line 12: man CH contains invalid data. The value will be ignored.",
        "Line 14: normal Date contains invalid data. The value will be"
        " ignored.",
        "Line 16: man Date contains invalid data. The value will be ignored.",
        errors.OWNER_MISSING.format(line=21, column_name="Owner"),
        "Line 22: Specified user 'kr@en.com' does not exist. That user will be"
        " ignored.",
        errors.OWNER_MISSING.format(line=22, column_name="Owner"),
        errors.OWNER_MISSING.format(line=26, column_name="Owner"),
        "Line 27: Specified user 'user@exameuple.com' does not exist. That"
        " user will be ignored.",
        errors.OWNER_MISSING.format(line=27, column_name="Owner"),
    }
    expected_errors = {
        "Line 6: Field man CH is required. The line will be ignored.",
        "Line 9: Field man select is required. The line will be ignored.",
        "Line 10: Field man select is required. The line will be ignored.",
        "Line 12: Field man CH is required. The line will be ignored.",
        "Line 16: Field man Date is required. The line will be ignored.",
        "Line 18: Field man RT is required. The line will be ignored.",
        "Line 20: Field man text is required. The line will be ignored.",
        "Line 21: Field man person is required. The line will be ignored.",
        "Line 28: Field Title is required. The line will be ignored."
    }
    self.assertEqual(expected_warnings, set(response["row_warnings"]))
    self.assertEqual(expected_errors, set(response["row_errors"]))
    self.assertEqual(17, response["created"])
    self.assertEqual(9, response["ignored"])
    self.assertEqual(17, Product.query.count())

  def tests_ca_export(self):
    """Test exporting products with custom attributes.

    This test checks that we get a proper response when exporting objects
    with custom attributes and that the response data actually contains more
    lines than an empty template would.

    This test relies on the import tests to work. If those fail they need to
    be fixed before this one.
    """
    filename = "product_with_all_custom_attributes.csv"
    self.import_file(filename)
    data = [{
        "object_name": "Product",
        "filters": {
            "expression": {},
        },
        "fields": "all",
    }]
    response = self.client.post("/_service/export_csv", data=dumps(data),
                                headers=self.headers)
    self.assert200(response)
    # 21 lines = header/block lines plus the imported products (assumption --
    # confirm against the csv fixture if this changes).
    self.assertEqual(len(response.data.splitlines()), 21)

  def test_multi_word_object_with_ca(self):
    """Test multi-word (e.g. Access Group, Data Asset) object import."""
    filename = "multi_word_object_custom_attribute_test.csv"
    response = self.import_file(filename)[0]
    self.assertEqual([], response["row_warnings"])
    self.assertEqual([], response["row_errors"])
    self.assertEqual(10, response["created"])
    self.assertEqual(0, response["ignored"])
    self.assertEqual(0, response["updated"])
    self.assertEqual(10, AccessGroup.query.count())
    # Each imported access group must carry exactly one custom attribute
    # value of the form "some text <id>".
    for id_ in range(1, 11):
      access_group = AccessGroup.query.filter(
          AccessGroup.slug == "ag-{}".format(id_)).first()
      filtered = [val for val in access_group.custom_attribute_values if
                  val.attribute_value == "some text {}".format(id_)]
      self.assertEqual(len(filtered), 1)
Update import tests

Import tests should not have error messages hard-coded, because those
messages can change at any time. Instead, the expected error messages
should be generated from the error templates and then checked.
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: miha@reciprocitylabs.com
# Maintained By: miha@reciprocitylabs.com
"""Test import and export of objects with custom attributes."""
from flask.json import dumps
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
from ggrc.models import AccessGroup
from ggrc.models import Product
from ggrc.converters import errors
class TestCustomAttributeImportExport(TestCase):
  """Test import and export with custom attributes."""

  def setUp(self):
    """Setup stage for each test.

    Generate all required objects and custom attributes for import of csvs
    containing custom attributes. This stage also initializes an HTTP client
    that is used for sending import/export requests.
    """
    TestCase.setUp(self)
    self.generator = ObjectGenerator()
    self.create_custom_attributes()
    self.create_people()
    # Authenticate the test client before any import/export request.
    self.client.get("/login")
    self.headers = {
        'Content-Type': 'application/json',
        "X-Requested-By": "gGRC",
        "X-export-view": "blocks",
    }

  def create_custom_attributes(self):
    """Generate custom attributes needed for csv import.

    This function generates all custom attributes on Product and Access
    Group that are used in product_with_all_custom_attributes.csv and
    multi_word_object_custom_attribute_test.csv files.
    """
    gen = self.generator.generate_custom_attribute
    # One optional ("normal ...") and one mandatory ("man ...") attribute of
    # every supported attribute type.
    gen("product", attribute_type="Text", title="normal text")
    gen("product", attribute_type="Text", title="man text", mandatory=True)
    gen("product", attribute_type="Rich Text", title="normal RT")
    gen("product", attribute_type="Rich Text", title="man RT", mandatory=True)
    gen("product", attribute_type="Date", title="normal Date")
    gen("product", attribute_type="Date", title="man Date", mandatory=True)
    gen("product", attribute_type="Checkbox", title="normal CH")
    gen("product", attribute_type="Checkbox", title="man CH", mandatory=True)
    gen("product", attribute_type="Dropdown", title="normal select",
        options="a,b,c,d")
    gen("product", attribute_type="Dropdown", title="man select",
        options="e,f,g", mandatory=True)
    gen("product", attribute_type="Map:Person", title="normal person")
    gen("product", attribute_type="Map:Person", title="man person",
        mandatory=True)
    gen("access_group", attribute_type="Text",
        title="access group test custom", mandatory=True)

  def create_people(self):
    """Create people used in the csv files.

    This function should be removed and people should be added into the csv
    file as a Person block.
    """
    emails = [
        "user1@ggrc.com",
        "miha@policy.com",
        "someone.else@ggrc.com",
        "another@user.com",
    ]
    for email in emails:
      self.generator.generate_person({
          "name": email.split("@")[0].title(),
          "email": email,
      }, "gGRC Admin")

  def test_product_ca_import(self):
    """Test import of product with all custom attributes.

    This test covers all possible custom attributes with the mandatory flag
    turned off and on, and checks for all warnings that should be present.
    """
    filename = "product_with_all_custom_attributes.csv"
    response = self.import_file(filename)[0]
    # Expected messages are generated from the error templates so the test
    # does not break when the wording changes; "line" refers to csv rows.
    expected_warnings = {
        errors.WRONG_VALUE.format(line=6, column_name="man CH"),
        errors.WRONG_VALUE.format(line=8, column_name="normal select"),
        errors.WRONG_VALUE.format(line=10, column_name="man select"),
        errors.WRONG_VALUE.format(line=11, column_name="normal CH"),
        errors.WRONG_VALUE.format(line=12, column_name="man CH"),
        errors.WRONG_VALUE.format(line=14, column_name="normal Date"),
        errors.WRONG_VALUE.format(line=16, column_name="man Date"),
        errors.OWNER_MISSING.format(line=21, column_name="Owner"),
        errors.UNKNOWN_USER_WARNING.format(line=22, email="kr@en.com"),
        errors.OWNER_MISSING.format(line=22, column_name="Owner"),
        errors.OWNER_MISSING.format(line=26, column_name="Owner"),
        errors.UNKNOWN_USER_WARNING.format(
            line=27, email="user@exameuple.com"),
        errors.OWNER_MISSING.format(line=27, column_name="Owner"),
    }
    expected_errors = {
        "Line 6: Field man CH is required. The line will be ignored.",
        "Line 9: Field man select is required. The line will be ignored.",
        "Line 10: Field man select is required. The line will be ignored.",
        "Line 12: Field man CH is required. The line will be ignored.",
        "Line 16: Field man Date is required. The line will be ignored.",
        "Line 18: Field man RT is required. The line will be ignored.",
        "Line 20: Field man text is required. The line will be ignored.",
        "Line 21: Field man person is required. The line will be ignored.",
        "Line 28: Field Title is required. The line will be ignored."
    }
    self.assertEqual(expected_warnings, set(response["row_warnings"]))
    self.assertEqual(expected_errors, set(response["row_errors"]))
    self.assertEqual(17, response["created"])
    self.assertEqual(9, response["ignored"])
    self.assertEqual(17, Product.query.count())

  def tests_ca_export(self):
    """Test exporting products with custom attributes.

    This test checks that we get a proper response when exporting objects
    with custom attributes and that the response data actually contains more
    lines than an empty template would.

    This test relies on the import tests to work. If those fail they need to
    be fixed before this one.
    """
    filename = "product_with_all_custom_attributes.csv"
    self.import_file(filename)
    data = [{
        "object_name": "Product",
        "filters": {
            "expression": {},
        },
        "fields": "all",
    }]
    response = self.client.post("/_service/export_csv", data=dumps(data),
                                headers=self.headers)
    self.assert200(response)
    # 21 lines = header/block lines plus the imported products (assumption --
    # confirm against the csv fixture if this changes).
    self.assertEqual(len(response.data.splitlines()), 21)

  def test_multi_word_object_with_ca(self):
    """Test multi-word (e.g. Access Group, Data Asset) object import."""
    filename = "multi_word_object_custom_attribute_test.csv"
    response = self.import_file(filename)[0]
    self.assertEqual([], response["row_warnings"])
    self.assertEqual([], response["row_errors"])
    self.assertEqual(10, response["created"])
    self.assertEqual(0, response["ignored"])
    self.assertEqual(0, response["updated"])
    self.assertEqual(10, AccessGroup.query.count())
    # Each imported access group must carry exactly one custom attribute
    # value of the form "some text <id>".
    for id_ in range(1, 11):
      access_group = AccessGroup.query.filter(
          AccessGroup.slug == "ag-{}".format(id_)).first()
      filtered = [val for val in access_group.custom_attribute_values if
                  val.attribute_value == "some text {}".format(id_)]
      self.assertEqual(len(filtered), 1)
|
import argparse, dns.query, dns.resolver, psycopg2
from dns.exception import DNSException
from time import time
from sys import stderr
from twisted.names import client, dns
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.names.error import DNSNameError
# Command-line interface.
parser = argparse.ArgumentParser(description='Reverse DNS Scraper')
parser.add_argument('range', metavar='octet', type=int, nargs='+',
    help='Specify first octet range')
parser.add_argument('--threading', action='store_true', help='Use threads instead of processes')
parser.add_argument('--debug', action='store_true', help='Launch interactive console on exception or forced exit')
parser.add_argument('--octet', type=int, action='store', default=256)
parser.add_argument('--concurrent', type=int, action='store', default=500)
arguments = parser.parse_args()
try:
    # 'range' must supply exactly two ints (first-octet start/end); anything
    # else raises here and the error is reported on stderr before exiting.
    ip_start, ip_end = arguments.range
    octet_range = arguments.octet
    concurrent = arguments.concurrent
    print "IP Range: %i - %i" % (ip_start, ip_end)
    print "Octet Range: %i" % octet_range
    print "Concurrent: %i" % concurrent
    ip_end += 1  # make the end of the range inclusive
except Exception as e:
    print >> stderr, e
    exit(1)
# Database setup: recreate the results table from scratch on every run.
conn = psycopg2.connect('dbname=dns password=test')
c = conn.cursor()
c.execute('''DROP TABLE dns;''')
c.execute('''CREATE TABLE dns (name TEXT, ip BIGINT);''')
conn.commit()
# Use the system's first configured nameserver for all PTR queries.
default = dns.resolver.get_default_resolver()
ns = default.nameservers[0]
# Pick a concurrency backend; Split/Queue/Array are aliased so the rest of
# the script is agnostic to threads vs processes.
if arguments.threading:
    print 'Using Threading'
    from threading import Thread as Split
    from Queue import Queue
    from array import array as Array
else:
    print 'Using Multiprocessing'
    from multiprocessing import Process as Split
    from multiprocessing import JoinableQueue as Queue
    from multiprocessing import Array
# Shared work queue of (prefix-or-None, level) items consumed by all workers.
q = Queue()
def main():
    """Spawn the worker pool, seed the queue and wait for completion."""
    start = time()
    # arr holds each worker's last-success timestamp; the max over all slots
    # approximates when the last useful work happened.
    if arguments.threading:
        arr = Array('d', (0,)*concurrent)
    else:
        # lock=False: each worker only writes its own slot, so no lock needed.
        arr = Array('d', concurrent, lock=False)
    for i in range(concurrent):
        t=Split(target=doWork, args=(arr, i))
        t.daemon=True
        t.start()
    # Seed the queue with the "no prefix yet" sentinel at level 1.
    q.put((None, 1))
    q.join()
    end = max(arr)
    print "Total Time: %f seconds" % (end - start)
def doWork(arr, id):
    # NOTE(review): this Twisted-based variant is the "partially working"
    # attempt that the following commit reverts; see the notes below.
    def run():
        while True:
            try:
                prefix, level = q.get(timeout=1)
            except:
                # Queue temporarily empty: poll again. (The bare except also
                # hides unrelated errors.)
                continue
            if prefix:
                # Deeper level: expand prefix to prefix.0 .. prefix.(octet_range-1).
                prefix = int2ip(prefix)
                ips = ("%s.%i" % (prefix, octet) for octet in range(0,octet_range))
            else:
                # Level 1: scan the configured first-octet range.
                ips = ("%i" % octet for octet in range(ip_start,ip_end))
            for ip in ips:
                # NOTE(review): lookup() is wrapped with inlineCallbacks, so
                # it returns a Deferred here -- unpacking it as a 2-tuple
                # cannot work as written; TODO confirm the intended flow.
                auth, add = lookup(ip, level, arr, id)
                print auth, add
                # NOTE(review): stopping the reactor inside the loop kills
                # the event loop after the first lookup.
                reactor.stop()
                if auth is None and add is None:
                    pass
                else:
                    # print ip, auth, add
                    processRecords(auth, add)
                if level < 4:
                    # Queue key is the tuple of octet strings (see ip2int).
                    q.put((ip2int(ip), level+1))
            q.task_done()
    run()
    # NOTE(review): run() loops forever, so reactor.run() is unreachable.
    reactor.run()
def lookup(ip, level, arr, id):
    # Resolve the PTR record for ip via Twisted, with up to (5 - level)
    # attempts so deeper levels get fewer retries.
    addr = ip2reverse(ip)
    for i in range(5-level):
        try:
            ans, auth, add = yield client.lookupPointer(addr)
            arr[id] = time()  # record this worker's last-success timestamp
            defer.returnValue((auth, add))
        except DNSNameError as err:
            print >> stderr, 'Name Error, Count: %i, Level: %i' % (i, level)
            defer.returnValue((None, None))
        except Exception:
            print >> stderr, 'Timeout, Count: %i, Level: %i' % (i, level)
            defer.returnValue((None, None))
    defer.returnValue((None, None))
# NOTE(review): `defer` is never imported (only inlineCallbacks is), so every
# defer.returnValue call above would raise NameError at runtime.
lookup = inlineCallbacks(lookup)
#############
# Utilities #
#############
def ip2int(addr):
    """Split a dotted address into a tuple of its octet strings.

    NOTE: despite the name no integer conversion happens -- the tuple is a
    hashable queue key that round-trips through int2ip().
    """
    octets = addr.split('.')
    return tuple(octets)
def int2ip(addr):
    """Rejoin a sequence of octet strings (from ip2int) into dotted form."""
    return ".".join(addr)
def ip2reverse(ip):
    """Build the reverse-DNS (PTR) name for a dotted IPv4 address."""
    reversed_octets = ip.split('.')[::-1]
    return '.'.join(reversed_octets) + '.in-addr.arpa'
try:
    main()
except KeyboardInterrupt:
    # On Ctrl-C, optionally drop into an interactive shell for inspection.
    if arguments.debug:
        from IPython import embed
        embed()
def processRecords(auth, add):
if auth == add == []:
return
else:
pass
#print addr, level, auth, add
records = {}
for A in add:
if A.type is dns.A:
records[A.name.name] = A.payload.dottedQuad()
for NS in auth:
if NS.type is dns.NS:
if NS.payload.name.name not in records:
records[NS.payload.name.name] = None
try:
insertDB(records)
except e:
print "DB Error", e
def insertDB(records):
    """Insert name -> ip records, skipping names that are already stored.

    SECURITY/BUG FIX: the previous version interpolated names straight into
    the SQL text (injection hazard) and formatted the tuple returned by
    ip2int() with %i, which raised TypeError. Queries are now parameterized
    and the dotted quad is converted to a real 32-bit integer locally.
    """
    for name, ip in records.items():
        if ip:
            # Pack the dotted quad into one integer for the BIGINT column.
            parts = [int(p) for p in ip.split('.')]
            ip_num = (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3]
            c.execute(
                "INSERT INTO dns (name, ip) SELECT %s, %s "
                "WHERE NOT EXISTS (SELECT 1 FROM dns WHERE name = %s AND ip IS NOT NULL)",
                (name, ip_num, name))
        else:
            c.execute(
                "INSERT INTO dns (name, ip) SELECT %s, NULL "
                "WHERE NOT EXISTS (SELECT 1 FROM dns WHERE name = %s)",
                (name, name))
    conn.commit()
Revert "partially working"
This reverts commit d8d486e1de1408b6bcf4d749f1296d06a578c546.
import argparse, dns.query, dns.resolver, psycopg2
from dns.exception import DNSException
from time import time
from sys import stderr
# Command-line interface.
parser = argparse.ArgumentParser(description='Reverse DNS Scraper')
parser.add_argument('range', metavar='octet', type=int, nargs='+',
    help='Specify first octet range')
parser.add_argument('--threading', action='store_true', help='Use threads instead of processes')
parser.add_argument('--debug', action='store_true', help='Launch interactive console on exception or forced exit')
parser.add_argument('--octet', type=int, action='store', default=256)
parser.add_argument('--concurrent', type=int, action='store', default=500)
arguments = parser.parse_args()
try:
    # 'range' must supply exactly two ints (first-octet start/end); anything
    # else raises here and the error is reported on stderr before exiting.
    ip_start, ip_end = arguments.range
    octet_range = arguments.octet
    concurrent = arguments.concurrent
    print "IP Range: %i - %i" % (ip_start, ip_end)
    print "Octet Range: %i" % octet_range
    print "Concurrent: %i" % concurrent
    ip_end += 1  # make the end of the range inclusive
except Exception as e:
    print >> stderr, e
    exit(1)
# Database setup: recreate the results table from scratch on every run.
conn = psycopg2.connect('dbname=dns password=test')
c = conn.cursor()
c.execute('''DROP TABLE dns;''')
c.execute('''CREATE TABLE dns (name TEXT, ip BIGINT);''')
conn.commit()
# Use the system's first configured nameserver for all PTR queries.
default = dns.resolver.get_default_resolver()
ns = default.nameservers[0]
# Pick a concurrency backend; Split/Queue/Array are aliased so the rest of
# the script is agnostic to threads vs processes.
if arguments.threading:
    print 'Using Threading'
    from threading import Thread as Split
    from Queue import Queue
    from array import array as Array
else:
    print 'Using Multiprocessing'
    from multiprocessing import Process as Split
    from multiprocessing import JoinableQueue as Queue
    from multiprocessing import Array
# Shared work queue of (prefix-or-None, level) items consumed by all workers.
q = Queue()
def main():
    """Spawn the worker pool, seed the queue and wait for completion."""
    start = time()
    # arr holds each worker's last-success timestamp; the max over all slots
    # approximates when the last useful work happened.
    if arguments.threading:
        arr = Array('d', (0,)*concurrent)
    else:
        # lock=False: each worker only writes its own slot, so no lock needed.
        arr = Array('d', concurrent, lock=False)
    for i in range(concurrent):
        t=Split(target=doWork, args=(arr, i))
        t.daemon=True
        t.start()
    # Seed the queue with the "no prefix yet" sentinel at level 1.
    q.put((None, 1))
    q.join()
    end = max(arr)
    print "Total Time: %f seconds" % (end - start)
def doWork(arr, id):
    # Worker loop: consume (prefix, level) items, scan an octet range, store
    # the results and enqueue the next level (up to level 4).
    while True:
        try:
            prefix, level = q.get(timeout=1)
        except:
            # Queue temporarily empty: poll again. (Bare except also hides
            # unrelated errors; tolerable for a daemonized worker.)
            continue
        if prefix:
            # Deeper level: expand prefix to prefix.0 .. prefix.(octet_range-1).
            prefix = int2ip(prefix)
            ips = ("%s.%i" % (prefix, octet) for octet in range(0,octet_range))
        else:
            # Level 1: scan the configured first-octet range.
            ips = ("%i" % octet for octet in range(ip_start,ip_end))
        for ip in ips:
            auth, add = lookup(ip, level, arr, id)
            if auth is None and add is None:
                pass
            else:
                # print ip, auth, add
                processRecords(auth, add)
            if level < 4:
                # Queue key is the tuple of octet strings (see ip2int).
                q.put((ip2int(ip), level+1))
        q.task_done()
def lookup(ip, level, arr, id):
    # Issue a PTR query for ip with up to (5 - level) attempts, so deeper
    # recursion levels get fewer retries.
    addr = ip2reverse(ip)
    # NOTE(review): dns.message / dns.rdatatype / dns.rcode are used but only
    # dns.query and dns.resolver are imported explicitly; dnspython usually
    # pulls these in transitively -- confirm.
    query = dns.message.make_query(addr, dns.rdatatype.PTR)
    for i in range(5-level):
        try:
            response = dns.query.udp(query, ns, timeout=.5)
            arr[id] = time()  # record this worker's last-success timestamp
            rcode = response.rcode()
            if rcode == dns.rcode.NOERROR:
                # Caller wants the authority/additional sections (NS + A).
                return response.authority, response.additional
            else:
                return None, None
        except dns.exception.Timeout:
            pass
            #print >> stderr, 'Timeout, Count: %i, Level: %i' % (i, level)
        except dns.query.BadResponse:
            pass
            #print >> stderr, 'Bad Response, Count: %i, Level: %i' % (i, level)
    # All attempts failed.
    return None, None
#############
# Utilities #
#############
def ip2int(addr):
    """Split a dotted address into a tuple of its octet strings.

    NOTE: despite the name no integer conversion happens -- the tuple is a
    hashable queue key that round-trips through int2ip().
    """
    octets = addr.split('.')
    return tuple(octets)
def int2ip(addr):
    """Rejoin a sequence of octet strings (from ip2int) into dotted form."""
    return ".".join(addr)
def ip2reverse(ip):
    """Build the reverse-DNS (PTR) name for a dotted IPv4 address."""
    reversed_octets = ip.split('.')[::-1]
    return '.'.join(reversed_octets) + '.in-addr.arpa'
try:
    main()
except KeyboardInterrupt:
    # On Ctrl-C, optionally drop into an interactive shell for inspection.
    if arguments.debug:
        from IPython import embed
        embed()
def processRecords(auth, add):
if auth == add == []:
return
else:
pass
#print addr, level, auth, add
records = {}
for A in add:
if A.type is dns.A:
records[A.name.name] = A.payload.dottedQuad()
for NS in auth:
if NS.type is dns.NS:
if NS.payload.name.name not in records:
records[NS.payload.name.name] = None
try:
insertDB(records)
except e:
print "DB Error", e
def insertDB(records):
    """Insert name -> ip records, skipping names that are already stored.

    SECURITY/BUG FIX: the previous version interpolated names straight into
    the SQL text (injection hazard) and formatted the tuple returned by
    ip2int() with %i, which raised TypeError. Queries are now parameterized
    and the dotted quad is converted to a real 32-bit integer locally.
    """
    for name, ip in records.items():
        if ip:
            # Pack the dotted quad into one integer for the BIGINT column.
            parts = [int(p) for p in ip.split('.')]
            ip_num = (parts[0] << 24) | (parts[1] << 16) | (parts[2] << 8) | parts[3]
            c.execute(
                "INSERT INTO dns (name, ip) SELECT %s, %s "
                "WHERE NOT EXISTS (SELECT 1 FROM dns WHERE name = %s AND ip IS NOT NULL)",
                (name, ip_num, name))
        else:
            c.execute(
                "INSERT INTO dns (name, ip) SELECT %s, NULL "
                "WHERE NOT EXISTS (SELECT 1 FROM dns WHERE name = %s)",
                (name, name))
    conn.commit()
|
import operator
from pprint import pprint
from fo2.connections import db_cursor_so
from base.paginator import paginator_basic
from base.views import O2BaseGetPostView
from utils.table_defs import TableDefs
from utils.views import totalize_data
from cd.forms.novo_estoque import NovoEstoqueForm
from cd.queries.novo_modulo import refs_em_palets
class NovoEstoque(O2BaseGetPostView):
    def __init__(self):
        """Configure the stock ("estoque") listing view: form class,
        template and the column definitions used to render the lot table."""
        super(NovoEstoque, self).__init__()
        self.Form_class = NovoEstoqueForm
        self.cleaned_data2self = True  # copy validated form fields onto self
        self.template_name = 'cd/novo_modulo/estoque.html'
        self.title_name = 'Estoque'
        self.lotes_por_pagina = 20  # lots shown per page
        # Column map: space-separated field name(s) -> [header label,
        # optional alignment flag] ('r' right, 'c' center); the '_' style
        # entry maps the flag onto a text-align CSS rule.
        self.table_defs = TableDefs(
            {
                'palete rota modelo cor lote': [],
                'endereco': ['Endereço'],
                'ref': ['Ref.'],
                'tam': ['Tam.'],
                'op': ['OP'],
                'qtd_prog qtd_lote': ['Tam.Lote', 'r'],
                'qtd_dbaixa': ['Qtd.Est.', 'r'],
                'estagio': ['Estágio', 'c'],
                'solicitacoes': ['Solicitações', 'c'],
                'sol_fin': ['Solicit.Fin.', 'c'],
                'sol': ['Solicitação'],
                'qtd_emp': ['Qtd.Empen.', 'r'],
                'qtd_sol': ['Qtd.Solic.', 'r'],
                'qtd_disp': ['Qtd.Disp.', 'r'],
                'qtd_fin': ['Qtd.Fin.', 'r'],
                'sit': ['Situação'],
                'ped_dest': ['Ped.Destino'],
                'ref_dest': ['Ref.Destino'],
            },
            ['header', '+style'],
            style = {'_': 'text-align'},
        )
def get_lotes_como_disponibilidade(self):
if self.situacao_empenho == 'esf':
fields = 'detalhe+fin'
else:
fields = 'detalhe'
dados = refs_em_palets.query(
self.cursor,
fields=fields,
modelo=self.modelo,
ref=self.referencia,
cor=self.cor,
tam=self.tam,
colecao=self.colecao,
op=self.op,
lote=self.lote,
endereco=self.endereco,
tipo_prod=self.tipo_prod,
selecao_ops=self.selecao_ops,
selecao_lotes=self.selecao_lotes,
situacao_empenho=self.situacao_empenho,
paletizado=self.paletizado,
)
for row in dados:
if row['est_sol'] and row['estagio'] != row['est_sol']:
row['solicitacoes'] = '-'
row['qtd_emp'] = 0
row['qtd_sol'] = 0
row['qtd_dbaixa'] = row['qtd']
row['qtd_disp'] = row['qtd_dbaixa'] - row['qtd_emp'] - row['qtd_sol']
return dados
def mount_lotes_em_estoque(self):
if self.order:
if self.order == 'el':
self.lotes.sort(key=operator.itemgetter('endereco', 'op', 'lote'))
elif self.order == 'mod':
self.lotes.sort(key=operator.itemgetter('modelo', 'ref', 'ordem_tam', 'cor', 'op', 'lote'))
len_lotes = len(self.lotes)
sum_fields = ['qtd_dbaixa', 'qtd_emp', 'qtd_sol', 'qtd_disp']
if self.situacao_empenho == 'esf':
sum_fields.append('qtd_fin')
totalize_data(
self.lotes,
{
'sum': sum_fields,
'descr': {'lote': 'Total geral:'},
'row_style':
"font-weight: bold;"
"background-image: linear-gradient(#DDD, white);",
'flags': ['NO_TOT_1'],
}
)
totalizador_lotes = self.lotes[-1]
del(self.lotes[-1])
self.lotes = paginator_basic(self.lotes, self.lotes_por_pagina, self.page)
for row in self.lotes.object_list:
if row['qtd_disp'] < 0:
row['qtd_disp|STYLE'] = 'color: red;'
self.lotes.object_list.append(totalizador_lotes)
fields = [
'palete', 'endereco', 'rota',
'modelo', 'ref', 'tam', 'cor', 'op', 'lote',
'qtd_prog', 'qtd_dbaixa', 'estagio',
'solicitacoes', 'qtd_sol', 'qtd_emp', 'qtd_disp',
]
if self.situacao_empenho == 'esf':
fields.append('sol_fin')
fields.append('qtd_fin')
self.context.update(self.table_defs.hfs_dict(*fields))
self.context.update({
'safe': [
'op',
'modelo',
],
'data': self.lotes,
'len_lotes': len_lotes,
})
def mount_estoque(self):
self.lotes = self.get_lotes_como_disponibilidade()
if len(self.lotes) > 0:
self.mount_lotes_em_estoque()
def filter_inputs(self):
self.lote = None if self.lote == '' else self.lote
self.op = None if self.op == '' else self.op
self.referencia = None if self.referencia == '' else self.referencia
self.cor = None if self.cor == '' else self.cor
self.tam = None if self.tam == '' else self.tam
self.modelo = None if self.modelo == '' else int(self.modelo)
self.endereco = None if self.endereco == '' else self.endereco
self.order = None if self.order == '-' else self.order
self.colecao = None if self.colecao == '' else self.colecao
if self.usa_paginador == 'n':
self.lotes_por_pagina = 999999
self.oc = self.lote[4:] if self.lote else None
def mount_context(self):
self.cursor = db_cursor_so(self.request)
self.filter_inputs()
self.mount_estoque()
adicionada coluna total de empenho (added the "tot_emp" total-commitment column)
import operator
from pprint import pprint
from fo2.connections import db_cursor_so
from base.paginator import paginator_basic
from base.views import O2BaseGetPostView
from utils.table_defs import TableDefs
from utils.views import totalize_data
from cd.forms.novo_estoque import NovoEstoqueForm
from cd.queries.novo_modulo import refs_em_palets
class NovoEstoque(O2BaseGetPostView):
    """
    "Estoque" (stock) report view.

    Lists lots ("lotes") stored on pallets, deriving for each row the total
    commitment (tot_emp = pledged + requested) and the available quantity
    (qtd_disp = stock - tot_emp), then sorts, totalizes, paginates and
    renders the result.
    """
    def __init__(self):
        super(NovoEstoque, self).__init__()
        self.Form_class = NovoEstoqueForm
        # The base view copies the form's cleaned_data fields onto `self`.
        self.cleaned_data2self = True
        self.template_name = 'cd/novo_modulo/estoque.html'
        self.title_name = 'Estoque'
        self.lotes_por_pagina = 20  # page size; effectively disabled in filter_inputs()
        # Column definitions: "field(s)": [header label, alignment], where
        # 'r'/'c' alignment feeds the '+style' (text-align) output.
        self.table_defs = TableDefs(
            {
                'palete rota modelo cor lote': [],
                'endereco': ['Endereço'],
                'ref': ['Ref.'],
                'tam': ['Tam.'],
                'op': ['OP'],
                'qtd_prog qtd_lote': ['Tam.Lote', 'r'],
                'qtd_dbaixa': ['Qtd.Est.', 'r'],
                'estagio': ['Estágio', 'c'],
                'solicitacoes': ['Solicitações', 'c'],
                'sol_fin': ['Solicit.Fin.', 'c'],
                'sol': ['Solicitação'],
                'qtd_emp': ['Qtd.Empen.', 'r'],
                'qtd_sol': ['Qtd.Solic.', 'r'],
                'tot_emp': ['Tot.Empen.', 'r'],
                'qtd_disp': ['Qtd.Disp.', 'r'],
                'qtd_fin': ['Qtd.Fin.', 'r'],
                'sit': ['Situação'],
                'ped_dest': ['Ped.Destino'],
                'ref_dest': ['Ref.Destino'],
            },
            ['header', '+style'],
            style = {'_': 'text-align'},
        )
    def get_lotes_como_disponibilidade(self):
        """
        Query the lots with their pledged/requested quantities and derive
        the per-row total commitment (tot_emp) and availability (qtd_disp).
        """
        # 'esf' asks the query for the extra finishing ("fin") columns.
        if self.situacao_empenho == 'esf':
            fields = 'detalhe+fin'
        else:
            fields = 'detalhe'
        dados = refs_em_palets.query(
            self.cursor,
            fields=fields,
            modelo=self.modelo,
            ref=self.referencia,
            cor=self.cor,
            tam=self.tam,
            colecao=self.colecao,
            op=self.op,
            lote=self.lote,
            endereco=self.endereco,
            tipo_prod=self.tipo_prod,
            selecao_ops=self.selecao_ops,
            selecao_lotes=self.selecao_lotes,
            situacao_empenho=self.situacao_empenho,
            paletizado=self.paletizado,
        )
        for row in dados:
            # Requests made at a stage other than the lot's current stage do
            # not count against this row; blank them out.
            if row['est_sol'] and row['estagio'] != row['est_sol']:
                row['solicitacoes'] = '-'
                row['qtd_emp'] = 0
                row['qtd_sol'] = 0
            row['qtd_dbaixa'] = row['qtd']
            # total commitment = pledged + requested
            row['tot_emp'] = row['qtd_emp'] + row['qtd_sol']
            # available = in stock - total commitment
            row['qtd_disp'] = row['qtd_dbaixa'] - row['tot_emp']
        return dados
    def mount_lotes_em_estoque(self):
        """
        Sort, totalize, paginate and style self.lotes, then push the table
        definition and data into the template context.
        """
        if self.order:
            if self.order == 'el':  # by address ("endereço"), OP, lot
                self.lotes.sort(key=operator.itemgetter('endereco', 'op', 'lote'))
            elif self.order == 'mod':  # by model / reference / size / color
                self.lotes.sort(key=operator.itemgetter('modelo', 'ref', 'ordem_tam', 'cor', 'op', 'lote'))
        len_lotes = len(self.lotes)
        sum_fields = ['qtd_dbaixa', 'qtd_emp', 'qtd_sol', 'tot_emp', 'qtd_disp']
        if self.situacao_empenho == 'esf':
            sum_fields.append('qtd_fin')
        # totalize_data appends a grand-total row at the end of self.lotes.
        totalize_data(
            self.lotes,
            {
                'sum': sum_fields,
                'descr': {'lote': 'Total geral:'},
                'row_style':
                    "font-weight: bold;"
                    "background-image: linear-gradient(#DDD, white);",
                'flags': ['NO_TOT_1'],
            }
        )
        # Pop the total row before paginating and re-append it afterwards so
        # it is shown on every page.
        totalizador_lotes = self.lotes[-1]
        del(self.lotes[-1])
        self.lotes = paginator_basic(self.lotes, self.lotes_por_pagina, self.page)
        for row in self.lotes.object_list:
            # Highlight negative availability (over-pledged lots) in red.
            if row['qtd_disp'] < 0:
                row['qtd_disp|STYLE'] = 'color: red;'
        self.lotes.object_list.append(totalizador_lotes)
        fields = [
            'palete', 'endereco', 'rota',
            'modelo', 'ref', 'tam', 'cor', 'op', 'lote',
            'qtd_prog', 'qtd_dbaixa', 'estagio',
            'solicitacoes', 'qtd_emp', 'qtd_sol', 'tot_emp', 'qtd_disp',
        ]
        if self.situacao_empenho == 'esf':
            fields.append('sol_fin')
            fields.append('qtd_fin')
        self.context.update(self.table_defs.hfs_dict(*fields))
        self.context.update({
            'safe': [  # columns rendered without HTML escaping
                'op',
                'modelo',
            ],
            'data': self.lotes,
            'len_lotes': len_lotes,
        })
    def mount_estoque(self):
        """Fetch the lots and, if any were found, mount the table."""
        self.lotes = self.get_lotes_como_disponibilidade()
        if len(self.lotes) > 0:
            self.mount_lotes_em_estoque()
    def filter_inputs(self):
        """Normalize form inputs: empty strings become None (no filter)."""
        self.lote = None if self.lote == '' else self.lote
        self.op = None if self.op == '' else self.op
        self.referencia = None if self.referencia == '' else self.referencia
        self.cor = None if self.cor == '' else self.cor
        self.tam = None if self.tam == '' else self.tam
        self.modelo = None if self.modelo == '' else int(self.modelo)
        self.endereco = None if self.endereco == '' else self.endereco
        self.order = None if self.order == '-' else self.order
        self.colecao = None if self.colecao == '' else self.colecao
        if self.usa_paginador == 'n':
            self.lotes_por_pagina = 999999  # effectively "no pagination"
        # OC number: the lot id without its 4-character prefix.
        self.oc = self.lote[4:] if self.lote else None
    def mount_context(self):
        """Entry point called by the base view to build the response context."""
        self.cursor = db_cursor_so(self.request)
        self.filter_inputs()
        self.mount_estoque()
|
"""
Univariate structural time series models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
from statsmodels.compat.collections import OrderedDict
import numpy as np
import pandas as pd
from statsmodels.tsa.filters.hp_filter import hpfilter
from statsmodels.tools.data import _is_using_pandas
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from scipy.linalg import solve_discrete_lyapunov
from statsmodels.tools.tools import Bunch
from .tools import (
companion_matrix, constrain_stationary_univariate,
unconstrain_stationary_univariate
)
import statsmodels.base.wrapper as wrap
# Map from the level/trend bitmask built in `UnobservedComponents.__init__`
# (irregular=0x01, level=0x02, stochastic level=0x04, trend=0x08,
# stochastic trend=0x10) to the human-readable trend specification name.
_mask_map = {
    1: 'irregular',
    2: 'fixed intercept',
    3: 'deterministic constant',
    6: 'random walk',
    7: 'local level',
    # NOTE(review): the 'fixed slope' string spec sets level+trend, i.e.
    # mask 10; 8 (trend only) is normalized away by __init__ -- verify key.
    8: 'fixed slope',
    11: 'deterministic trend',
    14: 'random walk with drift',
    15: 'local linear deterministic trend',
    31: 'local linear trend',
    27: 'smooth trend',
    26: 'random trend'
}
class UnobservedComponents(MLEModel):
r"""
Univariate unobserved components time series model
These are also known as structural time series models, and decompose a
(univariate) time series into trend, seasonal, cyclical, and irregular
components.
Parameters
----------
level : bool, optional
Whether or not to include a level component. Default is False.
trend : bool, optional
Whether or not to include a trend component. Default is False. If True,
`level` must also be True.
    seasonal : int or None, optional
        The period of the seasonal component. Default is None.
    cycle : bool, optional
        Whether or not to include a cycle component. Default is False.
    autoregressive : int or None, optional
        The order of the autoregressive component. Default is None.
    exog : array_like or None, optional
        Exogenous variables.
    irregular : bool, optional
        Whether or not to include an irregular component. Default is False.
    stochastic_level : bool, optional
        Whether or not any level component is stochastic. Default is False.
    stochastic_trend : bool, optional
        Whether or not any trend component is stochastic. Default is False.
    stochastic_seasonal : bool, optional
        Whether or not any seasonal component is stochastic. Default is True.
    stochastic_cycle : bool, optional
        Whether or not any cycle component is stochastic. Default is False.
    damped_cycle : bool, optional
        Whether or not the cycle component is damped. Default is False.
cycle_period_bounds : tuple, optional
A tuple with lower and upper allowed bounds for the period of the
cycle. If not provided, the following default bounds are used:
(1) if no date / time information is provided, the frequency is
constrained to be between zero and :math:`\pi`, so the period is
constrained to be in [0.5, infinity].
(2) If the date / time information is provided, the default bounds
allow the cyclical component to be between 1.5 and 12 years; depending
on the frequency of the endogenous variable, this will imply different
specific bounds.
Notes
-----
    These models take the general form (see [1]_ Chapter 3.2 for all details)
.. math::
y_t = \mu_t + \gamma_t + c_t + \varepsilon_t
where :math:`y_t` refers to the observation vector at time :math:`t`,
:math:`\mu_t` refers to the trend component, :math:`\gamma_t` refers to the
seasonal component, :math:`c_t` refers to the cycle, and
:math:`\varepsilon_t` is the irregular. The modeling details of these
components are given below.
**Trend**
The trend is modeled either as a *local linear trend* model or as an
*integrated random walk* model.
The local linear trend is specified as:
.. math::
\mu_t = \mu_{t-1} + \nu_{t-1} + \xi_{t-1} \\
\nu_t = \nu_{t-1} + \zeta_{t-1}
with :math:`\xi_t \sim N(0, \sigma_\xi^2)` and
:math:`\zeta_t \sim N(0, \sigma_\zeta^2)`.
The integrated random walk model of order `r` is specified as:
.. math::
\Delta^r \mu_t = \xi_{t-1} \\
This component results in two parameters to be selected via maximum
likelihood: :math:`\sigma_\xi^2` and :math:`\sigma_\zeta^2`.
In the case of the integrated random walk model, the parameter
:math:`\sigma_\xi^2` is constrained to be zero, but the parameter `r` (the
order of integration) must be chosen (it is not estimated by MLE).
**Seasonal**
The seasonal component is modeled as:
.. math::
\gamma_t = - \sum_{j=1}^{s-1} \gamma_{t+1-j} + \omega_t \\
\omega_t \sim N(0, \sigma_\omega^2)
where s is the number of seasons and :math:`\omega_t` is an error term that
allows the seasonal constants to change over time (if this is not desired,
:math:`\sigma_\omega^2` can be set to zero).
This component results in one parameter to be selected via maximum
likelihood: :math:`\sigma_\omega^2`, and one parameter to be chosen, the
number of seasons `s`.
**Cycle**
The cyclical component is modeled as
.. math::
c_{t+1} = \rho_c (\tilde c_t \cos \lambda_c t
+ \tilde c_t^* \sin \lambda_c) +
\tilde \omega_t \\
c_{t+1}^* = \rho_c (- \tilde c_t \sin \lambda_c t +
\tilde c_t^* \cos \lambda_c) +
\tilde \omega_t^* \\
where :math:`\omega_t, \tilde \omega_t iid N(0, \sigma_{\tilde \omega}^2)`
This component results in three parameters to be selected via maximum
likelihood: :math:`\sigma_{\tilde \omega}^2`, :math:`\rho_c`, and
:math:`\lambda_c`.
**Irregular**
The irregular components are independent and identically distributed (iid):
.. math::
\varepsilon_t \sim N(0, \sigma_\varepsilon^2)
References
----------
.. [1] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
"""
    def __init__(self, endog, level=False, trend=False, seasonal=None,
                 cycle=False, autoregressive=None, exog=None, irregular=False,
                 stochastic_level=False, stochastic_trend=False,
                 stochastic_seasonal=True, stochastic_cycle=False,
                 damped_cycle=False, cycle_period_bounds=None,
                 mle_regression=True,
                 **kwargs):
        """
        Initialize the unobserved components model.

        See the class docstring for parameter descriptions. `level` may also
        be a string naming a full trend specification (e.g. 'local level',
        'rwalk'), in which case it overrides the individual
        irregular/level/trend flags.
        """
        # Model options
        self.level = level
        self.trend = trend
        self.seasonal_period = seasonal if seasonal is not None else 0
        self.seasonal = self.seasonal_period > 0
        self.cycle = cycle
        self.ar_order = autoregressive if autoregressive is not None else 0
        self.autoregressive = self.ar_order > 0
        self.irregular = irregular
        self.stochastic_level = stochastic_level
        self.stochastic_trend = stochastic_trend
        self.stochastic_seasonal = stochastic_seasonal
        self.stochastic_cycle = stochastic_cycle
        self.damped_cycle = damped_cycle
        self.mle_regression = mle_regression
        # Check for string trend/level specification
        self.trend_specification = None
        # NOTE(review): on Python 2 `str` does not match unicode strings, so
        # a u'...' specification would be silently ignored -- verify.
        if isinstance(self.level, str):
            self.trend_specification = level
            self.level = False
            # Check if any of the trend/level components have been set, and
            # reset everything to False
            trend_attributes = ['irregular', 'level', 'trend',
                                'stochastic_level', 'stochastic_trend']
            for attribute in trend_attributes:
                if not getattr(self, attribute) is False:
                    warn("Value of `%s` may be overridden when the trend"
                         " component is specified using a model string."
                         % attribute)
                    setattr(self, attribute, False)
            # Now set the correct specification
            # (each branch turns on the flags implied by the named model and
            # normalizes abbreviations to the canonical name)
            spec = self.trend_specification
            if spec == 'irregular' or spec == 'ntrend':
                self.irregular = True
                self.trend_specification = 'irregular'
            elif spec == 'fixed intercept':
                self.level = True
            elif spec == 'deterministic constant' or spec == 'dconstant':
                self.irregular = True
                self.level = True
                self.trend_specification = 'deterministic constant'
            elif spec == 'local level' or spec == 'llevel':
                self.irregular = True
                self.level = True
                self.stochastic_level = True
                self.trend_specification = 'local level'
            elif spec == 'random walk' or spec == 'rwalk':
                self.level = True
                self.stochastic_level = True
                self.trend_specification = 'random walk'
            elif spec == 'fixed slope':
                self.level = True
                self.trend = True
            elif spec == 'deterministic trend' or spec == 'dtrend':
                self.irregular = True
                self.level = True
                self.trend = True
                self.trend_specification = 'deterministic trend'
            elif (spec == 'local linear deterministic trend' or
                    spec == 'lldtrend'):
                self.irregular = True
                self.level = True
                self.stochastic_level = True
                self.trend = True
                self.trend_specification = 'local linear deterministic trend'
            elif spec == 'random walk with drift' or spec == 'rwdrift':
                self.level = True
                self.stochastic_level = True
                self.trend = True
                self.trend_specification = 'random walk with drift'
            elif spec == 'local linear trend' or spec == 'lltrend':
                self.irregular = True
                self.level = True
                self.stochastic_level = True
                self.trend = True
                self.stochastic_trend = True
                self.trend_specification = 'local linear trend'
            elif spec == 'smooth trend' or spec == 'strend':
                self.irregular = True
                self.level = True
                self.trend = True
                self.stochastic_trend = True
                self.trend_specification = 'smooth trend'
            elif spec == 'random trend' or spec == 'rtrend':
                self.level = True
                self.trend = True
                self.stochastic_trend = True
                self.trend_specification = 'random trend'
            else:
                raise ValueError("Invalid level/trend specification: '%s'"
                                 % spec)
        # Check for a model that makes sense
        # (checks the original *arguments*, not the attributes which may have
        # been rewritten by a string specification above)
        if trend and not level:
            warn("Trend component specified without level component;"
                 " deterministic level component added.")
            self.level = True
            self.stochastic_level = False
        if not (self.irregular or
                (self.level and self.stochastic_level) or
                (self.trend and self.stochastic_trend) or
                (self.seasonal and self.stochastic_seasonal) or
                (self.cycle and self.stochastic_cycle) or
                self.autoregressive):
            warn("Specified model does not contain a stochastic element;"
                 " irregular component added.")
            self.irregular = True
        if self.seasonal and self.seasonal_period < 2:
            raise ValueError('Seasonal component must have a seasonal period'
                             ' of at least 2.')
        # Create a bitmask holding the level/trend specification
        # (bit values match the keys of the module-level _mask_map)
        self.trend_mask = (
            self.irregular * 0x01 |
            self.level * 0x02 |
            self.level * self.stochastic_level * 0x04 |
            self.trend * 0x08 |
            self.trend * self.stochastic_trend * 0x10
        )
        # Create the trend specification, if it wasn't given
        if self.trend_specification is None:
            # trend specification may be none, e.g. if the model is only
            # a stochastic cycle, etc.
            self.trend_specification = _mask_map.get(self.trend_mask, None)
        # Exogenous component
        self.k_exog = 0
        if exog is not None:
            exog_is_using_pandas = _is_using_pandas(exog, None)
            if not exog_is_using_pandas:
                exog = np.asarray(exog)
            # Make sure we have 2-dimensional array
            if exog.ndim == 1:
                if not exog_is_using_pandas:
                    exog = exog[:, None]
                else:
                    exog = pd.DataFrame(exog)
            self.k_exog = exog.shape[1]
        self.regression = self.k_exog > 0
        # Model parameters
        # Number of states: level, trend, s-1 seasonal states, two cycle
        # states, p AR states, plus one state per regressor when the
        # coefficients are estimated in the state vector (not by MLE).
        k_states = (
            self.level + self.trend +
            (self.seasonal_period - 1) * self.seasonal +
            self.cycle * 2 +
            self.ar_order +
            (not self.mle_regression) * self.k_exog
        )
        # Dimension of the (positive definite) state covariance: one slot per
        # stochastic component (two for the cycle).
        k_posdef = (
            self.stochastic_level * self.level +
            self.stochastic_trend * self.trend +
            self.stochastic_seasonal * self.seasonal +
            self.stochastic_cycle * (self.cycle * 2) +
            self.autoregressive
        )
        # We can still estimate the model with just the irregular component,
        # just need to have one state that does nothing.
        loglikelihood_burn = kwargs.get('loglikelihood_burn',
                                        k_states - self.ar_order)
        if k_states == 0:
            if not self.irregular:
                raise ValueError('Model has no components specified.')
            k_states = 1
        if k_posdef == 0:
            k_posdef = 1
        # Setup the representation
        super(UnobservedComponents, self).__init__(
            endog, k_states, k_posdef=k_posdef, exog=exog, **kwargs
        )
        self.setup()
        # Initialize the model
        self.ssm.loglikelihood_burn = loglikelihood_burn
        # Need to reset the MLE names (since when they were first set, `setup`
        # had not been run (and could not have been at that point))
        self.data.param_names = self.param_names
        # Get bounds for the frequency of the cycle, if we know the frequency
        # of the data.
        if cycle_period_bounds is None:
            freq = self.data.freq[0] if self.data.freq is not None else ''
            if freq == 'A':
                cycle_period_bounds = (1.5, 12)
            elif freq == 'Q':
                cycle_period_bounds = (1.5*4, 12*4)
            elif freq == 'M':
                cycle_period_bounds = (1.5*12, 12*12)
            else:
                # If we have no information on data frequency, require the
                # cycle frequency to be between 0 and pi
                cycle_period_bounds = (2, np.inf)
        # Convert the period bounds into (low, high) frequency bounds.
        self.cycle_frequency_bound = (
            2*np.pi / cycle_period_bounds[1], 2*np.pi / cycle_period_bounds[0]
        )
    def setup(self):
        """
        Setup the structural time series representation

        Fills in the fixed (non-estimated) entries of the state space
        matrices and records, component by component, which parameters are
        estimated and where they go. `i` tracks the current position in the
        state vector and `j` the current row of the selection matrix; the
        ordering here must match `transform_params`/`update`.
        """
        # TODO fix this
        # (if we don't set it here, each instance shares a single dictionary)
        self._start_params = {
            'irregular_var': 0.1,
            'level_var': 0.1,
            'trend_var': 0.1,
            'seasonal_var': 0.1,
            'cycle_freq': 0.1,
            'cycle_var': 0.1,
            'cycle_damp': 0.1,
            'ar_coeff': 0,
            'ar_var': 0.1,
            'reg_coeff': 0,
        }
        self._param_names = {
            'irregular_var': 'sigma2.irregular',
            'level_var': 'sigma2.level',
            'trend_var': 'sigma2.trend',
            'seasonal_var': 'sigma2.seasonal',
            'cycle_var': 'sigma2.cycle',
            'cycle_freq': 'frequency.cycle',
            'cycle_damp': 'damping.cycle',
            'ar_coeff': 'ar.L%d',
            'ar_var': 'sigma2.ar',
            'reg_coeff': 'beta.%d',
        }
        # Initialize the ordered sets of parameters
        # (values are the number of parameters of each kind)
        self.parameters = OrderedDict()
        self.parameters_obs_intercept = OrderedDict()
        self.parameters_obs_cov = OrderedDict()
        self.parameters_transition = OrderedDict()
        self.parameters_state_cov = OrderedDict()
        # Initialize the fixed components of the state space matrices,
        i = 0  # state offset
        j = 0  # state covariance offset
        if self.irregular:
            self.parameters_obs_cov['irregular_var'] = 1
        if self.level:
            # Level enters the observation equation directly.
            self.ssm['design', 0, i] = 1.
            self.ssm['transition', i, i] = 1.
            if self.trend:
                # The trend (slope) feeds into the level each period.
                self.ssm['transition', i, i+1] = 1.
            if self.stochastic_level:
                self.ssm['selection', i, j] = 1.
                self.parameters_state_cov['level_var'] = 1
                j += 1
            i += 1
        if self.trend:
            self.ssm['transition', i, i] = 1.
            if self.stochastic_trend:
                self.ssm['selection', i, j] = 1.
                self.parameters_state_cov['trend_var'] = 1
                j += 1
            i += 1
        if self.seasonal:
            n = self.seasonal_period - 1
            self.ssm['design', 0, i] = 1.
            # Seasonal states follow the "sum to zero over a full period"
            # companion form.
            self.ssm['transition', i:i + n, i:i + n] = (
                companion_matrix(np.r_[1, [1] * n]).transpose()
            )
            if self.stochastic_seasonal:
                self.ssm['selection', i, j] = 1.
                self.parameters_state_cov['seasonal_var'] = 1
                j += 1
            i += n
        if self.cycle:
            self.ssm['design', 0, i] = 1.
            self.parameters_transition['cycle_freq'] = 1
            if self.damped_cycle:
                self.parameters_transition['cycle_damp'] = 1
            if self.stochastic_cycle:
                # Both cycle states share one variance (see `update`).
                self.ssm['selection', i:i+2, j:j+2] = np.eye(2)
                self.parameters_state_cov['cycle_var'] = 1
                j += 2
            # The 2x2 rotation block is filled in by `update` for each new
            # frequency/damping value.
            self._idx_cycle_transition = np.s_['transition', i:i+2, i:i+2]
            i += 2
        if self.autoregressive:
            self.ssm['design', 0, i] = 1.
            self.parameters_transition['ar_coeff'] = self.ar_order
            self.parameters_state_cov['ar_var'] = 1
            self.ssm['selection', i, j] = 1
            self.ssm['transition', i:i+self.ar_order, i:i+self.ar_order] = (
                companion_matrix(self.ar_order).T
            )
            # The first row of the AR companion block holds the coefficients.
            self._idx_ar_transition = (
                np.s_['transition', i, i:i+self.ar_order]
            )
            # Expand the scalar AR starting value / name into one per lag.
            self._start_params['ar_coeff'] = (
                [self._start_params['ar_coeff']] * self.ar_order
            )
            self._param_names['ar_coeff'] = [
                self._param_names['ar_coeff'] % k
                for k in range(1, self.ar_order+1)
            ]
            j += 1
            i += self.ar_order
        if self.regression:
            if self.mle_regression:
                # Regression coefficients estimated by MLE enter through a
                # time-varying observation intercept (see `update`).
                self.parameters_obs_intercept['reg_coeff'] = self.k_exog
                self._start_params['reg_coeff'] = (
                    [self._start_params['reg_coeff']] * self.k_exog
                )
                self._param_names['reg_coeff'] = [
                    self._param_names['reg_coeff'] % k
                    for k in range(1, self.k_exog+1)
                ]
            else:
                # Otherwise the coefficients are (static) states: put the
                # regressors into a time-varying design matrix.
                design = np.repeat(self.ssm['design', :, :, 0], self.nobs, axis=0)
                self.ssm['design'] = design.transpose()[np.newaxis, :, :]
                self.ssm['design', 0, i:i+self.k_exog, :] = self.exog.transpose()
                self.ssm['transition', i:i+self.k_exog, i:i+self.k_exog] = (
                    np.eye(self.k_exog)
                )
                i += self.k_exog
        # Update to get the actual parameter set
        self.parameters.update(self.parameters_obs_cov)
        self.parameters.update(self.parameters_state_cov)
        self.parameters.update(self.parameters_transition)  # ordered last
        self.parameters.update(self.parameters_obs_intercept)
        self.k_obs_intercept = sum(self.parameters_obs_intercept.values())
        self.k_obs_cov = sum(self.parameters_obs_cov.values())
        self.k_transition = sum(self.parameters_transition.values())
        self.k_state_cov = sum(self.parameters_state_cov.values())
        self.k_params = sum(self.parameters.values())
        # Other indices
        idx = np.diag_indices(self.ssm.k_posdef)
        self._idx_state_cov = ('state_cov', idx[0], idx[1])
def initialize_state(self):
# Initialize the AR component as stationary, the rest as approximately
# diffuse
initial_state = np.zeros(self.k_states)
initial_state_cov = (
np.eye(self.k_states, dtype=self.ssm.transition.dtype) *
self.ssm.initial_variance
)
if self.autoregressive:
start = (
self.level + self.trend +
(self.seasonal_period - 1) * self.seasonal +
self.cycle * 2
)
end = start + self.ar_order
selection_stationary = self.ssm.selection[start:end, :, 0]
selected_state_cov_stationary = np.dot(
np.dot(selection_stationary, self.ssm.state_cov[:, :, 0]),
selection_stationary.T
)
try:
initial_state_cov_stationary = solve_discrete_lyapunov(
self.ssm.transition[start:end, start:end, 0],
selected_state_cov_stationary
)
except:
initial_state_cov_stationary = solve_discrete_lyapunov(
self.ssm.transition[start:end, start:end, 0],
selected_state_cov_stationary,
method='direct'
)
initial_state_cov[start:end, start:end] = (
initial_state_cov_stationary
)
self.ssm.initialize_known(initial_state, initial_state_cov)
def filter(self, params, transformed=True, cov_type=None, return_ssm=False,
**kwargs):
# Transform parameters if necessary
if not transformed:
params = self.transform_params(params)
transformed = True
# Get the state space output
result = super(UnobservedComponents, self).filter(
params, transformed, cov_type, return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = UnobservedComponentsResultsWrapper(
UnobservedComponentsResults(self, params, result,
**result_kwargs)
)
return result
    @property
    def start_params(self):
        """
        Starting parameter values for maximum likelihood estimation,
        flattened in the order recorded by `setup`.
        """
        # `setup` may not have run yet (e.g. during base-class __init__).
        if not hasattr(self, 'parameters'):
            return []
        # Level / trend variances
        # (Use the HP filter to get initial estimates of variances)
        _start_params = self._start_params.copy()
        if self.level:
            resid, trend1 = hpfilter(self.endog)
            if self.stochastic_trend:
                # Second HP pass separates the slowly-varying trend from the
                # level movements.
                cycle2, trend2 = hpfilter(trend1)
                _start_params['trend_var'] = np.std(trend2)**2
                if self.stochastic_level:
                    _start_params['level_var'] = np.std(cycle2)**2
            elif self.stochastic_level:
                _start_params['level_var'] = np.std(trend1)**2
        else:
            resid = self.ssm.endog[0]
        # Seasonal
        if self.stochastic_seasonal:
            # TODO seasonal variance starting values?
            pass
        # Cyclical
        if self.cycle:
            _start_params['cycle_var'] = np.std(resid)**2
            # Lag-1 least-squares coefficient of the residual as a rough
            # damping estimate.
            _start_params['cycle_damp'] = (
                np.linalg.pinv(resid[:-1, None]).dot(resid[1:])[0]
            )
            # Set initial period estimate to 3 year, if we know the frequency
            # of the data observations
            freq = self.data.freq[0] if self.data.freq is not None else ''
            if freq == 'A':
                _start_params['cycle_freq'] = 2 * np.pi / 3
            elif freq == 'Q':
                _start_params['cycle_freq'] = 2 * np.pi / 12
            elif freq == 'M':
                _start_params['cycle_freq'] = 2 * np.pi / 36
        # Irregular
        # (the residual variance is attributed to the cycle when a cycle is
        # present, so the irregular variance keeps its default then)
        else:
            _start_params['irregular_var'] = np.std(resid)**2
        # Create the starting parameter list
        start_params = []
        for key in self.parameters.keys():
            if np.isscalar(_start_params[key]):
                start_params.append(_start_params[key])
            else:
                start_params += _start_params[key]
        return start_params
@property
def param_names(self):
if not hasattr(self, 'parameters'):
return []
param_names = []
for key in self.parameters.keys():
if np.isscalar(self._param_names[key]):
param_names.append(self._param_names[key])
else:
param_names += self._param_names[key]
return param_names
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
"""
constrained = np.zeros(unconstrained.shape, dtype=unconstrained.dtype)
# Positive parameters: obs_cov, state_cov
offset = self.k_obs_cov + self.k_state_cov
constrained[:offset] = unconstrained[:offset]**2
# Cycle parameters
if self.cycle:
# Cycle frequency must be between between our bounds
low, high = self.cycle_frequency_bound
constrained[offset] = (
1 / (1 + np.exp(-unconstrained[offset]))
) * (high - low) + low
offset += 1
# Cycle damping (if present) must be between 0 and 1
if self.damped_cycle:
constrained[offset] = (
1 / (1 + np.exp(-unconstrained[offset]))
)
offset += 1
# Autoregressive coefficients must be stationary
if self.autoregressive:
constrained[offset:offset + self.ar_order] = (
constrain_stationary_univariate(
unconstrained[offset:offset + self.ar_order]
)
)
offset += self.ar_order
# Nothing to do with betas
constrained[offset:offset + self.k_exog] = (
unconstrained[offset:offset + self.k_exog]
)
return constrained
def untransform_params(self, constrained):
"""
Reverse the transformation
"""
unconstrained = np.zeros(constrained.shape, dtype=constrained.dtype)
# Positive parameters: obs_cov, state_cov
offset = self.k_obs_cov + self.k_state_cov
unconstrained[:offset] = constrained[:offset]**0.5
# Cycle parameters
if self.cycle:
# Cycle frequency must be between between our bounds
low, high = self.cycle_frequency_bound
x = (constrained[offset] - low) / (high - low)
unconstrained[offset] = np.log(
x / (1 - x)
)
offset += 1
# Cycle damping (if present) must be between 0 and 1
if self.damped_cycle:
unconstrained[offset] = np.log(
constrained[offset] / (1 - constrained[offset])
)
offset += 1
# Autoregressive coefficients must be stationary
if self.autoregressive:
unconstrained[offset:offset + self.ar_order] = (
unconstrain_stationary_univariate(
constrained[offset:offset + self.ar_order]
)
)
offset += self.ar_order
# Nothing to do with betas
unconstrained[offset:offset + self.k_exog] = (
constrained[offset:offset + self.k_exog]
)
return unconstrained
    def update(self, params, **kwargs):
        """
        Write the (constrained) parameter values into the state space
        matrices. Parameter ordering must match `setup`/`transform_params`.
        """
        params = super(UnobservedComponents, self).update(params, **kwargs)
        offset = 0
        # Observation covariance
        if self.irregular:
            self.ssm['obs_cov', 0, 0] = params[offset]
            offset += 1
        # State covariance
        if self.k_state_cov > 0:
            variances = params[offset:offset+self.k_state_cov]
            # The cycle contributes two states sharing a single variance
            # parameter, so duplicate it to fill both diagonal slots.
            if self.stochastic_cycle and self.cycle:
                if self.autoregressive:
                    # Cycle variance is second-to-last (AR variance is last).
                    variances = np.r_[variances[:-1], variances[-2:]]
                else:
                    # Cycle variance is last.
                    variances = np.r_[variances, variances[-1]]
            self.ssm[self._idx_state_cov] = variances
            offset += self.k_state_cov
        # Cycle transition
        if self.cycle:
            # 2x2 rotation at the cycle frequency, optionally damped.
            cos_freq = np.cos(params[offset])
            sin_freq = np.sin(params[offset])
            cycle_transition = np.array(
                [[cos_freq, sin_freq],
                 [-sin_freq, cos_freq]]
            )
            if self.damped_cycle:
                offset += 1
                cycle_transition *= params[offset]
            self.ssm[self._idx_cycle_transition] = cycle_transition
            offset += 1
        # AR transition
        if self.autoregressive:
            self.ssm[self._idx_ar_transition] = (
                params[offset:offset+self.ar_order]
            )
            offset += self.ar_order
        # Beta observation intercept
        if self.regression:
            if self.mle_regression:
                # exog @ beta enters as a time-varying observation intercept.
                self.ssm['obs_intercept'] = np.dot(
                    self.exog,
                    params[offset:offset+self.k_exog]
                )[None, :]
                offset += self.k_exog
        # Initialize the state
        self.initialize_state()
class UnobservedComponentsResults(MLEResults):
"""
Class to hold results from fitting an unobserved components model.
Parameters
----------
model : UnobservedComponents instance
The fitted model instance
Attributes
----------
specification : dictionary
Dictionary including all attributes from the unobserved components
model instance.
See Also
--------
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.mlemodel.MLEResults
"""
def __init__(self, model, params, filter_results, cov_type='opg',
**kwargs):
super(UnobservedComponentsResults, self).__init__(
model, params, filter_results, cov_type, **kwargs)
self.df_resid = np.inf # attribute required for wald tests
# Save the model specification
self.specification = Bunch(**{
# Model options
'level': self.model.level,
'trend': self.model.trend,
'seasonal_period': self.model.seasonal_period,
'seasonal': self.model.seasonal,
'cycle': self.model.cycle,
'ar_order': self.model.ar_order,
'autoregressive': self.model.autoregressive,
'irregular': self.model.irregular,
'stochastic_level': self.model.stochastic_level,
'stochastic_trend': self.model.stochastic_trend,
'stochastic_seasonal': self.model.stochastic_seasonal,
'stochastic_cycle': self.model.stochastic_cycle,
'damped_cycle': self.model.damped_cycle,
'regression': self.model.regression,
'mle_regression': self.model.mle_regression,
# Check for string trend/level specification
'trend_specification': self.model.trend_specification
})
@property
def level(self):
"""
Filtered value of unobserved level component
"""
# If present, level is always the first component of the state vector
out = None
spec = self.specification
if spec.level:
offset = 0
out = Bunch(filtered=self.filter_results.filtered_state[offset],
offset=offset)
return out
@property
def trend(self):
"""
Filtered value of unobserved trend component
"""
# If present, trend is always the second component of the state vector
# (because level is always present if trend is present)
out = None
spec = self.specification
if spec.trend:
offset = int(spec.level)
out = Bunch(filtered=self.filter_results.filtered_state[offset],
offset=offset)
return out
@property
def seasonal(self):
# If present, seasonal always follows level/trend (if they are present)
# Note that we return only the first seasonal state, but there are
# in fact seasonal_period-1 seasonal states, however latter states
# are just lagged versions of the first seasonal state.
out = None
spec = self.specification
if spec.seasonal:
offset = int(spec.trend + spec.level)
out = Bunch(filtered=self.filter_results.filtered_state[offset],
offset=offset)
return out
@property
def cycle(self):
# If present, cycle always follows level/trend and seasonal
# Note that we return only the first cyclical state, but there are
# in fact 2 cyclical states. The second cyclical state is not simply
# a lag of the first cyclical state, but the first cyclical state is
# the one that enters the measurement equation.
out = None
spec = self.specification
if spec.cycle:
offset = int(spec.trend + spec.level +
spec.seasonal * (spec.seasonal_period - 1))
out = Bunch(filtered=self.filter_results.filtered_state[offset],
offset=offset)
return out
@property
def autoregressive(self):
# If present, autoregressive always follows level/trend, seasonal, and
# cyclical. If it is an AR(p) model, then there are p associated
# states, but the second - pth states are just lags of the first state.
out = None
spec = self.specification
if spec.autoregressive:
offset = int(spec.trend + spec.level +
spec.seasonal * (spec.seasonal_period - 1) +
2 * spec.cycle)
out = Bunch(filtered=self.filter_results.filtered_state[offset],
offset=offset)
return out
@property
def regression_coefficients(self):
# If present, state-vector regression coefficients always are last
# (i.e. they follow level/trend, seasonal, cyclical, and
# autoregressive states). There is one state associated with each
# regressor, and all are returned here.
out = None
spec = self.specification
if spec.regression:
if spec.mle_regression:
warnings.warn('Regression coefficients estimated via maximum'
' likelihood. Estimated coefficients are'
' available in the parameters list, not as part'
' of the state vector.')
else:
offset = int(spec.trend + spec.level +
spec.seasonal * (spec.seasonal_period - 1) +
spec.cycle * (1 + spec.stochastic_cycle) +
spec.ar_order)
filtered_state = self.filter_results.filtered_state[offset]
out = Bunch(filtered=filtered_state,
offset=offset)
return out
def plot_components(self, which='filtered', alpha=0.05,
observed=True, level=True, trend=True,
seasonal=True, cycle=True, autoregressive=True,
fig=None, figsize=None):
"""
Plot the estimated components of the model.
Parameters
----------
which : {'filtered'}, optional
Type of state estimate to plot. Default is 'filtered'.
alpha : float, optional
The confidence intervals for the components are (1 - alpha) %
level : boolean, optional
Whether or not to plot the level component, if applicable.
Default is True.
trend : boolean, optional
Whether or not to plot the trend component, if applicable.
Default is True.
seasonal : boolean, optional
Whether or not to plot the seasonal component, if applicable.
Default is True.
cycle : boolean, optional
Whether or not to plot the cyclical component, if applicable.
Default is True.
autoregressive : boolean, optional
Whether or not to plot the autoregressive state, if applicable.
Default is True.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
If all options are included in the model and selected, this produces
a 6x1 plot grid with the following plots (ordered top-to-bottom):
0. Observed series against predicted series
1. Level
2. Trend
3. Seasonal
4. Cycle
5. Autoregressive
Specific subplots will be removed if the component is not present in
the estimated model or if the corresponding keywork argument is set to
False.
All plots contain (1 - `alpha`) % confidence intervals.
"""
from scipy.stats import norm
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
plt = _import_mpl()
fig = create_mpl_fig(fig, figsize)
# Check for a valid estimation type
if which not in ['filtered']:
raise ValueError('Invalid type of state estimate.')
# Determine which plots we have
spec = self.specification
components = OrderedDict([
('level', level and spec.level),
('trend', trend and spec.trend),
('seasonal', seasonal and spec.seasonal),
('cycle', cycle and spec.cycle),
('autoregressive', autoregressive and spec.autoregressive),
])
# Number of plots
k_plots = observed + np.sum(components.values())
# Get dates, if applicable
if hasattr(self.data, 'dates') and self.data.dates is not None:
dates = self.data.dates._mpl_repr()
else:
dates = np.arange(len(resid))
# Get the critical value for confidence intervals
critical_value = norm.ppf(1 - alpha / 2.)
plot_idx = 1
# Observed, predicted, confidence intervals
if observed:
ax = fig.add_subplot(k_plots, 1, plot_idx)
plot_idx += 1
# Plot the observed dataset
ax.plot(dates, self.model.endog, color='k', label='Observed')
# Get the predicted values and confidence intervals
predict = self.filter_results.forecasts[0]
std_errors = np.sqrt(self.filter_results.forecasts_error_cov[0,0])
ci_lower = predict - critical_value * std_errors
ci_upper = predict + critical_value * std_errors
# Plot
ax.plot(dates, predict, label='One-step-ahead predictions')
ci_poly = ax.fill_between(dates, ci_lower, ci_upper, alpha=0.2)
ci_label = '$%.3g \\%%$ confidence interval' % ((1 - alpha)*100)
# Proxy artist for fill_between legend entry
# See e.g. http://matplotlib.org/1.3.1/users/legend_guide.html
p = plt.Rectangle((0, 0), 1, 1, fc=ci_poly.get_facecolor()[0])
# Legend
handles, labels = ax.get_legend_handles_labels()
handles.append(p)
labels.append(ci_label)
ax.legend(handles, labels)
ax.set_title('Predicted vs observed')
# Plot each component
for component, is_plotted in components.items():
if not is_plotted:
continue
ax = fig.add_subplot(k_plots, 1, plot_idx)
plot_idx += 1
# Get the predicted values and confidence intervals
value = getattr(self, component)[which]
offset = getattr(self, component)['offset']
std_errors = np.sqrt(
self.filter_results.filtered_state_cov[offset, offset]
)
ci_lower = value - critical_value * std_errors
ci_upper = value + critical_value * std_errors
# Plot
state_label = '%s (%s)' % (component.title(), which)
ax.plot(dates, value, label=state_label)
ci_poly = ax.fill_between(dates, ci_lower, ci_upper, alpha=0.2)
ci_label = '$%.3g \\%%$ confidence interval' % ((1 - alpha)*100)
# Legend
ax.legend()
ax.set_title('%s component' % component.title())
return fig
def summary(self, alpha=.05, start=None):
# Create the model name
model_name = [self.specification.trend_specification]
if self.specification.seasonal:
seasonal_name = 'seasonal(%d)' % self.specification.seasonal_period
if self.specification.stochastic_seasonal:
seasonal_name = 'stochastic ' + seasonal_name
model_name.append(seasonal_name)
if self.specification.cycle:
cycle_name = 'cycle'
if self.specification.stochastic_cycle:
cycle_name = 'stochastic ' + cycle_name
if self.specification.damped_cycle:
cycle_name = 'damped ' + cycle_name
model_name.append(cycle_name)
if self.specification.autoregressive:
autoregressive_name = 'AR(%d)' % self.specification.ar_order
model_name.append(autoregressive_name)
return super(UnobservedComponentsResults, self).summary(
alpha=alpha, start=start, title='Unobserved Components Results',
model_name=model_name
)
summary.__doc__ = MLEResults.summary.__doc__
class UnobservedComponentsResultsWrapper(MLEResultsWrapper):
    # No additional attributes or methods beyond those handled by
    # MLEResultsWrapper require wrapping.
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
                                     _methods)
# Attach the wrapper class to the results class.
wrap.populate_wrapper(UnobservedComponentsResultsWrapper,
                      UnobservedComponentsResults)
REF: Consistent conversion of params to ndarray
"""
Univariate structural time series models
Author: Chad Fulton
License: Simplified-BSD
"""
from __future__ import division, absolute_import, print_function
from warnings import warn
from statsmodels.compat.collections import OrderedDict
import numpy as np
import pandas as pd
from statsmodels.tsa.filters.hp_filter import hpfilter
from statsmodels.tools.data import _is_using_pandas
from .mlemodel import MLEModel, MLEResults, MLEResultsWrapper
from scipy.linalg import solve_discrete_lyapunov
from statsmodels.tools.tools import Bunch
from .tools import (
companion_matrix, constrain_stationary_univariate,
unconstrain_stationary_univariate
)
import statsmodels.base.wrapper as wrap
# Mapping from the level/trend bitmask built in
# `UnobservedComponents.__init__` (`self.trend_mask`) to a human-readable
# trend specification string. Bit values:
#   0x01 irregular, 0x02 level, 0x04 stochastic level,
#   0x08 trend, 0x10 stochastic trend.
_mask_map = {
    1: 'irregular',
    2: 'fixed intercept',
    3: 'deterministic constant',
    6: 'random walk',
    7: 'local level',
    8: 'fixed slope',
    11: 'deterministic trend',
    14: 'random walk with drift',
    15: 'local linear deterministic trend',
    31: 'local linear trend',
    27: 'smooth trend',
    26: 'random trend'
}
class UnobservedComponents(MLEModel):
r"""
Univariate unobserved components time series model
These are also known as structural time series models, and decompose a
(univariate) time series into trend, seasonal, cyclical, and irregular
components.
Parameters
----------
level : bool, optional
Whether or not to include a level component. Default is False.
trend : bool, optional
Whether or not to include a trend component. Default is False. If True,
`level` must also be True.
seasonal_period : int or None, optional
The period of the seasonal component. Default is None.
cycle : bool, optional
Whether or not to include a cycle component. Default is False.
ar : int or None, optional
The order of the autoregressive component. Default is None.
exog : array_like or None, optional
        Exogenous variables.
irregular : bool, optional
Whether or not to include an irregular component. Default is True
stochastic_level : bool, optional
Whether or not any level component is stochastic. Default is True.
stochastic_trend : bool, optional
Whether or not any trend component is stochastic. Default is True.
stochastic_seasonal : bool, optional
Whether or not any seasonal component is stochastic. Default is True.
stochastic_cycle : bool, optional
Whether or not any cycle component is stochastic. Default is True.
damped_cycle : bool, optional
Whether or not the cycle component is damped. Default is False.
cycle_period_bounds : tuple, optional
A tuple with lower and upper allowed bounds for the period of the
cycle. If not provided, the following default bounds are used:
(1) if no date / time information is provided, the frequency is
constrained to be between zero and :math:`\pi`, so the period is
constrained to be in [0.5, infinity].
(2) If the date / time information is provided, the default bounds
allow the cyclical component to be between 1.5 and 12 years; depending
on the frequency of the endogenous variable, this will imply different
specific bounds.
Notes
-----
    These models take the general form (see [1]_ Chapter 3.2 for all details)
.. math::
y_t = \mu_t + \gamma_t + c_t + \varepsilon_t
where :math:`y_t` refers to the observation vector at time :math:`t`,
:math:`\mu_t` refers to the trend component, :math:`\gamma_t` refers to the
seasonal component, :math:`c_t` refers to the cycle, and
:math:`\varepsilon_t` is the irregular. The modeling details of these
components are given below.
**Trend**
The trend is modeled either as a *local linear trend* model or as an
*integrated random walk* model.
The local linear trend is specified as:
.. math::
\mu_t = \mu_{t-1} + \nu_{t-1} + \xi_{t-1} \\
\nu_t = \nu_{t-1} + \zeta_{t-1}
with :math:`\xi_t \sim N(0, \sigma_\xi^2)` and
:math:`\zeta_t \sim N(0, \sigma_\zeta^2)`.
The integrated random walk model of order `r` is specified as:
.. math::
\Delta^r \mu_t = \xi_{t-1} \\
This component results in two parameters to be selected via maximum
likelihood: :math:`\sigma_\xi^2` and :math:`\sigma_\zeta^2`.
In the case of the integrated random walk model, the parameter
:math:`\sigma_\xi^2` is constrained to be zero, but the parameter `r` (the
order of integration) must be chosen (it is not estimated by MLE).
**Seasonal**
The seasonal component is modeled as:
.. math::
\gamma_t = - \sum_{j=1}^{s-1} \gamma_{t+1-j} + \omega_t \\
\omega_t \sim N(0, \sigma_\omega^2)
where s is the number of seasons and :math:`\omega_t` is an error term that
allows the seasonal constants to change over time (if this is not desired,
:math:`\sigma_\omega^2` can be set to zero).
This component results in one parameter to be selected via maximum
likelihood: :math:`\sigma_\omega^2`, and one parameter to be chosen, the
number of seasons `s`.
**Cycle**
The cyclical component is modeled as
.. math::
c_{t+1} = \rho_c (\tilde c_t \cos \lambda_c t
+ \tilde c_t^* \sin \lambda_c) +
\tilde \omega_t \\
c_{t+1}^* = \rho_c (- \tilde c_t \sin \lambda_c t +
\tilde c_t^* \cos \lambda_c) +
\tilde \omega_t^* \\
    where :math:`\tilde \omega_t` and :math:`\tilde \omega_t^*` are iid
    :math:`N(0, \sigma_{\tilde \omega}^2)`
This component results in three parameters to be selected via maximum
likelihood: :math:`\sigma_{\tilde \omega}^2`, :math:`\rho_c`, and
:math:`\lambda_c`.
**Irregular**
The irregular components are independent and identically distributed (iid):
.. math::
\varepsilon_t \sim N(0, \sigma_\varepsilon^2)
References
----------
.. [1] Durbin, James, and Siem Jan Koopman. 2012.
Time Series Analysis by State Space Methods: Second Edition.
Oxford University Press.
"""
    def __init__(self, endog, level=False, trend=False, seasonal=None,
                 cycle=False, autoregressive=None, exog=None, irregular=False,
                 stochastic_level=False, stochastic_trend=False,
                 stochastic_seasonal=True, stochastic_cycle=False,
                 damped_cycle=False, cycle_period_bounds=None,
                 mle_regression=True,
                 **kwargs):
        """
        Initialize the unobserved components model.

        See the class docstring for descriptions of the parameters.
        """
        # Model options
        self.level = level
        self.trend = trend
        self.seasonal_period = seasonal if seasonal is not None else 0
        self.seasonal = self.seasonal_period > 0
        self.cycle = cycle
        self.ar_order = autoregressive if autoregressive is not None else 0
        self.autoregressive = self.ar_order > 0
        self.irregular = irregular
        self.stochastic_level = stochastic_level
        self.stochastic_trend = stochastic_trend
        self.stochastic_seasonal = stochastic_seasonal
        self.stochastic_cycle = stochastic_cycle
        self.damped_cycle = damped_cycle
        self.mle_regression = mle_regression
        # Check for string trend/level specification: if `level` is a string
        # (e.g. 'local linear trend' or its abbreviation 'lltrend'), it
        # overrides the individual boolean trend/level options below.
        self.trend_specification = None
        if isinstance(self.level, str):
            self.trend_specification = level
            self.level = False
            # Check if any of the trend/level components have been set, and
            # reset everything to False
            trend_attributes = ['irregular', 'level', 'trend',
                                'stochastic_level', 'stochastic_trend']
            for attribute in trend_attributes:
                if not getattr(self, attribute) is False:
                    warn("Value of `%s` may be overridden when the trend"
                         " component is specified using a model string."
                         % attribute)
                    setattr(self, attribute, False)
            # Now set the correct specification
            spec = self.trend_specification
            if spec == 'irregular' or spec == 'ntrend':
                self.irregular = True
                self.trend_specification = 'irregular'
            elif spec == 'fixed intercept':
                self.level = True
            elif spec == 'deterministic constant' or spec == 'dconstant':
                self.irregular = True
                self.level = True
                self.trend_specification = 'deterministic constant'
            elif spec == 'local level' or spec == 'llevel':
                self.irregular = True
                self.level = True
                self.stochastic_level = True
                self.trend_specification = 'local level'
            elif spec == 'random walk' or spec == 'rwalk':
                self.level = True
                self.stochastic_level = True
                self.trend_specification = 'random walk'
            elif spec == 'fixed slope':
                self.level = True
                self.trend = True
            elif spec == 'deterministic trend' or spec == 'dtrend':
                self.irregular = True
                self.level = True
                self.trend = True
                self.trend_specification = 'deterministic trend'
            elif (spec == 'local linear deterministic trend' or
                    spec == 'lldtrend'):
                self.irregular = True
                self.level = True
                self.stochastic_level = True
                self.trend = True
                self.trend_specification = 'local linear deterministic trend'
            elif spec == 'random walk with drift' or spec == 'rwdrift':
                self.level = True
                self.stochastic_level = True
                self.trend = True
                self.trend_specification = 'random walk with drift'
            elif spec == 'local linear trend' or spec == 'lltrend':
                self.irregular = True
                self.level = True
                self.stochastic_level = True
                self.trend = True
                self.stochastic_trend = True
                self.trend_specification = 'local linear trend'
            elif spec == 'smooth trend' or spec == 'strend':
                self.irregular = True
                self.level = True
                self.trend = True
                self.stochastic_trend = True
                self.trend_specification = 'smooth trend'
            elif spec == 'random trend' or spec == 'rtrend':
                self.level = True
                self.trend = True
                self.stochastic_trend = True
                self.trend_specification = 'random trend'
            else:
                raise ValueError("Invalid level/trend specification: '%s'"
                                 % spec)
        # Check for a model that makes sense
        if trend and not level:
            warn("Trend component specified without level component;"
                 " deterministic level component added.")
            self.level = True
            self.stochastic_level = False
        # The model must contain at least one stochastic element; otherwise
        # an irregular (noise) component is added automatically.
        if not (self.irregular or
                (self.level and self.stochastic_level) or
                (self.trend and self.stochastic_trend) or
                (self.seasonal and self.stochastic_seasonal) or
                (self.cycle and self.stochastic_cycle) or
                self.autoregressive):
            warn("Specified model does not contain a stochastic element;"
                 " irregular component added.")
            self.irregular = True
        if self.seasonal and self.seasonal_period < 2:
            raise ValueError('Seasonal component must have a seasonal period'
                             ' of at least 2.')
        # Create a bitmask holding the level/trend specification
        # (see `_mask_map` for the decoding of the bits)
        self.trend_mask = (
            self.irregular * 0x01 |
            self.level * 0x02 |
            self.level * self.stochastic_level * 0x04 |
            self.trend * 0x08 |
            self.trend * self.stochastic_trend * 0x10
        )
        # Create the trend specification, if it wasn't given
        if self.trend_specification is None:
            # trend specification may be none, e.g. if the model is only
            # a stochastic cycle, etc.
            self.trend_specification = _mask_map.get(self.trend_mask, None)
        # Exogenous component: coerce `exog` to a 2-dim array / DataFrame
        # and record the number of regressors.
        self.k_exog = 0
        if exog is not None:
            exog_is_using_pandas = _is_using_pandas(exog, None)
            if not exog_is_using_pandas:
                exog = np.asarray(exog)
            # Make sure we have 2-dimensional array
            if exog.ndim == 1:
                if not exog_is_using_pandas:
                    exog = exog[:, None]
                else:
                    exog = pd.DataFrame(exog)
            self.k_exog = exog.shape[1]
        self.regression = self.k_exog > 0
        # Model parameters: dimensions of the state vector and of the
        # positive-definite part of the state covariance matrix.
        k_states = (
            self.level + self.trend +
            (self.seasonal_period - 1) * self.seasonal +
            self.cycle * 2 +
            self.ar_order +
            (not self.mle_regression) * self.k_exog
        )
        k_posdef = (
            self.stochastic_level * self.level +
            self.stochastic_trend * self.trend +
            self.stochastic_seasonal * self.seasonal +
            self.stochastic_cycle * (self.cycle * 2) +
            self.autoregressive
        )
        # We can still estimate the model with just the irregular component,
        # just need to have one state that does nothing.
        loglikelihood_burn = kwargs.get('loglikelihood_burn',
                                        k_states - self.ar_order)
        if k_states == 0:
            if not self.irregular:
                raise ValueError('Model has no components specified.')
            k_states = 1
        if k_posdef == 0:
            k_posdef = 1
        # Setup the representation
        super(UnobservedComponents, self).__init__(
            endog, k_states, k_posdef=k_posdef, exog=exog, **kwargs
        )
        self.setup()
        # Initialize the model
        self.ssm.loglikelihood_burn = loglikelihood_burn
        # Need to reset the MLE names (since when they were first set, `setup`
        # had not been run (and could not have been at that point))
        self.data.param_names = self.param_names
        # Get bounds for the frequency of the cycle, if we know the frequency
        # of the data.
        if cycle_period_bounds is None:
            freq = self.data.freq[0] if self.data.freq is not None else ''
            if freq == 'A':
                cycle_period_bounds = (1.5, 12)
            elif freq == 'Q':
                cycle_period_bounds = (1.5*4, 12*4)
            elif freq == 'M':
                cycle_period_bounds = (1.5*12, 12*12)
            else:
                # If we have no information on data frequency, require the
                # cycle frequency to be between 0 and pi
                cycle_period_bounds = (2, np.inf)
        # Convert the period bounds into (lower, upper) frequency bounds.
        self.cycle_frequency_bound = (
            2*np.pi / cycle_period_bounds[1], 2*np.pi / cycle_period_bounds[0]
        )
    def setup(self):
        """
        Setup the structural time series representation
        """
        # TODO fix this
        # (if we don't set it here, each instance shares a single dictionary)
        self._start_params = {
            'irregular_var': 0.1,
            'level_var': 0.1,
            'trend_var': 0.1,
            'seasonal_var': 0.1,
            'cycle_freq': 0.1,
            'cycle_var': 0.1,
            'cycle_damp': 0.1,
            'ar_coeff': 0,
            'ar_var': 0.1,
            'reg_coeff': 0,
        }
        # Human-readable names; 'ar_coeff' and 'reg_coeff' are templates
        # expanded below once the AR order / number of regressors is known.
        self._param_names = {
            'irregular_var': 'sigma2.irregular',
            'level_var': 'sigma2.level',
            'trend_var': 'sigma2.trend',
            'seasonal_var': 'sigma2.seasonal',
            'cycle_var': 'sigma2.cycle',
            'cycle_freq': 'frequency.cycle',
            'cycle_damp': 'damping.cycle',
            'ar_coeff': 'ar.L%d',
            'ar_var': 'sigma2.ar',
            'reg_coeff': 'beta.%d',
        }
        # Initialize the ordered sets of parameters, grouped by the part of
        # the state space representation each parameter enters.
        self.parameters = OrderedDict()
        self.parameters_obs_intercept = OrderedDict()
        self.parameters_obs_cov = OrderedDict()
        self.parameters_transition = OrderedDict()
        self.parameters_state_cov = OrderedDict()
        # Initialize the fixed components of the state space matrices,
        i = 0  # state offset
        j = 0  # state covariance offset
        if self.irregular:
            self.parameters_obs_cov['irregular_var'] = 1
        if self.level:
            # The level enters the observation equation and follows a
            # (possibly stochastic) random walk.
            self.ssm['design', 0, i] = 1.
            self.ssm['transition', i, i] = 1.
            if self.trend:
                # The trend state feeds into the level equation.
                self.ssm['transition', i, i+1] = 1.
            if self.stochastic_level:
                self.ssm['selection', i, j] = 1.
                self.parameters_state_cov['level_var'] = 1
                j += 1
            i += 1
        if self.trend:
            self.ssm['transition', i, i] = 1.
            if self.stochastic_trend:
                self.ssm['selection', i, j] = 1.
                self.parameters_state_cov['trend_var'] = 1
                j += 1
            i += 1
        if self.seasonal:
            n = self.seasonal_period - 1
            self.ssm['design', 0, i] = 1.
            # Seasonal transition: the first seasonal state equals minus the
            # sum of the previous seasonal_period - 1 states.
            self.ssm['transition', i:i + n, i:i + n] = (
                companion_matrix(np.r_[1, [1] * n]).transpose()
            )
            if self.stochastic_seasonal:
                self.ssm['selection', i, j] = 1.
                self.parameters_state_cov['seasonal_var'] = 1
                j += 1
            i += n
        if self.cycle:
            self.ssm['design', 0, i] = 1.
            self.parameters_transition['cycle_freq'] = 1
            if self.damped_cycle:
                self.parameters_transition['cycle_damp'] = 1
            if self.stochastic_cycle:
                self.ssm['selection', i:i+2, j:j+2] = np.eye(2)
                self.parameters_state_cov['cycle_var'] = 1
                j += 2
            # The cycle transition (rotation) block is parameter-dependent,
            # so only its index is stored here; it is filled in `update`.
            self._idx_cycle_transition = np.s_['transition', i:i+2, i:i+2]
            i += 2
        if self.autoregressive:
            self.ssm['design', 0, i] = 1.
            self.parameters_transition['ar_coeff'] = self.ar_order
            self.parameters_state_cov['ar_var'] = 1
            self.ssm['selection', i, j] = 1
            self.ssm['transition', i:i+self.ar_order, i:i+self.ar_order] = (
                companion_matrix(self.ar_order).T
            )
            # AR coefficients are parameters; store the index for `update`.
            self._idx_ar_transition = (
                np.s_['transition', i, i:i+self.ar_order]
            )
            # Expand the scalar starting value / name template into one
            # entry per AR lag.
            self._start_params['ar_coeff'] = (
                [self._start_params['ar_coeff']] * self.ar_order
            )
            self._param_names['ar_coeff'] = [
                self._param_names['ar_coeff'] % k
                for k in range(1, self.ar_order+1)
            ]
            j += 1
            i += self.ar_order
        if self.regression:
            if self.mle_regression:
                self.parameters_obs_intercept['reg_coeff'] = self.k_exog
                self._start_params['reg_coeff'] = (
                    [self._start_params['reg_coeff']] * self.k_exog
                )
                self._param_names['reg_coeff'] = [
                    self._param_names['reg_coeff'] % k
                    for k in range(1, self.k_exog+1)
                ]
            else:
                # Regression coefficients as (static) states: the design
                # matrix becomes time-varying, carrying the exog data.
                design = np.repeat(self.ssm['design', :, :, 0], self.nobs, axis=0)
                self.ssm['design'] = design.transpose()[np.newaxis, :, :]
                self.ssm['design', 0, i:i+self.k_exog, :] = self.exog.transpose()
                self.ssm['transition', i:i+self.k_exog, i:i+self.k_exog] = (
                    np.eye(self.k_exog)
                )
                i += self.k_exog
        # Update to get the actual parameter set
        self.parameters.update(self.parameters_obs_cov)
        self.parameters.update(self.parameters_state_cov)
        self.parameters.update(self.parameters_transition)  # ordered last
        self.parameters.update(self.parameters_obs_intercept)
        self.k_obs_intercept = sum(self.parameters_obs_intercept.values())
        self.k_obs_cov = sum(self.parameters_obs_cov.values())
        self.k_transition = sum(self.parameters_transition.values())
        self.k_state_cov = sum(self.parameters_state_cov.values())
        self.k_params = sum(self.parameters.values())
        # Other indices: diagonal of the state covariance, filled in `update`
        idx = np.diag_indices(self.ssm.k_posdef)
        self._idx_state_cov = ('state_cov', idx[0], idx[1])
def initialize_state(self):
# Initialize the AR component as stationary, the rest as approximately
# diffuse
initial_state = np.zeros(self.k_states)
initial_state_cov = (
np.eye(self.k_states, dtype=self.ssm.transition.dtype) *
self.ssm.initial_variance
)
if self.autoregressive:
start = (
self.level + self.trend +
(self.seasonal_period - 1) * self.seasonal +
self.cycle * 2
)
end = start + self.ar_order
selection_stationary = self.ssm.selection[start:end, :, 0]
selected_state_cov_stationary = np.dot(
np.dot(selection_stationary, self.ssm.state_cov[:, :, 0]),
selection_stationary.T
)
try:
initial_state_cov_stationary = solve_discrete_lyapunov(
self.ssm.transition[start:end, start:end, 0],
selected_state_cov_stationary
)
except:
initial_state_cov_stationary = solve_discrete_lyapunov(
self.ssm.transition[start:end, start:end, 0],
selected_state_cov_stationary,
method='direct'
)
initial_state_cov[start:end, start:end] = (
initial_state_cov_stationary
)
self.ssm.initialize_known(initial_state, initial_state_cov)
def filter(self, params, transformed=True, cov_type=None, return_ssm=False,
**kwargs):
params = np.array(params)
# Transform parameters if necessary
if not transformed:
params = self.transform_params(params)
transformed = True
# Get the state space output
result = super(UnobservedComponents, self).filter(
params, transformed, cov_type, return_ssm=True, **kwargs)
# Wrap in a results object
if not return_ssm:
result_kwargs = {}
if cov_type is not None:
result_kwargs['cov_type'] = cov_type
result = UnobservedComponentsResultsWrapper(
UnobservedComponentsResults(self, params, result,
**result_kwargs)
)
return result
@property
def start_params(self):
if not hasattr(self, 'parameters'):
return []
# Level / trend variances
# (Use the HP filter to get initial estimates of variances)
_start_params = self._start_params.copy()
if self.level:
resid, trend1 = hpfilter(self.endog)
if self.stochastic_trend:
cycle2, trend2 = hpfilter(trend1)
_start_params['trend_var'] = np.std(trend2)**2
if self.stochastic_level:
_start_params['level_var'] = np.std(cycle2)**2
elif self.stochastic_level:
_start_params['level_var'] = np.std(trend1)**2
else:
resid = self.ssm.endog[0]
# Seasonal
if self.stochastic_seasonal:
# TODO seasonal variance starting values?
pass
# Cyclical
if self.cycle:
_start_params['cycle_var'] = np.std(resid)**2
_start_params['cycle_damp'] = (
np.linalg.pinv(resid[:-1, None]).dot(resid[1:])[0]
)
# Set initial period estimate to 3 year, if we know the frequency
# of the data observations
freq = self.data.freq[0] if self.data.freq is not None else ''
if freq == 'A':
_start_params['cycle_freq'] = 2 * np.pi / 3
elif freq == 'Q':
_start_params['cycle_freq'] = 2 * np.pi / 12
elif freq == 'M':
_start_params['cycle_freq'] = 2 * np.pi / 36
# Irregular
else:
_start_params['irregular_var'] = np.std(resid)**2
# Create the starting parameter list
start_params = []
for key in self.parameters.keys():
if np.isscalar(_start_params[key]):
start_params.append(_start_params[key])
else:
start_params += _start_params[key]
return start_params
@property
def param_names(self):
if not hasattr(self, 'parameters'):
return []
param_names = []
for key in self.parameters.keys():
if np.isscalar(self._param_names[key]):
param_names.append(self._param_names[key])
else:
param_names += self._param_names[key]
return param_names
def transform_params(self, unconstrained):
"""
Transform unconstrained parameters used by the optimizer to constrained
parameters used in likelihood evaluation
"""
unconstrained = np.array(unconstrained)
constrained = np.zeros(unconstrained.shape, dtype=unconstrained.dtype)
# Positive parameters: obs_cov, state_cov
offset = self.k_obs_cov + self.k_state_cov
constrained[:offset] = unconstrained[:offset]**2
# Cycle parameters
if self.cycle:
# Cycle frequency must be between between our bounds
low, high = self.cycle_frequency_bound
constrained[offset] = (
1 / (1 + np.exp(-unconstrained[offset]))
) * (high - low) + low
offset += 1
# Cycle damping (if present) must be between 0 and 1
if self.damped_cycle:
constrained[offset] = (
1 / (1 + np.exp(-unconstrained[offset]))
)
offset += 1
# Autoregressive coefficients must be stationary
if self.autoregressive:
constrained[offset:offset + self.ar_order] = (
constrain_stationary_univariate(
unconstrained[offset:offset + self.ar_order]
)
)
offset += self.ar_order
# Nothing to do with betas
constrained[offset:offset + self.k_exog] = (
unconstrained[offset:offset + self.k_exog]
)
return constrained
def untransform_params(self, constrained):
"""
Reverse the transformation
"""
constrained = np.array(constrained)
unconstrained = np.zeros(constrained.shape, dtype=constrained.dtype)
# Positive parameters: obs_cov, state_cov
offset = self.k_obs_cov + self.k_state_cov
unconstrained[:offset] = constrained[:offset]**0.5
# Cycle parameters
if self.cycle:
# Cycle frequency must be between between our bounds
low, high = self.cycle_frequency_bound
x = (constrained[offset] - low) / (high - low)
unconstrained[offset] = np.log(
x / (1 - x)
)
offset += 1
# Cycle damping (if present) must be between 0 and 1
if self.damped_cycle:
unconstrained[offset] = np.log(
constrained[offset] / (1 - constrained[offset])
)
offset += 1
# Autoregressive coefficients must be stationary
if self.autoregressive:
unconstrained[offset:offset + self.ar_order] = (
unconstrain_stationary_univariate(
constrained[offset:offset + self.ar_order]
)
)
offset += self.ar_order
# Nothing to do with betas
unconstrained[offset:offset + self.k_exog] = (
constrained[offset:offset + self.k_exog]
)
return unconstrained
    def update(self, params, **kwargs):
        """
        Update the state space system matrices with the given parameters.

        Parameters are consumed in the canonical ordering: observation
        variance, state variances, cycle frequency / damping, AR
        coefficients, regression coefficients.
        """
        params = super(UnobservedComponents, self).update(params, **kwargs)
        offset = 0
        # Observation covariance
        if self.irregular:
            self.ssm['obs_cov', 0, 0] = params[offset]
            offset += 1
        # State covariance
        if self.k_state_cov > 0:
            variances = params[offset:offset+self.k_state_cov]
            # A stochastic cycle occupies two states but has only a single
            # variance parameter, so that variance is duplicated on the
            # diagonal here.
            if self.stochastic_cycle and self.cycle:
                if self.autoregressive:
                    # Cycle variance is second-to-last (AR variance is last)
                    variances = np.r_[variances[:-1], variances[-2:]]
                else:
                    # Cycle variance is last
                    variances = np.r_[variances, variances[-1]]
            self.ssm[self._idx_state_cov] = variances
            offset += self.k_state_cov
        # Cycle transition: a rotation by the cycle frequency, optionally
        # scaled by the damping factor.
        if self.cycle:
            cos_freq = np.cos(params[offset])
            sin_freq = np.sin(params[offset])
            cycle_transition = np.array(
                [[cos_freq, sin_freq],
                 [-sin_freq, cos_freq]]
            )
            if self.damped_cycle:
                offset += 1
                cycle_transition *= params[offset]
            self.ssm[self._idx_cycle_transition] = cycle_transition
            offset += 1
        # AR transition
        if self.autoregressive:
            self.ssm[self._idx_ar_transition] = (
                params[offset:offset+self.ar_order]
            )
            offset += self.ar_order
        # Beta observation intercept
        if self.regression:
            if self.mle_regression:
                self.ssm['obs_intercept'] = np.dot(
                    self.exog,
                    params[offset:offset+self.k_exog]
                )[None, :]
                offset += self.k_exog
        # Initialize the state (the AR initialization depends on the
        # parameters just set)
        self.initialize_state()
class UnobservedComponentsResults(MLEResults):
"""
Class to hold results from fitting an unobserved components model.
Parameters
----------
model : UnobservedComponents instance
The fitted model instance
Attributes
----------
specification : dictionary
Dictionary including all attributes from the unobserved components
model instance.
See Also
--------
statsmodels.tsa.statespace.kalman_filter.FilterResults
statsmodels.tsa.statespace.mlemodel.MLEResults
"""
def __init__(self, model, params, filter_results, cov_type='opg',
**kwargs):
super(UnobservedComponentsResults, self).__init__(
model, params, filter_results, cov_type, **kwargs)
self.df_resid = np.inf # attribute required for wald tests
# Save the model specification
self.specification = Bunch(**{
# Model options
'level': self.model.level,
'trend': self.model.trend,
'seasonal_period': self.model.seasonal_period,
'seasonal': self.model.seasonal,
'cycle': self.model.cycle,
'ar_order': self.model.ar_order,
'autoregressive': self.model.autoregressive,
'irregular': self.model.irregular,
'stochastic_level': self.model.stochastic_level,
'stochastic_trend': self.model.stochastic_trend,
'stochastic_seasonal': self.model.stochastic_seasonal,
'stochastic_cycle': self.model.stochastic_cycle,
'damped_cycle': self.model.damped_cycle,
'regression': self.model.regression,
'mle_regression': self.model.mle_regression,
# Check for string trend/level specification
'trend_specification': self.model.trend_specification
})
@property
def level(self):
"""
Filtered value of unobserved level component
"""
# If present, level is always the first component of the state vector
out = None
spec = self.specification
if spec.level:
offset = 0
out = Bunch(filtered=self.filter_results.filtered_state[offset],
offset=offset)
return out
@property
def trend(self):
"""
Filtered value of unobserved trend component
"""
# If present, trend is always the second component of the state vector
# (because level is always present if trend is present)
out = None
spec = self.specification
if spec.trend:
offset = int(spec.level)
out = Bunch(filtered=self.filter_results.filtered_state[offset],
offset=offset)
return out
    @property
    def seasonal(self):
        """
        Filtered value of unobserved seasonal component
        """
        # If present, seasonal always follows level/trend (if they are present)
        # Note that we return only the first seasonal state, but there are
        # in fact seasonal_period-1 seasonal states, however latter states
        # are just lagged versions of the first seasonal state.
        out = None
        spec = self.specification
        if spec.seasonal:
            offset = int(spec.trend + spec.level)
            out = Bunch(filtered=self.filter_results.filtered_state[offset],
                        offset=offset)
        return out
    @property
    def cycle(self):
        """
        Filtered value of unobserved cycle component
        """
        # If present, cycle always follows level/trend and seasonal
        # Note that we return only the first cyclical state, but there are
        # in fact 2 cyclical states. The second cyclical state is not simply
        # a lag of the first cyclical state, but the first cyclical state is
        # the one that enters the measurement equation.
        out = None
        spec = self.specification
        if spec.cycle:
            # Seasonal contributes seasonal_period - 1 states when present.
            offset = int(spec.trend + spec.level +
                         spec.seasonal * (spec.seasonal_period - 1))
            out = Bunch(filtered=self.filter_results.filtered_state[offset],
                        offset=offset)
        return out
    @property
    def autoregressive(self):
        """
        Filtered value of unobserved autoregressive component
        """
        # If present, autoregressive always follows level/trend, seasonal, and
        # cyclical. If it is an AR(p) model, then there are p associated
        # states, but the second - pth states are just lags of the first state.
        out = None
        spec = self.specification
        if spec.autoregressive:
            # The cycle contributes 2 states when present (cos/sin pair).
            offset = int(spec.trend + spec.level +
                         spec.seasonal * (spec.seasonal_period - 1) +
                         2 * spec.cycle)
            out = Bunch(filtered=self.filter_results.filtered_state[offset],
                        offset=offset)
        return out
@property
def regression_coefficients(self):
# If present, state-vector regression coefficients always are last
# (i.e. they follow level/trend, seasonal, cyclical, and
# autoregressive states). There is one state associated with each
# regressor, and all are returned here.
out = None
spec = self.specification
if spec.regression:
if spec.mle_regression:
warnings.warn('Regression coefficients estimated via maximum'
' likelihood. Estimated coefficients are'
' available in the parameters list, not as part'
' of the state vector.')
else:
offset = int(spec.trend + spec.level +
spec.seasonal * (spec.seasonal_period - 1) +
spec.cycle * (1 + spec.stochastic_cycle) +
spec.ar_order)
filtered_state = self.filter_results.filtered_state[offset]
out = Bunch(filtered=filtered_state,
offset=offset)
return out
def plot_components(self, which='filtered', alpha=0.05,
observed=True, level=True, trend=True,
seasonal=True, cycle=True, autoregressive=True,
fig=None, figsize=None):
"""
Plot the estimated components of the model.
Parameters
----------
which : {'filtered'}, optional
Type of state estimate to plot. Default is 'filtered'.
alpha : float, optional
The confidence intervals for the components are (1 - alpha) %
level : boolean, optional
Whether or not to plot the level component, if applicable.
Default is True.
trend : boolean, optional
Whether or not to plot the trend component, if applicable.
Default is True.
seasonal : boolean, optional
Whether or not to plot the seasonal component, if applicable.
Default is True.
cycle : boolean, optional
Whether or not to plot the cyclical component, if applicable.
Default is True.
autoregressive : boolean, optional
Whether or not to plot the autoregressive state, if applicable.
Default is True.
fig : Matplotlib Figure instance, optional
If given, subplots are created in this figure instead of in a new
figure. Note that the 2x2 grid will be created in the provided
figure using `fig.add_subplot()`.
figsize : tuple, optional
If a figure is created, this argument allows specifying a size.
The tuple is (width, height).
Notes
-----
If all options are included in the model and selected, this produces
a 6x1 plot grid with the following plots (ordered top-to-bottom):
0. Observed series against predicted series
1. Level
2. Trend
3. Seasonal
4. Cycle
5. Autoregressive
Specific subplots will be removed if the component is not present in
the estimated model or if the corresponding keywork argument is set to
False.
All plots contain (1 - `alpha`) % confidence intervals.
"""
from scipy.stats import norm
from statsmodels.graphics.utils import _import_mpl, create_mpl_fig
plt = _import_mpl()
fig = create_mpl_fig(fig, figsize)
# Check for a valid estimation type
if which not in ['filtered']:
raise ValueError('Invalid type of state estimate.')
# Determine which plots we have
spec = self.specification
components = OrderedDict([
('level', level and spec.level),
('trend', trend and spec.trend),
('seasonal', seasonal and spec.seasonal),
('cycle', cycle and spec.cycle),
('autoregressive', autoregressive and spec.autoregressive),
])
# Number of plots
k_plots = observed + np.sum(components.values())
# Get dates, if applicable
if hasattr(self.data, 'dates') and self.data.dates is not None:
dates = self.data.dates._mpl_repr()
else:
dates = np.arange(len(resid))
# Get the critical value for confidence intervals
critical_value = norm.ppf(1 - alpha / 2.)
plot_idx = 1
# Observed, predicted, confidence intervals
if observed:
ax = fig.add_subplot(k_plots, 1, plot_idx)
plot_idx += 1
# Plot the observed dataset
ax.plot(dates, self.model.endog, color='k', label='Observed')
# Get the predicted values and confidence intervals
predict = self.filter_results.forecasts[0]
std_errors = np.sqrt(self.filter_results.forecasts_error_cov[0,0])
ci_lower = predict - critical_value * std_errors
ci_upper = predict + critical_value * std_errors
# Plot
ax.plot(dates, predict, label='One-step-ahead predictions')
ci_poly = ax.fill_between(dates, ci_lower, ci_upper, alpha=0.2)
ci_label = '$%.3g \\%%$ confidence interval' % ((1 - alpha)*100)
# Proxy artist for fill_between legend entry
# See e.g. http://matplotlib.org/1.3.1/users/legend_guide.html
p = plt.Rectangle((0, 0), 1, 1, fc=ci_poly.get_facecolor()[0])
# Legend
handles, labels = ax.get_legend_handles_labels()
handles.append(p)
labels.append(ci_label)
ax.legend(handles, labels)
ax.set_title('Predicted vs observed')
# Plot each component
for component, is_plotted in components.items():
if not is_plotted:
continue
ax = fig.add_subplot(k_plots, 1, plot_idx)
plot_idx += 1
# Get the predicted values and confidence intervals
value = getattr(self, component)[which]
offset = getattr(self, component)['offset']
std_errors = np.sqrt(
self.filter_results.filtered_state_cov[offset, offset]
)
ci_lower = value - critical_value * std_errors
ci_upper = value + critical_value * std_errors
# Plot
state_label = '%s (%s)' % (component.title(), which)
ax.plot(dates, value, label=state_label)
ci_poly = ax.fill_between(dates, ci_lower, ci_upper, alpha=0.2)
ci_label = '$%.3g \\%%$ confidence interval' % ((1 - alpha)*100)
# Legend
ax.legend()
ax.set_title('%s component' % component.title())
return fig
def summary(self, alpha=.05, start=None):
# Create the model name
model_name = [self.specification.trend_specification]
if self.specification.seasonal:
seasonal_name = 'seasonal(%d)' % self.specification.seasonal_period
if self.specification.stochastic_seasonal:
seasonal_name = 'stochastic ' + seasonal_name
model_name.append(seasonal_name)
if self.specification.cycle:
cycle_name = 'cycle'
if self.specification.stochastic_cycle:
cycle_name = 'stochastic ' + cycle_name
if self.specification.damped_cycle:
cycle_name = 'damped ' + cycle_name
model_name.append(cycle_name)
if self.specification.autoregressive:
autoregressive_name = 'AR(%d)' % self.specification.ar_order
model_name.append(autoregressive_name)
return super(UnobservedComponentsResults, self).summary(
alpha=alpha, start=start, title='Unobserved Components Results',
model_name=model_name
)
summary.__doc__ = MLEResults.summary.__doc__
class UnobservedComponentsResultsWrapper(MLEResultsWrapper):
    # Thin pandas-aware wrapper: no attributes/methods beyond what
    # MLEResultsWrapper already wraps, so both local dicts are empty.
    _attrs = {}
    _wrap_attrs = wrap.union_dicts(MLEResultsWrapper._wrap_attrs,
                                   _attrs)
    _methods = {}
    _wrap_methods = wrap.union_dicts(MLEResultsWrapper._wrap_methods,
                                     _methods)
# Register the wrapper so results of UnobservedComponentsResults are
# automatically wrapped.
wrap.populate_wrapper(UnobservedComponentsResultsWrapper,
                      UnobservedComponentsResults)
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.plugins.ml2.drivers.openvswitch.agent import (
ovs_agent_extension_api as ovs_ext_api)
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl import (
ovs_bridge)
from neutron.tests import base
from neutron_lib import context
from networking_sfc.services.sfc.agent.extensions import sfc
class SfcAgentExtensionTestCase(base.BaseTestCase):
    def setUp(self):
        super(SfcAgentExtensionTestCase, self).setUp()
        # Keep the native OVSDB connection from actually starting.
        patcher = mock.patch(
            'neutron.agent.ovsdb.native.connection.Connection.start')
        patcher.start()
        self.addCleanup(patcher.stop)
        self.sfc_ext = sfc.SfcAgentExtension()
        self.context = context.get_admin_context()
        self.connection = mock.Mock()
        int_br = ovs_bridge.OVSAgentBridge('br-int')
        tun_br = ovs_bridge.OVSAgentBridge('br-tun')
        self.agent_api = ovs_ext_api.OVSAgentExtensionAPI(int_br, tun_br)
        self.sfc_ext.consume_api(self.agent_api)
        # Replace the provider-loaded driver with a mock so the test does
        # not depend on whichever driver is configured.
        mock.patch(
            'neutron.manager.NeutronManager.load_class_for_provider',
            return_value=lambda: mock.Mock(spec=sfc.SfcAgentDriver)
        ).start()
        self.sfc_ext.initialize(
            self.connection, constants.EXTENSION_DRIVER_TYPE)
    def test_update_empty_flow_rules(self):
        # An empty flowrule payload must never reach the driver.
        self.sfc_ext.update_flow_rules(self.context, flowrule_entries={})
        self.assertFalse(self.sfc_ext.sfc_driver.update_flow_rules.called)
test fix: follow neutron change Id22faa1f6179c2fdf8a136972d65f10749c9fc2e
The way the OVSDB wrapper is mocked needs to be changed to
follow neutron change Id22faa1f6179c2fdf8a136972d65f10749c9fc2e.
Change-Id: Ib16ba06f65f3b7efad02755267881d92572ffc12
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.plugins.ml2.drivers.openvswitch.agent import (
ovs_agent_extension_api as ovs_ext_api)
from neutron.plugins.ml2.drivers.openvswitch.agent.common import constants
from neutron.plugins.ml2.drivers.openvswitch.agent.openflow.ovs_ofctl import (
ovs_bridge)
from neutron.tests import base
from neutron_lib import context
from networking_sfc.services.sfc.agent.extensions import sfc
class SfcAgentExtensionTestCase(base.BaseTestCase):
    def setUp(self):
        super(SfcAgentExtensionTestCase, self).setUp()
        # Patch the module-level OVSDB connection object so no real OVSDB
        # connection is created during the test.
        conn_patcher = mock.patch('neutron.agent.ovsdb.impl_idl._connection')
        conn_patcher.start()
        self.addCleanup(conn_patcher.stop)
        self.sfc_ext = sfc.SfcAgentExtension()
        self.context = context.get_admin_context()
        self.connection = mock.Mock()
        self.agent_api = ovs_ext_api.OVSAgentExtensionAPI(
            ovs_bridge.OVSAgentBridge('br-int'),
            ovs_bridge.OVSAgentBridge('br-tun'))
        self.sfc_ext.consume_api(self.agent_api)
        # Don't rely on used driver
        mock.patch(
            'neutron.manager.NeutronManager.load_class_for_provider',
            return_value=lambda: mock.Mock(spec=sfc.SfcAgentDriver)
        ).start()
        self.sfc_ext.initialize(
            self.connection, constants.EXTENSION_DRIVER_TYPE)
    def test_update_empty_flow_rules(self):
        # With no flow rules in the payload, the driver must not be called.
        self.sfc_ext.update_flow_rules(self.context, flowrule_entries={})
        self.assertFalse(self.sfc_ext.sfc_driver.update_flow_rules.called)
|
#
# Kiwi: a Framework and Enhanced Widgets for Python
#
# Copyright (C) 2005 Async Open Source
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Author(s): Johan Dahlin <jdahlin@async.com.br>
#
"""
Argument checking decorator and support
"""
import inspect
from types import ClassType
class CustomType(type):
    """Base metaclass for argcheck's custom argument types.

    Subclasses set ``type`` to the concrete type(s) to isinstance-check
    against, and may override value_check() to validate the value itself.
    """
    def value_check(mcs, name, value):
        # Default: accept any value.
        pass
    value_check = classmethod(value_check)
class number(CustomType):
    """
    Custom type that verifies that the type is a number (eg float or int)
    """
    # NOTE(review): ``long`` is Python 2 only; under Python 3 this class
    # body would raise NameError.
    type = int, float, long
class percent(CustomType):
    """
    Custom type that verifies that the value is a percentage
    """
    type = int, float
    def value_check(mcs, name, value):
        # A percentage must lie in the closed interval [0, 100].
        # (The previous chained comparison ``0 > value < 100`` only
        # rejected negative values, silently accepting values above 100.)
        if value < 0 or value > 100:
            raise ValueError("%s must be between 0 and 100" % name)
    value_check = classmethod(value_check)
# Unique sentinel used to distinguish "no default supplied" from None.
_NoValue = object()
class argcheck(object):
    """
    Decorator to check type and value of arguments.
    Usage:
    >>> @argcheck(types...)
    ... def function(args..)
    or
    >>> class Class:
    ... @argcheck(types..)
    ... def method(self, args)
    You can customize the checks by subclassing your type from CustomType,
    there are two builtin types: number which is a float/int combined check
    and a percent which verifies that the value is a percentage
    """
    # Global kill-switch: when False, __call__ returns the decorated
    # function untouched (see enable()/disable()).
    __enabled__ = True
    def __init__(self, *types):
        # Validate the decorator arguments themselves: each must be a
        # new-style or old-style (ClassType) class usable with isinstance().
        for argtype in types:
            if not isinstance(argtype, (type, ClassType)):
                raise TypeError("must be a type or class instance")
        self.types = types
    def enable(cls):
        """
        Enable argcheck globally
        """
        cls.__enabled__ = True
    enable = classmethod(enable)
    def disable(cls):
        """
        Disable argcheck globally
        """
        cls.__enabled__ = False
    disable = classmethod(disable)
    def __call__(self, func):
        # Decorate func: pre-compute per-argument type and default tables
        # once at decoration time, then return a wrapper that validates
        # every call.
        if not callable(func):
            raise TypeError("%r must be callable" % func)
        # Useful for optimized runs
        if not self.__enabled__:
            return func
        spec = inspect.getargspec(func)
        arg_names, is_varargs, is_kwargs, default_values = spec
        if not default_values:
            default_values = []
        else:
            default_values = list(default_values)
        # Set all the remaining default values to _NoValue
        default_values = ([_NoValue] * (len(arg_names) - len(default_values)) +
                          default_values)
        # TODO: Is there another way of doing this?
        # Not trivial since func is not attached to the class at
        # this point. Nor is the class attached to the namespace.
        if arg_names and arg_names[0] in ('self', 'cls'):
            arg_names = arg_names[1:]
            default_values = default_values[1:]
            is_method = True
        else:
            is_method = False
        types = self.types
        if is_kwargs and not is_varargs and self.types:
            raise TypeError("argcheck cannot be used with only keywords")
        elif not is_varargs:
            if len(types) != len(arg_names):
                raise TypeError("%s has wrong number of arguments, "
                                "%d specified in decorator, "
                                "but function has %d" %
                                (func.__name__,
                                 len(types),
                                 len(arg_names)))
        defs = len(default_values)  # NOTE(review): unused, kept as-is
        # Build keyword lookup tables and sanity-check each declared
        # default value against its declared type.
        kwarg_types = {}
        kwarg_defaults = {}
        for i, arg_name in enumerate(arg_names):
            kwarg_types[arg_name] = types[i]
            value = default_values[i]
            kwarg_defaults[arg_name] = value
            if value is None or value is _NoValue:
                continue
            arg_type = types[i]
            try:
                self._type_check(value, arg_type, arg_name)
            except TypeError:
                raise TypeError("default value for %s must be of type %s "
                                "and not %s" % (arg_name,
                                                arg_type.__name__,
                                                type(value).__name__))
            kwarg_defaults[arg_name] = value
        def wrapper(*args, **kwargs):
            # Validate every positional and keyword argument, then call
            # through to the original function.
            if self.__enabled__:
                cargs = args
                if is_method:
                    cargs = cargs[1:]
                # Positional arguments
                for arg, type, name, default in zip(cargs, types, arg_names,
                                                    default_values):
                    self._type_check(arg, type, name, default)
                # Keyword arguments
                for name, arg in kwargs.items():
                    self._type_check(arg, kwarg_types[name], name,
                                     kwarg_defaults[name])
                self.extra_check(arg_names, types, args, kwargs)
            return func(*args, **kwargs)
        wrapper.__name__ = func.__name__
        return wrapper
    def extra_check(self, names, types, args, kwargs):
        # Hook for subclasses: cross-argument validation after the
        # per-argument checks have passed.
        pass
    def _type_check(self, value, argument_type, name, default=_NoValue):
        # Skip the check entirely when the caller passed the declared
        # default value.
        if default is not _NoValue and value == default:
            return
        if issubclass(argument_type, CustomType):
            custom = True
            check_type = argument_type.type
        else:
            custom = False
            check_type = argument_type
        type_name = argument_type.__name__
        if not isinstance(value, check_type):
            raise TypeError(
                "%s must be %s, not %s" % (name, type_name,
                                           type(value).__name__))
        if custom:
            argument_type.value_check(name, value)
def test():
    # Smoke test for argcheck (Python 2 syntax: `except ..., e` / `print e`).
    @argcheck(int)
    def function(number):
        pass
    class Class:
        @argcheck(percent)
        def method(self, value):
            pass
    function(1)
    try:
        function(None) # fails
    except TypeError, e:
        print e
    o = Class()
    o.method(10.4) # works
    try:
        o.method(-1) # fails
    except ValueError, e:
        print e
if __name__ == '__main__':
    test()
update year
#
# Kiwi: a Framework and Enhanced Widgets for Python
#
# Copyright (C) 2005,2006 Async Open Source
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# Author(s): Johan Dahlin <jdahlin@async.com.br>
#
"""
Argument checking decorator and support
"""
import inspect
from types import ClassType
class CustomType(type):
    """Base metaclass for argcheck's custom argument types.

    Subclasses set ``type`` to the concrete type(s) to isinstance-check
    against, and may override value_check() to validate the value itself.
    """
    def value_check(mcs, name, value):
        # Default: accept any value.
        pass
    value_check = classmethod(value_check)
class number(CustomType):
    """
    Custom type that verifies that the type is a number (eg float or int)
    """
    # NOTE(review): ``long`` is Python 2 only; under Python 3 this class
    # body would raise NameError.
    type = int, float, long
class percent(CustomType):
    """
    Custom type that verifies that the value is a percentage
    """
    type = int, float
    def value_check(mcs, name, value):
        # A percentage must lie in the closed interval [0, 100].
        # (The previous chained comparison ``0 > value < 100`` only
        # rejected negative values, silently accepting values above 100.)
        if value < 0 or value > 100:
            raise ValueError("%s must be between 0 and 100" % name)
    value_check = classmethod(value_check)
# Unique sentinel used to distinguish "no default supplied" from None.
_NoValue = object()
class argcheck(object):
    """
    Decorator to check type and value of arguments.
    Usage:
    >>> @argcheck(types...)
    ... def function(args..)
    or
    >>> class Class:
    ... @argcheck(types..)
    ... def method(self, args)
    You can customize the checks by subclassing your type from CustomType,
    there are two builtin types: number which is a float/int combined check
    and a percent which verifies that the value is a percentage
    """
    # Global kill-switch: when False, __call__ returns the decorated
    # function untouched (see enable()/disable()).
    __enabled__ = True
    def __init__(self, *types):
        # Validate the decorator arguments themselves: each must be a
        # new-style or old-style (ClassType) class usable with isinstance().
        for argtype in types:
            if not isinstance(argtype, (type, ClassType)):
                raise TypeError("must be a type or class instance")
        self.types = types
    def enable(cls):
        """
        Enable argcheck globally
        """
        cls.__enabled__ = True
    enable = classmethod(enable)
    def disable(cls):
        """
        Disable argcheck globally
        """
        cls.__enabled__ = False
    disable = classmethod(disable)
    def __call__(self, func):
        # Decorate func: pre-compute per-argument type and default tables
        # once at decoration time, then return a wrapper that validates
        # every call.
        if not callable(func):
            raise TypeError("%r must be callable" % func)
        # Useful for optimized runs
        if not self.__enabled__:
            return func
        spec = inspect.getargspec(func)
        arg_names, is_varargs, is_kwargs, default_values = spec
        if not default_values:
            default_values = []
        else:
            default_values = list(default_values)
        # Set all the remaining default values to _NoValue
        default_values = ([_NoValue] * (len(arg_names) - len(default_values)) +
                          default_values)
        # TODO: Is there another way of doing this?
        # Not trivial since func is not attached to the class at
        # this point. Nor is the class attached to the namespace.
        if arg_names and arg_names[0] in ('self', 'cls'):
            arg_names = arg_names[1:]
            default_values = default_values[1:]
            is_method = True
        else:
            is_method = False
        types = self.types
        if is_kwargs and not is_varargs and self.types:
            raise TypeError("argcheck cannot be used with only keywords")
        elif not is_varargs:
            if len(types) != len(arg_names):
                raise TypeError("%s has wrong number of arguments, "
                                "%d specified in decorator, "
                                "but function has %d" %
                                (func.__name__,
                                 len(types),
                                 len(arg_names)))
        defs = len(default_values)  # NOTE(review): unused, kept as-is
        # Build keyword lookup tables and sanity-check each declared
        # default value against its declared type.
        kwarg_types = {}
        kwarg_defaults = {}
        for i, arg_name in enumerate(arg_names):
            kwarg_types[arg_name] = types[i]
            value = default_values[i]
            kwarg_defaults[arg_name] = value
            if value is None or value is _NoValue:
                continue
            arg_type = types[i]
            try:
                self._type_check(value, arg_type, arg_name)
            except TypeError:
                raise TypeError("default value for %s must be of type %s "
                                "and not %s" % (arg_name,
                                                arg_type.__name__,
                                                type(value).__name__))
            kwarg_defaults[arg_name] = value
        def wrapper(*args, **kwargs):
            # Validate every positional and keyword argument, then call
            # through to the original function.
            if self.__enabled__:
                cargs = args
                if is_method:
                    cargs = cargs[1:]
                # Positional arguments
                for arg, type, name, default in zip(cargs, types, arg_names,
                                                    default_values):
                    self._type_check(arg, type, name, default)
                # Keyword arguments
                for name, arg in kwargs.items():
                    self._type_check(arg, kwarg_types[name], name,
                                     kwarg_defaults[name])
                self.extra_check(arg_names, types, args, kwargs)
            return func(*args, **kwargs)
        wrapper.__name__ = func.__name__
        return wrapper
    def extra_check(self, names, types, args, kwargs):
        # Hook for subclasses: cross-argument validation after the
        # per-argument checks have passed.
        pass
    def _type_check(self, value, argument_type, name, default=_NoValue):
        # Skip the check entirely when the caller passed the declared
        # default value.
        if default is not _NoValue and value == default:
            return
        if issubclass(argument_type, CustomType):
            custom = True
            check_type = argument_type.type
        else:
            custom = False
            check_type = argument_type
        type_name = argument_type.__name__
        if not isinstance(value, check_type):
            raise TypeError(
                "%s must be %s, not %s" % (name, type_name,
                                           type(value).__name__))
        if custom:
            argument_type.value_check(name, value)
def test():
    # Smoke test for argcheck (Python 2 syntax: `except ..., e` / `print e`).
    @argcheck(int)
    def function(number):
        pass
    class Class:
        @argcheck(percent)
        def method(self, value):
            pass
    function(1)
    try:
        function(None) # fails
    except TypeError, e:
        print e
    o = Class()
    o.method(10.4) # works
    try:
        o.method(-1) # fails
    except ValueError, e:
        print e
if __name__ == '__main__':
    test()
|
from pprint import pprint
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db import connection
from django.db.models import Sum, Value
from django.db.models.functions import Coalesce
from django.shortcuts import render
from django.urls import reverse
from django.views import View
from geral.functions import (
request_user,
has_permission,
)
import lotes.models
import cd.models as models
class SolicitacaoDetalhe(LoginRequiredMixin, View):
    def __init__(self):
        # View configuration used by get().
        # NOTE(review): Django's View.__init__ accepts **kwargs from
        # as_view(); this override takes none — confirm no initkwargs are
        # ever passed via as_view() for this view.
        self.template_name = 'cd/solicitacao_detalhe.html'
        self.title_name = 'Detalhes de solicitação'
    def mount_context(self, solicit_id, user):
        """Build the template context for one solicitação.

        Gathers: active quantity rows (with delete links), inactive
        (removed) rows, a per-address aggregation, per-reference grade
        tables and a grand-total grade. On an invalid id, returns a
        context containing only an 'erro' message.
        """
        context = {
            'solicit_id': solicit_id,
            'user': user,
        }
        try:
            solicitacao = lotes.models.SolicitaLote.objects.get(
                id=solicit_id)
        except lotes.models.SolicitaLote.DoesNotExist:
            context['erro'] = \
                'Id de solicitação inválido.'
            return context
        # If the user has exactly one active solicitação and it differs
        # from the one displayed, expose it so the template can link to it.
        solicit_ativa_recs = lotes.models.SolicitaLote.objects.filter(
            usuario=user, ativa=True)
        if len(solicit_ativa_recs) == 1:
            solicit_ativa_cod = solicit_ativa_recs[0].codigo
            solicit_ativa_id = str(solicit_ativa_recs[0].id)
            if solicit_ativa_id != solicit_id:
                context['solicit_ativa_cod'] = solicit_ativa_cod
                context['solicit_ativa_id'] = solicit_ativa_id
        context['solicitacao'] = solicitacao
        # Active quantities. The leading space in ' -ausente-' makes
        # missing locations sort before real ones.
        solicit_qtds = lotes.models.SolicitaLoteQtd.objects.values(
            'id', 'lote__op', 'lote__lote', 'lote__referencia',
            'lote__cor', 'lote__tamanho', 'qtd', 'update_at'
        ).annotate(
            lote__local=Coalesce('lote__local', Value(' -ausente-'))
        ).filter(
            solicitacao=solicitacao
        ).order_by(
            '-update_at'
        )
        for row in solicit_qtds:
            link = reverse(
                'cd:solicitacao_detalhe__get3',
                args=[solicitacao.id, 'd', row['id']])
            row['delete'] = '''
                <a title="Exclui lote"
                   href="{link}"
                ><span class="glyphicon glyphicon-remove"
                    aria-hidden="true"></span></a>
            '''.format(link=link)
            row['lote__lote|LINK'] = reverse(
                'producao:posicao__get',
                args=[row['lote__lote']])
            row['lote__lote|TARGET'] = '_BLANK'
        link = reverse(
            'cd:solicitacao_detalhe__get2',
            args=[solicitacao.id, 'l'])
        limpa = '''
        <a title="Limpa solicitação"
           href="{link}"
        ><span class="glyphicon glyphicon-remove-circle" aria-hidden="true"
        ></span></a>
        '''.format(link=link)
        context.update({
            'safe': ['delete'],
            'headers': ['Endereço', 'OP', 'Lote', 'Referência',
                'Cor', 'Tamanho', 'Quant. Solicitada', 'Em', (limpa,)],
            'fields': ['lote__local', 'lote__op', 'lote__lote',
                'lote__referencia', 'lote__cor', 'lote__tamanho', 'qtd',
                'update_at', 'delete'],
            'data': solicit_qtds,
        })
        # Inactive (soft-deleted) quantities, newest removals first.
        solicit_qtds_inat = \
            lotes.models.SolicitaLoteQtd.objects_inactive.values(
                'id', 'lote__op', 'lote__lote', 'lote__referencia',
                'lote__cor', 'lote__tamanho', 'qtd', 'when'
            ).annotate(
                lote__local=Coalesce('lote__local', Value(' -ausente-'))
            ).filter(
                solicitacao=solicitacao
            ).order_by(
                '-when'
            )
        for row in solicit_qtds_inat:
            row['lote__lote|LINK'] = reverse(
                'producao:posicao__get',
                args=[row['lote__lote']])
            row['lote__lote|TARGET'] = '_BLANK'
        context.update({
            'inat_headers': ['Endereço', 'OP', 'Lote',
                'Referência', 'Cor', 'Tamanho',
                'Quant. Solicitada', 'Removido em'],
            'inat_fields': ['lote__local', 'lote__op', 'lote__lote',
                'lote__referencia', 'lote__cor', 'lote__tamanho',
                'qtd', 'when'],
            'inat_data': solicit_qtds_inat,
        })
        # Aggregation per address: sum the solicited quantity and flag
        # whether the whole lote was requested or only part of it.
        por_endereco = lotes.models.SolicitaLoteQtd.objects.values(
            'lote__op', 'lote__lote', 'lote__qtd_produzir',
            'lote__referencia', 'lote__cor', 'lote__tamanho'
        ).annotate(
            lote__local=Coalesce('lote__local', Value(' -ausente-')),
            qtdsum=Sum('qtd')
        ).filter(
            solicitacao=solicitacao
        ).order_by(
            'lote__local', 'lote__op', 'lote__referencia', 'lote__cor',
            'lote__tamanho', 'lote__lote'
        )
        for row in por_endereco:
            if row['qtdsum'] == row['lote__qtd_produzir']:
                row['inteira_parcial'] = 'Lote inteiro'
            else:
                row['inteira_parcial'] = 'Parcial'
            row['lote__lote|LINK'] = reverse(
                'producao:posicao__get',
                args=[row['lote__lote']])
            row['lote__lote|TARGET'] = '_BLANK'
        context.update({
            'e_headers': ['Endereço', 'OP', 'Lote',
                'Referência', 'Cor', 'Tamanho',
                'Quant. Solicitada', 'Solicitação'],
            'e_fields': ['lote__local', 'lote__op', 'lote__lote',
                'lote__referencia', 'lote__cor', 'lote__tamanho',
                'qtdsum', 'inteira_parcial'],
            'e_data': por_endereco,
        })
        # One grade table per distinct reference, plus a grand total.
        referencias = lotes.models.SolicitaLoteQtd.objects.filter(
            solicitacao=solicitacao
        ).values('lote__referencia').distinct()
        cursor_def = connection.cursor()
        grades2 = []
        for referencia in referencias:
            # Grade de solicitação
            context_ref = models.grade_solicitacao(
                cursor_def, referencia['lote__referencia'],
                solicit_id=solicit_id)
            context_ref.update({
                'style': {i: 'text-align: right;'
                    for i in range(2, len(context_ref['fields'])+1)},
            })
            grades2.append(context_ref)
        context.update({
            'grades2': grades2,
        })
        grade_total = models.grade_solicitacao(
            cursor_def, solicit_id=solicit_id)
        grade_total.update({
            'style': {i: 'text-align: right;'
                for i in range(2, len(grade_total['fields'])+1)},
        })
        context.update({
            'gt': grade_total,
        })
        return context
def get(self, request, *args, **kwargs):
context = {'titulo': self.title_name}
if 'acao' in kwargs:
acao = kwargs['acao']
else:
acao = None
if 'id' in kwargs:
slq_id = kwargs['id']
else:
slq_id = None
solicit_id = kwargs['solicit_id']
user = request_user(request)
if acao is not None:
if not has_permission(request, 'lotes.change_solicitalote'):
context.update({
'erro': 'Usuário não tem direito de alterar solicitações.'
})
return render(request, self.template_name, context)
if acao == 'd' and slq_id is not None:
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.get(
id=slq_id)
solicit_qtds.delete()
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
if acao == 'l' and solicit_id is not None:
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao__id=solicit_id)
solicit_qtds.delete()
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
# desreserva lote em todas as solicitações
if acao == 'dl' and slq_id is not None:
lote = slq_id
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.filter(
lote__lote=lote)
solicit_qtds.delete()
context.update({
'acao_mensagem':
'Lote {} cacelado em todas as solicitações.'.format(
lote
)
})
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
# desreserva endereçados
if acao == 'de' and solicit_id is not None:
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao__id=solicit_id, lote__local__isnull=False)
solicit_qtds.delete()
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
# move endereçados
if acao == 'move' and solicit_id is not None:
try:
solicit_ativa = lotes.models.SolicitaLote.objects.get(
usuario=user, ativa=True)
try:
for solicit_qtd in \
lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao__id=solicit_id,
lote__local__isnull=False):
solicit_qtd.solicitacao = solicit_ativa
solicit_qtd.save()
except Exception:
pass
except lotes.models.SolicitaLote.DoesNotExist:
pass
data = self.mount_context(solicit_id, user)
context.update(data)
return render(request, self.template_name, context)
nova tentativa de listar lotes ausentes no início
from pprint import pprint
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db import connection
from django.db.models import Sum, Value
from django.db.models.functions import Coalesce
from django.shortcuts import render
from django.urls import reverse
from django.views import View
from geral.functions import (
request_user,
has_permission,
)
import lotes.models
import cd.models as models
class SolicitacaoDetalhe(LoginRequiredMixin, View):
    def __init__(self):
        # View configuration used by get().
        # NOTE(review): Django's View.__init__ accepts **kwargs from
        # as_view(); this override takes none — confirm no initkwargs are
        # ever passed via as_view() for this view.
        self.template_name = 'cd/solicitacao_detalhe.html'
        self.title_name = 'Detalhes de solicitação'
def mount_context(self, solicit_id, user):
context = {
'solicit_id': solicit_id,
'user': user,
}
try:
solicitacao = lotes.models.SolicitaLote.objects.get(
id=solicit_id)
except lotes.models.SolicitaLote.DoesNotExist:
context['erro'] = \
'Id de solicitação inválido.'
return context
solicit_ativa_recs = lotes.models.SolicitaLote.objects.filter(
usuario=user, ativa=True)
if len(solicit_ativa_recs) == 1:
solicit_ativa_cod = solicit_ativa_recs[0].codigo
solicit_ativa_id = str(solicit_ativa_recs[0].id)
if solicit_ativa_id != solicit_id:
context['solicit_ativa_cod'] = solicit_ativa_cod
context['solicit_ativa_id'] = solicit_ativa_id
context['solicitacao'] = solicitacao
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.values(
'id', 'lote__op', 'lote__lote', 'lote__referencia',
'lote__cor', 'lote__tamanho', 'qtd', 'update_at'
).annotate(
lote__local=Coalesce('lote__local', Value('-ausente-'))
).filter(
solicitacao=solicitacao
).order_by(
'-update_at'
)
for row in solicit_qtds:
link = reverse(
'cd:solicitacao_detalhe__get3',
args=[solicitacao.id, 'd', row['id']])
row['delete'] = '''
<a title="Exclui lote"
href="{link}"
><span class="glyphicon glyphicon-remove"
aria-hidden="true"></span></a>
'''.format(link=link)
row['lote__lote|LINK'] = reverse(
'producao:posicao__get',
args=[row['lote__lote']])
row['lote__lote|TARGET'] = '_BLANK'
link = reverse(
'cd:solicitacao_detalhe__get2',
args=[solicitacao.id, 'l'])
limpa = '''
<a title="Limpa solicitação"
href="{link}"
><span class="glyphicon glyphicon-remove-circle" aria-hidden="true"
></span></a>
'''.format(link=link)
context.update({
'safe': ['delete'],
'headers': ['Endereço', 'OP', 'Lote', 'Referência',
'Cor', 'Tamanho', 'Quant. Solicitada', 'Em', (limpa,)],
'fields': ['lote__local', 'lote__op', 'lote__lote',
'lote__referencia', 'lote__cor', 'lote__tamanho', 'qtd',
'update_at', 'delete'],
'data': solicit_qtds,
})
solicit_qtds_inat = \
lotes.models.SolicitaLoteQtd.objects_inactive.values(
'id', 'lote__op', 'lote__lote', 'lote__referencia',
'lote__cor', 'lote__tamanho', 'qtd', 'when'
).annotate(
lote__local=Coalesce('lote__local', Value('-ausente-'))
).filter(
solicitacao=solicitacao
).order_by(
'-when'
)
for row in solicit_qtds_inat:
row['lote__lote|LINK'] = reverse(
'producao:posicao__get',
args=[row['lote__lote']])
row['lote__lote|TARGET'] = '_BLANK'
context.update({
'inat_headers': ['Endereço', 'OP', 'Lote',
'Referência', 'Cor', 'Tamanho',
'Quant. Solicitada', 'Removido em'],
'inat_fields': ['lote__local', 'lote__op', 'lote__lote',
'lote__referencia', 'lote__cor', 'lote__tamanho',
'qtd', 'when'],
'inat_data': solicit_qtds_inat,
})
por_endereco = lotes.models.SolicitaLoteQtd.objects.values(
'lote__op', 'lote__lote', 'lote__qtd_produzir',
'lote__referencia', 'lote__cor', 'lote__tamanho'
).annotate(
lote__local=Coalesce('lote__local', Value('-ausente-')),
lote_ordem=Coalesce('lote__local', Value('0000')),
qtdsum=Sum('qtd')
).filter(
solicitacao=solicitacao
).order_by(
'lote_ordem', 'lote__op', 'lote__referencia', 'lote__cor',
'lote__tamanho', 'lote__lote'
)
for row in por_endereco:
if row['qtdsum'] == row['lote__qtd_produzir']:
row['inteira_parcial'] = 'Lote inteiro'
else:
row['inteira_parcial'] = 'Parcial'
row['lote__lote|LINK'] = reverse(
'producao:posicao__get',
args=[row['lote__lote']])
row['lote__lote|TARGET'] = '_BLANK'
context.update({
'e_headers': ['Endereço', 'OP', 'Lote',
'Referência', 'Cor', 'Tamanho',
'Quant. Solicitada', 'Solicitação'],
'e_fields': ['lote__local', 'lote__op', 'lote__lote',
'lote__referencia', 'lote__cor', 'lote__tamanho',
'qtdsum', 'inteira_parcial'],
'e_data': por_endereco,
})
referencias = lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao=solicitacao
).values('lote__referencia').distinct()
cursor_def = connection.cursor()
grades2 = []
for referencia in referencias:
# Grade de solicitação
context_ref = models.grade_solicitacao(
cursor_def, referencia['lote__referencia'],
solicit_id=solicit_id)
context_ref.update({
'style': {i: 'text-align: right;'
for i in range(2, len(context_ref['fields'])+1)},
})
grades2.append(context_ref)
context.update({
'grades2': grades2,
})
grade_total = models.grade_solicitacao(
cursor_def, solicit_id=solicit_id)
grade_total.update({
'style': {i: 'text-align: right;'
for i in range(2, len(grade_total['fields'])+1)},
})
context.update({
'gt': grade_total,
})
return context
def get(self, request, *args, **kwargs):
context = {'titulo': self.title_name}
if 'acao' in kwargs:
acao = kwargs['acao']
else:
acao = None
if 'id' in kwargs:
slq_id = kwargs['id']
else:
slq_id = None
solicit_id = kwargs['solicit_id']
user = request_user(request)
if acao is not None:
if not has_permission(request, 'lotes.change_solicitalote'):
context.update({
'erro': 'Usuário não tem direito de alterar solicitações.'
})
return render(request, self.template_name, context)
if acao == 'd' and slq_id is not None:
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.get(
id=slq_id)
solicit_qtds.delete()
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
if acao == 'l' and solicit_id is not None:
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao__id=solicit_id)
solicit_qtds.delete()
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
# desreserva lote em todas as solicitações
if acao == 'dl' and slq_id is not None:
lote = slq_id
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.filter(
lote__lote=lote)
solicit_qtds.delete()
context.update({
'acao_mensagem':
'Lote {} cacelado em todas as solicitações.'.format(
lote
)
})
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
# desreserva endereçados
if acao == 'de' and solicit_id is not None:
try:
solicit_qtds = lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao__id=solicit_id, lote__local__isnull=False)
solicit_qtds.delete()
except lotes.models.SolicitaLoteQtd.DoesNotExist:
pass
# move endereçados
if acao == 'move' and solicit_id is not None:
try:
solicit_ativa = lotes.models.SolicitaLote.objects.get(
usuario=user, ativa=True)
try:
for solicit_qtd in \
lotes.models.SolicitaLoteQtd.objects.filter(
solicitacao__id=solicit_id,
lote__local__isnull=False):
solicit_qtd.solicitacao = solicit_ativa
solicit_qtd.save()
except Exception:
pass
except lotes.models.SolicitaLote.DoesNotExist:
pass
data = self.mount_context(solicit_id, user)
context.update(data)
return render(request, self.template_name, context)
|
# -*- coding: utf-8 -*-
from shiva.media import MediaDir
from os import path
# Development settings for the streaming server.
DEBUG = True

# Absolute path of the directory containing this settings module.
PROJECT_ROOT = path.dirname(path.abspath(__file__))

# SQLite database stored alongside this file.
SQLALCHEMY_DATABASE_URI = 'sqlite:///{}/stream.db'.format(PROJECT_ROOT)

# Audio formats the server will serve.
ACCEPTED_FORMATS = ('mp3', 'ogg')
Fixed DB path.
# -*- coding: utf-8 -*-
from shiva.media import MediaDir
from os import path
# Development settings for the streaming server.
DEBUG = True

# Project root is the parent of the directory holding this settings file.
# Use path.dirname twice instead of slicing off a literal '/settings'
# suffix: the slice silently corrupts the path when the directory has a
# different name or the OS uses '\\' separators.
PROJECT_ROOT = path.dirname(path.dirname(path.abspath(__file__)))

# SQLite database stored at the project root.
SQLALCHEMY_DATABASE_URI = 'sqlite:///%s/stream.db' % PROJECT_ROOT

# Audio formats the server will serve.
ACCEPTED_FORMATS = (
    'mp3',
    'ogg',
)
|
#!/opt/tufin/securitysuite/ps/python/bin/python3.4
import sys
import lxml.etree
import pytos
import os
import io
import unittest
from unittest.mock import patch
from pytos.securetrack.helpers import Secure_Track_Helper
from pytos.securetrack.xml_objects.rest.cleanups import Generic_Cleanup_List
from pytos.securetrack.xml_objects.rest.domain import Domains
from pytos.common.exceptions import REST_Bad_Request_Error, REST_Not_Found_Error
from pytos.securetrack.xml_objects.rest import security_policy
from pytos.securetrack.xml_objects.rest.device import Device_Revision, Device, Devices_List, RuleSearchDeviceList, \
Device_Revisions_List
from pytos.securetrack.xml_objects.rest.rules import Rule_Documentation, Record_Set, Bindings_List, \
Interfaces_List, Cleanup_Set, Rules_List, Network_Objects_List, Policy_Analysis_Query_Result, \
SecurityPolicyDeviceViolations, Policy_List
from pytos.securetrack.xml_objects.rest.security_policy import Security_Policies_List, Security_Policy
from pytos.securetrack.xml_objects.rest.zones import Zone_List, Zone, Zone_Entry, ZoneDescendantsList
def fake_request_response(rest_file):
    """Load a canned XML response for the calling test case.

    The payload is read from ``resources/<testcaseclass>/<rest_file>.xml``
    next to this file, where the directory name is the lower-cased class
    name of the ``self`` local found in the caller's frame (i.e. the
    unittest method invoking this helper).
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    # Peek one frame up to discover which TestCase class is asking.
    caller_self = sys._getframe(1).f_locals['self']
    resource_dir = caller_self.__class__.__name__.lower()
    xml_path = os.path.join(
        base_dir, "resources", resource_dir, "{}.xml".format(rest_file))
    with open(xml_path, mode='rb') as handle:
        return handle.read()
class TestDevices(unittest.TestCase):
    """Tests for the device-related Secure_Track_Helper endpoints.

    Network traffic is fully stubbed: ``requests.Session.send`` is patched
    and fed canned XML payloads through ``fake_request_response``.

    NOTE(review): the module-level global ``added_offline_device_id`` is
    only assigned in ``test_10_add_offline_device``, but ``test_01`` and
    ``test_09`` read it.  unittest runs methods in alphabetical order
    (test_01 first), so those tests raise NameError unless the global is
    defined elsewhere — confirm the intended execution order.
    """

    def setUp(self):
        # Dummy host/credentials; the HTTP session below is mocked anyway.
        self.helper = Secure_Track_Helper("127.0.0.1", ("username", "password"))
        self.patcher = patch('pytos.common.rest_requests.requests.Session.send')
        self.mock_get_uri = self.patcher.start()
        self.mock_get_uri.return_value.status_code = 200

    def tearDown(self):
        self.patcher.stop()

    def test_01_get_device(self):
        """get_device_by_id parses the canned payload into a Device."""
        self.mock_get_uri.return_value.content = fake_request_response("device_by_id")
        device_by_id = self.helper.get_device_by_id(added_offline_device_id)
        self.assertIsInstance(device_by_id, Device)

    def test_02_get_devices_list(self):
        """get_devices_list returns a non-empty, count-consistent list."""
        self.mock_get_uri.return_value.content = fake_request_response("device_list")
        devices_list = self.helper.get_devices_list()
        self.assertIsInstance(devices_list, Devices_List)
        self.assertTrue(len(devices_list) == devices_list.count)
        self.assertTrue(devices_list.count > 0)

    def test_03_get_devices_list_with_custom_param(self):
        """custom_params are accepted and a valid list is still returned."""
        self.mock_get_uri.return_value.content = fake_request_response("device_list")
        devices_list = self.helper.get_devices_list(custom_params={'vendor': 'cisco'})
        self.assertIsInstance(devices_list, Devices_List)
        self.assertEqual(len(devices_list), devices_list.count)
        self.assertTrue(devices_list.count > 0)

    def test_04_get_device_id_by_name(self):
        """Name lookup works; ambiguous and unknown names raise."""
        self.mock_get_uri.return_value.content = fake_request_response("device_list")
        # NOTE(review): assertTrue(device_id, 155) treats 155 as the failure
        # message — assertEqual(device_id, 155) was probably intended.
        device_id = self.helper.get_device_id_by_name(device_name="Router 2801")
        self.assertTrue(device_id, 155)
        # assert invalid request - 2 devices with same name
        with self.assertRaises(IndexError):
            self.helper.get_device_id_by_name(device_name="ASA FireWall")
        # assert invalid request - Non existing device
        with self.assertRaises(ValueError):
            self.helper.get_device_id_by_name(device_name="NonExistingDeviceName")

    def test_05_get_cleanups_for_device_by_id(self):
        """Cleanups for a known device id parse into a non-empty list."""
        self.mock_get_uri.return_value.content = fake_request_response("cleanups_by_device_id")
        cleanups = self.helper.get_cleanups_for_device_by_id(155)
        self.assertIsInstance(cleanups, Generic_Cleanup_List)
        self.assertTrue(len(cleanups) > 0)

    def test_06_failed_to_get_cleanups_for_device_by_id(self):
        """A 404 from the server surfaces as ValueError."""
        self.mock_get_uri.return_value.status_code = 404
        # NOTE(review): resource is named "no_found_error" here but
        # "not_found_error" elsewhere — confirm both fixture files exist.
        self.mock_get_uri.return_value.content = fake_request_response("no_found_error")
        with self.assertRaises(ValueError):
            self.helper.get_cleanups_for_device_by_id(5555)

    def test_07_get_bindings_for_device(self):
        """Bindings payload parses into a non-empty Bindings_List."""
        self.mock_get_uri.return_value.content = fake_request_response("device_bindings")
        binding = self.helper.get_bindings_for_device(155)
        self.assertIsInstance(binding, Bindings_List)
        self.assertTrue(len(binding) > 0)

    def test_08_get_interfaces_for_device(self):
        """Interfaces payload parses into a non-empty Interfaces_List."""
        self.mock_get_uri.return_value.content = fake_request_response("device_interfaces")
        interfaces = self.helper.get_interfaces_for_device(155)
        self.assertIsInstance(interfaces, Interfaces_List)
        self.assertTrue(len(interfaces) > 0)

    def test_09_get_device_config(self):
        """Device config endpoint returns the raw bytes from the mock."""
        self.assertEqual(self.helper.get_device_config_by_id(added_offline_device_id), b'\x00')

    def test_10_add_offline_device(self):
        """Adding an offline device returns the numeric id from 'location'."""
        global added_offline_device_id
        self.mock_get_uri.return_value.status_code = 201
        self.mock_get_uri.return_value.headers = {'location': '1'}
        added_offline_device_id = self.helper.add_offline_device("TEST_DEVICE_123", "Cisco", "router")
        self.assertIsInstance(added_offline_device_id, int)

    # def test_11_upload_device_offline_config(self):
    #     with tempfile.NamedTemporaryFile(delete=False) as config_tempfile:
    #         config_tempfile.write(self.OFFLINE_TEST_DATA)
    #         config_temp_file_path = config_tempfile.name
    #     with open(config_temp_file_path) as config_tempfile:
    #         self.helper.upload_device_offline_config(added_offline_device_id, config_tempfile)
    #     os.remove(config_temp_file_path)
class TestRules(unittest.TestCase):
    """Tests for rule-related Secure_Track_Helper endpoints.

    All requests go through a patched ``requests.Session.send`` whose
    ``content`` is loaded from canned XML fixtures.
    """

    def setUp(self):
        # Dummy host/credentials; the HTTP session below is mocked anyway.
        self.helper = Secure_Track_Helper("127.0.0.1", ("username", "password"))
        self.patcher = patch('pytos.common.rest_requests.requests.Session.send')
        self.mock_get_uri = self.patcher.start()
        self.mock_get_uri.return_value.status_code = 200

    def tearDown(self):
        self.patcher.stop()

    def test_01_get_shadowed_rules(self):
        """Shadowed rules parse into a Cleanup_Set."""
        self.mock_get_uri.return_value.content = fake_request_response("cleanup_set")
        cleanup = self.helper.get_shadowed_rules_for_device_by_id(155)
        self.assertIsInstance(cleanup, Cleanup_Set)

    def test_02_get_rule_by_device_and_rule_id(self):
        """Lookup by device id + rule id returns the matching rule."""
        self.mock_get_uri.return_value.content = fake_request_response("rules")
        rules = self.helper.get_rule_by_device_and_rule_id(155, 1318013)
        self.assertEqual(rules[0].id, 1318013)

    def test_03_get_rules_for_device(self):
        """Device rules parse into a non-empty Rules_List."""
        self.mock_get_uri.return_value.content = fake_request_response("rules")
        rules = self.helper.get_rules_for_device(155)
        self.assertIsInstance(rules, Rules_List)
        self.assertTrue(len(rules) > 0)

    def test_04_failed_to_get_rules_for_device(self):
        """An empty payload yields an empty — but valid — Rules_List."""
        self.mock_get_uri.return_value.content = fake_request_response("empty_rules")
        rules = self.helper.get_rules_for_device(155)
        self.assertIsInstance(rules, Rules_List)
        self.assertTrue(len(rules) == 0)

    def test_05_get_shadowing_rules_for_device_id_and_rule_uids(self):
        """Shadowing-rules query by UID parses into a Cleanup_Set."""
        self.mock_get_uri.return_value.content = fake_request_response("cleanup_set")
        uid = "{53b95431-73ee-43de-a153-d299f4eb4804}"
        shadowing_rules = self.helper.get_shadowing_rules_for_device_id_and_rule_uids(155, uid)
        self.assertIsInstance(shadowing_rules, Cleanup_Set)

    def test_06_failed_get_shadowing_rules_for_device_id_and_rule_uids(self):
        """A 400 from the server surfaces as REST_Bad_Request_Error."""
        self.mock_get_uri.return_value.content = fake_request_response("bad_request_error")
        self.mock_get_uri.return_value.status_code = 400
        with self.assertRaises(REST_Bad_Request_Error):
            self.helper.get_shadowing_rules_for_device_id_and_rule_uids(155, [])

    def test_07_get_devices_by_rule_search(self):
        """Rule search returns a RuleSearchDeviceList."""
        self.mock_get_uri.return_value.content = fake_request_response("device_list_by_rule_search")
        devices = self.helper.get_devices_by_rule_search()
        self.assertIsInstance(devices, RuleSearchDeviceList)

    def test_08_rule_search_for_device(self):
        """Per-device rule search yields a non-empty Rules_List."""
        self.mock_get_uri.return_value.content = fake_request_response("rules")
        rules = self.helper.rule_search_for_device(155)
        self.assertIsInstance(rules, Rules_List)
        self.assertTrue(len(rules) > 0)

    def test_09_get_rules_for_revision(self):
        """Revision rules parse into a non-empty Rules_List."""
        self.mock_get_uri.return_value.content = fake_request_response("rules")
        rules = self.helper.get_rules_for_revision(1, True)
        self.assertIsInstance(rules, Rules_List)
        self.assertTrue(len(rules) > 0)

    def test_10_rule_documentation_format(self):
        """A hand-built Rule_Documentation serializes to the fixture XML.

        Both sides are canonicalized (C14N) so attribute ordering and
        whitespace differences do not cause false failures.
        """
        src_xml = fake_request_response("rule_documentation")
        src_tree = lxml.etree.fromstring(src_xml)
        src_b = io.BytesIO()
        src_tree.getroottree().write_c14n(src_b)
        # create a new record set fot the rule documentation
        record_sets = [
            Record_Set("support@tufin.com", "admin", "2019-01-08T00:00:00+02:00", 1235, "this is a comment", "")
        ]
        rd = Rule_Documentation("admin", 'Comment for unittest suit', record_sets, '', True)
        dst_tree = lxml.etree.fromstring(rd.to_xml_string())
        dst_b = io.BytesIO()
        dst_tree.getroottree().write_c14n(dst_b)
        self.assertEqual(src_b.getvalue(), dst_b.getvalue())

    def test_11_get_rule_documentation_by_device_id_and_rule_id(self):
        """Rule documentation payload parses into Rule_Documentation."""
        self.mock_get_uri.return_value.content = fake_request_response("rule_documentation")
        rd = self.helper.get_rule_documentation_by_device_id_and_rule_id(155, 1330304)
        self.assertIsInstance(rd, Rule_Documentation)

    def test_12_failed_to_get_rule_documentation_by_device_id_and_rule_id(self):
        """A 404 from the server surfaces as ValueError."""
        self.mock_get_uri.return_value.content = fake_request_response("not_found_error")
        self.mock_get_uri.return_value.status_code = 404
        with self.assertRaises(ValueError):
            self.helper.get_rule_documentation_by_device_id_and_rule_id(155, 1330303)

    def test_13_get_network_objects(self):
        """Text search parses into a Network_Objects_List."""
        self.mock_get_uri.return_value.content = fake_request_response("network_objects_search")
        network_objects = self.helper.network_object_text_search("81.81.81.5", "any_field")
        self.assertIsInstance(network_objects, Network_Objects_List)
class TestZonesPoliciesAndRevisions(unittest.TestCase):
    """Tests for zone, security-policy and revision endpoints.

    Requests are stubbed via a patched ``requests.Session.send``; write
    operations additionally patch ``requests.Request`` to assert the exact
    method, URL, auth, body and headers the helper builds.
    """

    def setUp(self):
        # 'localhost' is significant: the assert_called_with URLs below
        # embed it.
        self.helper = Secure_Track_Helper("localhost", ("username", "password"))
        self.patcher = patch('pytos.common.rest_requests.requests.Session.send')
        self.mock_get_uri = self.patcher.start()
        self.mock_get_uri.return_value.status_code = 200

    def tearDown(self):
        self.patcher.stop()

    def test_01_get_zones(self):
        """Zones payload parses into a Zone_List."""
        self.mock_get_uri.return_value.content = fake_request_response("zones")
        zones = self.helper.get_zones()
        self.assertIsInstance(zones, Zone_List)

    def test_02_post_zone(self):
        """A hand-built Zone serializes to the fixture XML (C14N-compared)."""
        src_xml = fake_request_response("post_zone")
        src_tree = lxml.etree.fromstring(src_xml)
        src_b = io.BytesIO()
        src_tree.getroottree().write_c14n(src_b)
        comment = 'Name: {}, Created at: {}'.format("New Zone", "2017-04-22 10:09:18")
        zone_obj = Zone(None, "New Zone", comment)
        dst_tree = lxml.etree.fromstring(zone_obj.to_xml_string())
        dst_b = io.BytesIO()
        dst_tree.getroottree().write_c14n(dst_b)
        self.assertEqual(src_b.getvalue(), dst_b.getvalue())

    def test_03_post_security_policy_matrix(self):
        """Posting a matrix returns the id from the 'location' header.

        NOTE(review): the local ``security_policy`` dict shadows the
        ``security_policy`` module imported at file level (used by
        test_17); harmless here because the shadowing is method-local.
        """
        self.mock_get_uri.return_value.headers = {'location': '1'}
        self.mock_get_uri.return_value.content = fake_request_response("zones")
        security_policy_name = 'Some Policy Name'
        # Matrix: source zone -> destination zone -> requirement.
        security_policy = {
            'internal': {
                'external': {
                    'severity': 'critical',
                    'access_type': 'ignored',
                    'allowed_services': ''
                }
            },
            'external': {
                'internal': {
                    'severity': 'high',
                    'access_type': 'restricted',
                    'allowed_services': 'https;Other 53;AOL;udp 88'
                }
            },
            'dmz': {
                'internal': {
                    'severity': 'critical',
                    'access_type': 'blocked',
                    'allowed_services': ''
                },
                'dmz': {
                    'severity': 'low',
                    'access_type': 'ignored',
                    'allowed_services': ''
                }
            }
        }
        policy_id = self.helper.post_security_policy_matrix(security_policy_name, security_policy)
        self.assertEqual(policy_id, 1)

    def test_04_post_zone_entry(self):
        """Posting a zone entry sends the expected XML body and URL."""
        self.mock_get_uri.return_value.headers = {'location': '1'}
        self.mock_get_uri.return_value.status_code = 201
        zone_entry = Zone_Entry(1234, "Description", "1.1.1.1", 0, '255.255.255.255', 36)
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            entry_id = self.helper.post_zone_entry(zone_entry.zoneId, zone_entry)
            self.assertEqual(entry_id, 1)
        mock_post_uri.assert_called_with('POST',
                                         'https://localhost/securetrack/api/zones/36/entries?context=1',
                                         auth=('username', 'password'),
                                         data='<zone_entry>\n <comment>Description</comment>\n <id>1234</id>\n <ip>1.1.1.1</ip>\n <netmask>255.255.255.255</netmask>\n <zoneId>36</zoneId>\n</zone_entry>', headers={'Content-Type': 'application/xml'})

    def test_05_delete_zone_entry(self):
        """Deleting a zone entry issues the expected DELETE request."""
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            result = self.helper.delete_zone_entry_by_zone_and_entry_id(1, 1)
            self.assertTrue(result)
        mock_post_uri.assert_called_with('DELETE',
                                         'https://localhost/securetrack/api/zones/1/entries/1?context=1',
                                         auth=('username', 'password'),
                                         headers={'Content-Type': 'application/xml'})

    def test_06_modify_zone_entry(self):
        """Updating an entry PUTs the modified XML to the entry URL."""
        self.mock_get_uri.return_value.content = fake_request_response("zone_entries")
        zone_entries = self.helper.get_entries_for_zone_id(13)
        zone_entry = zone_entries[0]
        zone_entry.comment = "Modified entry"
        zone_entry.ip = '101.101.101.101'
        zone_entry.negate = 0
        zone_entry.netmask = '255.255.255.255'
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            result = self.helper.put_zone_entry(13, zone_entry)
            self.assertTrue(result)
        mock_post_uri.assert_called_with('PUT',
                                         'https://localhost/securetrack/api/zones/13/entries/54?context=1',
                                         auth=('username', 'password'),
                                         data='<zone_entry>\n <comment>Modified entry</comment>\n <id>54</id>\n <ip>101.101.101.101</ip>\n <negate>0</negate>\n <netmask>255.255.255.255</netmask>\n <zoneId>13</zoneId>\n</zone_entry>', headers={'Content-Type': 'application/xml'})

    def test_07_get_zone_by_name(self):
        """Zone lookup by name returns the matching Zone."""
        self.mock_get_uri.return_value.content = fake_request_response("zones")
        zone = self.helper.get_zone_by_name("dmz")
        self.assertIsInstance(zone, Zone)
        self.assertEqual(zone.name, "dmz")

    def test_08_get_device_revisions_by_id(self):
        """Revisions payload parses into a non-empty revisions list."""
        self.mock_get_uri.return_value.content = fake_request_response("revisions")
        revisions = self.helper.get_device_revisions_by_id(device_id=155)
        self.assertIsInstance(revisions, Device_Revisions_List)
        self.assertTrue(len(revisions) > 0)

    def test_09_get_policy_analysis(self):
        """Policy analysis parses into Policy_Analysis_Query_Result."""
        self.mock_get_uri.return_value.content = fake_request_response("policy_analysis_query_result")
        policy_analysis = self.helper.get_policy_analysis(155)
        self.assertIsInstance(policy_analysis, Policy_Analysis_Query_Result)

    def test_10_get_security_policies(self):
        """Security policies parse into Security_Policies_List."""
        self.mock_get_uri.return_value.content = fake_request_response("securitypolicylist")
        policies = self.helper.get_security_policies()
        self.assertIsInstance(policies, Security_Policies_List)

    def test_11_get_security_policy_by_name(self):
        """Policy lookup by name returns the matching Security_Policy."""
        self.mock_get_uri.return_value.content = fake_request_response("securitypolicylist")
        policy = self.helper.get_security_policy_by_name("policy")
        self.assertIsInstance(policy, Security_Policy)
        self.assertEqual(policy.name, "policy")

    def test_12_get_security_policy_by_id(self):
        """Policy lookup by id returns the matching policy."""
        self.mock_get_uri.return_value.content = fake_request_response("securitypolicylist")
        policy = self.helper.get_security_policy_by_id(3)
        self.assertEqual(policy.id, 3)

    def test_13_delete_security_policy_matrix(self):
        """Deleting a matrix issues the expected DELETE request."""
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            result = self.helper.delete_security_policy_matrix(3)
            self.assertTrue(result)
        mock_post_uri.assert_called_with('DELETE',
                                         'https://localhost/securetrack/api/security_policies/3',
                                         auth=('username', 'password'),
                                         headers={'Content-Type': 'application/xml'})

    def test_14_get_revision_by_id(self):
        """Revision payload parses into a Device_Revision.

        NOTE(review): assertTrue(revision.id, 5685) treats 5685 as the
        failure message — assertEqual was probably intended.
        """
        self.mock_get_uri.return_value.content = fake_request_response("revision")
        revision = self.helper.get_revision_by_id(5685)
        self.assertIsInstance(revision, Device_Revision)
        self.assertTrue(revision.id, 5685)

    def test_15_get_security_policy_device_violations_by_severity(self):
        """Violations payload parses into SecurityPolicyDeviceViolations."""
        self.mock_get_uri.return_value.content = fake_request_response("security_policy_device_violations")
        violations = self.helper.get_security_policy_device_violations_by_severity(159, "CRITICAL", "SECURITY_POLICY")
        self.assertIsInstance(violations, SecurityPolicyDeviceViolations)

    def test_16_get_policies_for_revision(self):
        """Policies payload parses into a Policy_List."""
        self.mock_get_uri.return_value.content = fake_request_response("policies")
        policies = self.helper.get_policies_for_revision(1)
        self.assertIsInstance(policies, Policy_List)

    def test_17_post_security_policy_exception(self):
        """Round-trips the exception fixture: parse, re-post, assert body."""
        self.mock_get_uri.return_value.headers = {'location': '1'}
        self.mock_get_uri.return_value.status_code = 201
        xml = fake_request_response("exception")
        policy_exception = security_policy.Security_Policy_Exception.from_xml_string(xml.decode("utf-8"))
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            self.helper.post_security_policy_exception(policy_exception)
        mock_post_uri.assert_called_with(
            'POST',
            'https://localhost/securetrack/api/security_policies/exceptions/?context=1',
            auth=('username', 'password'),
            data='<security_policy_exception>\n <approved_by>admin</approved_by>\n <created_by>admin</created_by>\n <creation_date>2016-01-01</creation_date>\n <description>Allow traffic from 192.168.1.1 to 192.168.1.2 on TCP port 4321</description>\n <domain>\n <description>Allow traffic from 192.168.1.1 to 192.168.1.2 on TCP port 4321</description>\n <id/>\n <name>Allow traffic from 192.168.1.1 to 192.168.1.2 on TCP port 4321</name>\n </domain>\n <exempted_traffic_list>\n <exempted_traffic>\n <comment>Creating USP exception for ticket 123</comment>\n <dest_network_collection>\n <network_items>\n <network_item xsi:type="subnet" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n <id/>\n <ip>192.168.1.2</ip>\n <prefix>32</prefix>\n </network_item>\n </network_items>\n </dest_network_collection>\n <security_requirements>\n <zone_to_zone_security_requirement>\n <from_domain/>\n <from_zone>30.100.30.0</from_zone>\n <policy_name>Test</policy_name>\n <to_domain/>\n <to_zone>200.200.200.0</to_zone>\n </zone_to_zone_security_requirement>\n </security_requirements>\n <service_collection>\n <service_items>\n <service_item xsi:type="custom" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n <id/>\n <port>4321</port>\n <protocol>tcp</protocol>\n </service_item>\n </service_items>\n </service_collection>\n <source_network_collection>\n <network_items>\n <network_item xsi:type="subnet" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n <id/>\n <ip>192.168.1.1</ip>\n <prefix>32</prefix>\n </network_item>\n </network_items>\n </source_network_collection>\n </exempted_traffic>\n </exempted_traffic_list>\n <expiration_date>2025-01-01</expiration_date>\n <name>Allow traffic from 192.168.1.1 to 192.168.1.2 on TCP port 4321</name>\n <requested_by>admin</requested_by>\n <ticket_id>123</ticket_id>\n</security_policy_exception>',
            headers={'Content-Type': 'application/xml'}
        )

    # @patch('pytos.common.rest_requests.requests.Request')
    # @patch('Secure_Track_Helper.get_entries_for_zone_id')
    def test_18_delete_zone_by_zone_id(self):
        """Deleting a zone issues the expected DELETE request."""
        with patch('pytos.common.rest_requests.requests.Request') as mock_delete_uri:
            self.helper.delete_zone_by_zone_id(1, True)
        mock_delete_uri.assert_called_with(
            'DELETE',
            'https://localhost/securetrack/api/zones/1',
            auth=('username', 'password'),
            headers={'Content-Type': 'application/xml'}
        )

    def test_19_get_zone_descendants(self):
        """Zone descendants parse into ZoneDescendantsList.

        NOTE(review): the trailing ``assert_called_with("GET")`` checks the
        *session send* mock was last called with a single positional "GET",
        which does not match how it is called elsewhere — verify this
        assertion actually passes.
        """
        self.mock_get_uri.return_value.content = fake_request_response("zone_descendants")
        Zone_descendants_list = self.helper.get_zone_descendants("16")
        self.assertIsInstance(Zone_descendants_list, ZoneDescendantsList)
        self.mock_get_uri.assert_called_with("GET")
class TestTopology(unittest.TestCase):
    """Integration-style tests for topology endpoints.

    NOTE(review): unlike the classes above, this one does not mock the HTTP
    session and relies on ``conf`` and ``cisco_ASA_id``, neither of which is
    defined in this file — these tests will raise NameError unless those
    globals are provided elsewhere.
    """

    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))

    def test_03_get_topology_interfaces(self):
        """Valid ids return interfaces; bad input raises."""
        # assert valid request
        topology_interfaces = self.helper.get_topology_interfaces(cisco_ASA_id)
        self.assertIsInstance(topology_interfaces, pytos.securetrack.xml_objects.rest.rules.Topology_Interfaces_List)
        self.assertTrue(len(topology_interfaces) > 0)
        # unknown device id yields an empty (but valid) list
        topology_interfaces = self.helper.get_topology_interfaces(5555)
        self.assertIsInstance(topology_interfaces, pytos.securetrack.xml_objects.rest.rules.Topology_Interfaces_List)
        self.assertTrue(len(topology_interfaces) == 0)
        # assert invalid request
        with self.assertRaises(REST_Bad_Request_Error):
            # noinspection PyTypeChecker
            self.helper.get_topology_interfaces("NotValidRequest")
class TestDomains(unittest.TestCase):
    """Integration-style tests for domain endpoints.

    NOTE(review): depends on a module-level ``conf`` and
    ``default_domain_id`` that are not defined in this file.
    """

    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))

    def test_01_get_domains(self):
        """Domains endpoint returns a Domains collection.

        NOTE(review): assertTrue(domains[0].name, "Default") treats the
        second argument as the failure message — assertEqual was probably
        intended.
        """
        domains = self.helper.get_domains()
        self.assertIsInstance(domains, Domains)
        self.assertTrue(domains[0].name, "Default")

    def test_02_get_domain_by_id(self):
        """Valid id returns a Domain; unknown id raises."""
        # assert valid request
        domain = self.helper.get_domain_by_id(default_domain_id)
        self.assertIsInstance(domain, pytos.securetrack.xml_objects.rest.domain.Domain)
        self.assertTrue(domain.name, "Default")
        # assert invalid request
        with self.assertRaises(REST_Bad_Request_Error):
            self.helper.get_domain_by_id(9999)
class TestNetworkObjects(unittest.TestCase):
    """Integration-style tests for network-object endpoints.

    NOTE(review): depends on module-level ``conf`` and ``cisco_ASA_id`` that
    are not defined in this file.  ``test_01`` seeds the globals
    ``g_network_object`` / ``g_network_object_group`` used by later tests,
    so test ordering matters (alphabetical order makes this work).
    """

    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))

    def test_01_get_network_objects_for_device(self):
        """Fetches device objects and seeds the shared test globals."""
        global g_network_object
        global g_network_object_group
        # assert valid request
        network_objects = self.helper.get_network_objects_for_device(cisco_ASA_id)
        self.assertIsInstance(network_objects, pytos.securetrack.xml_objects.rest.rules.Network_Objects_List)
        self.assertTrue(len(network_objects) > 0)
        # save a single network object for later uses
        g_network_object = network_objects[0]
        # save a single network object group for later uses
        for network_object in network_objects:
            if network_object.type == "group" and len(network_object.members) > 1:
                g_network_object_group = network_object
        # assert invalid request
        network_objects = self.helper.get_network_objects_for_device(5555)
        self.assertIsInstance(network_objects, pytos.securetrack.xml_objects.rest.rules.Network_Objects_List)
        self.assertFalse(len(network_objects))

    def test_02_network_object_text_search(self):
        """Text search returns results; empty arguments raise."""
        # assert valid request
        network_objects = self.helper.network_object_text_search("192.168", "any_field")
        self.assertIsInstance(network_objects, pytos.securetrack.xml_objects.rest.rules.Network_Objects_List)
        self.assertTrue(len(network_objects) > 0)
        # assert invalid request
        with self.assertRaises(ValueError):
            self.helper.network_object_text_search("", "")

    def test_03_network_object_subnet_search(self):
        """Subnet search returns results; empty arguments raise."""
        # assert valid request
        network_objects = self.helper.network_object_subnet_search("192.168.0.0", "contained_in")
        self.assertIsInstance(network_objects, pytos.securetrack.xml_objects.rest.rules.Network_Objects_List)
        self.assertTrue(len(network_objects) > 0)
        # assert invalid request
        with self.assertRaises(ValueError):
            self.helper.network_object_subnet_search("", "")

    def test_04_get_network_objects(self):
        """Bulk fetch returns a non-empty dict of network objects."""
        network_objects = self.helper.get_network_objects()
        self.assertIsInstance(network_objects, dict)
        self.assertTrue(len(network_objects) > 0)

    def test_05_get_network_object_by_device_and_object_id(self):
        """Lookup by device + object id works; bad ids raise."""
        # assert valid request
        network_object = self.helper.get_network_object_by_device_and_object_id(cisco_ASA_id, g_network_object.id)
        self.assertIsInstance(network_object, pytos.securetrack.xml_objects.rest.rules.Basic_Network_Object)
        self.assertTrue(network_object.id and network_object.name)
        # assert invalid requests
        with self.assertRaises(ValueError):
            self.helper.get_network_object_by_device_and_object_id(5555, g_network_object.id)
        with self.assertRaises(ValueError):
            self.helper.get_network_object_by_device_and_object_id(cisco_ASA_id, 55555)

    def test_06_get_member_network_objects_for_group_network_object(self):
        """Group expansion yields host/subnet members; bad input raises."""
        # assert valid request
        members = self.helper.get_member_network_objects_for_group_network_object(g_network_object_group, cisco_ASA_id)
        for member in members:
            self.assertIsInstance(member, (pytos.securetrack.xml_objects.rest.rules.Host_Network_Object,
                                           pytos.securetrack.xml_objects.rest.rules.Subnet_Network_Object))
            self.assertTrue(member.id and member.name)
        # assert invalid request
        with self.assertRaises(KeyError):
            self.helper.get_member_network_objects_for_group_network_object(g_network_object_group, 5555)
        with self.assertRaises(AttributeError):
            self.helper.get_member_network_objects_for_group_network_object(g_network_object, cisco_ASA_id)
class TestServices(unittest.TestCase):
    """Integration-style tests for service endpoints.

    NOTE(review): depends on module-level ``conf`` and ``cisco_ASA_id`` that
    are not defined in this file.  ``test_01`` seeds the globals
    ``g_service`` / ``g_service_group`` used by later tests, so test
    ordering matters (alphabetical order makes this work).
    """

    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))

    def test_01_get_services_for_device(self):
        """Fetches device services and seeds the shared test globals."""
        global g_service
        global g_service_group
        # assert valid request
        services = self.helper.get_services_for_device(cisco_ASA_id)
        self.assertIsInstance(services, pytos.securetrack.xml_objects.rest.rules.Services_List)
        self.assertTrue(len(services) > 0)
        # save a single service for later uses
        g_service = services.services[0]
        # save a single service group for later uses
        for service in services:
            if service.type == "group" and len(service.members) > 1:
                g_service_group = service
        # assert invalid request
        services = self.helper.get_services_for_device(5555)
        self.assertIsInstance(services, pytos.securetrack.xml_objects.rest.rules.Services_List)
        self.assertFalse(len(services))

    def test_02_get_service_for_device_by_name(self):
        """Lookup by display name works; unknown names raise."""
        # assert valid request
        service = self.helper.get_service_for_device_by_name(cisco_ASA_id, g_service.display_name)
        self.assertIsInstance(service, pytos.securetrack.xml_objects.rest.rules.Single_Service)
        self.assertTrue(service)
        # assert invalid request
        with self.assertRaises(ValueError):
            self.helper.get_service_for_device_by_name(cisco_ASA_id, "NotExsistingService")

    def test_03_get_service_by_device_and_object_id(self):
        """Lookup by device + service id works; bad ids raise.

        NOTE(review): assertTrue(service.name, ...) treats the second
        argument as the failure message — assertEqual was probably intended.
        """
        # assert valid request
        service = self.helper.get_service_by_device_and_object_id(cisco_ASA_id, g_service.id)
        self.assertIsInstance(service, pytos.securetrack.xml_objects.rest.rules.Single_Service)
        # self.assertTrue(service.name, "!80 (tcp)")
        self.assertTrue(service.name, g_service.name)
        # assert invalid request
        with self.assertRaises(ValueError):
            self.helper.get_service_by_device_and_object_id(cisco_ASA_id, 9999999)
        with self.assertRaises(ValueError):
            self.helper.get_service_by_device_and_object_id(0, 467677)

    def test_04_get_member_services_for_group_service(self):
        """Group expansion yields member services; bad input raises."""
        # assert valid request
        member_services = self.helper.get_member_services_for_group_service(g_service_group, cisco_ASA_id)
        for member in member_services:
            self.assertIsInstance(member, pytos.securetrack.xml_objects.rest.rules.Single_Service)
            self.assertTrue(member.id and member.name)
        # assert invalid request
        with self.assertRaises(KeyError):
            self.helper.get_member_services_for_group_service(g_service_group, 5555)
        with self.assertRaises(AttributeError):
            self.helper.get_member_services_for_group_service(g_service, cisco_ASA_id)
class TestGeneralSettings(unittest.TestCase):
    """Integration tests for SecureTrack change-authorization status."""
    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))
    def test_03_get_change_authorization_status(self):
        """Compare the two oldest revisions of the device and inspect the status payload."""
        # Fetch the revision history for the device under test.
        revision_list = self.helper.get_device_revisions_by_id(device_id=cisco_ASA_id)
        self.assertIsInstance(revision_list, pytos.securetrack.xml_objects.rest.device.Device_Revisions_List)
        self.assertTrue(len(revision_list) > 1)
        # Order revisions chronologically (oldest first) and take the first two.
        ordered_revisions = sorted(revision_list, key=lambda rev: rev.date)
        earliest_revision = ordered_revisions[0]
        next_revision = ordered_revisions[1]
        # Valid request: the status exposes both revisions and their bindings.
        status = self.helper.get_change_authorization_status(earliest_revision.id, next_revision.id)
        self.assertIsInstance(status.new_revision, pytos.securetrack.xml_objects.rest.device.Device_Revision)
        self.assertIsInstance(status.old_revision, pytos.securetrack.xml_objects.rest.device.Device_Revision)
        self.assertIsInstance(status.change_authorization_bindings,
                              pytos.securetrack.xml_objects.rest.rules.ChangeAuthorizationBindings)
        self.assertTrue(status.old_revision.id and status.new_revision.id)
        # Invalid request: unknown revision ids raise a not-found error.
        with self.assertRaises(REST_Not_Found_Error):
            self.helper.get_change_authorization_status(9999, 9999)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
ST Tests (SecureTrack helper unit tests with a mocked HTTP transport follow)
#!/opt/tufin/securitysuite/ps/python/bin/python3.4
import sys
import lxml.etree
import pytos
import os
import io
import unittest
from unittest.mock import patch
from pytos.securetrack.helpers import Secure_Track_Helper
from pytos.securetrack.xml_objects.rest.cleanups import Generic_Cleanup_List
from pytos.securetrack.xml_objects.rest.domain import Domains
from pytos.common.exceptions import REST_Bad_Request_Error, REST_Not_Found_Error
from pytos.securetrack.xml_objects.rest import security_policy
from pytos.securetrack.xml_objects.rest.device import Device_Revision, Device, Devices_List, RuleSearchDeviceList, \
Device_Revisions_List
from pytos.securetrack.xml_objects.rest.rules import Rule_Documentation, Record_Set, Bindings_List, \
Interfaces_List, Cleanup_Set, Rules_List, Network_Objects_List, Policy_Analysis_Query_Result, \
SecurityPolicyDeviceViolations, Policy_List
from pytos.securetrack.xml_objects.rest.security_policy import Security_Policies_List, Security_Policy
from pytos.securetrack.xml_objects.rest.zones import Zone_List, Zone, Zone_Entry, ZoneDescendantsList
def fake_request_response(rest_file):
    """Load a canned XML REST response from the test resources directory.

    The fixture sub-directory is derived from the *calling* test-case class
    name via the caller's stack frame, so each TestCase keeps its own files.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    caller_class_dir = sys._getframe(1).f_locals['self'].__class__.__name__.lower()
    fixture_path = os.path.join(base_dir, "resources", caller_class_dir, "{}.xml".format(rest_file))
    with open(fixture_path, mode='rb') as fixture:
        return fixture.read()
class TestDevices(unittest.TestCase):
    """Unit tests for device-level helper calls; the HTTP transport is mocked."""
    def setUp(self):
        # Helper pointed at a dummy host; every request is intercepted by the patch below.
        self.helper = Secure_Track_Helper("127.0.0.1", ("username", "password"))
        self.patcher = patch('pytos.common.rest_requests.requests.Session.send')
        self.mock_get_uri = self.patcher.start()
        self.mock_get_uri.return_value.status_code = 200
    def tearDown(self):
        self.patcher.stop()
    def test_01_get_device(self):
        # NOTE(review): `added_offline_device_id` is only assigned in
        # test_10_add_offline_device, which sorts *after* this test under
        # unittest's alphabetical ordering — confirm the intended run order,
        # otherwise this line raises NameError.
        self.mock_get_uri.return_value.content = fake_request_response("device_by_id")
        device_by_id = self.helper.get_device_by_id(added_offline_device_id)
        self.assertIsInstance(device_by_id, Device)
    def test_02_get_devices_list(self):
        self.mock_get_uri.return_value.content = fake_request_response("device_list")
        devices_list = self.helper.get_devices_list()
        self.assertIsInstance(devices_list, Devices_List)
        self.assertTrue(len(devices_list) == devices_list.count)
        self.assertTrue(devices_list.count > 0)
    def test_03_get_devices_list_with_custom_param(self):
        self.mock_get_uri.return_value.content = fake_request_response("device_list")
        devices_list = self.helper.get_devices_list(custom_params={'vendor': 'cisco'})
        self.assertIsInstance(devices_list, Devices_List)
        self.assertEqual(len(devices_list), devices_list.count)
        self.assertTrue(devices_list.count > 0)
    def test_04_get_device_id_by_name(self):
        self.mock_get_uri.return_value.content = fake_request_response("device_list")
        device_id = self.helper.get_device_id_by_name(device_name="Router 2801")
        # NOTE(review): assertTrue(x, y) only checks truthiness of x (y is the
        # failure message); assertEqual(device_id, 155) looks intended here.
        self.assertTrue(device_id, 155)
        # assert invalid request - 2 devices with same name
        with self.assertRaises(IndexError):
            self.helper.get_device_id_by_name(device_name="ASA FireWall")
        # assert invalid request - Non existing device
        with self.assertRaises(ValueError):
            self.helper.get_device_id_by_name(device_name="NonExistingDeviceName")
    def test_05_get_cleanups_for_device_by_id(self):
        self.mock_get_uri.return_value.content = fake_request_response("cleanups_by_device_id")
        cleanups = self.helper.get_cleanups_for_device_by_id(155)
        self.assertIsInstance(cleanups, Generic_Cleanup_List)
        self.assertTrue(len(cleanups) > 0)
    def test_06_failed_to_get_cleanups_for_device_by_id(self):
        # A 404 from the server is surfaced to the caller as ValueError.
        self.mock_get_uri.return_value.status_code = 404
        self.mock_get_uri.return_value.content = fake_request_response("no_found_error")
        with self.assertRaises(ValueError):
            self.helper.get_cleanups_for_device_by_id(5555)
    def test_07_get_bindings_for_device(self):
        self.mock_get_uri.return_value.content = fake_request_response("device_bindings")
        binding = self.helper.get_bindings_for_device(155)
        self.assertIsInstance(binding, Bindings_List)
        self.assertTrue(len(binding) > 0)
    def test_08_get_interfaces_for_device(self):
        self.mock_get_uri.return_value.content = fake_request_response("device_interfaces")
        interfaces = self.helper.get_interfaces_for_device(155)
        self.assertIsInstance(interfaces, Interfaces_List)
        self.assertTrue(len(interfaces) > 0)
    def test_09_get_device_config(self):
        # NOTE(review): depends on `added_offline_device_id` (set in test_10)
        # and on whatever `content` the previous mock left behind — fragile.
        self.assertEqual(self.helper.get_device_config_by_id(added_offline_device_id), b'\x00')
    def test_10_add_offline_device(self):
        global added_offline_device_id
        # 201 Created with a Location header carrying the new device id.
        self.mock_get_uri.return_value.status_code = 201
        self.mock_get_uri.return_value.headers = {'location': '1'}
        added_offline_device_id = self.helper.add_offline_device("TEST_DEVICE_123", "Cisco", "router")
        self.assertIsInstance(added_offline_device_id, int)
    # def test_11_upload_device_offline_config(self):
    #     with tempfile.NamedTemporaryFile(delete=False) as config_tempfile:
    #         config_tempfile.write(self.OFFLINE_TEST_DATA)
    #         config_temp_file_path = config_tempfile.name
    #     with open(config_temp_file_path) as config_tempfile:
    #         self.helper.upload_device_offline_config(added_offline_device_id, config_tempfile)
    #     os.remove(config_temp_file_path)
class TestRules(unittest.TestCase):
    """Unit tests for rule-related helper calls; the HTTP transport is mocked."""
    def setUp(self):
        self.helper = Secure_Track_Helper("127.0.0.1", ("username", "password"))
        self.patcher = patch('pytos.common.rest_requests.requests.Session.send')
        self.mock_get_uri = self.patcher.start()
        self.mock_get_uri.return_value.status_code = 200
    def tearDown(self):
        self.patcher.stop()
    def test_01_get_shadowed_rules(self):
        self.mock_get_uri.return_value.content = fake_request_response("cleanup_set")
        cleanup = self.helper.get_shadowed_rules_for_device_by_id(155)
        self.assertIsInstance(cleanup, Cleanup_Set)
    def test_02_get_rule_by_device_and_rule_id(self):
        self.mock_get_uri.return_value.content = fake_request_response("rules")
        rules = self.helper.get_rule_by_device_and_rule_id(155, 1318013)
        self.assertEqual(rules[0].id, 1318013)
    def test_03_get_rules_for_device(self):
        self.mock_get_uri.return_value.content = fake_request_response("rules")
        rules = self.helper.get_rules_for_device(155)
        self.assertIsInstance(rules, Rules_List)
        self.assertTrue(len(rules) > 0)
    def test_04_failed_to_get_rules_for_device(self):
        # An empty fixture produces an empty (but valid) Rules_List.
        self.mock_get_uri.return_value.content = fake_request_response("empty_rules")
        rules = self.helper.get_rules_for_device(155)
        self.assertIsInstance(rules, Rules_List)
        self.assertTrue(len(rules) == 0)
    def test_05_get_shadowing_rules_for_device_id_and_rule_uids(self):
        self.mock_get_uri.return_value.content = fake_request_response("cleanup_set")
        uid = "{53b95431-73ee-43de-a153-d299f4eb4804}"
        shadowing_rules = self.helper.get_shadowing_rules_for_device_id_and_rule_uids(155, uid)
        self.assertIsInstance(shadowing_rules, Cleanup_Set)
    def test_06_failed_get_shadowing_rules_for_device_id_and_rule_uids(self):
        # A 400 from the server surfaces as REST_Bad_Request_Error.
        self.mock_get_uri.return_value.content = fake_request_response("bad_request_error")
        self.mock_get_uri.return_value.status_code = 400
        with self.assertRaises(REST_Bad_Request_Error):
            self.helper.get_shadowing_rules_for_device_id_and_rule_uids(155, [])
    def test_07_get_devices_by_rule_search(self):
        self.mock_get_uri.return_value.content = fake_request_response("device_list_by_rule_search")
        devices = self.helper.get_devices_by_rule_search()
        self.assertIsInstance(devices, RuleSearchDeviceList)
    def test_08_rule_search_for_device(self):
        self.mock_get_uri.return_value.content = fake_request_response("rules")
        rules = self.helper.rule_search_for_device(155)
        self.assertIsInstance(rules, Rules_List)
        self.assertTrue(len(rules) > 0)
    def test_09_get_rules_for_revision(self):
        self.mock_get_uri.return_value.content = fake_request_response("rules")
        rules = self.helper.get_rules_for_revision(1, True)
        self.assertIsInstance(rules, Rules_List)
        self.assertTrue(len(rules) > 0)
    def test_10_rule_documentation_format(self):
        # Round-trip check: a Rule_Documentation built in code serializes to
        # the same canonical XML (C14N) as the stored fixture.
        src_xml = fake_request_response("rule_documentation")
        src_tree = lxml.etree.fromstring(src_xml)
        src_b = io.BytesIO()
        src_tree.getroottree().write_c14n(src_b)
        # create a new record set for the rule documentation
        record_sets = [
            Record_Set("support@tufin.com", "admin", "2019-01-08T00:00:00+02:00", 1235, "this is a comment", "")
        ]
        rd = Rule_Documentation("admin", 'Comment for unittest suit', record_sets, '', True)
        dst_tree = lxml.etree.fromstring(rd.to_xml_string())
        dst_b = io.BytesIO()
        dst_tree.getroottree().write_c14n(dst_b)
        self.assertEqual(src_b.getvalue(), dst_b.getvalue())
    def test_11_get_rule_documentation_by_device_id_and_rule_id(self):
        self.mock_get_uri.return_value.content = fake_request_response("rule_documentation")
        rd = self.helper.get_rule_documentation_by_device_id_and_rule_id(155, 1330304)
        self.assertIsInstance(rd, Rule_Documentation)
    def test_12_failed_to_get_rule_documentation_by_device_id_and_rule_id(self):
        self.mock_get_uri.return_value.content = fake_request_response("not_found_error")
        self.mock_get_uri.return_value.status_code = 404
        with self.assertRaises(ValueError):
            self.helper.get_rule_documentation_by_device_id_and_rule_id(155, 1330303)
    def test_13_get_network_objects(self):
        self.mock_get_uri.return_value.content = fake_request_response("network_objects_search")
        network_objects = self.helper.network_object_text_search("81.81.81.5", "any_field")
        self.assertIsInstance(network_objects, Network_Objects_List)
class TestZonesPoliciesAndRevisions(unittest.TestCase):
    """Unit tests for zones, security policies and revisions; HTTP transport is mocked."""
    def setUp(self):
        self.helper = Secure_Track_Helper("localhost", ("username", "password"))
        self.patcher = patch('pytos.common.rest_requests.requests.Session.send')
        self.mock_get_uri = self.patcher.start()
        self.mock_get_uri.return_value.status_code = 200
    def tearDown(self):
        self.patcher.stop()
    def test_01_get_zones(self):
        self.mock_get_uri.return_value.content = fake_request_response("zones")
        zones = self.helper.get_zones()
        self.assertIsInstance(zones, Zone_List)
    def test_02_post_zone(self):
        # Serialization round-trip: a Zone built in code must match the
        # stored fixture after XML canonicalization (C14N).
        src_xml = fake_request_response("post_zone")
        src_tree = lxml.etree.fromstring(src_xml)
        src_b = io.BytesIO()
        src_tree.getroottree().write_c14n(src_b)
        comment = 'Name: {}, Created at: {}'.format("New Zone", "2017-04-22 10:09:18")
        zone_obj = Zone(None, "New Zone", comment)
        dst_tree = lxml.etree.fromstring(zone_obj.to_xml_string())
        dst_b = io.BytesIO()
        dst_tree.getroottree().write_c14n(dst_b)
        self.assertEqual(src_b.getvalue(), dst_b.getvalue())
    def test_03_post_security_policy_matrix(self):
        self.mock_get_uri.return_value.headers = {'location': '1'}
        self.mock_get_uri.return_value.content = fake_request_response("zones")
        security_policy_name = 'Some Policy Name'
        # NOTE(review): this local shadows the imported `security_policy`
        # module (used in test_17) within this method only.
        security_policy = {
            'internal': {
                'external': {
                    'severity': 'critical',
                    'access_type': 'ignored',
                    'allowed_services': ''
                }
            },
            'external': {
                'internal': {
                    'severity': 'high',
                    'access_type': 'restricted',
                    'allowed_services': 'https;Other 53;AOL;udp 88'
                }
            },
            'dmz': {
                'internal': {
                    'severity': 'critical',
                    'access_type': 'blocked',
                    'allowed_services': ''
                },
                'dmz': {
                    'severity': 'low',
                    'access_type': 'ignored',
                    'allowed_services': ''
                }
            }
        }
        policy_id = self.helper.post_security_policy_matrix(security_policy_name, security_policy)
        self.assertEqual(policy_id, 1)
    def test_04_post_zone_entry(self):
        self.mock_get_uri.return_value.headers = {'location': '1'}
        self.mock_get_uri.return_value.status_code = 201
        zone_entry = Zone_Entry(1234, "Description", "1.1.1.1", 0, '255.255.255.255', 36)
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            entry_id = self.helper.post_zone_entry(zone_entry.zoneId, zone_entry)
            self.assertEqual(entry_id, 1)
        # Verify the exact outgoing request (URL, auth, serialized body).
        mock_post_uri.assert_called_with('POST',
                                         'https://localhost/securetrack/api/zones/36/entries?context=1',
                                         auth=('username', 'password'),
                                         data='<zone_entry>\n <comment>Description</comment>\n <id>1234</id>\n <ip>1.1.1.1</ip>\n <netmask>255.255.255.255</netmask>\n <zoneId>36</zoneId>\n</zone_entry>', headers={'Content-Type': 'application/xml'})
    def test_05_delete_zone_entry(self):
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            result = self.helper.delete_zone_entry_by_zone_and_entry_id(1, 1)
            self.assertTrue(result)
        mock_post_uri.assert_called_with('DELETE',
                                         'https://localhost/securetrack/api/zones/1/entries/1?context=1',
                                         auth=('username', 'password'),
                                         headers={'Content-Type': 'application/xml'})
    def test_06_modify_zone_entry(self):
        self.mock_get_uri.return_value.content = fake_request_response("zone_entries")
        zone_entries = self.helper.get_entries_for_zone_id(13)
        zone_entry = zone_entries[0]
        zone_entry.comment = "Modified entry"
        zone_entry.ip = '101.101.101.101'
        zone_entry.negate = 0
        zone_entry.netmask = '255.255.255.255'
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            result = self.helper.put_zone_entry(13, zone_entry)
            self.assertTrue(result)
        mock_post_uri.assert_called_with('PUT',
                                         'https://localhost/securetrack/api/zones/13/entries/54?context=1',
                                         auth=('username', 'password'),
                                         data='<zone_entry>\n <comment>Modified entry</comment>\n <id>54</id>\n <ip>101.101.101.101</ip>\n <negate>0</negate>\n <netmask>255.255.255.255</netmask>\n <zoneId>13</zoneId>\n</zone_entry>', headers={'Content-Type': 'application/xml'})
    def test_07_get_zone_by_name(self):
        self.mock_get_uri.return_value.content = fake_request_response("zones")
        zone = self.helper.get_zone_by_name("dmz")
        self.assertIsInstance(zone, Zone)
        self.assertEqual(zone.name, "dmz")
    def test_08_get_device_revisions_by_id(self):
        self.mock_get_uri.return_value.content = fake_request_response("revisions")
        revisions = self.helper.get_device_revisions_by_id(device_id=155)
        self.assertIsInstance(revisions, Device_Revisions_List)
        self.assertTrue(len(revisions) > 0)
    def test_09_get_policy_analysis(self):
        self.mock_get_uri.return_value.content = fake_request_response("policy_analysis_query_result")
        policy_analysis = self.helper.get_policy_analysis(155)
        self.assertIsInstance(policy_analysis, Policy_Analysis_Query_Result)
    def test_10_get_security_policies(self):
        self.mock_get_uri.return_value.content = fake_request_response("securitypolicylist")
        policies = self.helper.get_security_policies()
        self.assertIsInstance(policies, Security_Policies_List)
    def test_11_get_security_policy_by_name(self):
        self.mock_get_uri.return_value.content = fake_request_response("securitypolicylist")
        policy = self.helper.get_security_policy_by_name("policy")
        self.assertIsInstance(policy, Security_Policy)
        self.assertEqual(policy.name, "policy")
    def test_12_get_security_policy_by_id(self):
        self.mock_get_uri.return_value.content = fake_request_response("securitypolicylist")
        policy = self.helper.get_security_policy_by_id(3)
        self.assertEqual(policy.id, 3)
    def test_13_delete_security_policy_matrix(self):
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            result = self.helper.delete_security_policy_matrix(3)
            self.assertTrue(result)
        mock_post_uri.assert_called_with('DELETE',
                                         'https://localhost/securetrack/api/security_policies/3',
                                         auth=('username', 'password'),
                                         headers={'Content-Type': 'application/xml'})
    def test_14_get_revision_by_id(self):
        self.mock_get_uri.return_value.content = fake_request_response("revision")
        revision = self.helper.get_revision_by_id(5685)
        self.assertIsInstance(revision, Device_Revision)
        # NOTE(review): assertTrue(x, y) only checks truthiness of x (y is a
        # message); assertEqual(revision.id, 5685) looks intended.
        self.assertTrue(revision.id, 5685)
    def test_15_get_security_policy_device_violations_by_severity(self):
        self.mock_get_uri.return_value.content = fake_request_response("security_policy_device_violations")
        violations = self.helper.get_security_policy_device_violations_by_severity(159, "CRITICAL", "SECURITY_POLICY")
        self.assertIsInstance(violations, SecurityPolicyDeviceViolations)
    def test_16_get_policies_for_revision(self):
        self.mock_get_uri.return_value.content = fake_request_response("policies")
        policies = self.helper.get_policies_for_revision(1)
        self.assertIsInstance(policies, Policy_List)
    def test_17_post_security_policy_exception(self):
        self.mock_get_uri.return_value.headers = {'location': '1'}
        self.mock_get_uri.return_value.status_code = 201
        xml = fake_request_response("exception")
        policy_exception = security_policy.Security_Policy_Exception.from_xml_string(xml.decode("utf-8"))
        with patch('pytos.common.rest_requests.requests.Request') as mock_post_uri:
            self.helper.post_security_policy_exception(policy_exception)
        mock_post_uri.assert_called_with(
            'POST',
            'https://localhost/securetrack/api/security_policies/exceptions/?context=1',
            auth=('username', 'password'),
            data='<security_policy_exception>\n <approved_by>admin</approved_by>\n <created_by>admin</created_by>\n <creation_date>2016-01-01</creation_date>\n <description>Allow traffic from 192.168.1.1 to 192.168.1.2 on TCP port 4321</description>\n <domain>\n <description>Allow traffic from 192.168.1.1 to 192.168.1.2 on TCP port 4321</description>\n <id/>\n <name>Allow traffic from 192.168.1.1 to 192.168.1.2 on TCP port 4321</name>\n </domain>\n <exempted_traffic_list>\n <exempted_traffic>\n <comment>Creating USP exception for ticket 123</comment>\n <dest_network_collection>\n <network_items>\n <network_item xsi:type="subnet" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n <id/>\n <ip>192.168.1.2</ip>\n <prefix>32</prefix>\n </network_item>\n </network_items>\n </dest_network_collection>\n <security_requirements>\n <zone_to_zone_security_requirement>\n <from_domain/>\n <from_zone>30.100.30.0</from_zone>\n <policy_name>Test</policy_name>\n <to_domain/>\n <to_zone>200.200.200.0</to_zone>\n </zone_to_zone_security_requirement>\n </security_requirements>\n <service_collection>\n <service_items>\n <service_item xsi:type="custom" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n <id/>\n <port>4321</port>\n <protocol>tcp</protocol>\n </service_item>\n </service_items>\n </service_collection>\n <source_network_collection>\n <network_items>\n <network_item xsi:type="subnet" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n <id/>\n <ip>192.168.1.1</ip>\n <prefix>32</prefix>\n </network_item>\n </network_items>\n </source_network_collection>\n </exempted_traffic>\n </exempted_traffic_list>\n <expiration_date>2025-01-01</expiration_date>\n <name>Allow traffic from 192.168.1.1 to 192.168.1.2 on TCP port 4321</name>\n <requested_by>admin</requested_by>\n <ticket_id>123</ticket_id>\n</security_policy_exception>',
            headers={'Content-Type': 'application/xml'}
        )
    # @patch('pytos.common.rest_requests.requests.Request')
    # @patch('Secure_Track_Helper.get_entries_for_zone_id')
    def test_18_delete_zone_by_zone_id(self):
        with patch('pytos.common.rest_requests.requests.Request') as mock_delete_uri:
            self.helper.delete_zone_by_zone_id(1, True)
        mock_delete_uri.assert_called_with(
            'DELETE',
            'https://localhost/securetrack/api/zones/1',
            auth=('username', 'password'),
            headers={'Content-Type': 'application/xml'}
        )
    def test_19_get_zone_descendants(self):
        self.mock_get_uri.return_value.content = fake_request_response("zone_descendants")
        Zone_descendants_list = self.helper.get_zone_descendants("16")
        self.assertIsInstance(Zone_descendants_list, ZoneDescendantsList)
class TestTopology(unittest.TestCase):
    """Integration tests for SecureTrack topology interface queries."""
    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))
    def test_03_get_topology_interfaces(self):
        # assert valid request
        topology_interfaces = self.helper.get_topology_interfaces(cisco_ASA_id)
        self.assertIsInstance(topology_interfaces, pytos.securetrack.xml_objects.rest.rules.Topology_Interfaces_List)
        self.assertTrue(len(topology_interfaces) > 0)
        # an unknown device id yields an empty (but valid) list
        topology_interfaces = self.helper.get_topology_interfaces(5555)
        self.assertIsInstance(topology_interfaces, pytos.securetrack.xml_objects.rest.rules.Topology_Interfaces_List)
        self.assertTrue(len(topology_interfaces) == 0)
        # assert invalid request (a non-integer id raises a bad-request error)
        with self.assertRaises(REST_Bad_Request_Error):
            # noinspection PyTypeChecker
            self.helper.get_topology_interfaces("NotValidRequest")
class TestDomains(unittest.TestCase):
    """Integration tests for SecureTrack domain lookups."""
    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))
    def test_01_get_domains(self):
        domains = self.helper.get_domains()
        self.assertIsInstance(domains, Domains)
        # BUG FIX: assertTrue(x, y) treats y as a failure *message* and never
        # compares against "Default"; assertEqual performs the intended check.
        self.assertEqual(domains[0].name, "Default")
    def test_02_get_domain_by_id(self):
        # assert valid request
        domain = self.helper.get_domain_by_id(default_domain_id)
        self.assertIsInstance(domain, pytos.securetrack.xml_objects.rest.domain.Domain)
        # BUG FIX: same assertTrue-with-message misuse as test_01.
        self.assertEqual(domain.name, "Default")
        # assert invalid request
        with self.assertRaises(REST_Bad_Request_Error):
            self.helper.get_domain_by_id(9999)
class TestNetworkObjects(unittest.TestCase):
    """Integration tests for network-object queries on a live SecureTrack device.

    test_01 seeds the module-level globals g_network_object and
    g_network_object_group that the later tests consume.
    """
    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))
    def test_01_get_network_objects_for_device(self):
        global g_network_object
        global g_network_object_group
        # assert valid request
        network_objects = self.helper.get_network_objects_for_device(cisco_ASA_id)
        self.assertIsInstance(network_objects, pytos.securetrack.xml_objects.rest.rules.Network_Objects_List)
        self.assertTrue(len(network_objects) > 0)
        # save a single network object for later uses
        g_network_object = network_objects[0]
        # save a single network object group for later uses
        for network_object in network_objects:
            if network_object.type == "group" and len(network_object.members) > 1:
                g_network_object_group = network_object
        # assert invalid request (unknown device id yields an empty list)
        network_objects = self.helper.get_network_objects_for_device(5555)
        self.assertIsInstance(network_objects, pytos.securetrack.xml_objects.rest.rules.Network_Objects_List)
        self.assertFalse(len(network_objects))
    def test_02_network_object_text_search(self):
        # assert valid request
        network_objects = self.helper.network_object_text_search("192.168", "any_field")
        self.assertIsInstance(network_objects, pytos.securetrack.xml_objects.rest.rules.Network_Objects_List)
        self.assertTrue(len(network_objects) > 0)
        # assert invalid request (empty search arguments raise ValueError)
        with self.assertRaises(ValueError):
            self.helper.network_object_text_search("", "")
    def test_03_network_object_subnet_search(self):
        # assert valid request
        network_objects = self.helper.network_object_subnet_search("192.168.0.0", "contained_in")
        self.assertIsInstance(network_objects, pytos.securetrack.xml_objects.rest.rules.Network_Objects_List)
        self.assertTrue(len(network_objects) > 0)
        # assert invalid request
        with self.assertRaises(ValueError):
            self.helper.network_object_subnet_search("", "")
    def test_04_get_network_objects(self):
        network_objects = self.helper.get_network_objects()
        self.assertIsInstance(network_objects, dict)
        self.assertTrue(len(network_objects) > 0)
    def test_05_get_network_object_by_device_and_object_id(self):
        # assert valid request
        network_object = self.helper.get_network_object_by_device_and_object_id(cisco_ASA_id, g_network_object.id)
        self.assertIsInstance(network_object, pytos.securetrack.xml_objects.rest.rules.Basic_Network_Object)
        self.assertTrue(network_object.id and network_object.name)
        # assert invalid requests
        with self.assertRaises(ValueError):
            self.helper.get_network_object_by_device_and_object_id(5555, g_network_object.id)
        with self.assertRaises(ValueError):
            self.helper.get_network_object_by_device_and_object_id(cisco_ASA_id, 55555)
    def test_06_get_member_network_objects_for_group_network_object(self):
        # assert valid request
        members = self.helper.get_member_network_objects_for_group_network_object(g_network_object_group, cisco_ASA_id)
        for member in members:
            self.assertIsInstance(member, (pytos.securetrack.xml_objects.rest.rules.Host_Network_Object,
                                           pytos.securetrack.xml_objects.rest.rules.Subnet_Network_Object))
            self.assertTrue(member.id and member.name)
        # assert invalid request
        with self.assertRaises(KeyError):
            self.helper.get_member_network_objects_for_group_network_object(g_network_object_group, 5555)
        with self.assertRaises(AttributeError):
            self.helper.get_member_network_objects_for_group_network_object(g_network_object, cisco_ASA_id)
class TestServices(unittest.TestCase):
    """Integration tests for service lookups against a live SecureTrack device.

    Relies on module-level globals set elsewhere in this file: ``conf``,
    ``cisco_ASA_id``, and (set by test_01) ``g_service`` / ``g_service_group``.
    """
    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))
    def test_01_get_services_for_device(self):
        """Fetch all services for the device; cache one service and one group for later tests."""
        global g_service
        global g_service_group
        # assert valid request
        services = self.helper.get_services_for_device(cisco_ASA_id)
        self.assertIsInstance(services, pytos.securetrack.xml_objects.rest.rules.Services_List)
        self.assertTrue(len(services) > 0)
        # save a single service for later uses
        g_service = services.services[0]
        # save a single service group for later uses
        for service in services:
            if service.type == "group" and len(service.members) > 1:
                g_service_group = service
        # assert invalid request: an unknown device id yields an empty list
        services = self.helper.get_services_for_device(5555)
        self.assertIsInstance(services, pytos.securetrack.xml_objects.rest.rules.Services_List)
        self.assertFalse(len(services))
    def test_02_get_service_for_device_by_name(self):
        # assert valid request
        service = self.helper.get_service_for_device_by_name(cisco_ASA_id, g_service.display_name)
        self.assertIsInstance(service, pytos.securetrack.xml_objects.rest.rules.Single_Service)
        self.assertTrue(service)
        # assert invalid request
        with self.assertRaises(ValueError):
            self.helper.get_service_for_device_by_name(cisco_ASA_id, "NotExsistingService")
    def test_03_get_service_by_device_and_object_id(self):
        # assert valid request
        service = self.helper.get_service_by_device_and_object_id(cisco_ASA_id, g_service.id)
        self.assertIsInstance(service, pytos.securetrack.xml_objects.rest.rules.Single_Service)
        # BUG FIX: assertTrue(x, y) treats y as a failure *message* and never
        # compares the two names; assertEqual performs the intended check.
        self.assertEqual(service.name, g_service.name)
        # assert invalid request
        with self.assertRaises(ValueError):
            self.helper.get_service_by_device_and_object_id(cisco_ASA_id, 9999999)
        with self.assertRaises(ValueError):
            self.helper.get_service_by_device_and_object_id(0, 467677)
    def test_04_get_member_services_for_group_service(self):
        # assert valid request
        member_services = self.helper.get_member_services_for_group_service(g_service_group, cisco_ASA_id)
        for member in member_services:
            self.assertIsInstance(member, pytos.securetrack.xml_objects.rest.rules.Single_Service)
            self.assertTrue(member.id and member.name)
        # assert invalid request (comment previously mislabelled these as valid)
        with self.assertRaises(KeyError):
            self.helper.get_member_services_for_group_service(g_service_group, 5555)
        with self.assertRaises(AttributeError):
            self.helper.get_member_services_for_group_service(g_service, cisco_ASA_id)
class TestGeneralSettings(unittest.TestCase):
    """Integration tests for SecureTrack change-authorization status."""
    def setUp(self):
        self.helper = pytos.securetrack.helpers.Secure_Track_Helper(conf.get("securetrack", "hostname"),
                                                                    (conf.get_username("securetrack"),
                                                                     conf.get_password("securetrack")))
    def test_03_get_change_authorization_status(self):
        # get revisions for device
        revisions = self.helper.get_device_revisions_by_id(device_id=cisco_ASA_id)
        self.assertIsInstance(revisions, pytos.securetrack.xml_objects.rest.device.Device_Revisions_List)
        self.assertTrue(len(revisions) > 1)
        # sort the revisions chronologically by date (oldest first)
        revisions = sorted(revisions, key=lambda x: x.date, reverse=False)
        # get the old and new version we want to check
        old_revision = revisions[0]
        new_revision = revisions[1]
        # assert valid request
        status = self.helper.get_change_authorization_status(old_revision.id, new_revision.id)
        self.assertIsInstance(status.new_revision, pytos.securetrack.xml_objects.rest.device.Device_Revision)
        self.assertIsInstance(status.old_revision, pytos.securetrack.xml_objects.rest.device.Device_Revision)
        self.assertIsInstance(status.change_authorization_bindings,
                              pytos.securetrack.xml_objects.rest.rules.ChangeAuthorizationBindings)
        self.assertTrue(status.old_revision.id and status.new_revision.id)
        # assert invalid request
        with self.assertRaises(REST_Not_Found_Error):
            self.helper.get_change_authorization_status(9999, 9999)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
import xlrd
import json
import os
from uuid import NAMESPACE_OID, uuid4, uuid5
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from ckan import model
from ckan.model import Session
from ckan.logic import get_action, action
from ckanext.harvest.model import HarvestJob, HarvestObject
from ckanext.harvest.harvesters import HarvesterBase
from pylons import config
from ..helpers.metadata import MetaDataParser
import logging
log = logging.getLogger(__name__)
class MeteoswissHarvester(HarvesterBase):
    '''
    The harvester for meteoswiss
    '''
    # CKAN user the harvest runs as.
    HARVEST_USER = u'harvest'
    # Excel workbook (on S3) holding the dataset metadata.
    METADATA_FILE_NAME = u'OGD@Bund_Metadaten_MeteoSchweiz.xlsx'
    # S3 bucket the metadata file and resources live in.
    BUCKET_NAME = u'opendata-ch'
    # AWS credentials come from the CKAN configuration.
    AWS_ACCESS_KEY = config.get('ckanext.meteoswiss.access_key')
    AWS_SECRET_KEY = config.get('ckanext.meteoswiss.secret_key')
    # Workbook sheets to harvest; the flag selects GM03-style descriptions.
    SHEETS = (
        # Sheet name           # Use GM03 descriptions
        (u'SMN', False),
        (u'SMN 3', False),
        (u'Föhnindex', False),
        (u'HomogeneDaten', False),
        (u'Klimanormwerte', True),
        (u'Kamerabild', True),
    )
    # Maps each sheet to the S3 key prefix its resource files are stored under.
    S3_PREFIXES = {
        u'SMN': 'ch.meteoschweiz.swissmetnet',
        u'SMN 3': 'ch.meteoschweiz.swissmetnet',
        u'Föhnindex': 'ch.meteoschweiz.swissmetnet-foehnindex',
        u'HomogeneDaten': 'ch.meteoschweiz.homogenereihen',
        u'Klimanormwerte': 'ch.meteoschweiz.normwerttabellen',
        u'Kamerabild': 'ch.meteoschweiz.kamerabilder',
    }
    # Localized publisher name, keyed by language code.
    ORGANIZATION = {
        u'de': u'Bundesamt für Meteorologie und Klimatologie MeteoSchweiz',
        u'fr': u'Office fédéral de météorologie et de climatologie MétéoSuisse',
        u'it': u'Ufficio federale di meteorologia e climatologia MeteoSvizzera',
        u'en': u'Federal Office of Meteorology and Climatology MeteoSwiss',
    }
    # Localized CKAN group names the datasets are filed under.
    GROUPS = {
        u'de': [u'Raum und Umwelt'],
        u'fr': [u'Espace et environnement'],
        u'it': [u'Territorio e ambiente'],
        u'en': [u'Territory and environment']
    }
def _get_s3_bucket(self):
'''
Create an S3 connection to the department bucket
'''
if not hasattr(self, '_bucket'):
conn = S3Connection(self.AWS_ACCESS_KEY, self.AWS_SECRET_KEY)
self._bucket = conn.get_bucket(self.BUCKET_NAME)
return self._bucket
def _fetch_metadata_file(self):
'''
Fetching the Excel metadata file from the S3 Bucket and save on disk
'''
try:
metadata_file = Key(self._get_s3_bucket())
metadata_file.key = self.METADATA_FILE_NAME
metadata_file.get_contents_to_filename(self.METADATA_FILE_NAME)
except Exception, e:
log.exception(e)
def _get_s3_resources(self, resources, s3_prefix):
'''
Lookup all files on S3, an match them with meta descriptions
'''
result = []
for key in self._get_s3_bucket().list(s3_prefix):
path = key.name.split('/')
if len(path) >= 2 and path[0] == s3_prefix and key.size > 0:
url = key.generate_url(0, query_auth=False, force_http=True)
name = os.path.basename(key.name)
data = {
u'url': url,
u'name': name,
u'format': self._guess_format(name),
}
description = self._description_lookup(resources, name)
if description:
data.update({u'description': description})
result.append(data)
return result
def _guess_format(self, path):
return os.path.splitext(path.lower())[1][1:]
def _description_lookup(self, resources, filename):
'''
Check if metafile declared a description to this resource
'''
basename, ext = os.path.splitext(filename)
for resource in resources:
if basename in resource.get('id', ''):
return resource.get('description')
if basename in resource.get('Standort', ''):
return resource.get('description')
def info(self):
return {
'name': 'meteoswiss',
'title': 'Meteoswiss',
'description': 'Harvests the meteoswiss data',
'form_config_interface': 'Text'
}
def gather_stage(self, harvest_job):
log.debug('In Meteoswiss gather_stage')
self._fetch_metadata_file()
ids = []
for sheet_name, use_gm03_desc in self.SHEETS:
log.debug('Gathering %s' % sheet_name)
parser = MetaDataParser(self.METADATA_FILE_NAME)
metadata = parser.parse_sheet(sheet_name, use_gm03_desc)
metadata['translations'].extend(self._metadata_term_translations())
metadata['sheet_name'] = sheet_name
obj = HarvestObject(
#guid = metadata.get('id'),
job = harvest_job,
content = json.dumps(metadata)
)
obj.save()
ids.append(obj.id)
return ids
def fetch_stage(self, harvest_object):
log.debug('In Meteoswiss fetch_stage')
package_dict = json.loads(harvest_object.content)
sheet_name = package_dict.get('sheet_name')
s3_prefix = self.S3_PREFIXES.get(sheet_name)
if s3_prefix:
log.debug('Loading S3 Resources for %s' % sheet_name)
package_dict['resources'] = self._get_s3_resources(
package_dict.get('resources', []),
s3_prefix
)
harvest_object.content = json.dumps(package_dict)
harvest_object.save()
return True
def _create_uuid(self, name=None):
'''
Create a new SHA-1 uuid for a given name or a random id
'''
if name:
new_uuid = uuid5(NAMESPACE_OID, str(name))
else:
new_uuid = uuid4()
return unicode(new_uuid)
def import_stage(self, harvest_object):
log.debug('In Meteoswiss import_stage')
if not harvest_object:
log.error('No harvest object received')
return False
try:
package_dict = json.loads(harvest_object.content)
user = model.User.get(self.HARVEST_USER)
context = {
'model': model,
'session': Session,
'user': self.HARVEST_USER
}
package_dict['id'] = self._create_uuid(package_dict.get('id'))
# Find or create group the dataset should get assigned to
package_dict['groups'] = self._find_or_create_groups(context)
# Find or create the organization the dataset should get assigned to
package_dict['owner_org'] = self._find_or_create_organization(context)
# Never import state from data source!
if 'state' in package_dict:
del package_dict['state']
# Split tags
tags = package_dict.get('tags', '').split(',')
tags = [tag.strip() for tag in tags]
if '' not in tags and '(tbd)' not in tags:
package_dict['tags'] = tags
else:
del package_dict['tags']
package = model.Package.get(package_dict['id'])
model.PackageRole(package=package, user=user, role=model.Role.ADMIN)
#log.debug('Save or update package %s' % (package_dict['name'],))
result = self._create_or_update_package(package_dict, harvest_object)
log.debug('Save or update term translations')
self._submit_term_translations(context, package_dict)
Session.commit()
except Exception, e:
log.exception(e)
raise e
return True
def _find_or_create_groups(self, context):
group_name = self.GROUPS['de'][0]
data_dict = {
'id': group_name,
'name': self._gen_new_name(group_name),
'title': group_name
}
try:
group = get_action('group_show')(context, data_dict)
except:
group = get_action('group_create')(context, data_dict)
log.info('created the group ' + group['id'])
group_ids = []
group_ids.append(group['id'])
return group_ids
def _find_or_create_organization(self, context):
try:
data_dict = {
'permission': 'edit_group',
'id': self._gen_new_name(self.ORGANIZATION[u'de']),
'name': self._gen_new_name(self.ORGANIZATION[u'de']),
'title': self.ORGANIZATION[u'de']
}
organization = get_action('organization_show')(context, data_dict)
except:
organization = get_action('organization_create')(context, data_dict)
return organization['id']
def _metadata_term_translations(self):
'''
Generate term translatations for organizations
'''
try:
translations = []
for lang, org in self.ORGANIZATION.items():
if lang != u'de':
translations.append({
'lang_code': lang,
'term': self.ORGANIZATION[u'de'],
'term_translation': org
})
return translations
except Exception, e:
log.exception(e)
return []
def _submit_term_translations(self, context, package_dict):
for translation in package_dict['translations']:
action.update.term_translation_update(context, translation)
make bucket name configurable in config
# -*- coding: utf-8 -*-
import xlrd
import json
import os
from uuid import NAMESPACE_OID, uuid4, uuid5
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from ckan import model
from ckan.model import Session
from ckan.logic import get_action, action
from ckanext.harvest.model import HarvestJob, HarvestObject
from ckanext.harvest.harvesters import HarvesterBase
from pylons import config
from ..helpers.metadata import MetaDataParser
import logging
log = logging.getLogger(__name__)
class MeteoswissHarvester(HarvesterBase):
    '''
    The harvester for meteoswiss

    Reads an Excel metadata workbook and resource files from an S3
    bucket and imports them as CKAN datasets (gather/fetch/import).
    '''

    # CKAN user the harvest runs as.
    HARVEST_USER = u'harvest'
    # Name of the Excel metadata workbook stored in the S3 bucket.
    METADATA_FILE_NAME = u'OGD@Bund_Metadaten_MeteoSchweiz.xlsx'
    # S3 bucket holding both the metadata workbook and the resources;
    # configurable, falling back to the historical default.
    BUCKET_NAME = config.get('ckanext.meteoswiss.bucket_name', 'opendata-ch')
    # AWS credentials are read from the CKAN (pylons) configuration.
    AWS_ACCESS_KEY = config.get('ckanext.meteoswiss.access_key')
    AWS_SECRET_KEY = config.get('ckanext.meteoswiss.secret_key')

    # Worksheets to harvest, as (sheet name, use GM03 descriptions).
    SHEETS = (
        # Sheet name       # Use GM03 descriptions
        (u'SMN', False),
        (u'SMN 3', False),
        (u'Föhnindex', False),
        (u'HomogeneDaten', False),
        (u'Klimanormwerte', True),
        (u'Kamerabild', True),
    )

    # Maps a sheet name to the S3 key prefix its resource files live under.
    S3_PREFIXES = {
        u'SMN': 'ch.meteoschweiz.swissmetnet',
        u'SMN 3': 'ch.meteoschweiz.swissmetnet',
        u'Föhnindex': 'ch.meteoschweiz.swissmetnet-foehnindex',
        u'HomogeneDaten': 'ch.meteoschweiz.homogenereihen',
        u'Klimanormwerte': 'ch.meteoschweiz.normwerttabellen',
        u'Kamerabild': 'ch.meteoschweiz.kamerabilder',
    }

    # Publishing organization title, per language code.
    ORGANIZATION = {
        u'de': u'Bundesamt für Meteorologie und Klimatologie MeteoSchweiz',
        u'fr': u'Office fédéral de météorologie et de climatologie MétéoSuisse',
        u'it': u'Ufficio federale di meteorologia e climatologia MeteoSvizzera',
        u'en': u'Federal Office of Meteorology and Climatology MeteoSwiss',
    }

    # Group titles the datasets get assigned to, per language code.
    GROUPS = {
        u'de': [u'Raum und Umwelt'],
        u'fr': [u'Espace et environnement'],
        u'it': [u'Territorio e ambiente'],
        u'en': [u'Territory and environment']
    }

    def _get_s3_bucket(self):
        '''
        Create an S3 connection to the department bucket

        The bucket object is cached on the instance after the first call.
        '''
        if not hasattr(self, '_bucket'):
            conn = S3Connection(self.AWS_ACCESS_KEY, self.AWS_SECRET_KEY)
            self._bucket = conn.get_bucket(self.BUCKET_NAME)
        return self._bucket

    def _fetch_metadata_file(self):
        '''
        Fetching the Excel metadata file from the S3 Bucket and save on disk

        Errors are logged but deliberately not re-raised (best effort).
        '''
        try:
            metadata_file = Key(self._get_s3_bucket())
            metadata_file.key = self.METADATA_FILE_NAME
            metadata_file.get_contents_to_filename(self.METADATA_FILE_NAME)
        except Exception as e:
            log.exception(e)

    def _get_s3_resources(self, resources, s3_prefix):
        '''
        Lookup all files on S3, an match them with meta descriptions

        Returns a list of resource dicts (url/name/format, and description
        when one can be matched from the metadata sheet).
        '''
        result = []
        for key in self._get_s3_bucket().list(s3_prefix):
            path = key.name.split('/')
            # Only real files directly under the prefix; skip "directory"
            # placeholder keys (size 0).
            if len(path) >= 2 and path[0] == s3_prefix and key.size > 0:
                # expires_in=0 with query_auth=False yields a public URL.
                url = key.generate_url(0, query_auth=False, force_http=True)
                name = os.path.basename(key.name)
                data = {
                    u'url': url,
                    u'name': name,
                    u'format': self._guess_format(name),
                }
                description = self._description_lookup(resources, name)
                if description:
                    data.update({u'description': description})
                result.append(data)
        return result

    def _guess_format(self, path):
        """Guess a format from the file extension (lowercased, no dot)."""
        return os.path.splitext(path.lower())[1][1:]

    def _description_lookup(self, resources, filename):
        '''
        Check if metafile declared a description to this resource
        '''
        basename, ext = os.path.splitext(filename)
        for resource in resources:
            if basename in resource.get('id', ''):
                return resource.get('description')
            if basename in resource.get('Standort', ''):
                return resource.get('description')

    def info(self):
        """Harvester metadata shown in the CKAN harvest admin UI."""
        return {
            'name': 'meteoswiss',
            'title': 'Meteoswiss',
            'description': 'Harvests the meteoswiss data',
            'form_config_interface': 'Text'
        }

    def gather_stage(self, harvest_job):
        """Parse every configured sheet and queue one HarvestObject each."""
        log.debug('In Meteoswiss gather_stage')
        self._fetch_metadata_file()
        ids = []
        for sheet_name, use_gm03_desc in self.SHEETS:
            log.debug('Gathering %s' % sheet_name)
            parser = MetaDataParser(self.METADATA_FILE_NAME)
            metadata = parser.parse_sheet(sheet_name, use_gm03_desc)
            metadata['translations'].extend(self._metadata_term_translations())
            metadata['sheet_name'] = sheet_name
            obj = HarvestObject(
                #guid = metadata.get('id'),
                job=harvest_job,
                content=json.dumps(metadata)
            )
            obj.save()
            ids.append(obj.id)
        return ids

    def fetch_stage(self, harvest_object):
        """Attach the matching S3 resources to the queued package dict."""
        log.debug('In Meteoswiss fetch_stage')
        package_dict = json.loads(harvest_object.content)
        sheet_name = package_dict.get('sheet_name')
        s3_prefix = self.S3_PREFIXES.get(sheet_name)
        if s3_prefix:
            log.debug('Loading S3 Resources for %s' % sheet_name)
            package_dict['resources'] = self._get_s3_resources(
                package_dict.get('resources', []),
                s3_prefix
            )
        harvest_object.content = json.dumps(package_dict)
        harvest_object.save()
        return True

    def _create_uuid(self, name=None):
        '''
        Create a new SHA-1 uuid for a given name or a random id
        '''
        if name:
            # uuid5 is deterministic, so re-harvesting the same name
            # yields the same dataset id.
            new_uuid = uuid5(NAMESPACE_OID, str(name))
        else:
            new_uuid = uuid4()
        return unicode(new_uuid)

    def import_stage(self, harvest_object):
        """Create/update the CKAN package from the harvested content."""
        log.debug('In Meteoswiss import_stage')
        if not harvest_object:
            log.error('No harvest object received')
            return False
        try:
            package_dict = json.loads(harvest_object.content)
            user = model.User.get(self.HARVEST_USER)
            context = {
                'model': model,
                'session': Session,
                'user': self.HARVEST_USER
            }
            package_dict['id'] = self._create_uuid(package_dict.get('id'))
            # Find or create group the dataset should get assigned to
            package_dict['groups'] = self._find_or_create_groups(context)
            # Find or create the organization the dataset gets assigned to
            package_dict['owner_org'] = self._find_or_create_organization(context)
            # Never import state from data source!
            package_dict.pop('state', None)
            # Split comma-separated tags; drop them entirely when any tag
            # is empty or a '(tbd)' placeholder.
            tags = package_dict.get('tags', '').split(',')
            tags = [tag.strip() for tag in tags]
            if '' not in tags and '(tbd)' not in tags:
                package_dict['tags'] = tags
            else:
                # pop() instead of del: 'tags' may be missing entirely,
                # in which case del would raise KeyError.
                package_dict.pop('tags', None)
            package = model.Package.get(package_dict['id'])
            model.PackageRole(package=package, user=user, role=model.Role.ADMIN)
            self._create_or_update_package(package_dict, harvest_object)
            log.debug('Save or update term translations')
            self._submit_term_translations(context, package_dict)
            Session.commit()
        except Exception as e:
            log.exception(e)
            raise e
        return True

    def _find_or_create_groups(self, context):
        """Return the id of the (possibly newly created) default group."""
        group_name = self.GROUPS['de'][0]
        data_dict = {
            'id': group_name,
            'name': self._gen_new_name(group_name),
            'title': group_name
        }
        try:
            group = get_action('group_show')(context, data_dict)
        except Exception:
            # group_show raised (group not found): create it instead.
            group = get_action('group_create')(context, data_dict)
            log.info('created the group ' + group['id'])
        group_ids = []
        group_ids.append(group['id'])
        return group_ids

    def _find_or_create_organization(self, context):
        """Return the id of the (possibly newly created) organization."""
        # Build the dict outside the try block so the except branch can
        # never hit a NameError on a partially constructed data_dict.
        data_dict = {
            'permission': 'edit_group',
            'id': self._gen_new_name(self.ORGANIZATION[u'de']),
            'name': self._gen_new_name(self.ORGANIZATION[u'de']),
            'title': self.ORGANIZATION[u'de']
        }
        try:
            organization = get_action('organization_show')(context, data_dict)
        except Exception:
            # organization_show raised (not found): create it instead.
            organization = get_action('organization_create')(context, data_dict)
        return organization['id']

    def _metadata_term_translations(self):
        '''
        Generate term translatations for organizations
        '''
        try:
            translations = []
            for lang, org in self.ORGANIZATION.items():
                if lang != u'de':
                    translations.append({
                        'lang_code': lang,
                        'term': self.ORGANIZATION[u'de'],
                        'term_translation': org
                    })
            return translations
        except Exception as e:
            log.exception(e)
            return []

    def _submit_term_translations(self, context, package_dict):
        """Push every collected term translation into CKAN."""
        for translation in package_dict['translations']:
            action.update.term_translation_update(context, translation)
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Terminal Plugin."""
import os
import sys
import time
import subprocess
import os.path as osp
from qtpy.QtWidgets import (QApplication, QMessageBox, QVBoxLayout, QMenu,
QShortcut)
from qtpy.QtCore import Qt, Signal
from qtpy.QtGui import QKeySequence
from spyder.plugins import SpyderPluginWidget
# from spyder.preferences import PluginConfigPage
from spyder.config.base import _
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import (create_action, create_toolbutton,
add_actions)
from spyder.widgets.tabs import Tabs
from spyder.config.gui import set_shortcut, config_shortcut
# from spyder.plugins import SpyderPluginWidget
from spyder_terminal.widgets.terminalgui import TerminalWidget
from spyder.py3compat import is_text_string, to_text_string
from spyder.utils.misc import select_port
from spyder.py3compat import getcwd
LOCATION = osp.realpath(osp.join(os.getcwd(),
osp.dirname(__file__)))
# class TerminalConfigPage(PluginConfigPage):
# """Terminal plugin preferences."""
# pass
class TerminalPlugin(SpyderPluginWidget):
    """Terminal plugin."""

    # Section name used for this plugin's options in Spyder's config.
    CONF_SECTION = 'terminal'
    # Emitted so the main window can track which plugin has focus.
    focus_changed = Signal()

    def __init__(self, parent):
        """Widget constructor."""
        SpyderPluginWidget.__init__(self, parent)
        self.tab_widget = None
        self.menu_actions = None
        # Pick a free TCP port and launch the terminal backend server on it.
        self.port = select_port(default_port=8070)
        self.server = subprocess.Popen(
            ['python', osp.join(LOCATION, 'server', 'main.py'),
             '--port', str(self.port)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        # Give the server a moment to come up before terminals connect.
        time.sleep(0.5)
        self.main = parent
        self.terms = []
        self.untitled_num = 0
        self.initialize_plugin()
        layout = QVBoxLayout()
        new_term_btn = create_toolbutton(self,
                                         icon=ima.icon('project_expanded'),
                                         tip=_('Open a new terminal'),
                                         triggered=self.create_new_term)
        menu_btn = create_toolbutton(self, icon=ima.icon('tooloptions'),
                                     tip=_('Options'))
        self.menu = QMenu(self)
        menu_btn.setMenu(self.menu)
        menu_btn.setPopupMode(menu_btn.InstantPopup)
        add_actions(self.menu, self.menu_actions)
        # if self.get_option('first_time', True):
        #     self.setup_shortcuts()
        # self.shortcuts = self.create_shortcuts()
        corner_widgets = {Qt.TopRightCorner: [new_term_btn, menu_btn]}
        self.tabwidget = Tabs(self, menu=self.menu, actions=self.menu_actions,
                              corner_widgets=corner_widgets)
        if hasattr(self.tabwidget, 'setDocumentMode') \
           and not sys.platform == 'darwin':
            # Don't set document mode to true on OSX because it generates
            # a crash when the console is detached from the main window
            # Fixes Issue 561
            self.tabwidget.setDocumentMode(True)
        self.tabwidget.currentChanged.connect(self.refresh_plugin)
        self.tabwidget.move_data.connect(self.move_tab)
        self.tabwidget.set_close_function(self.close_term)
        layout.addWidget(self.tabwidget)
        self.setLayout(layout)
        # Ctrl+Shift+T opens a new terminal tab.
        paste_shortcut = QShortcut(QKeySequence("Ctrl+Shift+T"),
                                   self, self.create_new_term)
        paste_shortcut.setContext(Qt.WidgetWithChildrenShortcut)

    # ------ SpyderPluginMixin API --------------------------------
    def on_first_registration(self):
        """Action to be performed on first plugin registration."""
        self.main.tabify_plugins(self.main.extconsole, self)

    def update_font(self):
        """Update font from Preferences."""
        font = self.get_plugin_font()
        for term in self.terms:
            term.set_font(font.family())

    # ------ SpyderPluginWidget API ------------------------------
    def get_plugin_title(self):
        """Return widget title."""
        title = _('System Terminal')
        return title

    def get_plugin_icon(self):
        """Return widget icon."""
        return ima.icon('cmdprompt')

    def get_plugin_actions(self):
        """Get plugin actions."""
        self.menu_actions = []
        return self.menu_actions

    def get_focus_widget(self):
        """
        Set focus on current selected terminal.

        Return the widget to give focus to when
        this plugin's dockwidget is raised on top-level.
        """
        term = self.tabwidget.currentWidget()
        if term is not None:
            return term.view

    def closing_plugin(self, cancelable=False):
        """Perform actions before parent main window is closed."""
        for term in self.terms:
            term.close()
        # Shut down the backend server process as well.
        self.server.terminate()
        return True

    def refresh_plugin(self):
        """Refresh tabwidget."""
        term = None
        if self.tabwidget.count():
            term = self.tabwidget.currentWidget()
            term.view.setFocus()
        else:
            term = None

    def register_plugin(self):
        """Register plugin in Spyder's main window."""
        self.focus_changed.connect(self.main.plugin_focus_changed)
        self.main.add_dockwidget(self)
        # Always start with one terminal tab open.
        self.create_new_term(give_focus=False)

    # ------ Public API (for terminals) -------------------------
    def get_terms(self):
        """Return terminal list."""
        return [cl for cl in self.terms if isinstance(cl, TerminalWidget)]

    def get_focus_term(self):
        """Return current terminal with focus, if any."""
        widget = QApplication.focusWidget()
        for term in self.get_terms():
            if widget is term:
                return term

    def get_current_term(self):
        """Return the currently selected terminal."""
        try:
            terminal = self.tabwidget.currentWidget()
        except AttributeError:
            terminal = None
        if terminal is not None:
            return terminal

    def create_new_term(self, name=None, give_focus=True):
        """Add a new terminal tab."""
        font = self.get_plugin_font()
        term = TerminalWidget(self, self.port, path=getcwd(),
                              font=font.family())
        self.add_tab(term)

    def close_term(self, index=None, term=None):
        """Close a terminal tab."""
        if not self.tabwidget.count():
            return
        if term is not None:
            index = self.tabwidget.indexOf(term)
        if index is None and term is None:
            index = self.tabwidget.currentIndex()
        if index is not None:
            term = self.tabwidget.widget(index)
        term.close()
        self.tabwidget.removeTab(self.tabwidget.indexOf(term))
        self.terms.remove(term)
        # Keep at least one terminal open at all times.
        if self.tabwidget.count() == 0:
            self.create_new_term()

    # ------ Public API (for tabs) ---------------------------
    def add_tab(self, widget):
        """Add tab."""
        self.terms.append(widget)
        num_term = self.tabwidget.count() + 1
        index = self.tabwidget.addTab(widget, "Terminal {0}".format(num_term))
        self.tabwidget.setCurrentIndex(index)
        self.tabwidget.setTabToolTip(index, "Terminal {0}".format(num_term))
        if self.dockwidget and not self.ismaximized:
            self.dockwidget.setVisible(True)
            self.dockwidget.raise_()
        self.activateWindow()
        widget.view.setFocus()

    def move_tab(self, index_from, index_to):
        """
        Move tab (tabs themselves have already been moved by the tabwidget).

        Allows to change order of tabs.
        """
        term = self.terms.pop(index_from)
        self.terms.insert(index_to, term)
Update plugin name to Terminal
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Terminal Plugin."""
import os
import sys
import time
import subprocess
import os.path as osp
from qtpy.QtWidgets import (QApplication, QMessageBox, QVBoxLayout, QMenu,
QShortcut)
from qtpy.QtCore import Qt, Signal
from qtpy.QtGui import QKeySequence
from spyder.plugins import SpyderPluginWidget
# from spyder.preferences import PluginConfigPage
from spyder.config.base import _
from spyder.utils import icon_manager as ima
from spyder.utils.qthelpers import (create_action, create_toolbutton,
add_actions)
from spyder.widgets.tabs import Tabs
from spyder.config.gui import set_shortcut, config_shortcut
# from spyder.plugins import SpyderPluginWidget
from spyder_terminal.widgets.terminalgui import TerminalWidget
from spyder.py3compat import is_text_string, to_text_string
from spyder.utils.misc import select_port
from spyder.py3compat import getcwd
LOCATION = osp.realpath(osp.join(os.getcwd(),
osp.dirname(__file__)))
# class TerminalConfigPage(PluginConfigPage):
# """Terminal plugin preferences."""
# pass
class TerminalPlugin(SpyderPluginWidget):
    """Terminal plugin."""

    # Section name used for this plugin's options in Spyder's config.
    CONF_SECTION = 'terminal'
    # Emitted so the main window can track which plugin has focus.
    focus_changed = Signal()

    def __init__(self, parent):
        """Widget constructor."""
        SpyderPluginWidget.__init__(self, parent)
        self.tab_widget = None
        self.menu_actions = None
        # Pick a free TCP port and launch the terminal backend server on it.
        self.port = select_port(default_port=8070)
        self.server = subprocess.Popen(
            ['python', osp.join(LOCATION, 'server', 'main.py'),
             '--port', str(self.port)],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
        # Give the server a moment to come up before terminals connect.
        time.sleep(0.5)
        self.main = parent
        self.terms = []
        self.untitled_num = 0
        self.initialize_plugin()
        layout = QVBoxLayout()
        new_term_btn = create_toolbutton(self,
                                         icon=ima.icon('project_expanded'),
                                         tip=_('Open a new terminal'),
                                         triggered=self.create_new_term)
        menu_btn = create_toolbutton(self, icon=ima.icon('tooloptions'),
                                     tip=_('Options'))
        self.menu = QMenu(self)
        menu_btn.setMenu(self.menu)
        menu_btn.setPopupMode(menu_btn.InstantPopup)
        add_actions(self.menu, self.menu_actions)
        # if self.get_option('first_time', True):
        #     self.setup_shortcuts()
        # self.shortcuts = self.create_shortcuts()
        corner_widgets = {Qt.TopRightCorner: [new_term_btn, menu_btn]}
        self.tabwidget = Tabs(self, menu=self.menu, actions=self.menu_actions,
                              corner_widgets=corner_widgets)
        if hasattr(self.tabwidget, 'setDocumentMode') \
           and not sys.platform == 'darwin':
            # Don't set document mode to true on OSX because it generates
            # a crash when the console is detached from the main window
            # Fixes Issue 561
            self.tabwidget.setDocumentMode(True)
        self.tabwidget.currentChanged.connect(self.refresh_plugin)
        self.tabwidget.move_data.connect(self.move_tab)
        self.tabwidget.set_close_function(self.close_term)
        layout.addWidget(self.tabwidget)
        self.setLayout(layout)
        # Ctrl+Shift+T opens a new terminal tab.
        paste_shortcut = QShortcut(QKeySequence("Ctrl+Shift+T"),
                                   self, self.create_new_term)
        paste_shortcut.setContext(Qt.WidgetWithChildrenShortcut)

    # ------ SpyderPluginMixin API --------------------------------
    def on_first_registration(self):
        """Action to be performed on first plugin registration."""
        self.main.tabify_plugins(self.main.extconsole, self)

    def update_font(self):
        """Update font from Preferences."""
        font = self.get_plugin_font()
        for term in self.terms:
            term.set_font(font.family())

    # ------ SpyderPluginWidget API ------------------------------
    def get_plugin_title(self):
        """Return widget title."""
        title = _('Terminal')
        return title

    def get_plugin_icon(self):
        """Return widget icon."""
        return ima.icon('cmdprompt')

    def get_plugin_actions(self):
        """Get plugin actions."""
        self.menu_actions = []
        return self.menu_actions

    def get_focus_widget(self):
        """
        Set focus on current selected terminal.

        Return the widget to give focus to when
        this plugin's dockwidget is raised on top-level.
        """
        term = self.tabwidget.currentWidget()
        if term is not None:
            return term.view

    def closing_plugin(self, cancelable=False):
        """Perform actions before parent main window is closed."""
        for term in self.terms:
            term.close()
        # Shut down the backend server process as well.
        self.server.terminate()
        return True

    def refresh_plugin(self):
        """Refresh tabwidget."""
        term = None
        if self.tabwidget.count():
            term = self.tabwidget.currentWidget()
            term.view.setFocus()
        else:
            term = None

    def register_plugin(self):
        """Register plugin in Spyder's main window."""
        self.focus_changed.connect(self.main.plugin_focus_changed)
        self.main.add_dockwidget(self)
        # Always start with one terminal tab open.
        self.create_new_term(give_focus=False)

    # ------ Public API (for terminals) -------------------------
    def get_terms(self):
        """Return terminal list."""
        return [cl for cl in self.terms if isinstance(cl, TerminalWidget)]

    def get_focus_term(self):
        """Return current terminal with focus, if any."""
        widget = QApplication.focusWidget()
        for term in self.get_terms():
            if widget is term:
                return term

    def get_current_term(self):
        """Return the currently selected terminal."""
        try:
            terminal = self.tabwidget.currentWidget()
        except AttributeError:
            terminal = None
        if terminal is not None:
            return terminal

    def create_new_term(self, name=None, give_focus=True):
        """Add a new terminal tab."""
        font = self.get_plugin_font()
        term = TerminalWidget(self, self.port, path=getcwd(),
                              font=font.family())
        self.add_tab(term)

    def close_term(self, index=None, term=None):
        """Close a terminal tab."""
        if not self.tabwidget.count():
            return
        if term is not None:
            index = self.tabwidget.indexOf(term)
        if index is None and term is None:
            index = self.tabwidget.currentIndex()
        if index is not None:
            term = self.tabwidget.widget(index)
        term.close()
        self.tabwidget.removeTab(self.tabwidget.indexOf(term))
        self.terms.remove(term)
        # Keep at least one terminal open at all times.
        if self.tabwidget.count() == 0:
            self.create_new_term()

    # ------ Public API (for tabs) ---------------------------
    def add_tab(self, widget):
        """Add tab."""
        self.terms.append(widget)
        num_term = self.tabwidget.count() + 1
        index = self.tabwidget.addTab(widget, "Terminal {0}".format(num_term))
        self.tabwidget.setCurrentIndex(index)
        self.tabwidget.setTabToolTip(index, "Terminal {0}".format(num_term))
        if self.dockwidget and not self.ismaximized:
            self.dockwidget.setVisible(True)
            self.dockwidget.raise_()
        self.activateWindow()
        widget.view.setFocus()

    def move_tab(self, index_from, index_to):
        """
        Move tab (tabs themselves have already been moved by the tabwidget).

        Allows to change order of tabs.
        """
        term = self.terms.pop(index_from)
        self.terms.insert(index_to, term)
|
# cmd to run tests: coverage run -m unittest discover ./km/tests
import unittest
import os
from km.utils import MutationFinder as umf
from km.utils.Jellyfish import Jellyfish
class kmMuttaionTest(unittest.TestCase):
    """Integration tests for km's MutationFinder on bundled jellyfish DBs."""

    def _load_fasta(self, seq_f):
        """Return (ref_name, sequence) for the FASTA file *seq_f*.

        ref_name is the file's base name without extension; the sequence
        is the concatenation of all non-header lines.
        """
        base_name = os.path.basename(seq_f)
        (ref_name, ext) = os.path.splitext(base_name)
        ref_seq = []
        # 'with' closes the handle (the original leaked it), and
        # startswith() is safe on blank lines, where line[0] would raise.
        with open(seq_f, "r") as fasta:
            for line in fasta:
                line = line.strip()
                if not line or line.startswith('>'):
                    continue
                ref_seq.append(line)
        return ref_name, ''.join(ref_seq)

    def _last_path_quant(self, seq_f, jf_f):
        """Run MutationFinder for *seq_f* against *jf_f*; return last path."""
        ref_name, ref_seq = self._load_fasta(seq_f)
        jf = Jellyfish(jf_f, cutoff=0.05, n_cutoff=5)
        finder = umf.MutationFinder(
            ref_name, ref_seq, jf,
            False, 500
        )
        paths_quant = finder.get_paths_quant()
        # Only the last reported path is asserted on in these tests.
        return paths_quant[len(paths_quant) - 1]

    def testNPM1(self):
        """NPM1 4-bp insertion is detected with the expected sequence."""
        paths_quant = self._last_path_quant(
            "./data/catalog/GRCh38/NPM1_4ins_exons_10-11utr.fa",
            "./data/jf/02H025_NPM1.jf")
        self.assertEqual(str(paths_quant.get_variant_name()),
                         "Insertion\t45:/TCTG:45",
                         "Test fail: NPM1 -> variant name")
        self.assertEqual(paths_quant.get_sequence(),
                         "CGGATGACTGACCAAGAGGCTATTCAAGATCTCTGTCTGGCAGTGGAGGAAGTCTCTTTAAGAAAATAG",
                         "Test fail: NPM1 -> sequence")
        # AATTGCTTCCGGATGACTGACCAAGAGGCTATTCAAGATCTCTGTCTGGCAGTGGAGGAAGTCTCTTTAAGAAAATAGTTTAAA

    def testFLT3_ITD(self):
        """FLT3 internal tandem duplication is detected."""
        paths_quant = self._last_path_quant(
            "./data/catalog/GRCh38/FLT3-ITD_exons_13-15.fa",
            "./data/jf/03H116_ITD.jf")
        self.assertEqual(str(paths_quant.get_variant_name()),
                         "ITD\t204:/AACTCCCATTTGAGATCATATTCATATTCTCTGAAATCAACGTAGAAGTACTCATTATCTGAGGAGCCGGTCACC:204",
                         "Test fail: FLT3-ITD -> variant name")
        self.assertEqual(paths_quant.get_sequence(),
                         "TACCTTCCCAAACTCTAAATTTTCTCTTGGAAACTCCCATTTGAGATCATATTCATATTCTCTGAAATCAACGTAGAAGTACTCATTATCTGAGGAGCCGGTCACCAACTCCCATTTGAGATCATATTCATATTCTCTGAAATCAACGTAGAAGTACTCATTATCTGAGGAGCCGGTCACCTGTACCATCTGTAGCTGGCTTTCATACCTA",
                         "Test fail: FLT3-ITD -> sequence")

    def testFLT3_TKD(self):
        """FLT3-TKD 3-bp deletion is detected."""
        paths_quant = self._last_path_quant(
            "./data/catalog/GRCh38/FLT3-TKD_exon_20.fa",
            "./data/jf/05H094_FLT3-TKD_del.jf")
        self.assertEqual(str(paths_quant.get_variant_name()),
                         "Deletion\t32:gat/:35",
                         "Test fail: FLT3-TKD -> variant name")
        self.assertEqual(paths_quant.get_sequence(),
                         # "TGCCCCTGACAACATAGTTGGAATCACTCATATCTCGAGCCAATCCAAAGTCACATATCTTCACC",
                         "TGCCCCTGACAACATAGTTGGAATCACTCATATCTCGAGCCAATCCAAAGTCACATATCTT",
                         "Test fail: FLT3-TKD -> sequence")

    def testDNMT3A(self):
        """DNMT3A R882 substitution is detected."""
        paths_quant = self._last_path_quant(
            "./data/catalog/GRCh38/DNMT3A_R882_exon_23.fa",
            "./data/jf/02H033_DNMT3A_sub.jf")
        self.assertEqual(str(paths_quant.get_variant_name()),
                         "Substitution\t33:c/T:34",
                         "Test fail: DNMT3A -> variant name")
        self.assertEqual(paths_quant.get_sequence(),
                         # "ATGACCGGCCCAGCAGTCTCTGCCTCGCCAAGCGGCTCATGTTGGAGACGTCAGTATAGTGGACT",
                         "TGACCGGCCCAGCAGTCTCTGCCTCGCCAAGTGGCTCATGTTGGAGACGTCAGTATAGTGGA",
                         "Test fail: DNMT3A -> sequence")

    def test_not_linear(self):
        """A non-linear (repetitive) reference must raise ValueError."""
        ref_seq = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
        k_len = 31
        ref_name = "not_linear"
        jf = Jellyfish("./data/jf/02H033_DNMT3A_sub.jf")
        finder = umf.MutationFinder("", "", jf, False, 500)
        with self.assertRaises(ValueError):
            finder.get_ref_kmer(ref_seq, 31, ref_name)
def runTests():
    """Entry point: run all tests defined in this module."""
    unittest.main()
if __name__ == "__main__":
    # Allow running this test module directly.
    runTests()
Add tests covering the find_mutation tool
# cmd to run test: coverage run -m unittest discover ./km/tests
import unittest
import os
import sys
from argparse import Namespace
from km.tools import find_mutation as fm
from km.utils.Jellyfish import Jellyfish
from km.utils import MutationFinder as umf
from contextlib import contextmanager
from StringIO import StringIO
@contextmanager
def captured_output():
    """Temporarily replace sys.stdout/sys.stderr with StringIO buffers.

    Yields the (stdout, stderr) buffers; the real streams are restored
    on exit even if the body raises.
    """
    buf_out, buf_err = StringIO(), StringIO()
    saved_out, saved_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = buf_out, buf_err
        yield buf_out, buf_err
    finally:
        sys.stdout, sys.stderr = saved_out, saved_err
class kmMuttaionTest(unittest.TestCase):
    """End-to-end tests driving the find_mutation tool via CLI-style args."""

    def _find_mut_fields(self, jellyfish_fn, target_fn):
        """Run find_mutation and return the tab-split fields of output row 17.

        Factors out the Namespace construction, stdout capture, and
        output parsing that every test previously duplicated.
        """
        args = Namespace(
            count=5,
            graphical=False,
            jellyfish_fn=jellyfish_fn,
            ratio=0.05,
            steps=500,
            target_fn=[target_fn],
            verbose=False
        )
        with captured_output() as (out, err):
            fm.main_find_mut(args, None)
        output = out.getvalue().split("\n")
        # Line 16 (0-based) holds the variant row these tests assert on.
        return output[16].split("\t")

    def testNPM1(self):
        """NPM1 4-bp insertion is reported by the tool."""
        output = self._find_mut_fields(
            './data/jf/02H025_NPM1.jf',
            "./data/catalog/GRCh38/NPM1_4ins_exons_10-11utr.fa")
        self.assertEqual(output[2],
                         "Insertion",
                         "Test fail: NPM1 -> type")
        self.assertEqual(output[3],
                         "45:/TCTG:45",
                         "Test fail: NPM1 -> variant")
        self.assertEqual(output[7],
                         "CGGATGACTGACCAAGAGGCTATTCAAGATCTCTGTCTGGCAGTGGAGGAAGTCTCTTTAAGAAAATAG",
                         "Test fail: NPM1 -> sequence")
        # AATTGCTTCCGGATGACTGACCAAGAGGCTATTCAAGATCTCTGTCTGGCAGTGGAGGAAGTCTCTTTAAGAAAATAGTTTAAA

    def testFLT3_ITD(self):
        """FLT3 internal tandem duplication is reported by the tool."""
        output = self._find_mut_fields(
            './data/jf/03H116_ITD.jf',
            "./data/catalog/GRCh38/FLT3-ITD_exons_13-15.fa")
        self.assertEqual(output[2],
                         "ITD",
                         "Test fail: FLT3-ITD -> type")
        self.assertEqual(output[3],
                         "204:/AACTCCCATTTGAGATCATATTCATATTCTCTGAAATCAACGTAGAAGTACTCATTATCTGAGGAGCCGGTCACC:204",
                         "Test fail: FLT3-ITD -> variant")
        self.assertEqual(output[7],
                         "TACCTTCCCAAACTCTAAATTTTCTCTTGGAAACTCCCATTTGAGATCATATTCATATTCTCTGAAATCAACGTAGAAGTACTCATTATCTGAGGAGCCGGTCACCAACTCCCATTTGAGATCATATTCATATTCTCTGAAATCAACGTAGAAGTACTCATTATCTGAGGAGCCGGTCACCTGTACCATCTGTAGCTGGCTTTCATACCTA",
                         "Test fail: FLT3-ITD -> sequence")

    def testFLT3_TKD(self):
        """FLT3-TKD 3-bp deletion is reported by the tool."""
        output = self._find_mut_fields(
            './data/jf/05H094_FLT3-TKD_del.jf',
            "./data/catalog/GRCh38/FLT3-TKD_exon_20.fa")
        self.assertEqual(output[2],
                         "Deletion",
                         "Test fail: FLT3-TKD -> type")
        self.assertEqual(output[3],
                         "32:gat/:35",
                         "Test fail: FLT3-TKD -> variant")
        self.assertEqual(output[7],
                         "TGCCCCTGACAACATAGTTGGAATCACTCATATCTCGAGCCAATCCAAAGTCACATATCTT",
                         "Test fail: FLT3-TKD -> sequence")

    def testDNMT3A(self):
        """DNMT3A R882 substitution is reported by the tool."""
        output = self._find_mut_fields(
            "./data/jf/02H033_DNMT3A_sub.jf",
            "./data/catalog/GRCh38/DNMT3A_R882_exon_23.fa")
        self.assertEqual(output[2],
                         "Substitution",
                         "Test fail: DNMT3A -> type")
        self.assertEqual(output[3],
                         "33:c/T:34",
                         "Test fail: DNMT3A -> variant")
        self.assertEqual(output[7],
                         "TGACCGGCCCAGCAGTCTCTGCCTCGCCAAGTGGCTCATGTTGGAGACGTCAGTATAGTGGA",
                         "Test fail: DNMT3A -> sequence")

    def test_not_linear(self):
        """A non-linear (repetitive) reference must raise ValueError."""
        ref_seq = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"
        k_len = 31
        ref_name = "not_linear"
        jf = Jellyfish("./data/jf/02H033_DNMT3A_sub.jf")
        finder = umf.MutationFinder("", "", jf, False, 500)
        with self.assertRaises(ValueError):
            finder.get_ref_kmer(ref_seq, 31, ref_name)
def runTests():
    """Run every test case in this module via unittest's CLI runner."""
    unittest.main()
if __name__ == "__main__":
    runTests()
|
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo.serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import flavor_access
from nova.api.openstack.compute.contrib import flavormanage as flavormanage_v2
from nova.api.openstack.compute.plugins.v3 import flavor_manage as \
flavormanage_v21
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
def fake_db_flavor(**updates):
    """Build a flavor record shaped like the DB API's return value.

    Keyword arguments override the corresponding default fields.
    """
    flavor = {
        'root_gb': 1,
        'ephemeral_gb': 1,
        'name': u'frob',
        'deleted': False,
        'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329),
        'updated_at': None,
        'memory_mb': 256,
        'vcpus': 1,
        'flavorid': 1,
        'swap': 0,
        'rxtx_factor': 1.0,
        'extra_specs': {},
        'deleted_at': None,
        'vcpu_weight': None,
        'id': 7,
        'is_public': True,
        'disabled': False,
    }
    flavor.update(updates)
    return flavor
def fake_get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted='yes'):
    """Stub flavor lookup: only id 1234 exists and 'failtest' is missing.

    Also fails the test if the caller would read deleted flavors.
    """
    if flavorid == 'failtest':
        raise exception.FlavorNotFound(flavor_id=flavorid)
    if str(flavorid) != '1234':
        raise Exception("This test expects flavorid 1234, not %s" % flavorid)
    if read_deleted != 'no':
        raise test.TestingException("Should not be reading deleted")
    return fake_db_flavor(flavorid=flavorid)
def fake_destroy(flavorname):
    """No-op stand-in for flavors.destroy()."""
    pass
def fake_create(context, kwargs, projects=None):
    """Stub for db.flavor_create: echo the requested values back as a flavor."""
    newflavor = fake_db_flavor()
    requested_id = kwargs.get('flavorid')
    # The API may omit the id; the fake backend then picks 1234.
    newflavor['flavorid'] = 1234 if requested_id is None else requested_id
    newflavor.update({
        "name": kwargs.get('name'),
        "memory_mb": int(kwargs.get('memory_mb')),
        "vcpus": int(kwargs.get('vcpus')),
        "root_gb": int(kwargs.get('root_gb')),
        "ephemeral_gb": int(kwargs.get('ephemeral_gb')),
        "swap": kwargs.get('swap'),
        "rxtx_factor": float(kwargs.get('rxtx_factor')),
        "is_public": bool(kwargs.get('is_public')),
        "disabled": bool(kwargs.get('disabled')),
    })
    return newflavor
class FlavorManageTestV21(test.NoDBTestCase):
    """Tests for flavor create/delete through the v2.1 os-flavor-manage API.

    The flavor DB layer is replaced by the module-level fakes, so these
    tests exercise request validation and response formatting only.
    """

    controller = flavormanage_v21.FlavorManageController()
    # Error type raised on request-schema violations; the v2 subclass
    # overrides this with webob's HTTPBadRequest.
    validation_error = exception.ValidationError
    base_url = '/v2/fake/flavors'

    def setUp(self):
        super(FlavorManageTestV21, self).setUp()
        # Route all flavor lookups/creates/destroys to the in-memory fakes.
        self.stubs.Set(flavors,
                       "get_flavor_by_flavor_id",
                       fake_get_flavor_by_flavor_id)
        self.stubs.Set(flavors, "destroy", fake_destroy)
        self.stubs.Set(db, "flavor_create", fake_create)
        self.ctxt = context.RequestContext('fake', 'fake',
                                           is_admin=True, auth_token=True)
        self.app = self._setup_app()
        # A known-good request body; individual tests tweak or delete keys.
        self.request_body = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": unicode('1234'),
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        self.expected_flavor = self.request_body

    def _setup_app(self):
        # Build the v2.1 WSGI app with only the extensions under test.
        return fakes.wsgi_app_v21(init_only=('os-flavor-manage',
                                             'os-flavor-rxtx',
                                             'os-flavor-access', 'flavors',
                                             'os-flavor-extra-data'))

    def test_delete(self):
        req = fakes.HTTPRequest.blank(self.base_url + '/1234')
        res = self.controller._delete(req, 1234)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller,
                      flavormanage_v21.FlavorManageController):
            status_int = self.controller._delete.wsgi_code
        else:
            status_int = res.status_int
        self.assertEqual(202, status_int)
        # subsequent delete should fail
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._delete, req, "failtest")

    def _test_create_missing_parameter(self, parameter):
        # Start from a valid body, remove one required field, and expect
        # schema validation to reject the create.
        body = {
            "flavor": {
                "name": "azAZ09. -_",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": unicode('1234'),
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        del body['flavor'][parameter]
        req = fakes.HTTPRequest.blank(self.base_url)
        self.assertRaises(self.validation_error, self.controller._create,
                          req, body=body)

    def test_create_missing_name(self):
        self._test_create_missing_parameter('name')

    def test_create_missing_ram(self):
        self._test_create_missing_parameter('ram')

    def test_create_missing_vcpus(self):
        self._test_create_missing_parameter('vcpus')

    def test_create_missing_disk(self):
        self._test_create_missing_parameter('disk')

    def _create_flavor_success_case(self, body):
        # POST through the full WSGI stack and return the decoded response.
        req = webob.Request.blank(self.base_url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        self.assertEqual(200, res.status_code)
        return jsonutils.loads(res.body)

    def test_create(self):
        body = self._create_flavor_success_case(self.request_body)
        for key in self.expected_flavor["flavor"]:
            self.assertEqual(body["flavor"][key],
                             self.expected_flavor["flavor"][key])

    def test_create_public_default(self):
        # Omitting is_public must still succeed.
        del self.request_body['flavor']['os-flavor-access:is_public']
        body = self._create_flavor_success_case(self.request_body)
        for key in self.expected_flavor["flavor"]:
            self.assertEqual(body["flavor"][key],
                             self.expected_flavor["flavor"][key])

    def test_create_without_flavorid(self):
        # Omitting the id must still succeed (fake_create picks 1234).
        del self.request_body['flavor']['id']
        body = self._create_flavor_success_case(self.request_body)
        for key in self.expected_flavor["flavor"]:
            self.assertEqual(body["flavor"][key],
                             self.expected_flavor["flavor"][key])

    def _create_flavor_bad_request_case(self, body):
        # Drop the setUp() stubs; a malformed body must come back as a 400
        # from validation rather than reach the fake DB layer.
        self.stubs.UnsetAll()
        req = webob.Request.blank(self.base_url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        self.assertEqual(res.status_code, 400)

    def test_create_invalid_name(self):
        self.request_body['flavor']['name'] = 'bad !@#!$%\x00 name'
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_flavor_name_is_whitespace(self):
        self.request_body['flavor']['name'] = ' '
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_name_too_long(self):
        self.request_body['flavor']['name'] = 'a' * 256
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_without_flavorname(self):
        del self.request_body['flavor']['name']
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_empty_body(self):
        body = {
            "flavor": {}
        }
        self._create_flavor_bad_request_case(body)

    def test_create_no_body(self):
        body = {}
        self._create_flavor_bad_request_case(body)

    def test_create_invalid_format_body(self):
        body = {
            "flavor": []
        }
        self._create_flavor_bad_request_case(body)

    def test_create_invalid_flavorid(self):
        self.request_body['flavor']['id'] = "!@#!$#!$^#&^$&"
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_check_flavor_id_length(self):
        MAX_LENGTH = 255
        self.request_body['flavor']['id'] = "a" * (MAX_LENGTH + 1)
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_leading_trailing_whitespaces_in_flavor_id(self):
        self.request_body['flavor']['id'] = " bad_id "
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_without_ram(self):
        del self.request_body['flavor']['ram']
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_0_ram(self):
        self.request_body['flavor']['ram'] = 0
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_without_vcpus(self):
        del self.request_body['flavor']['vcpus']
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_0_vcpus(self):
        self.request_body['flavor']['vcpus'] = 0
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_without_disk(self):
        del self.request_body['flavor']['disk']
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_minus_disk(self):
        self.request_body['flavor']['disk'] = -1
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_minus_ephemeral(self):
        self.request_body['flavor']['OS-FLV-EXT-DATA:ephemeral'] = -1
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_minus_swap(self):
        self.request_body['flavor']['swap'] = -1
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_minus_rxtx_factor(self):
        self.request_body['flavor']['rxtx_factor'] = -1
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_non_boolean_is_public(self):
        self.request_body['flavor']['os-flavor-access:is_public'] = 123
        self._create_flavor_bad_request_case(self.request_body)

    def test_flavor_exists_exception_returns_409(self):
        expected = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": 1235,
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }

        # Local stub shadows the module-level fake_create: simulate the
        # backend reporting a duplicate flavor name.
        def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
                        flavorid, swap, rxtx_factor, is_public):
            raise exception.FlavorExists(name=name)

        self.stubs.Set(flavors, "create", fake_create)
        req = webob.Request.blank(self.base_url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(expected)
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 409)

    @mock.patch('nova.compute.flavors.create',
                side_effect=exception.FlavorCreateFailed)
    def test_flavor_create_db_failed(self, mock_create):
        # A DB-layer failure must surface as a 500 with a useful message.
        request_dict = {
            "flavor": {
                "name": "test",
                'id': "12345",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        req = webob.Request.blank(self.base_url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(request_dict)
        res = req.get_response(self.app)
        self.assertEqual(res.status_int, 500)
        self.assertIn('Unable to create flavor', res.body)

    def test_invalid_memory_mb(self):
        """Check negative and decimal number can't be accepted."""
        # Stubs removed so the real flavors.create() validation runs.
        self.stubs.UnsetAll()
        self.assertRaises(exception.InvalidInput, flavors.create, "abc",
                          -512, 2, 1, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcd",
                          512.2, 2, 1, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcde",
                          None, 2, 1, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
                          512, 2, None, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
                          "test_memory_mb", 2, None, 1, 1234, 512, 1, True)
class FakeRequest(object):
    # Minimal request stand-in: carries only an admin context in environ,
    # which is all the flavor-access controller reads from the request.
    environ = {"nova.context": context.get_admin_context()}
class PrivateFlavorManageTestV21(test.TestCase):
    """Checks flavor-access side effects of creating public/private flavors."""

    controller = flavormanage_v21.FlavorManageController()
    base_url = '/v2/fake/flavors'

    def setUp(self):
        super(PrivateFlavorManageTestV21, self).setUp()
        self.flavor_access_controller = flavor_access.FlavorAccessController()
        self.ctxt = context.RequestContext('fake', 'fake',
                                           is_admin=True, auth_token=True)
        self.app = self._setup_app()
        # Base flavor body; each test adds its own is_public setting.
        self.expected = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "swap": 512,
                "rxtx_factor": 1
            }
        }

    def _setup_app(self):
        # v2.1 WSGI app restricted to the flavor-related extensions.
        return fakes.wsgi_app_v21(init_only=('os-flavor-manage',
                                             'os-flavor-access',
                                             'os-flavor-rxtx', 'flavors',
                                             'os-flavor-extra-data'),
                                  fake_auth_context=self.ctxt)

    def _get_response(self):
        # POST self.expected through the WSGI stack; return the decoded body.
        req = webob.Request.blank(self.base_url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(self.expected)
        res = req.get_response(self.app)
        return jsonutils.loads(res.body)

    def test_create_private_flavor_should_not_grant_flavor_access(self):
        self.expected["flavor"]["os-flavor-access:is_public"] = False
        body = self._get_response()
        for key in self.expected["flavor"]:
            self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
        flavor_access_body = self.flavor_access_controller.index(
            FakeRequest(), body["flavor"]["id"])
        expected_flavor_access_body = {
            "tenant_id": "%s" % self.ctxt.project_id,
            "flavor_id": "%s" % body["flavor"]["id"]
        }
        # Creating a private flavor must not implicitly grant access to the
        # creating tenant.
        self.assertNotIn(expected_flavor_access_body,
                         flavor_access_body["flavor_access"])

    def test_create_public_flavor_should_not_create_flavor_access(self):
        self.expected["flavor"]["os-flavor-access:is_public"] = True
        body = self._get_response()
        for key in self.expected["flavor"]:
            self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
class FlavorManageTestV2(FlavorManageTestV21):
    """Re-runs the v2.1 test suite against the legacy v2 extension."""

    controller = flavormanage_v2.FlavorManageController()
    # The legacy API reports validation failures as plain HTTP 400s.
    validation_error = webob.exc.HTTPBadRequest

    def setUp(self):
        super(FlavorManageTestV2, self).setUp()
        # Enable only the flavor-related v2 extensions.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
                                    'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])

    def _setup_app(self):
        # Legacy v2 WSGI app instead of the v2.1 one.
        return fakes.wsgi_app(init_only=('flavors',),
                              fake_auth_context=self.ctxt)
class PrivateFlavorManageTestV2(PrivateFlavorManageTestV21):
    """Re-runs the private-flavor tests against the legacy v2 extension."""

    controller = flavormanage_v2.FlavorManageController()

    def setUp(self):
        super(PrivateFlavorManageTestV2, self).setUp()
        # Enable only the flavor-related v2 extensions.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
                                    'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])

    def _setup_app(self):
        # Legacy v2 WSGI app instead of the v2.1 one.
        return fakes.wsgi_app(init_only=('flavors',),
                              fake_auth_context=self.ctxt)
Make use of controller method in test_flavor_manage
In API unit testing, making calls through WSGI adds a little overhead;
wherever applicable, unit tests can call controller
methods directly.
This patch uses controller methods in test_flavor_manage wherever
possible.
Partially implements blueprint v2-on-v3-api
Change-Id: I85f37f0983690ff6b3324df653601e0c4804fe5e
# Copyright 2011 Andrew Bogott for the Wikimedia Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import mock
from oslo.serialization import jsonutils
import webob
from nova.api.openstack.compute.contrib import flavor_access
from nova.api.openstack.compute.contrib import flavormanage as flavormanage_v2
from nova.api.openstack.compute.plugins.v3 import flavor_manage as \
flavormanage_v21
from nova.compute import flavors
from nova import context
from nova import db
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
def fake_db_flavor(**updates):
    """Return a canned DB-style flavor dict, with *updates* overriding
    the default field values."""
    defaults = {
        'root_gb': 1,
        'ephemeral_gb': 1,
        'name': u'frob',
        'deleted': False,
        'created_at': datetime.datetime(2012, 1, 19, 18, 49, 30, 877329),
        'updated_at': None,
        'memory_mb': 256,
        'vcpus': 1,
        'flavorid': 1,
        'swap': 0,
        'rxtx_factor': 1.0,
        'extra_specs': {},
        'deleted_at': None,
        'vcpu_weight': None,
        'id': 7,
        'is_public': True,
        'disabled': False,
    }
    return dict(defaults, **updates)
def fake_get_flavor_by_flavor_id(flavorid, ctxt=None, read_deleted='yes'):
    """Stub lookup: 'failtest' is missing, only id 1234 is valid, and
    callers must ask for read_deleted='no'."""
    if flavorid == 'failtest':
        raise exception.FlavorNotFound(flavor_id=flavorid)
    elif str(flavorid) != '1234':
        raise Exception("This test expects flavorid 1234, not %s" % flavorid)
    if read_deleted != 'no':
        raise test.TestingException("Should not be reading deleted")
    return fake_db_flavor(flavorid=flavorid)
def fake_destroy(flavorname):
    """Stand-in for flavors.destroy(); intentionally does nothing."""
    return None
def fake_create(context, kwargs, projects=None):
    """Stub for db.flavor_create: build a flavor dict from the request."""
    newflavor = fake_db_flavor()
    newflavor['flavorid'] = kwargs.get('flavorid')
    if newflavor['flavorid'] is None:
        # Mirror the real backend choosing an id when none is supplied.
        newflavor['flavorid'] = 1234
    newflavor["name"] = kwargs.get('name')
    for field in ('memory_mb', 'vcpus', 'root_gb', 'ephemeral_gb'):
        newflavor[field] = int(kwargs.get(field))
    newflavor["swap"] = kwargs.get('swap')
    newflavor["rxtx_factor"] = float(kwargs.get('rxtx_factor'))
    newflavor["is_public"] = bool(kwargs.get('is_public'))
    newflavor["disabled"] = bool(kwargs.get('disabled'))
    return newflavor
class FlavorManageTestV21(test.NoDBTestCase):
    """Tests for flavor create/delete through the v2.1 os-flavor-manage API.

    This version calls the controller methods directly where possible,
    avoiding the WSGI stack; the flavor DB layer is replaced by the
    module-level fakes.
    """

    controller = flavormanage_v21.FlavorManageController()
    # Error type raised on request-schema violations; the v2 subclass
    # overrides this with webob's HTTPBadRequest.
    validation_error = exception.ValidationError
    base_url = '/v2/fake/flavors'

    def setUp(self):
        super(FlavorManageTestV21, self).setUp()
        # Route all flavor lookups/creates/destroys to the in-memory fakes.
        self.stubs.Set(flavors,
                       "get_flavor_by_flavor_id",
                       fake_get_flavor_by_flavor_id)
        self.stubs.Set(flavors, "destroy", fake_destroy)
        self.stubs.Set(db, "flavor_create", fake_create)
        self.ctxt = context.RequestContext('fake', 'fake',
                                           is_admin=True, auth_token=True)
        self.app = self._setup_app()
        # A known-good request body; individual tests tweak or delete keys.
        self.request_body = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": unicode('1234'),
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        self.expected_flavor = self.request_body

    def _setup_app(self):
        # Build the v2.1 WSGI app with only the extensions under test.
        return fakes.wsgi_app_v21(init_only=('os-flavor-manage',
                                             'os-flavor-rxtx',
                                             'os-flavor-access', 'flavors',
                                             'os-flavor-extra-data'))

    def test_delete(self):
        req = fakes.HTTPRequest.blank(self.base_url + '/1234')
        res = self.controller._delete(req, 1234)
        # NOTE: on v2.1, http status code is set as wsgi_code of API
        # method instead of status_int in a response object.
        if isinstance(self.controller,
                      flavormanage_v21.FlavorManageController):
            status_int = self.controller._delete.wsgi_code
        else:
            status_int = res.status_int
        self.assertEqual(202, status_int)
        # subsequent delete should fail
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._delete, req, "failtest")

    def _test_create_missing_parameter(self, parameter):
        # Start from a valid body, remove one required field, and expect
        # schema validation to reject the create.
        body = {
            "flavor": {
                "name": "azAZ09. -_",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": unicode('1234'),
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        del body['flavor'][parameter]
        req = fakes.HTTPRequest.blank(self.base_url)
        self.assertRaises(self.validation_error, self.controller._create,
                          req, body=body)

    def test_create_missing_name(self):
        self._test_create_missing_parameter('name')

    def test_create_missing_ram(self):
        self._test_create_missing_parameter('ram')

    def test_create_missing_vcpus(self):
        self._test_create_missing_parameter('vcpus')

    def test_create_missing_disk(self):
        self._test_create_missing_parameter('disk')

    def _create_flavor_success_case(self, body):
        # POST through the full WSGI stack and return the decoded response.
        req = webob.Request.blank(self.base_url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(body)
        res = req.get_response(self.app)
        self.assertEqual(200, res.status_code)
        return jsonutils.loads(res.body)

    def test_create(self):
        body = self._create_flavor_success_case(self.request_body)
        for key in self.expected_flavor["flavor"]:
            self.assertEqual(body["flavor"][key],
                             self.expected_flavor["flavor"][key])

    def test_create_public_default(self):
        # Omitting is_public must still succeed.
        del self.request_body['flavor']['os-flavor-access:is_public']
        body = self._create_flavor_success_case(self.request_body)
        for key in self.expected_flavor["flavor"]:
            self.assertEqual(body["flavor"][key],
                             self.expected_flavor["flavor"][key])

    def test_create_without_flavorid(self):
        # Omitting the id must still succeed (fake_create picks 1234).
        del self.request_body['flavor']['id']
        body = self._create_flavor_success_case(self.request_body)
        for key in self.expected_flavor["flavor"]:
            self.assertEqual(body["flavor"][key],
                             self.expected_flavor["flavor"][key])

    def _create_flavor_bad_request_case(self, body):
        # Drop the setUp() stubs and call the controller directly; a
        # malformed body must raise the API version's validation error.
        self.stubs.UnsetAll()
        req = fakes.HTTPRequest.blank('')
        self.assertRaises(self.validation_error, self.controller._create,
                          req, body=body)

    def test_create_invalid_name(self):
        self.request_body['flavor']['name'] = 'bad !@#!$%\x00 name'
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_flavor_name_is_whitespace(self):
        self.request_body['flavor']['name'] = ' '
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_name_too_long(self):
        self.request_body['flavor']['name'] = 'a' * 256
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_without_flavorname(self):
        del self.request_body['flavor']['name']
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_empty_body(self):
        body = {
            "flavor": {}
        }
        self._create_flavor_bad_request_case(body)

    def test_create_no_body(self):
        body = {}
        self._create_flavor_bad_request_case(body)

    def test_create_invalid_format_body(self):
        body = {
            "flavor": []
        }
        self._create_flavor_bad_request_case(body)

    def test_create_invalid_flavorid(self):
        self.request_body['flavor']['id'] = "!@#!$#!$^#&^$&"
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_check_flavor_id_length(self):
        MAX_LENGTH = 255
        self.request_body['flavor']['id'] = "a" * (MAX_LENGTH + 1)
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_leading_trailing_whitespaces_in_flavor_id(self):
        self.request_body['flavor']['id'] = " bad_id "
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_without_ram(self):
        del self.request_body['flavor']['ram']
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_0_ram(self):
        self.request_body['flavor']['ram'] = 0
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_without_vcpus(self):
        del self.request_body['flavor']['vcpus']
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_0_vcpus(self):
        self.request_body['flavor']['vcpus'] = 0
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_without_disk(self):
        del self.request_body['flavor']['disk']
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_minus_disk(self):
        self.request_body['flavor']['disk'] = -1
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_minus_ephemeral(self):
        self.request_body['flavor']['OS-FLV-EXT-DATA:ephemeral'] = -1
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_minus_swap(self):
        self.request_body['flavor']['swap'] = -1
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_minus_rxtx_factor(self):
        self.request_body['flavor']['rxtx_factor'] = -1
        self._create_flavor_bad_request_case(self.request_body)

    def test_create_with_non_boolean_is_public(self):
        self.request_body['flavor']['os-flavor-access:is_public'] = 123
        self._create_flavor_bad_request_case(self.request_body)

    def test_flavor_exists_exception_returns_409(self):
        expected = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "id": 1235,
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }

        # Local stub shadows the module-level fake_create: simulate the
        # backend reporting a duplicate flavor name.
        def fake_create(name, memory_mb, vcpus, root_gb, ephemeral_gb,
                        flavorid, swap, rxtx_factor, is_public):
            raise exception.FlavorExists(name=name)

        self.stubs.Set(flavors, "create", fake_create)
        req = fakes.HTTPRequest.blank('')
        self.assertRaises(webob.exc.HTTPConflict, self.controller._create,
                          req, body=expected)

    @mock.patch('nova.compute.flavors.create',
                side_effect=exception.FlavorCreateFailed)
    def test_flavor_create_db_failed(self, mock_create):
        # A DB-layer failure must surface as a 500 with a useful message.
        request_dict = {
            "flavor": {
                "name": "test",
                'id': "12345",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "swap": 512,
                "rxtx_factor": 1,
                "os-flavor-access:is_public": True,
            }
        }
        req = fakes.HTTPRequest.blank('')
        ex = self.assertRaises(webob.exc.HTTPInternalServerError,
                               self.controller._create,
                               req, body=request_dict)
        self.assertIn('Unable to create flavor', ex.explanation)

    def test_invalid_memory_mb(self):
        """Check negative and decimal number can't be accepted."""
        # Stubs removed so the real flavors.create() validation runs.
        self.stubs.UnsetAll()
        self.assertRaises(exception.InvalidInput, flavors.create, "abc",
                          -512, 2, 1, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcd",
                          512.2, 2, 1, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcde",
                          None, 2, 1, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
                          512, 2, None, 1, 1234, 512, 1, True)
        self.assertRaises(exception.InvalidInput, flavors.create, "abcdef",
                          "test_memory_mb", 2, None, 1, 1234, 512, 1, True)
class FakeRequest(object):
    # Minimal request stand-in: carries only an admin context in environ,
    # which is all the flavor-access controller reads from the request.
    environ = {"nova.context": context.get_admin_context()}
class PrivateFlavorManageTestV21(test.TestCase):
    """Checks flavor-access side effects of creating public/private flavors."""

    controller = flavormanage_v21.FlavorManageController()
    base_url = '/v2/fake/flavors'

    def setUp(self):
        super(PrivateFlavorManageTestV21, self).setUp()
        self.flavor_access_controller = flavor_access.FlavorAccessController()
        self.ctxt = context.RequestContext('fake', 'fake',
                                           is_admin=True, auth_token=True)
        self.app = self._setup_app()
        # Base flavor body; each test adds its own is_public setting.
        self.expected = {
            "flavor": {
                "name": "test",
                "ram": 512,
                "vcpus": 2,
                "disk": 1,
                "OS-FLV-EXT-DATA:ephemeral": 1,
                "swap": 512,
                "rxtx_factor": 1
            }
        }

    def _setup_app(self):
        # v2.1 WSGI app restricted to the flavor-related extensions.
        return fakes.wsgi_app_v21(init_only=('os-flavor-manage',
                                             'os-flavor-access',
                                             'os-flavor-rxtx', 'flavors',
                                             'os-flavor-extra-data'),
                                  fake_auth_context=self.ctxt)

    def _get_response(self):
        # POST self.expected through the WSGI stack; return the decoded body.
        req = webob.Request.blank(self.base_url)
        req.headers['Content-Type'] = 'application/json'
        req.method = 'POST'
        req.body = jsonutils.dumps(self.expected)
        res = req.get_response(self.app)
        return jsonutils.loads(res.body)

    def test_create_private_flavor_should_not_grant_flavor_access(self):
        self.expected["flavor"]["os-flavor-access:is_public"] = False
        body = self._get_response()
        for key in self.expected["flavor"]:
            self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
        flavor_access_body = self.flavor_access_controller.index(
            FakeRequest(), body["flavor"]["id"])
        expected_flavor_access_body = {
            "tenant_id": "%s" % self.ctxt.project_id,
            "flavor_id": "%s" % body["flavor"]["id"]
        }
        # Creating a private flavor must not implicitly grant access to the
        # creating tenant.
        self.assertNotIn(expected_flavor_access_body,
                         flavor_access_body["flavor_access"])

    def test_create_public_flavor_should_not_create_flavor_access(self):
        self.expected["flavor"]["os-flavor-access:is_public"] = True
        body = self._get_response()
        for key in self.expected["flavor"]:
            self.assertEqual(body["flavor"][key], self.expected["flavor"][key])
class FlavorManageTestV2(FlavorManageTestV21):
    """Re-runs the v2.1 test suite against the legacy v2 extension."""

    controller = flavormanage_v2.FlavorManageController()
    # The legacy API reports validation failures as plain HTTP 400s.
    validation_error = webob.exc.HTTPBadRequest

    def setUp(self):
        super(FlavorManageTestV2, self).setUp()
        # Enable only the flavor-related v2 extensions.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
                                    'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])

    def _setup_app(self):
        # Legacy v2 WSGI app instead of the v2.1 one.
        return fakes.wsgi_app(init_only=('flavors',),
                              fake_auth_context=self.ctxt)
class PrivateFlavorManageTestV2(PrivateFlavorManageTestV21):
    """Re-runs the private-flavor tests against the legacy v2 extension."""

    controller = flavormanage_v2.FlavorManageController()

    def setUp(self):
        super(PrivateFlavorManageTestV2, self).setUp()
        # Enable only the flavor-related v2 extensions.
        self.flags(
            osapi_compute_extension=[
                'nova.api.openstack.compute.contrib.select_extensions'],
            osapi_compute_ext_list=['Flavormanage', 'Flavorextradata',
                                    'Flavor_access', 'Flavor_rxtx', 'Flavor_swap'])

    def _setup_app(self):
        # Legacy v2 WSGI app instead of the v2.1 one.
        return fakes.wsgi_app(init_only=('flavors',),
                              fake_auth_context=self.ctxt)
|
from distutils.command.install import install
from distutils.core import setup
from distutils import log
import os
import json
import sys
# Jupyter/IPython kernel spec: launch this module with the current Python
# interpreter, passing the connection file in.
kernel_json = {
    "argv": [sys.executable,
             "-m", "metakernel_python",
             "-f", "{connection_file}"],
    "display_name": "MetaKernel Python",
    "language": "python"
}
class install_with_kernelspec(install):
    """Install command that also registers the kernel spec with IPython."""

    def run(self):
        install.run(self)
        # Imported here so a plain build does not require IPython at
        # module import time.
        from IPython.kernel.kernelspec import install_kernel_spec
        from IPython.utils.tempdir import TemporaryDirectory
        with TemporaryDirectory() as td:
            os.chmod(td, 0o755)  # Starts off as 700, not user readable
            with open(os.path.join(td, 'kernel.json'), 'w') as f:
                json.dump(kernel_json, f, sort_keys=True)
            # TODO: Copy resources once they're specified
            log.info('Installing kernel spec')
            # system-wide unless the user requested a --user install.
            install_kernel_spec(td, 'metakernel_python', system=not self.user, replace=True)
# setuptools/pip injects this flag, but plain distutils does not understand
# it, so strip it from argv before calling setup().
svem_flag = '--single-version-externally-managed'
if svem_flag in sys.argv:
    # Die, setuptools, die.
    sys.argv.remove(svem_flag)
# Package metadata; install_with_kernelspec hooks the standard install step
# to also register the Jupyter kernel spec.
setup(name='metakernel_python',
      version='0.5',
      description='A Python kernel for Jupyter/IPython',
      long_description="A Python kernel for Jupyter/IPython, based on MetaKernel",
      url="https://github.com/blink1073/metakernel/tree/master/metakernel_python",
      author='Douglas Blank',
      author_email='doug.blank@gmail.com',
      py_modules=['metakernel_python'],
      install_requires=["metakernel", "jedi"],
      cmdclass={'install': install_with_kernelspec},
      classifiers = [
          'Framework :: IPython',
          'License :: OSI Approved :: BSD License',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 2',
          'Topic :: System :: Shells',
      ]
)
Testing installation on Travis
from distutils.command.install import install
from distutils.core import setup
from distutils import log
import os
import json
import sys
# Jupyter/IPython kernel spec: launch this module with the current Python
# interpreter, passing the connection file in.
kernel_json = {
    "argv": [sys.executable,
             "-m", "metakernel_python",
             "-f", "{connection_file}"],
    "display_name": "MetaKernel Python",
    "language": "python"
}
class install_with_kernelspec(install):
    """Install command that also registers the kernel spec with IPython."""

    def run(self):
        install.run(self)
        # Imported here so a plain build does not require IPython at
        # module import time.
        from IPython.kernel.kernelspec import install_kernel_spec
        from IPython.utils.tempdir import TemporaryDirectory
        with TemporaryDirectory() as td:
            os.chmod(td, 0o755)  # Starts off as 700, not user readable
            with open(os.path.join(td, 'kernel.json'), 'w') as f:
                json.dump(kernel_json, f, sort_keys=True)
            # TODO: Copy resources once they're specified
            log.info('Installing kernel spec')
            #install_kernel_spec(td, 'metakernel_python', system=not self.user, replace=True)
            # NOTE(review): user-level install hardcoded (system=False) so CI
            # can install without root — confirm this is intended for release.
            install_kernel_spec(td, 'metakernel_python', system=False, replace=True)
# Setuptools injects this flag, but the plain distutils install command
# used here does not understand it, so strip it before calling setup().
svem_flag = '--single-version-externally-managed'
if svem_flag in sys.argv:
    # Die, setuptools, die.
    sys.argv.remove(svem_flag)
# Register the package together with the custom install command that
# also writes the Jupyter/IPython kernel spec.
setup(name='metakernel_python',
      version='0.5',
      description='A Python kernel for Jupyter/IPython',
      long_description="A Python kernel for Jupyter/IPython, based on MetaKernel",
      url="https://github.com/blink1073/metakernel/tree/master/metakernel_python",
      author='Douglas Blank',
      author_email='doug.blank@gmail.com',
      py_modules=['metakernel_python'],
      install_requires=["metakernel", "jedi"],
      cmdclass={'install': install_with_kernelspec},
      classifiers = [
          'Framework :: IPython',
          'License :: OSI Approved :: BSD License',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 2',
          'Topic :: System :: Shells',
      ]
)
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@authors: Andrés Calderón andres.calderon@correlibre.org
@license: GNU AFFERO GENERAL PUBLIC LICENSE
Caliope Server is the web server of Caliope's Framework
Copyright (C) 2013 Infometrika
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import redis
import sys
import getopt
from simplekv.memory.redisstore import RedisStore
from os import path
from pyinotify import (WatchManager, Notifier, ProcessEvent, IN_CREATE, IN_MODIFY, IN_DELETE)
from cid.utils.jsOptimizer import *
from cid.utils.fileUtils import loadJSONFromFile
class StaticsChangesProcessor(ProcessEvent):
    """pyinotify event handler that pushes changed static JS files
    into the optimizer's cache store.  (NOTE: Python 2 syntax.)"""

    def __init__(self, jso, store):
        self.jso = jso      # jsOptimizer instance doing the caching
        self.store = store  # simplekv store backing the cache

    def process_IN_CREATE(self, event):
        # Creation is ignored here; the file is cached on the follow-up
        # IN_MODIFY event once its content has been written.
        pass
        #print "Create: %s" % path.join(event.path, event.name)
        #jso.js_put_file_cache(path.join(event.path, event.name),store)

    def process_IN_MODIFY(self, event):
        print "Modify: %s" % path.join(event.path, event.name)
        self.jso.js_put_file_cache(path.join(event.path, event.name), self.store)

    def process_IN_DELETE(self, event):
        # Deletions are ignored; stale cache entries are left behind.
        pass
def _parseCommandArguments(argv):
    """Parse -h/-c (--help/--config) from argv; return the config path."""
    print "_parseCommandArguments" + str(argv)
    # Default config location when -c/--config is not given.
    server_config_file = "conf/caliope_server.json"
    try:
        opts, args = getopt.getopt(argv, "hc:", ["help", "config=",])
    except getopt.GetoptError:
        print 'jsOptimizerProcess.py -c <server_configfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'jsOptimizerProcess.py -c <server_configfile>'
            sys.exit()
        elif opt in ("-c", "--config"):
            server_config_file = arg
    return server_config_file
def configure_server_and_app(server_config_file):
    """Load the server JSON config; return the static-files directory."""
    config = loadJSONFromFile(server_config_file, '')
    print config['server']
    # Fall back to the current directory when no static path is configured.
    if 'static' in config['server']:
        static_path = config['server']['static']
    else:
        static_path = "."
    return static_path
def main(argv):
    """Prime the JS cache, then watch the static tree for changes."""
    server_config_file = _parseCommandArguments(argv)
    static_path = configure_server_and_app(server_config_file)
    print "server_config_file = " + server_config_file
    print "static_path = " + static_path
    store = RedisStore(redis.StrictRedis())
    jso = jsOptimizer()
    # Build the cache for everything already on disk before watching.
    jso.watch(static_path, store, force=True)
    try:
        wm = WatchManager()
        notifier = Notifier(wm, StaticsChangesProcessor(jso, store))
        wm.add_watch(static_path, IN_CREATE | IN_MODIFY | IN_DELETE, rec=True)
        notifier.loop()  # blocks, dispatching inotify events
    finally:
        pass
if __name__ == '__main__':
    #: Start the application
    main(sys.argv[1:])
fix watcher
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@authors: Andrés Calderón andres.calderon@correlibre.org
@license: GNU AFFERO GENERAL PUBLIC LICENSE
Caliope Server is the web server of Caliope's Framework
Copyright (C) 2013 Infometrika
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import redis
import sys
import getopt
from simplekv.memory.redisstore import RedisStore
from os import path
from pyinotify import (WatchManager, Notifier, ProcessEvent, IN_ACCESS,IN_ATTRIB,IN_CREATE, IN_MODIFY, IN_DELETE)
from cid.utils.jsOptimizer import *
from cid.utils.fileUtils import loadJSONFromFile
class StaticsChangesProcessor(ProcessEvent):
    """pyinotify event handler that pushes changed static JS files
    into the optimizer's cache store.  (NOTE: Python 2 syntax.)"""

    def __init__(self, jso, store):
        self.jso = jso      # jsOptimizer instance doing the caching
        self.store = store  # simplekv store backing the cache

    def process_IN_CREATE(self, event):
        #print "Create: %s" % path.join(event.path, event.name)
        self.jso.js_put_file_cache(path.join(event.path, event.name), self.store)

    def process_IN_MODIFY(self, event):
        #print "Modify: %s" % path.join(event.path, event.name)
        self.jso.js_put_file_cache(path.join(event.path, event.name), self.store)

    def process_IN_DELETE(self, event):
        # Deletions are ignored; stale cache entries are left behind.
        pass

    def process_IN_ATTRIB(self, event):
        # Metadata changes (chmod/chown/touch) also trigger a re-cache.
        self.jso.js_put_file_cache(path.join(event.path, event.name), self.store)
        #print "in attrib: %s" % path.join(event.path, event.name)

    def process_IN_ACCESS(self, event):
        # Plain reads are ignored.
        pass
        #print "in access: %s" % path.join(event.path, event.name)
def _parseCommandArguments(argv):
    """Parse -h/-c (--help/--config) from argv; return the config path."""
    print "_parseCommandArguments" + str(argv)
    # Default config location when -c/--config is not given.
    server_config_file = "conf/caliope_server.json"
    try:
        opts, args = getopt.getopt(argv, "hc:", ["help", "config=",])
    except getopt.GetoptError:
        print 'jsOptimizerProcess.py -c <server_configfile>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'jsOptimizerProcess.py -c <server_configfile>'
            sys.exit()
        elif opt in ("-c", "--config"):
            server_config_file = arg
    return server_config_file
def configure_server_and_app(server_config_file):
    """Load the server JSON config; return the static-files directory."""
    config = loadJSONFromFile(server_config_file, '')
    print config['server']
    # Fall back to the current directory when no static path is configured.
    if 'static' in config['server']:
        static_path = config['server']['static']
    else:
        static_path = "."
    return static_path
def main(argv):
    """Prime the JS cache, then watch the static tree for changes."""
    server_config_file = _parseCommandArguments(argv)
    static_path = configure_server_and_app(server_config_file)
    print "server_config_file = " + server_config_file
    print "static_path = " + static_path
    store = RedisStore(redis.StrictRedis())
    jso = jsOptimizer()
    # Build the cache for everything already on disk before watching.
    jso.watch(static_path, store, force=True)
    try:
        wm = WatchManager()
        notifier = Notifier(wm, StaticsChangesProcessor(jso, store))
        # Attribute and access events are watched too (see handler above).
        wm.add_watch(static_path, IN_ATTRIB | IN_ACCESS | IN_CREATE | IN_MODIFY | IN_DELETE, rec=True)
        notifier.loop()  # blocks, dispatching inotify events
    finally:
        pass
if __name__ == '__main__':
    #: Start the application
    main(sys.argv[1:])
|
# Filename: calib.py
# pylint: disable=locally-disabled
"""
Calibration.
"""
import numpy as np
import km3db
from thepipe import Module
from km3pipe.hardware import Detector
from km3pipe.dataclasses import Table
from km3pipe.tools import istype
from km3pipe.logger import get_logger
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = ["Thomas Heid"]
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
log = get_logger(__name__)
# Numba is optional: with it the calibration lookups are JIT compiled,
# without it `jit` degrades to an identity decorator.
try:
    import numba as nb
except (ImportError, OSError):
    HAVE_NUMBA = False
    jit = lambda f: f
    log.warning("No numba detected, consider `pip install numba` for more speed!")
else:
    try:
        # numba.typed.Dict (0.43+) is required for the typed lookup tables.
        from numba.typed import Dict
    except ImportError:
        log.warning("Please update numba (0.43+) to have dictionary support!")
        HAVE_NUMBA = False
        jit = lambda f: f
    else:
        HAVE_NUMBA = True
        from numba import jit
class Calibration(Module):
"""A very simple, preliminary Module which gives access to the calibration.
Parameters
----------
apply: bool, optional [default=True]
Apply the calibration to the hits (add position/direction/t0)?
filename: str, optional [default=None]
DetX file with detector description.
det_id: int, optional
.detx ID of detector (when retrieving from database).
t0set: optional
t0set (when retrieving from database).
calibset: optional
calibset (when retrieving from database).
"""
__name__ = "Calibration"
name = "Calibration"
def configure(self):
    """Collect parameters and build the detector plus lookup tables."""
    # Whether process() should calibrate the hits in the blob.
    self._should_apply = self.get("apply", default=True)
    self.filename = self.get("filename")
    self.det_id = self.get("det_id")
    self.run = self.get("run")
    self.t0set = self.get("t0set")
    self.calibset = self.get("calibset")
    self.detector = self.get("detector")
    self._pos_dom_channel = None
    self._dir_dom_channel = None
    self._t0_dom_channel = None
    self._pos_pmt_id = None
    self._dir_pmt_id = None
    self._t0_pmt_id = None
    self._lookup_tables = None  # for Numba
    # Highest priority: a det_id/run pair pulls the calibrated DETX
    # straight from the database and short-circuits everything else.
    if self.det_id and self.run:
        self.cprint(
            "Grabbing the calibration for Det ID {} and run {}".format(
                self.det_id, self.run
            )
        )
        raw_detx = km3db.tools.detx_for_run(self.det_id, self.run)
        self.detector = Detector(string=raw_detx)
        self._create_dom_channel_lookup()
        self._create_pmt_id_lookup()
        return
    # TODO: deprecation
    if self.get("calibration"):
        self.log.warning(
            "The parameter 'calibration' has been renamed "
            "to 'calibset'. The 'calibration' parameter will be removed "
            "in the next version of KM3Pipe"
        )
        self.calibset = self.get("calibration")
    # Otherwise build the detector from a file and/or det_id
    # (det_id wins when both are given).
    if self.filename or self.det_id:
        if self.filename is not None:
            self.detector = Detector(filename=self.filename)
        if self.det_id:
            self.detector = Detector(
                det_id=self.det_id, t0set=self.t0set, calibration=self.calibset
            )
    if self.detector is not None:
        self.log.debug("Creating lookup tables")
        self._create_dom_channel_lookup()
        self._create_pmt_id_lookup()
    else:
        self.log.critical("No detector information loaded.")
def process(self, blob, key="Hits", outkey="CalibHits"):
    """Calibrate ``blob[key]`` and store the result under ``outkey``."""
    if self._should_apply:
        blob[outkey] = self.apply(blob[key])
    return blob

def get_detector(self):
    """Return the detector"""
    return self.detector

def apply_t0(self, hits):
    """Apply only t0s"""
    if HAVE_NUMBA:
        # JIT path: mutates hits.time in place via the typed lookup tables.
        apply_t0_nb(hits.time, hits.dom_id, hits.channel_id, self._lookup_tables)
    else:
        n = len(hits)
        cal = np.empty(n)
        lookup = self._calib_by_dom_and_channel
        for i in range(n):
            # Column 6 of a per-channel calibration row is the t0.
            calib = lookup[hits["dom_id"][i]][hits["channel_id"][i]]
            cal[i] = calib[6]
        hits.time += cal
    return hits
def apply(self, hits, no_copy=False, correct_slewing=True, slewing_variant=3):
    """Add x, y, z, t0 (and du, floor if DataFrame) columns to the hits.

    Parameters
    ----------
    hits: Table, DataFrame or km3io hits branch
        Hits carrying either ``dom_id``/``channel_id`` or ``pmt_id``.
    no_copy: bool, optional [default=False]
        Do not copy the hits before modifying them.
    correct_slewing: bool, optional [default=True]
        Subtract the ToT dependent time slewing (non-MC hits only).
    slewing_variant: int, optional [default=3]
        Which slewing parametrisation to use, see ``slew()``.

    Returns
    -------
    Table with position, direction, du, floor and t0 columns added and
    t0 (and optionally slewing) applied to the hit times.
    """
    if not no_copy:
        try:
            hits = hits.copy()
        except AttributeError:  # probably a km3io object
            pass
    if istype(hits, "OfflineBranch"):
        hits = Table(
            dict(
                dom_id=hits.dom_id,
                channel_id=hits.channel_id,
                time=hits.t,
                tot=hits.tot,
                triggered=hits.trig,
            )
        )
    if istype(hits, "DataFrame"):
        # do we ever see McHits here?
        hits = Table.from_template(hits, "Hits")
    # Pick the lookup matching the hit flavour: (dom_id, channel_id) for
    # regular hits, pmt_id for MC hits.
    if hasattr(hits, "dom_id") and hasattr(hits, "channel_id"):
        try:
            (
                dir_x, dir_y, dir_z,
                du, floor,
                pos_x, pos_y, pos_z,
                t0,
            ) = _get_calibration_for_hits(hits, self._calib_by_dom_and_channel)
        except KeyError as e:
            self.log.critical("Wrong calibration (DETX) data provided.")
            raise
    elif hasattr(hits, "pmt_id"):
        try:
            (
                dir_x, dir_y, dir_z,
                du, floor,
                pos_x, pos_y, pos_z,
                t0,
            ) = _get_calibration_for_mchits(hits, self._calib_by_pmt_id)
        except KeyError as e:
            self.log.critical("Wrong calibration (DETX) data provided.")
            raise
    else:
        raise TypeError(
            "Don't know how to apply calibration to '{0}'. "
            "We need at least 'dom_id' and 'channel_id', or "
            "'pmt_id'.".format(hits.name)
        )
    if hasattr(hits, "time"):
        # The time column cannot change dtype in place, so when the dtypes
        # differ it is replaced by a float32 version.
        if hits.time.dtype != t0.dtype:
            time = hits.time.astype("f4") + t0.astype("f4")
            hits = hits.drop_columns(["time"])
            hits = hits.append_columns(["time"], [time])
        else:
            hits.time += t0
    hits_data = {}
    for colname in hits.dtype.names:
        hits_data[colname] = hits[colname]
    calib = {
        "dir_x": dir_x,
        "dir_y": dir_y,
        "dir_z": dir_z,
        "du": du.astype(np.uint8),
        # BUGFIX: the floor column was previously filled with the DU
        # values (du.astype(...)); use the actual floor array.
        "floor": floor.astype(np.uint8),
        "pos_x": pos_x,
        "pos_y": pos_y,
        "pos_z": pos_z,
        "t0": t0,
    }
    hits_data.update(calib)
    if correct_slewing and hasattr(hits, "tot"):  # only correct non-MC hits
        hits_data["time"] -= slew(hits_data["tot"], variant=slewing_variant)
    return Table(
        hits_data, h5loc=hits.h5loc, split_h5=hits.split_h5, name=hits.name
    )
def _create_dom_channel_lookup(self):
    """Build the dom_id -> (31 x 9) calibration matrix lookup.

    Each row holds [pos_x, pos_y, pos_z, dir_x, dir_y, dir_z, t0, du, floor]
    for one channel.
    """
    if HAVE_NUMBA:
        from numba.typed import Dict
        from numba import types
        data = Dict.empty(key_type=types.i8, value_type=types.float64[:, :])
    else:
        data = {}
    for pmt in self.detector.pmts:
        if pmt.dom_id not in data:
            data[pmt.dom_id] = np.zeros((31, 9))  # one row per channel
        data[pmt.dom_id][pmt.channel_id] = np.asarray(
            [
                pmt.pos_x,
                pmt.pos_y,
                pmt.pos_z,
                pmt.dir_x,
                pmt.dir_y,
                pmt.dir_z,
                pmt.t0,
                pmt.du,
                pmt.floor,
            ],
            dtype=np.float64,
        )
    self._calib_by_dom_and_channel = data
    if HAVE_NUMBA:
        # Numba-friendly list of (dom_id, calibration matrix) tuples.
        self._lookup_tables = [(dom, cal) for dom, cal in data.items()]

def _create_pmt_id_lookup(self):
    """Build the pmt_id -> 9-element calibration vector lookup (MC hits)."""
    if HAVE_NUMBA:
        from numba.typed import Dict
        from numba import types
        data = Dict.empty(key_type=types.i8, value_type=types.float64[:])
    else:
        data = {}
    for pmt in self.detector.pmts:
        data[pmt.pmt_id] = np.asarray(
            [
                pmt.pos_x,
                pmt.pos_y,
                pmt.pos_z,
                pmt.dir_x,
                pmt.dir_y,
                pmt.dir_z,
                pmt.t0,
                pmt.du,
                pmt.floor,
            ],
            dtype=np.float64,
        )
    self._calib_by_pmt_id = data

def __repr__(self):
    return self.__str__()

def __str__(self):
    return "Calibration: det_id({0})".format(self.det_id)
@jit
def apply_t0_nb(times, dom_ids, channel_ids, lookup_tables):
    """Apply t0s using a lookup table of tuples (dom_id, calib)."""
    active_dom = 0
    calib = np.empty((31, 9))
    for idx in range(len(times)):
        hit_dom = dom_ids[idx]
        if hit_dom != active_dom:
            # Hit belongs to a different DOM: swap in its calibration.
            active_dom = hit_dom
            for entry_dom, entry_matrix in lookup_tables:
                if entry_dom == active_dom:
                    np.copyto(calib, entry_matrix)
        # Column 6 of the channel's calibration row is the t0.
        times[idx] += calib[channel_ids[idx]][6]
@jit
def _get_calibration_for_hits(hits, lookup):
"""Append the position, direction and t0 columns and add t0 to time"""
n = len(hits)
cal = np.empty((n, 9))
for i in range(n):
calib = lookup[hits["dom_id"][i]][hits["channel_id"][i]]
cal[i] = calib
dir_x = cal[:, 3]
dir_y = cal[:, 4]
dir_z = cal[:, 5]
du = cal[:, 7]
floor = cal[:, 8]
pos_x = cal[:, 0]
pos_y = cal[:, 1]
pos_z = cal[:, 2]
t0 = cal[:, 6]
return [dir_x, dir_y, dir_z, du, floor, pos_x, pos_y, pos_z, t0]
@jit
def _get_calibration_for_mchits(hits, lookup):
"""Append the position, direction and t0 columns and add t0 to time"""
n_hits = len(hits)
cal = np.empty((n_hits, 9))
for i in range(n_hits):
cal[i] = lookup[hits["pmt_id"][i]]
dir_x = cal[:, 3]
dir_y = cal[:, 4]
dir_z = cal[:, 5]
du = cal[:, 7]
floor = cal[:, 8]
pos_x = cal[:, 0]
pos_y = cal[:, 1]
pos_z = cal[:, 2]
t0 = cal[:, 6]
return [dir_x, dir_y, dir_z, du, floor, pos_x, pos_y, pos_z, t0]
class CalibrationService(Module):
"""A service which provides calibration routines for hits
Parameters
----------
filename: str, optional [default=None]
DetX file with detector description.
det_id: int, optional
.detx ID of detector (when retrieving from database).
t0set: optional
t0set (when retrieving from database).
calibset: optional
calibset (when retrieving from database).
detector: kp.hardware.Detector, optional
"""
__name__ = "Calibration"
name = "Calibration"
def configure(self):
    """Collect parameters and expose the calibration services."""
    self.filename = self.get("filename")
    self.det_id = self.get("det_id")
    self.t0set = self.get("t0set")
    self.calibset = self.get("calibset")
    self._detector = self.get("detector")
    # BUGFIX: the calibration built from a user supplied detector was
    # unconditionally overwritten with None right afterwards, discarding
    # the detector. Only reset the cache when no detector was provided.
    if self._detector is not None:
        self._calibration = Calibration(detector=self._detector)
    else:
        self._calibration = None
    self.expose(self.calibrate, "calibrate")
    self.expose(self.get_detector, "get_detector")
    self.expose(self.get_calibration, "get_calibration")
    self.expose(self.load_calibration, "load_calibration")
    self.expose(self.correct_slewing, "correct_slewing")
    self.expose(self.detector_deprecation, "detector")
def load_calibration(self, filename=None, det_id=None, t0set=None, calibset=None):
    """Load another calibration"""
    self.filename = filename
    self.det_id = det_id
    self.t0set = t0set
    self.calibset = calibset
    # Drop the cached detector/calibration; they are lazily rebuilt
    # from the new parameters on next access.
    self._detector = None
    self._calibration = None

@property
def detector_deprecation(self):
    # Exposed under the old 'detector' service name only to warn users.
    self.log.deprecation(
        "The service 'detector' of the CalibrationService has been "
        "deprecated. Please use 'get_detector()' instead in future."
    )

def calibrate(self, hits, correct_slewing=True):
    """Apply the calibration (and optionally slewing) to the hits."""
    return self.calibration.apply(hits, correct_slewing=correct_slewing)

@property
def detector(self):
    # Lazily grab the detector from the calibration on first access.
    if self._detector is None:
        self._detector = self.calibration.detector
    return self._detector

def get_detector(self):
    """Extra getter to be as lazy as possible (expose triggers otherwise)"""
    return self.detector

@property
def calibration(self):
    # Lazily construct the Calibration on first access.
    if self._calibration is None:
        self._calibration = Calibration(
            filename=self.filename,
            det_id=self.det_id,
            t0set=self.t0set,
            calibset=self.calibset,
        )
    return self._calibration

def get_calibration(self):
    """Extra getter to be as lazy as possible (expose triggers otherwise)"""
    return self.calibration

def correct_slewing(self, hits):
    """Apply time slewing correction to the hit times"""
    hits.time -= slew(hits.tot)
@jit
def slew(tot, variant=3):
"""Calculate the time slewing of a PMT response for a given ToT
Parameters
----------
tot: int or np.array(int)
Time over threshold value of a hit
variant: int, optional
The variant of the slew calculation.
1: The first parametrisation approach
2: Jannik's improvement of the parametrisation
3: The latest lookup table approach based on lab measurements.
Returns
-------
time: int
Time slewing, which has to be subtracted from the original hit time.
"""
if variant < 3:
if variant == 1:
p0 = 7.70824
p1 = 0.00879447
p2 = -0.0621101
p3 = -1.90226
if variant == 2:
p0 = 13.6488662517
p1 = -0.128744123166
p2 = -0.0174837749244
p3 = -4.47119633965
corr = p0 * np.exp(p1 * np.sqrt(tot) + p2 * tot) + p3
return corr
corr = np.array(
[
8.01,
7.52,
7.05,
6.59,
6.15,
5.74,
5.33,
4.95,
4.58,
4.22,
3.89,
3.56,
3.25,
2.95,
2.66,
2.39,
2.12,
1.87,
1.63,
1.40,
1.19,
0.98,
0.78,
0.60,
0.41,
0.24,
0.07,
-0.10,
-0.27,
-0.43,
-0.59,
-0.75,
-0.91,
-1.08,
-1.24,
-1.41,
-1.56,
-1.71,
-1.85,
-1.98,
-2.11,
-2.23,
-2.35,
-2.47,
-2.58,
-2.69,
-2.79,
-2.89,
-2.99,
-3.09,
-3.19,
-3.28,
-3.37,
-3.46,
-3.55,
-3.64,
-3.72,
-3.80,
-3.88,
-3.96,
-4.04,
-4.12,
-4.20,
-4.27,
-4.35,
-4.42,
-4.49,
-4.56,
-4.63,
-4.70,
-4.77,
-4.84,
-4.90,
-4.97,
-5.03,
-5.10,
-5.16,
-5.22,
-5.28,
-5.34,
-5.40,
-5.46,
-5.52,
-5.58,
-5.63,
-5.69,
-5.74,
-5.80,
-5.85,
-5.91,
-5.96,
-6.01,
-6.06,
-6.11,
-6.16,
-6.21,
-6.26,
-6.31,
-6.36,
-6.41,
-6.45,
-6.50,
-6.55,
-6.59,
-6.64,
-6.68,
-6.72,
-6.77,
-6.81,
-6.85,
-6.89,
-6.93,
-6.98,
-7.02,
-7.06,
-7.09,
-7.13,
-7.17,
-7.21,
-7.25,
-7.28,
-7.32,
-7.36,
-7.39,
-7.43,
-7.46,
-7.50,
-7.53,
-7.57,
-7.60,
-7.63,
-7.66,
-7.70,
-7.73,
-7.76,
-7.79,
-7.82,
-7.85,
-7.88,
-7.91,
-7.94,
-7.97,
-7.99,
-8.02,
-8.05,
-8.07,
-8.10,
-8.13,
-8.15,
-8.18,
-8.20,
-8.23,
-8.25,
-8.28,
-8.30,
-8.32,
-8.34,
-8.37,
-8.39,
-8.41,
-8.43,
-8.45,
-8.47,
-8.49,
-8.51,
-8.53,
-8.55,
-8.57,
-8.59,
-8.61,
-8.62,
-8.64,
-8.66,
-8.67,
-8.69,
-8.70,
-8.72,
-8.74,
-8.75,
-8.76,
-8.78,
-8.79,
-8.81,
-8.82,
-8.83,
-8.84,
-8.86,
-8.87,
-8.88,
-8.89,
-8.90,
-8.92,
-8.93,
-8.94,
-8.95,
-8.96,
-8.97,
-8.98,
-9.00,
-9.01,
-9.02,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
]
)
return corr[tot]
Doc formatting
# Filename: calib.py
# pylint: disable=locally-disabled
"""
Calibration.
"""
import numpy as np
import km3db
from thepipe import Module
from km3pipe.hardware import Detector
from km3pipe.dataclasses import Table
from km3pipe.tools import istype
from km3pipe.logger import get_logger
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = ["Thomas Heid"]
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "tgal@km3net.de"
__status__ = "Development"
log = get_logger(__name__)
# Numba is optional: with it the calibration lookups are JIT compiled,
# without it `jit` degrades to an identity decorator.
try:
    import numba as nb
except (ImportError, OSError):
    HAVE_NUMBA = False
    jit = lambda f: f
    log.warning("No numba detected, consider `pip install numba` for more speed!")
else:
    try:
        # numba.typed.Dict (0.43+) is required for the typed lookup tables.
        from numba.typed import Dict
    except ImportError:
        log.warning("Please update numba (0.43+) to have dictionary support!")
        HAVE_NUMBA = False
        jit = lambda f: f
    else:
        HAVE_NUMBA = True
        from numba import jit
class Calibration(Module):
"""A very simple, preliminary Module which gives access to the calibration.
Parameters
----------
apply: bool, optional [default=True]
Apply the calibration to the hits (add position/direction/t0)?
filename: str, optional [default=None]
DetX file with detector description.
det_id: int, optional
.detx ID of detector (when retrieving from database).
t0set: optional
t0set (when retrieving from database).
calibset: optional
calibset (when retrieving from database).
"""
__name__ = "Calibration"
name = "Calibration"
def configure(self):
    """Collect parameters and build the detector plus lookup tables."""
    # Whether process() should calibrate the hits in the blob.
    self._should_apply = self.get("apply", default=True)
    self.filename = self.get("filename")
    self.det_id = self.get("det_id")
    self.run = self.get("run")
    self.t0set = self.get("t0set")
    self.calibset = self.get("calibset")
    self.detector = self.get("detector")
    self._pos_dom_channel = None
    self._dir_dom_channel = None
    self._t0_dom_channel = None
    self._pos_pmt_id = None
    self._dir_pmt_id = None
    self._t0_pmt_id = None
    self._lookup_tables = None  # for Numba
    # Highest priority: a det_id/run pair pulls the calibrated DETX
    # straight from the database and short-circuits everything else.
    if self.det_id and self.run:
        self.cprint(
            "Grabbing the calibration for Det ID {} and run {}".format(
                self.det_id, self.run
            )
        )
        raw_detx = km3db.tools.detx_for_run(self.det_id, self.run)
        self.detector = Detector(string=raw_detx)
        self._create_dom_channel_lookup()
        self._create_pmt_id_lookup()
        return
    # TODO: deprecation
    if self.get("calibration"):
        self.log.warning(
            "The parameter 'calibration' has been renamed "
            "to 'calibset'. The 'calibration' parameter will be removed "
            "in the next version of KM3Pipe"
        )
        self.calibset = self.get("calibration")
    # Otherwise build the detector from a file and/or det_id
    # (det_id wins when both are given).
    if self.filename or self.det_id:
        if self.filename is not None:
            self.detector = Detector(filename=self.filename)
        if self.det_id:
            self.detector = Detector(
                det_id=self.det_id, t0set=self.t0set, calibration=self.calibset
            )
    if self.detector is not None:
        self.log.debug("Creating lookup tables")
        self._create_dom_channel_lookup()
        self._create_pmt_id_lookup()
    else:
        self.log.critical("No detector information loaded.")
def process(self, blob, key="Hits", outkey="CalibHits"):
    """Calibrate ``blob[key]`` and store the result under ``outkey``."""
    if self._should_apply:
        blob[outkey] = self.apply(blob[key])
    return blob

def get_detector(self):
    """Return the detector"""
    return self.detector

def apply_t0(self, hits):
    """Apply only t0s"""
    if HAVE_NUMBA:
        # JIT path: mutates hits.time in place via the typed lookup tables.
        apply_t0_nb(hits.time, hits.dom_id, hits.channel_id, self._lookup_tables)
    else:
        n = len(hits)
        cal = np.empty(n)
        lookup = self._calib_by_dom_and_channel
        for i in range(n):
            # Column 6 of a per-channel calibration row is the t0.
            calib = lookup[hits["dom_id"][i]][hits["channel_id"][i]]
            cal[i] = calib[6]
        hits.time += cal
    return hits
def apply(self, hits, no_copy=False, correct_slewing=True, slewing_variant=3):
    """Add x, y, z, t0 (and du, floor if DataFrame) columns to the hits.

    Parameters
    ----------
    hits: Table, DataFrame or km3io hits branch
        Hits carrying either ``dom_id``/``channel_id`` or ``pmt_id``.
    no_copy: bool, optional [default=False]
        Do not copy the hits before modifying them.
    correct_slewing: bool, optional [default=True]
        Subtract the ToT dependent time slewing (non-MC hits only).
    slewing_variant: int, optional [default=3]
        Which slewing parametrisation to use, see ``slew()``.

    Returns
    -------
    Table with position, direction, du, floor and t0 columns added and
    t0 (and optionally slewing) applied to the hit times.
    """
    if not no_copy:
        try:
            hits = hits.copy()
        except AttributeError:  # probably a km3io object
            pass
    if istype(hits, "OfflineBranch"):
        hits = Table(
            dict(
                dom_id=hits.dom_id,
                channel_id=hits.channel_id,
                time=hits.t,
                tot=hits.tot,
                triggered=hits.trig,
            )
        )
    if istype(hits, "DataFrame"):
        # do we ever see McHits here?
        hits = Table.from_template(hits, "Hits")
    # Pick the lookup matching the hit flavour: (dom_id, channel_id) for
    # regular hits, pmt_id for MC hits.
    if hasattr(hits, "dom_id") and hasattr(hits, "channel_id"):
        try:
            (
                dir_x, dir_y, dir_z,
                du, floor,
                pos_x, pos_y, pos_z,
                t0,
            ) = _get_calibration_for_hits(hits, self._calib_by_dom_and_channel)
        except KeyError as e:
            self.log.critical("Wrong calibration (DETX) data provided.")
            raise
    elif hasattr(hits, "pmt_id"):
        try:
            (
                dir_x, dir_y, dir_z,
                du, floor,
                pos_x, pos_y, pos_z,
                t0,
            ) = _get_calibration_for_mchits(hits, self._calib_by_pmt_id)
        except KeyError as e:
            self.log.critical("Wrong calibration (DETX) data provided.")
            raise
    else:
        raise TypeError(
            "Don't know how to apply calibration to '{0}'. "
            "We need at least 'dom_id' and 'channel_id', or "
            "'pmt_id'.".format(hits.name)
        )
    if hasattr(hits, "time"):
        # The time column cannot change dtype in place, so when the dtypes
        # differ it is replaced by a float32 version.
        if hits.time.dtype != t0.dtype:
            time = hits.time.astype("f4") + t0.astype("f4")
            hits = hits.drop_columns(["time"])
            hits = hits.append_columns(["time"], [time])
        else:
            hits.time += t0
    hits_data = {}
    for colname in hits.dtype.names:
        hits_data[colname] = hits[colname]
    calib = {
        "dir_x": dir_x,
        "dir_y": dir_y,
        "dir_z": dir_z,
        "du": du.astype(np.uint8),
        # BUGFIX: the floor column was previously filled with the DU
        # values (du.astype(...)); use the actual floor array.
        "floor": floor.astype(np.uint8),
        "pos_x": pos_x,
        "pos_y": pos_y,
        "pos_z": pos_z,
        "t0": t0,
    }
    hits_data.update(calib)
    if correct_slewing and hasattr(hits, "tot"):  # only correct non-MC hits
        hits_data["time"] -= slew(hits_data["tot"], variant=slewing_variant)
    return Table(
        hits_data, h5loc=hits.h5loc, split_h5=hits.split_h5, name=hits.name
    )
def _create_dom_channel_lookup(self):
    """Build the dom_id -> (31 x 9) calibration matrix lookup.

    Each row holds [pos_x, pos_y, pos_z, dir_x, dir_y, dir_z, t0, du, floor]
    for one channel.
    """
    if HAVE_NUMBA:
        from numba.typed import Dict
        from numba import types
        data = Dict.empty(key_type=types.i8, value_type=types.float64[:, :])
    else:
        data = {}
    for pmt in self.detector.pmts:
        if pmt.dom_id not in data:
            data[pmt.dom_id] = np.zeros((31, 9))  # one row per channel
        data[pmt.dom_id][pmt.channel_id] = np.asarray(
            [
                pmt.pos_x,
                pmt.pos_y,
                pmt.pos_z,
                pmt.dir_x,
                pmt.dir_y,
                pmt.dir_z,
                pmt.t0,
                pmt.du,
                pmt.floor,
            ],
            dtype=np.float64,
        )
    self._calib_by_dom_and_channel = data
    if HAVE_NUMBA:
        # Numba-friendly list of (dom_id, calibration matrix) tuples.
        self._lookup_tables = [(dom, cal) for dom, cal in data.items()]

def _create_pmt_id_lookup(self):
    """Build the pmt_id -> 9-element calibration vector lookup (MC hits)."""
    if HAVE_NUMBA:
        from numba.typed import Dict
        from numba import types
        data = Dict.empty(key_type=types.i8, value_type=types.float64[:])
    else:
        data = {}
    for pmt in self.detector.pmts:
        data[pmt.pmt_id] = np.asarray(
            [
                pmt.pos_x,
                pmt.pos_y,
                pmt.pos_z,
                pmt.dir_x,
                pmt.dir_y,
                pmt.dir_z,
                pmt.t0,
                pmt.du,
                pmt.floor,
            ],
            dtype=np.float64,
        )
    self._calib_by_pmt_id = data

def __repr__(self):
    return self.__str__()

def __str__(self):
    return "Calibration: det_id({0})".format(self.det_id)
@jit
def apply_t0_nb(times, dom_ids, channel_ids, lookup_tables):
    """Apply t0s using a lookup table of tuples (dom_id, calib)."""
    active_dom = 0
    calib = np.empty((31, 9))
    for idx in range(len(times)):
        hit_dom = dom_ids[idx]
        if hit_dom != active_dom:
            # Hit belongs to a different DOM: swap in its calibration.
            active_dom = hit_dom
            for entry_dom, entry_matrix in lookup_tables:
                if entry_dom == active_dom:
                    np.copyto(calib, entry_matrix)
        # Column 6 of the channel's calibration row is the t0.
        times[idx] += calib[channel_ids[idx]][6]
@jit
def _get_calibration_for_hits(hits, lookup):
"""Append the position, direction and t0 columns and add t0 to time"""
n = len(hits)
cal = np.empty((n, 9))
for i in range(n):
calib = lookup[hits["dom_id"][i]][hits["channel_id"][i]]
cal[i] = calib
dir_x = cal[:, 3]
dir_y = cal[:, 4]
dir_z = cal[:, 5]
du = cal[:, 7]
floor = cal[:, 8]
pos_x = cal[:, 0]
pos_y = cal[:, 1]
pos_z = cal[:, 2]
t0 = cal[:, 6]
return [dir_x, dir_y, dir_z, du, floor, pos_x, pos_y, pos_z, t0]
@jit
def _get_calibration_for_mchits(hits, lookup):
"""Append the position, direction and t0 columns and add t0 to time"""
n_hits = len(hits)
cal = np.empty((n_hits, 9))
for i in range(n_hits):
cal[i] = lookup[hits["pmt_id"][i]]
dir_x = cal[:, 3]
dir_y = cal[:, 4]
dir_z = cal[:, 5]
du = cal[:, 7]
floor = cal[:, 8]
pos_x = cal[:, 0]
pos_y = cal[:, 1]
pos_z = cal[:, 2]
t0 = cal[:, 6]
return [dir_x, dir_y, dir_z, du, floor, pos_x, pos_y, pos_z, t0]
class CalibrationService(Module):
"""A service which provides calibration routines for hits
Parameters
----------
filename: str, optional [default=None]
DetX file with detector description.
det_id: int, optional
.detx ID of detector (when retrieving from database).
t0set: optional
t0set (when retrieving from database).
calibset: optional
calibset (when retrieving from database).
detector: kp.hardware.Detector, optional
"""
__name__ = "Calibration"
name = "Calibration"
def configure(self):
    """Collect parameters and expose the calibration services."""
    self.filename = self.get("filename")
    self.det_id = self.get("det_id")
    self.t0set = self.get("t0set")
    self.calibset = self.get("calibset")
    self._detector = self.get("detector")
    # BUGFIX: the calibration built from a user supplied detector was
    # unconditionally overwritten with None right afterwards, discarding
    # the detector. Only reset the cache when no detector was provided.
    if self._detector is not None:
        self._calibration = Calibration(detector=self._detector)
    else:
        self._calibration = None
    self.expose(self.calibrate, "calibrate")
    self.expose(self.get_detector, "get_detector")
    self.expose(self.get_calibration, "get_calibration")
    self.expose(self.load_calibration, "load_calibration")
    self.expose(self.correct_slewing, "correct_slewing")
    self.expose(self.detector_deprecation, "detector")
def load_calibration(self, filename=None, det_id=None, t0set=None, calibset=None):
    """Load another calibration"""
    self.filename = filename
    self.det_id = det_id
    self.t0set = t0set
    self.calibset = calibset
    # Drop the cached detector/calibration; they are lazily rebuilt
    # from the new parameters on next access.
    self._detector = None
    self._calibration = None

@property
def detector_deprecation(self):
    # Exposed under the old 'detector' service name only to warn users.
    self.log.deprecation(
        "The service 'detector' of the CalibrationService has been "
        "deprecated. Please use 'get_detector()' instead in future."
    )

def calibrate(self, hits, correct_slewing=True):
    """Apply the calibration (and optionally slewing) to the hits."""
    return self.calibration.apply(hits, correct_slewing=correct_slewing)

@property
def detector(self):
    # Lazily grab the detector from the calibration on first access.
    if self._detector is None:
        self._detector = self.calibration.detector
    return self._detector

def get_detector(self):
    """Extra getter to be as lazy as possible (expose triggers otherwise)"""
    return self.detector

@property
def calibration(self):
    # Lazily construct the Calibration on first access.
    if self._calibration is None:
        self._calibration = Calibration(
            filename=self.filename,
            det_id=self.det_id,
            t0set=self.t0set,
            calibset=self.calibset,
        )
    return self._calibration

def get_calibration(self):
    """Extra getter to be as lazy as possible (expose triggers otherwise)"""
    return self.calibration

def correct_slewing(self, hits):
    """Apply time slewing correction to the hit times"""
    hits.time -= slew(hits.tot)
@jit
def slew(tot, variant=3):
"""Calculate the time slewing of a PMT response for a given ToT
Parameters
----------
tot: int or np.array(int)
Time over threshold value of a hit
variant: int, optional
The variant of the slew calculation.
1: The first parametrisation approach
2: Jannik's improvement of the parametrisation
3: The latest lookup table approach based on lab measurements.
Returns
-------
time: int
Time slewing, which has to be subtracted from the original hit time.
"""
if variant < 3:
if variant == 1:
p0 = 7.70824
p1 = 0.00879447
p2 = -0.0621101
p3 = -1.90226
if variant == 2:
p0 = 13.6488662517
p1 = -0.128744123166
p2 = -0.0174837749244
p3 = -4.47119633965
corr = p0 * np.exp(p1 * np.sqrt(tot) + p2 * tot) + p3
return corr
corr = np.array(
[
8.01,
7.52,
7.05,
6.59,
6.15,
5.74,
5.33,
4.95,
4.58,
4.22,
3.89,
3.56,
3.25,
2.95,
2.66,
2.39,
2.12,
1.87,
1.63,
1.40,
1.19,
0.98,
0.78,
0.60,
0.41,
0.24,
0.07,
-0.10,
-0.27,
-0.43,
-0.59,
-0.75,
-0.91,
-1.08,
-1.24,
-1.41,
-1.56,
-1.71,
-1.85,
-1.98,
-2.11,
-2.23,
-2.35,
-2.47,
-2.58,
-2.69,
-2.79,
-2.89,
-2.99,
-3.09,
-3.19,
-3.28,
-3.37,
-3.46,
-3.55,
-3.64,
-3.72,
-3.80,
-3.88,
-3.96,
-4.04,
-4.12,
-4.20,
-4.27,
-4.35,
-4.42,
-4.49,
-4.56,
-4.63,
-4.70,
-4.77,
-4.84,
-4.90,
-4.97,
-5.03,
-5.10,
-5.16,
-5.22,
-5.28,
-5.34,
-5.40,
-5.46,
-5.52,
-5.58,
-5.63,
-5.69,
-5.74,
-5.80,
-5.85,
-5.91,
-5.96,
-6.01,
-6.06,
-6.11,
-6.16,
-6.21,
-6.26,
-6.31,
-6.36,
-6.41,
-6.45,
-6.50,
-6.55,
-6.59,
-6.64,
-6.68,
-6.72,
-6.77,
-6.81,
-6.85,
-6.89,
-6.93,
-6.98,
-7.02,
-7.06,
-7.09,
-7.13,
-7.17,
-7.21,
-7.25,
-7.28,
-7.32,
-7.36,
-7.39,
-7.43,
-7.46,
-7.50,
-7.53,
-7.57,
-7.60,
-7.63,
-7.66,
-7.70,
-7.73,
-7.76,
-7.79,
-7.82,
-7.85,
-7.88,
-7.91,
-7.94,
-7.97,
-7.99,
-8.02,
-8.05,
-8.07,
-8.10,
-8.13,
-8.15,
-8.18,
-8.20,
-8.23,
-8.25,
-8.28,
-8.30,
-8.32,
-8.34,
-8.37,
-8.39,
-8.41,
-8.43,
-8.45,
-8.47,
-8.49,
-8.51,
-8.53,
-8.55,
-8.57,
-8.59,
-8.61,
-8.62,
-8.64,
-8.66,
-8.67,
-8.69,
-8.70,
-8.72,
-8.74,
-8.75,
-8.76,
-8.78,
-8.79,
-8.81,
-8.82,
-8.83,
-8.84,
-8.86,
-8.87,
-8.88,
-8.89,
-8.90,
-8.92,
-8.93,
-8.94,
-8.95,
-8.96,
-8.97,
-8.98,
-9.00,
-9.01,
-9.02,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
-9.04,
]
)
return corr[tot]
|
from tornado import web
from sprockets.mixins import cors
class SimpleRequestHandler(cors.CORSMixin, web.RequestHandler):
    """Very simple request handler that CORS enables the GET endpoint."""
    def initialize(self, creds=False):
        # Register GET as CORS-allowed; `creds` comes from the Application's
        # handler kwargs and toggles credential support per route.
        super(SimpleRequestHandler, self).initialize()
        self.cors.allowed_methods.add('GET')
        self.cors.credentials_supported = creds
    def prepare(self):
        # This is used to test that the mixin does not interfere
        # with request failures. You really shouldn't call super()
        # after you explicitly finish() but anyway...
        if 'X-Fail' in self.request.headers:
            self.set_status(400)
            self.finish()
        super(SimpleRequestHandler, self).prepare()
    def get(self):
        # No response body; 204 signals success with empty content.
        self.set_status(204)
        self.finish()
Make examples.py runnable.
#!/usr/bin/env python
import logging
from tornado import ioloop, web
from sprockets.mixins import cors
class SimpleRequestHandler(cors.CORSMixin, web.RequestHandler):
    """Very simple request handler that CORS enables the GET endpoint."""
    def initialize(self, creds=False):
        # Register GET as CORS-allowed; `creds` comes from the Application's
        # handler kwargs and toggles credential support per route.
        super(SimpleRequestHandler, self).initialize()
        self.cors.allowed_methods.add('GET')
        self.cors.credentials_supported = creds
    def prepare(self):
        # This is used to test that the mixin does not interfere
        # with request failures. You really shouldn't call super()
        # after you explicitly finish() but anyway...
        if 'X-Fail' in self.request.headers:
            self.set_status(400)
            self.finish()
        super(SimpleRequestHandler, self).prepare()
    def get(self):
        # No response body; 204 signals success with empty content.
        self.set_status(204)
        self.finish()
if __name__ == '__main__':
    # DEBUG logging so the CORS decisions made by the mixin are visible.
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(levelname)-8s %(name)s: %(message)s')
    # /public: CORS without credentials; /private: credentials enabled.
    app = web.Application([
        web.url('/public', SimpleRequestHandler),
        web.url('/private', SimpleRequestHandler, {'creds': True}),
    ], cors_origins=['http://www.example.com'], debug=True)
    app.listen(8000)
    iol = ioloop.IOLoop.instance()
    try:
        iol.start()
    except KeyboardInterrupt:
        logging.info('stopping IOLoop')
        iol.add_callback(iol.stop)
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ConfigParser
import getpass
import logging
import os
from openfisca_survey_manager.scripts.surv import add_survey_to_collection, create_data_file_by_format
from openfisca_survey_manager.survey_collections import SurveyCollection
from openfisca_france_data import default_config_files_directory as config_files_directory
log = logging.getLogger(__name__)
def build_bdf_survey_collection(years = None, erase = False, overwrite = False):
    """Build or update the 'budget_des_familles' survey collection.

    years: list of survey years to register (e.g. [2011]); required.
    erase: if True, start from a fresh collection instead of loading the
        stored one.
    overwrite: passed through to fill_hdf to overwrite existing HDF data.
    Returns the SurveyCollection instance.
    """
    if years is None:
        log.error("A list of years to process is needed")
        # Fail fast instead of crashing later with a TypeError when iterating
        # over None below.
        raise ValueError("A list of years to process is needed")
    if erase:
        bdf_survey_collection = SurveyCollection(
            name = "budget_des_familles", config_files_directory = config_files_directory)
    else:
        # Reuse the stored collection when possible; fall back to a fresh one
        # if the configuration does not know about it yet.
        try:
            bdf_survey_collection = SurveyCollection.load(
                collection = 'budget_des_familles', config_files_directory = config_files_directory)
        except ConfigParser.NoOptionError:
            bdf_survey_collection = SurveyCollection(
                name = "budget_des_familles", config_files_directory = config_files_directory)
    input_data_directory = bdf_survey_collection.config.get('data', 'input_directory')
    # NOTE(review): user-specific directory layout -- confirm this special
    # case is still needed.
    if getpass.getuser() == 'benjello':
        input_data_directory = os.path.join(os.path.dirname(input_data_directory), 'INSEE')
    else:
        input_data_directory = os.path.dirname(input_data_directory)
    for year in years:
        data_directory_path = os.path.join(
            input_data_directory,
            'budget_des_familles/{}'.format(year)
            )
        data_file_by_format = create_data_file_by_format(data_directory_path)
        survey_name = 'budget_des_familles_{}'.format(year)
        add_survey_to_collection(
            survey_name = survey_name,
            survey_collection = bdf_survey_collection,
            stata_files = data_file_by_format['stata'],
            )
        collections_directory = bdf_survey_collection.config.get('collections', 'collections_directory')
        collection_json_path = os.path.join(collections_directory, "budget_des_familles" + ".json")
        bdf_survey_collection.dump(json_file_path = collection_json_path)
        surveys = [survey for survey in bdf_survey_collection.surveys if survey.name.endswith(str(year))]
        # print(...) form is valid in both Python 2 and 3; output unchanged.
        print(surveys)
        bdf_survey_collection.fill_hdf(source_format = 'stata', surveys = surveys, overwrite = overwrite)
    return bdf_survey_collection
if __name__ == '__main__':
    years = [2011]
    # Fixed SyntaxError: `overwrite` was passed positionally after keyword
    # arguments; pass it explicitly as a keyword.
    bdf_survey_collection = build_bdf_survey_collection(years = years, erase = False, overwrite = True)
add overwrite=True in builder
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014, 2015 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ConfigParser
import getpass
import logging
import os
from openfisca_survey_manager.scripts.surv import add_survey_to_collection, create_data_file_by_format
from openfisca_survey_manager.survey_collections import SurveyCollection
from openfisca_france_data import default_config_files_directory as config_files_directory
log = logging.getLogger(__name__)
def build_bdf_survey_collection(years = None, erase = False, overwrite = False):
    """Build or update the 'budget_des_familles' survey collection.

    years: list of survey years to register (e.g. [2011]); required.
    erase: if True, start from a fresh collection instead of loading the
        stored one.
    overwrite: passed through to fill_hdf to overwrite existing HDF data.
    Returns the SurveyCollection instance.
    """
    if years is None:
        log.error("A list of years to process is needed")
        # Fail fast instead of crashing later with a TypeError when iterating
        # over None below.
        raise ValueError("A list of years to process is needed")
    if erase:
        bdf_survey_collection = SurveyCollection(
            name = "budget_des_familles", config_files_directory = config_files_directory)
    else:
        # Reuse the stored collection when possible; fall back to a fresh one
        # if the configuration does not know about it yet.
        try:
            bdf_survey_collection = SurveyCollection.load(
                collection = 'budget_des_familles', config_files_directory = config_files_directory)
        except ConfigParser.NoOptionError:
            bdf_survey_collection = SurveyCollection(
                name = "budget_des_familles", config_files_directory = config_files_directory)
    input_data_directory = bdf_survey_collection.config.get('data', 'input_directory')
    # NOTE(review): user-specific directory layout -- confirm this special
    # case is still needed.
    if getpass.getuser() == 'benjello':
        input_data_directory = os.path.join(os.path.dirname(input_data_directory), 'INSEE')
    else:
        input_data_directory = os.path.dirname(input_data_directory)
    for year in years:
        data_directory_path = os.path.join(
            input_data_directory,
            'budget_des_familles/{}'.format(year)
            )
        data_file_by_format = create_data_file_by_format(data_directory_path)
        survey_name = 'budget_des_familles_{}'.format(year)
        add_survey_to_collection(
            survey_name = survey_name,
            survey_collection = bdf_survey_collection,
            stata_files = data_file_by_format['stata'],
            )
        collections_directory = bdf_survey_collection.config.get('collections', 'collections_directory')
        collection_json_path = os.path.join(collections_directory, "budget_des_familles" + ".json")
        bdf_survey_collection.dump(json_file_path = collection_json_path)
        surveys = [survey for survey in bdf_survey_collection.surveys if survey.name.endswith(str(year))]
        # print(...) form is valid in both Python 2 and 3; output unchanged.
        print(surveys)
        bdf_survey_collection.fill_hdf(source_format = 'stata', surveys = surveys, overwrite = overwrite)
    return bdf_survey_collection
if __name__ == '__main__':
    # Rebuild the 2011 collection in place, overwriting existing HDF output.
    years = [2011]
    bdf_survey_collection = build_bdf_survey_collection(years = years, erase = False, overwrite = True)
|
import argparse
import os
import time
import glob
import pprint
import numpy as np
from functools import partial
import preprocessing
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.framework import convert_to_constants
# Lookup table of supported image-classification models: which preprocessing
# scheme each expects ('vgg' or 'inception'), its square input resolution,
# and its output class count. num_classes == 1001 presumably means the
# checkpoint includes a background class (see the `adjust` logic downstream)
# -- confirm per model.
NETS = {
    'mobilenet_v1': {
        'preprocess':'inception',
        'input_size':224,
        'num_classes':1001},
    'mobilenet_v2': {
        'preprocess':'inception',
        'input_size':224,
        'num_classes':1001},
    'nasnet_mobile': {
        'preprocess':'inception',
        'input_size':224,
        'num_classes':1001},
    'nasnet_large': {
        'preprocess':'inception',
        'input_size':331,
        'num_classes':1001},
    'resnet_v1.5_50_tfv2': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1000},
    'resnet_v1_50': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1001},
    'resnet_v2_50': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1001},
    'resnet_v2_152': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1001},
    'vgg_16': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1000},
    'vgg_19': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1000},
    'inception_v3': {
        'preprocess':'inception',
        'input_size':299,
        'num_classes':1001},
    'inception_v4': {
        'preprocess':'inception',
        'input_size':299,
        'num_classes':1001},
}
def get_input_size(model):
    """Return the square input resolution (pixels) expected by *model*."""
    net_info = NETS[model]
    return net_info['input_size']
def get_preprocessing(model):
    """Return the preprocessing callable matching *model*'s training scheme."""
    uses_vgg_scheme = NETS[model]['preprocess'] == 'vgg'
    return preprocessing.vgg_preprocess if uses_vgg_scheme \
        else preprocessing.inception_preprocess
def get_num_classes(model):
    """Return the number of output classes *model* predicts."""
    net_info = NETS[model]
    return net_info['num_classes']
def deserialize_image_record(record):
    """Parse one serialized ImageNet-style TFRecord into (jpeg_bytes, label).

    Only 'image/encoded' and 'image/class/label' are consumed below; the
    bbox/text features are declared so parsing tolerates records containing
    them.
    """
    feature_map = {
        'image/encoded': tf.io.FixedLenFeature([ ], tf.string, ''),
        'image/class/label': tf.io.FixedLenFeature([1], tf.int64, -1),
        'image/class/text': tf.io.FixedLenFeature([ ], tf.string, ''),
        'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32)
    }
    with tf.compat.v1.name_scope('deserialize_image_record'):
        obj = tf.io.parse_single_example(serialized=record, features=feature_map)
        imgdata = obj['image/encoded']
        # Cast to int32 for downstream comparison with argmax outputs.
        label = tf.cast(obj['image/class/label'], tf.int32)
        return imgdata, label
def get_preprocess_fn(model, mode='validation'):
    """Creates a function to parse and process a TFRecord using the model's parameters

    model: string, the model name (see NETS table)
    mode: string, which mode to use (validation or benchmark)
    returns: function, the preprocessing function for a record
    """
    preprocess_fn = get_preprocessing(model)
    input_size = get_input_size(model)
    def validation_process(record):
        # Parse TFRecord
        imgdata, label = deserialize_image_record(record)
        label -= 1 # Change to 0-based (don't use background class)
        # NOTE(review): bare except silently falls back to PNG decoding for
        # *any* error; inside a tf.data map this try/except likely never fires
        # at trace time -- confirm intended behavior.
        try: image = tf.image.decode_jpeg(imgdata, channels=3, fancy_upscaling=False, dct_method='INTEGER_FAST')
        except: image = tf.image.decode_png(imgdata, channels=3)
        # Use model's preprocessing function
        image = preprocess_fn(image, input_size, input_size)
        return image, label
    def benchmark_process(path):
        # Benchmark mode reads raw .jpg files straight from disk; no labels.
        image = tf.io.read_file(path)
        image = tf.image.decode_jpeg(image, channels=3)
        image = preprocess_fn(image, input_size, input_size)
        return image
    if mode == 'validation':
        return validation_process
    elif mode == 'benchmark':
        return benchmark_process
    else:
        raise ValueError("Mode must be either 'validation' or 'benchmark'")
def get_dataset(model, data_files, batch_size, use_synthetic, mode='validation'):
    """Build the input tf.data.Dataset for validation or benchmarking.

    model: str, model name (see NETS)
    data_files: TFRecord paths (validation) or image paths (benchmark)
    batch_size: int, images per batch
    use_synthetic: bool, if True ignore data_files and repeat one random batch
    mode: 'validation' (yields (image, label)) or 'benchmark' (yields image)
    """
    if use_synthetic:
        input_size = get_input_size(model)
        # One fixed random batch, repeated forever -- removes I/O from timing.
        features = np.random.normal(
            loc=112, scale=70,
            size=(batch_size, input_size, input_size, 3)).astype(np.float32)
        features = np.clip(features, 0.0, 255.0)
        features = tf.convert_to_tensor(value=tf.compat.v1.get_variable(
            "features", dtype=tf.float32, initializer=tf.constant(features)))
        dataset = tf.data.Dataset.from_tensor_slices([features])
        dataset = dataset.repeat()
    else:
        # preprocess function for input data
        # (removed unused local `num_classes`)
        preprocess_fn = get_preprocess_fn(model, mode)
        if mode == 'validation':
            dataset = tf.data.TFRecordDataset(data_files)
            dataset = dataset.map(map_func=preprocess_fn, num_parallel_calls=8)
            dataset = dataset.batch(batch_size=batch_size)
            dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
            dataset = dataset.repeat(count=1)
        elif mode == 'benchmark':
            dataset = tf.data.Dataset.from_tensor_slices(data_files)
            dataset = dataset.map(map_func=preprocess_fn, num_parallel_calls=8)
            dataset = dataset.batch(batch_size=batch_size)
            dataset = dataset.repeat(count=1)
        else:
            raise ValueError("Mode must be either 'validation' or 'benchmark'")
    return dataset
def get_func_from_saved_model(saved_model_dir):
    """Load a SavedModel and return its default serving signature function."""
    loaded = tf.saved_model.load(saved_model_dir, tags=[tag_constants.SERVING])
    return loaded.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
def get_num_ops(func, trt=False):
    """Returns number of ops in func.

    If trt is set, returns only the number of TRTEngineOp in func.
    """
    nodes = func.graph.as_graph_def().node
    if trt:
        return sum(1 for n in nodes if str(n.op) == 'TRTEngineOp')
    return len(nodes)
def get_graph_func(model,
                   conversion_params=trt.DEFAULT_TRT_CONVERSION_PARAMS,
                   model_dir=None,
                   use_trt=False,
                   calib_files=None,
                   num_calib_inputs=None,
                   use_synthetic=False,
                   batch_size=None,
                   saved_model_dir=None,
                   optimize_offline=False,
                   root_saved_model_dir='./saved_models'):
    """Retrieves a frozen SavedModel and applies TF-TRT.

    model: str, the model name
    use_trt: bool, if true use TensorRT
    conversion_params: TrtConversionParams (precision, workspace, ...)
    calib_files: TFRecord paths used for INT8 calibration
    num_calib_inputs: int, number of calibration images
    batch_size: int, batch size for TensorRT optimizations
    returns: (concrete function, op-count dict, timing dict)
    """
    num_nodes = {}
    saved_model_dir = saved_model_dir or os.path.join(root_saved_model_dir, model)
    start_time = time.time()
    graph_func = get_func_from_saved_model(saved_model_dir)
    num_nodes['native_tf'] = get_num_ops(graph_func)
    if use_trt:
        converter = trt.TrtGraphConverterV2(
            input_saved_model_dir=saved_model_dir,
            conversion_params=conversion_params,
        )
        def input_fn(input_files, num_iterations):
            # Yields real validation batches to the converter (for INT8
            # calibration or offline engine building).
            dataset = get_dataset(
                model, input_files, batch_size, False, 'validation')
            for i, (batch_images, _) in enumerate(dataset):
                if i >= num_iterations:
                    break
                yield (batch_images,)
                print(" step %d/%d" % (i+1, num_iterations))
                # (removed dead `i += 1`: the for loop rebinds i each pass)
        if conversion_params.precision_mode != 'INT8':
            converter.convert()
            converted_saved_model_dir = 'saved_model_{}'.format(model)
            if optimize_offline:
                # NOTE(review): `data_files` is a module-level global set in
                # __main__; calling this as a library with
                # optimize_offline=True would raise NameError -- confirm and
                # thread it through as a parameter.
                converter.build(input_fn=partial(input_fn, data_files, 1))
            converter.save(converted_saved_model_dir)
            graph_func = get_func_from_saved_model(converted_saved_model_dir)
        else:
            converter.convert(calibration_input_fn=partial(
                input_fn, calib_files, num_calib_inputs//batch_size))
            if optimize_offline:
                converter.build(input_fn=partial(input_fn, data_files, 1))
            # Fixed 'savd_model_' typo so both precision branches use the
            # same 'saved_model_<model>' directory naming scheme.
            converted_saved_model_dir = 'saved_model_{}'.format(model)
            converter.save(converted_saved_model_dir)
            graph_func = get_func_from_saved_model(converted_saved_model_dir)
        # (removed unused `converted_graph_def = converter._converted_graph_def`,
        #  which also reached into a private attribute)
        num_nodes['tftrt_total'] = get_num_ops(graph_func)
        num_nodes['trt_only'] = get_num_ops(graph_func, trt=True)
    return graph_func, num_nodes, {'conversion': time.time() - start_time}
def eval_fn(model, preds, labels, adjust):
    """Count how many top-1 predictions in a batch match their labels.

    preds and labels are numpy arrays; `adjust` shifts the argmax index to
    compensate for checkpoints trained with an extra background class.
    """
    top1 = np.argmax(preds, axis=1).reshape(-1) - adjust
    hits = (labels.reshape(-1) == top1).astype(np.float32)
    return np.sum(hits)
def run(graph_func, model, data_files, batch_size,
        num_iterations, num_warmup_iterations, use_synthetic, display_every=100,
        mode='validation', target_duration=None):
    '''Run the given graph_func on the data files provided. In validation mode,
    it consumes TFRecords with labels and reports accuracy. In benchmark mode, it
    times inference on real data (.jpgs).'''
    def get_inference_func(func):
        # Freeze variables into constants to minimize per-call overhead.
        frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)
        def wrap_func(*args, **kwargs):
            # Assumes frozen_func has one output tensor
            return frozen_func(*args, **kwargs)[0]
        return wrap_func
    graph_func = get_inference_func(graph_func)
    results = {}
    corrects = 0
    iter_times = []
    # 1001-class checkpoints include a background class; shift argmax by 1.
    adjust = 1 if get_num_classes(model) == 1001 else 0
    initial_time = time.time()
    dataset = get_dataset(model, data_files, batch_size, use_synthetic, mode)
    if mode == 'validation':
        for i, (batch_feats, batch_labels) in enumerate(dataset):
            start_time = time.time()
            batch_preds = graph_func(batch_feats).numpy()
            end_time = time.time()
            iter_times.append(end_time - start_time)
            if i % display_every == 0:
                print(" step %d/%d, iter_time(ms)=%.0f" %
                    (i+1, 50000//batch_size, iter_times[-1]*1000))
            corrects += eval_fn(
                model, batch_preds, batch_labels.numpy(), adjust)
            if i > 1 and target_duration is not None and \
                time.time() - initial_time > target_duration:
                break
        # NOTE(review): divides by batch_size*i although i+1 batches were
        # consumed, and assumes every batch is full -- confirm off-by-one.
        accuracy = corrects / (batch_size * i)
        results['accuracy'] = accuracy
    elif mode == 'benchmark':
        for i, batch_feats in enumerate(dataset):
            # Warmup iterations still run inference but are not timed.
            if i >= num_warmup_iterations:
                start_time = time.time()
                outs = graph_func(batch_feats)
                iter_times.append(time.time() - start_time)
                if i % display_every == 0:
                    print(" step %d/%d, iter_time(ms)=%.0f" %
                        (i+1, num_iterations, iter_times[-1]*1000))
            else:
                outs = graph_func(batch_feats)
            if i > 0 and target_duration is not None and \
                time.time() - initial_time > target_duration:
                break
            if num_iterations is not None and i >= num_iterations:
                break
    if not iter_times:
        return results
    iter_times = np.array(iter_times)
    # Drop warmup iterations from the latency statistics.
    iter_times = iter_times[num_warmup_iterations:]
    results['total_time'] = np.sum(iter_times)
    results['images_per_sec'] = np.mean(batch_size / iter_times)
    results['99th_percentile'] = np.percentile(iter_times, q=99, interpolation='lower') * 1000
    results['latency_mean'] = np.mean(iter_times) * 1000
    results['latency_median'] = np.median(iter_times) * 1000
    results['latency_min'] = np.min(iter_times) * 1000
    return results
def get_trt_conversion_params(max_workspace_size_bytes,
                              precision_mode,
                              minimum_segment_size,
                              max_batch_size):
    """Assemble TF-TRT conversion parameters from the CLI options.

    Calibration is enabled exactly when precision_mode is 'INT8'.
    """
    return trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
        max_workspace_size_bytes=max_workspace_size_bytes,
        precision_mode=precision_mode,
        minimum_segment_size=minimum_segment_size,
        use_calibration=(precision_mode == 'INT8'),
        max_batch_size=max_batch_size)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Evaluate model')
    parser.add_argument('--model', type=str, default='resnet_v1_50',
                        choices=list(NETS.keys()),
                        help='Which model to use.')
    parser.add_argument('--data_dir', type=str, default=None,
                        help='Directory containing validation set TFRecord files.')
    parser.add_argument('--calib_data_dir', type=str,
                        help='Directory containing TFRecord files for calibrating INT8.')
    parser.add_argument('--root_saved_model_dir', type=str, default=None,
                        help='Directory containing saved models.')
    parser.add_argument('--saved_model_dir', type=str, default=None,
                        help='Directory containing a particular saved model.')
    parser.add_argument('--use_trt', action='store_true',
                        help='If set, the graph will be converted to a TensorRT graph.')
    parser.add_argument('--optimize_offline', action='store_true',
                        help='If set, TensorRT engines are built before runtime.')
    parser.add_argument('--precision', type=str, choices=['FP32', 'FP16', 'INT8'], default='FP32',
                        help='Precision mode to use. FP16 and INT8 only work in conjunction with --use_trt')
    parser.add_argument('--batch_size', type=int, default=8,
                        help='Number of images per batch.')
    parser.add_argument('--minimum_segment_size', type=int, default=2,
                        help='Minimum number of TF ops in a TRT engine.')
    parser.add_argument('--num_iterations', type=int, default=2048,
                        help='How many iterations(batches) to evaluate. If not supplied, the whole set will be evaluated.')
    parser.add_argument('--display_every', type=int, default=100,
                        help='Number of iterations executed between two consecutive display of metrics')
    parser.add_argument('--use_synthetic', action='store_true',
                        help='If set, one batch of random data is generated and used at every iteration.')
    parser.add_argument('--num_warmup_iterations', type=int, default=50,
                        help='Number of initial iterations skipped from timing')
    parser.add_argument('--num_calib_inputs', type=int, default=500,
                        help='Number of inputs (e.g. images) used for calibration '
                        '(last batch is skipped in case it is not full)')
    parser.add_argument('--max_workspace_size', type=int, default=(1<<30),
                        help='workspace size in bytes')
    parser.add_argument('--mode', choices=['validation', 'benchmark'], default='validation',
                        help='Which mode to use (validation or benchmark)')
    parser.add_argument('--target_duration', type=int, default=None,
                        help='If set, script will run for specified number of seconds.')
    args = parser.parse_args()
    # ---- Reject impossible option combinations before any heavy setup ----
    if args.precision != 'FP32' and not args.use_trt:
        raise ValueError('TensorRT must be enabled for FP16 or INT8 modes (--use_trt).')
    if args.precision == 'INT8' and not args.calib_data_dir and not args.use_synthetic:
        raise ValueError('--calib_data_dir is required for INT8 mode')
    if args.num_iterations is not None and args.num_iterations <= args.num_warmup_iterations:
        raise ValueError('--num_iterations must be larger than --num_warmup_iterations '
                         '({} <= {})'.format(args.num_iterations, args.num_warmup_iterations))
    if args.num_calib_inputs < args.batch_size:
        raise ValueError('--num_calib_inputs must not be smaller than --batch_size'
                         '({} <= {})'.format(args.num_calib_inputs, args.batch_size))
    if args.mode == 'validation' and args.use_synthetic:
        raise ValueError('Cannot use both validation mode and synthetic dataset')
    if args.data_dir is None and not args.use_synthetic:
        raise ValueError("--data_dir required if you are not using synthetic data")
    if args.use_synthetic and args.num_iterations is None:
        raise ValueError("--num_iterations is required for --use_synthetic")
    if args.root_saved_model_dir is None and args.saved_model_dir is None:
        raise ValueError("Please set one of --root_saved_model_dir or --saved_model_dir")
    if args.root_saved_model_dir is not None and args.saved_model_dir is not None:
        print("Both --root_saved_model_dir and --saved_model_dir are set.\n \
            Using saved_model_dir:{}".format(args.saved_model_dir))
    # ---- Discover input files ----
    calib_files = []
    data_files = []
    def get_files(data_dir, filename_pattern):
        # Glob helper; raises if the pattern matches nothing.
        if data_dir is None:
            return []
        files = tf.io.gfile.glob(os.path.join(data_dir, filename_pattern))
        if not files:
            raise ValueError('Can not find any files in {} with '
                             'pattern "{}"'.format(data_dir, filename_pattern))
        return files
    if not args.use_synthetic:
        if args.mode == "validation":
            data_files = get_files(args.data_dir, 'validation*')
        elif args.mode == "benchmark":
            data_files = [os.path.join(path, name) for path, _, files in os.walk(args.data_dir) for name in files]
        else:
            # Fixed typo in the error message ('benchamark').
            raise ValueError("Mode must be either 'validation' or 'benchmark'")
        if args.precision == 'INT8':
            calib_files = get_files(args.calib_data_dir, 'train*')
    # ---- Build (and optionally TRT-convert) the inference function ----
    params = get_trt_conversion_params(
        args.max_workspace_size,
        args.precision,
        args.minimum_segment_size,
        args.batch_size,)
    graph_func, num_nodes, times = get_graph_func(
        model=args.model,
        conversion_params=params,
        use_trt=args.use_trt,
        calib_files=calib_files,
        batch_size=args.batch_size,
        num_calib_inputs=args.num_calib_inputs,
        use_synthetic=args.use_synthetic,
        saved_model_dir=args.saved_model_dir,
        optimize_offline=args.optimize_offline,
        root_saved_model_dir=args.root_saved_model_dir)
    def print_dict(input_dict, prefix=''):
        # Pretty-print a dict; `prefix` labels each line. (Renamed from `str`,
        # which shadowed the builtin.)
        for k, v in sorted(input_dict.items()):
            headline = '{}({}): '.format(prefix, k) if prefix else '{}: '.format(k)
            print('{}{}'.format(headline, '%.1f'%v if type(v)==float else v))
    print_dict(vars(args))
    print('TensorRT Conversion Params:')
    pprint.pprint(params)
    pprint.pprint(num_nodes)
    pprint.pprint(times)
    # ---- Run evaluation / benchmark and report metrics ----
    results = run(
        graph_func,
        model=args.model,
        data_files=data_files,
        batch_size=args.batch_size,
        num_iterations=args.num_iterations,
        num_warmup_iterations=args.num_warmup_iterations,
        use_synthetic=args.use_synthetic,
        display_every=args.display_every,
        mode=args.mode,
        target_duration=args.target_duration)
    if args.mode == 'validation':
        print(' accuracy: %.2f' % (results['accuracy'] * 100))
    print(' images/sec: %d' % results['images_per_sec'])
    print(' 99th_percentile(ms): %.2f' % results['99th_percentile'])
    print(' total_time(s): %.1f' % results['total_time'])
    print(' latency_mean(ms): %.2f' % results['latency_mean'])
    print(' latency_median(ms): %.2f' % results['latency_median'])
    print(' latency_min(ms): %.2f' % results['latency_min'])
Move down import of internal module
import argparse
import os
import time
import glob
import pprint
import numpy as np
from functools import partial
import tensorflow as tf
from tensorflow.python.compiler.tensorrt import trt_convert as trt
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import tag_constants
from tensorflow.python.framework import convert_to_constants
import preprocessing
# Lookup table of supported image-classification models: which preprocessing
# scheme each expects ('vgg' or 'inception'), its square input resolution,
# and its output class count. num_classes == 1001 presumably means the
# checkpoint includes a background class (see the `adjust` logic downstream)
# -- confirm per model.
NETS = {
    'mobilenet_v1': {
        'preprocess':'inception',
        'input_size':224,
        'num_classes':1001},
    'mobilenet_v2': {
        'preprocess':'inception',
        'input_size':224,
        'num_classes':1001},
    'nasnet_mobile': {
        'preprocess':'inception',
        'input_size':224,
        'num_classes':1001},
    'nasnet_large': {
        'preprocess':'inception',
        'input_size':331,
        'num_classes':1001},
    'resnet_v1.5_50_tfv2': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1000},
    'resnet_v1_50': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1001},
    'resnet_v2_50': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1001},
    'resnet_v2_152': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1001},
    'vgg_16': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1000},
    'vgg_19': {
        'preprocess':'vgg',
        'input_size':224,
        'num_classes':1000},
    'inception_v3': {
        'preprocess':'inception',
        'input_size':299,
        'num_classes':1001},
    'inception_v4': {
        'preprocess':'inception',
        'input_size':299,
        'num_classes':1001},
}
def get_input_size(model):
    """Return the square input resolution (pixels) expected by *model*."""
    net_info = NETS[model]
    return net_info['input_size']
def get_preprocessing(model):
    """Return the preprocessing callable matching *model*'s training scheme."""
    uses_vgg_scheme = NETS[model]['preprocess'] == 'vgg'
    return preprocessing.vgg_preprocess if uses_vgg_scheme \
        else preprocessing.inception_preprocess
def get_num_classes(model):
    """Return the number of output classes *model* predicts."""
    net_info = NETS[model]
    return net_info['num_classes']
def deserialize_image_record(record):
    """Parse one serialized ImageNet-style TFRecord into (jpeg_bytes, label).

    Only 'image/encoded' and 'image/class/label' are consumed below; the
    bbox/text features are declared so parsing tolerates records containing
    them.
    """
    feature_map = {
        'image/encoded': tf.io.FixedLenFeature([ ], tf.string, ''),
        'image/class/label': tf.io.FixedLenFeature([1], tf.int64, -1),
        'image/class/text': tf.io.FixedLenFeature([ ], tf.string, ''),
        'image/object/bbox/xmin': tf.io.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymin': tf.io.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/xmax': tf.io.VarLenFeature(dtype=tf.float32),
        'image/object/bbox/ymax': tf.io.VarLenFeature(dtype=tf.float32)
    }
    with tf.compat.v1.name_scope('deserialize_image_record'):
        obj = tf.io.parse_single_example(serialized=record, features=feature_map)
        imgdata = obj['image/encoded']
        # Cast to int32 for downstream comparison with argmax outputs.
        label = tf.cast(obj['image/class/label'], tf.int32)
        return imgdata, label
def get_preprocess_fn(model, mode='validation'):
    """Creates a function to parse and process a TFRecord using the model's parameters

    model: string, the model name (see NETS table)
    mode: string, which mode to use (validation or benchmark)
    returns: function, the preprocessing function for a record
    """
    preprocess_fn = get_preprocessing(model)
    input_size = get_input_size(model)
    def validation_process(record):
        # Parse TFRecord
        imgdata, label = deserialize_image_record(record)
        label -= 1 # Change to 0-based (don't use background class)
        # NOTE(review): bare except silently falls back to PNG decoding for
        # *any* error; inside a tf.data map this try/except likely never fires
        # at trace time -- confirm intended behavior.
        try: image = tf.image.decode_jpeg(imgdata, channels=3, fancy_upscaling=False, dct_method='INTEGER_FAST')
        except: image = tf.image.decode_png(imgdata, channels=3)
        # Use model's preprocessing function
        image = preprocess_fn(image, input_size, input_size)
        return image, label
    def benchmark_process(path):
        # Benchmark mode reads raw .jpg files straight from disk; no labels.
        image = tf.io.read_file(path)
        image = tf.image.decode_jpeg(image, channels=3)
        image = preprocess_fn(image, input_size, input_size)
        return image
    if mode == 'validation':
        return validation_process
    elif mode == 'benchmark':
        return benchmark_process
    else:
        raise ValueError("Mode must be either 'validation' or 'benchmark'")
def get_dataset(model, data_files, batch_size, use_synthetic, mode='validation'):
    """Build the input tf.data.Dataset for validation or benchmarking.

    model: str, model name (see NETS)
    data_files: TFRecord paths (validation) or image paths (benchmark)
    batch_size: int, images per batch
    use_synthetic: bool, if True ignore data_files and repeat one random batch
    mode: 'validation' (yields (image, label)) or 'benchmark' (yields image)
    """
    if use_synthetic:
        input_size = get_input_size(model)
        # One fixed random batch, repeated forever -- removes I/O from timing.
        features = np.random.normal(
            loc=112, scale=70,
            size=(batch_size, input_size, input_size, 3)).astype(np.float32)
        features = np.clip(features, 0.0, 255.0)
        features = tf.convert_to_tensor(value=tf.compat.v1.get_variable(
            "features", dtype=tf.float32, initializer=tf.constant(features)))
        dataset = tf.data.Dataset.from_tensor_slices([features])
        dataset = dataset.repeat()
    else:
        # preprocess function for input data
        # (removed unused local `num_classes`)
        preprocess_fn = get_preprocess_fn(model, mode)
        if mode == 'validation':
            dataset = tf.data.TFRecordDataset(data_files)
            dataset = dataset.map(map_func=preprocess_fn, num_parallel_calls=8)
            dataset = dataset.batch(batch_size=batch_size)
            dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
            dataset = dataset.repeat(count=1)
        elif mode == 'benchmark':
            dataset = tf.data.Dataset.from_tensor_slices(data_files)
            dataset = dataset.map(map_func=preprocess_fn, num_parallel_calls=8)
            dataset = dataset.batch(batch_size=batch_size)
            dataset = dataset.repeat(count=1)
        else:
            raise ValueError("Mode must be either 'validation' or 'benchmark'")
    return dataset
def get_func_from_saved_model(saved_model_dir):
    """Load a SavedModel and return its default serving signature function."""
    loaded = tf.saved_model.load(saved_model_dir, tags=[tag_constants.SERVING])
    return loaded.signatures[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
def get_num_ops(func, trt=False):
    """Returns number of ops in func.

    If trt is set, returns only the number of TRTEngineOp in func.
    """
    nodes = func.graph.as_graph_def().node
    if trt:
        return sum(1 for n in nodes if str(n.op) == 'TRTEngineOp')
    return len(nodes)
def get_graph_func(model,
                   conversion_params=trt.DEFAULT_TRT_CONVERSION_PARAMS,
                   model_dir=None,
                   use_trt=False,
                   calib_files=None,
                   num_calib_inputs=None,
                   use_synthetic=False,
                   batch_size=None,
                   saved_model_dir=None,
                   optimize_offline=False,
                   root_saved_model_dir='./saved_models'):
    """Retrieves a frozen SavedModel and applies TF-TRT.

    model: str, the model name
    use_trt: bool, if true use TensorRT
    conversion_params: TrtConversionParams (precision, workspace, ...)
    calib_files: TFRecord paths used for INT8 calibration
    num_calib_inputs: int, number of calibration images
    batch_size: int, batch size for TensorRT optimizations
    returns: (concrete function, op-count dict, timing dict)
    """
    num_nodes = {}
    saved_model_dir = saved_model_dir or os.path.join(root_saved_model_dir, model)
    start_time = time.time()
    graph_func = get_func_from_saved_model(saved_model_dir)
    num_nodes['native_tf'] = get_num_ops(graph_func)
    if use_trt:
        converter = trt.TrtGraphConverterV2(
            input_saved_model_dir=saved_model_dir,
            conversion_params=conversion_params,
        )
        def input_fn(input_files, num_iterations):
            # Yields real validation batches to the converter (for INT8
            # calibration or offline engine building).
            dataset = get_dataset(
                model, input_files, batch_size, False, 'validation')
            for i, (batch_images, _) in enumerate(dataset):
                if i >= num_iterations:
                    break
                yield (batch_images,)
                print(" step %d/%d" % (i+1, num_iterations))
                # (removed dead `i += 1`: the for loop rebinds i each pass)
        if conversion_params.precision_mode != 'INT8':
            converter.convert()
            converted_saved_model_dir = 'saved_model_{}'.format(model)
            if optimize_offline:
                # NOTE(review): `data_files` is a module-level global set in
                # __main__; calling this as a library with
                # optimize_offline=True would raise NameError -- confirm and
                # thread it through as a parameter.
                converter.build(input_fn=partial(input_fn, data_files, 1))
            converter.save(converted_saved_model_dir)
            graph_func = get_func_from_saved_model(converted_saved_model_dir)
        else:
            converter.convert(calibration_input_fn=partial(
                input_fn, calib_files, num_calib_inputs//batch_size))
            if optimize_offline:
                converter.build(input_fn=partial(input_fn, data_files, 1))
            # Fixed 'savd_model_' typo so both precision branches use the
            # same 'saved_model_<model>' directory naming scheme.
            converted_saved_model_dir = 'saved_model_{}'.format(model)
            converter.save(converted_saved_model_dir)
            graph_func = get_func_from_saved_model(converted_saved_model_dir)
        # (removed unused `converted_graph_def = converter._converted_graph_def`,
        #  which also reached into a private attribute)
        num_nodes['tftrt_total'] = get_num_ops(graph_func)
        num_nodes['trt_only'] = get_num_ops(graph_func, trt=True)
    return graph_func, num_nodes, {'conversion': time.time() - start_time}
def eval_fn(model, preds, labels, adjust):
    """Count the correctly predicted labels in a batch.

    Args:
        model: unused here; kept for a uniform eval-function signature.
        preds: numpy array of per-class scores, shape (batch, num_classes).
        labels: numpy array of integer ground-truth labels.
        adjust: int subtracted from the argmax (1 for 1001-class models
            trained with a background class, else 0).

    Returns:
        np.float32 count of matches in the batch.
    """
    predicted = np.argmax(preds, axis=1).reshape(-1) - adjust
    matches = labels.reshape(-1) == predicted
    return np.sum(matches.astype(np.float32))
def run(graph_func, model, data_files, batch_size,
        num_iterations, num_warmup_iterations, use_synthetic, display_every=100,
        mode='validation', target_duration=None):
    '''Run the given graph_func on the data files provided. In validation mode,
    it consumes TFRecords with labels and reports accuracy. In benchmark mode, it
    times inference on real data (.jpgs).

    Returns a dict with 'accuracy' (validation mode only) plus throughput and
    latency statistics computed over the post-warmup iterations; the dict is
    missing the timing keys when no post-warmup iteration was measured.
    '''
    def get_inference_func(func):
        # Freeze variables into constants so each call is a pure function.
        frozen_func = convert_to_constants.convert_variables_to_constants_v2(func)

        def wrap_func(*args, **kwargs):
            # Assumes frozen_func has one output tensor
            return frozen_func(*args, **kwargs)[0]
        return wrap_func

    graph_func = get_inference_func(graph_func)
    results = {}
    corrects = 0
    iter_times = []
    # Models trained with a background class emit 1001 classes; shift the
    # argmax down by one to align with 1000-class labels.
    adjust = 1 if get_num_classes(model) == 1001 else 0
    initial_time = time.time()
    dataset = get_dataset(model, data_files, batch_size, use_synthetic, mode)
    if mode == 'validation':
        i = -1  # stays -1 when the dataset is empty
        for i, (batch_feats, batch_labels) in enumerate(dataset):
            start_time = time.time()
            batch_preds = graph_func(batch_feats).numpy()
            iter_times.append(time.time() - start_time)
            if i % display_every == 0:
                print(" step %d/%d, iter_time(ms)=%.0f" %
                      (i + 1, 50000 // batch_size, iter_times[-1] * 1000))
            corrects += eval_fn(
                model, batch_preds, batch_labels.numpy(), adjust)
            if i > 1 and target_duration is not None and \
                    time.time() - initial_time > target_duration:
                break
        # BUG FIX: the loop processed i+1 batches (indices 0..i), so the
        # denominator must be batch_size * (i + 1); the old code divided by
        # batch_size * i, inflating accuracy (and dividing by zero for a
        # single-batch dataset).
        if i >= 0:
            results['accuracy'] = corrects / (batch_size * (i + 1))
        # Validation timed every iteration, so drop the warmup samples here.
        iter_times = iter_times[num_warmup_iterations:]
    elif mode == 'benchmark':
        for i, batch_feats in enumerate(dataset):
            if i >= num_warmup_iterations:
                start_time = time.time()
                outs = graph_func(batch_feats)
                iter_times.append(time.time() - start_time)
                if i % display_every == 0:
                    print(" step %d/%d, iter_time(ms)=%.0f" %
                          (i + 1, num_iterations, iter_times[-1] * 1000))
            else:
                # Warmup: run inference but record no timing.
                outs = graph_func(batch_feats)
            if i > 0 and target_duration is not None and \
                    time.time() - initial_time > target_duration:
                break
            if num_iterations is not None and i >= num_iterations:
                break
        # BUG FIX: benchmark mode never appended warmup timings, so the old
        # unconditional `iter_times[num_warmup_iterations:]` slice discarded
        # num_warmup_iterations *measured* samples on top of the warmup.
    if not iter_times:
        return results
    iter_times = np.array(iter_times)
    results['total_time'] = np.sum(iter_times)
    results['images_per_sec'] = np.mean(batch_size / iter_times)
    results['99th_percentile'] = np.percentile(iter_times, q=99, interpolation='lower') * 1000
    results['latency_mean'] = np.mean(iter_times) * 1000
    results['latency_median'] = np.median(iter_times) * 1000
    results['latency_min'] = np.min(iter_times) * 1000
    return results
def get_trt_conversion_params(max_workspace_size_bytes,
                              precision_mode,
                              minimum_segment_size,
                              max_batch_size):
    """Build a TrtConversionParams from the library defaults.

    All four tunables are overridden in one `_replace` call; calibration is
    enabled exactly when the precision mode is INT8.
    """
    return trt.DEFAULT_TRT_CONVERSION_PARAMS._replace(
        max_workspace_size_bytes=max_workspace_size_bytes,
        precision_mode=precision_mode,
        minimum_segment_size=minimum_segment_size,
        use_calibration=(precision_mode == 'INT8'),
        max_batch_size=max_batch_size,
    )
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Evaluate model')
    parser.add_argument('--model', type=str, default='resnet_v1_50',
                        choices=list(NETS.keys()),
                        help='Which model to use.')
    parser.add_argument('--data_dir', type=str, default=None,
                        help='Directory containing validation set TFRecord files.')
    parser.add_argument('--calib_data_dir', type=str,
                        help='Directory containing TFRecord files for calibrating INT8.')
    parser.add_argument('--root_saved_model_dir', type=str, default=None,
                        help='Directory containing saved models.')
    parser.add_argument('--saved_model_dir', type=str, default=None,
                        help='Directory containing a particular saved model.')
    parser.add_argument('--use_trt', action='store_true',
                        help='If set, the graph will be converted to a TensorRT graph.')
    parser.add_argument('--optimize_offline', action='store_true',
                        help='If set, TensorRT engines are built before runtime.')
    parser.add_argument('--precision', type=str, choices=['FP32', 'FP16', 'INT8'], default='FP32',
                        help='Precision mode to use. FP16 and INT8 only work in conjunction with --use_trt')
    parser.add_argument('--batch_size', type=int, default=8,
                        help='Number of images per batch.')
    parser.add_argument('--minimum_segment_size', type=int, default=2,
                        help='Minimum number of TF ops in a TRT engine.')
    parser.add_argument('--num_iterations', type=int, default=2048,
                        help='How many iterations(batches) to evaluate. If not supplied, the whole set will be evaluated.')
    parser.add_argument('--display_every', type=int, default=100,
                        help='Number of iterations executed between two consecutive display of metrics')
    parser.add_argument('--use_synthetic', action='store_true',
                        help='If set, one batch of random data is generated and used at every iteration.')
    parser.add_argument('--num_warmup_iterations', type=int, default=50,
                        help='Number of initial iterations skipped from timing')
    parser.add_argument('--num_calib_inputs', type=int, default=500,
                        help='Number of inputs (e.g. images) used for calibration '
                        '(last batch is skipped in case it is not full)')
    parser.add_argument('--max_workspace_size', type=int, default=(1<<30),
                        help='workspace size in bytes')
    parser.add_argument('--mode', choices=['validation', 'benchmark'], default='validation',
                        help='Which mode to use (validation or benchmark)')
    parser.add_argument('--target_duration', type=int, default=None,
                        help='If set, script will run for specified number of seconds.')
    args = parser.parse_args()

    # ---- Cross-argument sanity checks -----------------------------------
    if args.precision != 'FP32' and not args.use_trt:
        raise ValueError('TensorRT must be enabled for FP16 or INT8 modes (--use_trt).')
    if args.precision == 'INT8' and not args.calib_data_dir and not args.use_synthetic:
        raise ValueError('--calib_data_dir is required for INT8 mode')
    if args.num_iterations is not None and args.num_iterations <= args.num_warmup_iterations:
        raise ValueError('--num_iterations must be larger than --num_warmup_iterations '
                         '({} <= {})'.format(args.num_iterations, args.num_warmup_iterations))
    if args.num_calib_inputs < args.batch_size:
        raise ValueError('--num_calib_inputs must not be smaller than --batch_size'
                         '({} <= {})'.format(args.num_calib_inputs, args.batch_size))
    if args.mode == 'validation' and args.use_synthetic:
        raise ValueError('Cannot use both validation mode and synthetic dataset')
    if args.data_dir is None and not args.use_synthetic:
        raise ValueError("--data_dir required if you are not using synthetic data")
    if args.use_synthetic and args.num_iterations is None:
        raise ValueError("--num_iterations is required for --use_synthetic")
    if args.root_saved_model_dir is None and args.saved_model_dir is None:
        raise ValueError("Please set one of --root_saved_model_dir or --saved_model_dir")
    if args.root_saved_model_dir is not None and args.saved_model_dir is not None:
        # Cleaned up: the old line-continuation embedded source indentation
        # (a stray backslash run) into the printed message.
        print("Both --root_saved_model_dir and --saved_model_dir are set.\n"
              " Using saved_model_dir:{}".format(args.saved_model_dir))

    calib_files = []
    data_files = []

    def get_files(data_dir, filename_pattern):
        # Glob filename_pattern under data_dir; empty list when dir is unset.
        if data_dir is None:  # was `== None`; identity test is the idiom
            return []
        files = tf.io.gfile.glob(os.path.join(data_dir, filename_pattern))
        if not files:
            raise ValueError('Can not find any files in {} with '
                             'pattern "{}"'.format(data_dir, filename_pattern))
        return files

    if not args.use_synthetic:
        if args.mode == "validation":
            data_files = get_files(args.data_dir, 'validation*')
        elif args.mode == "benchmark":
            data_files = [os.path.join(path, name)
                          for path, _, files in os.walk(args.data_dir)
                          for name in files]
        else:
            # BUG FIX: the message previously read 'benchamark'.
            raise ValueError("Mode must be either 'validation' or 'benchmark'")
        if args.precision == 'INT8':
            calib_files = get_files(args.calib_data_dir, 'train*')

    params = get_trt_conversion_params(
        args.max_workspace_size,
        args.precision,
        args.minimum_segment_size,
        args.batch_size,)
    graph_func, num_nodes, times = get_graph_func(
        model=args.model,
        conversion_params=params,
        use_trt=args.use_trt,
        calib_files=calib_files,
        batch_size=args.batch_size,
        num_calib_inputs=args.num_calib_inputs,
        use_synthetic=args.use_synthetic,
        saved_model_dir=args.saved_model_dir,
        optimize_offline=args.optimize_offline,
        root_saved_model_dir=args.root_saved_model_dir)

    def print_dict(input_dict, prefix=''):
        # Parameter renamed from `str` so the builtin is not shadowed.
        for k, v in sorted(input_dict.items()):
            headline = '{}({}): '.format(prefix, k) if prefix else '{}: '.format(k)
            print('{}{}'.format(headline, '%.1f' % v if type(v) == float else v))

    print_dict(vars(args))
    print('TensorRT Conversion Params:')
    pprint.pprint(params)
    pprint.pprint(num_nodes)
    pprint.pprint(times)

    results = run(
        graph_func,
        model=args.model,
        data_files=data_files,
        batch_size=args.batch_size,
        num_iterations=args.num_iterations,
        num_warmup_iterations=args.num_warmup_iterations,
        use_synthetic=args.use_synthetic,
        display_every=args.display_every,
        mode=args.mode,
        target_duration=args.target_duration)
    if args.mode == 'validation':
        print(' accuracy: %.2f' % (results['accuracy'] * 100))
    print(' images/sec: %d' % results['images_per_sec'])
    print(' 99th_percentile(ms): %.2f' % results['99th_percentile'])
    print(' total_time(s): %.1f' % results['total_time'])
    print(' latency_mean(ms): %.2f' % results['latency_mean'])
    print(' latency_median(ms): %.2f' % results['latency_median'])
    print(' latency_min(ms): %.2f' % results['latency_min'])
|
from hy.lang.expression import HYExpression
from hy.lex.errors import LexException
from hy.lang.string import HYString
from hy.lang.symbol import HYSymbol
from hy.lang.number import HYNumber
from hy.lex.machine import Machine
from hy.lang.list import HYList
from hy.lang.bool import HYBool
from hy.lang.map import HYMap
WHITESPACE = [" ", "\t", "\n", "\r"]


def _resolve_atom(value):
    """Map a raw token string onto a HY object.

    Booleans win first; anything HYNumber accepts becomes a number;
    everything else is a symbol.

    BUG FIX: the old check used str.isdigit(), which rejects negative
    numbers (e.g. "-1"), turning them into symbols. Delegating to
    HYNumber() and catching ValueError handles them correctly.
    """
    if value == "true":
        return HYBool(True)
    elif value == "false":
        return HYBool(False)
    try:
        return HYNumber(value)
    except ValueError:
        pass
    return HYSymbol(value)
class State(object):
    """Base class for lexer states.

    Holds the owning Machine and, while a nested form is being lexed,
    a sub-machine that receives all input until it returns to Idle.
    """

    def __init__(self, machine):
        self.machine = machine
        self.sub_machine = None  # active only while lexing a nested form

    def enter(self):
        # Hook: invoked when the machine switches into this state.
        pass

    def exit(self):
        # Hook: invoked when the machine switches out of this state.
        pass

    def sub(self, machine):
        # Spawn a nested Machine to lex a sub-form (string/list/map/...).
        self.sub_machine = Machine(machine)

    def process(self, x):
        # While a sub-machine is active, forward every character to it;
        # once it settles back to Idle, splice its nodes into ours and
        # discard it. NOTE(review): assumes the concrete subclass defines
        # `self.nodes` -- true for Expression/List/Map, which are the only
        # states that call sub().
        if self.sub_machine:
            self.sub_machine.process(x)
            idle = type(self.sub_machine.state) == Idle
            if idle:
                self.nodes += self.sub_machine.nodes
                self.sub_machine = None
            return
        return self.p(x)
class Comment(State):
    """Swallow characters until the end of the line."""

    def p(self, x):
        # A newline terminates the comment and returns to Idle.
        return Idle if x == '\n' else None
class Idle(State):
    """Top-level state: only whitespace, comments and expressions may occur."""

    def p(self, x):
        if x == ";":
            return Comment
        if x == "(":
            return Expression
        if x in WHITESPACE:
            return None
        raise LexException("Unknown char: %s" % (x))
class Expression(State):
    """Lex a parenthesized form into a HYExpression."""

    def enter(self):
        self.nodes = HYExpression([])
        self.bulk = ""

    def exit(self):
        if self.bulk:
            self.nodes.append(_resolve_atom(self.bulk))
        self.machine.nodes.append(self.nodes)

    def commit(self):
        # Flush the accumulated token (if non-blank) as an atom.
        if self.bulk.strip() != "":
            self.nodes.append(_resolve_atom(self.bulk))
        self.bulk = ""

    def p(self, x):
        if x == ")":
            return Idle
        if x in WHITESPACE:
            self.commit()
            return
        # Built at call time: the target classes are defined further down.
        openers = {"\"": String, "(": Expression, "[": List,
                   "{": Map, ";": Comment}
        if x in openers:
            self.sub(openers[x])
            return
        self.bulk += x
class List(State):
    """Lex a bracketed form into a HYList."""

    def enter(self):
        self.nodes = HYList([])
        self.bulk = ""

    def exit(self):
        if self.bulk:
            self.nodes.append(_resolve_atom(self.bulk))
        self.machine.nodes.append(self.nodes)

    def commit(self):
        # Flush the accumulated token (if non-blank) as an atom.
        if self.bulk.strip() != "":
            self.nodes.append(_resolve_atom(self.bulk))
        self.bulk = ""

    def p(self, x):
        if x == "]":
            return Idle
        if x in WHITESPACE:
            self.commit()
            return
        openers = {"\"": String, "[": List, "(": Expression,
                   "{": Map, ";": Comment}
        if x in openers:
            self.sub(openers[x])
            return
        self.bulk += x
class Map(State):
    """Lex a brace-delimited form into a HYMap (alternating key/value atoms)."""

    def enter(self):
        self.nodes = []
        self.bulk = ""

    def exit(self):
        if self.bulk:
            self.nodes.append(_resolve_atom(self.bulk))
        if (len(self.nodes) % 2) != 0:
            raise Exception("Hash map is fucked")
        ret = HYMap({})
        # Pair up consecutive atoms as key/value.
        pairs = iter(self.nodes)
        for key, val in zip(pairs, pairs):
            ret[key] = val
        self.machine.nodes.append(ret)

    def commit(self):
        # Flush the accumulated token (if non-blank) as an atom.
        if self.bulk.strip() != "":
            self.nodes.append(_resolve_atom(self.bulk))
        self.bulk = ""

    def p(self, x):
        if x == "}":
            return Idle
        if x in WHITESPACE:
            self.commit()
            return
        openers = {"\"": String, "[": List, "{": Map,
                   "(": Expression, ";": Comment}
        if x in openers:
            self.sub(openers[x])
            return
        self.bulk += x
class String(State):
    """Lex a double-quoted string literal with backslash escapes."""

    magic = { "n": "\n", "t": "\t", "\\": "\\", "\"": "\"" }

    def enter(self):
        self.buf = ""
        self.esc = False  # True when the previous char was an unconsumed '\'

    def exit(self):
        self.machine.nodes.append(HYString(self.buf))

    def p(self, x):
        # BUG FIX: only *start* an escape when one is not already pending.
        # Previously a backslash always re-armed self.esc, so "\\\\" never
        # emitted a literal backslash and the "\\" entry in `magic` was
        # unreachable.
        if x == "\\" and not self.esc:
            self.esc = True
            return
        if x == "\"" and not self.esc:
            return Idle
        if self.esc and x not in self.magic:
            raise LexException("Unknown escape: \\%s" % (x))
        elif self.esc:
            x = self.magic[x]
            self.esc = False
        self.buf += x
Adjust the atom-resolution logic to handle negative numbers
from hy.lang.expression import HYExpression
from hy.lex.errors import LexException
from hy.lang.string import HYString
from hy.lang.symbol import HYSymbol
from hy.lang.number import HYNumber
from hy.lex.machine import Machine
from hy.lang.list import HYList
from hy.lang.bool import HYBool
from hy.lang.map import HYMap
WHITESPACE = [" ", "\t", "\n", "\r"]


def _resolve_atom(value):
    """Map a raw token onto HYBool, HYNumber or HYSymbol, in that priority."""
    if value in ("true", "false"):
        return HYBool(value == "true")
    try:
        return HYNumber(value)
    except ValueError:
        # Not numeric: fall through to a plain symbol.
        return HYSymbol(value)
class State(object):
    """Base class for lexer states.

    Holds the owning Machine and, while a nested form is being lexed,
    a sub-machine that receives all input until it returns to Idle.
    """

    def __init__(self, machine):
        self.machine = machine
        self.sub_machine = None  # active only while lexing a nested form

    def enter(self):
        # Hook: invoked when the machine switches into this state.
        pass

    def exit(self):
        # Hook: invoked when the machine switches out of this state.
        pass

    def sub(self, machine):
        # Spawn a nested Machine to lex a sub-form (string/list/map/...).
        self.sub_machine = Machine(machine)

    def process(self, x):
        # While a sub-machine is active, forward every character to it;
        # once it settles back to Idle, splice its nodes into ours and
        # discard it. NOTE(review): assumes the concrete subclass defines
        # `self.nodes` -- true for Expression/List/Map, which are the only
        # states that call sub().
        if self.sub_machine:
            self.sub_machine.process(x)
            idle = type(self.sub_machine.state) == Idle
            if idle:
                self.nodes += self.sub_machine.nodes
                self.sub_machine = None
            return
        return self.p(x)
class Comment(State):
    """Swallow characters until the end of the line."""

    def p(self, x):
        # A newline terminates the comment and returns to Idle.
        return Idle if x == '\n' else None
class Idle(State):
    """Top-level state: only whitespace, comments and expressions may occur."""

    def p(self, x):
        if x == ";":
            return Comment
        if x == "(":
            return Expression
        if x in WHITESPACE:
            return None
        raise LexException("Unknown char: %s" % (x))
class Expression(State):
    """Lex a parenthesized form into a HYExpression."""

    def enter(self):
        self.nodes = HYExpression([])
        self.bulk = ""

    def exit(self):
        if self.bulk:
            self.nodes.append(_resolve_atom(self.bulk))
        self.machine.nodes.append(self.nodes)

    def commit(self):
        # Flush the accumulated token (if non-blank) as an atom.
        if self.bulk.strip() != "":
            self.nodes.append(_resolve_atom(self.bulk))
        self.bulk = ""

    def p(self, x):
        if x == ")":
            return Idle
        if x in WHITESPACE:
            self.commit()
            return
        # Built at call time: the target classes are defined further down.
        openers = {"\"": String, "(": Expression, "[": List,
                   "{": Map, ";": Comment}
        if x in openers:
            self.sub(openers[x])
            return
        self.bulk += x
class List(State):
    """Lex a bracketed form into a HYList."""

    def enter(self):
        self.nodes = HYList([])
        self.bulk = ""

    def exit(self):
        if self.bulk:
            self.nodes.append(_resolve_atom(self.bulk))
        self.machine.nodes.append(self.nodes)

    def commit(self):
        # Flush the accumulated token (if non-blank) as an atom.
        if self.bulk.strip() != "":
            self.nodes.append(_resolve_atom(self.bulk))
        self.bulk = ""

    def p(self, x):
        if x == "]":
            return Idle
        if x in WHITESPACE:
            self.commit()
            return
        openers = {"\"": String, "[": List, "(": Expression,
                   "{": Map, ";": Comment}
        if x in openers:
            self.sub(openers[x])
            return
        self.bulk += x
class Map(State):
    """Lex a brace-delimited form into a HYMap (alternating key/value atoms)."""

    def enter(self):
        self.nodes = []
        self.bulk = ""

    def exit(self):
        if self.bulk:
            self.nodes.append(_resolve_atom(self.bulk))
        if (len(self.nodes) % 2) != 0:
            raise Exception("Hash map is fucked")
        ret = HYMap({})
        # Pair up consecutive atoms as key/value.
        pairs = iter(self.nodes)
        for key, val in zip(pairs, pairs):
            ret[key] = val
        self.machine.nodes.append(ret)

    def commit(self):
        # Flush the accumulated token (if non-blank) as an atom.
        if self.bulk.strip() != "":
            self.nodes.append(_resolve_atom(self.bulk))
        self.bulk = ""

    def p(self, x):
        if x == "}":
            return Idle
        if x in WHITESPACE:
            self.commit()
            return
        openers = {"\"": String, "[": List, "{": Map,
                   "(": Expression, ";": Comment}
        if x in openers:
            self.sub(openers[x])
            return
        self.bulk += x
class String(State):
    """Lex a double-quoted string literal with backslash escapes."""

    magic = { "n": "\n", "t": "\t", "\\": "\\", "\"": "\"" }

    def enter(self):
        self.buf = ""
        self.esc = False  # True when the previous char was an unconsumed '\'

    def exit(self):
        self.machine.nodes.append(HYString(self.buf))

    def p(self, x):
        # BUG FIX: only *start* an escape when one is not already pending.
        # Previously a backslash always re-armed self.esc, so "\\\\" never
        # emitted a literal backslash and the "\\" entry in `magic` was
        # unreachable.
        if x == "\\" and not self.esc:
            self.esc = True
            return
        if x == "\"" and not self.esc:
            return Idle
        if self.esc and x not in self.magic:
            raise LexException("Unknown escape: \\%s" % (x))
        elif self.esc:
            x = self.magic[x]
            self.esc = False
        self.buf += x
|
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from rest_framework import serializers
from rest_framework.reverse import reverse
from silver.models import (MeteredFeatureUnitsLog, Customer, Subscription,
MeteredFeature, Plan, Provider, Invoice,
DocumentEntry, ProductCode, Proforma)
class MFUnitsLogUrl(serializers.HyperlinkedRelatedField):
    """Builds the nested URL of a subscription's metered-feature units log."""

    def get_url(self, obj, view_name, request, format):
        url_kwargs = request.parser_context['kwargs']
        route_kwargs = {
            'customer_pk': url_kwargs['customer_pk'],
            'subscription_pk': url_kwargs['subscription_pk'],
            'mf_product_code': obj.product_code.value,
        }
        return self.reverse(view_name, kwargs=route_kwargs, request=request,
                            format=format)
class MeteredFeatureSerializer(serializers.ModelSerializer):
    """Plain (non-hyperlinked) representation of a metered feature."""

    # Expose the related ProductCode by its `value` instead of a pk/URL.
    product_code = serializers.SlugRelatedField(
        slug_field='value',
        queryset=ProductCode.objects.all()
    )

    class Meta:
        model = MeteredFeature
        fields = ('name', 'unit', 'price_per_unit', 'included_units',
                  'product_code')
class MeteredFeatureRelatedField(serializers.HyperlinkedRelatedField):
    """Hyperlinks a metered feature by pk and renders it inline."""

    def get_url(self, obj, view_name, request, format):
        kwargs = {'pk': obj.pk}
        return reverse(view_name, kwargs=kwargs, request=request, format=format)

    # NOTE(review): `to_native` is the DRF 2.x hook; DRF 3 renamed it to
    # `to_representation` -- confirm which DRF version this field targets.
    def to_native(self, obj):
        request = self.context.get('request', None)
        return MeteredFeatureSerializer(obj, context={'request': request}).data
class MeteredFeatureInSubscriptionSerializer(serializers.HyperlinkedModelSerializer):
    """Metered feature as rendered inside a subscription, with a link to its
    per-subscription units log."""

    # source='*' hands the whole feature object to the URL field.
    url = MFUnitsLogUrl(view_name='mf-log-units', source='*', read_only=True)
    product_code = serializers.SlugRelatedField(
        slug_field='value',
        queryset=ProductCode.objects.all()
    )

    class Meta:
        model = MeteredFeature
        fields = ('name', 'unit', 'price_per_unit', 'included_units',
                  'product_code', 'url')
class MeteredFeatureUnitsLogSerializer(serializers.HyperlinkedModelSerializer):
    """Read-only view of the units consumed during one billing interval."""

    # The 2 lines below are needed because of a DRF3 bug
    start_date = serializers.DateField(read_only=True)
    end_date = serializers.DateField(read_only=True)

    class Meta:
        model = MeteredFeatureUnitsLog
        fields = ('consumed_units', 'start_date', 'end_date')
class ProviderSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for providers; enforces proforma-flow required fields."""

    class Meta:
        model = Provider
        fields = ('id', 'url', 'name', 'company', 'invoice_series', 'flow',
                  'email', 'address_1', 'address_2', 'city', 'state',
                  'zip_code', 'country', 'extra', 'invoice_series',
                  'invoice_starting_number', 'proforma_series',
                  'proforma_starting_number')

    def validate(self, data):
        """When the flow is 'proforma', both the proforma series and the
        starting number must be supplied; every missing one is reported."""
        if data['flow'] == 'proforma':
            errors = {}
            if not data.get('proforma_series', None):
                errors['proforma_series'] = ("This field is required as the "
                                             "chosen flow is proforma.")
            if not data.get('proforma_starting_number', None):
                errors['proforma_starting_number'] = ("This field is required "
                                                      "as the chosen flow is "
                                                      "proforma.")
            if errors:
                raise serializers.ValidationError(errors)
        return data
class ProductCodeRelatedField(serializers.SlugRelatedField):
    """Slug field on ProductCode.value that builds a new (unsaved) instance
    when the code does not exist yet."""

    def __init__(self, **kwargs):
        super(ProductCodeRelatedField, self).__init__(
            slug_field='value', queryset=ProductCode.objects.all(), **kwargs)

    def to_internal_value(self, data):
        lookup = {self.slug_field: data}
        try:
            return ProductCode.objects.get(**lookup)
        except ObjectDoesNotExist:
            # Unknown code: hand back an unsaved instance for later saving.
            return ProductCode(**lookup)
        except (TypeError, ValueError):
            self.fail('invalid')
class PlanSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for plans, with nested metered features."""

    metered_features = MeteredFeatureSerializer(
        required=False, many=True
    )
    provider = serializers.HyperlinkedRelatedField(
        queryset=Provider.objects.all(),
        view_name='provider-detail',
    )
    product_code = ProductCodeRelatedField()

    class Meta:
        model = Plan
        fields = ('name', 'url', 'interval', 'interval_count', 'amount',
                  'currency', 'trial_period_days', 'generate_after', 'enabled',
                  'private', 'product_code', 'metered_features', 'provider')

    def validate_metered_features(self, value):
        """Run the model-level cross-feature validation on unsaved instances."""
        metered_features = [MeteredFeature(**mf_data) for mf_data in value]
        try:
            Plan.validate_metered_features(metered_features)
        # BUG FIX: `except ValidationError, e:` is Python-2-only syntax;
        # the `as` form is valid on Python 2.6+ and Python 3.
        except ValidationError as e:
            # Strip the "[u'" / "']" wrapping from the error's repr.
            raise serializers.ValidationError(str(e)[3:-2])
        return value

    def create(self, validated_data):
        metered_features_data = validated_data.pop('metered_features')
        metered_features = [MeteredFeature.objects.create(**mf_data)
                            for mf_data in metered_features_data]
        product_code = validated_data.pop('product_code')
        product_code = ProductCode.objects.get_or_create(value=product_code)[0]
        validated_data.update({'product_code': product_code})
        plan = Plan.objects.create(**validated_data)
        plan.metered_features.add(*metered_features)
        # NOTE(review): product_code was already in validated_data at
        # create(); this second assignment + save looks redundant -- kept
        # to avoid a behavior change, but worth confirming.
        plan.product_code = product_code
        plan.save()
        return plan

    def update(self, instance, validated_data):
        # Only `name` and `generate_after` may change on an existing plan.
        instance.name = validated_data.get('name', instance.name)
        instance.generate_after = validated_data.get('generate_after',
                                                     instance.generate_after)
        instance.save()
        return instance
class SubscriptionUrl(serializers.HyperlinkedRelatedField):
    """URL field for the nested customer/subscription detail route."""

    def get_url(self, obj, view_name, request, format):
        route_kwargs = {
            'customer_pk': obj.customer.pk,
            'subscription_pk': obj.pk,
        }
        return reverse(view_name, kwargs=route_kwargs, request=request,
                       format=format)
class SubscriptionSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for subscriptions. State changes go through dedicated
    endpoints, hence `state` is read-only here."""

    trial_end = serializers.DateField(required=False)
    start_date = serializers.DateField(required=False)
    ended_at = serializers.DateField(read_only=True)
    # source='*' hands the whole subscription to the URL field.
    url = SubscriptionUrl(view_name='subscription-detail', source='*',
                          queryset=Subscription.objects.all(), required=False)

    class Meta:
        model = Subscription
        fields = ('url', 'plan', 'customer', 'trial_end', 'start_date',
                  'ended_at', 'state', 'reference')
        read_only_fields = ('state', )

    def validate(self, attrs):
        # Delegate cross-field validation to the model's clean() on an
        # unsaved instance.
        instance = Subscription(**attrs)
        instance.clean()
        return attrs
class SubscriptionDetailSerializer(SubscriptionSerializer):
    """Subscription detail view: also embeds the plan's metered features."""

    metered_features = MeteredFeatureInSubscriptionSerializer(
        source='plan.metered_features', many=True, read_only=True
    )

    class Meta(SubscriptionSerializer.Meta):
        fields = SubscriptionSerializer.Meta.fields + ('metered_features',)
class CustomerSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for customers, with links to all their subscriptions."""

    subscriptions = SubscriptionUrl(view_name='subscription-detail', many=True,
                                    read_only=True)

    class Meta:
        model = Customer
        fields = ('url', 'customer_reference', 'name', 'company', 'email',
                  'address_1', 'address_2', 'city', 'state', 'zip_code',
                  'country', 'extra', 'sales_tax_number', 'sales_tax_name',
                  'sales_tax_percent', 'subscriptions')
class ProductCodeSerializer(serializers.HyperlinkedModelSerializer):
    """Minimal hyperlinked representation of a product code."""

    class Meta:
        model = ProductCode
        fields = ('url', 'value')
class DocumentEntrySerializer(serializers.HyperlinkedModelSerializer):
    """Line-item entry shared by invoices and proformas."""

    # Read-only here: entries reference existing codes by value.
    product_code = serializers.SlugRelatedField(
        slug_field='value',
        read_only=True
    )

    class Meta:
        model = DocumentEntry
        fields = ('entry_id', 'description', 'unit', 'unit_price', 'quantity',
                  'total', 'start_date', 'end_date', 'prorated', 'product_code')
class InvoicePdfUrlSerializer(serializers.HyperlinkedRelatedField):
    """URL field pointing at the invoice's PDF endpoint."""

    def get_url(self, obj, view_name, request, format):
        return reverse(view_name, kwargs={'invoice_id': obj.id},
                       request=request, format=format)
class InvoiceSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for invoices with nested document entries."""

    invoice_entries = DocumentEntrySerializer(many=True)
    pdf = InvoicePdfUrlSerializer(view_name='invoice-pdf', source='*',
                                  read_only=True)

    class Meta:
        model = Invoice
        fields = ('id', 'series', 'number', 'provider', 'customer',
                  'archived_provider', 'archived_customer', 'due_date',
                  'issue_date', 'paid_date', 'cancel_date', 'sales_tax_name',
                  'sales_tax_percent', 'currency', 'state', 'proforma',
                  'invoice_entries', 'total', 'pdf')
        read_only_fields = ('archived_provider', 'archived_customer', 'total')

    def create(self, validated_data):
        # BUG FIX: default to an empty list -- popping None and then
        # iterating it raised TypeError when no entries were supplied.
        entries = validated_data.pop('invoice_entries', None) or []
        # Create the new invoice object
        invoice = Invoice.objects.create(**validated_data)
        # Add the invoice entries
        for entry in entries:
            entry_dict = dict(entry)
            entry_dict['invoice'] = invoice
            DocumentEntry.objects.create(**entry_dict)
        return invoice

    def update(self, instance, validated_data):
        # The provider has changed => force the generation of the correct
        # number corresponding to the count of the new provider
        current_provider = instance.provider
        new_provider = validated_data.get('provider')
        if new_provider and new_provider != current_provider:
            instance.number = None
        for field_name in instance.updateable_fields:
            field_value = validated_data.get(field_name,
                                             getattr(instance, field_name))
            setattr(instance, field_name, field_value)
        instance.save()
        return instance

    def validate(self, data):
        if self.instance:
            self.instance.clean()
        # BUG FIX: guard on 'state' being present -- a partial update
        # without `state` previously raised KeyError instead of validating.
        if self.instance and 'state' in data and \
                data['state'] != self.instance.state:
            msg = "Direct state modification is not allowed."\
                  " Use the corresponding endpoint to update the state."
            raise serializers.ValidationError(msg)
        return data
class ProformaPdfUrlSerializer(serializers.HyperlinkedRelatedField):
    """URL field pointing at the proforma's PDF endpoint."""

    def get_url(self, obj, view_name, request, format):
        return reverse(view_name, kwargs={'proforma_id': obj.id},
                       request=request, format=format)
class ProformaSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for proformas with nested document entries."""

    proforma_entries = DocumentEntrySerializer(many=True)
    pdf = ProformaPdfUrlSerializer(view_name='proforma-pdf', source='*',
                                   read_only=True)

    class Meta:
        model = Proforma
        fields = ('id', 'series', 'number', 'provider', 'customer',
                  'archived_provider', 'archived_customer', 'due_date',
                  'issue_date', 'paid_date', 'cancel_date', 'sales_tax_name',
                  'sales_tax_percent', 'currency', 'state', 'invoice',
                  'proforma_entries', 'total', 'pdf')
        read_only_fields = ('archived_provider', 'archived_customer', 'total')

    def create(self, validated_data):
        # BUG FIX: default to an empty list -- popping None and then
        # iterating it raised TypeError when no entries were supplied.
        entries = validated_data.pop('proforma_entries', None) or []
        proforma = Proforma.objects.create(**validated_data)
        for entry in entries:
            entry_dict = dict(entry)
            entry_dict['proforma'] = proforma
            DocumentEntry.objects.create(**entry_dict)
        return proforma

    def update(self, instance, validated_data):
        # The provider has changed => force the generation of the correct
        # number corresponding to the count of the new provider
        current_provider = instance.provider
        new_provider = validated_data.get('provider')
        if new_provider and new_provider != current_provider:
            instance.number = None
        for field_name in instance.updateable_fields:
            field_value = validated_data.get(field_name,
                                             getattr(instance, field_name))
            setattr(instance, field_name, field_value)
        instance.save()
        return instance

    def validate(self, data):
        if self.instance:
            self.instance.clean()
        # BUG FIX: guard on 'state' being present -- a partial update
        # without `state` previously raised KeyError instead of validating.
        if self.instance and 'state' in data and \
                data['state'] != self.instance.state:
            msg = "Direct state modification is not allowed."\
                  " Use the corresponding endpoint to update the state."
            raise serializers.ValidationError(msg)
        return data
Return null in the serializer if the PDF is not yet generated
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from rest_framework import serializers
from rest_framework.reverse import reverse
from silver.models import (MeteredFeatureUnitsLog, Customer, Subscription,
MeteredFeature, Plan, Provider, Invoice,
DocumentEntry, ProductCode, Proforma)
class MFUnitsLogUrl(serializers.HyperlinkedRelatedField):
    """Builds the nested URL of a subscription's metered-feature units log."""

    def get_url(self, obj, view_name, request, format):
        url_kwargs = request.parser_context['kwargs']
        route_kwargs = {
            'customer_pk': url_kwargs['customer_pk'],
            'subscription_pk': url_kwargs['subscription_pk'],
            'mf_product_code': obj.product_code.value,
        }
        return self.reverse(view_name, kwargs=route_kwargs, request=request,
                            format=format)
class MeteredFeatureSerializer(serializers.ModelSerializer):
    """Plain (non-hyperlinked) representation of a metered feature."""

    # Expose the related ProductCode by its `value` instead of a pk/URL.
    product_code = serializers.SlugRelatedField(
        slug_field='value',
        queryset=ProductCode.objects.all()
    )

    class Meta:
        model = MeteredFeature
        fields = ('name', 'unit', 'price_per_unit', 'included_units',
                  'product_code')
class MeteredFeatureRelatedField(serializers.HyperlinkedRelatedField):
    """Hyperlinks a metered feature by pk and renders it inline."""

    def get_url(self, obj, view_name, request, format):
        kwargs = {'pk': obj.pk}
        return reverse(view_name, kwargs=kwargs, request=request, format=format)

    # NOTE(review): `to_native` is the DRF 2.x hook; DRF 3 renamed it to
    # `to_representation` -- confirm which DRF version this field targets.
    def to_native(self, obj):
        request = self.context.get('request', None)
        return MeteredFeatureSerializer(obj, context={'request': request}).data
class MeteredFeatureInSubscriptionSerializer(serializers.HyperlinkedModelSerializer):
    """Metered feature as rendered inside a subscription, with a link to its
    per-subscription units log."""

    # source='*' hands the whole feature object to the URL field.
    url = MFUnitsLogUrl(view_name='mf-log-units', source='*', read_only=True)
    product_code = serializers.SlugRelatedField(
        slug_field='value',
        queryset=ProductCode.objects.all()
    )

    class Meta:
        model = MeteredFeature
        fields = ('name', 'unit', 'price_per_unit', 'included_units',
                  'product_code', 'url')
class MeteredFeatureUnitsLogSerializer(serializers.HyperlinkedModelSerializer):
    """Read-only view of the units consumed during one billing interval."""

    # The 2 lines below are needed because of a DRF3 bug
    start_date = serializers.DateField(read_only=True)
    end_date = serializers.DateField(read_only=True)

    class Meta:
        model = MeteredFeatureUnitsLog
        fields = ('consumed_units', 'start_date', 'end_date')
class ProviderSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for providers; enforces proforma-flow required fields."""

    class Meta:
        model = Provider
        fields = ('id', 'url', 'name', 'company', 'invoice_series', 'flow',
                  'email', 'address_1', 'address_2', 'city', 'state',
                  'zip_code', 'country', 'extra', 'invoice_series',
                  'invoice_starting_number', 'proforma_series',
                  'proforma_starting_number')

    def validate(self, data):
        """When the flow is 'proforma', both the proforma series and the
        starting number must be supplied; every missing one is reported."""
        if data['flow'] == 'proforma':
            errors = {}
            if not data.get('proforma_series', None):
                errors['proforma_series'] = ("This field is required as the "
                                             "chosen flow is proforma.")
            if not data.get('proforma_starting_number', None):
                errors['proforma_starting_number'] = ("This field is required "
                                                      "as the chosen flow is "
                                                      "proforma.")
            if errors:
                raise serializers.ValidationError(errors)
        return data
class ProductCodeRelatedField(serializers.SlugRelatedField):
    """Slug field on ProductCode.value that builds a new (unsaved) instance
    when the code does not exist yet."""

    def __init__(self, **kwargs):
        super(ProductCodeRelatedField, self).__init__(
            slug_field='value', queryset=ProductCode.objects.all(), **kwargs)

    def to_internal_value(self, data):
        lookup = {self.slug_field: data}
        try:
            return ProductCode.objects.get(**lookup)
        except ObjectDoesNotExist:
            # Unknown code: hand back an unsaved instance for later saving.
            return ProductCode(**lookup)
        except (TypeError, ValueError):
            self.fail('invalid')
class PlanSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for plans, with nested metered features."""

    metered_features = MeteredFeatureSerializer(
        required=False, many=True
    )
    provider = serializers.HyperlinkedRelatedField(
        queryset=Provider.objects.all(),
        view_name='provider-detail',
    )
    product_code = ProductCodeRelatedField()

    class Meta:
        model = Plan
        fields = ('name', 'url', 'interval', 'interval_count', 'amount',
                  'currency', 'trial_period_days', 'generate_after', 'enabled',
                  'private', 'product_code', 'metered_features', 'provider')

    def validate_metered_features(self, value):
        """Run the model-level cross-feature validation on unsaved instances."""
        metered_features = [MeteredFeature(**mf_data) for mf_data in value]
        try:
            Plan.validate_metered_features(metered_features)
        # BUG FIX: `except ValidationError, e:` is Python-2-only syntax;
        # the `as` form is valid on Python 2.6+ and Python 3.
        except ValidationError as e:
            # Strip the "[u'" / "']" wrapping from the error's repr.
            raise serializers.ValidationError(str(e)[3:-2])
        return value

    def create(self, validated_data):
        metered_features_data = validated_data.pop('metered_features')
        metered_features = [MeteredFeature.objects.create(**mf_data)
                            for mf_data in metered_features_data]
        product_code = validated_data.pop('product_code')
        product_code = ProductCode.objects.get_or_create(value=product_code)[0]
        validated_data.update({'product_code': product_code})
        plan = Plan.objects.create(**validated_data)
        plan.metered_features.add(*metered_features)
        # NOTE(review): product_code was already in validated_data at
        # create(); this second assignment + save looks redundant -- kept
        # to avoid a behavior change, but worth confirming.
        plan.product_code = product_code
        plan.save()
        return plan

    def update(self, instance, validated_data):
        # Only `name` and `generate_after` may change on an existing plan.
        instance.name = validated_data.get('name', instance.name)
        instance.generate_after = validated_data.get('generate_after',
                                                     instance.generate_after)
        instance.save()
        return instance
class SubscriptionUrl(serializers.HyperlinkedRelatedField):
    """Hyperlink field for subscriptions nested under their customer."""
    def get_url(self, obj, view_name, request, format):
        lookup = {'customer_pk': obj.customer.pk,
                  'subscription_pk': obj.pk}
        return reverse(view_name, kwargs=lookup,
                       request=request, format=format)
class SubscriptionSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Subscription objects (state is read-only)."""
    trial_end = serializers.DateField(required=False)
    start_date = serializers.DateField(required=False)
    ended_at = serializers.DateField(read_only=True)
    # source='*' hands the whole subscription object to the URL field so
    # it can build the customer-nested route.
    url = SubscriptionUrl(view_name='subscription-detail', source='*',
                          queryset=Subscription.objects.all(), required=False)
    class Meta:
        model = Subscription
        fields = ('url', 'plan', 'customer', 'trial_end', 'start_date',
                  'ended_at', 'state', 'reference')
        read_only_fields = ('state', )
    def validate(self, attrs):
        # Delegate cross-field validation to the model's clean().
        instance = Subscription(**attrs)
        instance.clean()
        return attrs
class SubscriptionDetailSerializer(SubscriptionSerializer):
    """Subscription serializer that also exposes the plan's metered
    features (read-only, pulled through the related plan)."""
    metered_features = MeteredFeatureInSubscriptionSerializer(
        source='plan.metered_features', many=True, read_only=True
    )
    class Meta(SubscriptionSerializer.Meta):
        fields = SubscriptionSerializer.Meta.fields + ('metered_features',)
class CustomerSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Customer objects, linking their subscriptions."""
    subscriptions = SubscriptionUrl(view_name='subscription-detail', many=True,
                                    read_only=True)
    class Meta:
        model = Customer
        fields = ('url', 'customer_reference', 'name', 'company', 'email',
                  'address_1', 'address_2', 'city', 'state', 'zip_code',
                  'country', 'extra', 'sales_tax_number', 'sales_tax_name',
                  'sales_tax_percent', 'subscriptions')
class ProductCodeSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for ProductCode objects."""
    class Meta:
        model = ProductCode
        fields = ('url', 'value')
class DocumentEntrySerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for invoice/proforma line entries."""
    product_code = serializers.SlugRelatedField(
        slug_field='value',
        read_only=True
    )
    class Meta:
        model = DocumentEntry
        fields = ('entry_id', 'description', 'unit', 'unit_price', 'quantity',
                  'total', 'start_date', 'end_date', 'prorated', 'product_code')
class InvoicePdfUrlSerializer(serializers.HyperlinkedRelatedField):
    """Resolves the URL of an invoice's generated PDF (None when no PDF
    has been generated yet)."""
    def get_url(self, obj, view_name, request, format):
        if obj.pdf:
            return reverse(view_name, kwargs={'invoice_id': obj.id},
                           request=request, format=format)
        return None
class InvoiceSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Invoice documents and their entries."""
    invoice_entries = DocumentEntrySerializer(many=True)
    pdf = InvoicePdfUrlSerializer(view_name='invoice-pdf', source='*',
                                  read_only=True)

    class Meta:
        model = Invoice
        fields = ('id', 'series', 'number', 'provider', 'customer',
                  'archived_provider', 'archived_customer', 'due_date',
                  'issue_date', 'paid_date', 'cancel_date', 'sales_tax_name',
                  'sales_tax_percent', 'currency', 'state', 'proforma',
                  'invoice_entries', 'total', 'pdf')
        read_only_fields = ('archived_provider', 'archived_customer', 'total')

    def create(self, validated_data):
        """Create the invoice, then its DocumentEntry rows."""
        # Fall back to an empty list: popping None and iterating it
        # would raise TypeError when no entries were supplied.
        entries = validated_data.pop('invoice_entries', None) or []
        # Create the new invoice object
        invoice = Invoice.objects.create(**validated_data)
        # Add the invoice entries
        for entry in entries:
            entry_dict = dict(entry)
            entry_dict['invoice'] = invoice
            DocumentEntry.objects.create(**entry_dict)
        return invoice

    def update(self, instance, validated_data):
        """Apply the updateable fields; renumber if the provider changed."""
        # The provider has changed => force the generation of the correct
        # number corresponding to the count of the new provider
        current_provider = instance.provider
        new_provider = validated_data.get('provider')
        if new_provider and new_provider != current_provider:
            instance.number = None

        for field_name in instance.updateable_fields:
            setattr(instance, field_name,
                    validated_data.get(field_name,
                                       getattr(instance, field_name)))
        instance.save()
        return instance

    def validate(self, data):
        """Run model validation and forbid direct state changes."""
        if self.instance:
            self.instance.clean()
        # Membership test first: a payload without 'state' must not
        # raise KeyError here.
        if self.instance and 'state' in data \
                and data['state'] != self.instance.state:
            msg = "Direct state modification is not allowed."\
                  " Use the corresponding endpoint to update the state."
            raise serializers.ValidationError(msg)
        return data
class ProformaPdfUrlSerializer(serializers.HyperlinkedRelatedField):
    """Resolves the URL of a proforma's generated PDF (None when no PDF
    has been generated yet)."""
    def get_url(self, obj, view_name, request, format):
        if obj.pdf:
            return reverse(view_name, kwargs={'proforma_id': obj.id},
                           request=request, format=format)
        return None
class ProformaSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Proforma documents and their entries."""
    proforma_entries = DocumentEntrySerializer(many=True)
    pdf = ProformaPdfUrlSerializer(view_name='proforma-pdf', source='*',
                                   read_only=True)

    class Meta:
        model = Proforma
        fields = ('id', 'series', 'number', 'provider', 'customer',
                  'archived_provider', 'archived_customer', 'due_date',
                  'issue_date', 'paid_date', 'cancel_date', 'sales_tax_name',
                  'sales_tax_percent', 'currency', 'state', 'invoice',
                  'proforma_entries', 'total', 'pdf')
        read_only_fields = ('archived_provider', 'archived_customer', 'total')

    def create(self, validated_data):
        """Create the proforma, then its DocumentEntry rows."""
        # Fall back to an empty list: popping None and iterating it
        # would raise TypeError when no entries were supplied.
        entries = validated_data.pop('proforma_entries', None) or []
        proforma = Proforma.objects.create(**validated_data)
        for entry in entries:
            entry_dict = dict(entry)
            entry_dict['proforma'] = proforma
            DocumentEntry.objects.create(**entry_dict)
        return proforma

    def update(self, instance, validated_data):
        """Apply the updateable fields; renumber if the provider changed."""
        # The provider has changed => force the generation of the correct
        # number corresponding to the count of the new provider
        current_provider = instance.provider
        new_provider = validated_data.get('provider')
        if new_provider and new_provider != current_provider:
            instance.number = None

        for field_name in instance.updateable_fields:
            setattr(instance, field_name,
                    validated_data.get(field_name,
                                       getattr(instance, field_name)))
        instance.save()
        return instance

    def validate(self, data):
        """Run model validation and forbid direct state changes."""
        if self.instance:
            self.instance.clean()
        # Membership test first: a payload without 'state' must not
        # raise KeyError here.
        if self.instance and 'state' in data \
                and data['state'] != self.instance.state:
            msg = "Direct state modification is not allowed."\
                  " Use the corresponding endpoint to update the state."
            raise serializers.ValidationError(msg)
        return data
|
"""Simple TCP sockets.
Each Actor has a TCP IPv4 port/socket that will accept incoming
connections for messages. Each connection from a remote Actor will
accept a single message per connection. The connection is dropped and
re-established for multiple messages; this is less efficient but has
more fairness.
This transport can be used within a process, between processes, and
even between processes on separate systems.
"""
# n.b. The core of this is very similar to asyncore/asynchat.
# Unfortunately, those modules are deprecated in Python 3.4 in favor
# of asyncio, which is powerful... and complex. Thespian aims to
# support Python 2.6 through 3.4 and beyond, and has more specific
# needs (undoubtedly a subset of asyncio capabilities) that can be
# implemented more simply and directly here. In addition, this module
# should be extensible to support SSL, which asynchat is not (maybe).
# For Thespian, there are two classes of sockets:
# * the Actor's primary receive socket, and
# * transient outgoing send sockets.
# All steps of both types of sockets are handled asynchronously, very
# similarly to the asyncore channel.
#
# For the receive socket, it will listen for and accept incoming
# connections, and then accept a single message on that connection,
# closing the connection on completion (or error).
#
# For the transmit socket, it will connect, send, and close with a
# TransmitIntent.
# ----------------------------------------------------------------------
# TCP Buffering issues
#
# TCP is unique in that there are unusual buffering considerations to
# account for. Specifically, a sender can connect to a listener, send
# a message, and close the socket --- *without* the receiver even
# processing the accept! As a result, the transmitter must take
# additional steps to ensure that the message that was sent has been
# delivered.
#
# There are two ways that this confirmation can be handled:
#
# 1) confirmation sent back in the original connection
#
# 2) messages confirmed by a separate confirmation message with a
# unique message identifier for idempotency.
#
# Disadvantages of #1:
# * More complicated exchange between sender and receiver
# * There must be a header synchronization with a size indicator so
# that the receiver knows when the full message has been received
# and should be acknowledged.
# * The socket must exist for a potentially much longer period and
# retransmits must still be attempted on failure.
#
# Disadvantages of #2:
# * Doubles connection establishment requirements.
# * More complicated send queuing to ensure ordering of messages between
# sender and recipient. However, this really must exist for condition
# #1 as well.
#
# Could also do a hybrid of both. On send, start with header
# containing message ID (and size?) then wait a brief time after send
# for the ACK, then disconnect and wait for the separate ACK later.
#
# At this point, the connection establishment seems to be the
# overriding performance dominator, and the message header
# synchronization and size indication seem like good ideas anyhow to
# confirm that the entire message has been received by the recipient.
# This method is feasible because of the asynchronous handling of the
# transmit sequence (as opposed to a blocking transmit, which would
# consume the processing budget for highly active scenarios).
import logging
from thespian.system.utilis import (thesplog, fmap, partition)
from thespian.system.timing import timePeriodSeconds, ExpirationTimer
from thespian.actors import *
from thespian.system.transport import *
from thespian.system.transport.IPBase import (TCPv4ActorAddress)
from thespian.system.transport.streamBuffer import (toSendBuffer,
ReceiveBuffer,
ackMsg, ackPacket,
ackDataErrMsg,
isControlMessage)
from thespian.system.transport.asyncTransportBase import asyncTransportBase
from thespian.system.transport.wakeupTransportBase import wakeupTransportBase
from thespian.system.transport.errmgmt import *
from thespian.system.messages.multiproc import ChildMayHaveDied
from thespian.system.addressManager import ActorLocalAddress
import socket
import select
from datetime import timedelta
try:
import cPickle as pickle
except Exception:
import pickle
import errno
from contextlib import closing
DEFAULT_ADMIN_PORT = 1900
serializer = pickle
# json cannot be used because Messages are often structures, which
# cannot be converted to JSON.
# max # of listens to sign up for at a time
LISTEN_DEPTH = 100
# max time to hold open an incoming socket
MAX_INCOMING_SOCKET_PERIOD = timedelta(minutes=7)
MAX_CONSECUTIVE_READ_FAILURES = 20
# close idle sockets after this amount of time
MAX_IDLE_SOCKET_PERIOD = timedelta(minutes=20)
# if true, keep sockets open for multiple messages
REUSE_SOCKETS = True
class TCPEndpoint(TransportInit__Base):
    """Initialization token handed to a child to set up its TCP transport."""

    def __init__(self, *args):
        self.args = args

    @property
    def addrInst(self):
        """The assigned address instance number (first init argument)."""
        return self.args[0]
def _safeSocketShutdown(sock):
if sock:
sock = getattr(sock, 'socket', sock)
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error as ex:
if ex.errno != errno.ENOTCONN:
thesplog('Error during shutdown of socket %s: %s', sock, ex)
sock.close()
class TCPIncoming_Common(PauseWithBackoff):
    """Tracks an accepted inbound connection while a single message is
    incrementally received and deserialized from it."""
    def __init__(self, rmtAddr, baseSock, rcvBuf=None):
        super(TCPIncoming_Common, self).__init__()
        self._openSock = baseSock
        # _rmtAddr may be None until a message is rcvd with
        # identification
        self._rmtAddr = rmtAddr
        self._rData = rcvBuf or ReceiveBuffer(serializer.loads)
        self._expires = ExpirationTimer(MAX_INCOMING_SOCKET_PERIOD)
        self.failCount = 0  # consecutive failed reads on this socket
    @property
    def socket(self):
        return self._openSock
    @property
    def fromAddress(self):
        return self._rmtAddr
    @fromAddress.setter
    def fromAddress(self, newAddr):
        self._rmtAddr = newAddr
    def delay(self):
        # Time remaining before this incoming should next be serviced:
        # the sooner of overall expiry and any transient backoff pause.
        # n.b. include _pauseUntil from PauseWithBackoff
        return max(timedelta(seconds=0),
                   min(self._expires.remaining(),
                       getattr(self, '_pauseUntil', self._expires).remaining()))
    def addData(self, newData): self._rData.addMore(newData)
    def remainingSize(self): return self._rData.remainingAmount()
    def receivedAllData(self): return self._rData.isDone()
    @property
    def data(self): return self._rData.completed()
    def close(self):
        # _safeSocketShutdown resolves the underlying socket via the
        # .socket property (getattr fallback), so passing self is OK.
        _safeSocketShutdown(self)
        self._openSock = None
    def __str__(self):
        return 'TCPInc(%s)<%s>' % (str(self._rmtAddr), str(self._rData))
class TCPIncoming(TCPIncoming_Common):
    """Incoming connection whose socket is torn down when the wrapper is
    garbage collected."""
    def __del__(self):
        # BUG FIX: the original referenced an undefined name `s`
        # (NameError at collection time); shut down this instance's
        # own socket instead.
        _safeSocketShutdown(self.socket)
        self._openSock = None
class TCPIncomingPersistent(TCPIncoming_Common):
    # Incoming connection wrapper WITHOUT automatic shutdown on __del__:
    # used when the underlying socket is kept open across messages
    # (see _queue_intent_extra), so the socket must outlive this wrapper.
    pass
class IdleSocket(object):
    """An open, currently-unused outbound socket retained for reuse, with
    a bounded lifetime (MAX_IDLE_SOCKET_PERIOD)."""
    def __init__(self, socket, addr):
        # n.b. the `socket` parameter shadows the socket module within
        # this method only.
        self.socket = socket
        self.rmtaddr = addr
        # n.b. the remote may have bound an outbound connect socket to
        # a different address, but rmtAddr represents the primary
        # address of an Actor/Admin: the one it listens on.
        # self.rmtAddr = rmtAddr
        self.validity = ExpirationTimer(MAX_IDLE_SOCKET_PERIOD)
    def expired(self):
        # True once the idle-reuse window has elapsed.
        return self.validity.expired()
    def __str__(self):
        return 'Idle-socket %s->%s (%s)' % (str(self.socket),
                                            str(self.rmtaddr),
                                            str(self.validity))
    def shutdown(self, shtarg=socket.SHUT_RDWR):
        # Default is a full bidirectional shutdown.
        self.socket.shutdown(shtarg)
    def close(self):
        self.socket.close()
def opsKey(addr):
    """Key used to index per-remote socket/operation tables: the raw
    address-details object of the remote's (listen) address."""
    return getattr(addr, 'addressDetails')
# The definition of these two address types has moved to IPBase, but
# declare them here as well for backward compatibility with older
# running Thespian instances.
import thespian.system.transport.IPBase
class RoutedTCPv4ActorAddress(
        thespian.system.transport.IPBase.RoutedTCPv4ActorAddress):
    # Backward-compatible alias; see IPBase for the implementation.
    pass
class TXOnlyAdminTCPv4ActorAddress(
        thespian.system.transport.IPBase.TXOnlyAdminTCPv4ActorAddress):
    # Backward-compatible alias; see IPBase for the implementation.
    pass
class ExternalTransportCopy(object):
    """Marker init-type requesting a clone of an external transport."""
    pass
class TCPTransport(asyncTransportBase, wakeupTransportBase):
    "A transport using TCP IPv4 sockets for communications."

    def __init__(self, initType, *args):
        """Initialize for one of three contexts, distinguished by initType:
        ExternalInterfaceTransportInit (an outside process talking in),
        TCPEndpoint (a child Actor created by a parent), or
        ExternalTransportCopy (a clone for a separate external context).
        In every case a listen socket is bound and myAddress derived.
        """
        super(TCPTransport, self).__init__()
        if isinstance(initType, ExternalInterfaceTransportInit):
            # External process that is going to talk "in". There is
            # no parent, and the child is the systemAdmin.
            capabilities, logDefs, concurrency_context = args
            adminRouting = False
            self.txOnly = False # communications from outside-in are always local and therefore not restricted.
            # Accept the convention address as a (host, port) tuple, a
            # "host:port" string, or a bare host (default admin port).
            convAddr = capabilities.get('Convention Address.IPv4', '')
            if convAddr and type(convAddr) == type( (1,2) ):
                externalAddr = convAddr
            elif type(convAddr) == type("") and ':' in convAddr:
                externalAddr = convAddr.split(':')
                externalAddr = externalAddr[0], int(externalAddr[1])
            else:
                externalAddr = (convAddr, capabilities.get('Admin Port', DEFAULT_ADMIN_PORT))
            templateAddr = ActorAddress(TCPv4ActorAddress(None, 0, external = externalAddr))
            self._adminAddr = self.getAdminAddr(capabilities)
            self._parentAddr = None
            isAdmin = False
        elif isinstance(initType, TCPEndpoint):
            instanceNum, assignedAddr, self._parentAddr, self._adminAddr, adminRouting, self.txOnly = initType.args
            isAdmin = assignedAddr == self._adminAddr
            templateAddr = assignedAddr or \
                           ActorAddress(
                               TCPv4ActorAddress(None, 0,
                                                 external=(self._parentAddr or
                                                           self._adminAddr or
                                                           True)))
        elif isinstance(initType, ExternalTransportCopy):
            self._adminAddr, self.txOnly, adminRouting = args
            self._parentAddr = None
            isAdmin = False
            templateAddr = ActorAddress(
                TCPv4ActorAddress(None, 0, self._adminAddr))
        else:
            thesplog('TCPTransport init of type %s unsupported', type(initType), level=logging.ERROR)
            raise ActorSystemStartupFailure('Invalid TCPTransport init type (%s)'%type(initType))
        self.socket = socket.socket(*templateAddr.addressDetails.socketArgs)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(*templateAddr.addressDetails.bindArgs)
        self.socket.listen(LISTEN_DEPTH)
        # N.B. myAddress is actually the address we will export for
        # others to talk to us, not the bind address.  The difference
        # is that we bind to '0.0.0.0' (inaddr_any), but that's not a
        # valid address for people to send stuff to us.  The
        # self.socket socket name is likely inaddr_any but has the
        # valid port, whereas the templateAddr has our actual public
        # address.
        if isAdmin and self.txOnly:
            # Must be the admin, and in txOnly mode
            self.myAddress = ActorAddress(TXOnlyAdminTCPv4ActorAddress(
                templateAddr.addressDetails.connectArgs[0][0],
                self.socket.getsockname()[1],
                external=True))
        elif adminRouting:
            self.myAddress = ActorAddress(RoutedTCPv4ActorAddress(
                templateAddr.addressDetails.connectArgs[0][0],
                self.socket.getsockname()[1],
                self._adminAddr,
                txOnly=self.txOnly,
                external=True))
        else:
            self.myAddress = ActorAddress(TCPv4ActorAddress(
                templateAddr.addressDetails.connectArgs[0][0],
                self.socket.getsockname()[1],
                external=True))
        self._transmitIntents = {}  # key = fd, value = tx intent
        self._waitingTransmits = []  # list of intents without sockets
        self._incomingSockets = {}  # key = fd, value = TCP Incoming
        self._incomingEnvelopes = []
        self._watches = []
        if REUSE_SOCKETS:
            # key = opsKey(remote listen address), value=IdleSocket
            self._openSockets = {}
        self._checkChildren = False
        self._shutdownSignalled = False
    def close(self):
        """Releases all resources and terminates functionality.  This is
        better done deterministically by explicitly calling this
        method (although __del__ will attempt to perform similar
        operations), but it has the unfortunate side-effect of
        making this object modal: after the close it can be
        referenced but not successfully used anymore, so it
        explicitly nullifies its contents.
        """
        # The hasattr guards make close() idempotent (it may be run
        # again from __del__ after a partial teardown).
        if hasattr(self, '_transmitIntents'):
            for each in self._transmitIntents:
                self._transmitIntents[each].tx_done(SendStatus.Failed)
            delattr(self, '_transmitIntents')
        if hasattr(self, '_waitingTransmits'):
            for each in self._waitingTransmits:
                each.tx_done(SendStatus.Failed)
            delattr(self, '_waitingTransmits')
        if hasattr(self, '_incomingSockets'):
            for each in self._incomingSockets:
                self._incomingSockets[each].close()
            delattr(self, '_incomingSockets')
        if hasattr(self, 'socket'):
            self._safeSocketShutdown(getattr(self, 'socket', None))
            delattr(self, 'socket')
    def __del__(self):
        # Best-effort cleanup; close() is idempotent via hasattr guards.
        self.close()
@staticmethod
def _safeSocketShutdown(sock):
# n.b. _safeSocketShutdown is a static method instead of a
# global because if __del__ calls close, the
# _safeSocketShutdown may already have been unbound. However,
# this still needs unusual protection to validate socket in
# case socket was already unloaded.
if sock and socket and isinstance(socket.error, Exception):
sock = getattr(sock, 'socket', sock)
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error as ex:
if ex.errno != errno.ENOTCONN:
thesplog('Error during shutdown of socket %s: %s', sock, ex)
sock.close()
    def protectedFileNumList(self):
        """File descriptors that must be preserved: active transmits,
        waiting transmits that already have sockets, in-progress
        incoming connections, and the main listen socket."""
        return (list(self._transmitIntents.keys()) +
                list(filter(None, map(self._socketFile,
                                      self._waitingTransmits))) +
                list(self._incomingSockets.keys()) + [self.socket.fileno()])
    def childResetFileNumList(self):
        """As protectedFileNumList, plus the idle reusable sockets."""
        return self.protectedFileNumList() + \
            [self._openSockets[S].socket.fileno() for S in self._openSockets]
    @staticmethod
    def getAdminAddr(capabilities):
        """Build the system Admin's ActorAddress from capabilities; uses
        the transmit-only admin address type when 'Outbound Only' is set.
        """
        # NOTE(review): the trailing `or True` is unreachable -- the
        # ('', port) tuple before it is always truthy.
        return ActorAddress(
            (TXOnlyAdminTCPv4ActorAddress
             if capabilities.get('Outbound Only', False) else
             TCPv4ActorAddress)
            (None, capabilities.get('Admin Port', DEFAULT_ADMIN_PORT),
             external=(TCPTransport.getConventionAddress(capabilities) or
                       ('', capabilities.get('Admin Port',
                                             DEFAULT_ADMIN_PORT)) or
                       True)))
    @staticmethod
    def getAddressFromString(addrspec, adminRouting=False):
        """Parse 'host:port' (or a pre-split (host, port) tuple) into an
        ActorAddress; the port defaults to DEFAULT_ADMIN_PORT when absent.
        """
        if isinstance(addrspec, tuple):
            addrparts = addrspec
        else:
            addrparts = addrspec.split(':')
        addrtype = (RoutedTCPv4ActorAddress if adminRouting else
                    TCPv4ActorAddress)
        # NOTE(review): a port split from a string is passed along as a
        # string here; presumably the address type normalizes it -- confirm.
        return ActorAddress(
            addrtype(addrparts[0],
                     addrparts[1] if addrparts[1:] else DEFAULT_ADMIN_PORT,
                     external=True))
@staticmethod
def getConventionAddress(capabilities):
convAddr = capabilities.get('Convention Address.IPv4', None)
if not convAddr:
return None
try:
return TCPTransport.getAddressFromString(convAddr)
except Exception as ex:
thesplog('Invalid TCP convention address "%s": %s', convAddr, ex,
level=logging.ERROR)
raise InvalidActorAddress(convAddr, str(ex))
    def external_transport_clone(self):
        """Create an independent TCPTransport (own socket/address) for an
        external process context."""
        # An external process wants a unique context for communicating
        # with Actors.
        return TCPTransport(ExternalTransportCopy(),
                            self._adminAddr,
                            self.txOnly,
                            isinstance(self.myAddress.addressDetails,
                                       RoutedTCPv4ActorAddress))
    def _updateStatusResponse(self, resp):
        """Called to update a Thespian_SystemStatus or Thespian_ActorStatus
        with common information
        """
        for each in self._transmitIntents.values():
            resp.addPendingMessage(self.myAddress,
                                   each.targetAddr,
                                   str(each.message))
        for each in self._waitingTransmits:
            resp.addPendingMessage(self.myAddress,
                                   each.targetAddr,
                                   str(each.message))
        for each in self._incomingEnvelopes:
            resp.addReceivedMessage(each.sender,
                                    self.myAddress,
                                    str(each.message))
        # Both transport base classes contribute their own status data.
        asyncTransportBase._updateStatusResponse(self, resp)
        wakeupTransportBase._updateStatusResponse(self, resp)
        # Report each idle reusable socket with its fd for diagnostics.
        for num, each in enumerate(self._openSockets.values()):
            resp.addKeyVal(str(each), 'sock#%d-fd%d' % (num, each.socket.fileno()))
@staticmethod
def probeAdmin(addr):
"""Called to see if there might be an admin running already at the
specified addr. This is called from the systemBase, so
simple blocking operations are fine. This only needs to
check for a responder; higher level logic will verify that
it's actually an ActorAdmin suitable for use.
"""
ss = socket.socket(*addr.addressDetails.socketArgs)
try:
ss.setsockopt(socket.SOL_SOCKET,
getattr(socket, 'SO_EXCLUSIVEADDRUSE',
socket.SO_REUSEADDR), 1)
try:
ss.bind(*addr.addressDetails.bindArgs)
# no other process bound
return False
except socket.error as ex:
if err_bind_inuse(ex.errno):
return True
# Some other error... not sure if that means an admin
# is running or not.
return False # assume not
finally:
ss.close()
    def prepEndpoint(self, assignedLocalAddr, capabilities):
        """In the parent, prepare to establish a new communications endpoint
        with a new Child Actor.  The result of this call will be
        passed to a created child process to use when initializing
        the Transport object for that class; the result of this
        call will also be kept by the parent to finalize the
        communications after creation of the Child by calling
        connectEndpoint() with this returned object.
        """
        if isinstance(assignedLocalAddr.addressDetails, ActorLocalAddress):
            # Normal child: pass its local instance number only.
            a1, a2 = assignedLocalAddr.addressDetails.addressInstanceNum, None
        else:
            # assumed to be an actual TCPActorAddress-based address
            # (e.g. admin)
            a1, a2 = None, assignedLocalAddr
        return TCPEndpoint(a1, a2,
                           self.myAddress,
                           self._adminAddr,
                           capabilities.get('Admin Routing', False) or
                           capabilities.get('Outbound Only', False),
                           capabilities.get('Outbound Only', False))
    def connectEndpoint(self, endPoint):
        # Nothing to finalize for TCP: the child connects on demand.
        pass
    def deadAddress(self, addressManager, childAddr):
        """Cancel all transmit work destined for the now-dead childAddr."""
        # Partition active transmits into those targeting the dead
        # address (cancel) and the rest (keep running).
        canceli, continuei = partition(lambda i: i[1].targetAddr == childAddr,
                                       self._transmitIntents.items())
        self._transmitIntents = dict(continuei)
        for _, each in canceli:
            each.socket.close()
            delattr(each, 'socket')
            self._finishIntent(each, SendStatus.DeadTarget)
        canceli, continuei = partition(lambda i: i.targetAddr == childAddr,
                                       self._waitingTransmits)
        self._waitingTransmits = continuei
        for each in canceli:
            self._finishIntent(each, SendStatus.DeadTarget)
        # No need to clean up self._incomingSockets entries: they will
        # timeout naturally.
        super(TCPTransport, self).deadAddress(addressManager, childAddr)
        # Ports may be re-used, so do not set this address to dead in
        # the address manager
def close_oldest_idle_sockets(self, num_to_close=1):
aged_keys = sorted([(self._openSockets[K].validity, K)
for K in self._openSockets])
for _,oldkey in aged_keys[:num_to_close]:
_safeSocketShutdown(self._openSockets.pop(oldkey))
def new_socket(self, op, *args, **kw):
try:
return op(*args, **kw)
except IOError as ex:
if err_too_many_open_sockets(ex):
pass
else:
raise
self.close_oldest_idle_sockets(3)
return op(*args, **kw)
_XMITStepSendConnect = 1
_XMITStepSendData = 2
_XMITStepShutdownWrite = 3
_XMITStepWaitForAck = 4
_XMITStepFinishCleanup = 5
_XMITStepRetry = 6
    def serializer(self, intent):
        """Serialize (sender address, message) into a framed send buffer."""
        # n.b. `serializer` below resolves to the module-level pickle
        # alias, not this method: method names live on the class, not
        # in the function's scope.
        return toSendBuffer((self.myAddress, intent.message), serializer.dumps)
    def lostRemote(self, rmtaddr):
        """[optional] Called by administrative levels (e.g. convention.py) to
        indicate that the indicated remote address is no longer
        accessible.  This is customarily used only by the Admin in
        "Admin Routing" scenarios when the remote is shutdown or
        de-registered to allow the transport to cleanup (e.g. close
        open sockets, etc.).

        This does *not* do anything to remote TXOnly sockets: those
        connections were initiated by the remote and should
        therefore be dropped by the remote.  Dropping those
        connections at this point would be harmful, especially
        because this is typically called when first reconnecting to
        the remote.
        """
        if isinstance(rmtaddr.addressDetails, TXOnlyAdminTCPv4ActorAddress):
            return
        if hasattr(self, '_openSockets'):
            # Drop idle reusable sockets pointing at that system.
            for rmvkey in [each
                           for each in self._openSockets
                           if rmtaddr.addressDetails.isSameSystem(
                                   self._openSockets[each].rmtaddr)]:
                _safeSocketShutdown(self._openSockets[rmvkey])
                del self._openSockets[rmvkey]
        # Abort in-flight transmits to that system.
        for each in [i for i in self._transmitIntents
                     if rmtaddr.addressDetails.isSameSystem(
                             self._transmitIntents[i].targetAddr)]:
            self._cancel_fd_ops(each)
        # Abort in-progress receives from that system.
        for each in [i for i,v in self._incomingSockets.items()
                     if rmtaddr.addressDetails.isSameSystem(
                             v.fromAddress
                             if v.fromAddress.addressDetails else
                             v.socket)]:
            self._cancel_fd_ops(each)
    def _cancel_fd_ops(self, errfileno):
        """Abort whatever operation owns errfileno (an incoming, an active
        transmit, a waiting transmit, or an idle reusable socket);
        an error on the main listen socket is fatal."""
        if errfileno == self.socket.fileno():
            thesplog('SELECT FATAL ERROR ON MAIN LISTEN SOCKET',
                     level=logging.ERROR)
            raise RuntimeError('Fatal listen socket error; aborting')
        if errfileno in self._incomingSockets:
            incoming = self._incomingSockets[errfileno]
            del self._incomingSockets[errfileno]
            # closed=True lets the handler clean up; it may return a
            # replacement incoming to keep tracking.
            incoming = self._handlePossibleIncoming(incoming, errfileno,
                                                    closed=True)
            if incoming:
                self._incomingSockets[incoming.socket.fileno()] = incoming
            return
        if self._processIntents(errfileno, closed=True):
            return
        if self._waitingTransmits:
            W = self._waitingTransmits.pop(0)
            # Re-queue the waiting transmit if it is still viable.
            if self._nextTransmitStepCheck(W, errfileno, closed=True):
                self._waitingTransmits.append(W)
            return
        # Otherwise it may be an idle reusable socket: just forget it.
        closed_openSocks = []
        for I in getattr(self, '_openSockets', {}):
            if self._socketFile(self._openSockets[I]) == errfileno:
                closed_openSocks.append(I)
        for each in closed_openSocks:
            del self._openSockets[each]
    def interrupt_wait(self,
                       signal_shutdown=False,
                       check_children=False):
        """Break the select loop, optionally flagging a shutdown or a
        child-status check for the main loop to act upon."""
        self._shutdownSignalled |= signal_shutdown
        self._checkChildren |= check_children
        # Now generate a spurious connection to break out of the
        # select.select loop.  This is especially useful if a signal
        # handler caused a message to be sent to myself: get the
        # select loop to wakeup and process the message.
        with closing(self.new_socket(
                socket.socket,
                *self.myAddress.addressDetails.socketArgs)) as ts:
            try:
                ts.connect(*self.myAddress.addressDetails.connectArgs)
            except Exception:
                # Best effort only: failing to poke ourselves is harmless.
                pass
    def _scheduleTransmitActual(self, intent):
        """Start the async transmit state machine for this intent;
        self-addressed sends are delivered locally without a socket."""
        intent = self._forwardIfNeeded(intent)
        if not intent:
            # Forwarding determined the target is dead; already finished.
            return
        if intent.targetAddr == self.myAddress:
            self._processReceivedEnvelope(ReceiveEnvelope(intent.targetAddr,
                                                          intent.message))
            if not isinstance(intent.message, ForwardMessage):
                self.interrupt_wait()
            return self._finishIntent(intent)
        intent.stage = self._XMITStepSendConnect
        if self._nextTransmitStep(intent):
            # Still in progress: track by fd if a socket was obtained,
            # otherwise queue it until a socket becomes available.
            if hasattr(intent, 'socket'):
                self._transmitIntents[intent.socket.fileno()] = intent
            else:
                self._waitingTransmits.append(intent)
    def _finishIntent(self, intent, status=SendStatus.Sent):
        """Complete a transmit intent: recycle or close its socket, requeue
        transmits waiting on the same target, and fire the intent's
        callback with status.  Always returns False (the intent no
        longer needs to be attempted)."""
        if hasattr(intent, 'socket'):
            if hasattr(self, '_openSockets'):
                if not self._queue_intent_extra(intent):
                    if status == SendStatus.Sent:
                        opskey = opsKey(intent.targetAddr)
                        # Replace any previously idled socket for this target.
                        _safeSocketShutdown(self._openSockets.get(opskey, None))
                        self._openSockets[opskey] = IdleSocket(intent.socket,
                                                               intent.targetAddr)
                        # No need to restart a pending transmit for
                        # this target here; the main loop will check
                        # the waitingIntents and find/start the next one
                        # automatically.
                    else:
                        _safeSocketShutdown(intent)
                        # Here waiting intents need to be re-queued
                        # since otherwise they won't run until timeout
                        runnable, waiting = partition(
                            lambda I: I.targetAddr == intent.targetAddr,
                            self._waitingTransmits)
                        self._waitingTransmits = waiting
                        for R in runnable:
                            if status == SendStatus.DeadTarget:
                                R.tx_done(status)
                            elif self._nextTransmitStep(R):
                                if hasattr(R, 'socket'):
                                    thesplog('<S> waiting intent is now re-processing: %s', R.identify())
                                    self._transmitIntents[R.socket.fileno()] = R
                                else:
                                    self._waitingTransmits.append(R)
            else:
                _safeSocketShutdown(intent)
            delattr(intent, 'socket')
        intent.tx_done(status)
        return False  # intent no longer needs to be attempted
    def _queue_intent_extra(self, intent):
        """If the transmit read extra bytes beyond its ACK, treat them as
        the start of a new incoming message on the same (persistent)
        socket; returns True when the socket was taken over for that."""
        extraRead = getattr(intent, 'extraRead', None)
        if not extraRead:
            return False
        incoming = TCPIncomingPersistent(intent.targetAddr,
                                         intent.socket)
        try:
            incoming.addData(extraRead)
        except Exception:
            # Bad trailing data, so discard it.
            thesplog('discarding bad trailing tx ack data')
            return False
        pendingIncoming = self._addedDataToIncoming(incoming)
        if pendingIncoming:
            self._incomingSockets[
                pendingIncoming.socket.fileno()] = pendingIncoming
        return True  # socket is in-progress or was already handled
    def _forwardIfNeeded(self, intent):
        """Wrap the intent's message in a ForwardMessage (with routing path)
        when the target needs admin-routed delivery; returns the intent
        (possibly retargeted) or None when the target is known dead."""
        # Called when an intent is originally received to determine if
        # the target address requires forwarding; if so, wrap the
        # message in a ForwardMessage wrapper and set the routing
        # path.
        if intent.targetAddr == self.myAddress or \
           isinstance(intent.message, ForwardMessage) or \
           not isinstance(intent.targetAddr.addressDetails,
                          RoutedTCPv4ActorAddress):
            return intent
        # Replace None with our local admin and remove this actor
        routing = [A or self._adminAddr
                   for A in intent.targetAddr.addressDetails.routing
                   if A != self.myAddress]
        if self.txOnly and routing and \
           (routing[0] != self._adminAddr) and \
           self.myAddress != self._adminAddr:
            routing.insert(0, self._adminAddr)
        if not routing or routing == [intent.targetAddr]:
            return intent
        if intent.targetAddr.addressDetails.isLocalAddr():
            return intent
        newmsg = ForwardMessage(intent.message,
                                intent.targetAddr,
                                self.myAddress, routing)
        newaddr = newmsg.fwdTargets[0]
        if hasattr(self, '_addressMgr'):
            newaddr, newmsg = self._addressMgr.prepMessageSend(newaddr, newmsg)
        try:
            isDead = newmsg == SendStatus.DeadTarget
        except Exception:
            # Messages with custom __eq__ may fail the comparison.
            isDead = False
        if isDead:
            # this is a DeadEnvelope or a ChildActorExited; drop
            # it instead of recursing forever.
            self._finishIntent(intent, SendStatus.Sent)
            return None
        # Changing the target addr to the next relay target for the
        # transmit machinery, but the levels above may process
        # completion based on the original target (e.g. systemCommon
        # _checkNextTransmit), so add a completion operation that will
        # reset the target back to the original (this occurs before
        # other callbacks because callbacks are called in reverse
        # order of addition).
        intent.addCallback(lambda r, i, ta=intent.targetAddr:
                           i.changeTargetAddr(ta))
        intent.changeMessage(newmsg)
        intent.changeTargetAddr(newaddr)
        intent.serMsg = self.serializer(intent)
        return intent
    def _nextTransmitStepCheck(self, intent, fileno, closed=False):
        """Advance an intent if its socket is ready or its timer fired.

        Return True if this intent is still valid, False if it has
        been completed.  If fileno is -1, this means check if there is
        time remaining still on this intent.
        """
        if self._socketFile(intent) == fileno or \
           (fileno == -1 and
            intent.timeToRetry(hasattr(self, '_openSockets') and
                               opsKey(intent.targetAddr) in self._openSockets)):
            if closed:
                # Remote end closed: force this intent into the retry stage.
                intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        if intent.expired():
            # Transmit timed out (consider this permanent)
            thesplog('Transmit attempt from %s to %s timed out'
                     ', returning PoisonPacket',
                     self.myAddress, intent.targetAddr, level=logging.WARNING)
            # self._incomingEnvelopes.append(ReceiveEnvelope(intent.targetAddr,
            #                                                PoisonPacket(intent.message)))
            # Stop attempting this transmit
            return self._finishIntent(intent, SendStatus.Failed)
        # Continue to attempt this transmit
        if not intent.delay():
            return self._nextTransmitStep(intent)
        return True
def _nextTransmitStep(self, intent):
# Return of True means keep waiting on this Transmit Intent;
# False means it is done
try:
return getattr(self, '_next_XMIT_%s' % intent.stage,
'_unknown_XMIT_step')(intent)
except Exception as ex:
import traceback
thesplog('xmit UNcaught exception %s; aborting intent.\n%s',
ex, traceback.format_exc(), level=logging.ERROR)
return False
    def _next_XMIT_1(self, intent):
        """Connect stage: obtain a socket for this intent's target.

        Reuses an idle open socket when available, queues behind an
        existing intent to the same target, or initiates a new
        non-blocking TCP connect.
        """
        if hasattr(self, '_openSockets'):
            opskey = opsKey(intent.targetAddr)
            if opskey in self._openSockets:
                intent.socket = self._openSockets[opskey].socket
                # This intent takes the open socket; there should be only
                # one intent per target but this "take" prevents an
                # erroneous second target intent from causing corruption.
                # The _finishIntent operation will return the socket to
                # the _openSockets list.  It's possible that both sides
                # will simultaneously attempt to transmit, but this should
                # be rare, and the effect will be that neither will get
                # the expected ACK and will close the socket to be
                # re-opened on the next retry period, which is a
                # reasonable approach.
                del self._openSockets[opskey]
                intent.stage = self._XMITStepSendData
                intent.amtSent = 0
                return self._nextTransmitStep(intent)
            # If there is an active or pending Intent for this target,
            # just queue this one (by returning True)
            if any(T for T in self._transmitIntents.values()
                   if T.targetAddr == intent.targetAddr and
                   hasattr(T, 'socket')):
                intent.awaitingTXSlot()
                return True
            # Fall through to get a new Socket for this intent
        if isinstance(intent.targetAddr.addressDetails,
                      TXOnlyAdminTCPv4ActorAddress) and \
           intent.targetAddr != self._adminAddr:
            # Cannot initiate outbound connection to this remote
            # Admin; wait for incoming connection instead.
            intent.backoffPause(True)  # KWQ... not really
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        intent.socket = self.new_socket(
            socket.socket,
            *intent.targetAddr .addressDetails.socketArgs)
        intent.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        intent.socket.setblocking(0)
        # Disable Nagle to transmit headers and acks asap; our sends
        # are usually small
        intent.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        try:
            intent.socket.connect(*intent.targetAddr
                                  .addressDetails.connectArgs)
            intent.socket.setblocking(0)
        except socket.error as err:
            # EINPROGRESS means non-blocking socket connect is in progress...
            if not err_inprogress(err.errno):
                thesplog('Socket connect failure %s to %s on %s'
                         ' (returning %s)',
                         err, intent.targetAddr, intent.socket,
                         intent.completionCallback,
                         level=logging.WARNING)
                return self._finishIntent(intent,
                                          SendStatus.DeadTarget
                                          if err_conn_refused(err)
                                          else SendStatus.Failed)
        except Exception as ex:
            thesplog('Unexpected TCP socket connect exception: %s', ex,
                     level=logging.ERROR)
            return self._finishIntent(intent, SendStatus.BadPacket)
        intent.stage = self._XMITStepSendData  # When connect completes
        intent.amtSent = 0
        return True
def _next_XMIT_2(self, intent):
# Got connected, ready to send
if not hasattr(intent, 'socket'):
intent.stage = self._XMITStepRetry
return self._nextTransmitStep(intent)
try:
intent.amtSent += intent.socket.send(
intent.serMsg[intent.amtSent:])
except socket.error as err:
if err_send_inprogress(err.errno):
intent.backoffPause(True)
return True
if err_send_connrefused(err):
# in non-blocking, sometimes connection attempts are
# discovered here rather than for the actual connect
# request.
thesplog('ConnRefused to %s; declaring as DeadTarget.',
intent.targetAddr, level=logging.INFO)
return self._finishIntent(intent, SendStatus.DeadTarget)
thesplog('Socket error sending to %s on %s: %s / %s: %s',
intent.targetAddr, intent.socket, str(err), err.errno,
intent, level=logging.ERROR)
intent.stage = self._XMITStepRetry
return self._nextTransmitStep(intent)
except Exception:
import traceback
thesplog('Error sending: %s', traceback.format_exc(),
level=logging.ERROR)
intent.stage = self._XMITStepRetry
return self._nextTransmitStep(intent)
if intent.amtSent >= len(intent.serMsg):
# After data is sent, stop transmit
intent.stage = self._XMITStepShutdownWrite
return True
    def _next_XMIT_3(self, intent):
        """Post-send stage: prepare to await the remote's ACK.

        Historically this performed a write-side socket shutdown, but
        that tended to stop traffic in both directions, so the body is
        now a no-op before moving to the ACK-wait stage (the try/except
        shape is retained from the original shutdown handling).
        """
        try:
            pass
            # Original did a socket shutdown for writing, but actual
            # socket implementations aren't so sophisticated and this
            # tended to stop all socket communications in both
            # directions.
            # intent.socket.shutdown(socket.SHUT_WR)
        except socket.error:
            # No shutdown handling, just close
            intent.stage = self._XMITStepFinishCleanup
            return self._nextTransmitStep(intent)
        intent.ackbuf = ReceiveBuffer(serializer.loads)
        intent.stage = self._XMITStepWaitForAck
        return True
    def _next_XMIT_4(self, intent):
        """ACK-wait stage: read and process the remote's acknowledgement."""
        # Actually, select below waited on readable, not writeable
        try:
            rcv = intent.socket.recv(intent.ackbuf.remainingAmount())
        except socket.error as err:
            if err_recv_retry(err.errno):
                intent.backoffPause(True)
                return True
            if err_recv_connreset(err):
                thesplog('Remote %s closed connection before ack received'
                         ' at %s for %s',
                         str(intent.targetAddr), str(self.myAddress),
                         intent.identify(),
                         level=logging.WARNING)
            else:
                thesplog('Socket Error waiting for transmit ack from'
                         ' %s to %s: %s',
                         str(intent.targetAddr), str(self.myAddress), err,
                         level=logging.ERROR, exc_info=True)
            rcv = ''  # Remote closed connection
        except Exception as err:
            thesplog('General error waiting for transmit ack from'
                     ' %s to %s: %s',
                     str(intent.targetAddr), str(self.myAddress), err,
                     level=logging.ERROR, exc_info=True)
            rcv = ''  # Remote closed connection
        if not rcv:
            # Socket closed.  Reschedule transmit.
            intent.backoffPause(True)
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        return self._check_XMIT_4_done(intent, rcv)
    def _check_XMIT_4_done(self, intent, rcv):
        """Fold newly-read bytes into the ACK buffer and act on the result.

        The bytes may contain the ACK/NAK control message, or the start
        of a new inbound message from the remote (simultaneous
        transmit); trailing bytes are recursively reprocessed.
        """
        intent.ackbuf.addMore(rcv)
        if not intent.ackbuf.isDone():
            # Continue waiting for ACK
            return True
        compl = intent.ackbuf.completed()
        if not compl:
            # ACK/NAK was corrupted; retry.
            intent.backoffPause(True)
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        ackmsg, intent.extraRead = compl
        if isControlMessage(ackmsg):
            intent.result = SendStatus.Sent if ackmsg == ackPacket \
                            else SendStatus.BadPacket
            intent.stage = self._XMITStepFinishCleanup
            return self._nextTransmitStep(intent)
        # Must have received a transmit packet from the remote;
        # process as a received incoming.
        intent.ackbuf.removeExtra()
        if self._addedDataToIncoming(TCPIncomingPersistent(intent.targetAddr,
                                                           intent.socket,
                                                           intent.ackbuf),
                                     True):
            # intent.ackbuf.completed() said ackmsg was a full receive
            # packet, but _addedDataToIncoming disagreed.  This should
            # NEVER HAPPEN.
            thesplog('<<< Should never happen: '
                     'not full receive while waiting for ACK.'
                     ' Aborting socket.',
                     level=logging.CRITICAL)
            intent.ackbuf = ReceiveBuffer(serializer.loads)
            intent.backoffPause(True)
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        # Bytes past the consumed message may start another exchange;
        # loop around with a fresh ACK buffer.
        intent.ackbuf = ReceiveBuffer(serializer.loads)
        nxtrcv = intent.extraRead
        intent.extraRead = ''
        return self._check_XMIT_4_done(intent, nxtrcv)
def _next_XMIT_5(self, intent):
return self._finishIntent(intent, intent.result)
    def _next_XMIT_6(self, intent):
        """Retry stage: discard socket/ack state and re-attempt or fail.

        Returns True while retries remain (the reset stage runs after
        the retry delay), else finishes the intent as Failed.
        """
        if hasattr(intent, 'socket'):
            _safeSocketShutdown(intent)
            delattr(intent, 'socket')
        if hasattr(intent, 'ackbuf'):
            delattr(intent, 'ackbuf')
        if intent.retry():
            intent.stage = self._XMITStepSendConnect
            # stage just set won't be executed until retry delay times out
            return True
        return self._finishIntent(intent, SendStatus.Failed)
    def _processIntents(self, filedesc, closed=False):
        """Advance the transmit intent (if any) registered on filedesc.

        Returns True if an intent was found for this descriptor
        (whether it completed or was re-filed), False otherwise.
        """
        if filedesc in self._transmitIntents:
            intent = self._transmitIntents[filedesc]
            del self._transmitIntents[filedesc]
            if self._nextTransmitStepCheck(intent, filedesc):
                # Still in progress: re-file under its (possibly new)
                # socket fd, or park it until it acquires a socket.
                if hasattr(intent, 'socket'):
                    self._transmitIntents[intent.socket.fileno()] = intent
                else:
                    self._waitingTransmits.append(intent)
            return True
        return False
    def _processIntentTimeouts(self):
        """Advance or expire all transmit intents on a timer basis.

        Rebuilds the intent tables: intents still in a backoff pause
        are retained untouched; all others take a timer-driven
        transmit step (fileno -1) and are re-filed if still active.
        """
        procIntents = list(self._transmitIntents.values())
        waitIntents = list(self._waitingTransmits)
        self._transmitIntents = {}
        self._waitingTransmits = []
        for intent in procIntents:
            if hasattr(intent, '_pauseUntil') and not intent.expired():
                # Still pausing: keep under its current socket fd.
                self._transmitIntents[intent.socket.fileno()] = intent
                continue
            if self._nextTransmitStepCheck(intent, -1):
                if hasattr(intent, 'socket'):
                    self._transmitIntents[intent.socket.fileno()] = intent
                else:
                    self._waitingTransmits.append(intent)
        for intent in waitIntents:
            if self._nextTransmitStepCheck(intent, -1):
                if hasattr(intent, 'socket'):
                    self._transmitIntents[intent.socket.fileno()] = intent
                else:
                    self._waitingTransmits.append(intent)
@staticmethod
def _waitForSendable(sendIntent):
return sendIntent.stage != TCPTransport._XMITStepWaitForAck
@staticmethod
def _socketFile(sendOrRecv):
return sendOrRecv.socket.fileno() \
if getattr(sendOrRecv, 'socket', None) else None
def set_watch(self, watchlist):
self._watches = watchlist
    def _runWithExpiry(self, incomingHandler):
        """Main event loop: service transmits, receptions, and watches.

        Runs until run_time expires, an abort is requested (draining
        pending transmits when asked), or — when not in transmit-only
        mode — an incoming envelope is returned or the handler signals
        completion.
        """
        xmitOnly = incomingHandler == TransmitOnly or \
                   isinstance(incomingHandler, TransmitOnly)
        if hasattr(self, '_aborting_run'):
            delattr(self, '_aborting_run')
        while not self.run_time.expired() and \
              (not hasattr(self, '_aborting_run') or
               (self._aborting_run and
                (len(self._transmitIntents) > 0 or
                 len(self._waitingTransmits) > 0))):
            if xmitOnly:
                if not self._transmitIntents and not self._waitingTransmits:
                    return 0
            else:
                # Drain any already-queued envelopes before selecting.
                while self._incomingEnvelopes:
                    rEnv = self._incomingEnvelopes.pop(0)
                    if incomingHandler is None:
                        return rEnv
                    r = Thespian__Run_HandlerResult(incomingHandler(rEnv))
                    if not r:
                        return r
            # Build the select() wait sets: intents awaiting an ACK
            # wait for readability, the rest for writability.
            wsend, wrecv = fmap(
                TCPTransport._socketFile,
                partition(TCPTransport._waitForSendable,
                          filter(lambda T: not T.backoffPause(),
                                 self._transmitIntents.values())))
            wrecv = list(filter(None, wrecv))
            wsend = list(filter(None, wsend))
            wrecv.extend(list(
                filter(lambda I: not self._incomingSockets[I].backoffPause(),
                       filter(None, self._incomingSockets))))
            if hasattr(self, '_openSockets'):
                wrecv.extend(list(map(lambda s: s.socket.fileno(),
                                      self._openSockets.values())))
            delays = list(filter(None,
                                 [self.run_time.remaining()] +
                                 [self._transmitIntents[T].delay()
                                  for T in self._transmitIntents] +
                                 [W.delay() for W in self._waitingTransmits] +
                                 [self._incomingSockets[I].delay()
                                  for I in self._incomingSockets]))
            # n.b. if a long period of time has elapsed (e.g. laptop
            # sleeping) then delays could be negative.
            delay = max(0, timePeriodSeconds(min(delays))) if delays else None
            if not xmitOnly:
                wrecv.extend([self.socket.fileno()])
            else:
                # Windows does not support calling select with three
                # empty lists, so as a workaround, supply the main
                # listener if everything else is pending delays (or
                # completed but unrealized) here, and ensure the main
                # listener does not accept any listens below.
                if not wrecv and not wsend:
                    if not hasattr(self, 'dummySock'):
                        self.dummySock = socket.socket(socket.AF_INET,
                                                       socket.SOCK_DGRAM,
                                                       socket.IPPROTO_UDP)
                    wrecv.extend([self.dummySock.fileno()])
            if self._watches:
                wrecv.extend(self._watches)
            rrecv, rsend, rerr = [], [], []
            try:
                rrecv, rsend, rerr = select.select(wrecv, wsend,
                                                   set(wsend+wrecv), delay)
            except ValueError as ex:
                thesplog('ValueError on select(#%d: %s, #%d: %s, #%d: %s, %s)',
                         len(wrecv), wrecv, len(wsend), wsend,
                         len(set(wsend + wrecv)), set(wsend + wrecv),
                         delay, level=logging.ERROR)
                raise
            except (OSError, select.error) as ex:
                errnum = getattr(ex, 'errno', ex.args[0])
                if err_select_retry(errnum):
                    # probably a change in descriptors
                    thesplog('select retry on %s', ex, level=logging.DEBUG)
                    self._check_indicators()
                    continue
                if err_bad_fileno(errnum):
                    # One of the selected file descriptors was bad,
                    # but no indication which one.  It should not be
                    # one of the ones locally managed by this
                    # transport, so it's likely one of the
                    # user-supplied "watched" file descriptors.  Find
                    # and remove it, then carry on.
                    if errnum == errno.EBADF:
                        bad = []
                        for each in self._watches:
                            try:
                                _ = select.select([each], [], [], 0)
                            except:
                                bad.append(each)
                        if not bad:
                            thesplog('bad internal file descriptor!')
                            try:
                                _ = select.select([self.socket.fileno()], [], [], 0)
                            except:
                                # NOTE(review): appends the bound method
                                # self.socket.fileno rather than its
                                # result — likely should be
                                # self.socket.fileno(); confirm before
                                # changing.
                                thesplog('listen %s is bad', self.socket.fileno)
                                rerr.append(self.socket.fileno)
                            for each in wrecv:
                                try:
                                    _ = select.select([each], [], [], 0)
                                except:
                                    thesplog('wrecv %s is bad', each)
                                    rerr.append(each)
                            for each in wsend:
                                try:
                                    select.select([each], [], [], 0)
                                except:
                                    thesplog('wsend %s is bad', each)
                                    rerr.append(each)
                        else:
                            self._watches = [W for W in self._watches if W not in bad]
                        continue
                    # If it was a regular file descriptor, fall through to clean it up.
                else:
                    raise
            if rerr:
                for errfileno in rerr:
                    self._cancel_fd_ops(errfileno)
            origPendingSends = len(self._transmitIntents) + \
                               len(self._waitingTransmits)
            # Handle newly sendable data
            for eachs in rsend:
                self._processIntents(eachs)
            # Handle newly receivable data
            for each in rrecv:
                # n.b. ignore this if trying to quiesce; may have had
                # to supply this fd to avoid calling select with three
                # empty lists.
                if each == self.socket.fileno() and not xmitOnly:
                    self._acceptNewIncoming()
                    continue
                # Get idleSockets before checking incoming and
                # transmit; those latter may modify _openSockets
                # (including replacing the element) so ensure that
                # only the sockets indicated by select are processed,
                # and only once each.
                idleSockets = list(getattr(self, '_openSockets', {}).values())
                if each in self._incomingSockets:
                    incoming = self._incomingSockets[each]
                    del self._incomingSockets[each]
                    incoming = self._handlePossibleIncoming(incoming, each)
                    if incoming:
                        self._incomingSockets[
                            incoming.socket.fileno()] = incoming
                    continue
                if self._processIntents(each):
                    continue
                for idle in idleSockets:
                    rmtaddr = idle.rmtaddr
                    curOpen = self._openSockets.get(opsKey(rmtaddr), None)
                    if curOpen and curOpen != idle:
                        # duplicate sockets to remote, and this one is
                        # no longer tracked, so close it and keep
                        # existing openSocket.
                        _safeSocketShutdown(idle)
                    else:
                        fnum = None
                        try:
                            fnum = idle.socket.fileno()
                        except IOError as ex:
                            if not err_bad_fileno(ex.errno):
                                raise
                        if fnum is None or each == fnum:
                            # Readable idle socket: the remote started
                            # a new inbound message on it.
                            del self._openSockets[opsKey(rmtaddr)]
                            incoming = self._handlePossibleIncoming(
                                TCPIncomingPersistent(rmtaddr, idle.socket),
                                each)
                            if incoming:
                                self._incomingSockets[
                                    incoming.socket.fileno()] = incoming
                        elif idle.expired():
                            _safeSocketShutdown(idle)
                            del self._openSockets[opsKey(rmtaddr)]
            # Handle timeouts
            self._processIntentTimeouts()
            rmvIncoming = []
            for I in self._incomingSockets:
                newI = self._handlePossibleIncoming(self._incomingSockets[I],
                                                    -1)
                if newI:
                    # newI will possibly be new incoming data, but
                    # it's going to use the same socket
                    self._incomingSockets[I] = newI
                else:
                    rmvIncoming.append(I)
            for I in rmvIncoming:
                del self._incomingSockets[I]
            watchready = [W for W in self._watches if W in rrecv]
            if watchready:
                self._incomingEnvelopes.append(
                    ReceiveEnvelope(self.myAddress, WatchMessage(watchready)))
            # Check if it's time to quit
            if [] == rrecv and [] == rsend:
                if [] == rerr and self.run_time.expired():
                    # Timeout, give up
                    return Thespian__Run_Expired()
                continue
            if xmitOnly:
                remXmits = len(self._transmitIntents) + \
                           len(self._waitingTransmits)
                if origPendingSends > remXmits or remXmits == 0:
                    return remXmits
            # Handle queued internal "received" data
            if not xmitOnly:
                while self._incomingEnvelopes:
                    rEnv = self._incomingEnvelopes.pop(0)
                    if incomingHandler is None:
                        return rEnv
                    r = Thespian__Run_HandlerResult(incomingHandler(rEnv))
                    if not r:
                        return r
        return Thespian__Run_Terminated() \
            if hasattr(self, '_aborting_run') else \
            Thespian__Run_Expired()
def _check_indicators(self):
if self._checkChildren:
self._checkChildren = False
self._incomingEnvelopes.append(
ReceiveEnvelope(self.myAddress, ChildMayHaveDied()))
if self._shutdownSignalled:
self._shutdownSignalled = False
self._incomingEnvelopes.append(
ReceiveEnvelope(self.myAddress, ActorExitRequest()))
    def _acceptNewIncoming(self):
        """Accept a new inbound connection on the primary listen socket."""
        accepted = False
        try:
            lsock, rmtTxAddr = self.new_socket(self.socket.accept)
            accepted = True
        except (OSError, socket.error) as ex:
            thesplog('Error accepting incoming: %s', ex)
        self._check_indicators()
        if not accepted or rmtTxAddr == self.myAddress:
            self._incomingEnvelopes.append(Thespian__UpdateWork())
        if not accepted:
            return
        lsock.setblocking(0)
        # Disable Nagle to transmit headers and acks asap
        lsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Note that the TCPIncoming is initially None.
        # Due to the way sockets work, the transmit comes from a
        # system-selected port that is different from the port that
        # the remote Actor (or admin) is listening on (and which
        # represents its official ActorAddress).  Once a successful
        # message has been received, the message will indicate the
        # originating address and the TCPIncoming object will be
        # updated accordingly.
        self._incomingSockets[lsock.fileno()] = (
            (TCPIncomingPersistent
             if hasattr(self, '_openSockets') else
             TCPIncoming)
            (ActorAddress(None), lsock))
    def _handlePossibleIncoming(self, incomingSocket, fileno, closed=False):
        """Advance an in-progress inbound reception.

        Returns the (possibly replaced) incoming object that should
        continue to be monitored, or None when it has finished or been
        dropped.
        """
        if closed:
            # Remote closed, so unconditionally drop this socket
            incomingSocket.close()
            return None
        elif incomingSocket.socket and \
             (incomingSocket.socket.fileno() == fileno or
              not incomingSocket.delay()):
            return self._handleReadableIncoming(incomingSocket)
        else:
            if not incomingSocket.delay():
                # No more delay time left
                incomingSocket.close()
                return None
            return incomingSocket
    def _finishIncoming(self, incomingSocket, fromRealAddr):
        """Dispose of a completed reception, keeping the socket if reusable.

        (fromRealAddr is currently unused in this body.)
        """
        # Only called if incomingSocket can continue to be used; if
        # there was an error then incomingSocket should be closed and
        # released.
        fromAddr = incomingSocket.fromAddress
        if fromAddr and isinstance(incomingSocket, TCPIncomingPersistent):
            # Keep the socket open for reuse, displacing any previous
            # idle socket for the same remote address.
            opskey = opsKey(fromAddr)
            _safeSocketShutdown(self._openSockets.get(opskey, None))
            self._openSockets[opskey] = IdleSocket(incomingSocket.socket,
                                                   fromAddr)
            # A retry-stage transmit to this target can now use the
            # newly idle socket promptly.
            for T in self._transmitIntents.values():
                if T.targetAddr == fromAddr and T.stage == self._XMITStepRetry:
                    T.retry(immediately=True)
                    # This intent will be picked up on the next
                    # timeout check in the main loop and
                    # processed; by waiting for main loop
                    # processing, fairness with read handling is
                    # allowed.
                    break
        else:
            incomingSocket.close()
        return None
    def _handleReadableIncoming(self, inc):
        """Read available bytes for an inbound reception in progress.

        Returns the incoming object to keep monitoring, or None when
        the socket was closed or its data was unusable.
        """
        try:
            rdata = inc.socket.recv(min(8192, inc.remainingSize()))
            inc.failCount = 0
        except socket.error as e:
            inc.failCount = getattr(inc, 'failCount', 0) + 1
            if err_recv_inprogress(e.errno) and \
               inc.failCount < MAX_CONSECUTIVE_READ_FAILURES:
                inc.backoffPause(True)
                return inc
            inc.close()
            return None
        if not rdata:
            # Since this point is only arrived at when select() says
            # the socket is readable, this is an indicator of a closed
            # socket.  Since previous calls didn't detect
            # receivedAllData(), this is an aborted/incomplete
            # reception.  Discard it.
            inc.close()
            return None
        try:
            inc.addData(rdata)
        except Exception:
            # Bad data, so discard it and close the socket.
            thesplog('corrupted incoming data; closing socket',
                     level=logging.WARNING)
            inc.close()
            return None
        return self._addedDataToIncoming(inc)
def _addedDataToIncoming(self, inc, skipFinish=False):
if not inc.receivedAllData():
# Continue running and monitoring this socket
return inc
rdata, extra = '', ''
try:
rdata, extra = inc.data
if isControlMessage(rdata):
raise ValueError('Error: received control message'
' "%s"; expecting incoming data.' %
(str(rdata)))
rEnv = ReceiveEnvelope(*rdata)
except Exception:
import traceback
thesplog('OUCH! Error deserializing received data:'
' %s (rdata="%s", extra="%s")',
traceback.format_exc(), rdata, extra)
try:
inc.socket.send(ackDataErrMsg)
except Exception:
pass # socket will be closed anyhow; AckErr was a courtesy
inc.close()
return None
try:
inc.socket.send(ackMsg)
except socket.error as err:
if err_send_connreset(err):
thesplog('Remote %s closed socket before ACK could be sent',
inc.socket, level=logging.WARNING)
else:
raise
inc.fromAddress = rdata[0]
self._processReceivedEnvelope(rEnv)
if extra and isinstance(inc, TCPIncomingPersistent):
newinc = TCPIncomingPersistent(inc.fromAddress, inc.socket)
try:
newinc.addData(rdata)
except Exception:
# Bad trailing data, so discard it by doing nothing.
thesplog('discarding bad incoming trailing data')
pass
else:
return self._addedDataToIncoming(newinc)
if not skipFinish:
self._finishIncoming(inc, rEnv.sender)
return None
    def _processReceivedEnvelope(self, envelope):
        """Queue a received envelope locally, or relay a ForwardMessage."""
        if not isinstance(envelope.message, ForwardMessage):
            self._incomingEnvelopes.append(envelope)
            return
        if envelope.message.fwdTo == self.myAddress:
            # Forwarding terminates here: deliver the inner message.
            self._incomingEnvelopes.append(
                ReceiveEnvelope(envelope.message.fwdFrom,
                                envelope.message.fwdMessage))
            return
        # The ForwardMessage has not reached the final destination, so
        # update and target it at the next one.
        if len(envelope.message.fwdTargets) < 1 and \
           envelope.message.fwdTo != self.myAddress:
            thesplog('Incorrectly received ForwardMessage destined for'
                     ' %s at %s via %s: %s',
                     envelope.message.fwdTo, self.myAddress,
                     list(map(str, envelope.message.fwdTargets)),
                     envelope.message.fwdMessage,
                     level=logging.ERROR)
            # discard (TBD: send back as Poison? DeadLetter? Routing failure)
            return
        nextTgt = envelope.message.fwdTargets[0]
        envelope.message.fwdTargets = envelope.message.fwdTargets[1:]
        self.scheduleTransmit(getattr(self, '_addressMgr', None),
                              TransmitIntent(nextTgt, envelope.message))
def abort_run(self, drain=False):
self._aborting_run = drain
Queue finished intent processing in TCPTransport for bulk processing.
"""Simple TCP sockets.
Each Actor has a TCP IPv4 port/socket that will accept incoming
connections for messages. Each connection from a remote Actor will
accept a single message per connection. The connection is dropped and
re-established for multiple messages; this is less efficient but has
more fairness.
This transport can be used within a process, between processes, and
even between processes on separate systems.
"""
# n.b. The core of this is very similar to asyncore/asynchat.
# Unfortunately, those modules are deprecated in Python 3.4 in favor
# of asyncio, which is powerful... and complex. Thespian aims to
# support Python 2.6 through 3.4 and beyond, and has more specific
# needs (undoubtedly a subset of asyncio capabilities) that can be
# implemented more simply and directly here. In addition, this module
# should be extensible to support SSL, which asynchat is not (maybe).
# For Thespian, there are two classes of sockets:
# * the Actor's primary receive socket, and
# * transient outgoing send sockets.
# All steps of both types of sockets are handled asynchronously, very
# similarly to the asyncore channel.
#
# For the receive socket, it will listen for and accept incoming
# connections, and then accept a single message on that connection,
# closing the connection on completion (or error).
#
# For the transmit socket, it will connect, send, and close with a
# TransmitIntent.
# ----------------------------------------------------------------------
# TCP Buffering issues
#
# TCP is unique in that there are unusual buffering considerations to
# account for. Specifically, a sender can connect to a listener, send
# a message, and close the socket --- *WITHOUT the receiver even
# processing the accept! As a result, the transmitter must take
# additional steps to ensure that the message that was sent has been
# delivered.
#
# There are two ways that this confirmation can be handled:
#
# 1) confirmation sent back in the original connection
#
# 2) messages confirmed by a separate confirmation message with a
# unique message identifier for idempotency.
#
# Disadvantages of #1:
# * More complicated exchange between sender and receiver
# * There must be a header synchronization with a size indicator so
# that the receiver knows when the full message has been received
# and should be acknowledged.
# * The socket must exist for a potentially much longer period and
# retransmits must still be attempted on failure.
#
# Disadvantages of #2:
# * Doubles connection establishment requirements.
# * More complicated send queuing to ensure ordering of messages between
# sender and recipient. However, this really must exist for condition
# #1 as well.
#
# Could also do a hybrid of both. On send, start with header
# containing message ID (and size?) then wait a brief time after send
# for the ACK, then disconnect and wait for the separate ACK later.
#
# At this point, the connection establishment seems to be the
# overriding performance dominator, and the message header
# synchronization and size indication seem like good ideas anyhow to
# confirm that the entire message has been received by the recipient.
# This method is feasible because of the asynchronous handling of the
# transmit sequence (as opposed to a blocking transmit, which would
# consume the processing budget for highly active scenarios).
import logging
from thespian.system.utilis import (thesplog, fmap, partition)
from thespian.system.timing import timePeriodSeconds, ExpirationTimer
from thespian.actors import *
from thespian.system.transport import *
from thespian.system.transport.IPBase import (TCPv4ActorAddress)
from thespian.system.transport.streamBuffer import (toSendBuffer,
ReceiveBuffer,
ackMsg, ackPacket,
ackDataErrMsg,
isControlMessage)
from thespian.system.transport.asyncTransportBase import asyncTransportBase
from thespian.system.transport.wakeupTransportBase import wakeupTransportBase
from thespian.system.transport.errmgmt import *
from thespian.system.messages.multiproc import ChildMayHaveDied
from thespian.system.addressManager import ActorLocalAddress
import socket
import select
from datetime import timedelta
try:
import cPickle as pickle
except Exception:
import pickle
import errno
from contextlib import closing
# TCP port used for the system admin when not otherwise specified.
DEFAULT_ADMIN_PORT = 1900
serializer = pickle
# json cannot be used because Messages are often structures, which
# cannot be converted to JSON.
# max # of listens to sign up for at a time
LISTEN_DEPTH = 100
# max time to hold open an incoming socket
MAX_INCOMING_SOCKET_PERIOD = timedelta(minutes=7)
# consecutive recv failures tolerated before dropping an incoming socket
MAX_CONSECUTIVE_READ_FAILURES = 20
# close idle sockets after this amount of time
MAX_IDLE_SOCKET_PERIOD = timedelta(minutes=20)
# if true, keep sockets open for multiple messages
REUSE_SOCKETS = True
class TCPEndpoint(TransportInit__Base):
    """Initialization argument bundle for constructing a TCPTransport."""

    def __init__(self, *args):
        self.args = args

    @property
    def addrInst(self):
        """The address instance supplied as the first init argument."""
        return self.args[0]
def _safeSocketShutdown(sock):
if sock:
sock = getattr(sock, 'socket', sock)
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error as ex:
if ex.errno != errno.ENOTCONN:
thesplog('Error during shutdown of socket %s: %s', sock, ex)
sock.close()
class TCPIncoming_Common(PauseWithBackoff):
    """State for one inbound message reception on a single socket.

    Bundles the socket, the remote address (once identified from the
    message itself), and the accumulating receive buffer, with an
    overall expiration and backoff-pause support inherited from
    PauseWithBackoff.
    """
    def __init__(self, rmtAddr, baseSock, rcvBuf=None):
        super(TCPIncoming_Common, self).__init__()
        self._openSock = baseSock
        # _rmtAddr may be None until a message is rcvd with
        # identification
        self._rmtAddr = rmtAddr
        self._rData = rcvBuf or ReceiveBuffer(serializer.loads)
        self._expires = ExpirationTimer(MAX_INCOMING_SOCKET_PERIOD)
        self.failCount = 0  # consecutive recv failures (managed by transport)
    @property
    def socket(self):
        return self._openSock
    @property
    def fromAddress(self):
        return self._rmtAddr
    @fromAddress.setter
    def fromAddress(self, newAddr):
        self._rmtAddr = newAddr
    def delay(self):
        # Time remaining before this reception should be abandoned.
        # n.b. include _pauseUntil from PauseWithBackoff
        return max(timedelta(seconds=0),
                   min(self._expires.remaining(),
                       getattr(self, '_pauseUntil', self._expires).remaining()))
    def addData(self, newData): self._rData.addMore(newData)
    def remainingSize(self): return self._rData.remainingAmount()
    def receivedAllData(self): return self._rData.isDone()
    @property
    def data(self): return self._rData.completed()
    def close(self):
        _safeSocketShutdown(self)
        self._openSock = None
    def __str__(self):
        return 'TCPInc(%s)<%s>' % (str(self._rmtAddr), str(self._rData))
class TCPIncoming(TCPIncoming_Common):
    """Transient inbound reception: socket is dropped on destruction."""
    def __del__(self):
        # FIX: previously referenced the undefined name "s"
        # (_safeSocketShutdown(s.socket)), raising NameError during
        # garbage collection; shut down this instance's own socket.
        _safeSocketShutdown(self.socket)
        self._openSock = None
class TCPIncomingPersistent(TCPIncoming_Common):
    """Inbound reception whose socket stays open for reuse.

    Used in REUSE_SOCKETS mode; the socket's lifetime is managed by the
    transport's _openSockets table, so no destructor shutdown here.
    """
    pass
class IdleSocket(object):
    """An open but currently-unused socket to a remote listen address.

    Held in the transport's _openSockets table until reused for a
    transmit/receive or until its idle allowance expires.
    """

    def __init__(self, socket, addr):
        # n.b. the remote may have bound an outbound connect socket to
        # a different address, but rmtaddr represents the primary
        # address of an Actor/Admin: the one it listens on.
        self.socket = socket
        self.rmtaddr = addr
        self.validity = ExpirationTimer(MAX_IDLE_SOCKET_PERIOD)

    def expired(self):
        """True once the idle-allowance period has elapsed."""
        return self.validity.expired()

    def __str__(self):
        parts = (str(self.socket), str(self.rmtaddr), str(self.validity))
        return 'Idle-socket %s->%s (%s)' % parts

    def shutdown(self, shtarg=socket.SHUT_RDWR):
        self.socket.shutdown(shtarg)

    def close(self):
        self.socket.close()
def opsKey(addr):
    """Key for the _openSockets table: the address-details of addr."""
    details = addr.addressDetails
    return details
# The definition of these two address types has moved to IPBase, but
# declare them here as well for backward compatibility with older
# running Thespian instances.
import thespian.system.transport.IPBase
class RoutedTCPv4ActorAddress(
        thespian.system.transport.IPBase.RoutedTCPv4ActorAddress):
    """Back-compat alias; canonical definition now lives in IPBase."""
    pass
class TXOnlyAdminTCPv4ActorAddress(
        thespian.system.transport.IPBase.TXOnlyAdminTCPv4ActorAddress):
    """Back-compat alias; canonical definition now lives in IPBase."""
    pass
class ExternalTransportCopy(object): pass
class TCPTransport(asyncTransportBase, wakeupTransportBase):
"A transport using TCP IPv4 sockets for communications."
    def __init__(self, initType, *args):
        """Create the transport and bind/listen on its primary socket.

        initType selects the construction mode:
          * ExternalInterfaceTransportInit -- an external process
            talking "in" to an actor system;
          * TCPEndpoint -- a normal Actor/Admin endpoint;
          * ExternalTransportCopy -- a copy sharing an existing admin
            address.
        """
        super(TCPTransport, self).__init__()
        if isinstance(initType, ExternalInterfaceTransportInit):
            # External process that is going to talk "in".  There is
            # no parent, and the child is the systemAdmin.
            capabilities, logDefs, concurrency_context = args
            adminRouting = False
            self.txOnly = False  # communications from outside-in are always local and therefore not restricted.
            convAddr = capabilities.get('Convention Address.IPv4', '')
            # Accept the convention address as a (host, port) tuple, a
            # "host:port" string, or a bare host (default admin port).
            if convAddr and type(convAddr) == type( (1,2) ):
                externalAddr = convAddr
            elif type(convAddr) == type("") and ':' in convAddr:
                externalAddr = convAddr.split(':')
                externalAddr = externalAddr[0], int(externalAddr[1])
            else:
                externalAddr = (convAddr, capabilities.get('Admin Port', DEFAULT_ADMIN_PORT))
            templateAddr = ActorAddress(TCPv4ActorAddress(None, 0, external = externalAddr))
            self._adminAddr = self.getAdminAddr(capabilities)
            self._parentAddr = None
            isAdmin = False
        elif isinstance(initType, TCPEndpoint):
            instanceNum, assignedAddr, self._parentAddr, self._adminAddr, adminRouting, self.txOnly = initType.args
            isAdmin = assignedAddr == self._adminAddr
            templateAddr = assignedAddr or \
                           ActorAddress(
                               TCPv4ActorAddress(None, 0,
                                                 external=(self._parentAddr or
                                                           self._adminAddr or
                                                           True)))
        elif isinstance(initType, ExternalTransportCopy):
            self._adminAddr, self.txOnly, adminRouting = args
            self._parentAddr = None
            isAdmin = False
            templateAddr = ActorAddress(
                TCPv4ActorAddress(None, 0, self._adminAddr))
        else:
            thesplog('TCPTransport init of type %s unsupported', type(initType), level=logging.ERROR)
            raise ActorSystemStartupFailure('Invalid TCPTransport init type (%s)'%type(initType))
        self.socket = socket.socket(*templateAddr.addressDetails.socketArgs)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.socket.bind(*templateAddr.addressDetails.bindArgs)
        self.socket.listen(LISTEN_DEPTH)
        # N.B. myAddress is actually the address we will export for
        # others to talk to us, not the bind address.  The difference
        # is that we bind to '0.0.0.0' (inaddr_any), but that's not a
        # valid address for people to send stuff to us.  The
        # self.socket socket name is likely inaddr_any but has the
        # valid port, whereas the templateAddr has our actual public
        # address.
        if isAdmin and self.txOnly:
            # Must be the admin, and in txOnly mode
            self.myAddress = ActorAddress(TXOnlyAdminTCPv4ActorAddress(
                templateAddr.addressDetails.connectArgs[0][0],
                self.socket.getsockname()[1],
                external=True))
        elif adminRouting:
            self.myAddress = ActorAddress(RoutedTCPv4ActorAddress(
                templateAddr.addressDetails.connectArgs[0][0],
                self.socket.getsockname()[1],
                self._adminAddr,
                txOnly=self.txOnly,
                external=True))
        else:
            self.myAddress = ActorAddress(TCPv4ActorAddress(
                templateAddr.addressDetails.connectArgs[0][0],
                self.socket.getsockname()[1],
                external=True))
        self._transmitIntents = {}  # key = fd, value = tx intent
        self._waitingTransmits = []  # list of intents without sockets
        self._incomingSockets = {}  # key = fd, value = TCP Incoming
        self._incomingEnvelopes = []
        self._finished_intents = []
        self._watches = []  # externally-supplied fds to include in select
        if REUSE_SOCKETS:
            # key = opsKey(remote listen address), value=IdleSocket
            self._openSockets = {}
        self._checkChildren = False
        self._shutdownSignalled = False
def close(self):
"""Releases all resources and terminates functionality. This is
better done deterministically by explicitly calling this
method (although __del__ will attempt to perform similar
operations), but it has the unfortunate side-effect of
making this object modal: after the close it can be
referenced but not successfully used anymore, so it
explicitly nullifies its contents.
"""
if hasattr(self, '_transmitIntents'):
for each in self._transmitIntents:
self._transmitIntents[each].tx_done(SendStatus.Failed)
delattr(self, '_transmitIntents')
if hasattr(self, '_waitingTransmits'):
for each in self._waitingTransmits:
each.tx_done(SendStatus.Failed)
delattr(self, '_waitingTransmits')
if hasattr(self, '_incomingSockets'):
for each in self._incomingSockets:
self._incomingSockets[each].close()
delattr(self, '_incomingSockets')
if hasattr(self, 'socket'):
self._safeSocketShutdown(getattr(self, 'socket', None))
delattr(self, 'socket')
    def __del__(self):
        # Best-effort cleanup on garbage collection; close() is
        # idempotent (it hasattr-guards every resource).
        self.close()
@staticmethod
def _safeSocketShutdown(sock):
# n.b. _safeSocketShutdown is a static method instead of a
# global because if __del__ calls close, the
# _safeSocketShutdown may already have been unbound. However,
# this still needs unusual protection to validate socket in
# case socket was already unloaded.
if sock and socket and isinstance(socket.error, Exception):
sock = getattr(sock, 'socket', sock)
try:
sock.shutdown(socket.SHUT_RDWR)
except socket.error as ex:
if ex.errno != errno.ENOTCONN:
thesplog('Error during shutdown of socket %s: %s', sock, ex)
sock.close()
def protectedFileNumList(self):
return (list(self._transmitIntents.keys()) +
list(filter(None, map(self._socketFile,
self._waitingTransmits))) +
list(self._incomingSockets.keys()) + [self.socket.fileno()])
def childResetFileNumList(self):
return self.protectedFileNumList() + \
[self._openSockets[S].socket.fileno() for S in self._openSockets]
    @staticmethod
    def getAdminAddr(capabilities):
        """Return the ActorAddress of this system's Admin, derived from
        the capabilities ('Admin Port', 'Outbound Only', and any
        convention address).  Uses the TXOnly address class when the
        system is outbound-only.
        """
        return ActorAddress(
            (TXOnlyAdminTCPv4ActorAddress
             if capabilities.get('Outbound Only', False) else
             TCPv4ActorAddress)
            (None, capabilities.get('Admin Port', DEFAULT_ADMIN_PORT),
             # NOTE(review): the trailing "or True" is unreachable
             # because the ('', port) tuple is always truthy; presumably
             # harmless -- confirm before removing.
             external=(TCPTransport.getConventionAddress(capabilities) or
                       ('', capabilities.get('Admin Port',
                                             DEFAULT_ADMIN_PORT)) or
                       True)))
    @staticmethod
    def getAddressFromString(addrspec, adminRouting=False):
        """Convert a "host:port" string (port optional) or an
        already-split (host, port) tuple into an ActorAddress.

        When adminRouting is true, a RoutedTCPv4ActorAddress is built
        so traffic to the address is relayed via the Admin.
        """
        if isinstance(addrspec, tuple):
            addrparts = addrspec
        else:
            addrparts = addrspec.split(':')
        addrtype = (RoutedTCPv4ActorAddress if adminRouting else
                    TCPv4ActorAddress)
        # NOTE(review): a port parsed from a string stays a str here
        # (cf. the explicit int() conversion done for the convention
        # address in __init__); presumably the address class normalizes
        # it -- confirm.
        return ActorAddress(
            addrtype(addrparts[0],
                     addrparts[1] if addrparts[1:] else DEFAULT_ADMIN_PORT,
                     external=True))
@staticmethod
def getConventionAddress(capabilities):
convAddr = capabilities.get('Convention Address.IPv4', None)
if not convAddr:
return None
try:
return TCPTransport.getAddressFromString(convAddr)
except Exception as ex:
thesplog('Invalid TCP convention address "%s": %s', convAddr, ex,
level=logging.ERROR)
raise InvalidActorAddress(convAddr, str(ex))
    def external_transport_clone(self):
        """Create a separate TCPTransport (with its own socket) for an
        external, non-Actor process to use when talking to Actors.
        """
        # An external process wants a unique context for communicating
        # with Actors.
        return TCPTransport(ExternalTransportCopy(),
                            self._adminAddr,
                            self.txOnly,
                            isinstance(self.myAddress.addressDetails,
                                       RoutedTCPv4ActorAddress))
def _updateStatusResponse(self, resp):
"""Called to update a Thespian_SystemStatus or Thespian_ActorStatus
with common information
"""
for each in self._transmitIntents.values():
resp.addPendingMessage(self.myAddress,
each.targetAddr,
str(each.message))
for each in self._waitingTransmits:
resp.addPendingMessage(self.myAddress,
each.targetAddr,
str(each.message))
for each in self._incomingEnvelopes:
resp.addReceivedMessage(each.sender,
self.myAddress,
str(each.message))
asyncTransportBase._updateStatusResponse(self, resp)
wakeupTransportBase._updateStatusResponse(self, resp)
for num, each in enumerate(self._openSockets.values()):
resp.addKeyVal(str(each), 'sock#%d-fd%d' % (num, each.socket.fileno()))
    @staticmethod
    def probeAdmin(addr):
        """Called to see if there might be an admin running already at the
        specified addr.  This is called from the systemBase, so
        simple blocking operations are fine.  This only needs to
        check for a responder; higher level logic will verify that
        it's actually an ActorAdmin suitable for use.

        Returns True when the port appears to be in use (an admin is
        probably present), False otherwise.
        """
        ss = socket.socket(*addr.addressDetails.socketArgs)
        try:
            # On Windows, SO_EXCLUSIVEADDRUSE makes the bind below fail
            # if any other process holds the port; elsewhere fall back
            # to SO_REUSEADDR.
            ss.setsockopt(socket.SOL_SOCKET,
                          getattr(socket, 'SO_EXCLUSIVEADDRUSE',
                                  socket.SO_REUSEADDR), 1)
            try:
                ss.bind(*addr.addressDetails.bindArgs)
                # no other process bound
                return False
            except socket.error as ex:
                if err_bind_inuse(ex.errno):
                    return True
                # Some other error... not sure if that means an admin
                # is running or not.
                return False  # assume not
        finally:
            ss.close()
    def prepEndpoint(self, assignedLocalAddr, capabilities):
        """In the parent, prepare to establish a new communications endpoint
        with a new Child Actor.  The result of this call will be
        passed to a created child process to use when initializing
        the Transport object for that class; the result of this
        call will also be kept by the parent to finalize the
        communications after creation of the Child by calling
        connectEndpoint() with this returned object.
        """
        if isinstance(assignedLocalAddr.addressDetails, ActorLocalAddress):
            # Normal child: pass only its instance number; the child
            # will create its own listen address.
            a1, a2 = assignedLocalAddr.addressDetails.addressInstanceNum, None
        else:
            # assumed to be an actual TCPActorAddress-based address
            # (e.g. admin)
            a1, a2 = None, assignedLocalAddr
        return TCPEndpoint(a1, a2,
                           self.myAddress,
                           self._adminAddr,
                           # adminRouting: route via admin when either
                           # Admin Routing or Outbound Only is set
                           capabilities.get('Admin Routing', False) or
                           capabilities.get('Outbound Only', False),
                           # txOnly
                           capabilities.get('Outbound Only', False))
    def connectEndpoint(self, endPoint):
        # Nothing to do for TCP: the child establishes communications
        # itself, so there is no parent-side finalization step.
        pass
    def deadAddress(self, addressManager, childAddr):
        """Abort all pending transmits to childAddr (now known dead) with
        SendStatus.DeadTarget, then defer to the base class.
        """
        # Split active transmits into those addressed to the dead
        # target (canceli) and the survivors (continuei).
        canceli, continuei = partition(lambda i: i[1].targetAddr == childAddr,
                                       self._transmitIntents.items())
        self._transmitIntents = dict(continuei)
        for _, each in canceli:
            each.socket.close()
            delattr(each, 'socket')
            self._finishIntent(each, SendStatus.DeadTarget)
        canceli, continuei = partition(lambda i: i.targetAddr == childAddr,
                                       self._waitingTransmits)
        self._waitingTransmits = continuei
        for each in canceli:
            self._finishIntent(each, SendStatus.DeadTarget)
        # No need to clean up self._incomingSockets entries: they will
        # timeout naturally.
        super(TCPTransport, self).deadAddress(addressManager, childAddr)
        # Ports may be re-used, so do not set this address to dead in
        # the address manager
    def close_oldest_idle_sockets(self, num_to_close=1):
        """Close the num_to_close least-recently-validated idle sockets
        to recover file descriptors (used when the fd limit is hit).
        """
        # Sort idle sockets oldest-first by their validity timestamp.
        aged_keys = sorted([(self._openSockets[K].validity, K)
                            for K in self._openSockets])
        for _,oldkey in aged_keys[:num_to_close]:
            # NOTE(review): this bare _safeSocketShutdown (also used at
            # several call sites below) must resolve to a module-level
            # helper; the staticmethod of the same name defined above is
            # not reachable by an unqualified call -- confirm the
            # global exists.
            _safeSocketShutdown(self._openSockets.pop(oldkey))
def new_socket(self, op, *args, **kw):
try:
return op(*args, **kw)
except IOError as ex:
if err_too_many_open_sockets(ex):
pass
else:
raise
self.close_oldest_idle_sockets(3)
return op(*args, **kw)
_XMITStepSendConnect = 1
_XMITStepSendData = 2
_XMITStepShutdownWrite = 3
_XMITStepWaitForAck = 4
_XMITStepFinishCleanup = 5
_XMITStepRetry = 6
    def serializer(self, intent):
        # Frame (sender, message) into a send buffer.  The bare name
        # 'serializer' here resolves to the module-level serializer (a
        # method name is only a class attribute, not in function scope).
        return toSendBuffer((self.myAddress, intent.message), serializer.dumps)
    def lostRemote(self, rmtaddr):
        """[optional] Called by adminstrative levels (e.g. convention.py) to
        indicate that the indicated remote address is no longer
        accessible.  This is customarily used only by the Admin in
        "Admin Routing" scenarios when the remote is shutdown or
        de-registered to allow the transport to cleanup (e.g. close
        open sockets, etc.).

        This does *not* do anything to remote TXOnly sockets: those
        connections were initiated by the remote and should
        therefore be dropped by the remote.  Dropping those
        connections at this point would be harmful, especially
        because this is typically called when first reconnecting to
        the remote.
        """
        if isinstance(rmtaddr.addressDetails, TXOnlyAdminTCPv4ActorAddress):
            return
        if hasattr(self, '_openSockets'):
            # Drop any idle kept-open sockets to that system.
            for rmvkey in [each
                           for each in self._openSockets
                           if rmtaddr.addressDetails.isSameSystem(
                                   self._openSockets[each].rmtaddr)]:
                _safeSocketShutdown(self._openSockets[rmvkey])
                del self._openSockets[rmvkey]
        # Cancel in-flight transmits addressed to that system.
        for each in [i for i in self._transmitIntents
                     if rmtaddr.addressDetails.isSameSystem(
                             self._transmitIntents[i].targetAddr)]:
            self._cancel_fd_ops(each)
        # And abandon incoming messages originating from it.
        for each in [i for i,v in self._incomingSockets.items()
                     if rmtaddr.addressDetails.isSameSystem(
                             v.fromAddress
                             if v.fromAddress.addressDetails else
                             v.socket)]:
            self._cancel_fd_ops(each)
    def _cancel_fd_ops(self, errfileno):
        """Handle an errored/closed file descriptor by cancelling or
        retrying whatever operation is tied to it: an incoming socket,
        an active transmit, a waiting transmit, or an idle socket.
        """
        if errfileno == self.socket.fileno():
            # The main listen socket failing is unrecoverable.
            thesplog('SELECT FATAL ERROR ON MAIN LISTEN SOCKET',
                     level=logging.ERROR)
            raise RuntimeError('Fatal listen socket error; aborting')
        if errfileno in self._incomingSockets:
            incoming = self._incomingSockets[errfileno]
            del self._incomingSockets[errfileno]
            # closed=True unconditionally drops the incoming socket.
            incoming = self._handlePossibleIncoming(incoming, errfileno,
                                                    closed=True)
            if incoming:
                self._incomingSockets[incoming.socket.fileno()] = incoming
            return
        if self._processIntents(errfileno, closed=True):
            return
        if self._waitingTransmits:
            # Only the head of the waiting queue is examined here.
            W = self._waitingTransmits.pop(0)
            if self._nextTransmitStepCheck(W, errfileno, closed=True):
                self._waitingTransmits.append(W)
            return
        # Otherwise drop any idle (kept-open) socket using this fd.
        closed_openSocks = []
        for I in getattr(self, '_openSockets', {}):
            if self._socketFile(self._openSockets[I]) == errfileno:
                closed_openSocks.append(I)
        for each in closed_openSocks:
            del self._openSockets[each]
def interrupt_wait(self,
signal_shutdown=False,
check_children=False):
self._shutdownSignalled |= signal_shutdown
self._checkChildren |= check_children
# Now generate a spurious connection to break out of the
# select.select loop. This is especially useful if a signal
# handler caused a message to be sent to myself: get the
# select loop to wakeup and process the message.
with closing(self.new_socket(
socket.socket,
*self.myAddress.addressDetails.socketArgs)) as ts:
try:
ts.connect(*self.myAddress.addressDetails.connectArgs)
except Exception:
pass
    def _scheduleTransmitActual(self, intent):
        """Begin the transmit state machine for intent (possibly after
        wrapping it for admin routing); self-addressed messages are
        delivered locally without touching the network.
        """
        intent = self._forwardIfNeeded(intent)
        if not intent:
            # Forwarding determined delivery is moot (dead target).
            return
        if intent.targetAddr == self.myAddress:
            # Local delivery: queue directly as a received envelope.
            self._processReceivedEnvelope(ReceiveEnvelope(intent.targetAddr,
                                                          intent.message))
            if not isinstance(intent.message, ForwardMessage):
                self.interrupt_wait()
            return self._finishIntent(intent)
        intent.stage = self._XMITStepSendConnect
        if self._nextTransmitStep(intent):
            # Still in progress: track by socket fd, or queue as
            # waiting when no socket could be assigned yet.
            if hasattr(intent, 'socket'):
                self._transmitIntents[intent.socket.fileno()] = intent
            else:
                self._waitingTransmits.append(intent)
    def _finishIntent(self, intent, status=SendStatus.Sent):
        """Complete a transmit intent with the given SendStatus: park its
        socket for reuse on success (when REUSE_SOCKETS is enabled),
        otherwise shut it down and restart any transmits queued
        waiting for the same target.  Always returns False ("intent no
        longer needs to be attempted").
        """
        if hasattr(intent, 'socket'):
            if hasattr(self, '_openSockets'):
                if not self._queue_intent_extra(intent):
                    if status == SendStatus.Sent:
                        opskey = opsKey(intent.targetAddr)
                        # Close any previously-idle socket to the same
                        # target before parking this one in its place.
                        _safeSocketShutdown(self._openSockets.get(opskey, None))
                        self._openSockets[opskey] = IdleSocket(intent.socket,
                                                               intent.targetAddr)
                        # No need to restart a pending transmit for
                        # this target here; the main loop will check
                        # the waitingIntents and find/start the next one
                        # automatically.
                    else:
                        _safeSocketShutdown(intent)
                        # Here waiting intents need to be re-queued
                        # since otherwise they won't run until timeout
                        runnable, waiting = partition(
                            lambda I: I.targetAddr == intent.targetAddr,
                            self._waitingTransmits)
                        self._waitingTransmits = waiting
                        for R in runnable:
                            if status == SendStatus.DeadTarget:
                                self._finished_intents.append((R, status))
                            elif self._nextTransmitStep(R):
                                if hasattr(R, 'socket'):
                                    thesplog('<S> waiting intent is now re-processing: %s', R.identify())
                                    self._transmitIntents[R.socket.fileno()] = R
                                else:
                                    self._waitingTransmits.append(R)
            else:
                _safeSocketShutdown(intent)
            delattr(intent, 'socket')
        self._finished_intents.append((intent, status))
        return False  # intent no longer needs to be attempted
    def _queue_intent_extra(self, intent):
        """If the transmit's ack read pulled in extra trailing bytes,
        treat them as the start of an incoming message on the same
        socket.  Returns True when the socket was handed off to the
        incoming machinery; False when there was no (valid) extra
        data.
        """
        extraRead = getattr(intent, 'extraRead', None)
        if not extraRead:
            return False
        incoming = TCPIncomingPersistent(intent.targetAddr,
                                         intent.socket)
        try:
            incoming.addData(extraRead)
        except Exception:
            # Bad trailing data, so discard it.
            thesplog('discarding bad trailing tx ack data')
            return False
        pendingIncoming = self._addedDataToIncoming(incoming)
        if pendingIncoming:
            self._incomingSockets[
                pendingIncoming.socket.fileno()] = pendingIncoming
        return True  # socket is in-progress or was already handled
    def _forwardIfNeeded(self, intent):
        """If the intent's target requires relaying (a RoutedTCPv4 target),
        wrap the message in a ForwardMessage and retarget the intent at
        the first hop.  Returns the (possibly modified) intent, or None
        when delivery is moot (dead target).
        """
        # Called when an intent is originally received to determine if
        # the target address requires forwarding; if so, wrap the
        # message in a ForwardMessage wrapper and set the routing
        # path.
        if intent.targetAddr == self.myAddress or \
           isinstance(intent.message, ForwardMessage) or \
           not isinstance(intent.targetAddr.addressDetails,
                          RoutedTCPv4ActorAddress):
            return intent
        # Replace None with our local admin and remove this actor
        routing = [A or self._adminAddr
                   for A in intent.targetAddr.addressDetails.routing
                   if A != self.myAddress]
        # In txOnly mode all outbound traffic must pass through our
        # admin first.
        if self.txOnly and routing and \
           (routing[0] != self._adminAddr) and \
           self.myAddress != self._adminAddr:
            routing.insert(0, self._adminAddr)
        if not routing or routing == [intent.targetAddr]:
            return intent
        if intent.targetAddr.addressDetails.isLocalAddr():
            return intent
        newmsg = ForwardMessage(intent.message,
                                intent.targetAddr,
                                self.myAddress, routing)
        newaddr = newmsg.fwdTargets[0]
        if hasattr(self, '_addressMgr'):
            newaddr, newmsg = self._addressMgr.prepMessageSend(newaddr, newmsg)
        try:
            isDead = newmsg == SendStatus.DeadTarget
        except Exception:
            isDead = False
        if isDead:
            # this is a DeadEnvelope or a ChildActorExited; drop
            # it instead of recursing forever.
            self._finishIntent(intent, SendStatus.Sent)
            return None
        # Changing the target addr to the next relay target for the
        # transmit machinery, but the levels above may process
        # completion based on the original target (e.g. systemCommon
        # _checkNextTransmit), so add a completion operation that will
        # reset the target back to the original (this occurs before
        # other callbacks because callbacks are called in reverse
        # order of addition).
        intent.addCallback(lambda r, i, ta=intent.targetAddr:
                           i.changeTargetAddr(ta))
        intent.changeMessage(newmsg)
        intent.changeTargetAddr(newaddr)
        intent.serMsg = self.serializer(intent)
        return intent
    def _nextTransmitStepCheck(self, intent, fileno, closed=False):
        # Return True if this intent is still valid, False if it has
        # been completed.  If fileno is -1, this means check if there is
        # time remaining still on this intent
        if self._socketFile(intent) == fileno or \
           (fileno == -1 and
            intent.timeToRetry(hasattr(self, '_openSockets') and
                               opsKey(intent.targetAddr) in self._openSockets)):
            if closed:
                # fd reported closed/errored: force the retry stage.
                intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        if intent.expired():
            # Transmit timed out (consider this permanent)
            thesplog('Transmit attempt from %s to %s timed out'
                     ', returning PoisonPacket',
                     self.myAddress, intent.targetAddr, level=logging.WARNING)
            # self._incomingEnvelopes.append(ReceiveEnvelope(intent.targetAddr,
            #                                               PoisonPacket(intent.message)))
            # Stop attempting this transmit
            return self._finishIntent(intent, SendStatus.Failed)
        # Continue to attempt this transmit
        if not intent.delay():
            return self._nextTransmitStep(intent)
        return True
def _nextTransmitStep(self, intent):
# Return of True means keep waiting on this Transmit Intent;
# False means it is done
try:
return getattr(self, '_next_XMIT_%s' % intent.stage,
'_unknown_XMIT_step')(intent)
except Exception as ex:
import traceback
thesplog('xmit UNcaught exception %s; aborting intent.\n%s',
ex, traceback.format_exc(), level=logging.ERROR)
return False
    def _next_XMIT_1(self, intent):
        """Connect stage: obtain a socket for the intent, preferring an
        idle kept-open socket to the target; otherwise begin a new
        non-blocking connect.
        """
        if hasattr(self, '_openSockets'):
            opskey = opsKey(intent.targetAddr)
            if opskey in self._openSockets:
                intent.socket = self._openSockets[opskey].socket
                # This intent takes the open socket; there should be only
                # one intent per target but this "take" prevents an
                # erroneous second target intent from causing corruption.
                # The _finishIntent operation will return the socket to
                # the _openSockets list.  It's possible that both sides
                # will simultaneously attempt to transmit, but this should
                # be rare, and the effect will be that neither will get
                # the expected ACK and will close the socket to be
                # re-opened on the next retry period, which is a
                # reasonable approach.
                del self._openSockets[opskey]
                intent.stage = self._XMITStepSendData
                intent.amtSent = 0
                return self._nextTransmitStep(intent)
            # If there is an active or pending Intent for this target,
            # just queue this one (by returning True)
            if any(T for T in self._transmitIntents.values()
                   if T.targetAddr == intent.targetAddr and
                   hasattr(T, 'socket')):
                intent.awaitingTXSlot()
                return True
            # Fall through to get a new Socket for this intent
        if isinstance(intent.targetAddr.addressDetails,
                      TXOnlyAdminTCPv4ActorAddress) and \
           intent.targetAddr != self._adminAddr:
            # Cannot initiate outbound connection to this remote
            # Admin; wait for incoming connection instead.
            intent.backoffPause(True)  # KWQ... not really
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        intent.socket = self.new_socket(
            socket.socket,
            *intent.targetAddr .addressDetails.socketArgs)
        intent.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        intent.socket.setblocking(0)
        # Disable Nagle to transmit headers and acks asap; our sends
        # are usually small
        intent.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        try:
            intent.socket.connect(*intent.targetAddr
                                  .addressDetails.connectArgs)
            intent.socket.setblocking(0)
        except socket.error as err:
            # EINPROGRESS means non-blocking socket connect is in progress...
            if not err_inprogress(err.errno):
                thesplog('Socket connect failure %s to %s on %s'
                         ' (returning %s)',
                         err, intent.targetAddr, intent.socket,
                         intent.completionCallback,
                         level=logging.WARNING)
                return self._finishIntent(intent,
                                          SendStatus.DeadTarget
                                          if err_conn_refused(err)
                                          else SendStatus.Failed)
        except Exception as ex:
            thesplog('Unexpected TCP socket connect exception: %s', ex,
                     level=logging.ERROR)
            return self._finishIntent(intent, SendStatus.BadPacket)
        intent.stage = self._XMITStepSendData  # When connect completes
        intent.amtSent = 0
        return True
    def _next_XMIT_2(self, intent):
        """Send-data stage: write the serialized message to the (now
        connected) socket, tracking the amount sent so far.
        """
        # Got connected, ready to send
        if not hasattr(intent, 'socket'):
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        try:
            intent.amtSent += intent.socket.send(
                intent.serMsg[intent.amtSent:])
        except socket.error as err:
            if err_send_inprogress(err.errno):
                intent.backoffPause(True)
                return True
            if err_send_connrefused(err):
                # in non-blocking, sometimes connection attempts are
                # discovered here rather than for the actual connect
                # request.
                thesplog('ConnRefused to %s; declaring as DeadTarget.',
                         intent.targetAddr, level=logging.INFO)
                return self._finishIntent(intent, SendStatus.DeadTarget)
            thesplog('Socket error sending to %s on %s: %s / %s: %s',
                     intent.targetAddr, intent.socket, str(err), err.errno,
                     intent, level=logging.ERROR)
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        except Exception:
            import traceback
            thesplog('Error sending: %s', traceback.format_exc(),
                     level=logging.ERROR)
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        if intent.amtSent >= len(intent.serMsg):
            # After data is sent, stop transmit
            intent.stage = self._XMITStepShutdownWrite
        return True
def _next_XMIT_3(self, intent):
try:
pass
# Original did a socket shutdown for writing, but actual
# socket implementations aren't so sophisticated and this
# tended to stop all socket communications in both
# directions.
# intent.socket.shutdown(socket.SHUT_WR)
except socket.error:
# No shutdown handling, just close
intent.stage = self._XMITStepFinishCleanup
return self._nextTransmitStep(intent)
intent.ackbuf = ReceiveBuffer(serializer.loads)
intent.stage = self._XMITStepWaitForAck
return True
    def _next_XMIT_4(self, intent):
        """Wait-for-ack stage: read the remote's ack/nak response.  On a
        closed connection or socket error, reschedule the transmit.
        """
        # Actually, select below waited on readable, not writeable
        try:
            rcv = intent.socket.recv(intent.ackbuf.remainingAmount())
        except socket.error as err:
            if err_recv_retry(err.errno):
                intent.backoffPause(True)
                return True
            if err_recv_connreset(err):
                thesplog('Remote %s closed connection before ack received'
                         ' at %s for %s',
                         str(intent.targetAddr), str(self.myAddress),
                         intent.identify(),
                         level=logging.WARNING)
            else:
                thesplog('Socket Error waiting for transmit ack from'
                         ' %s to %s: %s',
                         str(intent.targetAddr), str(self.myAddress), err,
                         level=logging.ERROR, exc_info=True)
            rcv = ''  # Remote closed connection
        except Exception as err:
            thesplog('General error waiting for transmit ack from'
                     ' %s to %s: %s',
                     str(intent.targetAddr), str(self.myAddress), err,
                     level=logging.ERROR, exc_info=True)
            rcv = ''  # Remote closed connection
        if not rcv:
            # Socket closed.  Reschedule transmit.
            intent.backoffPause(True)
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        return self._check_XMIT_4_done(intent, rcv)
    def _check_XMIT_4_done(self, intent, rcv):
        """Fold newly received bytes into the ack buffer and handle a
        completed ack/nak -- or a message the remote transmitted on the
        same socket while our ack was pending.
        """
        intent.ackbuf.addMore(rcv)
        if not intent.ackbuf.isDone():
            # Continue waiting for ACK
            return True
        compl = intent.ackbuf.completed()
        if not compl:
            # ACK/NAK was corrupted; retry.
            intent.backoffPause(True)
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        ackmsg, intent.extraRead = compl
        if isControlMessage(ackmsg):
            # Record the result; stage 5 completes the intent with it.
            intent.result = SendStatus.Sent if ackmsg == ackPacket \
                            else SendStatus.BadPacket
            intent.stage = self._XMITStepFinishCleanup
            return self._nextTransmitStep(intent)
        # Must have received a transmit packet from the remote;
        # process as a received incoming.
        intent.ackbuf.removeExtra()
        if self._addedDataToIncoming(TCPIncomingPersistent(intent.targetAddr,
                                                           intent.socket,
                                                           intent.ackbuf),
                                     True):
            # intent.ackbuf.completed() said ackmsg was a full receive
            # packet, but _addedDataToIncoming disagreed.  This should
            # NEVER HAPPEN.
            thesplog('<<< Should never happen: '
                     'not full receive while waiting for ACK.'
                     ' Aborting socket.',
                     level=logging.CRITICAL)
            intent.ackbuf = ReceiveBuffer(serializer.loads)
            intent.backoffPause(True)
            intent.stage = self._XMITStepRetry
            return self._nextTransmitStep(intent)
        # Recurse over any further trailing bytes (may hold our ack).
        intent.ackbuf = ReceiveBuffer(serializer.loads)
        nxtrcv = intent.extraRead
        intent.extraRead = ''
        return self._check_XMIT_4_done(intent, nxtrcv)
    def _next_XMIT_5(self, intent):
        # Cleanup stage: complete the intent with the result recorded
        # during the ack stage.
        return self._finishIntent(intent, intent.result)
    def _next_XMIT_6(self, intent):
        # Retry stage: drop any socket/ack state, then either re-enter
        # the state machine at the connect stage (after the retry
        # delay) or give up and fail the intent.
        if hasattr(intent, 'socket'):
            # NOTE(review): bare _safeSocketShutdown must resolve to a
            # module-level helper; the staticmethod of the same name is
            # not reachable unqualified -- confirm the global exists.
            _safeSocketShutdown(intent)
            delattr(intent, 'socket')
        if hasattr(intent, 'ackbuf'):
            delattr(intent, 'ackbuf')
        if intent.retry():
            intent.stage = self._XMITStepSendConnect
            # stage just set won't be executed until retry delay times out
            return True
        return self._finishIntent(intent, SendStatus.Failed)
def _processIntents(self, filedesc, closed=False):
if filedesc in self._transmitIntents:
intent = self._transmitIntents[filedesc]
del self._transmitIntents[filedesc]
if self._nextTransmitStepCheck(intent, filedesc):
if hasattr(intent, 'socket'):
self._transmitIntents[intent.socket.fileno()] = intent
else:
self._waitingTransmits.append(intent)
return True
return False
    def _processIntentTimeouts(self):
        """Re-examine every transmit intent for retry/timeout handling
        (fileno -1 tells the step check to test time remaining).
        """
        procIntents = list(self._transmitIntents.values())
        waitIntents = list(self._waitingTransmits)
        self._transmitIntents = {}
        self._waitingTransmits = []
        for intent in procIntents:
            if hasattr(intent, '_pauseUntil') and not intent.expired():
                # Still in a backoff pause; leave it registered as-is.
                self._transmitIntents[intent.socket.fileno()] = intent
                continue
            if self._nextTransmitStepCheck(intent, -1):
                if hasattr(intent, 'socket'):
                    self._transmitIntents[intent.socket.fileno()] = intent
                else:
                    self._waitingTransmits.append(intent)
        for intent in waitIntents:
            if self._nextTransmitStepCheck(intent, -1):
                if hasattr(intent, 'socket'):
                    self._transmitIntents[intent.socket.fileno()] = intent
                else:
                    self._waitingTransmits.append(intent)
@staticmethod
def _waitForSendable(sendIntent):
return sendIntent.stage != TCPTransport._XMITStepWaitForAck
@staticmethod
def _socketFile(sendOrRecv):
return sendOrRecv.socket.fileno() \
if getattr(sendOrRecv, 'socket', None) else None
    def set_watch(self, watchlist):
        # Record externally-supplied file descriptors to include in the
        # select() wait; readiness is reported via a WatchMessage
        # envelope (see _runWithExpiry).
        self._watches = watchlist
def _runWithExpiry(self, incomingHandler):
xmitOnly = incomingHandler == TransmitOnly or \
isinstance(incomingHandler, TransmitOnly)
if hasattr(self, '_aborting_run'):
delattr(self, '_aborting_run')
while not self.run_time.expired() and \
(not hasattr(self, '_aborting_run') or
(self._aborting_run and
(len(self._transmitIntents) > 0 or
len(self._waitingTransmits) > 0))):
if xmitOnly:
if not self._transmitIntents and not self._waitingTransmits:
return 0
else:
while self._incomingEnvelopes:
rEnv = self._incomingEnvelopes.pop(0)
if incomingHandler is None:
return rEnv
r = Thespian__Run_HandlerResult(incomingHandler(rEnv))
if not r:
return r
wsend, wrecv = fmap(
TCPTransport._socketFile,
partition(TCPTransport._waitForSendable,
filter(lambda T: not T.backoffPause(),
self._transmitIntents.values())))
wrecv = list(filter(None, wrecv))
wsend = list(filter(None, wsend))
wrecv.extend(list(
filter(lambda I: not self._incomingSockets[I].backoffPause(),
filter(None, self._incomingSockets))))
if hasattr(self, '_openSockets'):
wrecv.extend(list(map(lambda s: s.socket.fileno(),
self._openSockets.values())))
delays = list(filter(None,
[self.run_time.remaining()] +
[self._transmitIntents[T].delay()
for T in self._transmitIntents] +
[W.delay() for W in self._waitingTransmits] +
[self._incomingSockets[I].delay()
for I in self._incomingSockets]))
# n.b. if a long period of time has elapsed (e.g. laptop
# sleeping) then delays could be negative.
delay = max(0, timePeriodSeconds(min(delays))) if delays else None
if not xmitOnly:
wrecv.extend([self.socket.fileno()])
else:
# Windows does not support calling select with three
# empty lists, so as a workaround, supply the main
# listener if everything else is pending delays (or
# completed but unrealized) here, and ensure the main
# listener does not accept any listens below.
if not wrecv and not wsend:
if not hasattr(self, 'dummySock'):
self.dummySock = socket.socket(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
wrecv.extend([self.dummySock.fileno()])
if self._watches:
wrecv.extend(self._watches)
rrecv, rsend, rerr = [], [], []
try:
rrecv, rsend, rerr = select.select(wrecv, wsend,
set(wsend+wrecv), delay)
except ValueError as ex:
thesplog('ValueError on select(#%d: %s, #%d: %s, #%d: %s, %s)',
len(wrecv), wrecv, len(wsend), wsend,
len(set(wsend + wrecv)), set(wsend + wrecv),
delay, level=logging.ERROR)
raise
except (OSError, select.error) as ex:
errnum = getattr(ex, 'errno', ex.args[0])
if err_select_retry(errnum):
# probably a change in descriptors
thesplog('select retry on %s', ex, level=logging.DEBUG)
self._check_indicators()
continue
if err_bad_fileno(errnum):
# One of the selected file descriptors was bad,
# but no indication which one. It should not be
# one of the ones locally managed by this
# transport, so it's likely one of the
# user-supplied "watched" file descriptors. Find
# and remove it, then carry on.
if errnum == errno.EBADF:
bad = []
for each in self._watches:
try:
_ = select.select([each], [], [], 0)
except:
bad.append(each)
if not bad:
thesplog('bad internal file descriptor!')
try:
_ = select.select([self.socket.fileno()], [], [], 0)
except:
thesplog('listen %s is bad', self.socket.fileno)
rerr.append(self.socket.fileno)
for each in wrecv:
try:
_ = select.select([each], [], [], 0)
except:
thesplog('wrecv %s is bad', each)
rerr.append(each)
for each in wsend:
try:
select.select([each], [], [], 0)
except:
thesplog('wsend %s is bad', each)
rerr.append(each)
else:
self._watches = [W for W in self._watches if W not in bad]
continue
# If it was a regular file descriptor, fall through to clean it up.
else:
raise
if rerr:
for errfileno in rerr:
self._cancel_fd_ops(errfileno)
origPendingSends = len(self._transmitIntents) + \
len(self._waitingTransmits)
# Handle newly sendable data
for eachs in rsend:
self._processIntents(eachs)
# Handle newly receivable data
for each in rrecv:
# n.b. ignore this if trying to quiesce; may have had
# to supply this fd to avoid calling select with three
# empty lists.
if each == self.socket.fileno() and not xmitOnly:
self._acceptNewIncoming()
continue
# Get idleSockets before checking incoming and
# transmit; those latter may modify _openSockets
# (including replacing the element) so ensure that
# only the sockets indicated by select are processed,
# and only once each.
idleSockets = list(getattr(self, '_openSockets', {}).values())
if each in self._incomingSockets:
incoming = self._incomingSockets[each]
del self._incomingSockets[each]
incoming = self._handlePossibleIncoming(incoming, each)
if incoming:
self._incomingSockets[
incoming.socket.fileno()] = incoming
continue
if self._processIntents(each):
continue
for idle in idleSockets:
rmtaddr = idle.rmtaddr
curOpen = self._openSockets.get(opsKey(rmtaddr), None)
if curOpen and curOpen != idle:
# duplicate sockets to remote, and this one is
# no longer tracked, so close it and keep
# existing openSocket.
_safeSocketShutdown(idle)
else:
fnum = None
try:
fnum = idle.socket.fileno()
except IOError as ex:
if not err_bad_fileno(ex.errno):
raise
if fnum is None or each == fnum:
del self._openSockets[opsKey(rmtaddr)]
incoming = self._handlePossibleIncoming(
TCPIncomingPersistent(rmtaddr, idle.socket),
each)
if incoming:
self._incomingSockets[
incoming.socket.fileno()] = incoming
elif idle.expired():
_safeSocketShutdown(idle)
del self._openSockets[opsKey(rmtaddr)]
# Handle timeouts
self._processIntentTimeouts()
rmvIncoming = []
for I in self._incomingSockets:
newI = self._handlePossibleIncoming(self._incomingSockets[I],
-1)
if newI:
# newI will possibly be new incoming data, but
# it's going to use the same socket
self._incomingSockets[I] = newI
else:
rmvIncoming.append(I)
for I in rmvIncoming:
del self._incomingSockets[I]
watchready = [W for W in self._watches if W in rrecv]
if watchready:
self._incomingEnvelopes.append(
ReceiveEnvelope(self.myAddress, WatchMessage(watchready)))
# Initiate completion operations for transmits (which may
# result in other transmit calls).
senddone = self._finished_intents
self._finished_intents = []
for intent,sts in senddone:
intent.tx_done(sts)
# Check if it's time to quit
if [] == rrecv and [] == rsend:
if [] == rerr and self.run_time.expired():
# Timeout, give up
return Thespian__Run_Expired()
continue
if xmitOnly:
remXmits = len(self._transmitIntents) + \
len(self._waitingTransmits)
if origPendingSends > remXmits or remXmits == 0:
return remXmits
# Handle queued internal "received" data
if not xmitOnly:
while self._incomingEnvelopes:
rEnv = self._incomingEnvelopes.pop(0)
if incomingHandler is None:
return rEnv
r = Thespian__Run_HandlerResult(incomingHandler(rEnv))
if not r:
return r
return Thespian__Run_Terminated() \
if hasattr(self, '_aborting_run') else \
Thespian__Run_Expired()
def _check_indicators(self):
if self._checkChildren:
self._checkChildren = False
self._incomingEnvelopes.append(
ReceiveEnvelope(self.myAddress, ChildMayHaveDied()))
if self._shutdownSignalled:
self._shutdownSignalled = False
self._incomingEnvelopes.append(
ReceiveEnvelope(self.myAddress, ActorExitRequest()))
    def _acceptNewIncoming(self):
        """Accept a new inbound connection on the listening socket.

        On failure (or a self-connection used as a wakeup) a
        Thespian__UpdateWork is queued so the main loop re-evaluates its
        work.  A successfully accepted socket is made non-blocking and
        registered in self._incomingSockets keyed by file descriptor.
        """
        accepted = False
        try:
            lsock, rmtTxAddr = self.new_socket(self.socket.accept)
            accepted = True
        except (OSError, socket.error) as ex:
            thesplog('Error accepting incoming: %s', ex)
        self._check_indicators()
        # Note: when not accepted, rmtTxAddr is unbound; the short-circuit
        # on `not accepted` keeps this safe.
        if not accepted or rmtTxAddr == self.myAddress:
            self._incomingEnvelopes.append(Thespian__UpdateWork())
        if not accepted:
            return
        lsock.setblocking(0)
        # Disable Nagle to transmit headers and acks asap
        lsock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        # Note that the TCPIncoming address is initially None.
        # Due to the way sockets work, the transmit comes from a
        # system-selected port that is different from the port that
        # the remote Actor (or admin) is listening on (and which
        # represents its official ActorAddress).  Once a successful
        # message has been received, the message will indicate the
        # originating address and the TCPIncoming object will be
        # updated accordingly.
        self._incomingSockets[lsock.fileno()] = (
            (TCPIncomingPersistent
             if hasattr(self, '_openSockets') else
             TCPIncoming)
            (ActorAddress(None), lsock))
def _handlePossibleIncoming(self, incomingSocket, fileno, closed=False):
if closed:
# Remote closed, so unconditionally drop this socket
incomingSocket.close()
return None
elif incomingSocket.socket and \
(incomingSocket.socket.fileno() == fileno or
not incomingSocket.delay()):
return self._handleReadableIncoming(incomingSocket)
else:
if not incomingSocket.delay():
# No more delay time left
incomingSocket.close()
return None
return incomingSocket
    def _finishIncoming(self, incomingSocket, fromRealAddr):
        """Finish a completed reception, keeping persistent sockets alive.

        A persistent socket with a known peer address is parked in
        self._openSockets as an IdleSocket for reuse, and any transmit
        to that peer stuck in the retry stage is kicked to retry
        immediately.  Anything else is simply closed.
        """
        # Only called if incomingSocket can continue to be used; if
        # there was an error then incomingSocket should be closed and
        # released.
        fromAddr = incomingSocket.fromAddress
        if fromAddr and isinstance(incomingSocket, TCPIncomingPersistent):
            opskey = opsKey(fromAddr)
            # Shut down any previously-parked socket for this peer before
            # replacing it with the freshly-used one.
            _safeSocketShutdown(self._openSockets.get(opskey, None))
            self._openSockets[opskey] = IdleSocket(incomingSocket.socket,
                                                   fromAddr)
            for T in self._transmitIntents.values():
                if T.targetAddr == fromAddr and T.stage == self._XMITStepRetry:
                    T.retry(immediately=True)
                    # This intent will be picked up on the next
                    # timeout check in the main loop and
                    # processed; by waiting for main loop
                    # processing, fairness with read handling is
                    # allowed.
                    break
        else:
            incomingSocket.close()
        return None
    def _handleReadableIncoming(self, inc):
        """Read available bytes from an incoming socket into its buffer.

        Returns inc to keep monitoring it, or None when the socket has
        been closed and discarded.
        """
        try:
            # Read at most 8K, or less if the message needs fewer bytes.
            rdata = inc.socket.recv(min(8192, inc.remainingSize()))
            inc.failCount = 0
        except socket.error as e:
            # Tolerate transient in-progress errors up to a bounded number
            # of consecutive failures, pausing with a backoff in between.
            inc.failCount = getattr(inc, 'failCount', 0) + 1
            if err_recv_inprogress(e.errno) and \
               inc.failCount < MAX_CONSECUTIVE_READ_FAILURES:
                inc.backoffPause(True)
                return inc
            inc.close()
            return None
        if not rdata:
            # Since this point is only arrived at when select() says
            # the socket is readable, this is an indicator of a closed
            # socket.  Since previous calls didn't detect
            # receivedAllData(), this is an aborted/incomplete
            # reception.  Discard it.
            inc.close()
            return None
        try:
            inc.addData(rdata)
        except Exception:
            # Bad data, so discard it and close the socket.
            thesplog('corrupted incoming data; closing socket',
                     level=logging.WARNING)
            inc.close()
            return None
        return self._addedDataToIncoming(inc)
def _addedDataToIncoming(self, inc, skipFinish=False):
if not inc.receivedAllData():
# Continue running and monitoring this socket
return inc
rdata, extra = '', ''
try:
rdata, extra = inc.data
if isControlMessage(rdata):
raise ValueError('Error: received control message'
' "%s"; expecting incoming data.' %
(str(rdata)))
rEnv = ReceiveEnvelope(*rdata)
except Exception:
import traceback
thesplog('OUCH! Error deserializing received data:'
' %s (rdata="%s", extra="%s")',
traceback.format_exc(), rdata, extra)
try:
inc.socket.send(ackDataErrMsg)
except Exception:
pass # socket will be closed anyhow; AckErr was a courtesy
inc.close()
return None
try:
inc.socket.send(ackMsg)
except socket.error as err:
if err_send_connreset(err):
thesplog('Remote %s closed socket before ACK could be sent',
inc.socket, level=logging.WARNING)
else:
raise
inc.fromAddress = rdata[0]
self._processReceivedEnvelope(rEnv)
if extra and isinstance(inc, TCPIncomingPersistent):
newinc = TCPIncomingPersistent(inc.fromAddress, inc.socket)
try:
newinc.addData(rdata)
except Exception:
# Bad trailing data, so discard it by doing nothing.
thesplog('discarding bad incoming trailing data')
pass
else:
return self._addedDataToIncoming(newinc)
if not skipFinish:
self._finishIncoming(inc, rEnv.sender)
return None
    def _processReceivedEnvelope(self, envelope):
        """Queue a received envelope, unwrapping or relaying ForwardMessages.

        Non-forward messages are queued directly.  A ForwardMessage
        addressed to this transport is unwrapped and queued as if sent by
        the original sender; otherwise it is re-transmitted toward the
        next hop in its fwdTargets chain.
        """
        if not isinstance(envelope.message, ForwardMessage):
            self._incomingEnvelopes.append(envelope)
            return
        if envelope.message.fwdTo == self.myAddress:
            # Final destination: deliver the wrapped message as coming
            # from the original sender.
            self._incomingEnvelopes.append(
                ReceiveEnvelope(envelope.message.fwdFrom,
                                envelope.message.fwdMessage))
            return
        # The ForwardMessage has not reached the final destination, so
        # update and target it at the next one.
        if len(envelope.message.fwdTargets) < 1 and \
           envelope.message.fwdTo != self.myAddress:
            # No remaining hops but not addressed to us: routing error.
            thesplog('Incorrectly received ForwardMessage destined for'
                     ' %s at %s via %s: %s',
                     envelope.message.fwdTo, self.myAddress,
                     list(map(str, envelope.message.fwdTargets)),
                     envelope.message.fwdMessage,
                     level=logging.ERROR)
            # discard (TBD: send back as Poison? DeadLetter? Routing failure)
            return
        # Consume the next hop from the chain and transmit to it.
        nextTgt = envelope.message.fwdTargets[0]
        envelope.message.fwdTargets = envelope.message.fwdTargets[1:]
        self.scheduleTransmit(getattr(self, '_addressMgr', None),
                              TransmitIntent(nextTgt, envelope.message))
    def abort_run(self, drain=False):
        """Request termination of the transport's run loop.

        Sets the _aborting_run attribute, whose presence makes the run
        loop return Thespian__Run_Terminated on exit.  The drain flag is
        stored as the attribute value; presumably True asks for pending
        transmits to be drained first -- confirm against the run loop.
        """
        self._aborting_run = drain
|
from paraview.simple import Glyph, Sphere
from paraview.servermanager import CreateRenderView
from simphony.cuds import ABCMesh, ABCLattice, ABCParticles
from simphony_paraview.core.api import (
loaded_in_paraview, typical_distance, set_data)
from simphony_paraview.core.fixes import CreateRepresentation
from simphony_paraview.core.compatibility import (
vtkRenderWindowInteractor, vtkInteractorStyleJoystickCamera)
def show(cuds, select=None, testing=None):
    """ Show the cuds objects using the default visualisation.

    Parameters
    ----------
    cuds :
        A top level cuds object (e.g. a mesh). The method will detect
        the type of object and create the appropriate visualisation.

    select : tuple(CUBA, kind)
        The (CUBA, kind) selection of the CUBA attribute to
        use. ``kind`` can be one of the {'point', 'particles',
        'nodes', 'elements', 'bonds'}

    testing : callable(obj, event)
        A callable object that accepts the interactor object and a
        timer event. The callable will be executed after 1000 msec.
        This is commonly used for testing. Default value is None

    Raises
    ------
    ValueError :
        When the ``kind`` in select is not valid for the container type.
    """
    with loaded_in_paraview(cuds) as source:
        # XXX Special workaround to avoid segfault on exit as
        # as seen in http://www.paraview.org/Bug/view.php?id=13124
        view = CreateRenderView()
        representation = CreateRepresentation(source, view)
        # The ``kind`` part of the selection, validated per container type.
        items = None if select is None else select[1]
        message = "Container does not have: {}"
        if isinstance(cuds, ABCLattice):
            representation.Representation = "Points"
            if items not in (None, 'nodes'):
                raise ValueError(message.format(items))
        elif isinstance(cuds, ABCParticles):
            # Particles are rendered as sphere glyphs sized by the typical
            # inter-particle distance of the source.
            sphere = Sphere(Radius=typical_distance(source))
            glyphs = Glyph(Input=source, ScaleMode='off', GlyphType=sphere)
            representation = CreateRepresentation(glyphs, view)
            if items not in (None, 'particles', 'bonds'):
                raise ValueError(message.format(items))
        elif isinstance(cuds, ABCMesh):
            representation.Representation = "Surface"
            if items not in (None, 'points', 'elements'):
                raise ValueError(message.format(items))
        if select is not None:
            set_data(representation, source, select)
        interactor = vtkRenderWindowInteractor()
        # Note: we cannot use any interactor style supporting manipulation
        # of actors. The reason is that something in the chain of
        # responsibility is incorrectly set up to refer to the actors.
        # See issue https://github.com/simphony/simphony-paraview/issues/23
        interactor.SetInteractorStyle(vtkInteractorStyleJoystickCamera())
        interactor.SetRenderWindow(view.GetRenderWindow())
        interactor.Initialize()
        if testing is not None:
            # Schedule the testing callback to fire once after 1000 msec.
            timerid = interactor.CreateOneShotTimer(1000)
            handler = Handler(testing, timerid)
            interactor.AddObserver('TimerEvent', handler)
        try:
            camera = view.GetActiveCamera()
            camera.Elevation(45)
            camera.Yaw(45)
            view.ResetCamera()
            view.StillRender()
            interactor.Start()
        finally:
            # Always detach observers so the interactor can be released.
            interactor.RemoveAllObservers()
class Handler(object):
    """ Helper class to hold the callback to execute on timer event.

    The wrapped callback is invoked once; the timer is then destroyed
    so the callback cannot fire again.
    """
    def __init__(self, callback, timerid):
        # callback(obj, event): invoked when the timer fires.
        self.callback = callback
        # Id of the one-shot timer to destroy after the callback runs.
        self.timerid = timerid
    def __call__(self, obj, event):
        self.callback(obj, event)
        # Cancel the timer so this handler only executes once.
        obj.DestroyTimer(self.timerid)
Flake8 line-length fixes (rewrapped an over-long comment).
from paraview.simple import Glyph, Sphere
from paraview.servermanager import CreateRenderView
from simphony.cuds import ABCMesh, ABCLattice, ABCParticles
from simphony_paraview.core.api import (
loaded_in_paraview, typical_distance, set_data)
from simphony_paraview.core.fixes import CreateRepresentation
from simphony_paraview.core.compatibility import (
vtkRenderWindowInteractor, vtkInteractorStyleJoystickCamera)
def show(cuds, select=None, testing=None):
    """ Show the cuds objects using the default visualisation.

    Parameters
    ----------
    cuds :
        A top level cuds object (e.g. a mesh). The method will detect
        the type of object and create the appropriate visualisation.

    select : tuple(CUBA, kind)
        The (CUBA, kind) selection of the CUBA attribute to
        use. ``kind`` can be one of the {'point', 'particles',
        'nodes', 'elements', 'bonds'}

    testing : callable(obj, event)
        A callable object that accepts the interactor object and a
        timer event. The callable will be executed after 1000 msec.
        This is commonly used for testing. Default value is None

    Raises
    ------
    ValueError :
        When the ``kind`` in select is not valid for the container type.
    """
    with loaded_in_paraview(cuds) as source:
        # XXX Special workaround to avoid segfault on exit as
        # as seen in http://www.paraview.org/Bug/view.php?id=13124
        view = CreateRenderView()
        representation = CreateRepresentation(source, view)
        # The ``kind`` part of the selection, validated per container type.
        items = None if select is None else select[1]
        message = "Container does not have: {}"
        if isinstance(cuds, ABCLattice):
            representation.Representation = "Points"
            if items not in (None, 'nodes'):
                raise ValueError(message.format(items))
        elif isinstance(cuds, ABCParticles):
            # Particles are rendered as sphere glyphs sized by the typical
            # inter-particle distance of the source.
            sphere = Sphere(Radius=typical_distance(source))
            glyphs = Glyph(Input=source, ScaleMode='off', GlyphType=sphere)
            representation = CreateRepresentation(glyphs, view)
            if items not in (None, 'particles', 'bonds'):
                raise ValueError(message.format(items))
        elif isinstance(cuds, ABCMesh):
            representation.Representation = "Surface"
            if items not in (None, 'points', 'elements'):
                raise ValueError(message.format(items))
        if select is not None:
            set_data(representation, source, select)
        interactor = vtkRenderWindowInteractor()
        # Note: we cannot use any interactor style supporting manipulation
        # of actors. The reason is that something in the chain of
        # responsibility is incorrectly set up to refer to the actors.
        # See issue https://github.com/simphony/simphony-paraview/issues/23
        interactor.SetInteractorStyle(vtkInteractorStyleJoystickCamera())
        interactor.SetRenderWindow(view.GetRenderWindow())
        interactor.Initialize()
        if testing is not None:
            # Schedule the testing callback to fire once after 1000 msec.
            timerid = interactor.CreateOneShotTimer(1000)
            handler = Handler(testing, timerid)
            interactor.AddObserver('TimerEvent', handler)
        try:
            camera = view.GetActiveCamera()
            camera.Elevation(45)
            camera.Yaw(45)
            view.ResetCamera()
            view.StillRender()
            interactor.Start()
        finally:
            # Always detach observers so the interactor can be released.
            interactor.RemoveAllObservers()
class Handler(object):
    """ Helper class to hold the callback to execute on timer event.

    The wrapped callback is invoked once; the timer is then destroyed
    so the callback cannot fire again.
    """
    def __init__(self, callback, timerid):
        # callback(obj, event): invoked when the timer fires.
        self.callback = callback
        # Id of the one-shot timer to destroy after the callback runs.
        self.timerid = timerid
    def __call__(self, obj, event):
        self.callback(obj, event)
        # Cancel the timer so this handler only executes once.
        obj.DestroyTimer(self.timerid)
|
import argparse
import sys
__version__ = '0.3.2'
class Runner:
    """Base class for command-line entry points.

    Subclasses override ``add_args`` to register extra command-line
    arguments and ``run`` to implement the actual command body.
    """
    def __init__(self):
        # Raw command-line arguments; parsing is deferred to __call__.
        self.args = sys.argv[1:]
        self.parser = argparse.ArgumentParser()
        self.parser.add_argument(
            '--config',
            '-c',
            help='configuration file'
        )
        self.parser.add_argument(
            '-v', '--verbose',
            action='count', default=0,
            help="Turns on info-level logging."
        )
        self.add_args()
    def add_args(self):
        # Hook for subclasses to add parser arguments.
        pass
    def run(self, args):
        # Subclasses must implement the command body.
        raise NotImplementedError
    def __call__(self, **kwargs):
        from . import config
        # parse_known_args: unknown arguments are ignored, not errors.
        args = self.parser.parse_known_args(self.args)[0]
        # kwargs override anything parsed from the command line.
        for key, val in kwargs.items():
            setattr(args, key, val)
        if args.config:
            config.CONFIGURATION = config.Configuration(args.config)
        else:
            # Fall back to the packaged default configuration file.
            config.CONFIGURATION = config.Configuration(config.LOCALE_DIR.joinpath('config.yaml').normpath())
        self.run(args)
Updated version number to 0.3.3.
import argparse
import sys
__version__ = '0.3.3'
class Runner:
    """Base class for command-line entry points.

    Subclasses override ``add_args`` to register extra command-line
    arguments and ``run`` to implement the actual command body.
    """
    def __init__(self):
        # Capture argv now; parsing is deferred to __call__ so keyword
        # overrides can be applied on top of the parsed namespace.
        self.args = sys.argv[1:]
        parser = argparse.ArgumentParser()
        parser.add_argument('--config', '-c', help='configuration file')
        parser.add_argument('-v', '--verbose', action='count', default=0,
                            help="Turns on info-level logging.")
        self.parser = parser
        self.add_args()
    def add_args(self):
        """Hook for subclasses to register extra parser arguments."""
        pass
    def run(self, args):
        """Execute the command; must be overridden by subclasses."""
        raise NotImplementedError
    def __call__(self, **kwargs):
        from . import config
        # parse_known_args: unrecognized arguments are ignored, not errors.
        parsed, _unknown = self.parser.parse_known_args(self.args)
        # Keyword overrides win over anything parsed from the command line.
        for name, value in kwargs.items():
            setattr(parsed, name, value)
        if parsed.config:
            config.CONFIGURATION = config.Configuration(parsed.config)
        else:
            # Fall back to the packaged default configuration file.
            config.CONFIGURATION = config.Configuration(
                config.LOCALE_DIR.joinpath('config.yaml').normpath())
        self.run(parsed)
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
from matplotlib.ticker import AutoMinorLocator
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
from dotmap import DotMap
# Monotonically increasing counter used to label each plotted series.
SERIES_NUMBER = 0
# Active figure renderer; set by plot()/plot_broken(), used to measure text.
RENDERER = None
# Default option tree; user options are merged over these in plot().
default_options = DotMap()
default_options.rcParams['figure.figsize'] = [8.0, 4.0]
# Fonttype 42 embeds TrueType fonts so PDF/PS output stays editable.
default_options.rcParams['pdf.fonttype'] = 42
default_options.rcParams['ps.fonttype'] = 42
default_options.rcParams['font.size'] = 24
default_options.rcParams['font.family'] = 'Myriad Pro'
default_options.rcParams['text.color'] = 'black'
default_options.x.axis.show = False
default_options.x.axis.color = 'gray'
default_options.x.margin = 0.05
default_options.x.label.color = 'black'
default_options.x.ticks.major.show = True
default_options.x.ticks.major.options.colors = 'black'
default_options.x.ticks.minor.options.colors = 'black'
default_options.x.ticks.minor.count = 5
default_options.x.grid.options.linestyle = 'dotted'
default_options.x.grid.options.linewidth = 0.5
default_options.x.grid.options.which = 'both'
# NOTE(review): these assignments bind y and y2 to the SAME DotMap object
# as x (no copy), so mutating one axis's defaults mutates all three --
# confirm this aliasing is intended.
default_options.y = default_options.x
default_options.y2 = default_options.x
default_options.legend.text.options.color = 'black'
default_options.broken.gap_positions = ['bottom']
default_options.bar.width = 0.8
default_options.bar_labels.show = True
default_options.bar_labels.options.ha = 'center'
default_options.bar_labels.format_string = '%d'
default_options.vertical_lines.options.linestyle = '--'
default_options.horizontal_lines.options.linestyle = '--'
default_options.annotation_lines.options.linestyle = '--'
default_options.text.options.ha = 'center'
default_options.text.options.va = 'center'
default_options.text.options.color = 'black'
default_options.inset.options.location = 'lower right'
default_options.inset.options.zoom_level = 1.5
default_options.inset.options.border_color = 'black'
default_options.inset.options.corners = [1, 3]
default_options.inset.marker.options.color = 'gray'
default_options.vertical_shaded.options.alpha = 0.5
default_options.vertical_shaded.options.color = 'red'
# NOTE(review): horizontal_shaded aliases vertical_shaded (same object).
default_options.horizontal_shaded = default_options.vertical_shaded
def merge_DotMap(a, b):
    """Recursively merge DotMap ``b`` into ``a`` in place.

    Nested DotMaps present in both are merged key-by-key; any other
    value in ``b`` overwrites the corresponding entry in ``a``.
    """
    for k, v in b.items():
        if isinstance(v, DotMap) and k in a:
            merge_DotMap(a[k], v)
        else:
            a[k] = v
def get_nth_dotmap(d, n):
    """Return a DotMap holding the n-th element of every leaf in ``d``.

    Nested DotMaps are descended recursively; every non-DotMap leaf is
    assumed to be indexable with ``v[n]``.
    """
    p = DotMap()
    for k, v in d.items():
        if isinstance(v, DotMap):
            p[k] = get_nth_dotmap(v, n)
        else:
            p[k] = v[n]
    return p
def autolabel(ax, rects, bar_labels):
    """Attach a numeric value label to each bar in ``rects``.

    The label is first drawn inside the bar (white, top-aligned); if its
    bounding box does not fit vertically within the bar it is redrawn
    above the bar in black.  Requires the module-level RENDERER to be
    set so text extents can be measured.
    """
    # attach some text labels
    # TODO: Make text smaller if it doesn't fit horizontally
    top = ax.get_ylim()[1]
    for i, rect in enumerate(rects):
        height = rect.get_height()
        _, y = rect.get_xy()
        # First attempt: label inside the bar, just below its top edge.
        a = ax.text(rect.get_x() + rect.get_width() / 2.0,
                    height - (0.005 * top) + y,
                    bar_labels.format_string % height,
                    va='top', color='white', **bar_labels.options.toDict())
        text_bbox = a.get_window_extent(renderer=RENDERER)
        rect_bbox = rect.get_window_extent(renderer=RENDERER)
        if text_bbox.y0 < rect_bbox.y0 or text_bbox.y1 > rect_bbox.y1:
            # Did not fit inside the bar: move the label above it.
            a.remove()
            ax.text(rect.get_x() + rect.get_width() / 2.0,
                    height + (0.005 * top) + y,
                    bar_labels.format_string % height,
                    va='bottom', color='black',
                    **bar_labels.options.toDict())
def plot_data(ax, x, y, options, series_options):
    """Draw the series in x/y on ``ax`` according to options.plot_type.

    x, y : lists of per-series sequences.
    series_options : per-series DotMap of matplotlib keyword options
        (options.series_options or options.series2_options).
    Uses and advances the global SERIES_NUMBER to label each series.
    """
    global SERIES_NUMBER
    if options.plot_type == 'LINE':
        for i in xrange(len(x)):
            if len(x[i]) > 1:
                ax.plot(x[i], y[i], label=str(SERIES_NUMBER),
                        **series_options[i].toDict())
            else:
                # A single point cannot be drawn as a line, so fall back
                # to scatter().  BUGFIX: scatter() takes 's' for marker
                # size, not plot()'s 'markersize', so translate the
                # option before passing it through.
                if 'markersize' in series_options[i].toDict():
                    series_options[i].s = series_options[i].markersize
                    del series_options[i].markersize
                ax.scatter(x[i], y[i], label=str(SERIES_NUMBER),
                           **series_options[i].toDict())
            SERIES_NUMBER += 1
    if options.plot_type == 'SCATTER':
        for i in xrange(len(x)):
            ax.scatter(x[i], y[i], label=str(SERIES_NUMBER),
                       **series_options[i].toDict())
            SERIES_NUMBER += 1
    elif options.plot_type == 'BAR' or options.plot_type == 'STACKED_BAR':
        rects = []
        for i in xrange(len(x)):
            # Stacked bars sit on the sum of all preceding series.
            b = np.sum(y[:i], 0) if options.plot_type == 'STACKED_BAR' else 0
            offset = SERIES_NUMBER * options.bar.width + \
                options.bar.width / 2.0 if options.plot_type == 'BAR' \
                else options.bar.width / 2.0
            rects += ax.bar(x[i] + offset, y[i], options.bar.width, bottom=b,
                            label=str(SERIES_NUMBER),
                            **series_options[i].toDict())
            SERIES_NUMBER += 1
        if options.bar_labels.show:
            autolabel(ax, rects, options.bar_labels)
        l = options.x.ticks.major.labels
        if len(l.locations) == 0:
            # Center default tick positions under the bar groups.
            l.locations = np.arange(len(l.text)) + (0.8 / 2.0)
    if options.best_fit.show:
        for i in xrange(len(x)):
            # Least-squares fit line per series, dotted, in series color.
            m, b = np.polyfit(x[i], y[i], 1)
            ax.plot(x[i], [m*p + b for p in x[i]], linestyle='dotted',
                    label=str(SERIES_NUMBER), color='C%d' % i,
                    **options.best_fit.options[i].toDict())
            SERIES_NUMBER += 1
def apply_options_to_axis(axis, data, options):
    """Apply one axis' options: spines, grid, label, scale, ticks, limits.

    axis : a matplotlib XAxis or YAxis instance.
    data : the per-series data on that axis (used to pick log vs symlog).
    options : the DotMap subtree for this axis (options.x / .y / .y2).
    """
    # Determine whether this is the x or the y axis of its Axes.
    t = 'x' if (axis.axes.xaxis == axis) else 'y'
    axis.axes.set_axisbelow(True)
    axis.axes.margins(**{t: options.margin})
    # Hide both spines of this orientation; one is re-shown below if asked.
    if t == 'x':
        axis.axes.spines['top'].set_visible(False)
        axis.axes.spines['bottom'].set_visible(False)
    else:
        axis.axes.spines['left'].set_visible(False)
        axis.axes.spines['right'].set_visible(False)
    if options.axis.show:
        if t == 'x':
            if options.position and options.position == 'top':
                sp = axis.axes.spines['top']
            else:
                sp = axis.axes.spines['bottom']
        else:
            if options.position and options.position == 'right':
                sp = axis.axes.spines['right']
            else:
                sp = axis.axes.spines['left']
        sp.set_visible(True)
        sp.set_color(options.axis.color)
    if options.grid.show:
        axis.grid(**options.grid.options.toDict())
    if options.label['%slabel' % t]:
        getattr(axis.axes, 'set_%slabel' % t)(**options.label.toDict())
    if options.position:
        p = options.position
        axis.set_label_position(p)
        if p == 'right':
            axis.tick_right()
        if p == 'top':
            axis.tick_top()
    if options.log:
        # symlog handles data crossing zero; plain log cannot.
        if min(map(min, data)) < 0:
            getattr(axis.axes, 'set_%sscale' % t)('symlog')
        else:
            getattr(axis.axes, 'set_%sscale' % t)('log')
    if options.invert:
        getattr(axis.axes, 'invert_%saxis' % t)()
    if not options.ticks.major.show:
        options.ticks.major.options.length = 0
        getattr(axis.axes, 'set_%sticks' % t)([])
    if options.ticks.major.labels:
        l = options.ticks.major.labels
        if 'text' in l:
            # Explicit label text; default locations are 0..n-1.
            if len(l.locations) == 0:
                l.locations = range(len(l.text))
            getattr(plt, '%sticks' % t)(l.locations,
                                        l.text, **l.options.toDict())
        elif 'locations' in l:
            getattr(plt, '%sticks' % t)(l.locations, **l.options.toDict())
        else:
            getattr(plt, '%sticks' % t)(**l.options.toDict())
    if options.ticks.major.options:
        axis.axes.tick_params(axis=t, which='major',
                              **options.ticks.major.options.toDict())
    if options.ticks.minor.show:
        if not options.log:
            axis.set_minor_locator(AutoMinorLocator(options.ticks.minor.count))
        else:
            # Log scales supply their own minor ticks; just hide the marks.
            options.ticks.minor.options.length = 0
    if options.ticks.minor.options:
        axis.axes.tick_params(axis=t, which='minor',
                              **options.ticks.minor.options.toDict())
    if options.limits:
        getattr(axis.axes, 'set_%slim' % t)(options.limits)
def plot_ax(ax, x, y, y2, options):
    """Draw the data and apply the axis options on one Axes.

    When y2 is given, a twin y-axis is created and the y2 series are
    drawn on it using options.series2_options / options.y2.
    """
    if options.plot_type == 'BAR':
        # Split the nominal bar width across every series on this axes.
        total_series = len(y) + (len(y2) if y2 else 0)
        options.bar.width /= total_series
    plot_data(ax, x, y, options, options.series_options)
    apply_options_to_axis(ax.xaxis, x, options.x)
    apply_options_to_axis(ax.yaxis, y, options.y)
    if not y2:
        return
    twin = ax.twinx()
    plot_data(twin, x, y2, options, options.series2_options)
    apply_options_to_axis(twin.xaxis, x, options.x)
    apply_options_to_axis(twin.yaxis, y2, options.y2)
def plot_broken(x, y, y2, options):
    """Plot onto two stacked (yskip) or side-by-side (xskip) axes with a
    gap, giving a broken-axis figure.

    options.broken.yskip / xskip give the [start, end] of the skipped
    data range.  Returns the list of the two axes created.
    """
    global RENDERER, SERIES_NUMBER
    padding = 0.2
    if options.broken.yskip:
        # Visible ranges below and above the skipped y interval; the
        # subplot height ratios match the span of each part.
        bottom = [y[0][0] - padding, options.broken.yskip[0]]
        top = [options.broken.yskip[1] + 1, y[0][-1] + 1 + padding]
        options.broken.subplot.gridspec_kw = {'height_ratios':
                                              [top[1] - top[0],
                                               bottom[1] - bottom[0]]}
        new_options = deepcopy(options)
        f, (ax, ax2) = plt.subplots(2, 1, sharex=True,
                                    **options.broken.subplot.toDict())
        new_options.y.limits = top
    else:
        # Same construction, but splitting horizontally on x.
        left = [x[0][0] - padding, options.broken.xskip[0]]
        right = [options.broken.xskip[1] + 1, x[0][-1] + 1 + padding]
        options.broken.subplot.gridspec_kw = {'width_ratios':
                                              [left[1] - left[0],
                                               right[1] - right[0]]}
        new_options = deepcopy(options)
        f, (ax, ax2) = plt.subplots(1, 2, sharey=True,
                                    **options.broken.subplot.toDict())
        new_options.x.limits = left
    axes = [ax, ax2]
    RENDERER = f.canvas.get_renderer()
    # plot first axes (a deep copy of the options, with per-axes
    # overrides from options.broken.options[.][0] merged in)
    new_options.legend.options.labels = []
    merge_DotMap(new_options, get_nth_dotmap(options.broken.options, 0))
    plt.sca(ax)
    plot_ax(ax, x, y, y2, new_options)
    SERIES_NUMBER = 0
    # plot second axes: suppress the duplicated label/ticks on the
    # shared direction, then merge overrides [1].
    if options.broken.yskip:
        options.y.limits = bottom
        options.x.label.ylabel = ''
        options.x.ticks.major.show = False
        options.x.ticks.minor.show = False
    else:
        options.x.limits = right
        options.y.label.ylabel = ''
        options.y.ticks.major.show = False
        options.y.ticks.minor.show = False
    merge_DotMap(options, get_nth_dotmap(options.broken.options, 1))
    plt.sca(ax2)
    plot_ax(ax2, x, y, y2, options)
    # draw in 'gap' markers (small diagonal slashes at the break)
    d = .03  # .015
    trans = ax.transScale + ax.transLimits
    d_dict = {'middle': [0.5, 0.5], 'bottom': [0, 0], 'top': [1, 1],
              'zero': trans.transform([0, 0])}
    kwargs = dict(transform=ax.transAxes, color='black', clip_on=False)
    if options.broken.yskip:
        # k compensates for the unequal subplot heights so the slash
        # angle looks the same on both axes.
        k1, k2 = options.broken.subplot.gridspec_kw['height_ratios']
        k = k1 / float(k2)
        for p in options.broken.gap_positions:
            d2 = d_dict[p][0]
            kwargs.update(transform=ax.transAxes)
            ax.plot((d2-d, d2+d), (-d/k, +d/k), **kwargs)
            # switch to the bottom axes
            kwargs.update(transform=ax2.transAxes)
            ax2.plot((d2-d, d2+d), (1-d, 1+d), **kwargs)
    else:
        k1, k2 = options.broken.subplot.gridspec_kw['width_ratios']
        k = k1 / float(k2)
        for p in options.broken.gap_positions:
            d2 = d_dict[p][1]
            kwargs.update(transform=ax.transAxes)
            ax.plot((1-d/k, 1+d/k), (d2-d, d2+d), **kwargs)
            # switch to the right axes
            kwargs.update(transform=ax2.transAxes)
            ax2.plot((-d, +d), (d2-d, d2+d), **kwargs)
    return axes
def plot(x, y, my_options={}, y2=None):
    """Top-level entry point: plot x/y (and optional y2) and save to
    options['output_fn'].

    my_options is merged over default_options.  NOTE(review): the
    mutable default ``my_options={}`` is only read (merge source), so it
    is not corrupted across calls, but the idiom is fragile.
    """
    global RENDERER
    options = default_options.copy()
    merge_DotMap(options, my_options)
    plt.rcParams.update(options.rcParams.toDict())
    if options.broken.yskip or options.broken.xskip:
        axes = plot_broken(x, y, y2, options)
    else:
        f, ax = plt.subplots()
        RENDERER = f.canvas.get_renderer()
        axes = [ax]
        plot_ax(ax, x, y, y2, options)
    plt.tight_layout(pad=0)
    if options.inset.show:
        # Map the human-readable location names to mpl legend loc codes.
        locations = {
            'upper right': 1,
            'upper left': 2,
            'lower left': 3,
            'lower right': 4,
            'right': 5,
            'center left': 6,
            'center right': 7,
            'lower center': 8,
            'upper center': 9,
            'center': 10,
        }
        ax_inset = zoomed_inset_axes(axes[0], options.inset.options.zoom_level,
                                     loc=locations[
                                         options.inset.options.location])
        # NOTE(review): this assigns a *local* SERIES_NUMBER (no global
        # declaration here), so the module-level counter is NOT reset --
        # confirm whether a `global SERIES_NUMBER` was intended.
        SERIES_NUMBER = 0
        # Re-plot the data inside the inset, without labels and limited
        # to the inset's x/y window.
        del options.x.label
        del options.y.label
        options.x.limits = options.inset.options.x.limits
        options.y.limits = options.inset.options.y.limits
        plot_ax(ax_inset, x, y, y2, options)
        for sp in [ax_inset.axes.spines[i]
                   for i in ['top', 'bottom', 'left', 'right']]:
            sp.set_visible(True)
            sp.set_color(options.inset.options.border_color)
        plt.xticks(visible=False)
        plt.yticks(visible=False)
        plt.setp(ax_inset, xticks=[], yticks=[])
        mark_inset(axes[0], ax_inset, loc1=options.inset.options.corners[0],
                   loc2=options.inset.options.corners[1], fc="none",
                   ec=options.inset.options.marker.options.color)
        plt.draw()
    if options.x.axis.stretch:
        box = ax.get_position()
        ax.set_position([box.x0, box.y0,
                         box.width * options.x.axis.stretch, box.height])
    if options.y.axis.stretch:
        box = ax.get_position()
        ax.set_position([box.x0, box.y0,
                         box.width, box.height * options.y.axis.stretch])
    if options.legend.options.labels:
        # Sort legend entries alphabetically by label text.
        handles, labels = axes[0].get_legend_handles_labels()
        labels, handles = zip(*sorted(zip(labels, handles),
                                      key=lambda t: t[0]))
        l = axes[-1].legend(handles=handles,
                            **options.legend.options.toDict())
        for t in l.get_texts():
            t.update(options.legend.text.options.toDict())
    if options.vertical_lines.lines:
        for l in options.vertical_lines.lines:
            axes[0].axvline(l, **options.vertical_lines.options.toDict())
    if options.vertical_shaded.limits:
        for l, r in options.vertical_shaded.limits:
            axes[0].axvspan(l, r, **options.vertical_shaded.options.toDict())
    if options.horizontal_lines.lines:
        for l in options.horizontal_lines.lines:
            axes[0].axhline(l, **options.horizontal_lines.options.toDict())
    if options.horizontal_shaded.limits:
        for l, r in options.horizontal_shaded.limits:
            axes[0].axhspan(l, r, **options.horizontal_shaded.options.toDict())
    for i in xrange(len(options.text.labels)):
        axes[-1].text(*options.text.positions[i], s=options.text.labels[i],
                      transform=axes[0].transAxes,
                      **options.text.options.toDict())
    for i in options.annotation_lines.lines:
        axes[0].annotate('', xy=i[0], xytext=i[1], arrowprops=dict(
            arrowstyle='-', **options.annotation_lines.options.toDict()))
    print options['output_fn']
    plt.savefig(options['output_fn'], bbox_inches='tight', pad_inches=0)
Scatter plot should use 's' instead of 'markersize', since matplotlib's scatter() does not accept the 'markersize' keyword.
#!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
from matplotlib.ticker import AutoMinorLocator
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes, mark_inset
from dotmap import DotMap
# Monotonically increasing counter used to label each plotted series.
SERIES_NUMBER = 0
# Active figure renderer; set by plot()/plot_broken(), used to measure text.
RENDERER = None
# Default option tree; user options are merged over these in plot().
default_options = DotMap()
default_options.rcParams['figure.figsize'] = [8.0, 4.0]
# Fonttype 42 embeds TrueType fonts so PDF/PS output stays editable.
default_options.rcParams['pdf.fonttype'] = 42
default_options.rcParams['ps.fonttype'] = 42
default_options.rcParams['font.size'] = 24
default_options.rcParams['font.family'] = 'Myriad Pro'
default_options.rcParams['text.color'] = 'black'
default_options.x.axis.show = False
default_options.x.axis.color = 'gray'
default_options.x.margin = 0.05
default_options.x.label.color = 'black'
default_options.x.ticks.major.show = True
default_options.x.ticks.major.options.colors = 'black'
default_options.x.ticks.minor.options.colors = 'black'
default_options.x.ticks.minor.count = 5
default_options.x.grid.options.linestyle = 'dotted'
default_options.x.grid.options.linewidth = 0.5
default_options.x.grid.options.which = 'both'
# NOTE(review): these assignments bind y and y2 to the SAME DotMap object
# as x (no copy), so mutating one axis's defaults mutates all three --
# confirm this aliasing is intended.
default_options.y = default_options.x
default_options.y2 = default_options.x
default_options.legend.text.options.color = 'black'
default_options.broken.gap_positions = ['bottom']
default_options.bar.width = 0.8
default_options.bar_labels.show = True
default_options.bar_labels.options.ha = 'center'
default_options.bar_labels.format_string = '%d'
default_options.vertical_lines.options.linestyle = '--'
default_options.horizontal_lines.options.linestyle = '--'
default_options.annotation_lines.options.linestyle = '--'
default_options.text.options.ha = 'center'
default_options.text.options.va = 'center'
default_options.text.options.color = 'black'
default_options.inset.options.location = 'lower right'
default_options.inset.options.zoom_level = 1.5
default_options.inset.options.border_color = 'black'
default_options.inset.options.corners = [1, 3]
default_options.inset.marker.options.color = 'gray'
default_options.vertical_shaded.options.alpha = 0.5
default_options.vertical_shaded.options.color = 'red'
# NOTE(review): horizontal_shaded aliases vertical_shaded (same object).
default_options.horizontal_shaded = default_options.vertical_shaded
def merge_DotMap(a, b):
    """Recursively merge DotMap ``b`` into ``a`` in place.

    Nested DotMaps present in both are merged key-by-key; any other
    value in ``b`` overwrites the corresponding entry in ``a``.
    """
    for k, v in b.items():
        if isinstance(v, DotMap) and k in a:
            merge_DotMap(a[k], v)
        else:
            a[k] = v
def get_nth_dotmap(d, n):
    """Return a DotMap holding the n-th element of every leaf in ``d``.

    Nested DotMaps are descended recursively; every non-DotMap leaf is
    assumed to be indexable with ``v[n]``.
    """
    p = DotMap()
    for k, v in d.items():
        if isinstance(v, DotMap):
            p[k] = get_nth_dotmap(v, n)
        else:
            p[k] = v[n]
    return p
def autolabel(ax, rects, bar_labels):
    """Attach a numeric value label to each bar in ``rects``.

    The label is first drawn inside the bar (white, top-aligned); if its
    bounding box does not fit vertically within the bar it is redrawn
    above the bar in black.  Requires the module-level RENDERER to be
    set so text extents can be measured.
    """
    # attach some text labels
    # TODO: Make text smaller if it doesn't fit horizontally
    top = ax.get_ylim()[1]
    for i, rect in enumerate(rects):
        height = rect.get_height()
        _, y = rect.get_xy()
        # First attempt: label inside the bar, just below its top edge.
        a = ax.text(rect.get_x() + rect.get_width() / 2.0,
                    height - (0.005 * top) + y,
                    bar_labels.format_string % height,
                    va='top', color='white', **bar_labels.options.toDict())
        text_bbox = a.get_window_extent(renderer=RENDERER)
        rect_bbox = rect.get_window_extent(renderer=RENDERER)
        if text_bbox.y0 < rect_bbox.y0 or text_bbox.y1 > rect_bbox.y1:
            # Did not fit inside the bar: move the label above it.
            a.remove()
            ax.text(rect.get_x() + rect.get_width() / 2.0,
                    height + (0.005 * top) + y,
                    bar_labels.format_string % height,
                    va='bottom', color='black',
                    **bar_labels.options.toDict())
def plot_data(ax, x, y, options, series_options):
    """Draw the series in x/y on ``ax`` according to options.plot_type.

    x, y : lists of per-series sequences.
    series_options : per-series DotMap of matplotlib keyword options
        (options.series_options or options.series2_options).
    Uses and advances the global SERIES_NUMBER to label each series.
    """
    global SERIES_NUMBER
    if options.plot_type == 'LINE':
        for i in xrange(len(x)):
            if len(x[i]) > 1:
                ax.plot(x[i], y[i], label=str(SERIES_NUMBER),
                        **series_options[i].toDict())
            else:
                # Single-point series fall back to scatter(), which takes
                # 's' instead of plot()'s 'markersize'.  BUGFIX: translate
                # the option on the series_options actually passed in --
                # the previous code mutated options.series_options, which
                # is wrong when called with options.series2_options for a
                # twin axis.
                if 'markersize' in series_options[i].toDict():
                    series_options[i].s = \
                        series_options[i].markersize
                    del series_options[i].markersize
                ax.scatter(x[i], y[i], label=str(SERIES_NUMBER),
                           **series_options[i].toDict())
            SERIES_NUMBER += 1
    if options.plot_type == 'SCATTER':
        for i in xrange(len(x)):
            ax.scatter(x[i], y[i], label=str(SERIES_NUMBER),
                       **series_options[i].toDict())
            SERIES_NUMBER += 1
    elif options.plot_type == 'BAR' or options.plot_type == 'STACKED_BAR':
        rects = []
        for i in xrange(len(x)):
            # Stacked bars sit on the sum of all preceding series.
            b = np.sum(y[:i], 0) if options.plot_type == 'STACKED_BAR' else 0
            offset = SERIES_NUMBER * options.bar.width + \
                options.bar.width / 2.0 if options.plot_type == 'BAR' \
                else options.bar.width / 2.0
            rects += ax.bar(x[i] + offset, y[i], options.bar.width, bottom=b,
                            label=str(SERIES_NUMBER),
                            **series_options[i].toDict())
            SERIES_NUMBER += 1
        if options.bar_labels.show:
            autolabel(ax, rects, options.bar_labels)
        l = options.x.ticks.major.labels
        if len(l.locations) == 0:
            # Center default tick positions under the bar groups.
            l.locations = np.arange(len(l.text)) + (0.8 / 2.0)
    if options.best_fit.show:
        for i in xrange(len(x)):
            # Least-squares fit line per series, dotted, in series color.
            m, b = np.polyfit(x[i], y[i], 1)
            ax.plot(x[i], [m*p + b for p in x[i]], linestyle='dotted',
                    label=str(SERIES_NUMBER), color='C%d' % i,
                    **options.best_fit.options[i].toDict())
            SERIES_NUMBER += 1
def apply_options_to_axis(axis, data, options):
    """Apply one axis' options: spines, grid, label, scale, ticks, limits.

    axis : a matplotlib XAxis or YAxis instance.
    data : the per-series data on that axis (used to pick log vs symlog).
    options : the DotMap subtree for this axis (options.x / .y / .y2).
    """
    # Determine whether this is the x or the y axis of its Axes.
    t = 'x' if (axis.axes.xaxis == axis) else 'y'
    axis.axes.set_axisbelow(True)
    axis.axes.margins(**{t: options.margin})
    # Hide both spines of this orientation; one is re-shown below if asked.
    if t == 'x':
        axis.axes.spines['top'].set_visible(False)
        axis.axes.spines['bottom'].set_visible(False)
    else:
        axis.axes.spines['left'].set_visible(False)
        axis.axes.spines['right'].set_visible(False)
    if options.axis.show:
        if t == 'x':
            if options.position and options.position == 'top':
                sp = axis.axes.spines['top']
            else:
                sp = axis.axes.spines['bottom']
        else:
            if options.position and options.position == 'right':
                sp = axis.axes.spines['right']
            else:
                sp = axis.axes.spines['left']
        sp.set_visible(True)
        sp.set_color(options.axis.color)
    if options.grid.show:
        axis.grid(**options.grid.options.toDict())
    if options.label['%slabel' % t]:
        getattr(axis.axes, 'set_%slabel' % t)(**options.label.toDict())
    if options.position:
        p = options.position
        axis.set_label_position(p)
        if p == 'right':
            axis.tick_right()
        if p == 'top':
            axis.tick_top()
    if options.log:
        # symlog handles data crossing zero; plain log cannot.
        if min(map(min, data)) < 0:
            getattr(axis.axes, 'set_%sscale' % t)('symlog')
        else:
            getattr(axis.axes, 'set_%sscale' % t)('log')
    if options.invert:
        getattr(axis.axes, 'invert_%saxis' % t)()
    if not options.ticks.major.show:
        options.ticks.major.options.length = 0
        getattr(axis.axes, 'set_%sticks' % t)([])
    if options.ticks.major.labels:
        l = options.ticks.major.labels
        if 'text' in l:
            # Explicit label text; default locations are 0..n-1.
            if len(l.locations) == 0:
                l.locations = range(len(l.text))
            getattr(plt, '%sticks' % t)(l.locations,
                                        l.text, **l.options.toDict())
        elif 'locations' in l:
            getattr(plt, '%sticks' % t)(l.locations, **l.options.toDict())
        else:
            getattr(plt, '%sticks' % t)(**l.options.toDict())
    if options.ticks.major.options:
        axis.axes.tick_params(axis=t, which='major',
                              **options.ticks.major.options.toDict())
    if options.ticks.minor.show:
        if not options.log:
            axis.set_minor_locator(AutoMinorLocator(options.ticks.minor.count))
        else:
            # Log scales supply their own minor ticks; just hide the marks.
            options.ticks.minor.options.length = 0
    if options.ticks.minor.options:
        axis.axes.tick_params(axis=t, which='minor',
                              **options.ticks.minor.options.toDict())
    if options.limits:
        getattr(axis.axes, 'set_%slim' % t)(options.limits)
def plot_ax(ax, x, y, y2, options):
    """
    Plot the series in y (and optionally y2, on a twinned right-hand
    axis) against x on the given axes, then apply the x/y axis options.
    """
    # Bars share the category slot, so shrink each bar's width by the
    # total number of series being drawn.
    if options.plot_type == 'BAR':
        if y2:
            options.bar.width /= len(y) + len(y2)
        else:
            options.bar.width /= len(y)
    plot_data(ax, x, y, options, options.series_options)
    apply_options_to_axis(ax.xaxis, x, options.x)
    apply_options_to_axis(ax.yaxis, y, options.y)
    if y2:
        # Secondary series get their own y axis sharing the same x axis.
        ax2 = ax.twinx()
        plot_data(ax2, x, y2, options, options.series2_options)
        apply_options_to_axis(ax2.xaxis, x, options.x)
        apply_options_to_axis(ax2.yaxis, y2, options.y2)
def plot_broken(x, y, y2, options):
    """
    Draw a 'broken' plot: two subplots separated by a gap that skips the
    value range in options.broken.yskip (stacked vertically) or
    options.broken.xskip (side by side), with diagonal gap markers drawn
    across the break.  Returns the list [ax, ax2] of the two axes.
    """
    global RENDERER, SERIES_NUMBER
    padding = 0.2
    if options.broken.yskip:
        # Vertical break: the bottom panel shows [start, yskip[0]], the
        # top panel shows [yskip[1], end]; heights are proportional.
        bottom = [y[0][0] - padding, options.broken.yskip[0]]
        top = [options.broken.yskip[1] + 1, y[0][-1] + 1 + padding]
        options.broken.subplot.gridspec_kw = {'height_ratios':
                                              [top[1] - top[0],
                                               bottom[1] - bottom[0]]}
        new_options = deepcopy(options)
        f, (ax, ax2) = plt.subplots(2, 1, sharex=True,
                                    **options.broken.subplot.toDict())
        new_options.y.limits = top
    else:
        # Horizontal break: same idea along the x axis.
        left = [x[0][0] - padding, options.broken.xskip[0]]
        right = [options.broken.xskip[1] + 1, x[0][-1] + 1 + padding]
        options.broken.subplot.gridspec_kw = {'width_ratios':
                                              [left[1] - left[0],
                                               right[1] - right[0]]}
        new_options = deepcopy(options)
        f, (ax, ax2) = plt.subplots(1, 2, sharey=True,
                                    **options.broken.subplot.toDict())
        new_options.x.limits = left
    axes = [ax, ax2]
    RENDERER = f.canvas.get_renderer()
    # plot first axes (deep-copied options so per-panel overrides from
    # broken.options[0] don't leak into the second panel)
    new_options.legend.options.labels = []
    merge_DotMap(new_options, get_nth_dotmap(options.broken.options, 0))
    plt.sca(ax)
    plot_ax(ax, x, y, y2, new_options)
    SERIES_NUMBER = 0  # restart series styling for the second panel
    # plot second axes
    if options.broken.yskip:
        options.y.limits = bottom
        options.x.label.ylabel = ''
        options.x.ticks.major.show = False
        options.x.ticks.minor.show = False
    else:
        options.x.limits = right
        options.y.label.ylabel = ''
        options.y.ticks.major.show = False
        options.y.ticks.minor.show = False
    merge_DotMap(options, get_nth_dotmap(options.broken.options, 1))
    plt.sca(ax2)
    plot_ax(ax2, x, y, y2, options)
    # draw in 'gap' markers (small diagonal ticks at the break)
    d = .03  # marker half-size in axes coordinates (was .015)
    trans = ax.transScale + ax.transLimits
    d_dict = {'middle': [0.5, 0.5], 'bottom': [0, 0], 'top': [1, 1],
              'zero': trans.transform([0, 0])}
    kwargs = dict(transform=ax.transAxes, color='black', clip_on=False)
    if options.broken.yskip:
        # k compensates for the unequal panel heights so the diagonal
        # markers have the same visual slope on both panels.
        k1, k2 = options.broken.subplot.gridspec_kw['height_ratios']
        k = k1 / float(k2)
        for p in options.broken.gap_positions:
            d2 = d_dict[p][0]
            kwargs.update(transform=ax.transAxes)
            ax.plot((d2-d, d2+d), (-d/k, +d/k), **kwargs)
            # switch to the bottom axes
            kwargs.update(transform=ax2.transAxes)
            ax2.plot((d2-d, d2+d), (1-d, 1+d), **kwargs)
    else:
        k1, k2 = options.broken.subplot.gridspec_kw['width_ratios']
        k = k1 / float(k2)
        for p in options.broken.gap_positions:
            d2 = d_dict[p][1]
            kwargs.update(transform=ax.transAxes)
            ax.plot((1-d/k, 1+d/k), (d2-d, d2+d), **kwargs)
            # switch to the right axes
            kwargs.update(transform=ax2.transAxes)
            ax2.plot((-d, +d), (d2-d, d2+d), **kwargs)
    return axes
def plot(x, y, my_options={}, y2=None):
global RENDERER
options = default_options.copy()
merge_DotMap(options, my_options)
plt.rcParams.update(options.rcParams.toDict())
if options.broken.yskip or options.broken.xskip:
axes = plot_broken(x, y, y2, options)
else:
f, ax = plt.subplots()
RENDERER = f.canvas.get_renderer()
axes = [ax]
plot_ax(ax, x, y, y2, options)
plt.tight_layout(pad=0)
if options.inset.show:
locations = {
'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
ax_inset = zoomed_inset_axes(axes[0], options.inset.options.zoom_level,
loc=locations[
options.inset.options.location])
SERIES_NUMBER = 0
del options.x.label
del options.y.label
options.x.limits = options.inset.options.x.limits
options.y.limits = options.inset.options.y.limits
plot_ax(ax_inset, x, y, y2, options)
for sp in [ax_inset.axes.spines[i]
for i in ['top', 'bottom', 'left', 'right']]:
sp.set_visible(True)
sp.set_color(options.inset.options.border_color)
plt.xticks(visible=False)
plt.yticks(visible=False)
plt.setp(ax_inset, xticks=[], yticks=[])
mark_inset(axes[0], ax_inset, loc1=options.inset.options.corners[0],
loc2=options.inset.options.corners[1], fc="none",
ec=options.inset.options.marker.options.color)
plt.draw()
if options.x.axis.stretch:
box = ax.get_position()
ax.set_position([box.x0, box.y0,
box.width * options.x.axis.stretch, box.height])
if options.y.axis.stretch:
box = ax.get_position()
ax.set_position([box.x0, box.y0,
box.width, box.height * options.y.axis.stretch])
if options.legend.options.labels:
handles, labels = axes[0].get_legend_handles_labels()
labels, handles = zip(*sorted(zip(labels, handles),
key=lambda t: t[0]))
l = axes[-1].legend(handles=handles,
**options.legend.options.toDict())
for t in l.get_texts():
t.update(options.legend.text.options.toDict())
if options.vertical_lines.lines:
for l in options.vertical_lines.lines:
axes[0].axvline(l, **options.vertical_lines.options.toDict())
if options.vertical_shaded.limits:
for l, r in options.vertical_shaded.limits:
axes[0].axvspan(l, r, **options.vertical_shaded.options.toDict())
if options.horizontal_lines.lines:
for l in options.horizontal_lines.lines:
axes[0].axhline(l, **options.horizontal_lines.options.toDict())
if options.horizontal_shaded.limits:
for l, r in options.horizontal_shaded.limits:
axes[0].axhspan(l, r, **options.horizontal_shaded.options.toDict())
for i in xrange(len(options.text.labels)):
axes[-1].text(*options.text.positions[i], s=options.text.labels[i],
transform=axes[0].transAxes,
**options.text.options.toDict())
for i in options.annotation_lines.lines:
axes[0].annotate('', xy=i[0], xytext=i[1], arrowprops=dict(
arrowstyle='-', **options.annotation_lines.options.toDict()))
print options['output_fn']
plt.savefig(options['output_fn'], bbox_inches='tight', pad_inches=0)
|
#!/usr/bin/env python
from .plugin import SimStatePlugin
from ..s_action_object import ast_stripping_op as _actual_ast_stripping_op
import sys
import functools
import logging
l = logging.getLogger('simuvex.plugins.solver')
#pylint:disable=unidiomatic-typecheck
#
# Timing stuff
#
_timing_enabled = False
import time
lt = logging.getLogger('simuvex.plugins.solver.timing')
def ast_stripping_op(f, *args, **kwargs):
    """
    Invoke f through the AST-stripping wrapper, optionally timing it.

    When timing is enabled, the solver (the_solver kwarg or args[0])
    supplies the state used to describe where the call happened, the
    duration is logged, and a debugger is entered when the call exceeds
    break_time seconds (break_time < 0 disables this).
    """
    the_solver = kwargs.pop('the_solver', None)
    if _timing_enabled:
        the_solver = args[0] if the_solver is None else the_solver
        s = the_solver.state
        start = time.time()
        r = _actual_ast_stripping_op(f, *args, **kwargs)
        end = time.time()
        duration = end-start
        # Describe the guest location (basic block / sim procedure) for
        # the log line.
        if s.scratch.sim_procedure is None and s.scratch.bbl_addr is not None:
            location = "bbl 0x%x, stmt %d (inst 0x%x)" % (s.scratch.bbl_addr, s.scratch.stmt_idx, s.scratch.ins_addr)
        elif s.scratch.sim_procedure is not None:
            location = "sim_procedure %s" % s.scratch.sim_procedure
        else:
            location = "unknown"
        # Log level scales with duration (tenths of a second), so slower
        # calls log at higher severity.
        lt.log(int((end-start)*10), '%s took %s seconds at %s', f.__name__, round(duration, 2), location)
        if break_time >= 0 and duration > break_time:
            import ipdb; ipdb.set_trace()
    else:
        r = _actual_ast_stripping_op(f, *args, **kwargs)
    return r
#pylint:disable=global-variable-undefined
def enable_timing():
    """Turn on solver-call timing and make the timing logger verbose."""
    global _timing_enabled
    _timing_enabled = True
    lt.setLevel(1)
def disable_timing():
    """Turn off solver-call timing."""
    global _timing_enabled
    _timing_enabled = False
import os
# Timing is opt-in via the SOLVER_TIMING environment variable; a call
# longer than SOLVER_BREAK_TIME seconds drops into a debugger (the
# default of -1 disables the break behavior).
if os.environ.get('SOLVER_TIMING', False):
    enable_timing()
else:
    disable_timing()
break_time = float(os.environ.get('SOLVER_BREAK_TIME', -1))
#
# Various over-engineered crap
#
def auto_actions(f):
    """Decorator routing a method call through ast_stripping_op."""
    @functools.wraps(f)
    def autoed_f(self, *args, **kwargs):
        return ast_stripping_op(f, self, *args, **kwargs)
    return autoed_f
def unsat_catcher(f):
    """
    Decorator translating claripy.UnsatError into SimUnsatError while
    preserving the original traceback (Python 2 three-arg raise).
    """
    @functools.wraps(f)
    def wrapped_f(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except claripy.UnsatError:
            e_type, value, traceback = sys.exc_info()
            raise SimUnsatError, ("Got an unsat result", e_type, value), traceback
    return wrapped_f
import claripy
class SimSolver(SimStatePlugin):
    """
    State plugin exposing the constraint solver.  A claripy frontend is
    chosen lazily from the state's options; unknown attributes are
    proxied to the frontend (or claripy itself) through
    ast_stripping_op so SimActionObjects are unwrapped.
    """
    def __init__(self, solver=None): #pylint:disable=redefined-outer-name
        l.debug("Creating SimSolverClaripy.")
        SimStatePlugin.__init__(self)
        self._stored_solver = solver

    # serialization support for the analysis framework
    def _ana_getstate(self):
        return self._stored_solver, self.state

    def _ana_setstate(self, s):
        self._stored_solver, self.state = s

    def set_state(self, state):
        SimStatePlugin.set_state(self, state)

    @property
    def _solver(self):
        """Lazily construct and cache the claripy frontend appropriate
        for the state's options."""
        if self._stored_solver is not None:
            return self._stored_solver
        # Approximation options trade exact solving for a hybrid mode.
        exact = not (o.APPROXIMATE_MEMORY_INDICES in self.state.options or o.APPROXIMATE_GUARDS in self.state.options)
        if o.ABSTRACT_SOLVER in self.state.options:
            self._stored_solver = claripy.LightFrontend(claripy.backend_vsa)
        elif o.COMPOSITE_SOLVER in self.state.options:
            self._stored_solver = claripy.CompositeFrontend(claripy.backend_z3, solver_class=claripy.FullFrontend if exact else claripy.HybridFrontend)
        elif o.SYMBOLIC in self.state.options:
            self._stored_solver = claripy.FullFrontend(claripy.backend_z3) if exact else claripy.HybridFrontend(claripy.backend_z3)
        else:
            self._stored_solver = claripy.LightFrontend(claripy.backend_vsa)
        return self._stored_solver

    @property
    def constraints(self):
        return self._solver.constraints

    #
    # Get unconstrained stuff
    #
    def Unconstrained(self, name, bits, **kwargs):
        """Return a fresh unconstrained value: a symbolic BV (or a top
        StridedInterval under abstract memory) when
        SYMBOLIC_INITIAL_VALUES is on, otherwise the concrete default 0."""
        if o.SYMBOLIC_INITIAL_VALUES in self.state.options:
            # Return a symbolic value
            if o.ABSTRACT_MEMORY in self.state.options:
                l.debug("Creating new top StridedInterval")
                r = claripy.TSI(bits=bits, name=name, uninitialized=True, **kwargs)
            else:
                l.debug("Creating new unconstrained BV named %s", name)
                r = claripy.BVS(name, bits, **kwargs)
            self.state.log.add_event('unconstrained', name=iter(r.variables).next(), bits=bits, **kwargs)
            return r
        else:
            # Return a default value, aka. 0
            return claripy.BVV(0, bits)

    #
    # Various passthroughs
    #
    def downsize(self):
        return self._solver.downsize()

    def __getattr__(self, a):
        # Proxy unknown attributes to the frontend, falling back to
        # claripy's operations; wrap callables in ast_stripping_op.
        try:
            f = getattr(self._solver, a)
        except AttributeError:
            f = getattr(claripy._all_operations, a)
        if hasattr(f, '__call__'):
            ff = functools.partial(ast_stripping_op, f, the_solver=self)
            ff.__doc__ = f.__doc__
            return ff
        else:
            return f

    def __dir__(self):
        return sorted(set(dir(super(SimSolver, self)) + dir(claripy._all_operations)))

    @auto_actions
    def add(self, *constraints):
        return self._solver.add(constraints)

    @unsat_catcher
    @auto_actions
    def satisfiable(self, **kwargs):
        if o.SYMBOLIC not in self.state.options:
            # Without SYMBOLIC, trust the cached result if there is one.
            if self._solver.result is None:
                return True
            else:
                return self._solver.result.sat
        return self._solver.satisfiable(**kwargs)

    @unsat_catcher
    @auto_actions
    def solution(self, e, v, **kwargs):
        return self._solver.solution(e, v, **kwargs)

    #
    # And these return raw results
    #
    @unsat_catcher
    @auto_actions
    def _any_raw(self, e, extra_constraints=()):
        if not isinstance(e, claripy.ast.Base):
            l.warning("SimSolver.any_raw received a %s (expects an AST)", type(e).__name__)
            return e
        return self._solver.eval(e, 1, extra_constraints=extra_constraints)[0]

    @auto_actions
    def _any_n_raw(self, e, n, extra_constraints=()):
        # Returns up to n solutions; unsat yields an empty list rather
        # than raising.
        try:
            return self._solver.eval(e, n, extra_constraints=extra_constraints)
        except claripy.UnsatError:
            return [ ]

    @unsat_catcher
    @auto_actions
    def _min_raw(self, e, extra_constraints=()):
        return self._solver.min(e, extra_constraints=extra_constraints)

    @unsat_catcher
    @auto_actions
    def _max_raw(self, e, extra_constraints=()):
        return self._solver.max(e, extra_constraints=extra_constraints)

    def symbolic(self, e): # pylint:disable=R0201
        # Native Python values and concrete BVVs are never symbolic.
        if type(e) in (int, str, float, bool, long, claripy.bv.BVV):
            return False
        return e.symbolic

    def single_valued(self, e):
        if self.state.mode == 'static':
            if type(e) in (int, str, float, bool, long, claripy.bv.BVV):
                return True
            else:
                return e.cardinality <= 1
        else:
            # All symbolic expressions are not single-valued
            return not self.symbolic(e)

    @auto_actions
    def simplify(self, *args):
        """With no args, simplify the solver's constraints; with an AST
        argument, simplify and return it; anything else is returned
        unchanged."""
        if len(args) == 0:
            return self._solver.simplify()
        elif isinstance(args[0], claripy.ast.Base):
            return claripy.simplify(args[0])
        else:
            return args[0]

    def variables(self, e): #pylint:disable=no-self-use
        return e.variables

    #
    # Branching stuff
    #
    def copy(self):
        return SimSolver(solver=self._solver.branch())

    def merge(self, others, merge_flag, flag_values): # pylint: disable=W0613
        merging_occurred, self._stored_solver = self._solver.merge([ oc._solver for oc in others ], merge_flag, flag_values)
        return merging_occurred, [ ]

    def widen(self, others, merge_flag, flag_values):
        merging_occurred, _ = self.merge(others, merge_flag, flag_values)
        return merging_occurred

    #
    # Other stuff
    #
    def any_str(self, e, extra_constraints=()):
        return self.any_n_str(e, 1, extra_constraints=extra_constraints)[0]

    def any_n_str_iter(self, e, n, extra_constraints=()):
        # Yield up to n solutions decoded as raw byte strings.
        for s in self._any_n_raw(e, n, extra_constraints=extra_constraints):
            if type(s) is claripy.bv.BVV:
                yield ("%x" % s.value).zfill(s.bits/4).decode('hex')
            else:
                yield ("%x" % s).zfill(len(e)/4).decode('hex')

    def any_n_str(self, e, n, extra_constraints=()):
        return list(self.any_n_str_iter(e, n, extra_constraints=extra_constraints))

    def any_int(self, e, extra_constraints=()):
        r = self._any_raw(e, extra_constraints=extra_constraints)
        return r.value if type(r) is claripy.bv.BVV else r

    def any_n_int(self, e, n, extra_constraints=()):
        rr = self._any_n_raw(e, n, extra_constraints=extra_constraints)
        return [ r.value if type(r) is claripy.bv.BVV else r for r in rr ]

    def min_int(self, e, extra_constraints=()):
        r = self._min_raw(e, extra_constraints=extra_constraints)
        return r.value if type(r) is claripy.bv.BVV else r

    def max_int(self, e, extra_constraints=()):
        r = self._max_raw(e, extra_constraints=extra_constraints)
        return r.value if type(r) is claripy.bv.BVV else r

    def exactly_n(self, e, n, extra_constraints=()):
        r = self._any_n_raw(e, n, extra_constraints=extra_constraints)
        if len(r) != n:
            raise SimValueError("concretized %d values (%d required) in exactly_n" % (len(r), n))
        return r

    def exactly_n_int(self, e, n, extra_constraints=()):
        r = self.any_n_int(e, n, extra_constraints=extra_constraints)
        if len(r) != n:
            raise SimValueError("concretized %d values (%d required) in exactly_n" % (len(r), n))
        return r

    def exactly_int(self, e, extra_constraints=(), default=None):
        """Concretize e to exactly one int; return default (if given)
        when that fails."""
        try:
            r = self.any_n_int(e, 1, extra_constraints=extra_constraints)
        except (SimValueError, SimSolverModeError):
            if default is not None:
                return default
            raise
        if len(r) != 1:
            if default is None:
                # Bug fix: the message was previously passed unformatted
                # ("...", len(r), 1) -- separate exception args instead
                # of %-interpolation.
                raise SimValueError("concretized %d values (%d required) in exactly_int" % (len(r), 1))
            else:
                return default
        return r[0]

    @auto_actions
    def unique(self, e, extra_constraints=()):
        if not isinstance(e, claripy.ast.Base):
            return True
        # if we don't want to do symbolic checks, assume symbolic variables are multivalued
        if o.SYMBOLIC not in self.state.options and self.symbolic(e):
            return False
        r = self._any_n_raw(e, 2, extra_constraints=extra_constraints)
        if len(r) == 1:
            # Exactly one solution: pin it down with a constraint.
            self.add(e == r[0])
            return True
        elif len(r) == 0:
            raise SimValueError("unsatness during uniqueness check(ness)")
        else:
            return False
SimStatePlugin.register_default('solver_engine', SimSolver)
from .. import s_options as o
from ..s_errors import SimValueError, SimUnsatError, SimSolverModeError
support passing an exact argument to any_int and friends
#!/usr/bin/env python
from .plugin import SimStatePlugin
from ..s_action_object import ast_stripping_op as _actual_ast_stripping_op
import sys
import functools
import logging
l = logging.getLogger('simuvex.plugins.solver')
#pylint:disable=unidiomatic-typecheck
#
# Timing stuff
#
_timing_enabled = False
import time
lt = logging.getLogger('simuvex.plugins.solver.timing')
def ast_stripping_op(f, *args, **kwargs):
    """
    Invoke f through the AST-stripping wrapper, optionally timing it
    and breaking into a debugger when the call exceeds break_time.
    """
    the_solver = kwargs.pop('the_solver', None)
    if _timing_enabled:
        the_solver = args[0] if the_solver is None else the_solver
        s = the_solver.state
        start = time.time()
        r = _actual_ast_stripping_op(f, *args, **kwargs)
        end = time.time()
        duration = end-start
        # Describe the guest location for the log line.
        if s.scratch.sim_procedure is None and s.scratch.bbl_addr is not None:
            location = "bbl 0x%x, stmt %d (inst 0x%x)" % (s.scratch.bbl_addr, s.scratch.stmt_idx, s.scratch.ins_addr)
        elif s.scratch.sim_procedure is not None:
            location = "sim_procedure %s" % s.scratch.sim_procedure
        else:
            location = "unknown"
        # Log level scales with duration (tenths of a second).
        lt.log(int((end-start)*10), '%s took %s seconds at %s', f.__name__, round(duration, 2), location)
        if break_time >= 0 and duration > break_time:
            import ipdb; ipdb.set_trace()
    else:
        r = _actual_ast_stripping_op(f, *args, **kwargs)
    return r
#pylint:disable=global-variable-undefined
def enable_timing():
    """Turn on solver-call timing and make the timing logger verbose."""
    global _timing_enabled
    _timing_enabled = True
    lt.setLevel(1)
def disable_timing():
    """Turn off solver-call timing."""
    global _timing_enabled
    _timing_enabled = False
import os
# Timing is opt-in via SOLVER_TIMING; SOLVER_BREAK_TIME sets the
# debugger-break threshold in seconds (-1 disables breaking).
if os.environ.get('SOLVER_TIMING', False):
    enable_timing()
else:
    disable_timing()
break_time = float(os.environ.get('SOLVER_BREAK_TIME', -1))
#
# Various over-engineered crap
#
def auto_actions(f):
    """Decorator routing a method call through ast_stripping_op."""
    @functools.wraps(f)
    def autoed_f(self, *args, **kwargs):
        return ast_stripping_op(f, self, *args, **kwargs)
    return autoed_f
def unsat_catcher(f):
    """
    Decorator translating claripy.UnsatError into SimUnsatError while
    preserving the original traceback (Python 2 three-arg raise).
    """
    @functools.wraps(f)
    def wrapped_f(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except claripy.UnsatError:
            e_type, value, traceback = sys.exc_info()
            raise SimUnsatError, ("Got an unsat result", e_type, value), traceback
    return wrapped_f
import claripy
class SimSolver(SimStatePlugin):
    """
    State plugin exposing the constraint solver.  A claripy frontend is
    chosen lazily from the state's options; unknown attributes are
    proxied to the frontend (or claripy itself) through
    ast_stripping_op.  The `exact` keyword on the query helpers is
    passed down to the frontend to allow approximate evaluation.
    """
    def __init__(self, solver=None): #pylint:disable=redefined-outer-name
        l.debug("Creating SimSolverClaripy.")
        SimStatePlugin.__init__(self)
        self._stored_solver = solver

    # serialization support for the analysis framework
    def _ana_getstate(self):
        return self._stored_solver, self.state

    def _ana_setstate(self, s):
        self._stored_solver, self.state = s

    def set_state(self, state):
        SimStatePlugin.set_state(self, state)

    @property
    def _solver(self):
        """Lazily construct and cache the claripy frontend appropriate
        for the state's options."""
        if self._stored_solver is not None:
            return self._stored_solver
        # Approximation options trade exact solving for a hybrid mode.
        exact = not (o.APPROXIMATE_MEMORY_INDICES in self.state.options or o.APPROXIMATE_GUARDS in self.state.options)
        if o.ABSTRACT_SOLVER in self.state.options:
            self._stored_solver = claripy.LightFrontend(claripy.backend_vsa)
        elif o.COMPOSITE_SOLVER in self.state.options:
            self._stored_solver = claripy.CompositeFrontend(claripy.backend_z3, solver_class=claripy.FullFrontend if exact else claripy.HybridFrontend)
        elif o.SYMBOLIC in self.state.options:
            self._stored_solver = claripy.FullFrontend(claripy.backend_z3) if exact else claripy.HybridFrontend(claripy.backend_z3)
        else:
            self._stored_solver = claripy.LightFrontend(claripy.backend_vsa)
        return self._stored_solver

    @property
    def constraints(self):
        return self._solver.constraints

    #
    # Get unconstrained stuff
    #
    def Unconstrained(self, name, bits, **kwargs):
        """Return a fresh unconstrained value: a symbolic BV (or a top
        StridedInterval under abstract memory) when
        SYMBOLIC_INITIAL_VALUES is on, otherwise the concrete default 0."""
        if o.SYMBOLIC_INITIAL_VALUES in self.state.options:
            # Return a symbolic value
            if o.ABSTRACT_MEMORY in self.state.options:
                l.debug("Creating new top StridedInterval")
                r = claripy.TSI(bits=bits, name=name, uninitialized=True, **kwargs)
            else:
                l.debug("Creating new unconstrained BV named %s", name)
                r = claripy.BVS(name, bits, **kwargs)
            self.state.log.add_event('unconstrained', name=iter(r.variables).next(), bits=bits, **kwargs)
            return r
        else:
            # Return a default value, aka. 0
            return claripy.BVV(0, bits)

    #
    # Various passthroughs
    #
    def downsize(self):
        return self._solver.downsize()

    def __getattr__(self, a):
        # Proxy unknown attributes to the frontend, falling back to
        # claripy's operations; wrap callables in ast_stripping_op.
        try:
            f = getattr(self._solver, a)
        except AttributeError:
            f = getattr(claripy._all_operations, a)
        if hasattr(f, '__call__'):
            ff = functools.partial(ast_stripping_op, f, the_solver=self)
            ff.__doc__ = f.__doc__
            return ff
        else:
            return f

    def __dir__(self):
        return sorted(set(dir(super(SimSolver, self)) + dir(claripy._all_operations)))

    @auto_actions
    def add(self, *constraints):
        return self._solver.add(constraints)

    @unsat_catcher
    @auto_actions
    def satisfiable(self, **kwargs):
        if o.SYMBOLIC not in self.state.options:
            # Without SYMBOLIC, trust the cached result if there is one.
            if self._solver.result is None:
                return True
            else:
                return self._solver.result.sat
        return self._solver.satisfiable(**kwargs)

    @unsat_catcher
    @auto_actions
    def solution(self, e, v, **kwargs):
        return self._solver.solution(e, v, **kwargs)

    #
    # And these return raw results
    #
    @unsat_catcher
    @auto_actions
    def _any_raw(self, e, extra_constraints=(), exact=None):
        if not isinstance(e, claripy.ast.Base):
            l.warning("SimSolver.any_raw received a %s (expects an AST)", type(e).__name__)
            return e
        return self._solver.eval(e, 1, extra_constraints=extra_constraints, exact=exact)[0]

    @auto_actions
    def _any_n_raw(self, e, n, extra_constraints=(), exact=None):
        # Returns up to n solutions; unsat yields an empty list rather
        # than raising.
        try:
            return self._solver.eval(e, n, extra_constraints=extra_constraints, exact=exact)
        except claripy.UnsatError:
            return [ ]

    @unsat_catcher
    @auto_actions
    def _min_raw(self, e, extra_constraints=(), exact=None):
        return self._solver.min(e, extra_constraints=extra_constraints, exact=exact)

    @unsat_catcher
    @auto_actions
    def _max_raw(self, e, extra_constraints=(), exact=None):
        return self._solver.max(e, extra_constraints=extra_constraints, exact=exact)

    def symbolic(self, e): # pylint:disable=R0201
        # Native Python values and concrete BVVs are never symbolic.
        if type(e) in (int, str, float, bool, long, claripy.bv.BVV):
            return False
        return e.symbolic

    def single_valued(self, e):
        if self.state.mode == 'static':
            if type(e) in (int, str, float, bool, long, claripy.bv.BVV):
                return True
            else:
                return e.cardinality <= 1
        else:
            # All symbolic expressions are not single-valued
            return not self.symbolic(e)

    @auto_actions
    def simplify(self, *args):
        """With no args, simplify the solver's constraints; with an AST
        argument, simplify and return it; anything else is returned
        unchanged."""
        if len(args) == 0:
            return self._solver.simplify()
        elif isinstance(args[0], claripy.ast.Base):
            return claripy.simplify(args[0])
        else:
            return args[0]

    def variables(self, e): #pylint:disable=no-self-use
        return e.variables

    #
    # Branching stuff
    #
    def copy(self):
        return SimSolver(solver=self._solver.branch())

    def merge(self, others, merge_flag, flag_values): # pylint: disable=W0613
        merging_occurred, self._stored_solver = self._solver.merge([ oc._solver for oc in others ], merge_flag, flag_values)
        return merging_occurred, [ ]

    def widen(self, others, merge_flag, flag_values):
        merging_occurred, _ = self.merge(others, merge_flag, flag_values)
        return merging_occurred

    #
    # Other stuff
    #
    def any_str(self, e, extra_constraints=(), exact=None):
        return self.any_n_str(e, 1, extra_constraints=extra_constraints, exact=exact)[0]

    def any_n_str_iter(self, e, n, extra_constraints=(), exact=None):
        # Yield up to n solutions decoded as raw byte strings.
        for s in self._any_n_raw(e, n, extra_constraints=extra_constraints, exact=exact):
            if type(s) is claripy.bv.BVV:
                yield ("%x" % s.value).zfill(s.bits/4).decode('hex')
            else:
                yield ("%x" % s).zfill(len(e)/4).decode('hex')

    def any_n_str(self, e, n, extra_constraints=(), exact=None):
        return list(self.any_n_str_iter(e, n, extra_constraints=extra_constraints, exact=exact))

    def any_int(self, e, extra_constraints=(), exact=None):
        r = self._any_raw(e, extra_constraints=extra_constraints, exact=exact)
        return r.value if type(r) is claripy.bv.BVV else r

    def any_n_int(self, e, n, extra_constraints=(), exact=None):
        rr = self._any_n_raw(e, n, extra_constraints=extra_constraints, exact=exact)
        return [ r.value if type(r) is claripy.bv.BVV else r for r in rr ]

    def min_int(self, e, extra_constraints=(), exact=None):
        r = self._min_raw(e, extra_constraints=extra_constraints, exact=exact)
        return r.value if type(r) is claripy.bv.BVV else r

    def max_int(self, e, extra_constraints=(), exact=None):
        r = self._max_raw(e, extra_constraints=extra_constraints, exact=exact)
        return r.value if type(r) is claripy.bv.BVV else r

    def exactly_n(self, e, n, extra_constraints=(), exact=None):
        r = self._any_n_raw(e, n, extra_constraints=extra_constraints, exact=exact)
        if len(r) != n:
            raise SimValueError("concretized %d values (%d required) in exactly_n" % (len(r), n))
        return r

    def exactly_n_int(self, e, n, extra_constraints=(), exact=None):
        r = self.any_n_int(e, n, extra_constraints=extra_constraints, exact=exact)
        if len(r) != n:
            raise SimValueError("concretized %d values (%d required) in exactly_n" % (len(r), n))
        return r

    def exactly_int(self, e, extra_constraints=(), default=None, exact=None):
        """Concretize e to exactly one int; return default (if given)
        when that fails."""
        try:
            r = self.any_n_int(e, 1, extra_constraints=extra_constraints, exact=exact)
        except (SimValueError, SimSolverModeError):
            if default is not None:
                return default
            raise
        if len(r) != 1:
            if default is None:
                # Bug fix: the message was previously passed unformatted
                # ("...", len(r), 1) -- separate exception args instead
                # of %-interpolation.
                raise SimValueError("concretized %d values (%d required) in exactly_int" % (len(r), 1))
            else:
                return default
        return r[0]

    @auto_actions
    def unique(self, e, extra_constraints=(), exact=None):
        if not isinstance(e, claripy.ast.Base):
            return True
        # if we don't want to do symbolic checks, assume symbolic variables are multivalued
        if o.SYMBOLIC not in self.state.options and self.symbolic(e):
            return False
        r = self._any_n_raw(e, 2, extra_constraints=extra_constraints, exact=exact)
        if len(r) == 1:
            # Exactly one solution: pin it down with a constraint.
            self.add(e == r[0])
            return True
        elif len(r) == 0:
            raise SimValueError("unsatness during uniqueness check(ness)")
        else:
            return False
SimStatePlugin.register_default('solver_engine', SimSolver)
from .. import s_options as o
from ..s_errors import SimValueError, SimUnsatError, SimSolverModeError
|
#coding: utf-8
"""
Created on 23.07.2012
@author: pirogov
"""
import copy
import datetime
from django.db.models import fields as dj_fields
from django.utils.encoding import force_unicode
from m3.ui import actions as m3_actions
from m3.ui.actions.interfaces import ISelectablePack
from m3.core.exceptions import RelatedError, ApplicationLogicException
from m3.db import safe_delete
from m3.ui.ext.fields.complex import ExtSearchField
import ui, tools
class BaseWindowAction(m3_actions.Action):
    """
    Base action that returns a window.
    """
    win_params = {} # parameters used to build the window
    request = None # request of the current execution
    context = None # execution context; returned by the action
    win = None # instance of the window the action will return
    def create_window(self):
        """
        Creates the window object,
        e.g. self.win = EditWindow()
        """
        raise NotImplementedError()
    def set_windows_params(self):
        """
        Fills the win_params dict,
        e.g. self.win_params['title'] = u'...'
        """
        pass
    def _apply_windows_params(self):
        """
        Passes the parameters into the window instance;
        override only in exceptional cases.
        """
        self.win.set_params(self.win_params)
    def configure_window(self):
        """
        Additionally configures the window -- only through the window's
        own methods, e.g. self.win.make_read_only();
        never reach into internals like
        self.win.grid.top_bar.items[8].text = ...
        """
        pass
    def run(self, request, context):
        """
        The handler itself; override only in exceptional cases.
        """
        # Work on a shallow copy so the shared action instance stays
        # stateless across requests; win_params is re-copied from the
        # class attribute so the class-level dict is never mutated.
        new_self = copy.copy(self)
        new_self.win_params = (self.__class__.win_params or {}).copy()
        new_self.request = request
        new_self.context = context
        new_self.set_windows_params()
        new_self.create_window()
        new_self._apply_windows_params()
        new_self.configure_window()
        return m3_actions.ExtUIScriptResult(
            new_self.win, context=new_self.context)
class ObjectListWindowAction(BaseWindowAction):
    """
    Action that returns a window with the list of catalog items.
    """
    url = '/list-window$'
    is_select_mode = False # window mode (True - selection, False - list)
    def set_windows_params(self):
        params = self.win_params
        params['pack'] = self.parent
        params['title'] = self.parent.title
        params['is_select_mode'] = self.is_select_mode
        params['id_param_name'] = self.parent.id_param_name
        params['height'] = self.parent.height
        params['width'] = self.parent.width
        # the window is read-only unless the user has edit permission
        params['read_only'] = not self.parent.has_sub_permission(
            self.request.user, self.parent.PERM_EDIT, self.request)
        # let the pack make final adjustments to the parameters
        self.win_params = self.parent.get_list_window_params(
            params, self.request, self.context)
    def create_window(self):
        self.win = self.parent.create_list_window(
            is_select_mode=self.win_params['is_select_mode'],
            request=self.request,
            context=self.context)
class ObjectSelectWindowAction(ObjectListWindowAction):
    """
    Action returning a selection window for the catalog.
    """
    url = '/select-window$'
    is_select_mode = True
class ObjectEditWindowAction(BaseWindowAction):
    """
    Editing of a catalog item.
    """
    url = '/edit-window$'
    def set_windows_params(self):
        try:
            obj, create_new = self.parent.get_obj(self.request, self.context)
        except self.parent.get_not_found_exception():
            raise ApplicationLogicException(self.parent.MSG_DOESNOTEXISTS)
        self.win_params['object'] = obj
        self.win_params['create_new'] = create_new
        self.win_params['form_url'] = self.parent.save_action.get_absolute_url()
        # default window title ("Add" / "Edit")
        self.win_params['title'] = self.parent.format_window_title(
            u'Добавление' if create_new else u'Редактирование')
        self.win_params = self.parent.get_edit_window_params(
            self.win_params, self.request, self.context)
    def create_window(self):
        'return the window for creation or editing'
        assert self.win_params.has_key('create_new'), (
            u'может забыли вызвать родителький set_windows_params?')
        self.win = self.parent.create_edit_window(
            self.win_params['create_new'], self.request, self.context)
    def configure_window(self):
        'window configuration'
        # check the edit permission; without it the form is read-only
        if not self.parent.has_sub_permission(
            self.request.user, self.parent.PERM_EDIT, self.request):
            exclude_list = ['close_btn', 'cancel_btn']
            self.win.make_read_only(True, exclude_list)
class ObjectSaveAction(m3_actions.Action):
    """
    Action that saves a new record into the catalog;
    ApplicationLogicException may be raised at any point.
    """
    url = '/save$'
    request = None
    context = None
    win = None
    obj = None
    create_new = None
    def create_window(self):
        'return the window for creation or editing'
        self.win = self.parent.create_edit_window(self.create_new, self.request, self.context)
    def create_obj(self):
        'object creation'
        try:
            self.obj, self.create_new = self.parent.get_obj(self.request, self.context)
        except self.parent.get_not_found_exception():
            raise ApplicationLogicException(self.parent.MSG_DOESNOTEXISTS)
    def bind_win(self):
        'bind the form to the request'
        self.win.form.bind_to_request(self.request)
    def bind_to_obj(self):
        'bind the form to the object'
        self.win.form.to_object(self.obj)
    def save_obj(self):
        'object saving'
        self.parent.save_row(self.obj, self.create_new, self.request, self.context)
    def run(self, request, context):
        # work on a shallow copy so the action instance stays stateless
        new_self = copy.copy(self)
        new_self.request = request
        new_self.context = context
        new_self.create_obj()
        new_self.create_window()
        new_self.bind_win()
        new_self.bind_to_obj()
        new_self.save_obj()
        return m3_actions.OperationResult()
class ObjectRowsAction(m3_actions.Action):
    """
    Returns the data for the catalog grid.
    """
    url = '/rows$'
    request = None
    context = None
    query = None
    def set_query(self):
        """sets up the database query"""
        self.query = self.parent.get_rows_query(self.request, self.context)
    def apply_filter(self):
        """applies the search filter"""
        self.query = self.parent.apply_filter(
            self.query,
            self.request,
            self.context
        )
    def apply_sort_order(self):
        """applies the sort order"""
        self.query = self.parent.apply_sort_order(
            self.query,
            self.request,
            self.context
        )
    def apply_limit(self):
        'truncates the query to the current page'
        if getattr(self.parent, 'allow_paging', True):
            offset = m3_actions.utils.extract_int(self.request, 'start')
            limit = m3_actions.utils.extract_int(self.request, 'limit')
        else:
            offset = limit = 0
        self.query = tools.QuerySplitter(self.query, offset, limit)
    def get_rows(self):
        'converts the query into a list'
        res = []
        for obj in self.query:
            prep_obj = self.prepare_object(obj)
            if prep_obj:
                res.append(prep_obj)
            else:
                # the row was filtered out; tell the splitter so paging
                # stays correct
                self.query.skip_last()
        return res
    def prepare_object(self, obj):
        """
        Returns the dict used to build the resulting list;
        receives an object obtained from the QuerySet.
        """
        if hasattr(self.parent, 'prepare_row'):
            obj = self.parent.prepare_row(obj, self.request, self.context)
        if obj is None:
            return None
        result_dict = {}
        def parse_data_indexes(obj, col, result):
            # split a string like "asdad[.asdasd]" into "head" and "tail"
            # "aaa" -> "aaa", None
            # "aaa.bbb.ccc" -> "aaa", "bbb.ccc"
            col, subcol = (col.split('.', 1) + [None])[:2]
            # ------- if there is a sub-index, recurse deeper
            if subcol:
                obj = getattr(obj, col, None)
                sub_dict = result.setdefault(col, {})
                parse_data_indexes(obj, subcol, sub_dict)
            else:
                # --- no sub-index: fetch the value
                # look the field up on the model
                try:
                    fld = obj._meta.get_field_by_name(col)[0]
                except AttributeError:
                    fld = None
                except IndexError:
                    fld = None
                except dj_fields.FieldDoesNotExist:
                    fld = None
                # get the value
                obj = getattr(obj, col, None)
                if fld:
                    try:
                        obj = obj.display()
                    except AttributeError:
                        if fld.choices:
                            # the attribute is a field with choices:
                            # try to find the matching display value
                            for ch in fld.choices:
                                if obj == ch[0]:
                                    obj = ch[1]
                                    break
                            else:
                                obj = u''
                else:
                    # the attribute (not a field) may be callable
                    if callable(obj):
                        obj = obj()
                if isinstance(obj, datetime.date):
                    obj = obj.strftime('%d.%m.%Y')
                result[col] = force_unicode(obj)
        # fill the result with data for every data index
        for col in self.get_column_data_indexes():
            parse_data_indexes(obj, col, result_dict)
        return result_dict
    def get_total_count(self):
        'count of all matching objects (before paging)'
        return self.query.count()
    def get_column_data_indexes(self):
        'list of data indexes used to build the json'
        res = []
        for col in getattr(self.parent, 'columns', []):
            res.append(col['data_index'])
        res.append(self.parent.id_field)
        return res
    def run(self, request, context):
        # work on a shallow copy so the action instance stays stateless
        new_self = copy.copy(self)
        new_self.request = request
        new_self.context = context
        new_self.set_query()
        new_self.apply_filter()
        new_self.apply_sort_order()
        # count BEFORE paging so the grid knows the full total
        total_count = new_self.get_total_count()
        new_self.apply_limit()
        rows = new_self.get_rows()
        return m3_actions.PreJsonResult({
            'rows': rows,
            'total': total_count
        })
class ObjectDeleteAction(m3_actions.Action):
    """
    Action deleting one or more objects by the ids in the request.
    """
    url = '/delete_row$'
    request = None
    context = None
    def try_delete_objs(self):
        """
        Delete the objects, translating low-level errors into
        user-readable ApplicationLogicException messages.
        """
        try:
            self.delete_objs()
        # 'except X as e' instead of the deprecated 'except X, e'
        # (works on Python 2.6+ and stays valid on Python 3)
        except RelatedError as e:
            raise ApplicationLogicException(e.args[0])
        except Exception as e:
            if e.__class__.__name__ == 'IntegrityError':
                message = (u'Не удалось удалить элемент. '
                    u'Возможно на него есть ссылки.')
                raise ApplicationLogicException(message)
            else:
                # any unrelated error is re-raised untouched
                raise
    def delete_objs(self):
        """
        Delete every object whose id is listed in the request parameter.
        """
        ids = m3_actions.utils.extract_int_list(
            self.request, self.parent.id_param_name)
        for i in ids:
            self.delete_obj(i)
    def delete_obj(self, id_):
        """Delete a single object and hand it to the auditor."""
        obj = self.parent.delete_row(id_, self.request, self.context)
        self.audit(obj)
    def run(self, request, context):
        """Entry point: work on a copy of self to keep the action stateless."""
        new_self = copy.copy(self)
        new_self.request = request
        new_self.context = context
        new_self.try_delete_objs()
        return m3_actions.OperationResult()
class ObjectPack(m3_actions.ActionPack, ISelectablePack):
    """
    Pack bundling the actions specific to browsing and editing one model.
    """
    # Catalogue window title; unless overridden in a subclass it is
    # built from the model's verbose names.
    @property
    def title(self):
        return unicode(
            self.model._meta.verbose_name_plural or
            self.model._meta.verbose_name or
            repr(self.model))
    @property
    def short_name(self):
        """Pack name used for lookups in the controller;
        equals the lowercased model class name."""
        return self.model.__name__.lower()
    @property
    def url(self):
        return r'/%s' % self.short_name
    # List of column descriptors (dicts); every key of a descriptor is
    # passed to add_column (see BaseExtGridColumn for the options),
    # except 'filterable' - a flag marking the column as searchable.
    columns = [
        {
            'header':u'Наименование',
            'data_index':'__unicode__',
        },
        # {
        #     'data_index':'',
        #     'width':,
        #     'header':u'',
        #     'filterable':True,
        #     'sortable':True,
        #     'sort_fields':['foo','bar'],
        # },
        # {
        #     'header':u'Группирующая Колонка 1',
        #     'columns': [
        #         {
        #             'data_index':'school.name',
        #             'width':200,
        #             'header':u'Колонка 1',
        #             'filterable':True
        #         },
        #     ]
        # },
        # {
        #     'data_index':'school.parent.name',
        #     'width':200,
        #     'header':u'Родитель',
        #     'renderer':'parent_render'
        # },
    ]
    # flat list of the fields used for filtering
    _all_filter_fields = None
    # mapping data_index -> sort_order
    _sort_fields = None
    # Catalogue configuration (provided by the application developer)
    model = None
    # name of the field identifying an object, and of the request
    # parameter carrying it in modification/deletion requests
    @property
    def id_param_name(self):
        return '%s_id' % self.short_name
    # data_index of the column identifying an object; its value is read
    # from the model and sent as the ID to ExtDataStore, i.e. an edit
    # POST request will contain {id_param_name: obj.id_field}
    id_field = 'id'
    # field/method providing the value displayed in a DictSelectField;
    # nested field extraction does NOT work yet - conflicts with ExtJS
    column_name_on_select = '__unicode__'
    # additional model fields to search on; the main list is collected
    # from `columns` by the 'filterable' flag
    filter_fields = []
    allow_paging = True
    # when True the pack configures the grid as read-only
    read_only = False
    # Sort order of the list elements, e.g.:
    # list_sort_order = ['code', '-name']
    list_sort_order = None
    # Windows used to edit a catalogue element:
    add_window = None  # for a new element
    edit_window = None  # for an existing one
    # Flag allowing/forbidding deletion; when None, deletion is allowed
    # whenever add_window/edit_window is present
    can_delete = None
    # Windows responsible for displaying the forms:
    list_window = ui.BaseListWindow  # list form
    select_window = ui.BaseSelectWindow  # selection form @UndefinedVariable
    # default size of the selection window
    width, height = 510, 400
    # permissions for basic catalogues
    PERM_EDIT = 'edit'
    sub_permissions = {PERM_EDIT: u'Редактирование'}
    MSG_DOESNOTEXISTS = (u'Запись не найдена в базе данных.<br/>' +
        u'Возможно, она была удалена. Пожалуйста, обновите таблицу.')
    def __init__(self):
        super(ObjectPack, self).__init__()
        # Unlike ordinary packs, the actions here are created by the
        # pack itself (not by the controller), so they can be
        # conveniently addressed by name...
        self.list_window_action = ObjectListWindowAction()
        self.select_window_action = ObjectSelectWindowAction()
        self.rows_action = ObjectRowsAction()
        # ...but they still have to be registered
        self.actions.extend([
            self.list_window_action,
            self.select_window_action,
            self.rows_action
        ])
        if self.add_window and not self.read_only:
            self.new_window_action = ObjectEditWindowAction()
            self.actions.append(self.new_window_action)
        else:
            self.new_window_action = None
        if self.edit_window and not self.read_only:
            self.edit_window_action = ObjectEditWindowAction()
            self.actions.append(self.edit_window_action)
        else:
            self.edit_window_action = None
        if (self.add_window or self.edit_window) and not self.read_only:
            self.save_action = ObjectSaveAction()
            self.actions.append(self.save_action)
        else:
            self.save_action = None
        if self.can_delete is None:
            self.can_delete = (
                self.add_window or self.edit_window) and not self.read_only
        if self.can_delete:
            self.delete_action = ObjectDeleteAction()
            self.actions.append(self.delete_action)
        else:
            self.delete_action = None
        # build the flat column lists.
        # NOTE: a copy is taken - assigning the class-level list itself
        # would make every append below mutate the attribute shared by
        # all instances and subclasses
        self._all_filter_fields = list(self.filter_fields)
        self._sort_fields = {}
        def flatify(cols):
            for c in cols:
                sub_cols = c.get('columns', None)
                if sub_cols is not None:
                    flatify(sub_cols)
                else:
                    data_index = c['data_index']
                    field = data_index.replace('.', '__')
                    # field(s) used for sorting
                    if c.get('sortable', False):
                        sort_fields = c.get('sort_fields', field)
                        if isinstance(sort_fields, basestring):
                            # a bare string must become a one-element
                            # list, not a list of its characters
                            sort_fields = [sort_fields]
                        else:
                            try:
                                sort_fields = list(sort_fields)
                            except TypeError:
                                sort_fields = [sort_fields]
                        self._sort_fields[data_index] = sort_fields
                    # field used for filtering
                    if c.get('filterable'):
                        self._all_filter_fields.append(field)
        flatify(self.columns)
    def replace_action(self, action_attr_name, new_action):
        """Replace one of the pack's actions by attribute name."""
        if getattr(self, action_attr_name, None):
            self.actions.remove(getattr(self, action_attr_name))
        setattr(self, action_attr_name, new_action)
        if getattr(self, action_attr_name):
            self.actions.append(getattr(self, action_attr_name))
    def get_default_action(self):
        """Return the default action (used for a desktop icon / menu
        item with simplified UI embedding, add_to_XXX=True)."""
        return self.list_window_action
    def get_display_text(self, key, attr_name=None):
        """Return the display value of the record (or of its attribute
        attr_name) identified by key."""
        row = self.get_row(key)
        if row is not None:
            try:
                # getattr(row, None) would raise TypeError rather than
                # AttributeError, so treat a missing attr_name the same
                # way as a missing attribute
                if attr_name is None:
                    raise AttributeError(attr_name)
                text = getattr(row, attr_name)
            except AttributeError:
                try:
                    text = getattr(row, self.column_name_on_select)
                except AttributeError:
                    raise Exception(
                        u'Не получается получить поле %s для '
                        u'DictSelectField.pack = %s' % (attr_name, self))
            # getattr may return a method, e.g. verbose_name
            if callable(text):
                return text()
            else:
                return unicode(text)
    def get_edit_window_params(self, params, request, context):
        """
        Return the dict of params that will be passed to the edit window.
        """
        return params
    def get_list_window_params(self, params, request, context):
        """
        Return the dict of params that will be passed to the list window.
        """
        return params
    def format_window_title(self, action):
        """
        Format a window title as "Model: Action"
        (e.g. "Employee: Adding").
        """
        return "%s: %s" % (self.model._meta.verbose_name.capitalize(), action)
    #==================== URL-RETURNING HELPERS =====================
    def get_list_url(self):
        """
        Return the address of the catalogue's list window.
        Used for assigning urls in the application.
        """
        return self.list_window_action.get_absolute_url()
    def get_select_url(self):
        """
        Return the address of the catalogue's selection window.
        Used for assigning urls in the application.
        """
        return self.select_window_action.get_absolute_url()
    def get_edit_url(self):
        """
        Return the address of the element edit window (or None).
        """
        if self.edit_window_action:
            return self.edit_window_action.get_absolute_url()
    def get_rows_url(self):
        """
        Return the address the grid queries its rows from.
        """
        return self.rows_action.get_absolute_url()
    def get_autocomplete_url(self):
        """Return the address queried for elements matching the text
        typed into a field."""
        return self.get_rows_url()
    def get_not_found_exception(self):
        """Return the model's 'not found' exception class."""
        return self.model.DoesNotExist
    def configure_grid(self, grid):
        """
        Configure a grid to work with this pack:
        create the columns and bind the actions.
        """
        get_url = lambda x: x.get_absolute_url() if x else None
        grid.url_data = get_url(self.rows_action)
        if not self.read_only:
            grid.url_new = get_url(self.new_window_action)
            grid.url_edit = get_url(self.edit_window_action)
            grid.url_delete = get_url(self.delete_action)
        for col in self.columns:
            temp = {}
            temp.update(col)
            # 'filterable' is our own flag - add_column must not see it
            temp.pop('filterable', None)
            grid.add_column(**temp)
        #TODO move into the grid class as an add_search_field method
        if self.get_filter_fields():
            # grid search box - only when there is something to search on
            grid.top_bar.search_field = ExtSearchField(
                empty_text=u'Поиск', width=200, component_for_search=grid)
            grid.top_bar.add_fill()
            grid.top_bar.items.append(grid.top_bar.search_field)
        grid.row_id_name = self.id_param_name
        grid.allow_paging = self.allow_paging
        grid.store.remote_sort = self.allow_paging
    def create_edit_window(self, create_new, request, context):
        """
        Return the window for creating / editing an object.
        """
        if create_new:
            return self.add_window()
        else:
            return self.edit_window()
    def create_list_window(self, is_select_mode, request, context):
        """
        Return the list / selection window.
        is_select_mode - display mode (True - selection, False - list).
        """
        if is_select_mode:
            return self.select_window()
        else:
            return self.list_window()
    def get_rows_query(self, request, context):
        """
        Return the database queryset providing the row data.
        """
        return self.model.objects.all().select_related()
    def get_filter_fields(self, request=None, context=None):
        """Return the list of column data_indexes to search on."""
        return self._all_filter_fields[:]
    def get_sort_order(self, data_index, reverse=False):
        """Return the sorting keys for the given data_index."""
        sort_order = self._sort_fields[data_index]
        if reverse:
            sort_order = ['-%s' % s for s in sort_order]
        return sort_order
    def apply_filter(self, query, request, context):
        """Return the queryset filtered by the request parameters."""
        return m3_actions.utils.apply_search_filter(
            query,
            request.REQUEST.get('filter'),
            self.get_filter_fields()
        )
    def apply_sort_order(self, query, request, context):
        """Return the queryset sorted by the request parameters."""
        sorting_key = request.REQUEST.get('sort')
        if sorting_key:
            reverse = request.REQUEST.get('dir') == 'DESC'
            sort_order = self.get_sort_order(
                data_index=sorting_key,
                reverse=reverse)
            query = query.order_by(*sort_order)
        return query
    def prepare_row(self, obj, request, context):
        """
        Hook for setting extra attributes on an object (or returning a
        proxy object) before the grid rows JSON is built.
        obj comes from iterating the get_rows_query() queryset.
        """
        return obj
    def get_row(self, row_id):
        """
        Return the object with the given id (used in DictSelectFields).
        A zero id means a new object must be created.
        """
        if row_id == 0:
            record = self.model()
        else:
            record = self.model.objects.get(id=row_id)
        return record
    def get_obj(self, request, context):
        """
        Return the tuple (object, create_new)
        used when creating/editing a record.
        """
        obj_id = m3_actions.utils.extract_int(request, self.id_param_name)
        create_new = (obj_id == 0)
        record = self.get_row(obj_id)
        return record, create_new
    def save_row(self, obj, create_new, request, context):
        """
        Save the object;
        raise ApplicationLogicException when needed.
        """
        obj.save()
    def delete_row(self, obj_id, request, context):
        """
        Delete the object; the returned model instance
        is handed over to the auditor.
        """
        obj = self.model.objects.get(id=obj_id)
        result = True
        if hasattr(obj, 'safe_delete'):
            result = obj.safe_delete()
        else:
            result = safe_delete(obj)
        # on success safe_delete returns True
        if not result:
            raise RelatedError(u'Не удалось удалить элемент %s. '
                u'Возможно на него есть ссылки.' % obj_id)
        return obj
    #-----------------------------------------------------------------------
    # Neither the menu nor the desktop is extended by default:
    # add_to_desktop = True
    # add_to_menu = True
    #
    # If extend_menu/extend_desktop are not implemented, the menu is
    # extended based on `title` and get_default_action().
    # The extend_X methods take priority.
    #
    # def extend_menu(self, menu):
    #     """
    #     Extend the main menu. The returned value should look like:
    #     return (
    #         # items added to the "catalogues" menu
    #         menu.dicts(
    #             menu.Item(u'Dict 1', self),
    #             menu.SubMenu(u'Dict SubMenu',
    #                 menu.Item(u'Dict 2', self.some_action),
    #             ),
    #         ),
    #         # items added to the "registries" menu
    #         menu.registries(
    #             menu.Item(u'Reg 1'),
    #             menu.SubMenu(u'Regs SubMenu',
    #                 menu.Item(u'Reg 2'),
    #             ),
    #         ),
    #         # items added to the "administration" menu
    #         menu.administry(
    #             menu.Item(u'Admin item 1')
    #         ),
    #         # items added to the menu "root"
    #         menu.Item(name=u'item 1', self.some_action),
    #         # submenu added to the menu "root"
    #         menu.SubMenu(u'SubMenu',
    #             menu.Item(u'Item 2', self.some_action),
    #             menu.SubMenu(u'SubSubMenu',
    #                 menu.Item(u'Item 3', self.some_action),
    #             ),
    #         ),
    #     )
    #     Any element can be disabled by returning None instead of it:
    #     menu.Item(u'Name', url='/') if some_condition else None
    #     Empty submenus collapse automatically (hidden in the Main Menu).
    #     """
    #     pass
    #
    # def extend_desktop(self, desk):
    #     """
    #     Extend the Desktop. The result should look like:
    #     return (
    #         desk.Item(u'Ярлык 1', pack=self.list_action),
    #         ...
    #     )
    #     Any element can be disabled by returning None instead of it:
    #     desk.Item(u'Name', pack=self) if some_condition else None
    #     """
    #     pass
#===============================================================================
# SelectorWindowAction
#===============================================================================
class SelectorWindowAction(m3_actions.Action):
    """
    Action showing a selection window with a user-defined action that
    handles the chosen elements - e.g. multi-selecting catalogue items
    in order to create links to them afterwards.
    """
    url = r'/selector_window'
    # show the window in multi-select mode
    multi_select = True
    # url of the action that handles the selection result
    callback_url = None
    # pack whose model objects are being selected
    data_pack = None
    def configure_action(self, request, context):
        """
        Hook for configuring the action: assign the pack and the
        callback here.
        """
        pass
    def configure_context(self, request, context):
        """
        Build the context for the selection window.
        The returned value must be an ActionContext instance.
        """
        return m3_actions.ActionContext()
    def configure_window(self, win, request, context):
        """
        Hook for configuring the selection window itself.
        """
        return win
    def run(self, request, context):
        """
        Execute the action.
        Do not override unless absolutely necessary!
        """
        clone = copy.copy(self)
        clone.configure_action(request, context)
        assert clone.data_pack, u'Не задан ActionPack-источник данных!'
        assert clone.callback_url, u'Не задан Callback!'
        new_context = clone.configure_context(request, context)
        # delegate to the data pack's selection-window action
        win_result = clone.data_pack.select_window_action.run(
            request, context)
        win = getattr(win_result, 'data', None)
        if not win:
            return win_result
        if not isinstance(win, ui.BaseSelectWindow):
            raise ApplicationLogicException(
                u'Класс окна выбора должен быть потомком BaseSelectWindow!')
        win = clone.configure_window(win, request, context)
        win.callback_url = clone.callback_url
        if clone.multi_select:
            win.enable_multi_select()
        return m3_actions.ExtUIScriptResult(win, new_context)
capitalize for titles. déjà vu.
#coding: utf-8
"""
Created on 23.07.2012
@author: pirogov
"""
import copy
import datetime
from django.db.models import fields as dj_fields
from django.utils.encoding import force_unicode
from m3.ui import actions as m3_actions
from m3.ui.actions.interfaces import ISelectablePack
from m3.core.exceptions import RelatedError, ApplicationLogicException
from m3.db import safe_delete
from m3.ui.ext.fields.complex import ExtSearchField
import ui, tools
class BaseWindowAction(m3_actions.Action):
    """
    Base action class whose result is a window.
    """
    win_params = {}  # parameters used to build the window
    request = None  # request of the current execution
    context = None  # execution context; it is returned by the action
    win = None  # the window instance the action will return
    def create_window(self):
        """
        Create the window object,
        e.g. self.win = EditWindow()
        """
        raise NotImplementedError()
    def set_windows_params(self):
        """
        Fill the win_params dict,
        e.g. self.win_params['title'] = u'...'
        """
        pass
    def _apply_windows_params(self):
        """
        Pass the collected params into the window instance.
        Override only as a last resort.
        """
        self.win.set_params(self.win_params)
    def configure_window(self):
        """
        Additionally configure the window - but only through the
        window's own methods, e.g. self.win.make_read_only();
        never reach into internals like
        self.win.grid.top_bar.items[8].text = ...
        """
        pass
    def run(self, request, context):
        """
        The handler itself; override only as a last resort.
        Works on a copy of the action so the instance stays stateless.
        """
        new_self = copy.copy(self)
        new_self.win_params = (self.__class__.win_params or {}).copy()
        new_self.request = request
        new_self.context = context
        new_self.set_windows_params()
        new_self.create_window()
        new_self._apply_windows_params()
        new_self.configure_window()
        return m3_actions.ExtUIScriptResult(
            new_self.win, context=new_self.context)
class ObjectListWindowAction(BaseWindowAction):
    """
    Action returning the window with the catalogue's element list.
    """
    url = '/list-window$'
    # window mode: True - selection, False - plain list
    is_select_mode = False
    def set_windows_params(self):
        """Collect the window params and let the pack post-process them."""
        pack = self.parent
        params = self.win_params
        params['pack'] = pack
        params['title'] = pack.title
        params['is_select_mode'] = self.is_select_mode
        params['id_param_name'] = pack.id_param_name
        params['height'] = pack.height
        params['width'] = pack.width
        params['read_only'] = not pack.has_sub_permission(
            self.request.user, pack.PERM_EDIT, self.request)
        self.win_params = pack.get_list_window_params(
            params, self.request, self.context)
    def create_window(self):
        """Ask the pack for a list/selection window instance."""
        self.win = self.parent.create_list_window(
            is_select_mode=self.win_params['is_select_mode'],
            request=self.request,
            context=self.context)
class ObjectSelectWindowAction(ObjectListWindowAction):
    """
    Action returning the catalogue selection window; identical to the
    list window action except that selection mode is switched on.
    """
    url = '/select-window$'
    is_select_mode = True
class ObjectEditWindowAction(BaseWindowAction):
    """
    Action returning the window for editing a catalogue element.
    """
    url = '/edit-window$'
    def set_windows_params(self):
        """Resolve the edited object and build the window params."""
        try:
            obj, create_new = self.parent.get_obj(self.request, self.context)
        except self.parent.get_not_found_exception():
            raise ApplicationLogicException(self.parent.MSG_DOESNOTEXISTS)
        self.win_params['object'] = obj
        self.win_params['create_new'] = create_new
        self.win_params['form_url'] = self.parent.save_action.get_absolute_url()
        # default window title
        self.win_params['title'] = self.parent.format_window_title(
            u'Добавление' if create_new else u'Редактирование')
        self.win_params = self.parent.get_edit_window_params(
            self.win_params, self.request, self.context)
    def create_window(self):
        """Return the window for creation or editing."""
        # 'in' instead of the deprecated dict.has_key
        assert 'create_new' in self.win_params, (
            u'может забыли вызвать родителький set_windows_params?')
        self.win = self.parent.create_edit_window(
            self.win_params['create_new'], self.request, self.context)
    def configure_window(self):
        """Make the window read-only when the user lacks edit rights."""
        # check the edit permission
        if not self.parent.has_sub_permission(
                self.request.user, self.parent.PERM_EDIT, self.request):
            exclude_list = ['close_btn', 'cancel_btn']
            self.win.make_read_only(True, exclude_list)
class ObjectSaveAction(m3_actions.Action):
    """
    Action that saves a new or edited record into the catalogue.
    ApplicationLogicException may be raised at any step.
    """
    url = '/save$'
    request = None
    context = None
    win = None
    obj = None
    create_new = None
    def create_window(self):
        """Build the window for creation or editing."""
        self.win = self.parent.create_edit_window(
            self.create_new, self.request, self.context)
    def create_obj(self):
        """Resolve the object being saved."""
        try:
            self.obj, self.create_new = self.parent.get_obj(
                self.request, self.context)
        except self.parent.get_not_found_exception():
            raise ApplicationLogicException(self.parent.MSG_DOESNOTEXISTS)
    def bind_win(self):
        """Bind the form to the request data."""
        self.win.form.bind_to_request(self.request)
    def bind_to_obj(self):
        """Bind the form data to the object."""
        self.win.form.to_object(self.obj)
    def save_obj(self):
        """Persist the object through the pack."""
        self.parent.save_row(
            self.obj, self.create_new, self.request, self.context)
    def run(self, request, context):
        """Entry point: resolve, bind and save on a copy of self."""
        new_self = copy.copy(self)
        new_self.request = request
        new_self.context = context
        new_self.create_obj()
        new_self.create_window()
        new_self.bind_win()
        new_self.bind_to_obj()
        new_self.save_obj()
        return m3_actions.OperationResult()
class ObjectRowsAction(m3_actions.Action):
    """
    Returns the row data for the catalogue grid.
    """
    url = '/rows$'
    request = None
    context = None
    query = None
    def set_query(self):
        """Build the database query."""
        self.query = self.parent.get_rows_query(self.request, self.context)
    def apply_filter(self):
        """Apply the search filter."""
        self.query = self.parent.apply_filter(
            self.query,
            self.request,
            self.context
        )
    def apply_sort_order(self):
        """Apply the sort order."""
        self.query = self.parent.apply_sort_order(
            self.query,
            self.request,
            self.context
        )
    def apply_limit(self):
        """Slice the query down to the current page."""
        if getattr(self.parent, 'allow_paging', True):
            offset = m3_actions.utils.extract_int(self.request, 'start')
            limit = m3_actions.utils.extract_int(self.request, 'limit')
        else:
            offset = limit = 0
        self.query = tools.QuerySplitter(self.query, offset, limit)
    def get_rows(self):
        """Convert the query into a list of row dicts."""
        res = []
        for obj in self.query:
            prep_obj = self.prepare_object(obj)
            if prep_obj:
                res.append(prep_obj)
            else:
                # rejected row - do not count it against the page
                self.query.skip_last()
        return res
    def prepare_object(self, obj):
        """
        Return a dict used to build the resulting row list.

        Receives an object obtained from the QuerySet; returns None when
        the parent pack's prepare_row() rejects the object.
        """
        if hasattr(self.parent, 'prepare_row'):
            obj = self.parent.prepare_row(obj, self.request, self.context)
        if obj is None:
            return None
        result_dict = {}
        def parse_data_indexes(obj, col, result):
            # split a string like "asdad[.asdasd]" into "head" and "tail":
            # "aaa" -> "aaa", None
            # "aaa.bbb.ccc" -> "aaa", "bbb.ccc"
            col, subcol = (col.split('.', 1) + [None])[:2]
            # ------- if there is a sub-index - recurse deeper
            if subcol:
                obj = getattr(obj, col, None)
                sub_dict = result.setdefault(col, {})
                parse_data_indexes(obj, subcol, sub_dict)
            else:
                # --- no sub-index - fetch the value
                # look the field up on the model
                try:
                    fld = obj._meta.get_field_by_name(col)[0]
                except AttributeError:
                    fld = None
                except IndexError:
                    fld = None
                except dj_fields.FieldDoesNotExist:
                    fld = None
                # fetch the value
                obj = getattr(obj, col, None)
                if fld:
                    try:
                        obj = obj.display()
                    except AttributeError:
                        if fld.choices:
                            # the attribute is a field that has choices:
                            # try to find the option matching the value
                            for ch in fld.choices:
                                if obj == ch[0]:
                                    obj = ch[1]
                                    break
                            else:
                                obj = u''
                else:
                    # a plain attribute (not a field) may be callable
                    if callable(obj):
                        obj = obj()
                if isinstance(obj, datetime.date):
                    obj = obj.strftime('%d.%m.%Y')
                result[col] = force_unicode(obj)
        # fill the result with data for every data index
        for col in self.get_column_data_indexes():
            parse_data_indexes(obj, col, result_dict)
        return result_dict
    def get_total_count(self):
        """Count the total number of objects."""
        return self.query.count()
    def get_column_data_indexes(self):
        """Return the list of data indexes used to build the rows JSON."""
        res = []
        for col in getattr(self.parent, 'columns', []):
            res.append(col['data_index'])
        res.append(self.parent.id_field)
        return res
    def run(self, request, context):
        """Entry point: query, filter, sort, count, page, serialize."""
        new_self = copy.copy(self)
        new_self.request = request
        new_self.context = context
        new_self.set_query()
        new_self.apply_filter()
        new_self.apply_sort_order()
        total_count = new_self.get_total_count()
        new_self.apply_limit()
        rows = new_self.get_rows()
        return m3_actions.PreJsonResult({
            'rows': rows,
            'total': total_count
        })
class ObjectDeleteAction(m3_actions.Action):
    """
    Action deleting one or more objects by the ids in the request.
    """
    url = '/delete_row$'
    request = None
    context = None
    def try_delete_objs(self):
        """
        Delete the objects, translating low-level errors into
        user-readable ApplicationLogicException messages.
        """
        try:
            self.delete_objs()
        # 'except X as e' instead of the deprecated 'except X, e'
        # (works on Python 2.6+ and stays valid on Python 3)
        except RelatedError as e:
            raise ApplicationLogicException(e.args[0])
        except Exception as e:
            if e.__class__.__name__ == 'IntegrityError':
                message = (u'Не удалось удалить элемент. '
                    u'Возможно на него есть ссылки.')
                raise ApplicationLogicException(message)
            else:
                # any unrelated error is re-raised untouched
                raise
    def delete_objs(self):
        """
        Delete every object whose id is listed in the request parameter.
        """
        ids = m3_actions.utils.extract_int_list(
            self.request, self.parent.id_param_name)
        for i in ids:
            self.delete_obj(i)
    def delete_obj(self, id_):
        """Delete a single object and hand it to the auditor."""
        obj = self.parent.delete_row(id_, self.request, self.context)
        self.audit(obj)
    def run(self, request, context):
        """Entry point: work on a copy of self to keep the action stateless."""
        new_self = copy.copy(self)
        new_self.request = request
        new_self.context = context
        new_self.try_delete_objs()
        return m3_actions.OperationResult()
class ObjectPack(m3_actions.ActionPack, ISelectablePack):
"""
Пакет с действиями, специфичными для работы с редактирование модели
"""
# Заголовок окна справочника
# если не перекрыт в потомках - берется из модели
@property
def title(self):
return unicode(
self.model._meta.verbose_name_plural or
self.model._meta.verbose_name or
repr(self.model)).capitalize()
@property
def short_name(self):
"""имя пака для поиска в контроллере
берется равным имени класса модели"""
return self.model.__name__.lower()
@property
def url(self):
return r'/%s' % self.short_name
# Список колонок состоящий из словарей
# все параметры словаря передаются в add_column
# список параметров смотри в BaseExtGridColumn
# кроме filterable - признак что колонка будет учавтовать в фильтрации
#url = u'/pack'
columns = [
{
'header':u'Наименование',
'data_index':'__unicode__',
},
# {
# 'data_index':'',
# 'width':,
# 'header':u'',
# 'filterable':True,
# 'sortable':True,
# 'sort_fields':['foo','bar'],
# },
# {
# 'header':u'Группирующая Колонка 1',
# 'columns': [
# {
# 'data_index':'school.name',
# 'width':200,
# 'header':u'Колонка 1',
# 'filterable':True
# },
# ]
# },
# {
# 'data_index':'school.parent.name',
# 'width':200,
# 'header':u'Родитель',
# 'renderer':'parent_render'
# },
]
    # flat list of the fields used for filtering
    _all_filter_fields = None
    # mapping data_index -> sort_order
    _sort_fields = None
    # Catalogue configuration (provided by the application developer)
    model = None
    # name of the field identifying an object, and of the request
    # parameter carrying it in modification/deletion requests
    @property
    def id_param_name(self):
        return '%s_id' % self.short_name
#data_index колонки, идентифицирующей объект
#этот параметр будет браться из модели и передаваться как ID в ExtDataStore
#т.е в post запросе редактирования будет лужеть {id_param_name:obj.id_field}
id_field = 'id'
# поле/метод, предоставляющее значение для отображения в DictSelectField
# ПОКА НЕ РАБОТАЕТ извлечение вложенных полей - конфликт с ExtJS
column_name_on_select = '__unicode__'
# Список дополнительных полей модели по которым будет идти поиск
# основной список береться из colums по признаку filterable
filter_fields = []
allow_paging = True
#пак будет настраивать грид на возможность редактирования
read_only = False
# Порядок сортировки элементов списка. Работает следующим образом:
# 1. Если в list_columns модели списка есть поле code, то устанавливается сортировка по возрастанию этого поля;
# 2. Если в list_columns модели списка нет поля code, но есть поле name, то устанавливается сортировка по возрастанию поля name;
# Пример list_sort_order = ['code', '-name']
list_sort_order = None
# Окно для редактирования элемента справочника:
add_window = None # Нового
edit_window = None # Уже существующего
# Флаг разрешающий/запрещающий удаление,
# если None - то удаление возможно при наличии add_window/edit_window
can_delete = None
# Группа отвечающие за отображение форм:
list_window = ui.BaseListWindow # Форма списка
select_window = ui.BaseSelectWindow # Форма выбора @UndefinedVariable
#размеры окна выбора по умолчанию
width, height = 510, 400
# права доступа для базовых справочников
PERM_EDIT = 'edit'
sub_permissions = {PERM_EDIT: u'Редактирование'}
MSG_DOESNOTEXISTS = (u'Запись не найдена в базе данных.<br/>' +
u'Возможно, она была удалена. Пожалуйста, обновите таблицу.')
def __init__(self):
super(ObjectPack, self).__init__()
# В отличие от обычных паков в этом экшены создаются самостоятельно,
# а не контроллером
# Чтобы было удобно обращаться к ним по имени
self.list_window_action = ObjectListWindowAction()
self.select_window_action = ObjectSelectWindowAction()
self.rows_action = ObjectRowsAction()
# Но привязать их все равно нужно
self.actions.extend([
self.list_window_action,
self.select_window_action,
self.rows_action
])
if self.add_window and not self.read_only:
self.new_window_action = ObjectEditWindowAction()
self.actions.append(self.new_window_action)
else:
self.new_window_action = None
if self.edit_window and not self.read_only:
self.edit_window_action = ObjectEditWindowAction()
self.actions.append(self.edit_window_action)
else:
self.edit_window_action = None
if (self.add_window or self.edit_window) and not self.read_only:
self.save_action = ObjectSaveAction()
self.actions.append(self.save_action)
else:
self.save_action = None
if self.can_delete is None:
self.can_delete = (
self.add_window or self.edit_window) and not self.read_only
if self.can_delete:
self.delete_action = ObjectDeleteAction()
self.actions.append(self.delete_action)
else:
self.delete_action = None
# построение плоского списка колонок
self._all_filter_fields = self.filter_fields
self._sort_fields = {}
def flatify(cols):
for c in cols:
sub_cols = c.get('columns', None)
if not sub_cols is None:
flatify(sub_cols)
else:
data_index = c['data_index']
field = data_index.replace('.', '__')
# поле(поля) для сортировки
if c.get('sortable', False):
sort_fields = c.get('sort_fields', field)
try:
sort_fields = list(sort_fields)
except:
sort_fields = [sort_fields]
self._sort_fields[data_index] = sort_fields
# поле для фильтрации
if c.get('filterable'):
self._all_filter_fields.append(field)
flatify(self.columns)
def replace_action(self, action_attr_name, new_action):
"""заменяет экшен в паке"""
if getattr(self, action_attr_name, None):
self.actions.remove(getattr(self, action_attr_name))
setattr(self, action_attr_name, new_action)
if getattr(self, action_attr_name):
self.actions.append(getattr(self, action_attr_name))
def get_default_action(self):
"""Воздвращает действие по умолчанию
(действие для значка на раб.столе/пункта меню)
Используется пи упрощенном встраивании в UI (add_to_XXX=True)"""
return self.list_window_action
def get_display_text(self, key, attr_name=None):
""" Получить отображаемое значение записи
(или атрибута attr_name) по ключу key """
row = self.get_row(key)
if row is not None:
try:
text = getattr(row, attr_name)
except AttributeError:
try:
text = getattr(row, self.column_name_on_select)
except AttributeError:
raise Exception(
u'Не получается получить поле %s для '
u'DictSelectField.pack = %s' % (attr_name, self))
# getattr может возвращать метод, например verbose_name
if callable(text):
return text()
else:
return unicode(text)
def get_edit_window_params(self, params, request, context):
"""
возвращает словарь параметров которые будут переданы окну редактирования
"""
return params
def get_list_window_params(self, params, request, context):
"""
возвращает словарь параметров которые будут переданы окну списка
"""
return params
def format_window_title(self, action):
"""
Форматирование заголовка окна.
Заголовок примет вид "Модель: Действие"
(например "Сотрудник: Добавление")
"""
return "%s: %s" % (self.model._meta.verbose_name.capitalize(), action)
#==================== ФУНКЦИИ ВОЗВРАЩАЮЩИЕ АДРЕСА =====================
def get_list_url(self):
"""
Возвращает адрес формы списка элементов справочника.
Используется для присвоения адресов в прикладном приложении.
"""
return self.list_window_action.get_absolute_url()
def get_select_url(self):
"""
Возвращает адрес формы списка элементов справочника.
Используется для присвоения адресов в прикладном приложении.
"""
return self.select_window_action.get_absolute_url()
def get_edit_url(self):
"""
Возвращает адрес формы редактирования элемента справочника.
"""
if self.edit_window_action:
return self.edit_window_action.get_absolute_url()
def get_rows_url(self):
"""
Возвращает адрес по которому запрашиваются элементы грида
"""
return self.rows_action.get_absolute_url()
def get_autocomplete_url(self):
""" Получить адрес для запроса элементов
подходящих введенному в поле тексту """
return self.get_rows_url()
def get_not_found_exception(self):
"""возвращает Группа исключения 'не найден'"""
return self.model.DoesNotExist
def configure_grid(self, grid):
"""
конфигурирования grid для работы с этим паком
создает колонки и задает экшены
"""
get_url = lambda x: x.get_absolute_url() if x else None
grid.url_data = get_url(self.rows_action)
if not self.read_only:
grid.url_new = get_url(self.new_window_action)
grid.url_edit = get_url(self.edit_window_action)
grid.url_delete = get_url(self.delete_action)
for col in self.columns:
temp = {}
temp.update(col)
if temp.has_key('filterable'):
temp.pop('filterable')
grid.add_column(**temp)
#TODO перенести в Группа грида сделать метод add_search_field
if self.get_filter_fields():
#поиск по гриду если есть по чему искать
grid.top_bar.search_field = ExtSearchField(
empty_text=u'Поиск', width=200, component_for_search=grid)
grid.top_bar.add_fill()
grid.top_bar.items.append(grid.top_bar.search_field)
grid.row_id_name = self.id_param_name
grid.allow_paging = self.allow_paging
grid.store.remote_sort = self.allow_paging
def create_edit_window(self, create_new, request, context):
"""
получить окно редактирования / создания объекта
"""
if create_new:
return self.add_window()
else:
return self.edit_window()
def create_list_window(self, is_select_mode, request, context):
"""
получить окно списка / выбора объектов
is_select_mode - режим показа окна (True -выбор, False -список),
"""
if is_select_mode:
return self.select_window()
else:
return self.list_window()
def get_rows_query(self, request, context):
"""
возвращает выборку из БД для получения списка данных
"""
#q = super(,self).get_rows_query(request, context)
#return q
return self.model.objects.all().select_related()
def get_filter_fields(self, request=None, context=None):
"""Возвращает список data_index колонок по которым будет
производиться поиск"""
return self._all_filter_fields[:]
def get_sort_order(self, data_index, reverse=False):
"""Возвращает ключи сортировки для указанного data_index"""
sort_order = self._sort_fields[data_index]
if reverse:
sort_order = ['-%s' % s for s in sort_order]
return sort_order
def apply_filter(self, query, request, context):
"""Возвращает переданную выборку
отфильторованной по параметрам запроса"""
return m3_actions.utils.apply_search_filter(
query,
request.REQUEST.get('filter'),
self.get_filter_fields()
)
def apply_sort_order(self, query, request, context):
"""Возвращает переданную выборку
отсортированной по параметрам запроса"""
sorting_key = request.REQUEST.get('sort')
if sorting_key:
reverse = request.REQUEST.get('dir') == 'DESC'
sort_order = self.get_sort_order(
data_index=sorting_key,
reverse=reverse)
query = query.order_by(*sort_order)
return query
def prepare_row(self, obj, request, context):
"""
установка дополнительный атрибутов объекта
перед возвратом json'a строк грида
или может вернуть proxy_object
obj из for obj in query из get_rows_query
"""
return obj
def get_row(self, row_id):
"""
функция возвращает объект по иди
используется в dictselectfield'ax
Если id нет, значит нужно создать новый объект
"""
if row_id == 0:
record = self.model()
else:
record = self.model.objects.get(id=row_id)
return record
def get_obj(self, request, context):
"""
возвращает tuple (объет, create_new)
для создания, редатирования записи
"""
obj_id = m3_actions.utils.extract_int(request, self.id_param_name)
create_new = (obj_id == 0)
record = self.get_row(obj_id)
return record, create_new
def save_row(self, obj, create_new, request, context):
"""
сохраняет объект
при необходимости делается raise ApplicationLogicException
"""
obj.save()
def delete_row(self, obj_id, request, context):
"""
удаление объекта
если вернет модель то она отдасться аудитору
"""
obj = self.model.objects.get(id=obj_id)
result = True
if hasattr(obj, 'safe_delete'):
result = obj.safe_delete()
else:
result = safe_delete(obj)
#в случе успеха safe_delete возвращет true
if not result:
raise RelatedError(u'Не удалось удалить элемент %s. '
u'Возможно на него есть ссылки.' % obj_id)
return obj
#-----------------------------------------------------------------------
# По умолчанию ни меню ни десктоп не расширяется
# add_to_desktop = True
# add_to_menu = True
#
# Если методы extend_menu/extend_desktop не реализованы,
# меню будет расширяться на основе title и get_default_action
#
# Методы extend_X приоритетны
# def extend_menu(self, menu):
# """
# Расширение главного меню.
#
# Возвращаемый результат должен иметь вид:
# return (
# # добавление пунктов в меню "справочники"
# menu.dicts(
# menu.Item(u'Dict 1', self),
# menu.SubMenu(u'Dict SubMenu',
# menu.Item(u'Dict 2', self.some_action),
# ),
# ),
#
# # добавление пунктов в меню "реестры"
# menu.registries(
# menu.Item(u'Reg 1'),
# menu.SubMenu(u'Regs SubMenu',
# menu.Item(u'Reg 2'),
# ),
# ),
#
# # добавление пунктов в меню "администрирование"
# menu.administry(
# menu.Item(u'Admin item 1')
# ),
#
# # добавление пунктов в "корень" меню
# menu.Item(name=u'item 1', self.some_action),
#
# # добавление подменю в "корень" меню
# menu.SubMenu(u'SubMenu',
# menu.Item(u'Item 2', self.some_action),
# menu.SubMenu(u'SubSubMenu',
# menu.Item(u'Item 3', self.some_action),
# ),
# ),
# )
#
# любой из элементов можно отключить вернув вместо него None.
# например:
# menu.Item(u'Name', url='/') if some_condition else None
#
# Пустые подменю автоматически "схлопываются" (не видны в Главном Меню)
# """
# pass
#
#
# def extend_desktop(self, desk):
# """
# Расширение Рабочего Стола.
# Результат должен иметь вид:
# return (
# desk.Item(u'Ярлык 1', pack=self.list_action),
# ...
# )
# любой из элементов можно отключить вернув вместо него None.
# например:
# desk.Item(u'Name', pack=self) if some_condition else None
# """
# pass
#===============================================================================
# SelectorWindowAction
#===============================================================================
class SelectorWindowAction(m3_actions.Action):
    """
    Action that shows a selection window wired to a custom callback
    action processing the selected items - e.g. multi-selection of
    catalog entries in order to link other records to them.
    """
    url = r'/selector_window'
    # if True, the window allows selecting several items at once
    multi_select = True
    # url of the action that processes the selection result
    callback_url = None
    # pack whose model instances are being selected
    data_pack = None
    def configure_action(self, request, context):
        """
        Action configuration hook. Assign data_pack and callback here.
        """
        pass
    def configure_context(self, request, context):
        """
        Build the context for the selection window.
        The returned value must be an ActionContext instance.
        """
        return m3_actions.ActionContext()
    def configure_window(self, win, request, context):
        """
        Configure the selection window itself; must return the window.
        """
        return win
    def run(self, request, context):
        """
        Execute the action.
        Do not override unless absolutely necessary!
        """
        # work on a copy so per-request configuration does not
        # mutate the shared action instance
        new_self = copy.copy(self)
        new_self.configure_action(request, context)
        assert new_self.data_pack, u'Не задан ActionPack-источник данных!'
        assert new_self.callback_url, u'Не задан Callback!'
        new_context = new_self.configure_context(request, context)
        # invoke the action that renders the selection window
        win_result = new_self.data_pack.select_window_action.run(
            request, context)
        win = getattr(win_result, 'data', None)
        if not win:
            # no window produced - propagate the raw result as-is
            return win_result
        if not isinstance(win, ui.BaseSelectWindow):
            raise ApplicationLogicException(
                u'Класс окна выбора должен быть потомком BaseSelectWindow!')
        win = new_self.configure_window(win, request, context)
        win.callback_url = new_self.callback_url
        if new_self.multi_select:
            win.enable_multi_select()
        return m3_actions.ExtUIScriptResult(win, new_context)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.