repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
mitsuhiko/pip | docs/conf.py | 1 | 6800 | # -*- coding: utf-8 -*-
#
# pip documentation build configuration file, created by
# sphinx-quickstart on Tue Apr 22 22:08:49 2008
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys

# True when the docs are being built on Read the Docs, which exports
# this environment variable into the build.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.pardir))
#sys.path.append(os.path.join(os.path.dirname(__file__), '../'))

# -- General configuration -----------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
#extensions = ['sphinx.ext.autodoc']
extensions = ['docs.pipext']

# Add any paths that contain templates here, relative to this directory.
templates_path = []

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'pip'
copyright = '2008-2014, PyPA'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
try:
    from pip import __version__
    # The short X.Y version.
    version = '.'.join(__version__.split('.')[:2])
    # The full version, including alpha/beta/rc tags.
    release = __version__
except ImportError:
    # pip itself is not importable during this build; use a placeholder
    version = release = 'dev'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Prefer the Read the Docs theme for local builds when it is installed;
# on RTD itself the theme is applied by the platform, so skip this there.
if not on_rtd:
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
    except ImportError:
        # theme not installed locally; fall back to the Sphinx default
        pass

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '_static/piplogo.png'

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = 'favicon.png'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
html_use_modindex = False

# If false, no index is generated.
html_use_index = False

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'pipdocs'

# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'pip.tex', u'pip Documentation',
     u'The pip developers', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
| mit |
inodb/cbioportal | core/src/main/scripts/importer/validateData.py | 2 | 172331 | #!/usr/bin/env python2.7
#
# Copyright (c) 2016 The Hyve B.V.
# This code is licensed under the GNU Affero General Public License (AGPL),
# version 3, or (at your option) any later version.
#
#
# This file is part of cBioPortal.
#
# cBioPortal is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Data validation script - validate files before import into portal.
Run with the command line option --help for usage information.
"""
# imports
import sys
import os
import logging.handlers
from collections import OrderedDict
import argparse
import re
import csv
import itertools
import requests
import json
import xml.etree.ElementTree as ET
import cbioportal_common
# ------------------------------------------------------------------------------
# globals

# study-specific globals; populated while the study's files are validated
DEFINED_SAMPLE_IDS = None
DEFINED_SAMPLE_ATTRIBUTES = None
PATIENTS_WITH_SAMPLES = None
DEFINED_CANCER_TYPES = None

# GSVA globals
GSVA_SAMPLE_IDS = None
GSVA_GENESET_IDS = None

# ----------------------------------------------------------------------------
# Maps each meta file type to the name of the Validator subclass (defined
# below in this module) that validates the corresponding data file.
VALIDATOR_IDS = {
    cbioportal_common.MetaFileTypes.CNA:'CNAValidator',
    cbioportal_common.MetaFileTypes.CNA_LOG2:'ContinuousValuesValidator',
    cbioportal_common.MetaFileTypes.CNA_CONTINUOUS:'ContinuousValuesValidator',
    cbioportal_common.MetaFileTypes.EXPRESSION:'ContinuousValuesValidator',
    cbioportal_common.MetaFileTypes.METHYLATION:'ContinuousValuesValidator',
    cbioportal_common.MetaFileTypes.MUTATION:'MutationsExtendedValidator',
    cbioportal_common.MetaFileTypes.CANCER_TYPE:'CancerTypeValidator',
    cbioportal_common.MetaFileTypes.SAMPLE_ATTRIBUTES:'SampleClinicalValidator',
    cbioportal_common.MetaFileTypes.PATIENT_ATTRIBUTES:'PatientClinicalValidator',
    cbioportal_common.MetaFileTypes.SEG:'SegValidator',
    cbioportal_common.MetaFileTypes.FUSION:'FusionValidator',
    cbioportal_common.MetaFileTypes.PROTEIN:'ProteinLevelValidator',
    cbioportal_common.MetaFileTypes.GISTIC_GENES: 'GisticGenesValidator',
    cbioportal_common.MetaFileTypes.TIMELINE:'TimelineValidator',
    cbioportal_common.MetaFileTypes.MUTATION_SIGNIFICANCE:'MutationSignificanceValidator',
    cbioportal_common.MetaFileTypes.GENE_PANEL_MATRIX:'GenePanelMatrixValidator',
    cbioportal_common.MetaFileTypes.GSVA_SCORES:'GsvaScoreValidator',
    cbioportal_common.MetaFileTypes.GSVA_PVALUES:'GsvaPvalueValidator'
}
# ----------------------------------------------------------------------------
# class definitions
class MaxLevelTrackingHandler(logging.Handler):
    """Handler that does nothing but track the maximum msg level emitted."""

    def __init__(self):
        """Start with no messages seen (level NOTSET)."""
        super(MaxLevelTrackingHandler, self).__init__()
        self.max_level = logging.NOTSET

    def emit(self, record):
        """Raise the tracked maximum if this record's level exceeds it."""
        if record.levelno > self.max_level:
            self.max_level = record.levelno

    def get_exit_status(self):
        """Return an exit status for the validator script based on max_level."""
        if self.max_level <= logging.INFO:
            return 0
        # exact matches for the two expected severities; any other level
        # above INFO is treated as an internal failure
        status_by_level = {logging.WARNING: 3, logging.ERROR: 1}
        return status_by_level.get(self.max_level, 2)
class LineCountHandler(logging.Handler):
    """Handler that does nothing but track the number of lines with error and warnings."""

    def __init__(self):
        """Start with empty sets of flagged line numbers."""
        super(LineCountHandler, self).__init__()
        self.warning_lines = set()
        self.error_lines = set()

    def emit(self, record):
        """Add the record's line number to the set matching its severity."""
        # records without a line_number are file-level messages; ignore them
        if not hasattr(record, 'line_number'):
            return
        severity = record.levelno
        if severity == logging.WARNING:
            self.warning_lines.add(record.line_number)
        if severity == logging.ERROR:
            self.error_lines.add(record.line_number)

    def get_nr_lines_with_error(self):
        """Return the number of lines with an error."""
        return len(self.error_lines)

    def get_nr_lines_with_warning(self):
        """Return the number of lines with an warning."""
        return len(self.warning_lines)

    def get_nr_lines_with_issue(self):
        """Return the number of lines with an error or warning."""
        return len(self.error_lines | self.warning_lines)
class Jinja2HtmlHandler(logging.handlers.BufferingHandler):
    """Logging handler that formats aggregated HTML reports using Jinja2."""

    def __init__(self, study_dir, output_filename, cbio_version, *args, **kwargs):
        """Set study directory name, output filename and buffer size.

        Remaining positional/keyword arguments (notably the buffer
        `capacity`) are forwarded to BufferingHandler.
        """
        # set our own attributes before calling the superclass initializer,
        # so they exist by the time the handler machinery is wired up
        self.study_dir = study_dir
        self.output_filename = output_filename
        self.cbio_version = cbio_version
        self.max_level = logging.NOTSET
        self.closed = False
        # get the directory name of the currently running script,
        # resolving any symlinks
        self.template_dir = os.path.dirname(os.path.realpath(__file__))
        super(Jinja2HtmlHandler, self).__init__(*args, **kwargs)

    def emit(self, record):
        """Buffer a message if the buffer is not full."""
        # track the maximum severity even for records that get dropped
        self.max_level = max(self.max_level, record.levelno)
        # once the buffer reaches capacity, later records are discarded,
        # keeping the first messages encountered for the report
        if len(self.buffer) < self.capacity:
            return super(Jinja2HtmlHandler, self).emit(record)

    def flush(self):
        """Do nothing; emit() caps the buffer and close() renders output."""
        pass

    def shouldFlush(self, record):
        """Never flush; emit() caps the buffer and close() renders output."""
        return False

    def generateHtml(self):
        """Render the HTML page for the current content in self.buffer """
        # require Jinja2 only if it is actually used
        import jinja2
        j_env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(self.template_dir),
            # trim whitespace around Jinja2 operators
            trim_blocks=True,
            lstrip_blocks=True)
        # refer to this function so that it can be used in the template:
        j_env.filters['os.path.relpath'] = os.path.relpath
        template = j_env.get_template('validation_report_template.html.jinja')
        # pylint falsely infers template to be a string -- trust me, it's not
        doc = template.render(  # pylint: disable=no-member
            study_dir=self.study_dir,
            cbio_version=self.cbio_version,
            record_list=self.buffer,
            max_level=logging.getLevelName(self.max_level))
        with open(self.output_filename, 'w') as f:
            f.write(doc)
class ErrorFileFormatter(cbioportal_common.ValidationMessageFormatter):
    """Fasta-like formatter listing lines on which error messages occurred."""

    def __init__(self, study_dir):
        """Initialize a logging Formatter with an appropriate format string."""
        fmt_string = ('>%(rel_filename)s | %(levelname)s: %(message)s'
                      '\n%(line_string)s')
        super(ErrorFileFormatter, self).__init__(fmt_string)
        self.study_dir = study_dir

    def format(self, record):
        """Aggregate line numbers to a line_string and format the record."""
        record.line_string = self.format_aggregated(
            record,
            'line_number',
            single_fmt='%d',
            multiple_fmt='%s',
            join_string=',',
            max_join=None,
            optional=False)
        # report file paths relative to the study directory being validated
        record.rel_filename = os.path.relpath(record.filename_, self.study_dir)
        return super(ErrorFileFormatter, self).format(record)
class LineMessageFilter(logging.Filter):
    """Filter that selects only validation messages about a line in a file."""

    def filter(self, record):
        """Return 1 when the record names both a file and a line, else 0."""
        has_file = hasattr(record, 'filename_')
        has_line = hasattr(record, 'line_number')
        return int(has_file and has_line)
class CombiningLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter that combines its own context info with that in calls."""

    def process(self, msg, kwargs):
        """Merge the call's 'extra' mapping over this adapter's own context."""
        merged = dict(self.extra)
        # values supplied at the call site win over the adapter's defaults
        merged.update(kwargs.get('extra', {}))
        kwargs['extra'] = merged
        return msg, kwargs
class PortalInstance(object):
    """Represent a portal instance, storing the data needed for validation.

    This holds a number of dictionaries representing the particular
    datatypes queried from the portal, each of which may be None
    if the checks are to be skipped.
    """

    def __init__(self, cancer_type_dict, hugo_entrez_map, alias_entrez_map, geneset_id_list):
        """Represent a portal instance with the given dictionaries."""
        self.cancer_type_dict = cancer_type_dict
        self.hugo_entrez_map = hugo_entrez_map
        self.alias_entrez_map = alias_entrez_map
        self.geneset_id_list = geneset_id_list
        # flatten both symbol->Entrez maps into one set of known Entrez ids
        self.entrez_set = set()
        for symbol_map in (hugo_entrez_map, alias_entrez_map):
            if symbol_map is None:
                continue
            for entrez_ids in symbol_map.values():
                self.entrez_set.update(entrez_ids)
        # defaults for genome version and species
        self.species = 'human'
        self.ncbi_build = '37'
        self.genome_build = 'hg19'

    def load_genome_info(self, properties_filename):
        """Retrieves the species and genome information from portal.properties."""
        # property key -> attribute on this instance
        attr_for_key = {'species': 'species',
                        'ncbi.build': 'ncbi_build',
                        'ucsc.build': 'genome_build'}
        with open(properties_filename, 'r') as properties_file:
            for raw_line in properties_file:
                stripped = raw_line.strip()
                # skip comments and lines that are not key=value pairs
                if stripped.startswith('#') or '=' not in stripped:
                    continue
                key, value = stripped.split('=', 1)
                if key in attr_for_key:
                    setattr(self, attr_for_key[key], value)
class Validator(object):
"""Abstract validator class for tab-delimited data files.
Subclassed by validators for specific data file types, which
should define a 'REQUIRED_HEADERS' attribute listing the required
column headers and a `REQUIRE_COLUMN_ORDER` boolean stating
whether their position is significant. Unless ALLOW_BLANKS is
set to True, empty cells in lines below the column header will
be reported as errors.
The methods `processTopLines`, `checkHeader`, `checkLine` and `onComplete`
may be overridden (calling their superclass methods) to perform any
appropriate validation tasks. The superclass `checkHeader` method sets
self.cols to the list of column names found in the header of the file
and self.numCols to the number of columns.
"""
REQUIRED_HEADERS = []
REQUIRE_COLUMN_ORDER = True
ALLOW_BLANKS = False
def __init__(self, study_dir, meta_dict, portal_instance, logger, relaxed_mode):
"""Initialize a validator for a particular data file.
:param study_dir: the path at which the study files can be found
:param meta_dict: dictionary of fields found in corresponding meta file
(such as stable id and data file name)
:param portal_instance: a PortalInstance object for which to validate
:param logger: logger instance for writing the log messages
:param relaxed_mode: relaxes validation of headerless clinical data to
prevent fast-failing
"""
self.filename = os.path.join(study_dir, meta_dict['data_filename'])
self.filenameShort = os.path.basename(self.filename)
self.line_number = 0
self.cols = []
self.numCols = 0
self.newlines = ('',)
self.studyId = ''
self.headerWritten = False
# This one is set to True if file could be parsed/read until the end (happens in onComplete)
self.fileCouldBeParsed = False
self.portal = portal_instance
self.logger = CombiningLoggerAdapter(
logger,
extra={'filename_': self.filename})
self.line_count_handler = None
self.meta_dict = meta_dict
self.relaxed_mode = relaxed_mode
self.fill_in_attr_defs = False
def validate(self):
"""Validate the data file."""
# add a handler to keep track of the number of lines with errors
self.line_count_handler = LineCountHandler()
self.logger.logger.addHandler(self.line_count_handler)
try:
# actually validate the data file
self._validate_file()
finally:
self.logger.logger.removeHandler(self.line_count_handler)
def _validate_file(self):
"""Read through the data file and validate as much as can be parsed."""
self.logger.debug('Starting validation of file')
try:
opened_file = open(self.filename, 'rU')
except IOError:
self.logger.error('File could not be opened')
return
with opened_file as data_file:
# parse any block of start-of-file comment lines and the tsv header
top_comments = []
line_number = 0
for line_number, line in enumerate(data_file,
start=line_number + 1):
self.line_number = line_number
if line.startswith('#'):
top_comments.append(line)
else:
header_line = line
# end of the file's header
break
# if the loop wasn't broken by a non-commented line
else:
self.logger.error('No column header or data found in file',
extra={'line_number': self.line_number})
return
# parse start-of-file comment lines, if any
if not self.processTopLines(top_comments):
self.logger.error(
'Invalid header comments, file cannot be parsed')
if not self.relaxed_mode:
return
else:
self.logger.info('Ignoring missing or invalid header comments. '
'Continuing with validation...')
self.fill_in_attr_defs = True
# read five data lines to detect quotes in the tsv file
first_data_lines = []
for i, line in enumerate(data_file):
first_data_lines.append(line)
if i >= 4:
break
sample_content = header_line + ''.join(first_data_lines)
try:
dialect = csv.Sniffer().sniff(sample_content, delimiters='\t')
except csv.Error:
self.logger.error('Not a valid tab separated file. Check if all lines have the same number of columns and if all separators are tabs.')
return
# sniffer assumes " if no quote character exists
if dialect.quotechar == '"' and not (
dialect.delimiter + '"' in sample_content or
'"' + dialect.delimiter in sample_content):
dialect.quoting = csv.QUOTE_NONE
if not self._checkTsvDialect(dialect):
self.logger.error(
'Invalid file format, file cannot be parsed')
return
# parse the first non-commented line as the tsv header
header_cols = csv.reader(
[header_line],
delimiter='\t',
quoting=csv.QUOTE_NONE,
strict=True).next()
if self.checkHeader(header_cols) > 0:
if not self.relaxed_mode:
self.logger.error(
'Invalid column header, file cannot be parsed')
return
else:
self.logger.warning('Ignoring invalid column header. '
'Continuing with validation...')
# read through the data lines of the file
csvreader = csv.reader(itertools.chain(first_data_lines,
data_file),
delimiter='\t',
quoting=csv.QUOTE_NONE,
strict=True)
for line_number, fields in enumerate(csvreader,
start=line_number + 1):
self.line_number = line_number
if all(x.strip() == '' for x in fields):
self.logger.error(
'Blank line',
extra={'line_number': self.line_number})
elif fields[0].startswith('#'):
self.logger.error(
"Data line starting with '#' skipped",
extra={'line_number': self.line_number})
else:
self.checkLine(fields)
# (tuple of) string(s) of the newlines read (for 'rU' mode files)
self.newlines = data_file.newlines
# after the entire file has been read
self.onComplete()
def onComplete(self):
"""Perform final validations after all lines have been checked.
Overriding methods should call this superclass method *after* their own
validations, as it logs the message that validation was completed.
"""
self._checkLineBreaks()
# finalize
self.fileCouldBeParsed = True
self.logger.info('Validation of file complete')
self.logger.info('Read %d lines. '
'Lines with warning: %d. Lines with error: %d',
self.line_number,
self.line_count_handler.get_nr_lines_with_warning(),
self.line_count_handler.get_nr_lines_with_error())
def processTopLines(self, line_list):
"""Hook to validate any list of comment lines above the TSV header.
Return False if these lines are invalid and the file cannot be
parsed, True otherwise.
"""
return True
def checkHeader(self, cols):
"""Check that the header has the correct items and set self.cols.
:param cols: The list of column headers to be validated
:return the number of errors found.
"""
num_errors = 0
# TODO check for end-of-line whitespace
self.cols = cols
self.numCols = len(self.cols)
num_errors += self._checkRepeatedColumns()
if self.REQUIRE_COLUMN_ORDER:
num_errors += self._checkOrderedRequiredColumns()
else:
num_errors += self._checkUnorderedRequiredColumns()
return num_errors
def checkLine(self, data):
"""Check data values from a line after the file header.
:param data: The list of values parsed from the line
"""
if data[:self.numCols] == self.cols:
if self.logger.isEnabledFor(logging.ERROR):
self.logger.error(
'Repeated header',
extra={'line_number': self.line_number,
'cause': ', '.join(data[:self.numCols])})
line_col_count = len(data)
if line_col_count != self.numCols:
self.logger.error('Expected %d columns based on header, '
'found %d',
self.numCols, line_col_count,
extra={'line_number': self.line_number})
if not self.ALLOW_BLANKS:
for col_index, col_name in enumerate(self.cols):
if col_index < line_col_count and data[col_index].strip() == '':
self.logger.error(
'Blank cell found in column',
extra={'line_number': self.line_number,
'column_number': col_index + 1,
'cause': "'%s' (in column '%s')" % (
data[col_index], col_name)})
def _checkUnorderedRequiredColumns(self):
"""Check for missing column headers, independent of their position.
Return the number of errors encountered.
"""
num_errors = 0
for col_name in self.REQUIRED_HEADERS:
if col_name not in self.cols:
self.logger.error(
'Missing column: %s',
col_name,
extra={'line_number': self.line_number,
'cause': ', '.join(
self.cols[:len(self.REQUIRED_HEADERS)]) +
', (...)'})
num_errors += 1
return num_errors
def _checkOrderedRequiredColumns(self):
"""Check if the column header for each position is correct.
Return the number of errors encountered.
"""
num_errors = 0
for col_index, col_name in enumerate(self.REQUIRED_HEADERS):
if col_index >= self.numCols:
num_errors += 1
self.logger.error(
"Invalid header: expected '%s' in column %d,"
" found end of line",
col_name, col_index + 1,
extra={'line_number': self.line_number})
elif self.cols[col_index] != col_name:
num_errors += 1
self.logger.error(
"Invalid header: expected '%s' in this column",
col_name,
extra={'line_number': self.line_number,
'column_number': col_index + 1,
'cause': self.cols[col_index]})
return num_errors
def _checkTsvDialect(self, dialect):
"""Check if a csv.Dialect subclass describes a valid cBio data file."""
if dialect.delimiter != '\t':
self.logger.error('Not a tab-delimited file',
extra={'cause': 'delimiters of type: %s' %
repr(dialect.delimiter)})
return False
if dialect.quoting != csv.QUOTE_NONE:
self.logger.warning('Found quotation marks around field(s) in the first rows of the file. '
'Fields and values surrounded by quotation marks might be incorrectly '
'loaded (i.e. with the quotation marks included as part of the value)',
extra={'cause': 'quotation marks of type: [%s] ' %
repr(dialect.quotechar)[1:-1]})
return True
def _checkLineBreaks(self):
"""Checks line breaks, reports to user."""
if self.newlines not in("\r\n","\r","\n"):
self.logger.error('No line breaks recognized in file',
extra={'cause': repr(self.newlines)[1:-1]})
def checkInt(self, value):
"""Checks if a value is an integer."""
try:
int(value)
return True
except ValueError:
return False
def checkFloat(self, value):
"""Check if a string represents a floating-point numeral."""
try:
float(value)
return True
except ValueError:
return False
def checkSampleId(self, sample_id, column_number):
"""Check whether a sample id is defined, logging an error if not.
Return True if the sample id was valid, False otherwise.
"""
if sample_id not in DEFINED_SAMPLE_IDS:
self.logger.error(
'Sample ID not defined in clinical file',
extra={'line_number': self.line_number,
'column_number': column_number,
'cause': sample_id})
return False
return True
# TODO: let this function know the column numbers for logging messages
def checkGeneIdentification(self, gene_symbol=None, entrez_id=None):
"""Attempt to resolve a symbol-Entrez pair, logging any issues.
It will fail to resolve in these cases:
1. (error) Entrez gene id and gene symbol are both missing (None)
If self.portal.hugo_entrez_map and self.portal.alias_entrez_map are
defined:
2. (warning) Only one of the identifiers is supplied, and its value
cannot be found in the portal
3. (error) The gene symbol maps to multiple Entrez gene ids
4. (error) The gene alias maps to multiple Entrez gene ids
Furthermore, the function logs a warning in the following cases, if
self.portal.hugo_entrez_map and self.portal.alias_entrez_map are
defined:
1. (warning) Entrez gene id exists, but the gene symbol specified is not
known to the portal
2. (warning) Gene symbol and Entrez gene id do not match
3. (warning) The Hugo gene symbol maps to a single Entrez gene id,
but is also associated to other genes as an alias.
Return the Entrez gene id (or gene symbol if the PortalInstance maps are
undefined and the mapping step is skipped), or None if no gene could be
unambiguously identified.
"""
# set to upper, as both maps contain symbols in upper
if gene_symbol is not None:
gene_symbol = gene_symbol.upper()
if entrez_id is not None:
try:
entrez_as_int = int(entrez_id)
except ValueError:
entrez_as_int = None
if entrez_as_int is None:
self.logger.warning(
'Entrez gene id is not an integer. '
'This record will not be loaded.',
extra={'line_number': self.line_number,
'cause': entrez_id})
return None
elif entrez_as_int <= 0:
self.logger.error(
'Entrez gene id is non-positive.',
extra={'line_number': self.line_number,
'cause': entrez_id})
return None
# check whether at least one is present
if entrez_id is None and gene_symbol is None:
self.logger.error(
'No Entrez gene id or gene symbol provided for gene.',
extra={'line_number': self.line_number})
return None
# if portal information is absent, skip the rest of the checks
if (self.portal.hugo_entrez_map is None or
self.portal.alias_entrez_map is None):
return entrez_id or gene_symbol
# try to use the portal maps to resolve to a single Entrez gene id
identified_entrez_id = None
if entrez_id is not None:
if entrez_id in self.portal.entrez_set:
# set the value to be returned
identified_entrez_id = entrez_id
# some warnings if the gene symbol is specified too
if gene_symbol is not None:
if (gene_symbol not in self.portal.hugo_entrez_map and
gene_symbol not in self.portal.alias_entrez_map):
self.logger.warning(
'Entrez gene id exists, but gene symbol specified '
'is not known to the cBioPortal instance. The '
'gene symbol will be ignored. Might be '
'wrong mapping, new or deprecated gene symbol.',
extra={'line_number': self.line_number,
'cause': gene_symbol})
elif entrez_id not in itertools.chain(
self.portal.hugo_entrez_map.get(gene_symbol, []),
self.portal.alias_entrez_map.get(gene_symbol, [])):
self.logger.warning(
'Entrez gene id and gene symbol do not match. '
'The gene symbol will be ignored. Might be '
'wrong mapping or recycled gene symbol.',
extra={'line_number': self.line_number,
'cause': '(%s, %s)' % (gene_symbol,
entrez_id)})
else:
self.logger.warning(
'Entrez gene id not known to the cBioPortal instance. '
'This record will not be loaded. Might be new or deprecated '
'Entrez gene id.',
extra={'line_number': self.line_number,
'cause': entrez_id})
# no Entrez gene id, only a gene symbol
elif gene_symbol is not None:
# count canonical gene symbols and aliases that map this symbol to
# a gene
num_entrezs_for_hugo = len(
self.portal.hugo_entrez_map.get(gene_symbol, []))
num_entrezs_for_alias = len(
self.portal.alias_entrez_map.get(gene_symbol, []))
if num_entrezs_for_hugo == 1:
# set the value to be returned
identified_entrez_id = \
self.portal.hugo_entrez_map[gene_symbol][0]
# check if there are other *different* Entrez gene ids associated
# with this gene symbol
other_entrez_ids_in_aliases = [
x for x in
self.portal.alias_entrez_map.get(gene_symbol, []) if
x != identified_entrez_id]
if len(other_entrez_ids_in_aliases) >= 1:
# give a warning, as the symbol may have been used to refer
# to different entrez_ids over time
self.logger.warning(
'Gene symbol maps to a single Entrez gene id, '
'but is also associated to other genes as an '
'alias. The system will assume the official gene '
'symbol to be the intended one.',
extra={'line_number': self.line_number,
'cause': gene_symbol})
elif num_entrezs_for_hugo > 1:
# nb: this should actually never occur, see also https://github.com/cBioPortal/cbioportal/issues/799
self.logger.error(
'Gene symbol maps to multiple Entrez gene ids (%s), '
'please specify which one you mean.',
'/'.join(self.portal.hugo_entrez_map[gene_symbol]),
extra={'line_number': self.line_number,
'cause': gene_symbol})
# no canonical symbol, but a single unambiguous alias
elif num_entrezs_for_alias == 1:
# set the value to be returned
identified_entrez_id = \
self.portal.alias_entrez_map[gene_symbol][0]
# no canonical symbol, and multiple different aliases
elif num_entrezs_for_alias > 1:
# Loader deals with this, so give warning
# TODO: move matched IDs out of the message for collapsing
self.logger.warning(
'Gene alias maps to multiple Entrez gene ids (%s), '
'please specify which one you mean or choose a non-ambiguous symbol.',
'/'.join(self.portal.alias_entrez_map[gene_symbol]),
extra={'line_number': self.line_number,
'cause': gene_symbol})
# no canonical symbol and no alias
else:
self.logger.warning(
'Gene symbol not known to the cBioPortal instance. This '
'record will not be loaded.',
extra={'line_number': self.line_number,
'cause': gene_symbol})
return identified_entrez_id
def checkDriverAnnotationColumn(self, driver_value=None, driver_annotation=None):
    """Ensures that cbp_driver_annotation is filled when the cbp_driver column
    contains "Putative_Driver" or "Putative_Passenger".

    Logs an error (does not abort validation) when the annotation is
    missing for an annotated driver/passenger call; always returns None.
    """
    # NOTE: this used to compare with `is`, which tests object identity,
    # not string equality. It only worked by accident for interned
    # literals; strings parsed from a data file are generally not
    # interned, so use a membership test instead.
    if driver_annotation is None and \
            driver_value in ('Putative_Driver', 'Putative_Passenger'):
        self.logger.error(
            'This line does not contain a value '
            'for cbp_driver_annotation, and cbp_driver '
            'contains "Putative_Driver" or '
            '"Putative_Passenger".',
            extra={'line_number': self.line_number,
                   'cause': driver_annotation})
    return None
def checkDriverTiersColumnsValues(self, driver_tiers_value=None, driver_tiers_annotation=None):
    """Ensures that there are no mutations with one multiclass column filled and
    the other empty.

    The cbp_driver_tiers and cbp_driver_tiers_annotation columns must be
    filled in tandem; log an error for each violation. Always returns None.
    """
    if driver_tiers_value is None and driver_tiers_annotation is not None:
        self.logger.error(
            'This line has no value for cbp_driver_tiers '
            'and a value for cbp_driver_tiers_annotation. '
            'Please, fill the cbp_driver_tiers column.',
            extra={'line_number': self.line_number,
                   'cause': driver_tiers_value})
    if driver_tiers_annotation is None and driver_tiers_value is not None:
        # message fixed: it previously named "cbp_driver_annotation",
        # but this check concerns cbp_driver_tiers_annotation
        self.logger.error(
            'This line has no value for cbp_driver_tiers_annotation '
            'and a value for cbp_driver_tiers. Please, fill '
            'the annotation column.',
            extra={'line_number': self.line_number,
                   'cause': driver_tiers_annotation})
    return None
def _checkRepeatedColumns(self):
num_errors = 0
seen = set()
for col_num, col in enumerate(self.cols):
if col not in seen:
seen.add(col)
else:
num_errors += 1
self.logger.error('Repeated column header',
extra={'line_number': self.line_number,
'column_number': col_num,
'cause': col})
return num_errors
class FeaturewiseFileValidator(Validator):

    """Validates a file with rows for features and columns for ids and samples.

    The first few columns (collectively defined in the class attributes
    REQUIRED_HEADERS and OPTIONAL_HEADERS) identify the features
    (e.g. genes) and the rest correspond to the samples.

    Subclasses should override the parseFeatureColumns(self, nonsample_col_vals)
    method to check the non-sample columns preceding them, returning the unique
    id of the feature. The method can find the names of the columns recognized
    in the file in self.nonsample_cols. checkValue(self, value, col_index)
    should also be overridden to check a value in a sample column.
    """

    OPTIONAL_HEADERS = []
    REQUIRE_COLUMN_ORDER = False

    def __init__(self, *args, **kwargs):
        super(FeaturewiseFileValidator, self).__init__(*args, **kwargs)
        # names of the feature (non-sample) columns found in the header
        self.nonsample_cols = []
        self.num_nonsample_cols = 0
        # sample IDs read from the header, in column order
        self.sampleIds = []
        # maps feature id -> line number of its first occurrence, used to
        # detect duplicate definitions of the same feature
        self._feature_id_lines = {}

    def checkHeader(self, cols):
        """Validate the header and read sample IDs from it.

        Return the number of fatal errors.
        """
        num_errors = super(FeaturewiseFileValidator, self).checkHeader(cols)
        # collect non-sample columns: every recognized column up to the
        # first unrecognized one is taken to identify the feature
        for col_name in self.cols:
            if col_name in self.REQUIRED_HEADERS + self.OPTIONAL_HEADERS:
                # add it to the list of non-sample columns in the file:
                self.nonsample_cols.append(col_name)
            else:
                # reached samples group
                break
        self.num_nonsample_cols = len(self.nonsample_cols)
        num_errors += self._set_sample_ids_from_columns()
        return num_errors

    def checkLine(self, data):
        """Check the feature and sample columns in a data line."""
        super(FeaturewiseFileValidator, self).checkLine(data)
        # parse and check the feature identifiers (implemented by subclasses)
        feature_id = self.parseFeatureColumns(data[:self.num_nonsample_cols])
        # skip line if no feature was identified
        if feature_id is None:
            return
        # skip line with a warning if the feature was encountered before
        if feature_id in self._feature_id_lines:
            self.logger.warning(
                'Duplicate line for a previously listed feature/gene, '
                'this line will be ignored.',
                extra={
                    'line_number': self.line_number,
                    'cause': '%s (already defined on line %d)' % (
                            feature_id,
                            self._feature_id_lines[feature_id])})
            return
        # remember the feature id and check the value for each sample
        self._feature_id_lines[feature_id] = self.line_number
        for column_index, value in enumerate(data):
            if column_index >= len(self.nonsample_cols):
                # checkValue() should be implemented by subclasses
                self.checkValue(value, column_index)

    def parseFeatureColumns(self, nonsample_col_vals):
        """Override to check vals in the non-sample cols and return the id."""
        # message fixed: it was copy-pasted from checkValue() and wrongly
        # referred to "sample columns"
        raise NotImplementedError('The {} class did not provide a method to '
                                  'parse and validate values in the feature '
                                  '(non-sample) columns.'.format(
                                      self.__class__.__name__))

    def checkValue(self, value, column_index):
        """Override to validate a value in a sample column."""
        raise NotImplementedError('The {} class did not provide a method to '
                                  'validate values in sample columns.'.format(
                                      self.__class__.__name__))

    def _set_sample_ids_from_columns(self):
        """Extracts sample IDs from column headers and set self.sampleIds."""
        num_errors = 0
        # check whether any sample columns are present
        if not self.cols[self.num_nonsample_cols:]:
            self.logger.error('No sample columns found',
                              extra={'line_number': self.line_number})
            num_errors += 1
        # set self.sampleIds to the list of sample column names
        self.sampleIds = self.cols[self.num_nonsample_cols:]
        # validate each sample id
        for index, sample_id in enumerate(self.sampleIds):
            if not self.checkSampleId(
                    sample_id,
                    column_number=self.num_nonsample_cols + index + 1):
                num_errors += 1
            if ' ' in sample_id:
                self.logger.error(
                    'White space in SAMPLE_ID is not supported',
                    extra={'line_number': self.line_number,
                           'cause': sample_id})
                num_errors += 1
        return num_errors
class GenewiseFileValidator(FeaturewiseFileValidator):

    """FeatureWiseValidator that has gene symbol and/or Entrez gene id as feature columns."""

    REQUIRED_HEADERS = []
    OPTIONAL_HEADERS = ['Hugo_Symbol', 'Entrez_Gene_Id']
    ALLOW_BLANKS = True
    NULL_VALUES = ["NA"]

    def checkHeader(self, cols):
        """Validate the header and read sample IDs from it.

        Return the number of fatal errors.
        """
        num_errors = super(GenewiseFileValidator, self).checkHeader(cols)
        # where did the two gene identifier columns end up after parsing?
        hugo_in_sample_section = 'Hugo_Symbol' in self.sampleIds
        entrez_in_sample_section = 'Entrez_Gene_Id' in self.sampleIds
        hugo_in_feature_section = 'Hugo_Symbol' in self.nonsample_cols
        entrez_in_feature_section = 'Entrez_Gene_Id' in self.nonsample_cols
        if hugo_in_sample_section or entrez_in_sample_section:
            # a gene identifier column appeared after the sample columns
            self.logger.error('Hugo_Symbol or Entrez_Gene_Id need to be placed before the '
                              'sample ID columns of the file.',
                              extra={'line_number': self.line_number})
            num_errors += 1
        elif not (hugo_in_feature_section or entrez_in_feature_section):
            # neither identifier column was found at all
            self.logger.error('At least one of the columns Hugo_Symbol or '
                              'Entrez_Gene_Id needs to be present.',
                              extra={'line_number': self.line_number})
            num_errors += 1
        elif not entrez_in_feature_section:
            # only the gene symbol is available; warn but continue
            self.logger.warning('The recommended column Entrez_Gene_Id was not found. '
                                'Using Hugo_Symbol for all gene parsing.',
                                extra={'line_number': self.line_number})
        return num_errors

    def parseFeatureColumns(self, nonsample_col_vals):
        """Check the gene identifier columns."""
        def stripped_or_none(col_name):
            # value of the named feature column with surrounding whitespace
            # removed; None when the column is absent or the cell is empty
            if col_name not in self.nonsample_cols:
                return None
            raw_value = nonsample_col_vals[
                self.nonsample_cols.index(col_name)].strip()
            return raw_value if raw_value != '' else None
        return self.checkGeneIdentification(
            stripped_or_none('Hugo_Symbol'),
            stripped_or_none('Entrez_Gene_Id'))
class CNAValidator(GenewiseFileValidator):

    """Sub-class CNA validator."""

    # discrete copy-number calls accepted in sample columns, plus NA values
    ALLOWED_VALUES = ['-2', '-1.5', '-1', '0', '1', '2'] + GenewiseFileValidator.NULL_VALUES

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        if value.strip() in self.ALLOWED_VALUES:
            return
        # only build the message when ERROR records are actually emitted
        if self.logger.isEnabledFor(logging.ERROR):
            self.logger.error(
                'Invalid CNA value: possible values are [%s]',
                ', '.join(self.ALLOWED_VALUES),
                extra={'line_number': self.line_number,
                       'column_number': col_index + 1,
                       'cause': value})
class MutationsExtendedValidator(Validator):

    """Sub-class mutations_extended validator."""

    # TODO - maybe this should comply to https://wiki.nci.nih.gov/display/TCGA/Mutation+Annotation+Format+%28MAF%29+Specification ?
    REQUIRED_HEADERS = [
        'Tumor_Sample_Barcode',
        'Hugo_Symbol',  # Required to initialize the Mutation Mapper tabs
        'Variant_Classification',  # seems to be important during loading/filtering step.
    ]
    REQUIRE_COLUMN_ORDER = False
    ALLOW_BLANKS = True

    # MutationFilter.java filters these types. Therefore, there is no reason to add warnings and errors for them
    SKIP_VARIANT_TYPES = [
        'Silent',
        'Intron',
        '3\'UTR',
        '3\'Flank',
        '5\'UTR',
        '5\'Flank',
        'IGR',
        'RNA'
    ]

    NULL_AA_CHANGE_VALUES = ('', 'NULL', 'NA')
    NULL_DRIVER_VALUES = ('Putative_Passenger', 'Putative_Driver', 'NA', 'Unknown', '')
    NULL_DRIVER_TIERS_VALUES = ('', 'NA')

    # extra unofficial Variant classification values from https://github.com/mskcc/vcf2maf/issues/88:
    EXTRA_VARIANT_CLASSIFICATION_VALUES = ['Splice_Region', 'Fusion']
    # MAF values for Variant_Classification column
    # from https://wiki.nci.nih.gov/display/TCGA/Mutation+Annotation+Format+%28MAF%29+Specification + EXTRA values + Unknown:
    VARIANT_CLASSIFICATION_VALUES = [
        'Frame_Shift_Del',
        'Frame_Shift_Ins',
        'In_Frame_Del',
        'In_Frame_Ins',
        'Missense_Mutation',
        'Nonsense_Mutation',
        'Splice_Site',
        'Translation_Start_Site',
        'Nonstop_Mutation',
        'Targeted_Region',
        'De_novo_Start_InFrame',
        'De_novo_Start_OutOfFrame'] + SKIP_VARIANT_TYPES + EXTRA_VARIANT_CLASSIFICATION_VALUES + ['Unknown']

    # Used for mapping column names to the corresponding function that does a check on the value.
    CHECK_FUNCTION_MAP = {
        'Matched_Norm_Sample_Barcode': 'checkMatchedNormSampleBarcode',
        'NCBI_Build': 'checkNCBIbuild',
        'Verification_Status': 'checkVerificationStatus',
        'Validation_Status': 'checkValidationStatus',
        't_alt_count': 'check_t_alt_count',
        't_ref_count': 'check_t_ref_count',
        'n_alt_count': 'check_n_alt_count',
        'n_ref_count': 'check_n_ref_count',
        'Tumor_Sample_Barcode': 'checkNotBlank',
        'Hugo_Symbol': 'checkNotBlank',
        'HGVSp_Short': 'checkAminoAcidChange',
        'Amino_Acid_Change': 'checkAminoAcidChange',
        'Variant_Classification': 'checkVariantClassification',
        'SWISSPROT': 'checkSwissProt',
        'Start_Position': 'checkStartPosition',
        'End_Position': 'checkEndPosition',
        'cbp_driver': 'checkDriver',
        'cbp_driver_tiers': 'checkDriverTiers',
        'cbp_driver_annotation': 'checkFilterAnnotation',
        'cbp_driver_tiers_annotation': 'checkFilterAnnotation',
        'Mutation_Status': 'checkMutationStatus'
    }

    def __init__(self, *args, **kwargs):
        super(MutationsExtendedValidator, self).__init__(*args, **kwargs)
        # FIXME: consider making this attribute a local var in in checkLine(),
        # it really only makes sense there
        self.extraCols = []
        # when a check function fails it may set self.extra to a specific
        # message and self.extra_exists to True; printDataInvalidStatement()
        # then reports that message instead of the generic one
        self.extra_exists = False
        self.extra = ''
        # distinct values seen in the cbp_driver_tiers column so far
        self.tiers = set()

    def checkHeader(self, cols):
        """Validate header, requiring at least one gene id column."""
        num_errors = super(MutationsExtendedValidator, self).checkHeader(cols)
        if not ('Hugo_Symbol' in self.cols or 'Entrez_Gene_Id' in self.cols):
            self.logger.error('At least one of the columns Hugo_Symbol or '
                              'Entrez_Gene_Id needs to be present.',
                              extra={'line_number': self.line_number})
            num_errors += 1
        elif 'Entrez_Gene_Id' not in self.cols:
            self.logger.warning('The recommended column Entrez_Gene_Id was not found. '
                                'Using Hugo_Symbol for all gene parsing',
                                extra={'line_number': self.line_number})
        if 'SWISSPROT' not in self.cols:
            self.logger.warning(
                'Including the SWISSPROT column is recommended to make sure '
                'that the UniProt canonical isoform is used when drawing Pfam '
                'domains in the mutations view',
                extra={'line_number': self.line_number})
        elif 'swissprot_identifier' not in self.meta_dict:
            self.logger.warning(
                "A SWISSPROT column was found in datafile without specifying "
                "associated 'swissprot_identifier' in metafile, assuming "
                "'swissprot_identifier: name'.",
                extra={'column_number': self.cols.index('SWISSPROT') + 1})
        # one of these columns should be present:
        if not ('HGVSp_Short' in self.cols or 'Amino_Acid_Change' in self.cols):
            self.logger.error('At least one of the columns HGVSp_Short or '
                              'Amino_Acid_Change needs to be present.',
                              extra={'line_number': self.line_number})
            num_errors += 1
        # raise errors if the filter_annotations are found without the "filter" columns
        if 'cbp_driver_annotation' in self.cols and 'cbp_driver' not in self.cols:
            self.logger.error('Column cbp_driver_annotation '
                              'found without any cbp_driver '
                              'column.', extra={'column_number': self.cols.index('cbp_driver_annotation')})
        if 'cbp_driver_tiers_annotation' in self.cols and 'cbp_driver_tiers' not in self.cols:
            self.logger.error('Column cbp_driver_tiers_annotation '
                              'found without any cbp_driver_tiers '
                              'column.', extra={'column_number': self.cols.index('cbp_driver_tiers_annotation')})
        # raise errors if the "filter" columns are found without the filter_annotations
        if 'cbp_driver' in self.cols and 'cbp_driver_annotation' not in self.cols:
            self.logger.error('Column cbp_driver '
                              'found without any cbp_driver_annotation '
                              'column.', extra={'column_number': self.cols.index('cbp_driver')})
        if 'cbp_driver_tiers' in self.cols and 'cbp_driver_tiers_annotation' not in self.cols:
            self.logger.error('Column cbp_driver_tiers '
                              'found without any cbp_driver_tiers_annotation '
                              'column.', extra={'column_number': self.cols.index('cbp_driver_tiers')})
        return num_errors

    def checkLine(self, data):
        """Each value in each line is checked individually.

        From the column name (stored in self.cols), the
        corresponding function to check the value is selected from
        CHECK_FUNCTION_MAP. Will emit a generic warning
        message if this function returns False. If the function sets
        self.extra_exists to True, self.extra will be used in this
        message.
        """
        super(MutationsExtendedValidator, self).checkLine(data)
        if self.skipValidation(data):
            return
        for col_name in self.CHECK_FUNCTION_MAP:
            # if optional column was found, validate it:
            if col_name in self.cols:
                col_index = self.cols.index(col_name)
                value = data[col_index]
                # get the checking method for this column
                checking_function = getattr(
                    self,
                    self.CHECK_FUNCTION_MAP[col_name])
                if not checking_function(value):
                    self.printDataInvalidStatement(value, col_index)
                elif self.extra_exists or self.extra:
                    raise RuntimeError(('Checking function %s set an error '
                                        'message but reported no error') %
                                       checking_function.__name__)
        # validate Tumor_Sample_Barcode value to make sure it exists in study sample list:
        sample_id_column_index = self.cols.index('Tumor_Sample_Barcode')
        value = data[sample_id_column_index]
        self.checkSampleId(value, column_number=sample_id_column_index + 1)
        # parse hugo and entrez to validate them together
        hugo_symbol = None
        entrez_id = None
        if 'Hugo_Symbol' in self.cols:
            hugo_symbol = data[self.cols.index('Hugo_Symbol')].strip()
            # treat the empty string or 'Unknown' as a missing value
            if hugo_symbol in ('', 'Unknown'):
                hugo_symbol = None
        if 'Entrez_Gene_Id' in self.cols:
            entrez_id = data[self.cols.index('Entrez_Gene_Id')].strip()
            # treat the empty string or 0 as a missing value
            if entrez_id in ('', '0'):
                entrez_id = None
        # validate hugo and entrez together:
        self.checkGeneIdentification(hugo_symbol, entrez_id)
        # parse custom driver annotation values to validate them together.
        # NB: this used to test `value in ('')`, i.e. substring membership in
        # the *string* '' (parentheses without a comma do not make a tuple);
        # that only matched the empty string by accident, so compare
        # explicitly with == '' instead.
        driver_value = None
        driver_annotation = None
        driver_tiers_value = None
        driver_tiers_annotation = None
        if 'cbp_driver' in self.cols:
            driver_value = data[self.cols.index('cbp_driver')].strip()
            # treat the empty string as a missing value
            if driver_value == '':
                driver_value = None
        if 'cbp_driver_annotation' in self.cols:
            driver_annotation = data[self.cols.index('cbp_driver_annotation')].strip()
            # treat the empty string as a missing value
            if driver_annotation == '':
                driver_annotation = None
        if 'cbp_driver_tiers' in self.cols:
            driver_tiers_value = data[self.cols.index('cbp_driver_tiers')].strip()
            # treat the empty string as a missing value
            if driver_tiers_value == '':
                driver_tiers_value = None
        if 'cbp_driver_tiers_annotation' in self.cols:
            driver_tiers_annotation = data[self.cols.index('cbp_driver_tiers_annotation')].strip()
            # treat the empty string as a missing value
            if driver_tiers_annotation == '':
                driver_tiers_annotation = None
        self.checkDriverAnnotationColumn(driver_value, driver_annotation)
        self.checkDriverTiersColumnsValues(driver_tiers_value, driver_tiers_annotation)
        # check if a non-blank amino acid change exists for non-splice sites
        if ('Variant_Classification' not in self.cols or
                data[self.cols.index('Variant_Classification')] not in (
                    'Splice_Site', )):
            aachange_value_found = False
            for aa_col in ('HGVSp_Short', 'Amino_Acid_Change'):
                if (aa_col in self.cols and
                        data[self.cols.index(aa_col)] not in
                        self.NULL_AA_CHANGE_VALUES):
                    aachange_value_found = True
            if not aachange_value_found:
                self.logger.warning(
                    'No Amino_Acid_Change or HGVSp_Short value. This '
                    'mutation record will get a generic "MUTATED" flag',
                    extra={'line_number': self.line_number})

    def printDataInvalidStatement(self, value, col_index):
        """Prints out statement for invalid values detected."""
        message = ("Value in column '%s' is invalid" %
                   self.cols[col_index])
        # a check function may have left a more specific message in self.extra
        if self.extra_exists:
            message = self.extra
            self.extra = ''
            self.extra_exists = False
        self.logger.error(
            message,
            extra={'line_number': self.line_number,
                   'column_number': col_index + 1,
                   'cause': value})

    # These functions check values of the MAF according to their name.
    # The mapping of which function checks which value is a global value
    # at the top of the script. If any other checks need to be added for
    # another field name, add the map in the global corresponding to
    # the function name that is created to check it.

    def checkNCBIbuild(self, value):
        """Check the NCBI_Build value against the build configured in the portal."""
        if value != '':
            # based on MutationDataUtils.getNcbiBuild
            if self.portal.species == "human":
                if value not in [str(self.portal.ncbi_build), self.portal.genome_build, 'GRCh'+str(self.portal.ncbi_build)]:
                    return False
            elif self.portal.species == "mouse":
                if value not in [str(self.portal.ncbi_build), self.portal.genome_build, 'GRCm'+str(self.portal.ncbi_build)]:
                    return False
        return True

    def checkMatchedNormSampleBarcode(self, value):
        """Check the normal sample id against the metafile's normal_samples_list, if any."""
        if value != '':
            if 'normal_samples_list' in self.meta_dict and self.meta_dict['normal_samples_list'] != '':
                normal_samples_list = [x.strip() for x in self.meta_dict['normal_samples_list'].split(',')]
                if value not in normal_samples_list:
                    self.extra = "Normal sample id not in list of sample ids configured in corresponding metafile. " \
                                 "Please check your metafile field 'normal_samples_list'."
                    self.extra_exists = True
                    return False
        return True

    def checkVerificationStatus(self, value):
        """Warn (never error) when Verification_Status is not a MAF value."""
        # if value is not blank, then it should be one of these:
        if self.checkNotBlank(value) and value.lower() not in ('verified', 'unknown', 'na'):
            # Giving only warning instead of error because not used in front end.
            self.logger.warning(
                "Value in 'Verification_Status' not in MAF format",
                extra={'line_number': self.line_number,
                       'cause': value})
            # return without error (just warning above)
            return True
        return True

    def checkValidationStatus(self, value):
        """Warn (never error) when Validation_Status is not a MAF value."""
        # if value is not blank, then it should be one of these:
        if self.checkNotBlank(value) and value.lower() not in ('untested', 'inconclusive',
                                                               'valid', 'invalid', 'na', 'redacted', 'unknown'):
            # Giving only warning instead of error because front end can handle unofficial values.
            self.logger.warning(
                "Value in 'Validation_Status' not in MAF format",
                extra={'line_number': self.line_number,
                       'cause': value})
            # return without error (just warning above)
            return True
        return True

    def check_t_alt_count(self, value):
        """Validate t_alt_count: an integer, or missing ('' or '.')."""
        return self.checkInt(value) or value in ('', '.')

    def check_t_ref_count(self, value):
        """Validate t_ref_count: an integer, or missing ('' or '.')."""
        return self.checkInt(value) or value in ('', '.')

    def check_n_alt_count(self, value):
        """Validate n_alt_count: an integer, or missing ('' or '.')."""
        return self.checkInt(value) or value in ('', '.')

    def check_n_ref_count(self, value):
        """Validate n_ref_count: an integer, or missing ('' or '.')."""
        return self.checkInt(value) or value in ('', '.')

    def checkAminoAcidChange(self, value):
        """Test whether a string is a valid amino acid change specification."""
        # TODO implement this test more properly,
        # may require bundling the hgvs package:
        # https://pypi.python.org/pypi/hgvs/
        if value not in self.NULL_AA_CHANGE_VALUES:
            value = value.strip()
            # there should only be a 'p.' prefix at the very start
            if len(value) > 1 and 'p.' in value[1:]:
                # return with an error message
                self.extra = ("Unexpected 'p.' within amino acid change, "
                              "only one variant can be listed on each line")
                self.extra_exists = True
                return False
            # lines in this format are single mutations, so the haplotype
            # syntax supported by HGVS strings is not applicable
            if ';' in value or '+' in value:
                # return with an error message
                self.extra = ("Unexpected ';' or '+' in amino acid change, "
                              "multi-variant allele notation is not supported")
                self.extra_exists = True
                return False
            # commas are not allowed. They are used internally in certain
            # servlets, via GeneticAlterationUtil.getMutationMap().
            if ',' in value:
                # return with an error message
                self.extra = 'Comma in amino acid change'
                self.extra_exists = True
                return False
        return True

    def skipValidation(self, data):
        """Test whether the mutation is silent and should be skipped."""
        is_silent = False
        variant_classification = data[self.cols.index('Variant_Classification')]
        if 'variant_classification_filter' in self.meta_dict:
            # the metafile overrides the default filter list; note this
            # sets an instance attribute shadowing the class-level default
            self.SKIP_VARIANT_TYPES = [x.strip()
                                       for x
                                       in self.meta_dict['variant_classification_filter'].split(',')]
        hugo_symbol = data[self.cols.index('Hugo_Symbol')]
        entrez_id = '0'
        if 'Entrez_Gene_Id' in self.cols:
            entrez_id = data[self.cols.index('Entrez_Gene_Id')]
        if hugo_symbol == 'Unknown' and entrez_id == '0':
            is_silent = True
            if variant_classification == 'IGR':
                self.logger.info("This variant (Gene symbol 'Unknown', Entrez gene ID 0) will be filtered out",
                                 extra={'line_number': self.line_number,
                                        'cause': variant_classification})
            else:
                # the MAF specification documents the use of Unknown and 0 here
                # for intergenic mutations, and since the Variant_Classification
                # column is often invalid, cBioPortal interprets this combination
                # (or just the symbol if the Entrez column is absent) as such,
                # but with a warning:
                self.logger.warning(
                    "Gene specification (Gene symbol 'Unknown', Entrez gene ID 0) for this variant "
                    "implies intergenic even though Variant_Classification is "
                    "not 'IGR'; this variant will be filtered out",
                    extra={'line_number': self.line_number,
                           'cause': variant_classification})
        elif variant_classification in self.SKIP_VARIANT_TYPES:
            self.logger.info("Line will not be loaded due to the variant "
                             "classification filter. Filtered types: [%s]",
                             ', '.join(self.SKIP_VARIANT_TYPES),
                             extra={'line_number': self.line_number,
                                    'cause': variant_classification})
            is_silent = True
        return is_silent

    def checkNotBlank(self, value):
        """Test whether a string is blank."""
        if value is None or value.strip() == '':
            return False
        return True

    def checkVariantClassification(self, value):
        """Validate according to MAF standard list and give warning when value is not recognized."""
        # if blank, return False:
        if not self.checkNotBlank(value):
            return False
        else:
            # check whether value conforms to MAF list of values, give warning otherwise:
            if value not in self.VARIANT_CLASSIFICATION_VALUES:
                self.logger.warning(
                    'Given value for Variant_Classification column is not one of the expected values. This '
                    'can result in mapping issues and subsequent missing features in the mutation view UI, '
                    'such as missing COSMIC information.',
                    extra={'line_number': self.line_number,
                           'cause': value})
                # return without error (just warning above)
                return True
        # if no reasons to return with a message were found, return valid
        return True

    def checkSwissProt(self, value):
        """Validate the name or accession in the SWISSPROT column."""
        if value is None or value.strip() in ['', 'NA', '[Not Available]']:
            self.logger.warning(
                'Missing value in SWISSPROT column; this column is '
                'recommended to make sure that the UniProt canonical isoform '
                'is used when drawing Pfam domains in the mutations view.',
                extra={'line_number': self.line_number,
                       'cause': value})
            # no value to test, return without error
            return True
        if self.meta_dict.get('swissprot_identifier', 'name') == 'accession':
            if not re.match(
                    # regex from http://www.uniprot.org/help/accession_numbers
                    r'^([OPQ][0-9][A-Z0-9]{3}[0-9]|'
                    r'[A-NR-Z][0-9]([A-Z][A-Z0-9]{2}[0-9]){1,2})$',
                    value):
                # return this as an error
                self.extra = 'SWISSPROT value is not a UniProtKB accession.'
                self.extra_exists = True
                return False
        else:
            # format described on http://www.uniprot.org/help/entry_name
            if not re.match(
                    r'^[A-Z0-9]{1,5}_[A-Z0-9]{1,5}$',
                    value):
                # if there is a ',' then give a more detailed message:
                if ',' in value:
                    self.logger.warning('SWISSPROT value is not a single UniProtKB/Swiss-Prot name. '
                                        'Found multiple separated by a `,`. '
                                        'Loader will try to find UniProt accession using Entrez gene id or '
                                        'gene symbol.',
                                        extra={'line_number': self.line_number, 'cause': value})
                else:
                    self.logger.warning('SWISSPROT value is not a (single) UniProtKB/Swiss-Prot name. '
                                        'Loader will try to find UniProt accession using Entrez gene id or '
                                        'gene symbol.',
                                        extra={'line_number': self.line_number, 'cause': value})
                return True
        # if no reasons to return with a message were found, return valid
        return True

    def checkStartPosition(self, value):
        """Check that the Start_Position value is an integer."""
        # the original condition also tested `value.isdigit() and '.' in value`,
        # which can never be true (isdigit() excludes '.'); dead clause removed.
        # column_number is now reported 1-based, consistent with the rest of
        # this validator.
        if not value.isdigit():
            self.logger.error(
                'The start position of this variant is not '
                'an integer',
                extra={'line_number': self.line_number,
                       'column_number': self.cols.index('Start_Position') + 1,
                       'cause': value})
        # if no reasons to return with a message were found, return valid
        return True

    def checkEndPosition(self, value):
        """Check that the End_Position value is an integer."""
        # see checkStartPosition for the removed dead clause and the
        # 1-based column_number fix
        if not value.isdigit():
            self.logger.error(
                'The end position of this variant is not '
                'an integer',
                extra={'line_number': self.line_number,
                       'column_number': self.cols.index('End_Position') + 1,
                       'cause': value})
        # if no reasons to return with a message were found, return valid
        return True

    def checkDriver(self, value):
        """Validate the values in the cbp_driver column."""
        if value not in self.NULL_DRIVER_VALUES:
            self.extra = 'Only "Putative_Passenger", "Putative_Driver", "NA", "Unknown" and "" (empty) are allowed.'
            self.extra_exists = True
            return False
        return True

    def checkDriverTiers(self, value):
        """Report the tiers in the cbp_driver_tiers column (skipping the empty values)."""
        if value not in self.NULL_DRIVER_TIERS_VALUES:
            # column_number reported 1-based, consistent with the rest of the file
            self.logger.info('Values contained in the column cbp_driver_tiers that will appear in the "Mutation Color" '
                             'menu of the Oncoprint',
                             extra={'line_number': self.line_number, 'column_number': self.cols.index('cbp_driver_tiers') + 1, 'cause': value})
            self.tiers.add(value)
        if len(self.tiers) > 10:
            self.logger.warning('cbp_driver_tiers contains more than 10 different tiers.',
                                extra={'line_number': self.line_number, 'column_number': self.cols.index('cbp_driver_tiers') + 1,
                                       'cause': value})
        if len(value) > 50:
            self.extra = 'cbp_driver_tiers column does not support values longer than 50 characters'
            self.extra_exists = True
            return False
        return True

    def checkFilterAnnotation(self, value):
        """Check if the annotation values are smaller than 80 characters."""
        if len(value) > 80:
            self.extra = 'cbp_driver_annotation and cbp_driver_tiers_annotation columns do not support annotations longer than 80 characters'
            self.extra_exists = True
            return False
        return True

    def checkMutationStatus(self, value):
        """Check values in mutation status column."""
        if value.lower() in ['loh', 'none', 'wildtype']:
            self.logger.info('Mutation will not be loaded due to value in Mutation_Status',
                             extra={'line_number': self.line_number, 'cause': value})
        if value.lower() not in ['none', 'germline', 'somatic', 'loh', 'post-transcriptional modification', 'unknown', 'wildtype'] and value != '':
            self.logger.warning('Mutation_Status value is not in MAF format',
                                extra={'line_number': self.line_number, 'cause': value})
        return True
class ClinicalValidator(Validator):
"""Abstract Validator class for clinical data files.
Subclasses define the columns that must be present in REQUIRED_HEADERS,
and the value of the 'is_patient_attribute' property for attributes
defined in this file in PROP_IS_PATIENT_ATTRIBUTE.
"""
REQUIRE_COLUMN_ORDER = False
PROP_IS_PATIENT_ATTRIBUTE = None
NULL_VALUES = ["[not applicable]", "[not available]", "[pending]", "[discrepancy]","[completed]","[null]", "", "na"]
ALLOW_BLANKS = True
METADATA_LINES = ('display_name',
'description',
'datatype',
'priority')
# Attributes required to have certain properties because of hard-coded use.
# Note: the 'when_wrong' property (found in some attributes like METASTATIC_SITE),
# can be set to WARNING to indicate that only a WARNING should be given
# if this attribute is found in the "wrong" file (e.g. a PATIENT attribute found
# in a SAMPLE file or vice-versa).
PREDEFINED_ATTRIBUTES = {
'AGE': {
'is_patient_attribute': '1',
'datatype': 'NUMBER'
},
'CANCER_TYPE': {
'is_patient_attribute': '0',
'datatype': 'STRING'
},
'CANCER_TYPE_DETAILED': {
'is_patient_attribute': '0',
'datatype': 'STRING'
},
'DETAILED_CANCER_TYPE': {
'is_patient_attribute': '0',
'datatype': 'STRING'
},
'DFS_STATUS': {
'is_patient_attribute': '1',
'datatype': 'STRING'
},
'DFS_MONTHS': {
'is_patient_attribute': '1',
'datatype': 'NUMBER'
},
'DRIVER_MUTATIONS': {
'is_patient_attribute': '0'
},
'ERG_FUSION_ACGH': {
'is_patient_attribute': '0'
},
'ETS_RAF_SPINK1_STATUS': {
'is_patient_attribute': '0'
},
'GENDER': {
'is_patient_attribute': '1',
'datatype': 'STRING'
},
'GLEASON_SCORE': {
'is_patient_attribute': '0',
'when_wrong': 'WARNING'
},
'GLEASON_SCORE_1': {
'is_patient_attribute': '0'
},
'GLEASON_SCORE_2': {
'is_patient_attribute': '0'
},
'HISTOLOGY': {
'is_patient_attribute': '0'
},
'KNOWN_MOLECULAR_CLASSIFIER': {
'is_patient_attribute': '0'
},
'METASTATIC_SITE': {
'is_patient_attribute': '0',
'datatype': 'STRING',
'when_wrong': 'WARNING'
},
'OS_STATUS': {
'is_patient_attribute': '1',
'datatype': 'STRING'
},
'OS_MONTHS': {
'is_patient_attribute': '1',
'datatype': 'NUMBER'
},
'OTHER_SAMPLE_ID': {
'is_patient_attribute': '0',
'datatype': 'STRING'
},
'PATIENT_DISPLAY_NAME': {
'is_patient_attribute': '1',
'datatype': 'STRING'
},
'PRIMARY_SITE': {
'is_patient_attribute': '0',
'datatype': 'STRING',
'when_wrong': 'WARNING'
},
'SAMPLE_CLASS': {
'is_patient_attribute': '0',
'datatype': 'STRING'
},
'SAMPLE_DISPLAY_NAME': {
'is_patient_attribute': '0',
'datatype': 'STRING'
},
'SAMPLE_TYPE': {
'is_patient_attribute': '0',
'datatype': 'STRING'
},
'SERUM_PSA': {
'is_patient_attribute': '0'
},
'SEX': {
'is_patient_attribute': '1',
'datatype': 'STRING'
},
'TMPRSS2_ERG_FUSION_STATUS': {
'is_patient_attribute': '0'
},
'TUMOR_GRADE': {
'is_patient_attribute': '0'
},
'TUMOR_SITE': {
'is_patient_attribute': '0',
'datatype': 'STRING',
'when_wrong': 'WARNING'
},
'TUMOR_STAGE_2009': {
'is_patient_attribute': '0'
},
'TUMOR_TISSUE_SITE': {
'is_patient_attribute': '0',
'datatype': 'STRING',
'when_wrong': 'WARNING'
},
'TUMOR_TYPE': {
'is_patient_attribute': '0',
'datatype': 'STRING'
},
'TYPE_OF_CANCER': {
'is_patient_attribute': '0',
'datatype': 'STRING'
},
}
def __init__(self, *args, **kwargs):
    """Initialize the instance attributes of the data file validator."""
    super(ClinicalValidator, self).__init__(*args, **kwargs)
    # ids of the clinical attributes defined in this file
    self.defined_attributes = set()
    # per-column attribute definitions parsed from the '#' header lines
    self.attr_defs = []
def processTopLines(self, line_list):
"""Parse the attribute definitions above the column header."""
if not line_list:
if not self.relaxed_mode:
self.logger.warning(
'No data type definition headers found in clinical data file',
extra={'line_number': self.line_number})
else:
self.logger.info('Ignoring missing or invalid data type definition '
' headers. Continuing with validation...')
return False
if len(line_list) != len(self.METADATA_LINES):
self.logger.error(
'%d comment lines at start of clinical data file, expected %d',
len(line_list),
len(self.METADATA_LINES))
return False
# remove the # signs
line_list = [line[1:] for line in line_list]
attr_defs = None
num_attrs = 0
csvreader = csv.reader(line_list,
delimiter='\t',
quoting=csv.QUOTE_NONE,
strict=True)
invalid_values = False
for line_index, row in enumerate(csvreader):
if attr_defs is None:
# make a list of as many dictionaries as there are columns
num_attrs = len(row)
attr_defs = [OrderedDict() for i in range(num_attrs)]
elif len(row) != num_attrs:
if not self.relaxed_mode:
self.logger.error(
'Varying numbers of columns in clinical header (%d, %d)',
num_attrs,
len(row),
extra={'line_number': line_index + 1})
return False
for col_index, value in enumerate(row):
# test for invalid values in these columns
if value.strip().lower() in self.NULL_VALUES:
self.logger.error(
'Empty %s field in clinical attribute definition',
self.METADATA_LINES[line_index],
extra={'line_number': line_index + 1,
'column_number': col_index + 1,
'cause': value})
invalid_values = True
if self.METADATA_LINES[line_index] in ('display_name',
'description'):
pass
elif self.METADATA_LINES[line_index] == 'datatype':
VALID_DATATYPES = ('STRING', 'NUMBER', 'BOOLEAN')
if value not in VALID_DATATYPES:
self.logger.error(
'Invalid data type definition, must be one of '
'[%s]',
', '.join(VALID_DATATYPES),
extra={'line_number': line_index + 1,
'colum_number': col_index + 1,
'cause': value})
invalid_values = True
invalid_values = True
elif self.METADATA_LINES[line_index] == 'priority':
try:
if int(value) < 0:
raise ValueError()
except ValueError:
self.logger.error(
'Priority definition should be an integer, and should be '
'greater than or equal to zero',
extra={'line_number': line_index + 1,
'column_number': col_index + 1,
'cause': value})
invalid_values = True
else:
if not self.relaxed_mode:
raise RuntimeError('Unknown clinical header line name')
attr_defs[col_index][self.METADATA_LINES[line_index]] = value
self.attr_defs = attr_defs
return not invalid_values
    def checkHeader(self, cols):
        """Validate the attributes defined in the column headers and above.

        Cross-checks the column names against the attribute definitions
        parsed by processTopLines() and against the hard-coded
        PREDEFINED_ATTRIBUTES table, and returns the number of errors
        counted (starting from the superclass's count).
        """
        num_errors = super(ClinicalValidator, self).checkHeader(cols)
        # the number of metadata definitions must match the number of columns
        if self.numCols != len(self.attr_defs):
            if not self.relaxed_mode:
                self.logger.error(
                    'Varying numbers of columns in clinical header (%d, %d)',
                    len(self.attr_defs),
                    len(self.cols),
                    extra={'line_number': self.line_number})
                num_errors += 1
        # fill in missing attr_defs data if in relaxed mode and clinical data is headerless
        if self.fill_in_attr_defs:
            self.logger.info('Filling in missing attribute properties for clinical data.')
            missing_attr_defs = {}
            for col_index, col_name in enumerate(cols):
                # default every attribute to an unformatted string
                missing_attr_defs[col_index] = {'display_name': col_name,
                                                'description': col_name,
                                                'datatype': 'STRING',
                                                'priority': '1'}
            self.attr_defs = missing_attr_defs
        for col_index, col_name in enumerate(self.cols):
            # Front end can have issues with lower case attribute names as discussed
            # in https://github.com/cBioPortal/cbioportal/issues/3518
            if not col_name.isupper():
                self.logger.error(
                    "Attribute name not in upper case.",
                    extra={'line_number': self.line_number,
                           'column_number': col_index + 1,
                           'cause': col_name})
            # do not check the special ID columns as attributes,
            # just parse them with the correct data type
            if col_name in ('PATIENT_ID', 'SAMPLE_ID'):
                self.attr_defs[col_index] = {'display_name': '',
                                             'description': '',
                                             'datatype': 'STRING',
                                             'priority': '0'}
                continue
            # check predefined (hard-coded) attribute definitions
            if col_name in self.PREDEFINED_ATTRIBUTES:
                for attr_property in self.PREDEFINED_ATTRIBUTES[col_name]:
                    if attr_property == 'is_patient_attribute':
                        # '0' means sample-level, '1' patient-level; compare
                        # with this validator subclass's declared level
                        expected_level = \
                            self.PREDEFINED_ATTRIBUTES[col_name][attr_property]
                        if self.PROP_IS_PATIENT_ATTRIBUTE != expected_level:
                            # check if only warning should be given:
                            if ('when_wrong' in self.PREDEFINED_ATTRIBUTES[col_name] and
                                    self.PREDEFINED_ATTRIBUTES[col_name]['when_wrong'] == 'WARNING'):
                                self.logger.warning(
                                    'Attribute expected to be a %s-level attribute. Some *minor* details will be '
                                    'missing in patient/sample view for this study',
                                    {'0': 'sample', '1': 'patient'}[expected_level],
                                    extra={'line_number': self.line_number,
                                           'column_number': col_index + 1,
                                           'cause': col_name})
                            else:
                                self.logger.error(
                                    'Attribute must be a %s-level attribute',
                                    {'0': 'sample', '1': 'patient'}[expected_level],
                                    extra={'line_number': self.line_number,
                                           'column_number': col_index + 1,
                                           'cause': col_name})
                    # check pre-header datatype property:
                    if attr_property == 'datatype':
                        # check pre-header metadata if applicable -- if these were
                        # found missing or unparseable, `relaxed mode' has made
                        # validation continue assuming all attributes to be
                        # unformatted strings
                        if not self.fill_in_attr_defs:
                            value = self.attr_defs[col_index][attr_property]
                            expected_value = \
                                self.PREDEFINED_ATTRIBUTES[col_name][attr_property]
                            # NOTE(review): the `not self.fill_in_attr_defs`
                            # term below is redundant — it is already known
                            # True inside this branch
                            if (value != expected_value and
                                    not self.fill_in_attr_defs):
                                self.logger.error(
                                    "%s definition for attribute '%s' must be %s",
                                    attr_property,
                                    col_name,
                                    expected_value,
                                    extra={'line_number':
                                               self.METADATA_LINES.index(
                                                   attr_property) + 1,
                                           'column_number': col_index + 1,
                                           'cause': value})
            self.defined_attributes.add(col_name)
        return num_errors
def checkLine(self, data):
"""Check the values in a line of data."""
super(ClinicalValidator, self).checkLine(data)
for col_index, col_name in enumerate(self.cols):
# treat cells beyond the end of the line as blanks,
# super().checkLine() has already logged an error
value = ''
if col_index < len(data):
value = data[col_index].strip()
data_type = self.attr_defs[col_index]['datatype']
# if not blank, check if values match the datatype
if value.strip().lower() in self.NULL_VALUES:
pass
elif data_type == 'NUMBER':
if not self.checkFloat(value):
self.logger.error(
'Value of numeric attribute is not a real number',
extra={'line_number': self.line_number,
'column_number': col_index + 1,
'column_name': col_name,
'cause': value})
elif data_type == 'BOOLEAN':
VALID_BOOLEANS = ('TRUE', 'FALSE')
if not value in VALID_BOOLEANS:
self.logger.error(
'Value of boolean attribute must be one of [%s]',
', '.join(VALID_BOOLEANS),
extra={'line_number': self.line_number,
'column_number': col_index + 1,
'column_name': col_name,
'cause': value})
# make sure that PATIENT_ID is present
if col_name == 'PATIENT_ID':
if value.strip().lower() in self.NULL_VALUES:
self.logger.error(
'Missing PATIENT_ID',
extra={'line_number': self.line_number,
'column_number': col_index + 1,
'cause': value})
class SampleClinicalValidator(ClinicalValidator):

    """Validator for files defining and setting sample-level attributes."""

    REQUIRED_HEADERS = ['SAMPLE_ID', 'PATIENT_ID']
    PROP_IS_PATIENT_ATTRIBUTE = '0'
    INVALID_SAMPLE_ID_CHARACTERS = set(',;+/=*')

    def __init__(self, *args, **kwargs):
        """Initialize the validator to track sample ids defined."""
        super(SampleClinicalValidator, self).__init__(*args, **kwargs)
        # maps each sample id seen so far to the line that defined it
        self.sample_id_lines = {}
        # live view of the defined sample ids (Python 2 dict view)
        self.sampleIds = self.sample_id_lines.viewkeys()
        # all patient ids referenced by the samples in this file
        self.patient_ids = set()

    def checkLine(self, data):
        """Check the values in a line of data."""
        super(SampleClinicalValidator, self).checkLine(data)
        for column, header in enumerate(self.cols):
            # cells beyond the end of the line count as blanks; the
            # superclass has already flagged the length mismatch
            cell = data[column].strip() if column < len(data) else ''
            if header == 'PATIENT_ID':
                self.patient_ids.add(cell)
                continue
            if header != 'SAMPLE_ID':
                # TODO: check the values in the other documented columns
                continue
            # a sample id is mandatory on every line
            if cell.strip().lower() in self.NULL_VALUES:
                self.logger.error(
                    'Missing SAMPLE_ID',
                    extra={'line_number': self.line_number,
                           'column_number': column + 1,
                           'cause': cell})
                continue
            if ' ' in cell:
                self.logger.error(
                    'White space in SAMPLE_ID is not supported',
                    extra={'line_number': self.line_number,
                           'column_number': column + 1,
                           'cause': cell})
            # invalid characters in sample_id can cause problems in different
            # parts of the portal code, so block them here:
            if any((c in self.INVALID_SAMPLE_ID_CHARACTERS) for c in cell):
                self.logger.error(
                    'A number of special characters, such as ' + str(list(self.INVALID_SAMPLE_ID_CHARACTERS)) +
                    ' are not allowed in SAMPLE_ID',
                    extra={'line_number': self.line_number,
                           'column_number': column + 1,
                           'cause': cell})
            if cell not in self.sample_id_lines:
                self.sample_id_lines[cell] = self.line_number
            elif cell.startswith('TCGA-'):
                # duplicate TCGA barcodes are tolerated with a warning only
                self.logger.warning(
                    'TCGA sample defined twice in clinical file, this '
                    'line will be ignored assuming truncated barcodes',
                    extra={
                        'line_number': self.line_number,
                        'column_number': column + 1,
                        'cause': '%s (already defined on line %d)' % (
                            cell,
                            self.sample_id_lines[cell])})
            else:
                self.logger.error(
                    'Sample defined twice in clinical file',
                    extra={
                        'line_number': self.line_number,
                        'column_number': column + 1,
                        'cause': '%s (already defined on line %d)' % (
                            cell,
                            self.sample_id_lines[cell])})
class PatientClinicalValidator(ClinicalValidator):

    """Validator for files defining and setting patient-level attributes."""

    REQUIRED_HEADERS = ['PATIENT_ID']
    PROP_IS_PATIENT_ATTRIBUTE = '1'

    def __init__(self, *args, **kwargs):
        """Initialize the validator to track patient IDs referenced."""
        super(PatientClinicalValidator, self).__init__(*args, **kwargs)
        # maps each PATIENT_ID defined in this file to its line number
        self.patient_id_lines = {}

    def checkHeader(self, cols):
        """Validate headers in patient-specific clinical data files.

        Returns the number of errors counted by the superclass check.
        """
        num_errors = super(PatientClinicalValidator, self).checkHeader(cols)
        # do not allow the SAMPLE_ID column in this file
        if 'SAMPLE_ID' in self.cols:
            self.logger.error(
                'SAMPLE_ID column found in a patient attribute file',
                extra={'line_number': self.line_number,
                       # bug fix: report the column number 1-based, as done
                       # everywhere else in this file (the `+ 1` was missing)
                       'column_number': self.cols.index('SAMPLE_ID') + 1,
                       'cause': 'SAMPLE_ID'})
        # refuse to define attributes also defined in the sample-level file
        for attribute_id in self.defined_attributes:
            if attribute_id in DEFINED_SAMPLE_ATTRIBUTES:
                # log this as a file-aspecific error, using the base logger
                self.logger.logger.error(
                    'Clinical attribute is defined both as sample-level and '
                    'as patient-level',
                    extra={'cause': attribute_id})
        # warnings about missing optional columns
        if 'OS_MONTHS' not in self.cols or 'OS_STATUS' not in self.cols:
            self.logger.warning(
                'Columns OS_MONTHS and/or OS_STATUS not found. Overall '
                'survival analysis feature will not be available for this '
                'study.')
        if 'DFS_MONTHS' not in self.cols or 'DFS_STATUS' not in self.cols:
            self.logger.warning(
                'Columns DFS_MONTHS and/or DFS_STATUS not found. Disease '
                'free analysis feature will not be available for this study.')
        return num_errors

    def checkLine(self, data):
        """Check the values in a line of data."""
        super(PatientClinicalValidator, self).checkLine(data)
        # track survival fields so they can be cross-checked after the loop
        osstatus_is_deceased = False
        osmonths_value = None
        for col_index, col_name in enumerate(self.cols):
            # treat cells beyond the end of the line as blanks,
            # super().checkLine() has already logged an error
            value = ''
            if col_index < len(data):
                value = data[col_index].strip()
            if col_name == 'PATIENT_ID':
                if ' ' in value:
                    self.logger.error(
                        'White space in PATIENT_ID is not supported',
                        extra={'line_number': self.line_number,
                               'column_number': col_index + 1,
                               'cause': value})
                if value in self.patient_id_lines:
                    self.logger.error(
                        'Patient defined multiple times in file',
                        extra={
                            'line_number': self.line_number,
                            'column_number': self.cols.index('PATIENT_ID') + 1,
                            'cause': '%s (already defined on line %d)' % (
                                value,
                                self.patient_id_lines[value])})
                else:
                    self.patient_id_lines[value] = self.line_number
                    if value not in PATIENTS_WITH_SAMPLES:
                        self.logger.warning(
                            'Clinical data defined for a patient with '
                            'no samples',
                            extra={'line_number': self.line_number,
                                   'column_number': col_index + 1,
                                   'cause': value})
            elif col_name == 'OS_STATUS':
                if value == 'DECEASED':
                    osstatus_is_deceased = True
                elif (value.lower() not in self.NULL_VALUES and
                        value not in ('LIVING', 'DECEASED')):
                    self.logger.error(
                        'Value in OS_STATUS column is not LIVING or '
                        'DECEASED',
                        extra={'line_number': self.line_number,
                               'column_number': col_index + 1,
                               'cause': value})
            elif col_name == 'DFS_STATUS':
                if (value.lower() not in self.NULL_VALUES and
                        value not in ('DiseaseFree',
                                      'Recurred/Progressed',
                                      'Recurred',
                                      'Progressed')):
                    self.logger.error(
                        'Value in DFS_STATUS column is not DiseaseFree, '
                        'Recurred/Progressed, Recurred or Progressed',
                        extra={'line_number': self.line_number,
                               'column_number': col_index + 1,
                               'cause': value})
            elif col_name == 'OS_MONTHS':
                osmonths_value = value
        # deceased patients should have a month of death for survival curves
        if osstatus_is_deceased and (
                osmonths_value is None or
                osmonths_value.lower() in self.NULL_VALUES):
            if osmonths_value is None or osmonths_value == '':
                osmonths_value = '<none>'
            self.logger.warning(
                'OS_MONTHS is not specified for deceased patient. Patient '
                'will be excluded from survival curve and month of death '
                'will not be shown on patient view timeline.',
                extra={'line_number': self.line_number,
                       'cause': osmonths_value})

    def onComplete(self):
        """Perform final validations based on the data parsed."""
        # warn about patients that have samples but no clinical data line
        for patient_id in PATIENTS_WITH_SAMPLES:
            if patient_id not in self.patient_id_lines:
                self.logger.warning(
                    'Missing clinical data for a patient associated with '
                    'samples',
                    extra={'cause': patient_id})
        super(PatientClinicalValidator, self).onComplete()
class SegValidator(Validator):
    """Validator for .seg files.

    Checks sample ids, chromosome names and genomic coordinates against the
    chromosome lengths downloaded from UCSC for the study's reference genome.
    """
    REQUIRED_HEADERS = [
        'ID',
        'chrom',
        'loc.start',
        'loc.end',
        'num.mark',
        'seg.mean']
    REQUIRE_COLUMN_ORDER = True

    def __init__(self, *args, **kwargs):
        """Initialize validator to track coverage of the genome."""
        super(SegValidator, self).__init__(*args, **kwargs)
        # NOTE: performs a network request to UCSC at construction time
        self.chromosome_lengths = self.load_chromosome_lengths(
            self.meta_dict['reference_genome_id'],
            self.logger.logger)
        # add 23 and 24 "chromosomes" as aliases to X and Y, respectively:
        self.chromosome_lengths['23'] = self.chromosome_lengths['X']
        self.chromosome_lengths['24'] = self.chromosome_lengths['Y']

    def checkLine(self, data):
        """Check the values in one segment line of a .seg file."""
        super(SegValidator, self).checkLine(data)
        # coordinates parsed so far on this line, keyed by column name
        parsed_coords = {}
        for col_index, col_name in enumerate(self.cols):
            value = data[col_index].strip()
            if col_name == 'ID':
                self.checkSampleId(value, column_number=col_index + 1)
            elif col_name == 'chrom':
                if value in self.chromosome_lengths:
                    parsed_coords[col_name] = value
                else:
                    self.logger.error(
                        ('Unknown chromosome, must be one of (%s)' %
                         '|'.join(self.chromosome_lengths.keys())),
                        extra={'line_number': self.line_number,
                               'column_number': col_index + 1,
                               'cause': value})
            elif col_name in ('loc.start', 'loc.end'):
                try:
                    # convert possible scientific notation to python scientific notation
                    if "e+" in value:
                        value = float(value.replace("e+", "e"))
                        if not value.is_integer():
                            # raise value error 'Genomic position is not an integer'
                            raise ValueError()
                    parsed_coords[col_name] = int(value)
                except ValueError:
                    self.logger.error(
                        'Genomic position is not an integer',
                        extra={'line_number': self.line_number,
                               'column_number': col_index + 1,
                               'cause': value})
                    # skip further validation specific to this column
                    continue
                # 0 is the first base, and loc.end is not part of the segment
                # 'chrom' has already been read, as column order is fixed
                if parsed_coords[col_name] < 0 or (
                        'chrom' in parsed_coords and
                        parsed_coords[col_name] > self.chromosome_lengths[
                                                    parsed_coords['chrom']]):
                    self.logger.error(
                        'Genomic position beyond end of chromosome '
                        '(chr%s:0-%s)',
                        parsed_coords['chrom'],
                        self.chromosome_lengths[parsed_coords['chrom']],
                        extra={'line_number': self.line_number,
                               'column_number': col_index + 1,
                               'cause': value})
                    # not a valid coordinate usable in further validations
                    del parsed_coords[col_name]
            elif col_name == 'num.mark':
                if not self.checkInt(value):
                    # also check if the value is an int in scientific notation (1e+05)
                    if not ("e+" in value and self.checkFloat(value)):
                        self.logger.error(
                            'Number of probes is not an integer',
                            extra={'line_number': self.line_number,
                                   'column_number': col_index + 1,
                                   'cause': value})
            elif col_name == 'seg.mean':
                if not self.checkFloat(value):
                    self.logger.error(
                        'Mean segment copy number is not a number',
                        extra={'line_number': self.line_number,
                               'column_number': col_index + 1,
                               'cause': value})
            else:
                # REQUIRED_HEADERS with REQUIRE_COLUMN_ORDER should make
                # this unreachable for well-formed headers
                raise RuntimeError('Could not validate column type: ' +
                                   col_name)

        if 'loc.start' in parsed_coords and 'loc.end' in parsed_coords:
            # the convention for genomic coordinates (at least at UCSC) is that
            # the chromosome starts at 0 and end positions are excluded.
            # see also https://groups.google.com/forum/#!topic/igv-help/LjffjxPul2M
            if parsed_coords['loc.start'] == parsed_coords['loc.end']:
                self.logger.warning(
                    'Segment is zero bases wide and will not be loaded',
                    extra={'line_number': self.line_number,
                           'cause': '{}-{}'.format(parsed_coords['loc.start'],
                                                   parsed_coords['loc.end'])})
            elif parsed_coords['loc.start'] > parsed_coords['loc.end']:
                self.logger.error(
                    'Start position of segment is greater than end position',
                    extra={'line_number': self.line_number,
                           'cause': '{}-{}'.format(parsed_coords['loc.start'],
                                                   parsed_coords['loc.end'])})

        # TODO check for overlap and low genome coverage
        # this could be implemented by sorting the segments for a patient
        # by (chromosome and) start position and checking if the start position
        # of each segment comes after the end position of the previous one,
        # meanwhile adding up the number of (non-overlapping) bases covered on
        # that chromosome in that patient.

    @staticmethod
    def load_chromosome_lengths(genome_build, logger):
        """Get the length of each chromosome from USCS and return a dict.

        The dict will not include unplaced contigs, alternative haplotypes or
        the mitochondrial chromosome.

        Raises IOError if the download fails or the file cannot be parsed.
        """
        chrom_size_dict = {}
        chrom_size_url = (
            'http://hgdownload.cse.ucsc.edu'
            '/goldenPath/{build}/bigZips/{build}.chrom.sizes').format(
                build=genome_build)
        logger.debug("Retrieving chromosome lengths from '%s'",
                     chrom_size_url)
        r = requests.get(chrom_size_url)
        try:
            r.raise_for_status()
        except requests.exceptions.HTTPError as e:
            # NOTE(review): `e.message` is Python 2-only; would need
            # str(e) if this file is ever ported to Python 3
            raise IOError('Error retrieving chromosome lengths from UCSC: ' +
                          e.message)
        for line in r.text.splitlines():
            try:
                # skip comment lines
                if line.startswith('#'):
                    continue
                cols = line.split('\t', 1)
                if not (len(cols) == 2 and
                        cols[0].startswith('chr')):
                    raise IOError()
                # skip unplaced sequences
                if cols[0].endswith('_random') or cols[0].startswith('chrUn_'):
                    continue
                # skip entries for alternative haplotypes
                if re.search(r'_hap[0-9]+$', cols[0]):
                    continue
                # skip the mitochondrial chromosome
                if cols[0] == 'chrM':
                    continue

                # remove the 'chr' prefix
                chrom_name = cols[0][3:]
                try:
                    chrom_size = int(cols[1])
                except ValueError:
                    raise IOError()
                chrom_size_dict[chrom_name] = chrom_size
            except IOError:
                raise IOError(
                    "Unexpected response from {url}: {line}".format(
                        url=chrom_size_url, line=repr(line)))
        return chrom_size_dict
class ContinuousValuesValidator(GenewiseFileValidator):
    """Validator for matrix files mapping floats to gene/sample combinations.

    Missing values indicated by GenewiseFileValidator.NULL_VALUES are allowed.
    """

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        trimmed = value.strip()
        # accept missing-value markers; everything else must parse as a float
        if trimmed in self.NULL_VALUES:
            return
        if self.checkFloat(trimmed):
            return
        self.logger.error("Value is neither a real number nor " + ', '.join(self.NULL_VALUES),
                          extra={'line_number': self.line_number,
                                 'column_number': col_index + 1,
                                 'cause': value})
class FusionValidator(Validator):

    """Basic validation for fusion data. Validates:

    1. Required column headers and the order
    2. Values of Hugo_Symbol, Entrez_Gene_Id, Fusion and Tumor_Sample_Barcode
    3. Uniqueness of lines
    """

    REQUIRED_HEADERS = [
        'Hugo_Symbol',
        'Entrez_Gene_Id',
        'Center',
        'Tumor_Sample_Barcode',
        'Fusion',
        'DNA_support',
        'RNA_support',
        'Method',
        'Frame']
    REQUIRE_COLUMN_ORDER = True
    ALLOW_BLANKS = True

    def __init__(self, *args, **kwargs):
        super(FusionValidator, self).__init__(*args, **kwargs)
        # maps each fusion entry seen so far to the line that defined it
        self.fusion_entries = {}

    def checkLine(self, data):
        """Validate gene identifiers and uniqueness of one fusion line."""
        super(FusionValidator, self).checkLine(data)

        # parse hugo and entrez to validate them together
        hugo_symbol = None
        entrez_id = None
        if 'Hugo_Symbol' in self.cols:
            hugo_symbol = data[self.cols.index('Hugo_Symbol')].strip()
            # treat the empty string or 'Unknown' as a missing value
            # (bug fix: 'Unknown' was previously not handled, contradicting
            # the comment above)
            if hugo_symbol in ('', 'Unknown'):
                hugo_symbol = None
        if 'Entrez_Gene_Id' in self.cols:
            entrez_id = data[self.cols.index('Entrez_Gene_Id')].strip()
            # treat empty string, 0 or 'NA' as a missing value
            if entrez_id in ['', '0', 'NA']:
                entrez_id = None
        # validate hugo and entrez together:
        self.checkGeneIdentification(hugo_symbol, entrez_id)

        # validate uniqueness based on Hugo_Symbol, Entrez_Gene_Id, Tumor_Sample_Barcode and Fusion
        fusion_entry = "\t".join([data[self.cols.index('Hugo_Symbol')],
                                  data[self.cols.index('Entrez_Gene_Id')],
                                  data[self.cols.index('Tumor_Sample_Barcode')],
                                  data[self.cols.index('Fusion')]])
        if fusion_entry in self.fusion_entries:
            self.logger.warning(
                'Duplicate entry in fusion data.',
                extra={'line_number': self.line_number,
                       'cause': '%s (already defined on line %d)' % (
                           fusion_entry,
                           self.fusion_entries[fusion_entry])})
        else:
            self.fusion_entries[fusion_entry] = self.line_number
class MutationSignificanceValidator(Validator):

    """Validator for mutation significance (MutSig) files.

    No column-specific checks are implemented yet, see the TODO below.
    """

    # TODO add checks for mutsig files
    ALLOW_BLANKS = True
    pass
class GenePanelMatrixValidator(Validator):

    """Validator for gene panel matrix files; only the SAMPLE_ID header is checked."""

    REQUIRED_HEADERS = ['SAMPLE_ID']
    # TODO check that other column headers are valid profile stable ids
    # TODO check that sample ids are references in clinical data file
    # TODO check that referenced gene panel stable id is valid
class ProteinLevelValidator(FeaturewiseFileValidator):

    """Validator for protein (RPPA-style) level matrix files."""

    REQUIRED_HEADERS = ['Composite.Element.REF']
    ALLOW_BLANKS = True
    NULL_VALUES = ["NA"]

    def parseFeatureColumns(self, nonsample_col_vals):
        """Check the IDs in the first column.

        The ID is a space-separated list of gene symbols and/or Entrez
        identifiers, separated by a pipe symbol from the name of the
        antibody probe used to detect these genes. The values on the line
        will be loaded for each gene in the list, or for fictional genes
        that encode specific phosphorylated versions of the genes' protein
        products if the antibody name has a particular format.
        """
        composite_ref = nonsample_col_vals[0].strip()
        if '|' not in composite_ref:
            self.logger.error('No pipe symbol in Composite.Element.REF column',
                              extra={'line_number': self.line_number,
                                     'column_number': 1,
                                     'cause': nonsample_col_vals[0]})
            return None
        genes_field, antibody = composite_ref.split('|', 1)
        for symbol in genes_field.split(' '):
            if symbol.strip() == 'NA':
                self.logger.warning(
                    'Gene symbol NA will be ignored, assuming Not Available',
                    extra={'line_number': self.line_number,
                           'column_number': 1,
                           'cause': nonsample_col_vals[0]})
            elif self.checkInt(symbol):
                # all-numeric tokens are interpreted as Entrez gene ids
                self.checkGeneIdentification(entrez_id=symbol)
            else:
                self.checkGeneIdentification(gene_symbol=symbol)
        # TODO: return a value for (this phospo-version of) each gene
        return antibody

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        trimmed = value.strip()
        # accept missing-value markers; everything else must parse as a float
        if trimmed in self.NULL_VALUES:
            return
        if self.checkFloat(trimmed):
            return
        self.logger.error("Value is neither a real number nor " + ', '.join(self.NULL_VALUES),
                          extra={'line_number': self.line_number,
                                 'column_number': col_index + 1,
                                 'cause': value})
class TimelineValidator(Validator):

    """Validator for clinical timeline event files."""

    REQUIRED_HEADERS = [
        'PATIENT_ID',
        'START_DATE',
        'STOP_DATE',
        'EVENT_TYPE']
    REQUIRE_COLUMN_ORDER = True
    ALLOW_BLANKS = True

    def checkLine(self, data):
        """Check a data line; currently only the generic superclass checks run."""
        super(TimelineValidator, self).checkLine(data)
        # TODO check the values
class CancerTypeValidator(Validator):

    """Validator for tab-separated cancer type definition files."""

    REQUIRED_HEADERS = []
    REQUIRE_COLUMN_ORDER = True
    # check this in the subclass to avoid emitting an error twice
    ALLOW_BLANKS = True

    # fixed column layout of a cancer type file, in order
    COLS = (
        'type_of_cancer',
        'name',
        'clinical_trial_keywords',
        'color',
        'parent_type_of_cancer'
    )

    def __init__(self, *args, **kwargs):
        """Initialize a file validator with a defined_cancer_types field."""
        super(CancerTypeValidator, self).__init__(*args, **kwargs)
        self.cols = self.__class__.COLS
        self.numCols = len(self.cols)
        # cancer type ids defined in this file, in the order encountered
        self.defined_cancer_types = []

    def checkHeader(self, cols):
        """Check the first uncommented line just like any other data line."""
        # cancer type files have no header line; every line is data
        return self.checkLine(cols)

    def checkLine(self, data):
        """Check a data line in a cancer type file."""
        # track whether any errors are emitted while validating this line
        tracking_handler = MaxLevelTrackingHandler()
        self.logger.logger.addHandler(tracking_handler)
        try:
            super(CancerTypeValidator, self).checkLine(data)
            if len(data) != 5:
                self.logger.error('Lines in cancer type files must have these '
                                  '5 columns, in order: [%s]',
                                  ', '.join(self.cols),
                                  extra={'line_number': self.line_number,
                                         'cause': '<%d columns>' % len(data)})
                # no assumptions can be made about the meaning of each column
                return
            line_cancer_type = data[self.cols.index('type_of_cancer')].lower().strip()
            # check each column
            for col_index, field_name in enumerate(self.cols):
                value = data[col_index].strip()
                if value == '':
                    self.logger.error(
                        "Blank value in '%s' column",
                        field_name,
                        extra={'line_number': self.line_number,
                               'column_number': col_index + 1,
                               'cause': value})
                elif field_name == 'color':
                    # validate whether the color field is one of the
                    # keywords on https://www.w3.org/TR/css3-color/#svg-color
                    if value.lower() not in [
                            'aliceblue', 'antiquewhite', 'aqua', 'aquamarine',
                            'azure', 'beige', 'bisque', 'black',
                            'blanchedalmond', 'blue', 'blueviolet', 'brown',
                            'burlywood', 'cadetblue', 'chartreuse', 'chocolate',
                            'coral', 'cornflowerblue', 'cornsilk', 'crimson',
                            'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod',
                            'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki',
                            'darkmagenta', 'darkolivegreen', 'darkorange',
                            'darkorchid', 'darkred', 'darksalmon',
                            'darkseagreen', 'darkslateblue', 'darkslategray',
                            'darkslategrey', 'darkturquoise', 'darkviolet',
                            'deeppink', 'deepskyblue', 'dimgray', 'dimgrey',
                            'dodgerblue', 'firebrick', 'floralwhite',
                            'forestgreen', 'fuchsia', 'gainsboro', 'ghostwhite',
                            'gold', 'goldenrod', 'gray', 'green', 'greenyellow',
                            'grey', 'honeydew', 'hotpink', 'indianred',
                            'indigo', 'ivory', 'khaki', 'lavender',
                            'lavenderblush', 'lawngreen', 'lemonchiffon',
                            'lightblue', 'lightcoral', 'lightcyan',
                            'lightgoldenrodyellow', 'lightgray', 'lightgreen',
                            'lightgrey', 'lightpink', 'lightsalmon',
                            'lightseagreen', 'lightskyblue', 'lightslategray',
                            'lightslategrey', 'lightsteelblue', 'lightyellow',
                            'lime', 'limegreen', 'linen', 'magenta', 'maroon',
                            'mediumaquamarine', 'mediumblue', 'mediumorchid',
                            'mediumpurple', 'mediumseagreen', 'mediumslateblue',
                            'mediumspringgreen', 'mediumturquoise',
                            'mediumvioletred', 'midnightblue', 'mintcream',
                            'mistyrose', 'moccasin', 'navajowhite', 'navy',
                            'oldlace', 'olive', 'olivedrab', 'orange',
                            'orangered', 'orchid', 'palegoldenrod', 'palegreen',
                            'paleturquoise', 'palevioletred', 'papayawhip',
                            'peachpuff', 'peru', 'pink', 'plum', 'powderblue',
                            'purple', 'red', 'rosybrown', 'royalblue',
                            'saddlebrown', 'salmon', 'sandybrown', 'seagreen',
                            'seashell', 'sienna', 'silver', 'skyblue',
                            'slateblue', 'slategray', 'slategrey', 'snow',
                            'springgreen', 'steelblue', 'tan', 'teal',
                            'thistle', 'tomato', 'turquoise', 'violet', 'wheat',
                            'white', 'whitesmoke', 'yellow', 'yellowgreen',
                            'rebeccapurple']:
                        self.logger.error(
                            'Color field is not a CSS3 color keyword, '
                            'see the table on https://en.wikipedia.org/wiki/Web_colors#X11_color_names',
                            extra={'line_number': self.line_number,
                                   'column_number': col_index + 1,
                                   'cause': value})
                elif field_name == 'parent_type_of_cancer':
                    parent_cancer_type = value.lower()
                    # if parent_cancer_type is not 'tissue' (which is a special case when building the oncotree),
                    # then give error if the given parent is not found in the DB or in the given cancer types of the
                    # current study:
                    if (parent_cancer_type != 'tissue' and
                            self.portal.cancer_type_dict is not None and not
                            (parent_cancer_type in self.portal.cancer_type_dict or
                             parent_cancer_type in self.defined_cancer_types)):
                        self.logger.error(
                            "Unknown parent for cancer type '%s'",
                            line_cancer_type,
                            extra={'line_number': self.line_number,
                                   'column_number': col_index + 1,
                                   'cause': value})
            # check for duplicated (possibly inconsistent) cancer types
            if line_cancer_type in self.defined_cancer_types:
                self.logger.error(
                    'Cancer type defined a second time in this file',
                    extra={'line_number': self.line_number,
                           'cause': line_cancer_type})
            # compare the cancer_type definition with the portal instance
            if (self.portal.cancer_type_dict is not None and
                    line_cancer_type in self.portal.cancer_type_dict):
                existing_info = self.portal.cancer_type_dict[line_cancer_type]
                # depending on version, the API may not return this field
                if 'short_name' in existing_info:
                    if existing_info['short_name'].lower() != line_cancer_type:
                        self.logger.error(
                            "Attempting to validate against invalid cancer type "
                            "in portal: short name '%s' does not match id '%s'",
                            existing_info['short_name'],
                            line_cancer_type,
                            extra={'line_number': self.line_number})
                        return
                for col_index, field_name in enumerate(self.cols):
                    value = data[col_index]
                    # this field is loaded into the database in lowercase
                    if field_name == 'parent_type_of_cancer':
                        value = value.lower()
                    if (
                            field_name in existing_info and
                            value != existing_info[field_name]):
                        self.logger.error(
                            "'%s' field of cancer type '%s' does not match "
                            "the portal, '%s' expected",
                            field_name,
                            line_cancer_type,
                            existing_info[field_name],
                            extra={'line_number': self.line_number,
                                   'column_number': col_index + 1,
                                   'cause': value})
            elif self.portal.cancer_type_dict is not None:
                self.logger.warning(
                    'New disease type will be added to the portal',
                    extra={'line_number': self.line_number,
                           'cause': line_cancer_type})
            # if no errors have been emitted while validating this line
            if tracking_handler.max_level < logging.ERROR:
                # add the cancer type defined on this line to the list
                self.defined_cancer_types.append(line_cancer_type)
        finally:
            self.logger.logger.removeHandler(tracking_handler)
class GisticGenesValidator(Validator):
"""Validator for files with information aggregated from GISTIC output.
This file type is produced by the cBioPortal data transformation pipelines,
based on the `table_{amp|del}.conf_*.txt` files in combination with data
from `{amp|del}_genes_conf_*.txt`.
"""
REQUIRED_HEADERS = [
'chromosome',
'peak_start',
'peak_end',
'genes_in_region',
'amp',
'cytoband',
'q_value']
REQUIRE_COLUMN_ORDER = False
ALLOW_BLANKS = True
NULL_VALUES = ['']
    def __init__(self, *args, **kwargs):
        """Initialize a GisticGenesValidator with the given parameters.

        Raises RuntimeError if the metadata does not name a supported
        reference genome or genetic alteration type.
        """
        super(GisticGenesValidator, self).__init__(*args, **kwargs)
        # checkLine() expects particular values here, for the 'amp' column
        # (the nested ifs reject genomes that start with neither prefix)
        if not self.meta_dict['reference_genome_id'].startswith('hg'):
            if not self.meta_dict['reference_genome_id'].startswith('mm'):
                raise RuntimeError(
                    "GisticGenesValidator requires the metadata field "
                    "reference_genome_id to start with 'hg' or 'mm'")
        if self.meta_dict['genetic_alteration_type'] not in (
                'GISTIC_GENES_AMP', 'GISTIC_GENES_DEL'):
            raise RuntimeError(
                "Genetic alteration type '{}' not supported by "
                "GisticGenesValidator.".format(
                    self.meta_dict['genetic_alteration_type']))
def checkLine(self, data):
"""Check the values on a data line."""
super(GisticGenesValidator, self).checkLine(data)
# properties to be validated in relation to each other if
# individually sensible values are found
parsed_chromosome = None
parsed_peak_start = None
parsed_peak_end = None
parsed_gene_list = None
cytoband_chromosome = None
parsed_cytoband = None
# perform specific validations for each known column
for col_index, col_name in enumerate(self.cols):
# treat cells beyond the end of the line as blanks,
# super().checkLine() has already logged an error
value = ''
if col_index < len(data):
value = data[col_index]
# of the required columns, only genes_in_region can be blank
if ((col_name in self.REQUIRED_HEADERS and
col_name != 'genes_in_region') and
value.strip() in self.NULL_VALUES):
self.logger.error("Empty cell in column '%s'",
col_name,
extra={'line_number': self.line_number,
'column_number': col_index + 1,
'cause': value})
# skip to the next column
continue
if col_name == 'chromosome':
parsed_chromosome = self.parse_chromosome_num(
value, column_number=col_index + 1)
elif col_name == 'peak_start':
parsed_peak_start = self.parse_genomic_coord(
value, column_number=col_index + 1)
elif col_name == 'peak_end':
parsed_peak_end = self.parse_genomic_coord(
value, column_number=col_index + 1)
elif col_name == 'genes_in_region':
parsed_gene_list = self.parse_gene_list(
value, column_number=col_index + 1)
elif col_name == 'amp':
self.parse_amp_value(
value, column_number=col_index + 1)
elif col_name == 'cytoband':
cytoband_chromosome, parsed_cytoband = self.parse_cytoband(
value, column_number=col_index + 1)
elif col_name == 'q_value':
self.parse_q_value(
value, column_number=col_index + 1)
# check if the start and the end of the peak are in the right order
if parsed_peak_start is not None and parsed_peak_end is not None:
if parsed_peak_start > parsed_peak_end:
# is an error according to UCSC "0" convention, end location excluded.
# see also https://groups.google.com/forum/#!topic/igv-help/LjffjxPul2M
self.logger.error(
'Start position of peak is not lower than end position',
extra={'line_number': self.line_number,
'cause': '{}/{}'.format(parsed_peak_start,
parsed_peak_end)})
elif parsed_peak_end == parsed_peak_start:
# cBioPortal seems to filter out regions in which the narrow
# peak (based on all samples) is 0 bases wide. I have seen
# examples of peaks of length 0 at the end position of the
# corresponding `wide peak' in Firehose data.
self.logger.warning(
'Peak is 0 bases wide and will not be shown in cBioPortal',
extra={'line_number': self.line_number,
'cause': '{}-{}'.format(parsed_peak_start,
parsed_peak_end)})
# check coordinates with the cytoband specification
if cytoband_chromosome and parsed_cytoband:
if parsed_chromosome:
if cytoband_chromosome != parsed_chromosome:
self.logger.error(
'Cytoband and chromosome specifications do not match',
extra={'line_number': self.line_number,
'cause': '(%s%s, %s)' %
(cytoband_chromosome,
parsed_cytoband,
parsed_chromosome)})
# TODO: validate band/coord sets with the UCSC cytoband definitions (using
# parsed_gene_list and some of the other parsed_*list variables
def parse_chromosome_num(self, value, column_number):
"""Parse a chromosome number, logging any errors for this column
Return the parsed value if valid, None otherwise.
"""
# TODO: check if the chromosome exists in the UCSC cytobands file
return value
def parse_genomic_coord(self, value, column_number):
"""Parse a genomic coordinate, logging any errors for this column.
Return the parsed value if valid, None otherwise.
"""
parsed_value = None
try:
parsed_value = int(value)
except ValueError:
self.logger.error("Genomic position is not an integer",
extra={'line_number': self.line_number,
'column_number': column_number,
'cause': value})
return parsed_value
def parse_gene_list(self, value, column_number):
"""Parse a csv gene symbol list, logging any errors for this column.
Return the parsed value if valid, None otherwise.
"""
comma_sep_list = value.strip()
# ignore any trailing comma
if comma_sep_list.endswith(','):
comma_sep_list = comma_sep_list[:-1]
# list to collect parseable gene symbols
parsed_gene_list = []
# give a custom warning if the list is empty
if comma_sep_list.strip() == '':
self.logger.warning(
"No genes listed in GISTIC copy-number altered region",
extra={'line_number': self.line_number,
'column_number': column_number,
'cause': value})
else:
# loop over the comma-separated list of gene symbols. Example of such a
# list: RNA5SP149,snoU13|ENSG00000239096.1,GNB4
for symbol in comma_sep_list.split(','):
symbol = symbol.strip()
# remove the | and trailing part if any (e.g.
# remove |ENSG00000239096.1 from snoU13|ENSG00000239096.1):
symbol = symbol.split('|')[0]
# add valid, unambiguous gene symbols to the list,
# while logging errors about unresolvable ones
# TODO: allow blanks if possible after this fix:
# https://github.com/cBioPortal/cbioportal/issues/884
if self.checkGeneIdentification(symbol, entrez_id=None):
parsed_gene_list.append(symbol)
def parse_amp_value(self, value, column_number):
"""Parse an `amp` column flag, logging any errors for this column.
Return the parsed value if valid, None otherwise.
"""
# 1 for _AMP, 0 for _DEL
expected_value = str(int(
self.meta_dict['genetic_alteration_type'] ==
'GISTIC_GENES_AMP'))
if value != expected_value:
self.logger.error(
"'amp' column must be '%s' in files of genetic "
"alteration type '%s'",
expected_value,
self.meta_dict['genetic_alteration_type'],
extra={'line_number': self.line_number,
'column_number': column_number,
'cause': value})
return None
else:
return int(value)
def parse_cytoband(self, value, column_number):
"""Parse a cytoband with chromosome, logging any errors for this col.
Return a tuple of the chromosome number and the cytoband specification
if valid, a tuple of Nones otherwise.
"""
chromosome_num = None
cytoband = None
# find the index of the (first) p or otherwise q, the arm
arm_index = value.find('p')
if arm_index == -1:
arm_index = value.find('q')
if arm_index == -1:
self.logger.error(
"Cytoband specification contains no 'p' or 'q'",
extra={'line_number': self.line_number,
'column_number': column_number,
'cause': value})
else:
chromosome_num = value[:arm_index]
cytoband = value[arm_index:]
if chromosome_num is not None and chromosome_num == '':
self.logger.error(
'Cytoband specification does not include the chromosome',
extra={'line_number': self.line_number,
'column_number': column_number,
'cause': value})
chromosome_num, cytoband = None, None
# TODO: check if the cytoband exists in the UCSC cytobands file
return chromosome_num, cytoband
def parse_q_value(self, value, column_number):
"""Parse a q-value (numeral), logging any errors for this colum.
Return the parsed value if valid, None otherwise.
"""
parsed_value = None
value_invalid = False
try:
parsed_value = float(value)
except ValueError:
self.logger.error('q-value is not a real number',
extra={'line_number': self.line_number,
'column_number': column_number,
'cause': value})
value_invalid = True
if not value_invalid and (not 0 <= parsed_value <= 1):
self.logger.error('q-value is not between 0 and 1',
extra={'line_number': self.line_number,
'column_number': column_number,
'cause': value})
if value_invalid:
return None
else:
return parsed_value
class GsvaWiseFileValidator(FeaturewiseFileValidator):

    """FeatureWiseValidator that has Gene set ID as feature column."""

    REQUIRED_HEADERS = ['geneset_id']

    def __init__(self, *args, **kwargs):
        super(GsvaWiseFileValidator, self).__init__(*args, **kwargs)
        # gene set IDs encountered in this file, in order of appearance
        self.geneset_ids = []

    def checkHeader(self, cols):
        """Validate the header and read sample IDs from it.

        Return the number of fatal errors.
        """
        num_errors = super(GsvaWiseFileValidator, self).checkHeader(cols)
        global GSVA_SAMPLE_IDS
        # fix: compare against None with `is not` (identity), not `!=` (PEP 8)
        if GSVA_SAMPLE_IDS is not None:
            # another GSVA file was parsed before; the score and p-value files
            # must share an identical header
            if self.cols != GSVA_SAMPLE_IDS:
                self.logger.error('Headers from score and p-value files are different',
                                  extra={'line_number': self.line_number})
                num_errors += 1
        else:
            # first GSVA file: remember the header for the companion file
            GSVA_SAMPLE_IDS = self.cols
        return num_errors

    def parseFeatureColumns(self, nonsample_col_vals):
        """Check the `geneset_id` column."""
        global GSVA_GENESET_IDS
        geneset_id = nonsample_col_vals[0].strip()
        # Check if gene set is present
        if geneset_id == '':
            # Validator already gives warning for this in checkLine method
            pass
        # Check if gene set contains whitespace
        elif ' ' in geneset_id:
            self.logger.error("Whitespace found in `geneset_id`",
                              extra={'line_number': self.line_number,
                                     'cause': geneset_id})
        # Check if gene set is in database
        elif self.portal.geneset_id_list is not None and geneset_id not in self.portal.geneset_id_list:
            self.logger.warning("Gene set not found in database, please make sure "
                                "to import gene sets prior to study loading",
                                extra={'line_number': self.line_number, 'cause': geneset_id})
        else:
            # Check if this is the second GSVA data file
            if GSVA_GENESET_IDS is not None:
                # Check if gene set is in the first GSVA file
                if geneset_id not in GSVA_GENESET_IDS:
                    self.logger.error('Gene sets in GSVA score and p-value files are not equal',
                                      extra={'line_number': self.line_number})
            self.geneset_ids.append(geneset_id)
        return geneset_id

    def onComplete(self):
        global GSVA_GENESET_IDS
        if GSVA_GENESET_IDS is None:
            # first GSVA file: remember its gene sets for the companion file
            GSVA_GENESET_IDS = self.geneset_ids
        else:
            # Check if geneset ids are the same
            if GSVA_GENESET_IDS != self.geneset_ids:
                self.logger.error(
                    'First columns of GSVA score and p-value files are not equal')
        super(GsvaWiseFileValidator, self).onComplete()
class GsvaScoreValidator(GsvaWiseFileValidator):

    """Validator for files with per-sample, per-gene-set GSVA scores.

    The GSVA algorithm in R can calculate a GSVA score or GSVA-like score
    (such as ssGSEA) per sample per gene set.
    """

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        score = float(value.strip())
        # a GSVA(-like) score is only meaningful inside the interval [-1, 1]
        if score > 1 or score < -1:
            self.logger.error("Value is not between -1 and 1, and therefor not "
                              "a valid GSVA score",
                              extra={'line_number': self.line_number,
                                     'column_number': col_index + 1,
                                     'cause': value})
class GsvaPvalueValidator(GsvaWiseFileValidator):

    """Validator for files with p-values accompanying GSVA scores.

    The GSVA algorithm in R can calculate a p-value for each GSVA score
    using a bootstrapping method.
    """

    def checkValue(self, value, col_index):
        """Check a value in a sample column."""
        p_value = float(value.strip())
        # a valid p-value lies in the half-open interval (0, 1]
        if p_value > 1 or p_value <= 0:
            self.logger.error("Value is not between 0 and 1, and therefor not a valid p-value",
                              extra={'line_number': self.line_number,
                                     'column_number': col_index + 1,
                                     'cause': value})
# ------------------------------------------------------------------------------
# Functions
# FIXME: returning simple valid (meta_fn, data_fn) pairs would be cleaner,
# Validator objects can be instantiated with a portal instance elsewhere
def process_metadata_files(directory, portal_instance, logger, relaxed_mode):
    """Parse the meta files in a directory and create data file validators.

    Return a tuple of:
        1. a dict listing the data file validator (or None) for each meta file
           by file type,
        2. a dict mapping any case list IDs defined *outside* of the case list
           directory to paths of the files in which they were defined
        3. the cancer type of the study, and
        4. the study id

    Possible file types are listed in cbioportal_common.MetaFileTypes.
    """
    # get filenames for all meta files in the directory: any file whose name
    # contains 'meta' as a word or followed by _/digits, excluding hidden
    # files and editor backups
    filenames = [os.path.join(directory, f) for
                 f in os.listdir(directory) if
                 re.search(r'(\b|_)meta(\b|[_0-9])', f,
                           flags=re.IGNORECASE) and
                 not f.startswith('.') and
                 not f.endswith('~')]
    if len(filenames) == 0:
        logger.critical(
            'No meta files found in ' + directory + '. Please make sure the directory '
            'is the path to the folder containing the files.')
    study_id = None
    study_cancer_type = None
    validators_by_type = {}
    case_list_suffix_fns = {}
    stable_ids = []
    for filename in filenames:
        # parse_metadata_file logs its own errors and returns a dict with at
        # least a 'meta_file_type' key (None when the file was invalid)
        meta_dictionary = cbioportal_common.parse_metadata_file(
            filename, logger, study_id, portal_instance.genome_build)
        meta_file_type = meta_dictionary['meta_file_type']
        if meta_file_type is None:
            # invalid meta file; errors were already emitted above
            continue
        # validate stable_id to be unique (check can be removed once we deprecate this field):
        if 'stable_id' in meta_dictionary:
            stable_id = meta_dictionary['stable_id']
            if stable_id in stable_ids:
                # stable id already used in other meta file, give error:
                logger.error(
                    'stable_id repeated. It should be unique across all files in a study',
                    extra={'filename_': filename,
                           'cause': stable_id})
            else:
                stable_ids.append(stable_id)
        # the first meta file that defines a study id determines it for the
        # rest of the directory (subsequent files are checked against it by
        # parse_metadata_file, which receives study_id)
        if study_id is None and 'cancer_study_identifier' in meta_dictionary:
            study_id = meta_dictionary['cancer_study_identifier']
        if meta_file_type == cbioportal_common.MetaFileTypes.STUDY:
            # study_cancer_type doubles as a "meta_study file seen" flag
            if study_cancer_type is not None:
                logger.error(
                    'Encountered a second meta_study file',
                    extra={'filename_': filename})
            else:
                study_cancer_type = meta_dictionary['type_of_cancer']
                if ('add_global_case_list' in meta_dictionary and
                        meta_dictionary['add_global_case_list'].lower() == 'true'):
                    # record that an 'all' case list is implied by this file
                    case_list_suffix_fns['all'] = filename
            # raise a warning if pmid is existing, but no citation is available.
            if 'pmid' in meta_dictionary and not 'citation' in meta_dictionary:
                logger.warning(
                    'Citation is required when giving a pubmed id (pmid).')
        # create a list for the file type in the dict
        if meta_file_type not in validators_by_type:
            validators_by_type[meta_file_type] = []
        # check if data_filename is set AND if data_filename is a supported field according to META_FIELD_MAP:
        if 'data_filename' in meta_dictionary and 'data_filename' in cbioportal_common.META_FIELD_MAP[meta_file_type]:
            # look up the validator class by name and instantiate it for this
            # meta file's data file
            validator_class = globals()[VALIDATOR_IDS[meta_file_type]]
            validator = validator_class(directory, meta_dictionary,
                                        portal_instance, logger, relaxed_mode)
            validators_by_type[meta_file_type].append(validator)
        else:
            # meta file without a data file; record the absence with None
            validators_by_type[meta_file_type].append(None)
    if study_cancer_type is None:
        logger.error(
            'Cancer type needs to be defined for a study. Verify that you have a study file '
            'and have defined the cancer type correctly.')
    # prepend the cancer study id to any case list suffixes
    defined_case_list_fns = {}
    if study_id is not None:
        for suffix in case_list_suffix_fns:
            defined_case_list_fns[study_id + '_' + suffix] = \
                case_list_suffix_fns[suffix]
    return (validators_by_type, defined_case_list_fns,
            study_cancer_type, study_id)
def processCaseListDirectory(caseListDir, cancerStudyId, logger,
                             prev_stableid_files=None):
    """Validate the case lists in a directory and return an id/file mapping.

    Args:
        caseListDir (str): path to the case list directory.
        cancerStudyId (str): cancer_study_identifier expected in the files.
        logger: logging.Logger instance through which to send output.
        prev_stableid_files (Optional): dict mapping the stable IDs of any case
            lists already defined to the files they were defined in.

    Returns:
        Dict[str, str]: dict mapping the stable IDs of all valid defined case
            lists to the files they were defined in, including the
            prev_stableid_files argument
    """
    logger.debug('Validating case lists')
    stableid_files = {}
    # include the previously defined stable IDs
    if prev_stableid_files is not None:
        stableid_files.update(prev_stableid_files)
    # all files in the directory except hidden files and editor backups
    case_list_fns = [os.path.join(caseListDir, fn) for
                     fn in os.listdir(caseListDir) if
                     not (fn.startswith('.') or fn.endswith('~'))]
    for case in case_list_fns:
        meta_dictionary = cbioportal_common.parse_metadata_file(
            case, logger, cancerStudyId, case_list=True)
        # skip if invalid, errors have already been emitted
        if meta_dictionary['meta_file_type'] is None:
            continue
        # check for duplicated stable ids
        stable_id = meta_dictionary['stable_id']
        if not stable_id.startswith(cancerStudyId + '_'):
            logger.error('Stable_id of case list does not start with the '
                         'study id (%s) followed by an underscore',
                         cancerStudyId,
                         extra={'filename_': case,
                                'cause': stable_id})
        elif stable_id in stableid_files:
            logger.error('Multiple case lists with this stable_id defined '
                         'in the study',
                         extra={'filename_': case,
                                'cause': '%s (already defined in %s)' % (
                                    stable_id,
                                    os.path.relpath(stableid_files[stable_id],
                                                    os.path.dirname(caseListDir)))})
        else:
            stableid_files[stable_id] = case
        if 'case_list_category' in meta_dictionary:
            # Valid case list categories
            VALID_CATEGORIES = ['all_cases_in_study',
                                'all_cases_with_mutation_data',
                                'all_cases_with_cna_data',
                                'all_cases_with_log2_cna_data',
                                'all_cases_with_methylation_data',
                                'all_cases_with_mrna_array_data',
                                'all_cases_with_mrna_rnaseq_data',
                                'all_cases_with_rppa_data',
                                'all_cases_with_microrna_data',
                                'all_cases_with_mutation_and_cna_data',
                                'all_cases_with_mutation_and_cna_and_mrna_data',
                                'all_cases_with_gsva_data',
                                'other']
            # If the case list category is invalid, the importer will crash.
            if meta_dictionary['case_list_category'] not in VALID_CATEGORIES:
                logger.error('Invalid case list category',
                             extra={'filename_': case,
                                    'cause': meta_dictionary['case_list_category']})
        # Check for any duplicate sample IDs
        sample_ids = [x.strip() for x in meta_dictionary['case_list_ids'].split('\t')]
        seen_sample_ids = set()
        dupl_sample_ids = set()
        for sample_id in sample_ids:
            if sample_id not in seen_sample_ids:
                seen_sample_ids.add(sample_id)
            else:
                dupl_sample_ids.add(sample_id)
        # Duplicate samples IDs are removed by the importer, therefore this is
        # only a warning.
        if len(dupl_sample_ids) > 0:
            logger.warning('Duplicate Sample ID in case list',
                           extra={'filename_': case,
                                  'cause': ', '.join(dupl_sample_ids)})
        for value in seen_sample_ids:
            # Compare case list sample ids with clinical file
            # NOTE(review): DEFINED_SAMPLE_IDS is a module-level global,
            # presumably populated by validate_study() before this runs —
            # confirm call order with callers
            if value not in DEFINED_SAMPLE_IDS:
                logger.error(
                    'Sample id not defined in clinical file',
                    extra={'filename_': case,
                           'cause': value})
            # Check if there are white spaces in the sample id
            if ' ' in value:
                logger.error(
                    'White space in sample id is not supported',
                    extra={'filename_': case,
                           'cause': value})
    logger.info('Validation of case list folder complete')
    return stableid_files
def validate_defined_caselists(cancer_study_id, case_list_ids, file_types, logger):
    """Validate the set of case lists defined in a study.

    Args:
        cancer_study_id (str): the study ID to be expected in the stable IDs
        case_list_ids (Iterable[str]): stable ids of defined case lists
        file_types (Dict[str, str]): listing of the MetaFileTypes with high-
            dimensional data in this study--these may imply certain case lists
        logger: logging.Logger instance to log output to
    """
    # every study is expected to define a global '<study id>_all' case list
    global_list_id = cancer_study_id + '_all'
    if global_list_id not in case_list_ids:
        logger.error(
            "No case list found for stable_id '%s', consider adding "
            "'add_global_case_list: true' to the study metadata file",
            global_list_id)
    # TODO: check for required suffixes based on the defined profiles
def validate_dependencies(validators_by_meta_type, logger):
    """Validation after all meta files are individually validated.

    Here we validate that the required cross-linking between expression,
    zscore, gsva score and gsva pvalue files is present in the form of
    source_stable_id, which is used to link the profiles to each other.
    """
    # retrieve values from cbioportal_common.py
    # NOTE(review): these module-level attributes are presumably populated as
    # a side effect of parsing the individual meta files — confirm in
    # cbioportal_common before reordering calls to this function
    expression_stable_ids = cbioportal_common.expression_stable_ids
    # maps each Z-score source_stable_id to the file that declared it
    expression_zscores_source_stable_ids = cbioportal_common.expression_zscores_source_stable_ids
    gsva_scores_stable_id = cbioportal_common.gsva_scores_stable_id
    gsva_scores_source_stable_id = cbioportal_common.gsva_scores_source_stable_id
    gsva_pvalues_source_stable_id = cbioportal_common.gsva_pvalues_source_stable_id
    gsva_scores_filename = cbioportal_common.gsva_scores_filename
    gsva_pvalues_filename = cbioportal_common.gsva_pvalues_filename
    # validation specific for Z-SCORE expression data
    for expression_zscores_source_stable_id in expression_zscores_source_stable_ids:
        # check if 'source_stable_id' of EXPRESSION Z-SCORE is an EXPRESSION 'stable_id'
        if not expression_zscores_source_stable_id in expression_stable_ids:
            logger.error(
                "Invalid source_stable_id. Expected one of ['" + "', '".join(expression_stable_ids) +
                "'], which are stable ids of expression files in this study",
                extra={'filename_': expression_zscores_source_stable_ids[expression_zscores_source_stable_id],
                       'cause': expression_zscores_source_stable_id})
    # validation specific for GSVA data
    if any(m in validators_by_meta_type for m in ["meta_gsva_pvalues", "meta_gsva_scores"]):
        # When missing a gsva file, no subsequent validation will be done
        missing_gsva_file = False
        # check if both files are present
        if not "meta_gsva_pvalues" in validators_by_meta_type:
            logger.error('Required meta GSVA p-value file is missing')
            missing_gsva_file = True
        if not "meta_gsva_scores" in validators_by_meta_type:
            logger.error('Required meta GSVA score file is missing')
            missing_gsva_file = True
        if not "meta_expression" in validators_by_meta_type:
            logger.error('Required meta expression file is missing.')
            missing_gsva_file = True
        # check `source_stable_id` in GSVA_SCORES and GSVA_PVALUES
        if not missing_gsva_file:
            # check if 'source_stable_id' of GSVA_SCORES is an EXPRESSION 'stable_id'
            if not gsva_scores_source_stable_id in expression_stable_ids:
                logger.error(
                    "Invalid source_stable_id. Expected one of ['" + "', '".join(expression_stable_ids) +
                    "'], which are stable ids of expression files in this study",
                    extra={'filename_': gsva_scores_filename,
                           'cause': gsva_scores_source_stable_id})
            # check if 'source_stable_id'of GSVA_PVALUES is an GSVA_SCORES 'stable_id'
            if not gsva_pvalues_source_stable_id == gsva_scores_stable_id:
                logger.error(
                    "Invalid source_stable_id. Expected '" + gsva_scores_stable_id + "', "
                    "which is the stable id of the gsva score file in this study",
                    extra={'filename_': gsva_pvalues_filename,
                           'cause': gsva_pvalues_source_stable_id})
            # Validate that there is a Z-SCORE expression file for GSVA study
            if len(expression_zscores_source_stable_ids) == 0:
                logger.error(
                    "Study contains GSVA data and is missing Z-Score expression file. "
                    "Please add a Z-Score expression file calculated from the same "
                    "expression file used to calculate GSVA scores")
            else:
                # Validate that GSVA_SCORES 'source_stable_id' is also a 'source_stable_id'
                # in a Z-SCORE expression file
                if not gsva_scores_source_stable_id in expression_zscores_source_stable_ids.keys():
                    logger.error(
                        "source_stable_id does not match source_stable_id from Z-Score expression files. "
                        "Please make sure sure that Z-Score expression file is added for '" +
                        gsva_scores_source_stable_id + "'. Current Z-Score source stable ids found are ['" +
                        "', '".join(expression_zscores_source_stable_ids.keys()) + "'].",
                        extra={'filename_': gsva_scores_filename,
                               'cause': gsva_scores_source_stable_id})
def request_from_portal_api(server_url, api_name, logger):
    """Send a request to the portal API and return the decoded JSON object.

    Raises:
        IOError: if the server responds with an HTTP error status.
    """
    if api_name == 'genesets':
        # the genesets endpoint is paginated; request everything in one page
        service_url = server_url + '/api/' + api_name + "?pageSize=999999999"
    # TODO: change API for genes, gene aliases and cancer types to non-legacy
    else:
        service_url = server_url + '/api-legacy/' + api_name
    logger.debug("Requesting %s from portal at '%s'",
                 api_name, server_url)
    # this may raise a requests.exceptions.RequestException subclass,
    # usually because the URL provided on the command line was invalid or
    # did not include the http:// part
    response = requests.get(service_url)
    try:
        response.raise_for_status()
    except requests.exceptions.HTTPError as e:
        # fix: format the exception itself rather than `e.message`, which is
        # deprecated since Python 2.6 and does not exist in Python 3
        raise IOError(
            'Connection error for URL: {url}. Administrator: please check if '
            '[{url}] is accessible. Message: {msg}'.format(url=service_url,
                                                           msg=e))
    return response.json()
def read_portal_json_file(dir_path, api_name, logger):
    """Load and parse the file `api_name`.json in `dir_path`, if present.

    Any forward slashes in the API name are replaced by underscores to
    form the file name. Returns the decoded JSON data, or None when no
    such file exists.
    """
    file_basename = '{}.json'.format(api_name.replace('/', '_'))
    json_path = os.path.join(dir_path, file_basename)
    if not os.path.isfile(json_path):
        return None
    logger.debug('Reading portal information from %s',
                 json_path)
    with open(json_path, 'rU') as json_file:
        return json.load(json_file)
def index_api_data(parsed_json, id_field):
    """Transform a list of dicts into a dict indexed by one of their fields.

    Each entry must carry a unique value under `id_field`; that value becomes
    the key, mapped to a copy of the entry without the id field.

    >>> index_api_data([{'id': 'eggs', 'val1': 42, 'foo': True},
    ...                 {'id': 'spam', 'val1': 1, 'foo': True}], 'id')
    {'eggs': {'val1': 42, 'foo': True}, 'spam': {'val1': 1, 'foo': True}}
    >>> index_api_data([{'id': 'eggs', 'val1': 42, 'foo': True},
    ...                 {'id': 'spam', 'val1': 1, 'foo': True}], 'val1')
    {1: {'foo': True, 'id': 'spam'}, 42: {'foo': True, 'id': 'eggs'}}
    """
    indexed = {}
    for entry in parsed_json:
        if id_field not in entry:
            raise RuntimeError("Field '{}' not found in json object".format(
                id_field))
        key = entry[id_field]
        if key in indexed:
            raise RuntimeError("Identifier '{}' found more than once in json "
                               "object".format(key))
        # store a copy of the entry with the id field stripped out
        remainder = dict(entry)
        del remainder[id_field]
        indexed[key] = remainder
    return indexed
def transform_symbol_entrez_map(json_data,
                                id_field='hugo_gene_symbol',
                                values_field='entrez_gene_id'):
    """Transform a list of homogeneous dicts into a dict of lists.

    Using the values of the `id_field` entries as the keys (upper-cased),
    mapping to lists of corresponding `values_field` entries.

    >>> transform_symbol_entrez_map(
    ...     [{"hugo_gene_symbol": "A1BG", "entrez_gene_id": 1},
    ...      {"hugo_gene_symbol": "A2M", "entrez_gene_id": 2}])
    {'A2M': [2], 'A1BG': [1]}
    >>> transform_symbol_entrez_map(
    ...     [{"gene_alias": "A1B", "entrez_gene_id": 1},
    ...      {"gene_alias": "ANG3", "entrez_gene_id": 738},
    ...      {"gene_alias": "ANG3", "entrez_gene_id": 9068}],
    ...     id_field="gene_alias")
    {'ANG3': [738, 9068], 'A1B': [1]}
    """
    result_dict = {}
    for data_item in json_data:
        symbol = data_item[id_field].upper()
        if symbol not in result_dict:
            result_dict[symbol] = []
        # fix: honor the values_field parameter instead of the previously
        # hard-coded 'entrez_gene_id' key
        result_dict[symbol].append(
            data_item[values_field])
    return result_dict
def index_geneset_id_list(json_data,
                          id_field="genesetId"):
    """Return the unique `id_field` values of `json_data`, in first-seen order.

    Uses a set for the membership test; the previous list-based `in` check
    made this quadratic in the number of gene sets.
    """
    seen_ids = set()
    result_list = []
    for data_item in json_data:
        geneset_id = data_item[id_field]
        if geneset_id not in seen_ids:
            seen_ids.add(geneset_id)
            result_list.append(geneset_id)
    return result_list
def load_portal_info(path, logger, offline=False):
    """Create a PortalInstance object based on a server API or offline dir.

    If `offline` is True, interpret `path` as the path to a directory of JSON
    files. Otherwise expect `path` to be the URL of a cBioPortal server and
    use its web API.
    """
    # each API endpoint is paired with the function that reshapes its JSON
    api_transformers = (
        ('cancertypes',
         lambda json_data: index_api_data(json_data, 'id')),
        ('genes',
         lambda json_data: transform_symbol_entrez_map(
             json_data, 'hugo_gene_symbol')),
        ('genesaliases',
         lambda json_data: transform_symbol_entrez_map(
             json_data, 'gene_alias')),
        ('genesets',
         lambda json_data: index_geneset_id_list(json_data, 'genesetId')))
    portal_dict = {}
    for api_name, transform_function in api_transformers:
        if offline:
            raw_json = read_portal_json_file(path, api_name, logger)
        else:
            raw_json = request_from_portal_api(path, api_name, logger)
        if raw_json is not None and transform_function is not None:
            raw_json = transform_function(raw_json)
        portal_dict[api_name] = raw_json
    if all(d is None for d in portal_dict.values()):
        raise IOError('No portal information found at {}'.format(
            path))
    return PortalInstance(cancer_type_dict=portal_dict['cancertypes'],
                          hugo_entrez_map=portal_dict['genes'],
                          alias_entrez_map=portal_dict['genesaliases'],
                          geneset_id_list=portal_dict['genesets'])
# ------------------------------------------------------------------------------
def interface(args=None):
    """Build the command-line parser and return the parsed arguments.

    When `args` is None, argparse reads from sys.argv as usual.
    """
    parser = argparse.ArgumentParser(description='cBioPortal study validator')
    parser.add_argument('-s', '--study_directory',
                        type=str, required=True, help='path to directory.')
    # the three ways of consulting (or skipping) portal metadata are
    # mutually exclusive
    mode_group = parser.add_mutually_exclusive_group()
    mode_group.add_argument('-u', '--url_server',
                            type=str,
                            default='http://localhost/cbioportal',
                            help='URL to cBioPortal server. You can '
                                 'set this if your URL is not '
                                 'http://localhost/cbioportal')
    mode_group.add_argument('-p', '--portal_info_dir',
                            type=str,
                            help='Path to a directory of cBioPortal '
                                 'info files to be used instead of '
                                 'contacting a server')
    mode_group.add_argument('-n', '--no_portal_checks',
                            action='store_true',
                            help='Skip tests requiring information '
                                 'from the cBioPortal installation')
    parser.add_argument('-P', '--portal_properties', type=str,
                        help='portal.properties file path (default: assumed hg19)',
                        required=False)
    parser.add_argument('-html', '--html_table', type=str, required=False,
                        help='path to html report output file')
    parser.add_argument('-e', '--error_file', type=str, required=False,
                        help='File to which to write line numbers on which '
                             'errors were found, for scripts')
    parser.add_argument('-v', '--verbose', required=False, action='store_true',
                        help='report status info messages in addition '
                             'to errors and warnings')
    parser.add_argument('-r', '--relaxed_clinical_definitions', required=False,
                        action='store_true',
                        help='Option to enable relaxed mode for validator when '
                             'validating clinical data without header definitions')
    parsed_args = parser.parse_args(args)
    return parsed_args
def validate_study(study_dir, portal_instance, logger, relaxed_mode):
    """Validate the study in `study_dir`, logging messages to `logger`, and relaxing
    clinical data validation if `relaxed_mode` is true.

    This will verify that the study is compatible with the portal configuration
    represented by the PortalInstance object `portal_instance`, if its
    attributes are not None.
    """
    # module-level globals consumed by later validation steps (e.g.
    # processCaseListDirectory reads DEFINED_SAMPLE_IDS)
    global DEFINED_CANCER_TYPES
    global DEFINED_SAMPLE_IDS
    global DEFINED_SAMPLE_ATTRIBUTES
    global PATIENTS_WITH_SAMPLES
    # None attributes on the portal instance mean that portal information was
    # unavailable; the corresponding checks are skipped with a warning
    if portal_instance.cancer_type_dict is None:
        logger.warning('Skipping validations relating to cancer types '
                       'defined in the portal')
    if (portal_instance.hugo_entrez_map is None or
            portal_instance.alias_entrez_map is None):
        logger.warning('Skipping validations relating to gene identifiers and '
                       'aliases defined in the portal')
    if portal_instance.geneset_id_list is None:
        logger.warning('Skipping validations relating to gene set identifiers')
    # walk over the meta files in the dir and get properties of the study
    (validators_by_meta_type,
     defined_case_list_fns,
     study_cancer_type,
     study_id) = process_metadata_files(study_dir, portal_instance, logger, relaxed_mode)
    # first parse and validate cancer type files
    studydefined_cancer_types = []
    if cbioportal_common.MetaFileTypes.CANCER_TYPE in validators_by_meta_type:
        cancer_type_validators = validators_by_meta_type[
            cbioportal_common.MetaFileTypes.CANCER_TYPE]
        if len(cancer_type_validators) > 1:
            logger.error(
                'Multiple cancer type files detected',
                extra={'cause': ', '.join(
                    validator.filenameShort for validator in
                    validators_by_meta_type[
                        cbioportal_common.MetaFileTypes.CANCER_TYPE])})
        else:
            cancer_type_validators[0].validate()
            studydefined_cancer_types = (
                cancer_type_validators[0].defined_cancer_types)
    DEFINED_CANCER_TYPES = studydefined_cancer_types
    # next check the cancer type of the meta_study file
    if cbioportal_common.MetaFileTypes.STUDY not in validators_by_meta_type:
        logger.error('No valid study file detected')
        return
    # the study's cancer type must be known to the portal or defined in a
    # cancer_type file within the study itself
    if portal_instance.cancer_type_dict is not None and not (
            study_cancer_type in portal_instance.cancer_type_dict or
            study_cancer_type in DEFINED_CANCER_TYPES):
        logger.error(
            'Cancer type of study is neither known to the portal nor defined '
            'in a cancer_type file',
            extra={'cause': study_cancer_type})
    # then validate the clinical data
    if cbioportal_common.MetaFileTypes.SAMPLE_ATTRIBUTES not in \
            validators_by_meta_type:
        logger.error('No sample attribute file detected')
        return
    if len(validators_by_meta_type[
            cbioportal_common.MetaFileTypes.SAMPLE_ATTRIBUTES]) > 1:
        logger.error(
            'Multiple sample attribute files detected',
            extra={'cause': ', '.join(
                validator.filenameShort for validator in
                validators_by_meta_type[
                    cbioportal_common.MetaFileTypes.SAMPLE_ATTRIBUTES])})
    # parse the data file(s) that define sample IDs valid for this study
    defined_sample_ids = None
    for sample_validator in validators_by_meta_type[
            cbioportal_common.MetaFileTypes.SAMPLE_ATTRIBUTES]:
        sample_validator.validate()
        if sample_validator.fileCouldBeParsed:
            if defined_sample_ids is None:
                defined_sample_ids = set()
            # include parsed sample IDs in the set (union)
            defined_sample_ids |= sample_validator.sampleIds
    # this will be set if a file was successfully parsed
    if defined_sample_ids is None:
        logger.error("Sample file could not be parsed. Please fix "
                     "the problems found there first before continuing.")
        if not relaxed_mode:
            return
    DEFINED_SAMPLE_IDS = defined_sample_ids
    # NOTE(review): when multiple sample attribute files are present, these
    # globals reflect only the last validator of the loop above — confirm
    # whether that is intended
    DEFINED_SAMPLE_ATTRIBUTES = sample_validator.defined_attributes
    PATIENTS_WITH_SAMPLES = sample_validator.patient_ids
    if len(validators_by_meta_type.get(
               cbioportal_common.MetaFileTypes.PATIENT_ATTRIBUTES,
               [])) > 1:
        logger.error(
            'Multiple patient attribute files detected',
            extra={'cause': ', '.join(
                validator.filenameShort for validator in
                validators_by_meta_type[
                    cbioportal_common.MetaFileTypes.PATIENT_ATTRIBUTES])})
    # next validate all other data files
    for meta_file_type in validators_by_meta_type:
        # skip cancer type and clinical files, they have already been validated
        if meta_file_type in (cbioportal_common.MetaFileTypes.CANCER_TYPE,
                              cbioportal_common.MetaFileTypes.SAMPLE_ATTRIBUTES):
            continue
        for validator in validators_by_meta_type[meta_file_type]:
            # if there was no validator for this meta file
            if validator is None:
                continue
            validator.validate()
    # additional validation after all meta files are validated
    validate_dependencies(validators_by_meta_type, logger)
    # finally validate the case list directory if present
    case_list_dirname = os.path.join(study_dir, 'case_lists')
    if not os.path.isdir(case_list_dirname):
        logger.info("No directory named 'case_lists' found, so assuming no custom case lists.")
    else:
        # add case lists IDs defined in the directory to any previous ones
        defined_case_list_fns = processCaseListDirectory(
            case_list_dirname, study_id, logger,
            prev_stableid_files=defined_case_list_fns)
    validate_defined_caselists(
        study_id, defined_case_list_fns.keys(),
        file_types=validators_by_meta_type.keys(),
        logger=logger)
    logger.info('Validation complete')
def get_pom_path():
    """
    Get location of pom.xml. In system and integration test this is mocked.

    The pom.xml is expected at the repository root, six directory levels
    above this source file.
    """
    path = os.path.realpath(__file__)
    # climb six directory levels from this file to the repository root
    for _ in range(6):
        path = os.path.dirname(path)
    return path + "/pom.xml"
def main_validate(args):
    """Main function: process parsed arguments and validate the study.

    Expects *args* to carry study_directory, url_server, html_table, verbose,
    error_file, no_portal_checks, portal_info_dir, portal_properties and
    (optionally) relaxed_clinical_definitions.

    Returns an integer exit status derived from the highest log level emitted
    during validation (tracked by MaxLevelTrackingHandler), or 2 when the
    study directory does not exist.
    """
    # get a logger to emit messages
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    # track the highest message level seen, to derive the exit status later
    exit_status_handler = MaxLevelTrackingHandler()
    logger.addHandler(exit_status_handler)
    # process the options
    study_dir = args.study_directory
    server_url = args.url_server
    html_output_filename = args.html_table
    relaxed_mode = False
    # guard with hasattr: the flag may be absent from older argument parsers
    if hasattr(args, 'relaxed_clinical_definitions') and args.relaxed_clinical_definitions:
        relaxed_mode = True
    # determine the log level for terminal and html output
    output_loglevel = logging.INFO
    if args.verbose:
        output_loglevel = logging.DEBUG
    # check existence of directory
    if not os.path.exists(study_dir):
        # NOTE: Python 2 print-statement syntax
        print >> sys.stderr, 'directory cannot be found: ' + study_dir
        return 2
    # set default message handler
    text_handler = logging.StreamHandler(sys.stdout)
    text_handler.setFormatter(
        cbioportal_common.LogfileStyleFormatter(study_dir))
    # collapse floods of near-identical messages into summarized output
    collapsing_text_handler = cbioportal_common.CollapsingLogMessageHandler(
        capacity=1e6,
        flushLevel=logging.CRITICAL,
        target=text_handler)
    collapsing_text_handler.setLevel(output_loglevel)
    logger.addHandler(collapsing_text_handler)
    # set default to unknown because validator can be run independently from cBioPortal
    cbio_version = "unknown"
    # get pom path to retrieve cBioPortal version
    pom_path = get_pom_path()
    try:
        # parse xml
        xml_root = ET.parse(pom_path).getroot()
    except IOError:
        logger.warning('Unable to read xml containing cBioPortal version.')
    else:
        for xml_child in xml_root:
            # to circumvent the default namespace (possibly varying apache url) split on '}'
            if xml_child.tag.split("}")[1] == "version":
                cbio_version = xml_child.text
    # output cBioPortal version
    logger.info("Running validation from cBioPortal version %s" % cbio_version)
    collapsing_html_handler = None
    html_handler = None
    # add html table handler if applicable
    if html_output_filename:
        # just to make sure users get dependency error at start:
        import jinja2  # pylint: disable=import-error
        html_handler = Jinja2HtmlHandler(
            study_dir,
            html_output_filename,
            cbio_version = cbio_version,
            capacity=1e5)
        # TODO extend CollapsingLogMessageHandler to flush to multiple targets,
        # and get rid of the duplicated buffering of messages here
        collapsing_html_handler = cbioportal_common.CollapsingLogMessageHandler(
            capacity=1e6,
            flushLevel=logging.CRITICAL,
            target=html_handler)
        collapsing_html_handler.setLevel(output_loglevel)
        logger.addHandler(collapsing_html_handler)
    if args.error_file:
        errfile_handler = logging.FileHandler(args.error_file, 'w')
        errfile_handler.setFormatter(ErrorFileFormatter(study_dir))
        # TODO extend CollapsingLogMessageHandler to flush to multiple targets,
        # and get rid of the duplicated buffering of messages here
        coll_errfile_handler = cbioportal_common.CollapsingLogMessageHandler(
            capacity=1e6,
            flushLevel=logging.CRITICAL,
            target=errfile_handler)
        # only warnings and above, and only per-line messages, go to the error file
        coll_errfile_handler.setLevel(logging.WARNING)
        coll_errfile_handler.addFilter(LineMessageFilter())
        logger.addHandler(coll_errfile_handler)
    # load portal-specific information
    if args.no_portal_checks:
        # offline mode: an empty portal instance disables portal-side checks
        portal_instance = PortalInstance(cancer_type_dict=None,
                                         hugo_entrez_map=None,
                                         alias_entrez_map=None,
                                         geneset_id_list=None)
    elif args.portal_info_dir:
        portal_instance = load_portal_info(args.portal_info_dir, logger,
                                           offline=True)
    else:
        portal_instance = load_portal_info(server_url, logger)
    if args.portal_properties:
        portal_instance.load_genome_info(args.portal_properties)
    validate_study(study_dir, portal_instance, logger, relaxed_mode)
    if html_handler is not None:
        # flush the buffered messages into the html report and render it
        collapsing_html_handler.flush()
        html_handler.generateHtml()
    return exit_status_handler.get_exit_status()
# ------------------------------------------------------------------------------
# vamanos
if __name__ == '__main__':
    try:
        # parse command line options
        parsed_args = interface()
        # run the script
        exit_status = main_validate(parsed_args)
    finally:
        # flush and close all log handlers even if validation raised
        logging.shutdown()
        del logging._handlerList[:]  # workaround for harmless exceptions on exit
    # map the numeric exit status to a human-readable verdict
    # (NOTE: Python 2 print-statement syntax)
    print >>sys.stderr, ('Validation of study {status}.'.format(
        status={0: 'succeeded',
                1: 'failed',
                2: 'not performed as problems occurred',
                3: 'succeeded with warnings'}.get(exit_status, 'unknown')))
    sys.exit(exit_status)
| agpl-3.0 |
timpalpant/calibre | src/calibre/ebooks/metadata/rb.py | 24 | 1669 | __license__ = 'GPL v3'
__copyright__ = '2008, Ashish Kulkarni <kulkarni.ashish@gmail.com>'
'''Read meta information from RB files'''
import sys, struct
from calibre.ebooks.metadata import MetaInformation, string_to_authors
MAGIC = '\xb0\x0c\xb0\x0c\x02\x00NUVO\x00\x00\x00\x00'
def get_metadata(stream):
    """ Return metadata as a L{MetaInfo} object

    Parses the Rocket eBook (.rb) binary format: a fixed 24-byte header,
    a table of contents, and an INFO section of KEY=VALUE lines from which
    TITLE and AUTHOR are extracted. Falls back to 'Unknown' placeholders
    when the header or INFO section cannot be found.
    """
    title = 'Unknown'
    mi = MetaInformation(title, ['Unknown'])
    stream.seek(0)
    try:
        if not stream.read(14) == MAGIC:
            print >>sys.stderr, u'Couldn\'t read RB header from file'
            return mi
        # skip the remaining 10 bytes of the 24-byte fixed header
        stream.read(10)
        # little-endian unsigned 32-bit integer reader
        read_i32 = lambda: struct.unpack('<I', stream.read(4))[0]
        # jump to the table of contents and read the entry count
        stream.seek(read_i32())
        toc_count = read_i32()
        # scan TOC entries for the INFO section (flag == 2)
        for i in range(toc_count):
            stream.read(32)  # skip the 32-byte entry name
            length, offset, flag = read_i32(), read_i32(), read_i32()
            if flag == 2: break
        else:
            print >>sys.stderr, u'Couldn\'t find INFO from RB file'
            return mi
        stream.seek(offset)
        info = stream.read(length).splitlines()
        for line in info:
            if not '=' in line:
                continue
            # NOTE(review): split('=') raises if a value itself contains '=';
            # split('=', 1) would be safer -- confirm against the RB format spec
            key, value = line.split('=')
            if key.strip() == 'TITLE':
                mi.title = value.strip()
            elif key.strip() == 'AUTHOR':
                mi.author = value
                mi.authors = string_to_authors(value)
    except Exception as err:
        msg = u'Couldn\'t read metadata from rb: %s with error %s'%(mi.title, unicode(err))
        print >>sys.stderr, msg.encode('utf8')
        raise
    return mi
| gpl-3.0 |
timj/scons | test/option--W.py | 5 | 1590 | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"

import TestSCons

test = TestSCons.TestSCons()

# an empty SConstruct is enough: we only exercise option parsing
test.write('SConstruct', "")

# -W and its long-option spellings are accepted but not yet implemented;
# verify each spelling reports the "not yet implemented" warning
test.option_not_yet_implemented('-W', 'foo .')

test.option_not_yet_implemented('--what-if', '=foo .')

test.option_not_yet_implemented('--new-file', '=foo .')

test.option_not_yet_implemented('--assume-new', '=foo .')

test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
jordanemedlock/psychtruths | temboo/Library/Google/Drive/Permissions/List.py | 5 | 4627 | # -*- coding: utf-8 -*-
###############################################################################
#
# List
# Lists a file's permissions.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class List(Choreography):
    """Choreo wrapper for Google Drive's "list a file's permissions" call."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the List Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(List, self).__init__(temboo_session, '/Library/Google/Drive/Permissions/List')

    def new_input_set(self):
        # factory for the input container used when executing this Choreo
        return ListInputSet()

    def _make_result_set(self, result, path):
        # wraps raw execution output in the List-specific result type
        return ListResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # tracks an asynchronous execution of this Choreo
        return ListChoreographyExecution(session, exec_id, path)
class ListInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the List
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    # Each setter stores its value under the corresponding Temboo input name
    # via the inherited InputSet._set_input helper. Per the docstrings below,
    # only FileID is strictly required; the credential inputs are conditional.

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
        """
        super(ListInputSet, self)._set_input('AccessToken', value)

    def set_ClientID(self, value):
        """
        Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
        """
        super(ListInputSet, self)._set_input('ClientID', value)

    def set_ClientSecret(self, value):
        """
        Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
        """
        super(ListInputSet, self)._set_input('ClientSecret', value)

    def set_Fields(self, value):
        """
        Set the value of the Fields input for this Choreo. ((optional, string) Selector specifying a subset of fields to include in the response.)
        """
        super(ListInputSet, self)._set_input('Fields', value)

    def set_FileID(self, value):
        """
        Set the value of the FileID input for this Choreo. ((required, string) The ID of the file.)
        """
        super(ListInputSet, self)._set_input('FileID', value)

    def set_RefreshToken(self, value):
        """
        Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
        """
        super(ListInputSet, self)._set_input('RefreshToken', value)
class ListResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the List Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the builtin 'str'; kept
        # unchanged for compatibility with the generated Temboo code
        return json.loads(str)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
        """
        return self._output.get('Response', None)

    def get_NewAccessToken(self):
        """
        Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
        """
        return self._output.get('NewAccessToken', None)
class ListChoreographyExecution(ChoreographyExecution):
    """Tracks an asynchronous execution of the List Choreo."""

    def _make_result_set(self, response, path):
        # wraps the eventual response in the List-specific result type
        return ListResultSet(response, path)
| apache-2.0 |
kaichogami/sympy | sympy/polys/tests/test_ring_series.py | 29 | 23563 | from sympy.polys.domains import QQ, EX, RR
from sympy.polys.rings import ring
from sympy.polys.ring_series import (_invert_monoms, rs_integrate,
rs_trunc, rs_mul, rs_square, rs_pow, _has_constant_term, rs_hadamard_exp,
rs_series_from_list, rs_exp, rs_log, rs_newton, rs_series_inversion,
rs_compose_add, rs_asin, rs_atan, rs_atanh, rs_tan, rs_cot, rs_sin, rs_cos,
rs_cos_sin, rs_sinh, rs_cosh, rs_tanh, _tan1, rs_fun, rs_nth_root,
rs_LambertW, rs_series_reversion, rs_is_puiseux, rs_series)
from sympy.utilities.pytest import raises
from sympy.core.compatibility import range
from sympy.core.symbol import symbols
from sympy.functions import (sin, cos, exp, tan, cot, atan, asin, atanh,
tanh, log, sqrt)
from sympy.core.numbers import Rational
from sympy.core import expand
def is_close(a, b):
    """Assert that a and b agree to within an absolute tolerance of 1e-10."""
    assert abs(a - b) < 10**(-10)
def test_ring_series1():
    """Basic checks for _invert_monoms, rs_hadamard_exp and rs_integrate."""
    R, x = ring('x', QQ)
    p = x**4 + 2*x**3 + 3*x + 4
    # _invert_monoms reverses the coefficient list of p
    assert _invert_monoms(p) == 4*x**4 + 3*x**3 + 2*x + 1
    # Hadamard exp divides the coefficient of x**n by n!
    assert rs_hadamard_exp(p) == x**4/24 + x**3/3 + 3*x + 4
    R, x = ring('x', QQ)
    p = x**4 + 2*x**3 + 3*x + 4
    assert rs_integrate(p, x) == x**5/5 + x**4/2 + 3*x**2/2 + 4*x
    # multivariate: integration is with respect to the given variable only
    R, x, y = ring('x, y', QQ)
    p = x**2*y**2 + x + 1
    assert rs_integrate(p, x) == x**3*y**2/3 + x**2/2 + x
    assert rs_integrate(p, y) == x**2*y**3/3 + x*y + y
def test_trunc():
    """rs_trunc drops all terms of degree >= prec in the given variable."""
    R, x, y, t = ring('x, y, t', QQ)
    p = (y + t*x)**4
    # truncate at degree 3 in x: only the x**0..x**2 terms survive
    p1 = rs_trunc(p, x, 3)
    assert p1 == y**4 + 4*y**3*t*x + 6*y**2*t**2*x**2
def test_mul_trunc():
    """rs_mul multiplies series, truncating at the given precision."""
    R, x, y, t = ring('x, y, t', QQ)
    p = 1 + t*x + t*y
    # square twice: (1 + t*x + t*y)**4 truncated at t**3
    for i in range(2):
        p = rs_mul(p, p, t, 3)
    assert p == 6*x**2*t**2 + 12*x*y*t**2 + 6*y**2*t**2 + 4*x*t + 4*y*t + 1
    p = 1 + t*x + t*y + t**2*x*y
    p1 = rs_mul(p, p, t, 2)
    assert p1 == 1 + 2*t*x + 2*t*y
    R1, z = ring('z', QQ)
    def test1(p):
        # multiplying elements of different rings must raise ValueError
        p2 = rs_mul(p, z, x, 2)
    raises(ValueError, lambda: test1(p))
    p1 = 2 + 2*x + 3*x**2
    p2 = 3 + x**2
    assert rs_mul(p1, p2, x, 4) == 2*x**3 + 11*x**2 + 6*x + 6
def test_square_trunc():
R, x, y, t = ring('x, y, t', QQ)
p = (1 + t*x + t*y)*2
p1 = rs_mul(p, p, x, 3)
p2 = rs_square(p, x, 3)
assert p1 == p2
p = 1 + x + x**2 + x**3
assert rs_square(p, x, 4) == 4*x**3 + 3*x**2 + 2*x + 1
def test_pow_trunc():
R, x, y, z = ring('x, y, z', QQ)
p0 = y + x*z
p = p0**16
for xx in (x, y, z):
p1 = rs_trunc(p, xx, 8)
p2 = rs_pow(p0, 16, xx, 8)
assert p1 == p2
p = 1 + x
p1 = rs_pow(p, 3, x, 2)
assert p1 == 1 + 3*x
assert rs_pow(p, 0, x, 2) == 1
assert rs_pow(p, -2, x, 2) == 1 - 2*x
p = x + y
assert rs_pow(p, 3, y, 3) == x**3 + 3*x**2*y + 3*x*y**2
assert rs_pow(1 + x, Rational(2, 3), x, 4) == 4*x**3/81 - x**2/9 + 2*x/3 + 1
def test_has_constant_term():
    """_has_constant_term detects an x-degree-zero term in a series."""
    R, x, y, z = ring('x, y, z', QQ)
    p = y + x*z
    assert _has_constant_term(p, x)
    p = x + x**4
    assert not _has_constant_term(p, x)
    p = 1 + x + x**4
    assert _has_constant_term(p, x)
    # NOTE(review): trailing assignment with no assertion -- possibly a
    # missing `assert _has_constant_term(p, x)`; confirm intent
    p = x + y + x*z
def test_inversion():
R, x = ring('x', QQ)
p = 2 + x + 2*x**2
n = 5
p1 = rs_series_inversion(p, x, n)
assert rs_trunc(p*p1, x, n) == 1
R, x, y = ring('x, y', QQ)
p = 2 + x + 2*x**2 + y*x + x**2*y
p1 = rs_series_inversion(p, x, n)
assert rs_trunc(p*p1, x, n) == 1
R, x, y = ring('x, y', QQ)
p = 1 + x + y
def test2(p):
p1 = rs_series_inversion(p, x, 4)
raises(NotImplementedError, lambda: test2(p))
p = R.zero
def test3(p):
p1 = rs_series_inversion(p, x, 3)
raises(ZeroDivisionError, lambda: test3(p))
def test_series_reversion():
R, x, y = ring('x, y', QQ)
p = rs_tan(x, x, 10)
assert rs_series_reversion(p, x, 8, y) == rs_atan(y, y, 8)
p = rs_sin(x, x, 10)
assert rs_series_reversion(p, x, 8, y) == 5*y**7/112 + 3*y**5/40 + \
y**3/6 + y
def test_series_from_list():
R, x = ring('x', QQ)
p = 1 + 2*x + x**2 + 3*x**3
c = [1, 2, 0, 4, 4]
r = rs_series_from_list(p, c, x, 5)
pc = R.from_list(list(reversed(c)))
r1 = rs_trunc(pc.compose(x, p), x, 5)
assert r == r1
R, x, y = ring('x, y', QQ)
c = [1, 3, 5, 7]
p1 = rs_series_from_list(x + y, c, x, 3, concur=0)
p2 = rs_trunc((1 + 3*(x+y) + 5*(x+y)**2 + 7*(x+y)**3), x, 3)
assert p1 == p2
R, x = ring('x', QQ)
h = 25
p = rs_exp(x, x, h) - 1
p1 = rs_series_from_list(p, c, x, h)
p2 = 0
for i, cx in enumerate(c):
p2 += cx*rs_pow(p, i, x, h)
assert p1 == p2
def test_log():
R, x = ring('x', QQ)
p = 1 + x
p1 = rs_log(p, x, 4)/x**2
assert p1 == 1/3*x - 1/2 + x**(-1)
p = 1 + x +2*x**2/3
p1 = rs_log(p, x, 9)
assert p1 == -17*x**8/648 + 13*x**7/189 - 11*x**6/162 - x**5/45 + \
7*x**4/36 - x**3/3 + x**2/6 + x
p2 = rs_series_inversion(p, x, 9)
p3 = rs_log(p2, x, 9)
assert p3 == -p1
R, x, y = ring('x, y', QQ)
p = 1 + x + 2*y*x**2
p1 = rs_log(p, x, 6)
assert p1 == (4*x**5*y**2 - 2*x**5*y - 2*x**4*y**2 + x**5/5 + 2*x**4*y -
x**4/4 - 2*x**3*y + x**3/3 + 2*x**2*y - x**2/2 + x)
# Constant term in series
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_log(x + a, x, 5) == -EX(1/(4*a**4))*x**4 + EX(1/(3*a**3))*x**3 \
- EX(1/(2*a**2))*x**2 + EX(1/a)*x + EX(log(a))
assert rs_log(x + x**2*y + a, x, 4) == -EX(a**(-2))*x**3*y + \
EX(1/(3*a**3))*x**3 + EX(1/a)*x**2*y - EX(1/(2*a**2))*x**2 + \
EX(1/a)*x + EX(log(a))
p = x + x**2 + 3
assert rs_log(p, x, 10).compose(x, 5) == EX(log(3) + 19281291595/9920232)
def test_exp():
R, x = ring('x', QQ)
p = x + x**4
for h in [10, 30]:
q = rs_series_inversion(1 + p, x, h) - 1
p1 = rs_exp(q, x, h)
q1 = rs_log(p1, x, h)
assert q1 == q
p1 = rs_exp(p, x, 30)
assert p1.coeff(x**29) == QQ(74274246775059676726972369, 353670479749588078181744640000)
prec = 21
p = rs_log(1 + x, x, prec)
p1 = rs_exp(p, x, prec)
assert p1 == x + 1
# Constant term in series
a = symbols('a')
R, x, y = ring('x, y', QQ[exp(a), a])
assert rs_exp(x + a, x, 5) == exp(a)*x**4/24 + exp(a)*x**3/6 + \
exp(a)*x**2/2 + exp(a)*x + exp(a)
assert rs_exp(x + x**2*y + a, x, 5) == exp(a)*x**4*y**2/2 + \
exp(a)*x**4*y/2 + exp(a)*x**4/24 + exp(a)*x**3*y + \
exp(a)*x**3/6 + exp(a)*x**2*y + exp(a)*x**2/2 + exp(a)*x + exp(a)
R, x, y = ring('x, y', EX)
assert rs_exp(x + a, x, 5) == EX(exp(a)/24)*x**4 + EX(exp(a)/6)*x**3 + \
EX(exp(a)/2)*x**2 + EX(exp(a))*x + EX(exp(a))
assert rs_exp(x + x**2*y + a, x, 5) == EX(exp(a)/2)*x**4*y**2 + \
EX(exp(a)/2)*x**4*y + EX(exp(a)/24)*x**4 + EX(exp(a))*x**3*y + \
EX(exp(a)/6)*x**3 + EX(exp(a))*x**2*y + EX(exp(a)/2)*x**2 + \
EX(exp(a))*x + EX(exp(a))
def test_newton():
    """rs_newton returns the truncated Newton (power-sum) series of p."""
    R, x = ring('x', QQ)
    p = x**2 - 2
    r = rs_newton(p, x, 4)
    # (dead local `f = [1, 0, -2]` -- the coefficient list of p -- removed)
    assert r == 8*x**4 + 4*x**2 + 2
def test_compose_add():
R, x = ring('x', QQ)
p1 = x**3 - 1
p2 = x**2 - 2
assert rs_compose_add(p1, p2) == x**6 - 6*x**4 - 2*x**3 + 12*x**2 - 12*x - 7
def test_fun():
R, x, y = ring('x, y', QQ)
p = x*y + x**2*y**3 + x**5*y
assert rs_fun(p, rs_tan, x, 10) == rs_tan(p, x, 10)
assert rs_fun(p, _tan1, x, 10) == _tan1(p, x, 10)
def test_nth_root():
R, x, y = ring('x, y', QQ)
r1 = rs_nth_root(1 + x**2*y, 4, x, 10)
assert rs_nth_root(1 + x**2*y, 4, x, 10) == -77*x**8*y**4/2048 + \
7*x**6*y**3/128 - 3*x**4*y**2/32 + x**2*y/4 + 1
assert rs_nth_root(1 + x*y + x**2*y**3, 3, x, 5) == -x**4*y**6/9 + \
5*x**4*y**5/27 - 10*x**4*y**4/243 - 2*x**3*y**4/9 + 5*x**3*y**3/81 + \
x**2*y**3/3 - x**2*y**2/9 + x*y/3 + 1
assert rs_nth_root(8*x, 3, x, 3) == 2*x**QQ(1, 3)
assert rs_nth_root(8*x + x**2 + x**3, 3, x, 3) == x**QQ(4,3)/12 + 2*x**QQ(1,3)
r = rs_nth_root(8*x + x**2*y + x**3, 3, x, 4)
assert r == -x**QQ(7,3)*y**2/288 + x**QQ(7,3)/12 + x**QQ(4,3)*y/12 + 2*x**QQ(1,3)
# Constant term in series
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_nth_root(x + a, 3, x, 4) == EX(5/(81*a**QQ(8, 3)))*x**3 - \
EX(1/(9*a**QQ(5, 3)))*x**2 + EX(1/(3*a**QQ(2, 3)))*x + EX(a**QQ(1, 3))
assert rs_nth_root(x**QQ(2, 3) + x**2*y + 5, 2, x, 3) == -EX(sqrt(5)/100)*\
x**QQ(8, 3)*y - EX(sqrt(5)/16000)*x**QQ(8, 3) + EX(sqrt(5)/10)*x**2*y + \
EX(sqrt(5)/2000)*x**2 - EX(sqrt(5)/200)*x**QQ(4, 3) + \
EX(sqrt(5)/10)*x**QQ(2, 3) + EX(sqrt(5))
def test_atan():
R, x, y = ring('x, y', QQ)
assert rs_atan(x, x, 9) == -x**7/7 + x**5/5 - x**3/3 + x
assert rs_atan(x*y + x**2*y**3, x, 9) == 2*x**8*y**11 - x**8*y**9 + \
2*x**7*y**9 - x**7*y**7/7 - x**6*y**9/3 + x**6*y**7 - x**5*y**7 + \
x**5*y**5/5 - x**4*y**5 - x**3*y**3/3 + x**2*y**3 + x*y
# Constant term in series
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_atan(x + a, x, 5) == -EX((a**3 - a)/(a**8 + 4*a**6 + 6*a**4 + \
4*a**2 + 1))*x**4 + EX((3*a**2 - 1)/(3*a**6 + 9*a**4 + \
9*a**2 + 3))*x**3 - EX(a/(a**4 + 2*a**2 + 1))*x**2 + \
EX(1/(a**2 + 1))*x + EX(atan(a))
assert rs_atan(x + x**2*y + a, x, 4) == -EX(2*a/(a**4 + 2*a**2 + 1)) \
*x**3*y + EX((3*a**2 - 1)/(3*a**6 + 9*a**4 + 9*a**2 + 3))*x**3 + \
EX(1/(a**2 + 1))*x**2*y - EX(a/(a**4 + 2*a**2 + 1))*x**2 + EX(1/(a**2 \
+ 1))*x + EX(atan(a))
def test_asin():
R, x, y = ring('x, y', QQ)
assert rs_asin(x + x*y, x, 5) == x**3*y**3/6 + x**3*y**2/2 + x**3*y/2 + \
x**3/6 + x*y + x
assert rs_asin(x*y + x**2*y**3, x, 6) == x**5*y**7/2 + 3*x**5*y**5/40 + \
x**4*y**5/2 + x**3*y**3/6 + x**2*y**3 + x*y
def test_tan():
R, x, y = ring('x, y', QQ)
assert rs_tan(x, x, 9)/x**5 == \
17/315*x**2 + 2/15 + 1/3*x**(-2) + x**(-4)
assert rs_tan(x*y + x**2*y**3, x, 9) == 4*x**8*y**11/3 + 17*x**8*y**9/45 + \
4*x**7*y**9/3 + 17*x**7*y**7/315 + x**6*y**9/3 + 2*x**6*y**7/3 + \
x**5*y**7 + 2*x**5*y**5/15 + x**4*y**5 + x**3*y**3/3 + x**2*y**3 + x*y
# Constant term in series
a = symbols('a')
R, x, y = ring('x, y', QQ[tan(a), a])
assert rs_tan(x + a, x, 5) == (tan(a)**5 + 5*tan(a)**3/3 +
2*tan(a)/3)*x**4 + (tan(a)**4 + 4*tan(a)**2/3 + 1/3)*x**3 + \
(tan(a)**3 + tan(a))*x**2 + (tan(a)**2 + 1)*x + tan(a)
assert rs_tan(x + x**2*y + a, x, 4) == (2*tan(a)**3 + 2*tan(a))*x**3*y + \
(tan(a)**4 + 4/3*tan(a)**2 + 1/3)*x**3 + (tan(a)**2 + 1)*x**2*y + \
(tan(a)**3 + tan(a))*x**2 + (tan(a)**2 + 1)*x + tan(a)
R, x, y = ring('x, y', EX)
assert rs_tan(x + a, x, 5) == EX(tan(a)**5 + 5*tan(a)**3/3 +
2*tan(a)/3)*x**4 + EX(tan(a)**4 + 4*tan(a)**2/3 + EX(1)/3)*x**3 + \
EX(tan(a)**3 + tan(a))*x**2 + EX(tan(a)**2 + 1)*x + EX(tan(a))
assert rs_tan(x + x**2*y + a, x, 4) == EX(2*tan(a)**3 +
2*tan(a))*x**3*y + EX(tan(a)**4 + 4*tan(a)**2/3 + EX(1)/3)*x**3 + \
EX(tan(a)**2 + 1)*x**2*y + EX(tan(a)**3 + tan(a))*x**2 + \
EX(tan(a)**2 + 1)*x + EX(tan(a))
p = x + x**2 + 5
assert rs_atan(p, x, 10).compose(x, 10) == EX(atan(5) + 67701870330562640 / \
668083460499)
def test_cot():
R, x, y = ring('x, y', QQ)
assert rs_cot(x**6 + x**7, x, 8) == x**(-6) - x**(-5) + x**(-4) - \
x**(-3) + x**(-2) - x**(-1) + 1 - x + x**2 - x**3 + x**4 - x**5 + \
2*x**6/3 - 4*x**7/3
assert rs_cot(x + x**2*y, x, 5) == -x**4*y**5 - x**4*y/15 + x**3*y**4 - \
x**3/45 - x**2*y**3 - x**2*y/3 + x*y**2 - x/3 - y + x**(-1)
def test_sin():
R, x, y = ring('x, y', QQ)
assert rs_sin(x, x, 9)/x**5 == \
-1/5040*x**2 + 1/120 - 1/6*x**(-2) + x**(-4)
assert rs_sin(x*y + x**2*y**3, x, 9) == x**8*y**11/12 - \
x**8*y**9/720 + x**7*y**9/12 - x**7*y**7/5040 - x**6*y**9/6 + \
x**6*y**7/24 - x**5*y**7/2 + x**5*y**5/120 - x**4*y**5/2 - \
x**3*y**3/6 + x**2*y**3 + x*y
# Constant term in series
a = symbols('a')
R, x, y = ring('x, y', QQ[sin(a), cos(a), a])
assert rs_sin(x + a, x, 5) == sin(a)*x**4/24 - cos(a)*x**3/6 - \
sin(a)*x**2/2 + cos(a)*x + sin(a)
assert rs_sin(x + x**2*y + a, x, 5) == -sin(a)*x**4*y**2/2 - \
cos(a)*x**4*y/2 + sin(a)*x**4/24 - sin(a)*x**3*y - cos(a)*x**3/6 + \
cos(a)*x**2*y - sin(a)*x**2/2 + cos(a)*x + sin(a)
R, x, y = ring('x, y', EX)
assert rs_sin(x + a, x, 5) == EX(sin(a)/24)*x**4 - EX(cos(a)/6)*x**3 - \
EX(sin(a)/2)*x**2 + EX(cos(a))*x + EX(sin(a))
assert rs_sin(x + x**2*y + a, x, 5) == -EX(sin(a)/2)*x**4*y**2 - \
EX(cos(a)/2)*x**4*y + EX(sin(a)/24)*x**4 - EX(sin(a))*x**3*y - \
EX(cos(a)/6)*x**3 + EX(cos(a))*x**2*y - EX(sin(a)/2)*x**2 + \
EX(cos(a))*x + EX(sin(a))
def test_cos():
R, x, y = ring('x, y', QQ)
assert rs_cos(x, x, 9)/x**5 == \
1/40320*x**3 - 1/720*x + 1/24*x**(-1) - 1/2*x**(-3) + x**(-5)
assert rs_cos(x*y + x**2*y**3, x, 9) == x**8*y**12/24 - \
x**8*y**10/48 + x**8*y**8/40320 + x**7*y**10/6 - \
x**7*y**8/120 + x**6*y**8/4 - x**6*y**6/720 + x**5*y**6/6 - \
x**4*y**6/2 + x**4*y**4/24 - x**3*y**4 - x**2*y**2/2 + 1
# Constant term in series
a = symbols('a')
R, x, y = ring('x, y', QQ[sin(a), cos(a), a])
assert rs_cos(x + a, x, 5) == cos(a)*x**4/24 + sin(a)*x**3/6 - \
cos(a)*x**2/2 - sin(a)*x + cos(a)
assert rs_cos(x + x**2*y + a, x, 5) == -cos(a)*x**4*y**2/2 + \
sin(a)*x**4*y/2 + cos(a)*x**4/24 - cos(a)*x**3*y + sin(a)*x**3/6 - \
sin(a)*x**2*y - cos(a)*x**2/2 - sin(a)*x + cos(a)
R, x, y = ring('x, y', EX)
assert rs_cos(x + a, x, 5) == EX(cos(a)/24)*x**4 + EX(sin(a)/6)*x**3 - \
EX(cos(a)/2)*x**2 - EX(sin(a))*x + EX(cos(a))
assert rs_cos(x + x**2*y + a, x, 5) == -EX(cos(a)/2)*x**4*y**2 + \
EX(sin(a)/2)*x**4*y + EX(cos(a)/24)*x**4 - EX(cos(a))*x**3*y + \
EX(sin(a)/6)*x**3 - EX(sin(a))*x**2*y - EX(cos(a)/2)*x**2 - \
EX(sin(a))*x + EX(cos(a))
def test_cos_sin():
    """rs_cos_sin returns the (cos, sin) pair matching rs_cos/rs_sin."""
    R, x, y = ring('x, y', QQ)
    # use c/s rather than cos/sin to avoid shadowing the imported sympy
    # cos and sin functions
    c, s = rs_cos_sin(x, x, 9)
    assert c == rs_cos(x, x, 9)
    assert s == rs_sin(x, x, 9)
    c, s = rs_cos_sin(x + x*y, x, 5)
    assert c == rs_cos(x + x*y, x, 5)
    assert s == rs_sin(x + x*y, x, 5)
def test_atanh():
R, x, y = ring('x, y', QQ)
assert rs_atanh(x, x, 9)/x**5 == 1/7*x**2 + 1/5 + 1/3*x**(-2) + x**(-4)
assert rs_atanh(x*y + x**2*y**3, x, 9) == 2*x**8*y**11 + x**8*y**9 + \
2*x**7*y**9 + x**7*y**7/7 + x**6*y**9/3 + x**6*y**7 + x**5*y**7 + \
x**5*y**5/5 + x**4*y**5 + x**3*y**3/3 + x**2*y**3 + x*y
# Constant term in series
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_atanh(x + a, x, 5) == EX((a**3 + a)/(a**8 - 4*a**6 + 6*a**4 - \
4*a**2 + 1))*x**4 - EX((3*a**2 + 1)/(3*a**6 - 9*a**4 + \
9*a**2 - 3))*x**3 + EX(a/(a**4 - 2*a**2 + 1))*x**2 - EX(1/(a**2 - \
1))*x + EX(atanh(a))
assert rs_atanh(x + x**2*y + a, x, 4) == EX(2*a/(a**4 - 2*a**2 + \
1))*x**3*y - EX((3*a**2 + 1)/(3*a**6 - 9*a**4 + 9*a**2 - 3))*x**3 - \
EX(1/(a**2 - 1))*x**2*y + EX(a/(a**4 - 2*a**2 + 1))*x**2 - \
EX(1/(a**2 - 1))*x + EX(atanh(a))
p = x + x**2 + 5
assert rs_atanh(p, x, 10).compose(x, 10) == EX(-733442653682135/5079158784 \
+ atanh(5))
def test_sinh():
R, x, y = ring('x, y', QQ)
assert rs_sinh(x, x, 9)/x**5 == 1/5040*x**2 + 1/120 + 1/6*x**(-2) + x**(-4)
assert rs_sinh(x*y + x**2*y**3, x, 9) == x**8*y**11/12 + \
x**8*y**9/720 + x**7*y**9/12 + x**7*y**7/5040 + x**6*y**9/6 + \
x**6*y**7/24 + x**5*y**7/2 + x**5*y**5/120 + x**4*y**5/2 + \
x**3*y**3/6 + x**2*y**3 + x*y
def test_cosh():
R, x, y = ring('x, y', QQ)
assert rs_cosh(x, x, 9)/x**5 == 1/40320*x**3 + 1/720*x + 1/24*x**(-1) + \
1/2*x**(-3) + x**(-5)
assert rs_cosh(x*y + x**2*y**3, x, 9) == x**8*y**12/24 + \
x**8*y**10/48 + x**8*y**8/40320 + x**7*y**10/6 + \
x**7*y**8/120 + x**6*y**8/4 + x**6*y**6/720 + x**5*y**6/6 + \
x**4*y**6/2 + x**4*y**4/24 + x**3*y**4 + x**2*y**2/2 + 1
def test_tanh():
R, x, y = ring('x, y', QQ)
assert rs_tanh(x, x, 9)/x**5 == -17/315*x**2 + 2/15 - 1/3*x**(-2) + x**(-4)
assert rs_tanh(x*y + x**2*y**3, x, 9) == 4*x**8*y**11/3 - \
17*x**8*y**9/45 + 4*x**7*y**9/3 - 17*x**7*y**7/315 - x**6*y**9/3 + \
2*x**6*y**7/3 - x**5*y**7 + 2*x**5*y**5/15 - x**4*y**5 - \
x**3*y**3/3 + x**2*y**3 + x*y
# Constant term in series
a = symbols('a')
R, x, y = ring('x, y', EX)
assert rs_tanh(x + a, x, 5) == EX(tanh(a)**5 - 5*tanh(a)**3/3 +
2*tanh(a)/3)*x**4 + EX(-tanh(a)**4 + 4*tanh(a)**2/3 - QQ(1, 3))*x**3 + \
EX(tanh(a)**3 - tanh(a))*x**2 + EX(-tanh(a)**2 + 1)*x + EX(tanh(a))
p = rs_tanh(x + x**2*y + a, x, 4)
assert (p.compose(x, 10)).compose(y, 5) == EX(-1000*tanh(a)**4 + \
10100*tanh(a)**3 + 2470*tanh(a)**2/3 - 10099*tanh(a) + QQ(530, 3))
def test_RR():
rs_funcs = [rs_sin, rs_cos, rs_tan, rs_cot, rs_atan, rs_tanh]
sympy_funcs = [sin, cos, tan, cot, atan, tanh]
R, x, y = ring('x, y', RR)
a = symbols('a')
for rs_func, sympy_func in zip(rs_funcs, sympy_funcs):
p = rs_func(2 + x, x, 5).compose(x, 5)
q = sympy_func(2 + a).series(a, 0, 5).removeO()
is_close(p.as_expr(), q.subs(a, 5).n())
p = rs_nth_root(2 + x, 5, x, 5).compose(x, 5)
q = ((2 + a)**QQ(1, 5)).series(a, 0, 5).removeO()
is_close(p.as_expr(), q.subs(a, 5).n())
def test_is_regular():
R, x, y = ring('x, y', QQ)
p = 1 + 2*x + x**2 + 3*x**3
assert not rs_is_puiseux(p, x)
p = x + x**QQ(1,5)*y
assert rs_is_puiseux(p, x)
assert not rs_is_puiseux(p, y)
p = x + x**2*y**QQ(1,5)*y
assert not rs_is_puiseux(p, x)
def test_puiseux():
R, x, y = ring('x, y', QQ)
p = x**QQ(2,5) + x**QQ(2,3) + x
r = rs_series_inversion(p, x, 1)
r1 = -x**QQ(14,15) + x**QQ(4,5) - 3*x**QQ(11,15) + x**QQ(2,3) + \
2*x**QQ(7,15) - x**QQ(2,5) - x**QQ(1,5) + x**QQ(2,15) - x**QQ(-2,15) \
+ x**QQ(-2,5)
assert r == r1
r = rs_nth_root(1 + p, 3, x, 1)
assert r == -x**QQ(4,5)/9 + x**QQ(2,3)/3 + x**QQ(2,5)/3 + 1
r = rs_log(1 + p, x, 1)
assert r == -x**QQ(4,5)/2 + x**QQ(2,3) + x**QQ(2,5)
r = rs_LambertW(p, x, 1)
assert r == -x**QQ(4,5) + x**QQ(2,3) + x**QQ(2,5)
p1 = x + x**QQ(1,5)*y
r = rs_exp(p1, x, 1)
assert r == x**QQ(4,5)*y**4/24 + x**QQ(3,5)*y**3/6 + x**QQ(2,5)*y**2/2 + \
x**QQ(1,5)*y + 1
r = rs_atan(p, x, 2)
assert r == -x**QQ(9,5) - x**QQ(26,15) - x**QQ(22,15) - x**QQ(6,5)/3 + \
x + x**QQ(2,3) + x**QQ(2,5)
r = rs_atan(p1, x, 2)
assert r == x**QQ(9,5)*y**9/9 + x**QQ(9,5)*y**4 - x**QQ(7,5)*y**7/7 - \
x**QQ(7,5)*y**2 + x*y**5/5 + x - x**QQ(3,5)*y**3/3 + x**QQ(1,5)*y
r = rs_asin(p, x, 2)
assert r == x**QQ(9,5)/2 + x**QQ(26,15)/2 + x**QQ(22,15)/2 + \
x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
r = rs_cot(p, x, 1)
assert r == -x**QQ(14,15) + x**QQ(4,5) - 3*x**QQ(11,15) + \
2*x**QQ(2,3)/3 + 2*x**QQ(7,15) - 4*x**QQ(2,5)/3 - x**QQ(1,5) + \
x**QQ(2,15) - x**QQ(-2,15) + x**QQ(-2,5)
r = rs_cos_sin(p, x, 2)
assert r[0] == x**QQ(28,15)/6 - x**QQ(5,3) + x**QQ(8,5)/24 - x**QQ(7,5) - \
x**QQ(4,3)/2 - x**QQ(16,15) - x**QQ(4,5)/2 + 1
assert r[1] == -x**QQ(9,5)/2 - x**QQ(26,15)/2 - x**QQ(22,15)/2 - \
x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
r = rs_atanh(p, x, 2)
assert r == x**QQ(9,5) + x**QQ(26,15) + x**QQ(22,15) + x**QQ(6,5)/3 + x + \
x**QQ(2,3) + x**QQ(2,5)
r = rs_sinh(p, x, 2)
assert r == x**QQ(9,5)/2 + x**QQ(26,15)/2 + x**QQ(22,15)/2 + \
x**QQ(6,5)/6 + x + x**QQ(2,3) + x**QQ(2,5)
r = rs_cosh(p, x, 2)
assert r == x**QQ(28,15)/6 + x**QQ(5,3) + x**QQ(8,5)/24 + x**QQ(7,5) + \
x**QQ(4,3)/2 + x**QQ(16,15) + x**QQ(4,5)/2 + 1
r = rs_tanh(p, x, 2)
assert r == -x**QQ(9,5) - x**QQ(26,15) - x**QQ(22,15) - x**QQ(6,5)/3 + \
x + x**QQ(2,3) + x**QQ(2,5)
def test1():
R, x = ring('x', QQ)
r = rs_sin(x, x, 15)*x**(-5)
assert r == x**8/6227020800 - x**6/39916800 + x**4/362880 - x**2/5040 + \
QQ(1,120) - x**-2/6 + x**-4
p = rs_sin(x, x, 10)
r = rs_nth_root(p, 2, x, 10)
assert r == -67*x**QQ(17,2)/29030400 - x**QQ(13,2)/24192 + \
x**QQ(9,2)/1440 - x**QQ(5,2)/12 + x**QQ(1,2)
p = rs_sin(x, x, 10)
r = rs_nth_root(p, 7, x, 10)
r = rs_pow(r, 5, x, 10)
assert r == -97*x**QQ(61,7)/124467840 - x**QQ(47,7)/16464 + \
11*x**QQ(33,7)/3528 - 5*x**QQ(19,7)/42 + x**QQ(5,7)
r = rs_exp(x**QQ(1,2), x, 10)
assert r == x**QQ(19,2)/121645100408832000 + x**9/6402373705728000 + \
x**QQ(17,2)/355687428096000 + x**8/20922789888000 + \
x**QQ(15,2)/1307674368000 + x**7/87178291200 + \
x**QQ(13,2)/6227020800 + x**6/479001600 + x**QQ(11,2)/39916800 + \
x**5/3628800 + x**QQ(9,2)/362880 + x**4/40320 + x**QQ(7,2)/5040 + \
x**3/720 + x**QQ(5,2)/120 + x**2/24 + x**QQ(3,2)/6 + x/2 + \
x**QQ(1,2) + 1
def test_puiseux2():
R, y = ring('y', QQ)
S, x = ring('x', R)
p = x + x**QQ(1,5)*y
r = rs_atan(p, x, 3)
assert r == (y**13/13 + y**8 + 2*y**3)*x**QQ(13,5) - (y**11/11 + y**6 +
y)*x**QQ(11,5) + (y**9/9 + y**4)*x**QQ(9,5) - (y**7/7 +
y**2)*x**QQ(7,5) + (y**5/5 + 1)*x - y**3*x**QQ(3,5)/3 + y*x**QQ(1,5)
def test_rs_series():
    """Check that rs_series agrees with Expr.series (modulo the O() term)
    for compositions of sin/cos/tan/exp, including multivariate arguments
    and Puiseux (fractional-power) inputs.
    """
    x, a, b, c = symbols('x, a, b, c')
    assert rs_series(a, a, 5).as_expr() == a
    # Elementary functions: rs_series result must match .series().removeO().
    assert rs_series(sin(a), a, 5).as_expr() == (sin(a).series(a, 0,
        5)).removeO()
    assert rs_series(sin(a) + cos(a), a, 5).as_expr() == ((sin(a) +
        cos(a)).series(a, 0, 5)).removeO()
    assert rs_series(sin(a)*cos(a), a, 5).as_expr() == ((sin(a)*
        cos(a)).series(a, 0, 5)).removeO()
    # Products/compositions; expand() both sides for a canonical comparison.
    p = (sin(a) - a)*(cos(a**2) + a**4/2)
    assert expand(rs_series(p, a, 10).as_expr()) == expand(p.series(a, 0,
        10).removeO())
    p = sin(a**2/2 + a/3) + cos(a/5)*sin(a/2)**3
    assert expand(rs_series(p, a, 5).as_expr()) == expand(p.series(a, 0,
        5).removeO())
    # Multivariate expressions expanded in a single symbol.
    p = sin(x**2 + a)*(cos(x**3 - 1) - a - a**2)
    assert expand(rs_series(p, a, 5).as_expr()) == expand(p.series(a, 0,
        5).removeO())
    p = sin(a**2 - a/3 + 2)**5*exp(a**3 - a/2)
    assert expand(rs_series(p, a, 10).as_expr()) == expand(p.series(a, 0,
        10).removeO())
    p = sin(a + b + c)
    assert expand(rs_series(p, a, 5).as_expr()) == expand(p.series(a, 0,
        5).removeO())
    p = tan(sin(a**2 + 4) + b + c)
    assert expand(rs_series(p, a, 6).as_expr()) == expand(p.series(a, 0,
        6).removeO())
    # Puiseux input: fractional powers of the expansion symbol.
    p = a**QQ(2,5) + a**QQ(2,3) + a
    r = rs_series(tan(p), a, 2)
    assert r.as_expr() == a**QQ(9,5) + a**QQ(26,15) + a**QQ(22,15) + a**QQ(6,5)/3 + \
        a + a**QQ(2,3) + a**QQ(2,5)
    r = rs_series(exp(p), a, 1)
    assert r.as_expr() == a**QQ(4,5)/2 + a**QQ(2,3) + a**QQ(2,5) + 1
    r = rs_series(sin(p), a, 2)
    assert r.as_expr() == -a**QQ(9,5)/2 - a**QQ(26,15)/2 - a**QQ(22,15)/2 - \
        a**QQ(6,5)/6 + a + a**QQ(2,3) + a**QQ(2,5)
    r = rs_series(cos(p), a, 2)
    assert r.as_expr() == a**QQ(28,15)/6 - a**QQ(5,3) + a**QQ(8,5)/24 - a**QQ(7,5) - \
        a**QQ(4,3)/2 - a**QQ(16,15) - a**QQ(4,5)/2 + 1
    # Scalar division of a series argument.
    assert rs_series(sin(a)/7, a, 5).as_expr() == (sin(a)/7).series(a, 0,
        5).removeO()
| bsd-3-clause |
DreamSourceLab/DSView | libsigrokdecode4DSL/decoders/ds243x/pd.py | 3 | 11365 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2017 Kevin Redon <kingkevin@cuvoodoo.info>
## Copyright (C) 2017 Soeren Apel <soeren@apelpie.net>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, see <http://www.gnu.org/licenses/>.
##
import sigrokdecode as srd
# Dictionary of FUNCTION commands and their names.
# Keys are the 1-Wire function command bytes sent after ROM selection.
commands_2432 = {
    0x0f: 'Write scratchpad',
    0xaa: 'Read scratchpad',
    0x55: 'Copy scratchpad',
    0xf0: 'Read memory',
    0x5a: 'Load first secret',
    0x33: 'Compute next secret',
    0xa5: 'Read authenticated page',
}
# The DS2433 supports only the plain EEPROM subset (no secret/MAC commands).
commands_2433 = {
    0x0f: 'Write scratchpad',
    0xaa: 'Read scratchpad',
    0x55: 'Copy scratchpad',
    0xf0: 'Read memory',
}
# Maxim DS243x family code, present at the end of the ROM code.
# Maps family code byte -> (chip name, supported command set).
family_codes = {
    0x33: ('DS2432', commands_2432),
    0x23: ('DS2433', commands_2433),
}
# Calculate the CRC-16 checksum.
# Initial value: 0x0000, xor-in: 0x0000, polynom 0x8005, xor-out: 0xffff.
def crc16(byte_array):
    """Return the inverted, reflected CRC-16 of *byte_array*.

    Polynomial 0x8005 processed LSB-first via its reflected form 0xa001,
    initial value 0x0000, final XOR 0xffff (as used by the DS243x parts).
    """
    POLY_REFLECTED = 0xa001
    crc = 0x0000
    for value in byte_array:
        for _ in range(8):
            feed = (value ^ crc) & 1
            value >>= 1
            crc >>= 1
            if feed:
                crc ^= POLY_REFLECTED
    return crc ^ 0xffff
class Decoder(srd.Decoder):
    """sigrok protocol decoder for Maxim DS2432/DS2433 1-Wire EEPROMs.

    Consumes ('RESET/PRESENCE' | 'ROM' | 'DATA', value) tuples emitted by
    the onewire_network decoder and annotates function commands and their
    payloads. Multi-byte fields are annotated by latching self.ss on the
    field's first byte and self.es on its last byte.
    """
    api_version = 3
    id = 'ds243x'
    name = 'DS243x'
    longname = 'Maxim DS2432/3'
    desc = 'Maxim DS243x series 1-Wire EEPROM protocol.'
    license = 'gplv2+'
    inputs = ['onewire_network']
    outputs = []
    tags = ['IC', 'Memory']
    annotations = (
        ('text', 'Human-readable text'),
    )
    binary = (
        ('mem_read', 'Data read from memory'),
    )
    def __init__(self):
        self.reset()
    def reset(self):
        # Bytes for function command.
        self.bytes = []
        # Family code/name are filled in once a ROM command is seen.
        self.family_code = None
        self.family = ''
        self.commands = commands_2432 # Use max command set until we know better.
    def start(self):
        self.out_ann = self.register(srd.OUTPUT_ANN)
        self.out_binary = self.register(srd.OUTPUT_BINARY)
    def putx(self, data):
        # Emit an annotation over the currently latched ss/es sample range.
        self.put(self.ss, self.es, self.out_ann, data)
    def decode(self, ss, es, data):
        """Dispatch one onewire_network event.

        For 'DATA' events, self.bytes accumulates the transaction so the
        byte position (len(self.bytes)) selects what the current byte means
        for the active function command (self.bytes[0]).
        """
        code, val = data
        if code == 'RESET/PRESENCE':
            self.ss, self.es = ss, es
            self.putx([0, ['Reset/presence: %s'
                           % ('true' if val else 'false')]])
            self.bytes = []
        elif code == 'ROM':
            self.ss, self.es = ss, es
            # The family code is the lowest byte of the 64-bit ROM value.
            self.family_code = val & 0xff
            s = None
            if self.family_code in family_codes:
                # Switch to the command set of the detected chip.
                self.family, self.commands = family_codes[val & 0xff]
                s = 'is 0x%02x, %s detected' % (self.family_code, self.family)
            else:
                s = '0x%02x unknown' % (self.family_code)
            self.putx([0, ['ROM: 0x%016x (%s)' % (val, 'family code ' + s),
                           'ROM: 0x%016x (%s)' % (val, self.family)]])
            self.bytes = []
        elif code == 'DATA':
            self.bytes.append(val)
            if 1 == len(self.bytes):
                # First byte after ROM selection is the function command.
                self.ss, self.es = ss, es
                if val not in self.commands:
                    self.putx([0, ['Unrecognized command: 0x%02x' % val]])
                else:
                    self.putx([0, ['Function command: %s (0x%02x)'
                                   % (self.commands[val], val)]])
            elif 0x0f == self.bytes[0]: # Write scratchpad
                # Layout: TA1/TA2 (bytes 1-2), 8 data bytes (3-10),
                # CRC16 over command+address+data (bytes 11-12, LSB first).
                if 2 == len(self.bytes):
                    self.ss = ss
                elif 3 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Target address: 0x%04x'
                                   % ((self.bytes[2] << 8) + self.bytes[1])]])
                elif 4 == len(self.bytes):
                    self.ss = ss
                elif 11 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Data: ' + (','.join(format(n, '#04x')
                                   for n in self.bytes[3:11]))]])
                elif 12 == len(self.bytes):
                    self.ss = ss
                elif 13 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['CRC: '
                        + ('ok' if crc16(self.bytes[0:11]) == (self.bytes[11]
                        + (self.bytes[12] << 8)) else 'error')]])
            elif 0xaa == self.bytes[0]: # Read scratchpad
                # Layout: TA1/TA2 (1-2), E/S status (3), 8 data bytes (4-11),
                # CRC16 (12-13).
                if 2 == len(self.bytes):
                    self.ss = ss
                elif 3 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Target address: 0x%04x'
                                   % ((self.bytes[2] << 8) + self.bytes[1])]])
                elif 4 == len(self.bytes):
                    self.ss, self.es = ss, es
                    self.putx([0, ['Data status (E/S): 0x%02x'
                                   % (self.bytes[3])]])
                elif 5 == len(self.bytes):
                    self.ss = ss
                elif 12 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Data: ' + (','.join(format(n, '#04x')
                                   for n in self.bytes[4:12]))]])
                elif 13 == len(self.bytes):
                    self.ss = ss
                elif 14 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['CRC: '
                        + ('ok' if crc16(self.bytes[0:12]) == (self.bytes[12]
                        + (self.bytes[13] << 8)) else 'error')]])
            elif 0x5a == self.bytes[0]: # Load first secret
                # Authorization pattern TA1/TA2/E-S (bytes 1-3); afterwards the
                # chip signals completion with alternating 0xaa/0x55 bytes.
                if 2 == len(self.bytes):
                    self.ss = ss
                elif 4 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Authorization pattern (TA1, TA2, E/S): '
                                   + (','.join(format(n, '#04x')
                                   for n in self.bytes[1:4]))]])
                elif 4 < len(self.bytes):
                    self.ss, self.es = ss, es
                    if (0xaa == self.bytes[-1] or 0x55 == self.bytes[-1]):
                        self.putx([0, ['End of operation']])
            elif 0x33 == self.bytes[0]: # Compute next secret
                if 2 == len(self.bytes):
                    self.ss = ss
                elif 3 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Target address: 0x%04x'
                                   % ((self.bytes[2] << 8) + self.bytes[1])]])
                elif 3 < len(self.bytes):
                    self.ss, self.es = ss, es
                    if (0xaa == self.bytes[-1] or 0x55 == self.bytes[-1]):
                        self.putx([0, ['End of operation']])
            elif 0x55 == self.bytes[0]: # Copy scratchpad
                # Authorization pattern (1-3), then a 20-byte MAC (4-23);
                # 0xaa/0x55 signals success, 0x00 signals failure.
                if 2 == len(self.bytes):
                    self.ss = ss
                elif 4 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Authorization pattern (TA1, TA2, E/S): '
                                   + (','.join(format(n, '#04x')
                                   for n in self.bytes[1:4]))]])
                elif 5 == len(self.bytes):
                    self.ss = ss
                elif 24 == len(self.bytes):
                    self.es = es
                    mac = ','.join(format(n, '#04x') for n in self.bytes[4:24])
                    self.putx([0, ['Message authentication code: ' + mac,
                                   'MAC: ' + mac]])
                elif 24 < len(self.bytes):
                    self.ss, self.es = ss, es
                    if (0xaa == self.bytes[-1] or 0x55 == self.bytes[-1]):
                        self.putx([0, ['Operation succeeded']])
                    elif (0 == self.bytes[-1]):
                        self.putx([0, ['Operation failed']])
            elif 0xa5 == self.bytes[0]: # Read authenticated page
                # Layout: TA1/TA2 (1-2), 32 data bytes (3-34), 0xff padding
                # (35), CRC16 (36-37), 20-byte MAC (38-57), MAC CRC16 (58-59),
                # then 0xaa/0x55 completion bytes.
                if 2 == len(self.bytes):
                    self.ss = ss
                elif 3 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Target address: 0x%04x'
                                   % ((self.bytes[2] << 8) + self.bytes[1])]])
                elif 4 == len(self.bytes):
                    self.ss = ss
                elif 35 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Data: ' + (','.join(format(n, '#04x')
                                   for n in self.bytes[3:35]))]])
                elif 36 == len(self.bytes):
                    self.ss, self.es = ss, es
                    self.putx([0, ['Padding: '
                        + ('ok' if 0xff == self.bytes[-1] else 'error')]])
                elif 37 == len(self.bytes):
                    self.ss = ss
                elif 38 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['CRC: '
                        + ('ok' if crc16(self.bytes[0:36]) == (self.bytes[36]
                        + (self.bytes[37] << 8)) else 'error')]])
                elif 39 == len(self.bytes):
                    self.ss = ss
                elif 58 == len(self.bytes):
                    self.es = es
                    mac = ','.join(format(n, '#04x') for n in self.bytes[38:58])
                    self.putx([0, ['Message authentication code: ' + mac,
                                   'MAC: ' + mac]])
                elif 59 == len(self.bytes):
                    self.ss = ss
                elif 60 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['MAC CRC: '
                        + ('ok' if crc16(self.bytes[38:58]) == (self.bytes[58]
                        + (self.bytes[59] << 8)) else 'error')]])
                elif 60 < len(self.bytes):
                    self.ss, self.es = ss, es
                    if (0xaa == self.bytes[-1] or 0x55 == self.bytes[-1]):
                        self.putx([0, ['Operation completed']])
            elif 0xf0 == self.bytes[0]: # Read memory
                # TA1/TA2 (1-2), then an open-ended data stream; each data
                # byte is also emitted on the 'mem_read' binary output.
                if 2 == len(self.bytes):
                    self.ss = ss
                elif 3 == len(self.bytes):
                    self.es = es
                    self.putx([0, ['Target address: 0x%04x'
                                   % ((self.bytes[2] << 8) + self.bytes[1])]])
                elif 3 < len(self.bytes):
                    self.ss, self.es = ss, es
                    self.putx([0, ['Data: 0x%02x' % (self.bytes[-1])]])
                    bdata = self.bytes[-1].to_bytes(1, byteorder='big')
                    self.put(ss, es, self.out_binary, [0, bdata])
| gpl-3.0 |
andmos/ansible | lib/ansible/modules/packaging/os/installp.py | 42 | 9242 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Kairo Araujo <kairo@kairo.eti.br>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: installp
author:
- Kairo Araujo (@kairoaraujo)
short_description: Manage packages on AIX
description:
- Manage packages using 'installp' on AIX
version_added: '2.8'
options:
accept_license:
description:
- Whether to accept the license for the package(s).
type: bool
default: no
name:
description:
- One or more packages to install or remove.
- Use C(all) to install all packages available on informed C(repository_path).
type: list
required: true
aliases: [ pkg ]
repository_path:
description:
- Path with AIX packages (required to install).
type: path
state:
description:
- Whether the package needs to be present on or absent from the system.
type: str
choices: [ absent, present ]
default: present
notes:
- If the package is already installed, even the package/fileset is new, the module will not install it.
'''
# Usage examples rendered by ansible-doc.
# Fix: the examples previously used a non-existent ``package_license``
# option; the module's actual option (see DOCUMENTATION and the
# argument_spec in main()) is ``accept_license``.
EXAMPLES = r'''
- name: Install package foo
  installp:
    name: foo
    repository_path: /repository/AIX71/installp/base
    accept_license: yes
    state: present

- name: Install bos.sysmgt that includes bos.sysmgt.nim.master, bos.sysmgt.nim.spot
  installp:
    name: bos.sysmgt
    repository_path: /repository/AIX71/installp/base
    accept_license: yes
    state: present

- name: Install bos.sysmgt.nim.master only
  installp:
    name: bos.sysmgt.nim.master
    repository_path: /repository/AIX71/installp/base
    accept_license: yes
    state: present

- name: Install bos.sysmgt.nim.master and bos.sysmgt.nim.spot
  installp:
    name: bos.sysmgt.nim.master, bos.sysmgt.nim.spot
    repository_path: /repository/AIX71/installp/base
    accept_license: yes
    state: present

- name: Remove packages bos.sysmgt.nim.master
  installp:
    name: bos.sysmgt.nim.master
    state: absent
'''
RETURN = r''' # '''
import os
import re
from ansible.module_utils.basic import AnsibleModule
def _check_new_pkg(module, package, repository_path):
    """
    Check if the package of fileset is correct name and repository path.

    :param module: Ansible module arguments spec.
    :param package: Package/fileset name.
    :param repository_path: Repository package path.
    :return: Bool, package information.
    """
    if os.path.isdir(repository_path):
        installp_cmd = module.get_bin_path('installp', True)
        # List installable packages in the repository (-l) in machine
        # readable form (-MR).
        rc, package_result, err = module.run_command("%s -l -MR -d %s" % (installp_cmd, repository_path))
        if rc != 0:
            module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
        if package == 'all':
            pkg_info = "All packages on dir"
            return True, pkg_info
        else:
            pkg_info = {}
            for line in package_result.splitlines():
                # NOTE(review): the package name is used as a regex here, so
                # dots match any character and substrings match too --
                # presumably intended as fuzzy matching; confirm before
                # tightening with re.escape().
                if re.findall(package, line):
                    pkg_name = line.split()[0].strip()
                    pkg_version = line.split()[1].strip()
                    pkg_info[pkg_name] = pkg_version
            # Returns True even when pkg_info is empty (no line matched) --
            # NOTE(review): callers treat the flag as "repository readable",
            # not "package found"; verify this is intended.
            return True, pkg_info
        # NOTE(review): unreachable -- both branches above return.
        return False, None
    else:
        module.fail_json(msg="Repository path %s is not valid." % repository_path)
def _check_installed_pkg(module, package, repository_path):
    """
    Query lslpp to find out whether *package* (or a fileset of it) is
    already installed on the system.

    :param module: Ansible module instance (used to run commands / fail).
    :param package: Package/fileset name (matched as a prefix via ``*``).
    :param repository_path: Unused here; kept for a uniform call signature.
    :return: (True, {pkg_name: (fileset, level)}) when installed,
             (False, None) when lslpp reports it as not installed.
    """
    lslpp_cmd = module.get_bin_path('lslpp', True)
    rc, lslpp_result, err = module.run_command("%s -lcq %s*" % (lslpp_cmd, package))

    if rc == 1:
        # rc 1 with a trailing "not installed." simply means absent;
        # any other rc 1 output is a genuine lslpp failure.
        if ' '.join(err.split()[-2:]) == 'not installed.':
            return False, None
        module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)
    if rc != 0:
        module.fail_json(msg="Failed to run lslpp.", rc=rc, err=err)

    # Parse the colon-separated lslpp output into {name: (fileset, level)}.
    pkg_data = {}
    for entry in lslpp_result.splitlines():
        pkg_name, fileset, level = entry.split(':')[0:3]
        pkg_data[pkg_name] = fileset, level

    return True, pkg_data
def remove(module, installp_cmd, packages):
    """Uninstall the given packages/filesets with ``installp -u``.

    :param module: Ansible module instance (runs commands, honors check mode).
    :param installp_cmd: Resolved path of the installp binary.
    :param packages: List of package/fileset names to remove.
    :return: Tuple ``(changed, msg)``.
    """
    repository_path = None  # ignored by _check_installed_pkg for removals
    remove_count = 0
    removed_pkgs = []
    not_found_pkg = []
    for package in packages:
        pkg_check, dummy = _check_installed_pkg(module, package, repository_path)
        if pkg_check:
            # In check mode we only report what would be removed.
            if not module.check_mode:
                rc, remove_out, err = module.run_command("%s -u %s" % (installp_cmd, package))
                if rc != 0:
                    module.fail_json(msg="Failed to run installp.", rc=rc, err=err)
            remove_count += 1
            removed_pkgs.append(package)
        else:
            not_found_pkg.append(package)
    if remove_count > 0:
        # Fix: label the "not found" list whenever it is non-empty. The old
        # check (len > 1) dropped the label when exactly one package was
        # missing, producing a confusing message.
        if not_found_pkg:
            not_found_pkg.insert(0, "Package(s) not found: ")
        changed = True
        msg = "Packages removed: %s. %s " % (' '.join(removed_pkgs), ' '.join(not_found_pkg))
    else:
        changed = False
        msg = ("No packages removed, all packages not found: %s" % ' '.join(not_found_pkg))
    return changed, msg
def install(module, installp_cmd, packages, repository_path, accept_license):
    """Install packages/filesets from *repository_path* with installp.

    :param module: Ansible module instance (runs commands, honors check mode).
    :param installp_cmd: Resolved path of the installp binary.
    :param packages: List of package/fileset names (or 'all').
    :param repository_path: Directory holding the installp images.
    :param accept_license: When True, pass -Y to auto-accept licenses.
    :return: Tuple ``(changed, msg)``.
    """
    installed_pkgs = []
    not_found_pkgs = []
    already_installed_pkgs = {}
    # Maps the boolean option to the installp license flag (empty string
    # when licenses must not be auto-accepted).
    accept_license_param = {
        True: '-Y',
        False: '',
    }
    # Validate if package exists on repository path.
    for package in packages:
        pkg_check, pkg_data = _check_new_pkg(module, package, repository_path)
        # If package exists on repository path, check if package is installed.
        if pkg_check:
            pkg_check_current, pkg_info = _check_installed_pkg(module, package, repository_path)
            # If package is already installed.
            if pkg_check_current:
                # Check if package is a package and not a fileset, get version
                # and add the package into already installed list
                if package in pkg_info.keys():
                    already_installed_pkgs[package] = pkg_info[package][1]
                else:
                    # If the package is not a package but a fileset, confirm
                    # and add the fileset/package into already installed list
                    # NOTE(review): pkg_info[key] is a (fileset, level) tuple,
                    # so ``package in pkg_info[key]`` only matches an exact
                    # fileset/level string, not a substring -- confirm this
                    # is the intended match.
                    for key in pkg_info.keys():
                        if package in pkg_info[key]:
                            already_installed_pkgs[package] = pkg_info[key][1]
            else:
                if not module.check_mode:
                    rc, out, err = module.run_command("%s -a %s -X -d %s %s" % (installp_cmd, accept_license_param[accept_license], repository_path, package))
                    if rc != 0:
                        module.fail_json(msg="Failed to run installp", rc=rc, err=err)
                installed_pkgs.append(package)
        else:
            not_found_pkgs.append(package)
    # Assemble the result message from the three outcome buckets.
    if len(installed_pkgs) > 0:
        installed_msg = (" Installed: %s." % ' '.join(installed_pkgs))
    else:
        installed_msg = ''
    if len(not_found_pkgs) > 0:
        not_found_msg = (" Not found: %s." % ' '.join(not_found_pkgs))
    else:
        not_found_msg = ''
    if len(already_installed_pkgs) > 0:
        # NOTE(review): this interpolates the dict repr directly -- verify
        # that is the desired formatting for the "already installed" list.
        already_installed_msg = (" Already installed: %s." % already_installed_pkgs)
    else:
        already_installed_msg = ''
    if len(installed_pkgs) > 0:
        changed = True
        msg = ("%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
    else:
        changed = False
        msg = ("No packages installed.%s%s%s" % (installed_msg, not_found_msg, already_installed_msg))
    return changed, msg
def main():
    """Module entry point: parse options, dispatch to install() or remove(),
    and exit with the resulting changed/msg pair."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(type='list', required=True, aliases=['pkg']),
            repository_path=dict(type='path'),
            accept_license=dict(type='bool', default=False),
            state=dict(type='str', default='present', choices=['absent', 'present']),
        ),
        supports_check_mode=True,
    )

    params = module.params
    installp_cmd = module.get_bin_path('installp', True)
    state = params['state']

    if state == 'present':
        # Installing requires a repository to pull the images from.
        if params['repository_path'] is None:
            module.fail_json(msg="repository_path is required to install package")
        changed, msg = install(module, installp_cmd, params['name'],
                               params['repository_path'], params['accept_license'])
    elif state == 'absent':
        changed, msg = remove(module, installp_cmd, params['name'])
    else:
        # Unreachable in practice: argument_spec restricts the choices.
        module.fail_json(changed=False, msg="Unexpected state.")

    module.exit_json(changed=changed, msg=msg)
| gpl-3.0 |
tectronics/arsenalsuite | cpp/lib/PyQt4/examples/draganddrop/draggableicons/draggableicons.py | 15 | 5726 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2010 Riverbank Computing Limited.
## Copyright (C) 2010 Nokia Corporation and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
from PyQt4 import QtCore, QtGui
import draggableicons_rc
class DragWidget(QtGui.QFrame):
    """A frame holding three pixmap labels that can be dragged between
    DragWidget instances using the custom 'application/x-dnditemdata'
    MIME type (a QDataStream-serialized pixmap plus grab offset).
    """
    def __init__(self, parent=None):
        super(DragWidget, self).__init__(parent)
        self.setMinimumSize(200, 200)
        self.setFrameStyle(QtGui.QFrame.Sunken | QtGui.QFrame.StyledPanel)
        self.setAcceptDrops(True)
        # Each icon is a child QLabel positioned absolutely; WA_DeleteOnClose
        # lets close() destroy it after a successful move-drag.
        boatIcon = QtGui.QLabel(self)
        boatIcon.setPixmap(QtGui.QPixmap(':/images/boat.png'))
        boatIcon.move(20, 20)
        boatIcon.show()
        boatIcon.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        carIcon = QtGui.QLabel(self)
        carIcon.setPixmap(QtGui.QPixmap(':/images/car.png'))
        carIcon.move(120, 20)
        carIcon.show()
        carIcon.setAttribute(QtCore.Qt.WA_DeleteOnClose)
        houseIcon = QtGui.QLabel(self)
        houseIcon.setPixmap(QtGui.QPixmap(':/images/house.png'))
        houseIcon.move(20, 120)
        houseIcon.show()
        houseIcon.setAttribute(QtCore.Qt.WA_DeleteOnClose)
    def dragEnterEvent(self, event):
        # Accept only our own MIME type; drags within the same widget become
        # moves, drags from another widget keep the proposed action (copy).
        if event.mimeData().hasFormat('application/x-dnditemdata'):
            if event.source() == self:
                event.setDropAction(QtCore.Qt.MoveAction)
                event.accept()
            else:
                event.acceptProposedAction()
        else:
            event.ignore()
    # Same accept/ignore rules apply while the drag moves over the widget.
    dragMoveEvent = dragEnterEvent
    def dropEvent(self, event):
        if event.mimeData().hasFormat('application/x-dnditemdata'):
            # Deserialize pixmap and grab offset from the drag payload.
            itemData = event.mimeData().data('application/x-dnditemdata')
            dataStream = QtCore.QDataStream(itemData, QtCore.QIODevice.ReadOnly)
            pixmap = QtGui.QPixmap()
            offset = QtCore.QPoint()
            dataStream >> pixmap >> offset
            # Recreate the icon at the drop position, compensating for where
            # inside the icon the user originally grabbed it.
            newIcon = QtGui.QLabel(self)
            newIcon.setPixmap(pixmap)
            newIcon.move(event.pos() - offset)
            newIcon.show()
            newIcon.setAttribute(QtCore.Qt.WA_DeleteOnClose)
            if event.source() == self:
                event.setDropAction(QtCore.Qt.MoveAction)
                event.accept()
            else:
                event.acceptProposedAction()
        else:
            event.ignore()
    def mousePressEvent(self, event):
        # Start a drag if the press hit one of the icon labels.
        child = self.childAt(event.pos())
        if not child:
            return
        pixmap = QtGui.QPixmap(child.pixmap())
        # Serialize pixmap + grab offset into the custom MIME payload.
        itemData = QtCore.QByteArray()
        dataStream = QtCore.QDataStream(itemData, QtCore.QIODevice.WriteOnly)
        dataStream << pixmap << QtCore.QPoint(event.pos() - child.pos())
        mimeData = QtCore.QMimeData()
        mimeData.setData('application/x-dnditemdata', itemData)
        drag = QtGui.QDrag(self)
        drag.setMimeData(mimeData)
        drag.setPixmap(pixmap)
        drag.setHotSpot(event.pos() - child.pos())
        # Dim the source icon for the duration of the drag.
        tempPixmap = QtGui.QPixmap(pixmap)
        painter = QtGui.QPainter()
        painter.begin(tempPixmap)
        painter.fillRect(pixmap.rect(), QtGui.QColor(127, 127, 127, 127))
        painter.end()
        child.setPixmap(tempPixmap)
        # On a move the source icon disappears; otherwise restore it.
        if drag.exec_(QtCore.Qt.CopyAction | QtCore.Qt.MoveAction, QtCore.Qt.CopyAction) == QtCore.Qt.MoveAction:
            child.close()
        else:
            child.show()
            child.setPixmap(pixmap)
if __name__ == '__main__':
    import sys
    # Demo: two side-by-side DragWidget frames so icons can be dragged
    # both within one frame and across frames.
    app = QtGui.QApplication(sys.argv)
    mainWidget = QtGui.QWidget()
    horizontalLayout = QtGui.QHBoxLayout()
    horizontalLayout.addWidget(DragWidget())
    horizontalLayout.addWidget(DragWidget())
    mainWidget.setLayout(horizontalLayout)
    mainWidget.setWindowTitle(QtCore.QObject.tr(mainWidget, "Draggable Icons"))
    mainWidget.show()
    sys.exit(app.exec_())
| gpl-2.0 |
waytai/odoo | addons/resource/faces/task.py | 433 | 126405 | #@+leo-ver=4
#@+node:@file task.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# mreithinger@web.de
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
"""
This module contains all classes for project plan objects
"""
#@<< Imports >>
#@+node:<< Imports >>
import pcalendar
import resource
import types
import sys
import datetime
import operator as op
import warnings
import locale
import weakref
import opcode
import new
try:
set
except NameError:
from sets import Set as set
#@-node:<< Imports >>
#@nl
# Module flag -- presumably distinguishes this original source module from
# generated/derived variants; TODO confirm against the rest of the package.
_is_source = True
# Balancing strictness levels (apparently consumed via the task ``balance``
# attribute exposed by the me-proxies below; verify against the scheduler).
STRICT = 3
SLOPPY = 2
SMART = 1
#@+others
#@+node:Exceptions
#@+node:class AttributeError
class AttributeError(AttributeError):
    """Module-local AttributeError (deliberately reuses the builtin name).

    Adds the ``is_frozen`` flag so callers can tell errors raised on a
    frozen task apart from ordinary attribute errors.
    """
    #@ << class AttributeError declarations >>
    #@+node:<< class AttributeError declarations >>
    # True when raised on a frozen task -- presumably set by the freezing
    # machinery elsewhere in this module; TODO confirm.
    is_frozen = False
    #@-node:<< class AttributeError declarations >>
    #@nl
#@-node:class AttributeError
#@+node:class RecursionError
class RecursionError(Exception):
    """This exception is raised in cas of cirular dependencies
    within an project"""
    # NOTE(review): under Python 3 this name shadows the builtin
    # RecursionError; the module predates that builtin.
    #@ << class RecursionError declarations >>
    #@+node:<< class RecursionError declarations >>
    pass
    #@-node:<< class RecursionError declarations >>
    #@nl
#@-node:class RecursionError
#@+node:class _IncompleteError
class _IncompleteError(Exception):
    """This exception is raised, when there is not enough
    data specified to calculate as task"""
    #@ @+others
    #@+node:__init__
    def __init__(self, *args):
        # If the first argument is already a message string, pass the
        # arguments through unchanged; otherwise prepend a default message.
        # NOTE: ``basestring`` makes this Python-2-only.
        if isinstance(args[0], (basestring)):
            Exception.__init__(self, *args)
        else:
            Exception.__init__(self,
                               "Not enough data for calculating task, "\
                               "maybe you have a recursive reference.",
                               *args)
    #@-node:__init__
    #@-others
#@-node:class _IncompleteError
#@-node:Exceptions
#@+node:Proxies for self referencing
#@+node:class _MeProxy
class _MeProxy(object):
    """
    A Proxy class for the me attribute of tasks in the compile case
    """
    #@ << declarations >>
    #@+node:<< declarations >>
    __slots__ = "task"
    #@-node:<< declarations >>
    #@nl
    #@ @+others
    #@+node:__init__
    def __init__(self, task):
        # object.__setattr__ bypasses our own __setattr__, which forwards
        # everything to the wrapped task.
        object.__setattr__(self, "task", task)
    #@-node:__init__
    #@+node:__getattr__
    def __getattr__(self, name):
        # Frozen tasks answer directly from their computed attributes.
        if self.task._is_frozen:
            return getattr(self.task, name)
        # Structural attributes are always delegated to the task itself.
        if name in ("name", "up", "root", "path",
                    "depth", "index", "calendar",
                    "children", "resource", "balance"):
            return getattr(self.task, name)
        # _NEVER_USED_ is a sentinel (defined elsewhere in this module)
        # marking "attribute not set yet".
        value = self.task.__dict__.get(name, _NEVER_USED_)
        def make_val(default):
            if value is _NEVER_USED_: return default
            return value
        # Unset attributes get category-specific defaults, converted via the
        # task's calendar helpers (_to_start for dates, _to_delta for spans).
        if name in ("start", "end"):
            return self.task._to_start(make_val("1.1.2006"))
        if name in ("length", "effort", "duration", "todo", "done",
                    "buffer", "performed", "performed_effort",
                    "performed_end", "performed_start",
                    "performed_work_time" ):
            return self.task._to_delta(make_val("0d"))
        if name in ("complete", "priority", "efficiency"):
            return make_val(0)
        if value is _NEVER_USED_:
            # Raises the module-local AttributeError subclass defined above.
            raise AttributeError("'%s' is not a valid attribute." % (name))
        return value
    #@-node:__getattr__
    #@+node:__setattr__
    def __setattr__(self, name, value):
        # All assignments become task attribute definitions.
        self.task._set_attrib(name, value)
    #@-node:__setattr__
    #@+node:__iter__
    def __iter__(self):
        # Iterating the proxy iterates the task (its children).
        return iter(self.task)
    #@nonl
    #@-node:__iter__
    #@+node:add_attrib
    def add_attrib(self, name_or_iter, val=None):
        """Set one attribute, or many given an iterable of (name, value)."""
        if not isinstance(name_or_iter, str):
            for n, v in name_or_iter:
                setattr(self, n, v)
        else:
            setattr(self, name_or_iter, val)
    #@-node:add_attrib
    #@-others
#@nonl
#@-node:class _MeProxy
#@+node:class _MeProxyRecalc
class _MeProxyRecalc(_MeProxy):
    """
    A Proxy class for the me attribute of tasks in the recalc case
    """
    #@ @+others
    #@+node:__setattr__
    def __setattr__(self, name, value):
        # During recalculation only attributes already registered in the
        # task's property table may be (re)assigned; everything else is
        # silently ignored. (dict.has_key is Python-2-only.)
        if self.task._properties.has_key(name):
            self.task._set_attrib(name, value)
    #@-node:__setattr__
    #@-others
#@-node:class _MeProxyRecalc
#@+node:class _MeProxyError
class _MeProxyError(_MeProxy):
#@ << declarations >>
#@+node:<< declarations >>
__slots__ = ("task", "attrib", "exc")
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
def __init__(self, task, attrib, exc):
_MeProxy.__init__(self, task)
object.__setattr__(self, "attrib", attrib)
object.__setattr__(self, "exc", exc)
#@-node:__init__
#@+node:__setattr__
def __setattr__(self, name, value):
if name == self.attrib or not self.attrib:
raise self.exc
#@-node:__setattr__
#@-others
#@-node:class _MeProxyError
#@+node:class _MeProxyWarn
class _MeProxyWarn(_MeProxy):
#@ << declarations >>
#@+node:<< declarations >>
__slots__ = ("task", "attrib", "message")
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
def __init__(self, task, attrib, message):
_MeProxy.__init__(self, task)
object.__setattr__(self, "attrib", attrib)
object.__setattr__(self, "message", message)
#@-node:__init__
#@+node:__setattr__
def __setattr__(self, name, value):
if name == self.attrib or not self.attrib:
warnings.warn(self.message, RuntimeWarning, 2)
if not self.attrib:
#warn only one time!
object.__setattr__(self, "attrib", 1)
#@-node:__setattr__
#@-others
#@-node:class _MeProxyWarn
#@-node:Proxies for self referencing
#@+node:Task instrumentation
#@+doc
# This section contains code for byte code instrumenting
# the task functions
#@-doc
#@nonl
#@+node:_int_to_arg
def _int_to_arg(value):
return value % 256, value / 256
#@-node:_int_to_arg
#@+node:_correct_labels
def _correct_labels(old_code, new_code):
    """Fix up jump targets in *new_code* after instrumentation.

    _instrument expands some LOAD_FAST/STORE_FAST opcodes into longer
    sequences, shifting all following offsets; this walks both bytecode
    lists in parallel, builds an old->new offset map, and rewrites the
    arguments of relative and absolute jumps accordingly.
    (CPython 2 bytecode layout: 2-byte arguments, no EXTENDED_ARG handling.)
    """
    #@ << localize dot variables >>
    #@+node:<< localize dot variables >>
    hasjrel = opcode.hasjrel
    hasjabs = opcode.hasjabs
    HAVE_ARGUMENT = opcode.HAVE_ARGUMENT
    #@nonl
    #@-node:<< localize dot variables >>
    #@nl
    #@ << loop initialization >>
    #@+node:<< loop initialization >>
    labels = {}
    old_new_map = {} # map old code offset to new code offset
    n = len(old_code)
    i = 0
    j = 0
    #@nonl
    #@-node:<< loop initialization >>
    #@nl
    while i < n:
        op = old_code[i]
        nop = new_code[j]
        old_new_map[i] = j
        i = i + 1
        j = j + 1
        if op >= HAVE_ARGUMENT:
            oparg = old_code[i] + old_code[i + 1] * 256
            i = i + 2
            j = j + 2
            if nop != op:
                j += 3 # skip the 3 addition opcodes for attrib access
            else:
                #@ << add label if necessary >>
                #@+node:<< add label if necessary >>
                # Record the (old) jump target keyed by the offset just
                # after this instruction's argument.
                label = -1
                if op in hasjrel:
                    label = i + oparg
                elif op in hasjabs:
                    label = oparg
                if label >= 0:
                    labels[i] = label
                #@nonl
                #@-node:<< add label if necessary >>
                #@nl
    # Rewrite each recorded jump argument using the translated offsets.
    # (dict.iteritems is Python-2-only.)
    for offset, label in labels.iteritems():
        new_offset = old_new_map[offset]
        new_label = old_new_map[label]
        op = new_code[new_offset - 3]
        #change jump arguments
        if op in hasjrel:
            jump = _int_to_arg(new_label - new_offset)
            new_code[new_offset - 2:new_offset] = jump
        elif op in hasjabs:
            new_code[new_offset - 2:new_offset] = _int_to_arg(new_label)
#@nonl
#@-node:_correct_labels
#@+node:_instrument
def _instrument(func):
    """Rewrite *func*'s bytecode so that non-underscore local variables
    become attributes of the global ``me`` object.

    LOAD_FAST/STORE_FAST on such names are replaced by LOAD_GLOBAL "me"
    followed by LOAD_ATTR/STORE_ATTR; jump targets and the line-number
    table are corrected afterwards. The names loaded via LOAD_GLOBAL are
    collected on the returned function as ``global_names``.

    NOTE: Python-2-only (func_code, the ``new`` module, str bytecode,
    2-byte opcode arguments).
    """
    #@ << localize dot variables >>
    #@+node:<< localize dot variables >>
    opname = opcode.opname
    opmap = opcode.opmap
    jumps = opcode.hasjrel + opcode.hasjabs
    HAVE_ARGUMENT = opcode.HAVE_ARGUMENT
    co = func.func_code
    local_names = co.co_varnames
    all_names = list(co.co_names)
    global_names = set()
    #@-node:<< localize dot variables >>
    #@nl
    #@ << define local functions list_to_dict and is_local >>
    #@+node:<< define local functions list_to_dict and is_local >>
    def list_to_dict(l):
        # Map each name to its index (like l.index, but O(1) lookups).
        return dict([(t[1], t[0]) for t in enumerate(l)])
    def is_local(name):
        # Underscore-prefixed names stay real locals ("__constraint__" too
        # gets promoted despite its underscores).
        return name[0] == "_" and name != "__constraint__"
    #@nonl
    #@-node:<< define local functions list_to_dict and is_local >>
    #@nl
    #convert code
    #@ << loop initialization >>
    #@+node:<< loop initialization >>
    # all_name_map maps names to the all_names index
    # (same like all_names.index())
    all_name_map = list_to_dict(all_names)
    if not all_name_map.has_key("me"):
        all_name_map["me"] = len(all_names)
        all_names.append("me")
    #<python 2.5>
    for ln in local_names:
        if not all_name_map.has_key(ln):
            all_name_map[ln] = len(all_names)
            all_names.append(ln)
    #</python 2.5>
    new_local_names = filter(is_local, local_names)
    new_local_name_map = list_to_dict(new_local_names)
    me_arg = _int_to_arg(all_name_map["me"])
    # lnotab bookkeeping: translate line-number increments to new offsets.
    old_lnotab = map(ord, co.co_lnotab)
    new_lnotab = []
    tab_pos = 0
    try:
        next_tab_point = old_lnotab[0]
    except IndexError:
        next_tab_point = None
    last_tab_point = 0
    code = map(ord, co.co_code)
    new_code = []
    has_labels = False
    n = len(code)
    i = 0
    #@nonl
    #@-node:<< loop initialization >>
    #@nl
    while i < n:
        if i == next_tab_point:
            #@ << calculate new tab point >>
            #@+node:<< calculate new tab point >>
            increment = len(new_code) - last_tab_point
            new_lnotab.extend((increment, old_lnotab[tab_pos + 1]))
            tab_pos += 2
            try:
                next_tab_point = i + old_lnotab[tab_pos]
                last_tab_point = len(new_code)
            except IndexError:
                next_tab_point = -1
            #@nonl
            #@-node:<< calculate new tab point >>
            #@nl
        op = code[i]
        i += 1
        if op >= HAVE_ARGUMENT:
            #@ << calculate argument >>
            #@+node:<< calculate argument >>
            arg0 = code[i]
            arg1 = code[i+1]
            oparg = arg0 + arg1 * 256
            #@nonl
            #@-node:<< calculate argument >>
            #@nl
            i += 2
            if opname[op] == "LOAD_GLOBAL":
                global_names.add(oparg)
            elif opname[op] == "STORE_FAST":
                #@ << change "store fast" to "store attribute" >>
                #@+node:<< change "store fast" to "store attribute" >>
                # Promoted locals: emit LOAD_GLOBAL me; STORE_ATTR name.
                name = local_names[oparg]
                if not is_local(name):
                    new_code.append(opmap["LOAD_GLOBAL"])
                    new_code.extend(me_arg)
                    op = opmap["STORE_ATTR"]
                    arg0, arg1 = _int_to_arg(all_name_map[name])
                else:
                    arg0, arg1 = _int_to_arg(new_local_name_map[name])
                #@nonl
                #@-node:<< change "store fast" to "store attribute" >>
                #@nl
            elif opname[op] == "LOAD_FAST":
                #@ << change "load fast" to "load attribute" >>
                #@+node:<< change "load fast" to "load attribute" >>
                # Promoted locals: emit LOAD_GLOBAL me; LOAD_ATTR name.
                name = local_names[oparg]
                if not is_local(name):
                    new_code.append(opmap["LOAD_GLOBAL"])
                    new_code.extend(me_arg)
                    op = opmap["LOAD_ATTR"]
                    arg0, arg1 = _int_to_arg(all_name_map[name])
                else:
                    arg0, arg1 = _int_to_arg(new_local_name_map[name])
                #@nonl
                #@-node:<< change "load fast" to "load attribute" >>
                #@nl
            elif op in jumps:
                has_labels = True
            new_code.extend((op, arg0, arg1))
        else:
            new_code.append(op)
    if has_labels:
        # Instruction expansion moved offsets; repair all jump targets.
        _correct_labels(code, new_code)
    #@ << create new code and function objects and return >>
    #@+node:<< create new code and function objects and return >>
    new_code = "".join(map(chr, new_code))
    new_lnotab = "".join(map(chr, new_lnotab))
    new_co = new.code(co.co_argcount,
                      len(new_local_names),
                      max(co.co_stacksize, 2),
                      co.co_flags,
                      new_code,
                      co.co_consts,
                      tuple(all_names),
                      tuple(new_local_names),
                      co.co_filename,
                      co.co_name,
                      co.co_firstlineno,
                      new_lnotab,
                      co.co_freevars,
                      co.co_cellvars)
    func = new.function(new_co,
                        func.func_globals,
                        func.func_name,
                        func.func_defaults,
                        func.func_closure)
    func.global_names = tuple([all_names[index] for index in global_names])
    return func
#@nonl
#@-node:<< create new code and function objects and return >>
#@nl
#@nonl
#@-node:_instrument
#@-node:Task instrumentation
#@+node:Wrappers
#@+node:class _Path
class _Path(object):
"""
This class represents an instrumented path, to
a task. If it points to an attribute of a task, it
not only returns the value of the attribute. You can also
find out the source attribute (task and attribute name)
of the value.
"""
#@ @+others
#@+node:__init__
def __init__(self, task, path_str):
self._task = task
self._path_str = path_str
#@-node:__init__
#@+node:__getattr__
def __getattr__(self, name):
new = getattr(self._task, name)
if isinstance(new, Task):
return _Path(new, self._path_str + "." + name)
return _ValueWrapper(new, [(self._task, name)])
#@-node:__getattr__
#@+node:__str__
def __str__(self):
return self._path_str
#@-node:__str__
#@+node:__iter__
def __iter__(self):
return iter(self._task)
#@nonl
#@-node:__iter__
#@-others
#@-node:class _Path
#@+node:_val
#helper functions for _ValueWrapper
#----------------------------------
def _val(val):
    """Unwrap a _ValueWrapper; plain values pass through unchanged."""
    return val._value if isinstance(val, _ValueWrapper) else val
#@-node:_val
#@+node:_ref
def _ref(val):
    """Return the reference list of a _ValueWrapper, or [] for plain values."""
    return val._ref if isinstance(val, _ValueWrapper) else []
#@-node:_ref
#@+node:_sref
def _sref(val, ref):
    # Overwrite the reference list of a _ValueWrapper; a no-op for
    # plain (unwrapped) values.
    if isinstance(val, _ValueWrapper):
        val._ref = ref
#@nonl
#@-node:_sref
#@+node:_refsum
def _refsum(refs):
return reduce(lambda a, b: a + b, refs, [])
#@nonl
#@-node:_refsum
#@+node:class _ValueWrapper
class _ValueWrapper(object):
    """
    This class represents a value, of a task attribute or
    a return value of a task method. It contains the value,
    and the supplier of that value
    """
    #@ @+others
    #@+node:__init__
    def __init__(self, value, ref):
        # _value is the wrapped raw value; _ref is a list of
        # (task, attribute_name) pairs naming the supplier(s)
        self._value = value
        self._ref = ref
    #@-node:__init__
    #@+node:unicode
    def unicode(self, *args):
        # explicit conversion; extra args (e.g. an encoding) only make
        # sense for byte strings (Python 2 semantics)
        if isinstance(self._value, str):
            return unicode(self._value, *args)
        return unicode(self._value)
    #@nonl
    #@-node:unicode
    #@+node:_vw
    def _vw(self, operand, *args):
        # apply *operand* to the unwrapped arguments; the result stays
        # wrapped and carries the merged reference lists of all
        # wrapped operands
        refs = _refsum(map(_ref, args))
        vals = map(_val, args)
        result = operand(*vals)
        return self.__class__(result, refs)
    #@-node:_vw
    #@+node:_cmp
    def _cmp(self, operand, *args):
        # comparisons return the plain result, but as a side effect the
        # merged reference list is written back into every wrapped
        # operand (so the suppliers of the comparison can be queried)
        refs = _refsum(map(_ref, args))
        vals = map(_val, args)
        result = operand(*vals)
        map(lambda a: _sref(a, refs), args)
        return result
    #@-node:_cmp
    #@+node:__getattr__
    def __getattr__(self, name):
        # delegate unknown attribute access to the wrapped value
        return getattr(self._value, name)
    #@-node:__getattr__
    #@+node:__getitem__
    def __getitem__(self, slice):
        # indexing/slicing keeps the wrapper and its references
        return self.__class__(self._value[slice], self._ref)
    #@nonl
    #@-node:__getitem__
    #@+node:__str__
    def __str__(self): return str(self._value)
    #@-node:__str__
    #@+node:__unicode__
    def __unicode__(self): return unicode(self._value)
    #@nonl
    #@-node:__unicode__
    #@+node:__repr__
    def __repr__(self): return repr(self._value)
    #@-node:__repr__
    #@+node:__nonzero__
    # Python 2 truth protocol (__bool__ in Python 3)
    def __nonzero__(self): return bool(self._value)
    #@-node:__nonzero__
    # rich comparisons delegate through _cmp (note its reference
    # write-back side effect)
    #@+node:__lt__
    def __lt__(self, other): return self._cmp(op.lt, self, other)
    #@-node:__lt__
    #@+node:__le__
    def __le__(self, other): return self._cmp(op.le, self, other)
    #@-node:__le__
    #@+node:__eq__
    def __eq__(self, other): return self._cmp(op.eq, self, other)
    #@-node:__eq__
    #@+node:__ne__
    def __ne__(self, other): return self._cmp(op.ne, self, other)
    #@-node:__ne__
    #@+node:__gt__
    def __gt__(self, other): return self._cmp(op.gt, self, other)
    #@-node:__gt__
    #@+node:__ge__
    def __ge__(self, other): return self._cmp(op.ge, self, other)
    #@-node:__ge__
    # arithmetic delegates through _vw; results stay wrapped
    #@+node:__add__
    def __add__(self, other): return self._vw(op.add, self, other)
    #@nonl
    #@-node:__add__
    #@+node:__sub__
    def __sub__(self, other): return self._vw(op.sub, self, other)
    #@-node:__sub__
    #@+node:__mul__
    def __mul__(self, other): return self._vw(op.mul, self, other)
    #@-node:__mul__
    #@+node:__floordiv__
    def __floordiv__(self, other): return self._vw(op.floordiv, self, other)
    #@-node:__floordiv__
    #@+node:__mod__
    def __mod__(self, other): return self._vw(op.mod, self, other)
    #@-node:__mod__
    #@+node:__divmod__
    # NOTE(review): the stdlib "operator" module has no "divmod"
    # attribute -- confirm what "op" is bound to before relying on
    # __divmod__/__rdivmod__
    def __divmod__(self, other): return self._vw(op.divmod, self, other)
    #@-node:__divmod__
    #@+node:__pow__
    def __pow__(self, other): return self._vw(op.pow, self, other)
    #@-node:__pow__
    #@+node:__lshift__
    def __lshift__(self, other): return self._vw(op.lshift, self, other)
    #@-node:__lshift__
    #@+node:__rshift__
    def __rshift__(self, other): return self._vw(op.rshift, self, other)
    #@-node:__rshift__
    #@+node:__and__
    def __and__(self, other): return self._vw(op.and_, self, other)
    #@-node:__and__
    #@+node:__xor__
    def __xor__(self, other): return self._vw(op.xor, self, other)
    #@-node:__xor__
    #@+node:__or__
    def __or__(self, other): return self._vw(op.or_, self, other)
    #@-node:__or__
    #@+node:__div__
    # Python 2 classic division protocol
    def __div__(self, other): return self._vw(op.div, self, other)
    #@-node:__div__
    # reflected variants: wrapped value is the right operand
    #@+node:__radd__
    def __radd__(self, other): return self._vw(op.add, other, self)
    #@-node:__radd__
    #@+node:__rsub__
    def __rsub__(self, other): return self._vw(op.sub, other, self)
    #@-node:__rsub__
    #@+node:__rmul__
    def __rmul__(self, other): return self._vw(op.mul, other, self)
    #@-node:__rmul__
    #@+node:__rdiv__
    def __rdiv__(self, other): return self._vw(op.div, other, self)
    #@-node:__rdiv__
    #@+node:__rtruediv__
    def __rtruediv__(self, other): return self._vw(op.truediv, other, self)
    #@-node:__rtruediv__
    #@+node:__rfloordiv__
    def __rfloordiv__(self, other): return self._vw(op.floordiv, other, self)
    #@-node:__rfloordiv__
    #@+node:__rmod__
    def __rmod__(self, other): return self._vw(op.mod, other, self)
    #@-node:__rmod__
    #@+node:__rdivmod__
    def __rdivmod__(self, other): return self._vw(op.divmod, other, self)
    #@-node:__rdivmod__
    #@+node:__rpow__
    def __rpow__(self, other): return self._vw(op.pow, other, self)
    #@-node:__rpow__
    #@+node:__rlshift__
    def __rlshift__(self, other): return self._vw(op.lshift, other, self)
    #@-node:__rlshift__
    #@+node:__rrshift__
    def __rrshift__(self, other): return self._vw(op.rshift, other, self)
    #@-node:__rrshift__
    #@+node:__rand__
    def __rand__(self, other): return self._vw(op.and_, other, self)
    #@-node:__rand__
    #@+node:__rxor__
    def __rxor__(self, other): return self._vw(op.xor, other, self)
    #@-node:__rxor__
    #@+node:__ror__
    def __ror__(self, other): return self._vw(op.or_, other, self)
    #@-node:__ror__
    # numeric conversions and container protocol: operate on the raw
    # value, results are NOT wrapped
    #@+node:__int__
    def __int__(self): return int(self._value)
    #@-node:__int__
    #@+node:__long__
    def __long__(self): return long(self._value)
    #@-node:__long__
    #@+node:__float__
    def __float__(self): return float(self._value)
    #@-node:__float__
    #@+node:__len__
    def __len__(self): return len(self._value)
    #@-node:__len__
    #@+node:__iter__
    def __iter__(self): return iter(self._value)
    #@-node:__iter__
    #@+node:__hash__
    def __hash__(self): return hash(self._value)
    #@-node:__hash__
    #@-others
#@-node:class _ValueWrapper
#@-node:Wrappers
#@+node:Utilities
#@+node:class _NEVER_USED_
class _NEVER_USED_:
    # Old-style placeholder class; presumably used as a unique
    # "no value" marker -- confirm at the call sites.
    pass
#@-node:class _NEVER_USED_
#@+node:class _StringConverter
class _StringConverter(object):
"""This class is a helper for the to_string mechanism
of tasks"""
#@ @+others
#@+node:__init__
def __init__(self, source, format=None):
self.source = source
self.format = format
#@-node:__init__
#@+node:__getitem__
def __getitem__(self, format):
return _StringConverter(self.source, format)
#@-node:__getitem__
#@+node:__getattr__
def __getattr__(self, name):
class StrWrapper(object):
def __init__(self, value, name, source, format):
self._value = value
self.name = name
self.source = source
self.format = format
def __call__(self, arg):
formatter = self.source.formatter(self.name,
arg,
self.format)
return formatter(self._value(arg))
value = getattr(self.source, name)
if callable(value):
#for methods the wrapper has to
return StrWrapper(value, name, self.source, self.format)
formatter = self.source.formatter(name, format=self.format)
return formatter(value)
#@-node:__getattr__
#@-others
#@-node:class _StringConverter
#@+node:Multi
def Multi(val, **kwargs):
    """returns a dictionary for multivalued attributes"""
    # "_default" holds the fallback value; scenario specific values
    # are supplied as keyword arguments.
    return dict(_default=val, **kwargs)
#@nonl
#@-node:Multi
#@+node:create_relative_path
def create_relative_path(from_, to_):
    """
    creates a relative path from absolute path
    from_ to absolute path to_
    """
    src = from_.split(".")
    dst = to_.split(".")
    # find the first component where the two paths diverge
    i = 0
    for i, (src_part, dst_part) in enumerate(zip(src, dst)):
        if src_part != dst_part:
            break
    # one "up." for each remaining source component, then descend
    return "up." * len(src[i:]) + ".".join(dst[i:])
#@nonl
#@-node:create_relative_path
#@+node:create_absolute_path
def create_absolute_path(from_, to_):
    """
    creates an absolute path from absolute path
    from_ and relative path to_
    """
    from_ = from_.split(".")
    to_ = to_.split(".")
    # count the leading "up" components explicitly: the original code
    # sliced with from_[:-i], which silently emptied from_ when the
    # relative path had no "up" prefix (i == 0) and miscounted when
    # every component was "up"
    ups = 0
    while ups < len(to_) and to_[ups] == "up":
        ups += 1
    if ups:
        from_ = from_[:-ups]
    to_ = to_[ups:]
    return "%s.%s" % (".".join(from_), ".".join(to_))
#@-node:create_absolute_path
#@+node:_split_path
def _split_path(path):
try:
index = path.rindex(".")
return path[:index], path[index + 1:]
except:
return path
#@-node:_split_path
#@+node:_to_datetime
# Module-local shortcut to the calendar module's datetime coercion
# helper (used by the allocators below).
_to_datetime = pcalendar.to_datetime
#@nonl
#@-node:_to_datetime
#@+node:_get_tasks_of_sources
def _get_tasks_of_sources(task, attrib_filter="end,start,effort,length,duration"):
    """Return the paths of all source tasks this task (or one of its
    parents) depends on, restricted to dependencies on attributes
    named in attrib_filter."""
    found = {}
    current = task
    while current:
        for dependencies in current._sources.values():
            for dependency in dependencies:
                path, attrib = _split_path(dependency)
                if attrib and attrib in attrib_filter:
                    found[path] = True
        current = current.up
    return found.keys()
#@-node:_get_tasks_of_sources
#@+node:_build_balancing_list
def _build_balancing_list(tasks):
    """
    Returns a specially sorted list of tasks.
    If the tasks will allocate resources in the sorting order of that list
    correct balancing is ensured
    """
    # first sort the list for attributes
    # (descending priority, then balance, then original position)
    index = 0
    balancing_list = [(-t.priority, t.balance, index, t) for index, t in enumerate(tasks)]
    balancing_list.sort()
    #print
    #for p, b, i, t in balancing_list:
    #    print p, b, i, t.path
    balancing_list = [ t for p, b, i, t in balancing_list ]
    #now correct the presorted list:
    #if task a is dependent on task b, b will be moved before a
    done_map = { }
    count = len(balancing_list)
    # repeatedly pick the first task not yet inspected; the for/else
    # breaks out of the while loop once every task has been handled
    while len(done_map) < count:
        for i in range(count):
            to_inspect = balancing_list[i]
            if done_map.has_key(to_inspect):
                continue
            done_map[to_inspect] = True
            break
        else:
            break
        #@ << define inspect_depends_on >>
        #@+node:<< define inspect_depends_on >>
        inspect_path = to_inspect.path + "."
        sources = _get_tasks_of_sources(to_inspect)
        sources = [ s + "." for s in sources
                    if not inspect_path.startswith(s) ]
        # the if in the later line ignores assignments like
        # like start = up.start (i.e. references to parents)
        # this will be handled in the second if of inspect_depends_on
        # and can cause errors otherwise
        def inspect_depends_on(task):
            cmp_path = task.path + "."
            for src in sources:
                if cmp_path.startswith(src):
                    #task is a source of to_inspect
                    return True
                if inspect_path.startswith(cmp_path):
                    #to_inspect is a child of task
                    return True
            return False
        #@nonl
        #@-node:<< define inspect_depends_on >>
        #@nl
        # move every not yet inspected dependency of to_inspect in
        # front of it (in-place list surgery; i tracks to_inspect)
        for j in range(i + 1, count):
            check_task = balancing_list[j]
            if done_map.has_key(check_task):
                continue
            if inspect_depends_on(check_task):
                del balancing_list[j]
                balancing_list.insert(i, check_task)
                i += 1 # to_inspect is now at i + 1
    return balancing_list
#@-node:_build_balancing_list
#@+node:_as_string
def _as_string(val):
    # Serialize *val* into a source-like string representation;
    # apparently used when writing task attribute values back out.
    # NOTE: the isinstance chain is order sensitive (e.g. working
    # dates before plain datetimes, Resource/Task before the generic
    # str() fallback).
    if isinstance(val, basestring):
        return '"""%s"""' % val.replace("\n", "\\n")
    if isinstance(val, pcalendar._WorkingDateBase):
        return '"%s"' % val.strftime("%Y-%m-%d %H:%M")
    if isinstance(val, datetime.datetime):
        return '"%s"' % val.strftime("%Y-%m-%d %H:%M")
    if isinstance(val, datetime.timedelta):
        # days and remaining minutes
        return '"%id %iM"' % (val.days, val.seconds / 60)
    if isinstance(val, tuple):
        result = map(_as_string, val)
        return "(%s)" % ", ".join(result)
    if isinstance(val, list):
        result = map(_as_string, val)
        return "[%s]" % ", ".join(result)
    if isinstance(val, resource.Resource):
        return val._as_string()
    if isinstance(val, Task):
        return val.path
    return str(val)
#@-node:_as_string
#@+node:_step_tasks
def _step_tasks(task):
    """Generator: yield *task* itself (when it is a Task) and then all
    of its descendants in preorder."""
    if isinstance(task, Task):
        yield task
    pending = [iter(task.children)]
    while pending:
        descended = False
        for child in pending[-1]:
            yield child
            if child.children:
                # descend before finishing the siblings (preorder)
                pending.append(iter(child.children))
                descended = True
                break
        if not descended:
            # current level exhausted
            pending.pop()
#@-node:_step_tasks
#@-node:Utilities
#@+node:Cache
instrumentation_cache = {}
balancing_cache = {}

def clear_cache():
    """Empty all module level caches."""
    for cache in (instrumentation_cache, balancing_cache):
        cache.clear()
#@nonl
#@-node:Cache
#@+node:Resource Allocators
#@+others
#@+node:VariableLoad
def VariableLoad(limit=0):
    """
    Allocates the resource with maximal possible load.
    If limit is given, the load is at least limit or more.
    """
    # "me" is not defined in this module: it is expected to be
    # injected into the instrumented task code calling this function.
    # Outside such a context the NameError branch applies.
    try:
        balance = me.balance
    except NameError:
        balance = SLOPPY
    if balance != SLOPPY:
        raise RuntimeError("You may specify variable_load only with balance=SLOPPY")
    # the negative value signals "variable load, at least limit" to
    # the allocators (see their book_load <= 0 branches)
    return -limit
#@-node:VariableLoad
#@+node:_calc_load
def _calc_load(task, resource):
#changed at the resource instance
load = resource.__dict__.get("load")
if load is not None: return load
load = task.__dict__.get("load")
if load is not None: return load
#inherited by the task
return min(task.load, task.max_load, resource.max_load or 100.0)
#@-node:_calc_load
#@+node:_calc_maxload
def _calc_maxload(task, resource):
#changed at the resource instance
max_load = resource.__dict__.get("max_load")
if max_load: return max_load
#an explicit load can overwrite max_load
load = max(resource.__dict__.get("load", 0),
task.__dict__.get("load"), 0)
#change at the task
max_load = task.__dict__.get("max_load")
if max_load: return max(max_load, load)
#inherited by the resource
max_load = resource.max_load
if max_load: return max(max_load, load)
#inherited by the task
return max(task.max_load, load)
#@-node:_calc_maxload
#@+node:class AllocationAlgorithm
class AllocationAlgorithm(object):
    """Base class for resource allocation algorithms."""

    def test_allocation(self, task, resource):
        """Simulate the allocation of a specific resource.

        Returns a tuple describing the state of the simulated
        allocation.  The task allocator calls test_allocation for
        every alternative resource, compares the first items of the
        returned tuples and allocates the resource with the minimal
        first item."""
        return (task.end, )

    def allocate(self, task, state):
        """Eventually allocate a specific resource.

        *state* is the tuple returned by test_allocation.  The base
        implementation does nothing."""
        pass
#@-node:class AllocationAlgorithm
#@+node:class StrictAllocator
class StrictAllocator(AllocationAlgorithm):
    """This class implements the STRICT resource allocation"""
    #@ @+others
    #@+node:_distribute_len_loads
    def _distribute_len_loads(self, task, resource, effort, length):
        # A special load calculation, if effort and length are given.
        # and the resources have a defined maxload, the load must be
        # individually calculated for each resource.
        # Formulars: r=resources, t=task
        # effort = length * efficiency(t) * sum[load(r) * effiency(r)]
        # ==> sum_load = sum[load(r) * effiency(r)]
        #              = effort / (length * efficiency(t))
        #
        sum_load = float(effort) / (task.efficiency * length)
        # algorithm:
        # The goal is to distribute the load (norm_load) equally
        # to all resources. If a resource has a max_load(r) < norm_load
        # the load of this resource will be max_load(r), and the other
        # resources will have another (higher) norm_load
        max_loads = map(lambda r: (_calc_maxload(task, r), r), resource)
        max_loads.sort()
        efficiency_sum = sum(map(lambda r: r.efficiency, resource))
        norm_load = sum_load / efficiency_sum
        loads = {}
        # walk from the smallest max_load upwards: cap resources that
        # cannot take norm_load and redistribute the remainder
        for max_load, r in max_loads[:-1]:
            if max_load < norm_load:
                loads[r] = max_load
                efficiency_sum -= r.efficiency
                sum_load -= max_load * r.efficiency
                norm_load = sum_load / efficiency_sum
            else:
                loads[r] = norm_load
        # the resource with the biggest max_load takes the rest
        max_load, r = max_loads[-1]
        loads[r] = norm_load
        return loads
    #@-node:_distribute_len_loads
    #@+node:test_allocation
    def test_allocation(self, task, resource):
        """Simulate allocating the resource team to *task*; returns
        False when nothing remains to be booked, otherwise the state
        tuple ((end, book_load), resource, calc_load, start, effort)."""
        effort = task.__dict__.get("effort")
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        if task.performed_end:
            start = to_start(max(task.performed_end,
                                 task.root.calendar.now,
                                 task.start))
        else:
            start = task.start
        if task.root.has_actual_data and task.complete == 0:
            start = max(start, to_start(task.root.calendar.now))
        base_start = to_start(task.performed_start or task.start)
        calc_load = lambda r: _calc_load(task, r)
        loads = map(lambda r: (r, calc_load(r)), resource)
        length = task.__dict__.get("length")
        duration = task.__dict__.get("duration")
        end = task.__dict__.get("end")
        #@ << correct length >>
        #@+node:<< correct length >>
        # subtract the part of the length that was already performed
        if length is not None:
            length = to_delta(max(length - (task.start - base_start), 0))
        #@nonl
        #@-node:<< correct length >>
        #@nl
        #@ << correct duration >>
        #@+node:<< correct duration >>
        if duration is not None:
            delta = task.start.to_datetime() - base_start.to_datetime()
            delta = to_delta(delta, True)
            duration = to_delta(max(duration - delta, 0), True)
        #@nonl
        #@-node:<< correct duration >>
        #@nl
        #@ << check end >>
        #@+node:<< check end >>
        if end is not None:
            length = end - start
            if length <= 0: return False
        #@nonl
        #@-node:<< check end >>
        #@nl
        #@ << correct effort and (re)calculate length >>
        #@+node:<< correct effort and (re)calculate length >>
        if effort is not None:
            effort -= task.performed_effort
            effort = to_delta(max(effort, 0))
            if effort <= 0: return False
            if length is not None:
                #if length and effort is set, the load will be calculated
                length = length or task.calendar.minimum_time_unit
                loads = self._distribute_len_loads(task, resource,
                                                   effort, length)
                def calc_load(res):
                    return loads[res]
            else:
                #the length depends on the count of resources
                factor = sum(map(lambda a: a[0].efficiency * a[1],
                                 loads)) * task.efficiency
                length = effort / factor
        #@nonl
        #@-node:<< correct effort and (re)calculate length >>
        #@nl
        #@ << set adjust_date and delta >>
        #@+node:<< set adjust_date and delta >>
        if length is not None:
            adjust_date = lambda date: date
            delta = to_delta(length).round()
        else:
            assert(duration is not None)
            adjust_date = _to_datetime
            delta = datetime.timedelta(minutes=duration)
        #@nonl
        #@-node:<< set adjust_date and delta >>
        #@nl
        # find the earliest start date
        start, book_load\
               = self.balance(task, start, delta, adjust_date,
                              calc_load, resource)
        end = to_end(start + delta)
        start = to_start(start)
        if effort is None:
            #length is frozen ==> a new effort will be calculated
            factor = sum(map(lambda a: a[1], loads))
            length = end - start
            effort = to_delta(length * factor\
                              + task.performed_effort).round()
        return (end, book_load), resource, calc_load, start, effort
    #@-node:test_allocation
    #@+node:allocate
    def allocate(self, task, state):
        """Definitely book the resources; *state* is the tuple
        returned by test_allocation."""
        # now really book the resource
        end_bl, resource, calc_load, start, effort = state
        end = end_bl[0]
        cal = task.root.calendar
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        task.start = task.performed_start \
                     and to_start(task.performed_start) \
                     or to_start(start)
        task.end = end
        task._unfreeze("length")
        task._unfreeze("duration")
        length = end - start
        for r in resource:
            book_load = calc_load(r)
            work_time = to_delta(length * book_load).round()
            r.book_task(task, start, end, book_load, work_time, False)
        #the following lines are important to be exactly at this
        #positions in that order:
        # done and todo are dependend on:
        # - the existence of effort (if effort was set or not set)
        # - book_task (they can only be calculated, if the task is booked)
        # - booked_resource (to get the booked tasks)
        task.booked_resource = resource
        task.done = task.done
        task.todo = task.todo
        task.length = end - task.start
        task.effort = to_delta(effort + task.performed_effort)
    #@-node:allocate
    #@+node:balance
    #now effort exists always
    def balance(self, task, start, delta, adjust_date,
                calc_load, resource):
        """STRICT: never move the start date; just report the maximal
        current load of the team as book_load."""
        book_load = max(map(lambda r: r.get_load(task.start, task.scenario), resource))
        return start, book_load
    #@-node:balance
    #@-others
#@-node:class StrictAllocator
#@+node:class SmartAllocator
class SmartAllocator(StrictAllocator):
    """SMART allocation: the task is only booked when all resources of
    the team are free at the same time; balance() searches the
    earliest such start date."""
    #@ @+others
    #@+node:balance
    def balance(self, task, start, delta, adjust_date,
                calc_load, resource):
        #find the earliest start date, at which all
        #resources in the team are free
        cal = task.root.calendar
        to_start = task._to_start
        start = adjust_date(start)
        scenario = task.scenario
        while True:
            #we have finished, when all resources have the
            #same next free start date
            for r in resource:
                max_load = _calc_maxload(task, r)
                load = calc_load(r)
                #find the next free time of the resource
                s = r.find_free_time(start, delta, load, max_load, scenario)
                if s != start:
                    # r is busy at start: restart the scan from r's
                    # next free date, so all resources are re-checked
                    s = to_start(s)
                    start = adjust_date(s)
                    break
            else:
                # no resource moved the start date ==> all are free
                break
        return start, 1.0
    #@-node:balance
    #@-others
#@-node:class SmartAllocator
#@+node:class SloppyAllocator
class SloppyAllocator(AllocationAlgorithm):
    """Implements the SLOPPY allocation strategy.

    In contrast to the strict allocators, a sloppy task may be booked
    into several non-contiguous free intervals of its resources."""
    #@ @+others
    #@+node:test_allocation
    def test_allocation(self, task, resource):
        """Simulate the allocation; dispatch on whether the effort or
        the length of the task is frozen."""
        if "effort" in task.__dict__:
            return self.test_allocation_effort(task, resource)
        return self.test_allocation_length(task, resource)
    #@-node:test_allocation
    #@+node:test_allocation_length
    def test_allocation_length(self, task, resource):
        """Length is frozen ==> the effort will be calculated.

        Returns (-sum_effort, end, resource, intervals): the negated
        effort makes the allocator prefer the alternative that books
        the most work into the fixed time frame."""
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        end = task.end
        if task.performed_end:
            # fix: the original referenced the still unbound local
            # "start" here (UnboundLocalError whenever performed data
            # existed); task.start is what the sibling allocators use
            start = to_start(max(task.performed_end,
                                 task.root.calendar.now,
                                 task.start))
        else:
            start = task.start
        base_start = to_start(task.performed_start or task.start)
        length = to_delta(max(task.length - (start - base_start), 0))
        sum_effort = 0
        intervals = []
        scenario = task.scenario
        for r in resource:
            date = start
            max_load = _calc_maxload(task, r)
            book_load = _calc_load(task, r)
            while date < end:
                #find free time intervals and add them for booking
                endi, load = r.end_of_booking_interval(date, task)
                endi = min(endi, end)
                endi = to_end(endi)
                if book_load <= 0:
                    # variable load (see VariableLoad): book as much
                    # as possible, but at least -book_load
                    requested_load = -book_load
                    diff_load = max_load - load
                    # fix: compare the free capacity against the
                    # (positive) requested load, as the parallel code
                    # in test_allocation_effort does; the original
                    # compared against the negative raw value
                    if diff_load and diff_load >= requested_load:
                        used_book_load = diff_load
                    else:
                        used_book_load = max_load
                else:
                    used_book_load = book_load
                if max_load - load >= used_book_load:
                    intervals.append((r, used_book_load, date, endi))
                    # fix: accumulate -- the original plain assignment
                    # discarded all but the last interval's effort
                    sum_effort += (endi - date) * used_book_load
                date = to_start(endi)
        return -sum_effort, end, resource, intervals
    #@-node:test_allocation_length
    #@+node:test_allocation_effort
    def test_allocation_effort(self, task, resource):
        """Effort is frozen ==> the length will be calculated.

        Walks chronologically through the booking intervals of each
        resource and consumes the remaining effort in every free
        interval until it reaches 0."""
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        intervals = []
        effort = task.__dict__.get("effort")
        if task.performed_end:
            next_date = to_start(max(task.performed_end,
                                     task.root.calendar.now,
                                     task.start))
        else:
            next_date = task.start
        if task.root.has_actual_data and task.complete == 0:
            next_date = max(next_date, to_start(task.root.calendar.now))
        alloc_effort = effort
        effort -= task.performed_effort
        while effort > 0:
            date = next_date
            interval_resource = []
            interval_end = to_start(sys.maxint)
            factor = 0
            for r in resource:
                max_load = _calc_maxload(task, r)
                book_load = _calc_load(task, r)
                end, load = r.end_of_booking_interval(date, task)
                interval_end = to_start(min(end, interval_end))
                if book_load <= 0:
                    # variable load: book as much as possible, but at
                    # least -book_load
                    book_load = -book_load
                    diff_load = max_load - load
                    if diff_load and diff_load >= book_load:
                        book_load = diff_load
                    else:
                        book_load = max_load
                if book_load + load <= max_load:
                    resource_factor = book_load * r.efficiency
                    interval_resource.append((r, book_load, resource_factor))
                    factor += resource_factor
            next_date = interval_end
            if factor:
                factor *= task.efficiency
                length = to_delta(effort / factor).round()
                end = date + length
                if interval_end >= end:
                    # the remaining effort fits into this interval
                    next_date = interval_end = end
                    effort = 0
                    book_end = end
                else:
                    book_end = interval_end
                    length = book_end - date
                    effort -= length * factor
                book_end = to_end(book_end)
                intervals.append((date, book_end, length, interval_resource))
        return next_date, alloc_effort, resource, intervals
    #@-node:test_allocation_effort
    #@+node:allocate
    def allocate(self, task, state):
        """Really book the resources; *state* comes from
        test_allocation."""
        if "effort" in task.__dict__:
            self.allocate_effort(task, state)
        else:
            self.allocate_length(task, state)
    #@-node:allocate
    #@+node:allocate_length
    def allocate_length(self, task, state):
        # now really book the resource
        neg_sum_effort, end, resource, intervals = state
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        task.start = to_start(task.performed_start or task.start)
        task.end = to_end(end)
        task._unfreeze("length")
        task._unfreeze("duration")
        effort = 0
        for r, load, s, e in intervals:
            work_time = to_delta((e - s) * load).round()
            effort += work_time
            r.book_task(task, s, e, load, work_time, False)
        # the ordering of the following assignments matters: done and
        # todo depend on the bookings made above and on booked_resource
        task.booked_resource = resource
        task.done = task.done
        task.todo = task.todo
        task.effort = to_delta(effort + task.performed_effort).round()
    #@-node:allocate_length
    #@+node:allocate_effort
    def allocate_effort(self, task, state):
        # now really book the resource
        end, effort, resource, intervals = state
        to_start = task._to_start
        to_end = task._to_end
        to_delta = task._to_delta
        task.start = task.performed_start \
                     and to_start(task.performed_start) \
                     or to_start(intervals[0][0])
        task.end = to_end(end)
        task._unfreeze("length")
        task._unfreeze("duration")
        for start, end, length, resources in intervals:
            for r, load, factor in resources:
                work_time = to_delta(length * load)
                r.book_task(task, start, end, load, work_time, False)
        # ordering matters here as well (see allocate_length)
        task.booked_resource = resource
        task.done = task.done
        task.todo = task.todo
        task.effort = to_delta(effort)
        task.length = task.end - task.start
    #@-node:allocate_effort
    #@-others
#@-node:class SloppyAllocator
#@-others
# Singleton allocator instances, selected via the task's "balance"
# attribute (SMART/SLOPPY/STRICT constants).
_smart_allocator = SmartAllocator()
_sloppy_allocator = SloppyAllocator()
_strict_allocator = StrictAllocator()
_allocators = { SMART: _smart_allocator,
                SLOPPY: _sloppy_allocator,
                STRICT: _strict_allocator }
# reverse mapping used when serializing the balance attribute
_allocator_strings = { SMART: "SMART",
                       SLOPPY: "SLOPPY",
                       STRICT: "STRICT" }
#@-node:Resource Allocators
#@+node:Load Calculators
#@+node:YearlyMax
def YearlyMax(value):
    """
    Calculates a load parameter with a maximal yearly workload
    """
    # "me" is a global injected into instrumented task code; outside
    # of a task context the lookup fails and the default calendar is
    # used instead.
    try:
        cal = me.calendar
    except NameError:
        cal = pcalendar._default_calendar
    minutes = cal.Minutes(value)
    minutes_per_year = (cal.working_days_per_year
                        * cal.working_hours_per_day
                        * 60)
    return float(minutes) / minutes_per_year
#@nonl
#@-node:YearlyMax
#@+node:WeeklyMax
def WeeklyMax(value):
    """
    Calculates a load parameter with a maximal weekly workload
    """
    # "me" is a global injected into instrumented task code; outside
    # of a task context the lookup fails and the default calendar is
    # used instead.
    try:
        cal = me.calendar
    except NameError:
        cal = pcalendar._default_calendar
    minutes = cal.Minutes(value)
    minutes_per_week = (cal.working_days_per_week
                        * cal.working_hours_per_day
                        * 60)
    return float(minutes) / minutes_per_week
#@-node:WeeklyMax
#@+node:MonthlyMax
def MonthlyMax(value):
    """
    Calculates a load parameter with a maximal monthly workload
    """
    # "me" is a global injected into instrumented task code; outside
    # of a task context the lookup fails and the default calendar is
    # used instead.
    try:
        cal = me.calendar
    except NameError:
        cal = pcalendar._default_calendar
    minutes = cal.Minutes(value)
    minutes_per_month = (cal.working_days_per_month
                         * cal.working_hours_per_day
                         * 60)
    return float(minutes) / minutes_per_month
#@-node:MonthlyMax
#@+node:DailyMax
def DailyMax(value):
    """
    Calculates a load parameter with a maximal daily workload
    """
    # "me" is a global injected into instrumented task code; outside
    # of a task context the lookup fails and the default calendar is
    # used instead.
    try:
        cal = me.calendar
    except NameError:
        cal = pcalendar._default_calendar
    minutes = cal.Minutes(value)
    return float(minutes) / (cal.working_hours_per_day * 60)
#@-node:DailyMax
#@-node:Load Calculators
#@+node:Task
#@+node:class _TaskProperty
class _TaskProperty(object):
#@ @+others
#@+node:__init__
def __init__(self, method):
self.method = method
#@-node:__init__
#@+node:__get__
def __get__(self, instance, owner):
if not instance:
return None
return instance._wrap_attrib(self.method)
#@-node:__get__
#@-others
#@-node:class _TaskProperty
#@+node:class _RoundingTaskProperty
class _RoundingTaskProperty(object):
#@ @+others
#@+node:__init__
def __init__(self, method, name):
self.method = method
self.name = name
#@-node:__init__
#@+node:__get__
def __get__(self, instance, owner):
if not instance:
return None
result = instance._wrap_attrib(self.method).round()
if instance._is_frozen:
#correct the attrib to the rounded value
setattr(instance, self.name, result)
return result
#@-node:__get__
#@-others
#@-node:class _RoundingTaskProperty
#@+node:class Task
class Task(object):
#@ << description >>
#@+node:<< description >>
"""
This class represents a single task in the project tree. A task
can have other child tasks, or is a leaf of the tree. Resources
will be allocated only to leafes. You will never create task
objects by your self, they are created indirectly by Projects.
@var root:
Returns the root project task.
@var up:
Returns the parent task.
@var title:
Specifies an alternative more descriptive name for the task.
@var start:
The start date of the task. Valid values are expressions and
strings specifing a datatime
@var end:
The end date of the task. Valid values are expressions and
strings.
@var effort:
Specifies the effort needed to complete the task. Valid values
are expressions and strings. (Todo: What happens, in case of
specified performance data...)
@var length:
Specifies the time the task occupies the resources. This is
working time, not calendar time. 7d means 7 working days, not one
week. Whether a day is considered a working day or not depends on
the defined working hours and global vacations.
@var duration:
Specifies the time the task occupies the resources. This is
calendar time, not working time. 7d means one week.
@var buffer:
Specifies the time a task can be delayed, without moving dependend
milestones. A Task with a buffer S{<=} 0d is part of the critical
chain. This attribute is readonly.
@var complete:
Specifies what percentage of the task is already completed.
@var todo:
Specifies the effort, which needs to be done to complete a
task. This is another (indirect) way to specify the ME{complete}
attribute.
@var done:
Specifies the work effort, which has been already done. This
attribute is readonly.
@var estimated_effort:
Specifies the estimated_effort given by setting the effort property.
@var performed:
Specifies a list of actual working times performed on the task.
The format is: C{[ (resource, from, to, time), ... ]}
@var performed_work_time:
Specifies the sum of all working times. This attribute is
readonly.
@var performed_effort:
Specifies the complete effort of all working times. This attribute is
readonly.
@var performed_start:
The start date of the performed data.
@var performed_end:
The end date of the performed data.
@var performed_resource:
The resources who have already performed on the task. This attribute is readonly.
@var balance:
Specifies the resource allocation type. Possible values are
CO{STRICT}, CO{SLOPPY}, CO{SMART}.
@var resource:
Specifies the possible resources, that may be allocated for the
task.
@var booked_resource:
Specifies the allocated resources of a task. This attribute is
readonly.
@var load:
Specifies the daily load of a resource for an allocation of the
specified task. A load of 1.0 (default) means the resource is
allocated for as many hours as specified by
ME{working_hours_per_day}. A load of 0.5 means half that many
hours.
@var max_load:
Specify the maximal allowed load sum of all simultaneously
allocated tasks of a resource. A ME{max_load} of 1.0 (default)
means the resource may be fully allocated. A ME{max_load} of 1.3
means the resource may be allocated with 30% overtime.
@var efficiency:
The efficiency of a resource can be used for two purposes. First
you can use it as a crude way to model a team. A team of 5 people
should have an efficiency of 5.0. Keep in mind that you cannot
track the member of the team individually if you use this
feature. The other use is to model performance variations between
your resources.
@var milestone:
        Specifies whether the task is a milestone. The possible values are
C{True} or "later". If the start date of the milestone is not
a valid working date, the milestone will appear at the previous
working date before the given start date. If "later" is specified
the milestone will appear at the next valid working date.
A milestone has always an effort of 0d.
@var priority:
Specifies a priority between 1 and 1000. A task with higher
priority is more likely to get the requested resources. The
default priority is 500.
@var children:
Specifies a list of all subtasks. A task without children is
called a leaf task index{leaf task} otherwise it is called a
parent task index{parent task}. This attribute is readonly.
@var depth:
        Specifies the depth of the task within the hierarchy. This
attribute is readonly.
@var index:
Specifies a structural index number. This attribute is readonly.
@var path:
Specifies the path.
@var copy_src:
Specifies the path to an other task. When you set this attribute,
all attributes (except of ME{start} and ME{end}) of copy_src will
        be copied to the current task. This is useful if you want to
        define the same task, in different project definitions. It acts like
a task link.
@var scenario:
The scenario which is currently evaluated. This attribute is readonly.
@var dont_inherit:
A list of attribute names, which will be not inherited by
subtasks.
@var calendar:
Specifies the task calendar.
@var working_days_per_week:
Specifies the days within a working week. This value is used
internally to convert time differences from weeks to days. The
default value is 5 days.
@var working_days_per_month:
Specifies the days within a working month. This value is used
internally to convert time differences from months to days. The
default value is 20 days.
@var working_days_per_year:
Specifies the days within a working year. This value is used
        internally to convert time differences from years to days. The
default value is 200 days.
@var working_hours_per_day:
Specifies the hours within a working day. This value is used
        internally to convert time differences that are entered in days to
hours. The default value is 8 hours.
@var minimum_time_unit:
Specifies the minimum resolution in minutes for the task
scheduling. The default value is 15 minutes.
@var vacation:
Specifies a public vacation for the calendar. This attribute is
specified as a list of date literals or date literal intervals. Be
aware that the end of an interval is excluded, i.e. it is the
first working date.
@var extra_work:
Specifies additional worktime. This attribute is specified as a
list of date literals or date literal intervals. Be aware that the
end of an interval is excluded, i.e. it is the first working date.
@var working_days:
Specifies the weekly working time within calendar. The format of
this attribute is: [ (day_range, time_range, ...), (day_range, time_range, ...), ... ].
        day_range is a comma separated string of week days. Valid values
are mon, tue, wed, thu, fri, sat, sun.
        time_range is a string specifying a time interval like
        8:00-10:00. You can specify any number of time_ranges, following
the first.
@var now:
Specifies the current daytime and is a date literal. ME{now} is
used to calculate several task attributes.
"""
#@nonl
#@-node:<< description >>
#@nl
#@ << declarations >>
#@+node:<< declarations >>
# Variables for the gui interface
_date_completion = { "Date": 'Date("|")',
"max": "max(|)",
"min": "min(|)",
"Multi" : "Multi(|)" }
_delta_completion = { "Delta" : 'Delta("|")',
"Multi" : "Multi(|)" }
__attrib_completions__ = { \
"def NewTask():" : "def |NewTask():\n",
"milestone": 'milestone = True',
"start": 'start = ',
"end": 'end = ',
"effort": 'effort = "|"',
"duration": 'duration = "|"',
"length": 'length = "|"',
"todo": 'todo = "|"',
"done": 'done = "|"',
"title": 'title = "|"',
"load": 'load = ',
"max_load": 'max_load = ',
"efficiency": 'efficiency = ',
"complete": 'complete = ',
"copy_src": 'copy_src =',
"__constraint__": '__constraint__():\n|"',
"priority": 'priority = ',
"balance" : 'balance = ',
"resource": 'resource = ',
"performed" : 'performed = [(|resource, "2002-02-01", "2002-02-05", "2H"),]',
"add_attrib": "add_attrib(|'name', None)",
"working_days_per_week": 'working_days_per_week = ',
"working_days_per_month": 'working_days_per_month = ',
"working_days_per_year": 'working_days_per_year = ',
"working_hours_per_day": 'working_hours_per_day = ',
"minimum_time_unit": 'minimum_time_unit = ',
"vacation": 'vacation = [("|2002-02-01", "2002-02-05")]',
"extra_work": 'extra_work = [("|2002-02-01", "2002-02-05")]',
"working_days" : 'working_days = ["|mon,tue,wed,thu,fri", "8:00-12:00", "13:00-17:00"]',
"now": 'now = "|"',
"calendar" : 'calendar = ',
"#load": { "YearlyMax": 'YearlyMax("|")',
"WeeklyMax": 'WeeklyMax("|")',
"MonthlyMax": 'MonthlyMax("|")',
"DailyMax": 'DailyMax("|")',
"VariableLoad" : "VariableLoad(|)"},
"#max_load": { "YearlyMax": 'YearlyMax("|")',
"WeeklyMax": 'WeeklyMax("|")',
"MonthlyMax": 'MonthlyMax("|")',
"DailyMax": 'DailyMax("|")' },
"#start": _date_completion,
"#end": _date_completion,
"#effort": _delta_completion,
"#duration": _delta_completion,
"#length": _delta_completion,
"#todo": _delta_completion,
"#done": _delta_completion,
"#resource" : "get_resource_completions",
"#calendar" : "get_calendar_completions",
"#balance": { "STRICT": "STRICT",
"SMART": "SMART",
"SLOPPY": "SLOPPY" } }
formats = { "start" : "%x %H:%M",
"end" : "%x %H:%M",
"performed_start" : "%x %H:%M",
"performed_end" : "%x %H:%M",
"load" : "%.2f",
"length" : "%dd{ %HH}{ %MM}",
"effort" : "%dd{ %HH}{ %MM}",
"estimated_effort" : "%dd{ %HH}{ %MM}",
"performed_effort" : "%dd{ %HH}{ %MM}",
"duration" : "%dd{ %HH}{ %MM}",
"complete" : "%i",
"priority" : "%i",
"todo" : "%dd{ %HH}{ %MM}",
"done" : "%dd{ %HH}{ %MM}",
"efficiency" : "%.2f",
"buffer" : "%dd{ %HH}{ %MM}",
"costs" : "%.2f",
"sum" : "%.2f",
"max" : "%.2f",
"min" : "%.2f",
"milestone" : "%s",
"resource" : "%s",
"booked_resource" : "%s",
"performed_resource" : "%s" }
_constraint = None
_is_frozen = False
_is_compiled = False
_is_parent_referer = False
scenario = None # only for autocompletion
milestone = False
performed = ()
performed_resource = ()
booked_resource = ()
_performed_resource_length = ()
_resource_length = ()
dont_inherit = ()
performed_start = None
performed_end = None
performed_work_time = pcalendar.Minutes(0)
_setting_hooks = {}
#@nonl
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:__init__
    def __init__(self, func, name, parent=None, index=1):
        """
        Build a task node from its definition function.

        @param func: plain function whose body defines the task attributes.
        @param name: attribute name of the task within its parent.
        @param parent: enclosing task (None for the root task).
        @param index: 1-based position among the parent's children.
        """
        assert(type(func) == types.FunctionType)
        # the definition function is instrumented (rewritten) only once;
        # the result is cached per (code object, closure identity)
        func_key = (func.func_code, func.func_closure and id(func.func_closure))
        try:
            instrumented = instrumentation_cache[func_key]
        except KeyError:
            instrumented = _instrument(func)
            instrumented.org_code = func_key
            instrumentation_cache[func_key] = instrumented
        func.task_func = instrumented # will be used in the gui
        self._function = instrumented
        self.name = name
        self.up = parent
        self.children = []
        self._sources = {} # all tasks, I am linked to
        self._dependencies = {} # all tasks that link to me
        self._original_values = {}
        self._properties = {} # a registry of all non standard attributes
        self.title = self.name
        self.root = parent and parent.root or self
        self.scenario = self.root.scenario
        self.path = parent and parent.path + "." + name or name
        self.depth = len(self.path.split(".")) - 1
        self.index = parent and ("%s.%i" % (parent.index, index)) \
                     or str(index)
        # a task attribute of this name would shadow a format attribute
        if self.formats.has_key(name):
            raise AttributeError("Task name '%s' hides attribute of parent." \
                                 % name)
        # cache the calendar converters for date/delta literals
        cal = self.calendar
        self._to_delta = cal.Minutes
        self._to_start = cal.StartDate
        self._to_end = cal.EndDate
#@-node:__init__
#@+node:__iter__
    def __iter__(self):
        # iterate over this task and (recursively) all its subtasks
        return _step_tasks(self)
#@-node:__iter__
#@+node:__repr__
    def __repr__(self):
        # short debug representation, e.g. "<Task build>"
        return "<Task %s>" % self.name
#@-node:__repr__
#@+node:__cmp__
    def __cmp__(self, other):
        # tasks are ordered by their dotted path; if "other" is not a
        # task the path is compared with "other" directly
        try:
            return cmp(self.path, other.path)
        except Exception:
            return cmp(self.path, other)
#@-node:__cmp__
#@+node:__getattr__
    def __getattr__(self, name):
        """
        Inherit unknown non-private attributes from the parent chain.

        An attribute listed in a parent's C{dont_inherit} is skipped on
        that parent, and child tasks are never inherited (a parent's
        subtask is not a value of this task).  Raises AttributeError
        (annotated with C{is_frozen}) if no ancestor has the attribute.
        """
        try:
            if name[0] != "_":
                parent = self.up
                while parent:
                    if name not in parent.dont_inherit:
                        result = getattr(parent, name)
                        # do not inherit the parent's own child tasks
                        if not (isinstance(result, Task) and result.up == parent):
                            return result
                    parent = parent.up
        except AttributeError:
            pass
        except IndexError:
            # name == "": name[0] above raised IndexError
            raise AttributeError()
        exception = AttributeError("'%s' is not a valid attribute of '%s'."
                                   % (name, self.path))
        exception.is_frozen = self._is_frozen
        raise exception
#@-node:__getattr__
#@+node:_idendity_
def _idendity_(self): return self.root.id + self.path[4:]
#@-node:_idendity_
#@+node:_set_hook
    def _set_hook(cls, attrib_name, function=None):
        """
        Register (or, with C{function=None}, remove) a class wide hook
        that filters values before they are assigned to C{attrib_name};
        see C{_set_attrib}.
        """
        if function:
            cls._setting_hooks[attrib_name] = function
        else:
            try:
                del cls._setting_hooks[attrib_name]
            except KeyError: pass
    _set_hook = classmethod(_set_hook)
#@nonl
#@-node:_set_hook
#@+node:Public methods
#@+node:to_string
def to_string(self): return _StringConverter(self)
to_string = property(to_string)
#@nonl
#@-node:to_string
#@+node:indent_name
def indent_name(self, ident=" "):
"""
returns a indented name, according to its depth in the hierachy.
"""
return ident * self.depth + self.name
indent_name.attrib_method = True
indent_name.__call_completion__ = "indent_name()"
#@-node:indent_name
#@+node:costs
    def costs(self, cost_name, mode="ep"):
        """
        Calculate the resource costs of the task.

        @param cost_name: name of a rate attribute of the resource.
        @param mode: character combination:
            e calculates the estimated costs
            p calculates the performed costs
            ==> "ep" (default) calculates all costs
        @return: the costs rounded to two digits.
        """
        # a parent task accumulates the costs of its children
        if self.children:
            return sum([ c.costs(cost_name, mode) for c in self.children])
        costs = 0
        if 'e' in mode:
            costs += sum(map(lambda rl: getattr(rl[0], cost_name) * rl[1],
                             self._resource_length))
        if 'p' in mode:
            costs += sum(map(lambda rl: getattr(rl[0], cost_name) * rl[1],
                             self._performed_resource_length))
        # the *_length tables hold minutes; dividing by minutes per
        # working day suggests the rate is per working day -- confirm
        costs /= (60.0 * self.root.calendar.working_hours_per_day)
        return round(costs, 2)
    costs.attrib_method = True
    costs.__call_completion__ = 'costs("|")'
#@-node:costs
#@+node:sum
    def sum(self, attrib_name):
        """
        Sum the values of C{attrib_name} over this task and its subtree.
        """
        val = 0
        if self.children:
            # "sum" here is the builtin, not this method
            val += sum(map(lambda c: c.sum(attrib_name), self.children))
            if self.is_inherited(attrib_name):
                return val
            if attrib_name not in self.dont_inherit:
                # NOTE(review): the parent's own value is only added when
                # the attribute is listed in dont_inherit -- presumably to
                # avoid counting a value the children already inherited;
                # confirm this is intended.
                return val
        return val + getattr(self, attrib_name)
    sum.attrib_method = True
    sum.__call_completion__ = 'sum("|")'
#@-node:sum
#@+node:min
def min(self, attrib_name):
if self.children:
return min(map(lambda c: c.min(attrib_name), self.children))
return getattr(self, attrib_name)
min.attrib_method = True
min.__call_completion__ = 'min("|")'
#@-node:min
#@+node:max
def max(self, attrib_name):
if self.children:
return max(map(lambda c: c.max(attrib_name), self.children))
return getattr(self, attrib_name)
max.attrib_method = True
max.__call_completion__ = 'max("|")'
#@-node:max
#@+node:all_resources
def all_resources(self):
result = self._all_resources_as_dict()
result = result.keys()
result.sort()
return result
#@-node:all_resources
#@+node:get_task
def get_task(self, path=None):
"""
Returns a task with the given path.
"""
if not path:
return self
names = path.split(".")
rest = ".".join(names[1:])
result = getattr(self, names[0], None)
return isinstance(result, Task) and result.get_task(rest) or None
#@-node:get_task
#@+node:snapshot
    def snapshot(self, indent="", name=None):
        """
        Serialize the task (and recursively its children) as python
        source including booked resources and performed bookings, so a
        later run can restore the current state.
        """
        text = indent + "def %s():\n" % (name or self.name)
        indent += "    "
        # standard attributes that are explicitly set on this task
        for name in ("priority", "balance", "complete",
                     "milestone", "end", "start", "effort", "load"):
            val = getattr(self, name, None)
            if val is None:
                continue
            if name[0] == "_":
                name = name[1:]
            text += "%s%s = %s\n" % (indent, name, _as_string(val))
        # user defined attributes, except performed data and resources
        for name in self._properties:
            if name.startswith("performed"): continue
            val = getattr(self, name, None)
            try:
                if issubclass(val, resource.Resource): continue
            except TypeError:
                pass
            text += "%s%s = %s\n" % (indent, name, _as_string(val))
        resources = tuple(self._iter_booked_resources())
        if resources:
            # resources are combined with the "&" operator
            text += "%sresource = \\\n" % indent
            def make_resource(res):
                return "%s    %s" \
                       % (indent, res.snapshot())
            text += "&\\\n".join(map(make_resource, resources)) + "\n"
            def make_resource_booking(res):
                def make_booking(booking):
                    return '%s    (%s, "%s", "%s", "%sM"),' \
                           % (indent, res.name,
                              booking.book_start.strftime("%Y%m%d %H:%M"),
                              booking.book_end.strftime("%Y%m%d %H:%M"),
                              booking.work_time)
                return "\n".join(map(make_booking, res.get_bookings(self)))
            text += "%sperformed = [\n" % indent
            text += "\n".join(map(make_resource_booking, resources)) + "]"
        child_text = map(lambda c: c.snapshot(indent), self.children)
        text += "\n\n"
        text += "".join(child_text)
        return text
#@-node:snapshot
#@+node:is_inherited
def is_inherited(self, attrib_name):
return not self.__dict__.has_key(attrib_name)
#@-node:is_inherited
#@+node:formatter
    def formatter(self, attrib_name, arg=None, format=None):
        """
        Return a function that converts values of C{attrib_name} to
        strings, honoring the class level C{formats} table (or the
        explicitly given *format*).
        """
        formats = self.formats
        format = format or formats.get(attrib_name)
        # date/delta valued attributes render via the value's strftime
        if attrib_name in ("start", "end", "length", "effort",
                           "done", "todo", "buffer", "estimated_effort",
                           "performed_effort", "performed_start", "performed_end"):
            def save_strftime(v):
                try:
                    return v.strftime(format)
                #except AttributeError: some bug avoid catching this exception
                except Exception:
                    return str(v)
            return save_strftime
        if attrib_name == "duration":
            def save_strftime(v):
                try:
                    # the second argument selects duration formatting
                    return v.strftime(format, True)
                except AttributeError:
                    return str(v)
            return save_strftime
        if attrib_name in ("booked_resource", "performed_resource"):
            def get_resource_name(v):
                # v is a single resource or a sequence of resources
                title = getattr(v, "title", None)
                if title: return title
                return ", ".join([r.title for r in v])
            return get_resource_name
        if arg and attrib_name in ("costs", "sum", "max", "min"):
            # allow specialized formats like "sum(effort)"
            format = formats.get("%s(%s)" % (attrib_name, arg), format)
        if format:
            # numeric values are rendered locale aware (grouping on)
            return lambda v: locale.format(format, v, True)
        return str
#@-node:formatter
#@-node:Public methods
#@+node:Resource allocation Methods
#@+node:_all_resources_as_dict
    def _all_resources_as_dict(self):
        # returns { resource: 1 } for every resource that may work on
        # this task; a leaf expands its resource expression to members
        if self.children:
            result = {}
            for c in self.children:
                result.update(c._all_resources_as_dict())
            return result
        if self.resource:
            return dict(map(lambda r: (r, 1), self.resource.all_members()))
        return {}
#@-node:_all_resources_as_dict
#@+node:_test_allocation
    def _test_allocation(self, resource_state, allocator):
        # check whether the allocator could book the resources (in the
        # given state) for this task, without actually booking them
        resource = self.resource._get_resources(resource_state)
        if not resource:
            return False
        return allocator.test_allocation(self, resource)
#@-node:_test_allocation
#@+node:_allocate
    def _allocate(self, state, allocator):
        # let the allocator book the resources for this task
        allocator.allocate(self, state)
        #activate cache for done and todo
        if self.start.to_datetime() > self.end.to_datetime():
            # this can happen when performed effort lies in non
            # working time; swap to keep start <= end
            tmp = self.start
            self.start = self.end
            self.end = tmp
        for r in self.performed_resource:
            r.correct_bookings(self)
        # remember (resource, booked minutes); weak proxies avoid
        # reference cycles between tasks and resources
        self._resource_length = map(lambda r: (weakref.proxy(r), \
                                               r.length_of(self)),
                                    self._iter_booked_resources())
#@-node:_allocate
#@+node:_convert_performed
    def _convert_performed(self, all_resources):
        """
        Normalize the user supplied C{performed} list into
        C{self._performed}: resolve resource names, parse date literals,
        compute working times and warn about overlapping items.
        Returns True if there was data to convert.
        """
        performed = self.performed
        if not performed: return False
        if not isinstance(performed, (tuple, list)) \
           or not isinstance(performed[0], (tuple, list)) \
           or not len(performed[0]) >= 3:
            self._raise(TypeError("""The format of the performed attribute must be:
            [( res_name, start_literal, end_literal, working_time ), ... ].
            """), "performed")
        # NOTE(review): round_down_delta appears to be unused below --
        # possibly a leftover from an earlier revision
        round_down_delta = self.root.calendar.minimum_time_unit / 2
        round_down_delta = datetime.timedelta(minutes=round_down_delta)
        def convert_item(index):
            item = performed[index]
            res, start, end = item[:3]
            # a resource may be given by name instead of by object
            if isinstance(res, str):
                found = filter(lambda r: r.name == res, all_resources)
                if found: res = found[0]
            try:
                if not isinstance(res, (resource.Resource,
                                        resource._MetaResource)):
                    raise ValueError("the resource '%s' is unknown." % res)
                start = _to_datetime(start)
                end = _to_datetime(end)
                if len(item) > 3:
                    working_time = self._to_delta(item[3]).round()
                else:
                    # no explicit working time: use the interval length
                    working_time = self._to_delta(end - start, True)
                return ((res, start, end, working_time), index)
            except Exception, exc:
                # re-raise with the 1-based item number for the user
                self._raise(exc.__class__("Item %i: %s" \
                                          % (index + 1, str(exc))),
                            "performed")
        converted = dict(map(convert_item, range(len(performed))))
        converted = converted.items()
        converted.sort()
        #check for overlapping items
        last_res = None
        for item, index in converted:
            res, start, end, work_time = item
            if last_res == res and start < last_end:
                self._warn("Items %i, %i: %s and %s are overlapping." \
                           % (last_index + 1, index + 1,
                              str(performed[last_index]),
                              str(performed[index])),
                           "performed")
            last_res = res
            last_end = end
            last_index = index
        self._performed = map(lambda x: x[0], converted)
        return True
#@-node:_convert_performed
#@+node:_allocate_performed
    def _allocate_performed(self, performed):
        """
        Book the (already converted) performed data on the resources and
        derive the performed_* summary attributes of the task.
        """
        if not performed: return
        to_delta = self._to_delta
        to_start = self._to_start
        to_end = self._to_end
        last = datetime.datetime.min
        first = datetime.datetime.max
        effort = 0
        work_time_sum = 0
        zero_minutes = to_delta(0)
        minimum_time_unit = to_delta(self.calendar.minimum_time_unit)
        summary = {}
        # accumulate per resource: (earliest start, latest end, work time)
        for item in performed:
            res, start, end, work_time = item
            effort += work_time * self.efficiency * res.efficiency
            work_time_sum += work_time
            res = res() # NOTE(review): res is called here -- confirm it dereferences to the resource instance
            ss, es, wts = summary.get(res, (datetime.datetime.max,
                                            datetime.datetime.min,
                                            zero_minutes))
            summary[res] = (min(ss, start), max(es, end), wts + work_time)
        for r, v in summary.iteritems():
            start, end, work_time = v
            assert(start.__class__ is datetime.datetime)
            assert(end.__class__ is datetime.datetime)
            #the booking limits should be inside the workingtime
            #to display them correct in resource charts
            cstart = to_start(start).to_datetime()
            if cstart > start: cstart = to_end(start).to_datetime()
            cend = to_end(end).to_datetime()
            if cend < end: cend = to_start(end).to_datetime()
            if self.root.is_snapshot:
                delta = to_end(cend) - to_start(cstart)
            else:
                delta = to_delta(cend - cstart).round()
            if not delta:
                # never book with a zero interval
                delta = minimum_time_unit
            book_load = float(work_time) / delta
            r().book_task(self, cstart, cend, book_load, work_time, True)
            last = max(end, last)
            first = min(start, first)
        self._performed_resource_length = tuple([ (r, v[2]) for r, v in summary.iteritems() ])
        self.performed_resource = tuple(summary.keys())
        self.performed_end = last
        self.performed_start = first
        self.performed_effort = to_delta(effort)
        self.performed_work_time = to_delta(work_time_sum)
        self._check_completion()
#@-node:_allocate_performed
#@+node:_iter_booked_resources
    def _iter_booked_resources(self):
        # iterate over the union of performed and planned resources
        result = dict(map(lambda r: (r, 1), self.performed_resource))
        result.update(dict(map(lambda r: (r, 1), self.booked_resource)))
        return result.iterkeys()
#@-node:_iter_booked_resources
#@-node:Resource allocation Methods
#@+node:Compile Methods
#@+node:_generate
    def _generate(self, deferred=None):
        """
        Repeatedly compile all deferred tasks until none remain.  When a
        whole pass makes no progress (deferred == new_deferred) the next
        pass runs with do_raise=True so the underlying error surfaces.
        """
        do_raise = False
        deferred = deferred or [ self ]
        while deferred:
            new_deferred = []
            for task in deferred:
                task._compile(new_deferred, do_raise)
            do_raise = deferred == new_deferred
            deferred = new_deferred
#@-node:_generate
#@+node:_recalc_properties
    def _recalc_properties(self):
        # re-evaluate only the user defined attributes (standard
        # attributes stay untouched)
        if not self._properties: return
        self.__compile_function([], False, _MeProxyRecalc(self))
        self._is_compiled = True
#@-node:_recalc_properties
#@+node:_compile
    def _compile(self, deferred, do_raise):
        """
        (Re)evaluate the task's definition function; tasks that cannot
        be completed yet are appended to *deferred*.
        """
        self.dont_inherit = ()
        self._constraint = None
        self._original_values.clear()
        self._properties.clear()
        try:
            # __at_compile only exists while this task is compiling;
            # finding it here means a child recursed back into us
            self.__at_compile
            #@ << raise child recursion error >>
            #@+node:<< raise child recursion error >>
            self._raise(RecursionError("A child defines a "\
                                       "recursive definition at %s" % self.path))
            #@-node:<< raise child recursion error >>
            #@nl
        except AttributeError:
            self.__at_compile = self, ""
            try:
                self.__compile_function(deferred, do_raise, _MeProxy(self))
            finally:
                del self.__at_compile
        for c in self.children:
            if not c._is_compiled:
                c._compile(deferred, do_raise)
        if self._is_compiled:
            self.__check_milestone()
            self.__check_task()
        self.root.has_actual_data |= self.__dict__.has_key("performed")
#@-node:_compile
#@+node:__compile_function
    def __compile_function(self, deferred, do_raise, me_instance):
        """
        Execute the instrumented definition function with a prepared
        global namespace (me/up/root/Date/Delta/max/min/add_attrib).
        Incomplete tasks are appended to *deferred* instead of raising,
        unless *do_raise* is true.
        """
        self._is_compiled = self._is_frozen
        restore_globals = []
        globals_ = self._function.func_globals
        #@ << set function global values >>
        #@+node:<< set function global values >>
        def to_value_wrapper(a):
            if isinstance(a, _ValueWrapper):
                return a
            return _ValueWrapper(a, [(None, None)])
        # max/min replacements that keep value wrappers (and with them
        # the source references) intact
        def my_max(*args):
            return max(map(to_value_wrapper, args))
        def my_min(*args):
            return min(map(to_value_wrapper, args))
        globals_["me"] = me_instance
        if self._is_compiled:
            globals_["up"] = self.up
            globals_["root"] = self.root
        else:
            # not yet compiled: hand out lazy path objects instead of
            # the real task references
            globals_["up"] = _Path(self.up, "up")
            globals_["root"] = _Path(self.root, "root")
        globals_["Delta"] = self._to_delta
        globals_["Date"] = self._to_start
        globals_["max"] = my_max
        globals_["min"] = my_min
        globals_["add_attrib"] = me_instance.add_attrib
        #@nonl
        #@-node:<< set function global values >>
        #@nl
        #@ << set me in global functions >>
        #@+node:<< set me in global functions >>
        #@+at
        # Is used for functions like YearlyMax, MonthlyMax, ....
        #@-at
        #@@code
        for name in self._function.global_names:
            try:
                obj = globals_[name]
                if isinstance(obj, types.FunctionType):
                    fg = obj.func_globals
                    if not fg.has_key("me") and "me" in obj.func_code.co_names:
                        restore_globals.append(fg)
                        fg["me"] = me_instance
            except KeyError: continue
        #@nonl
        #@-node:<< set me in global functions >>
        #@nl
        try:
            #@ << eval function >>
            #@+node:<< eval function >>
            if do_raise:
                try:
                    self._function()
                    self._is_compiled = True
                except _IncompleteError, e:
                    # compile the task we depend on first, then re-raise
                    src = e.args[1]
                    if src is not self:
                        self.__at_compile = e.args[1:]
                        src._compile([], True)
                    raise
            else:
                try:
                    self._function()
                    self._is_compiled = True
                except AttributeError, e:
                    #print "AttributeError:", e, self.name, e.is_frozen, do_raise
                    deferred.append(self)
                except _IncompleteError:
                    #print "_IncompleteError:", id(self), self.name, do_raise
                    deferred.append(self)
                except RecursionError:
                    self._is_parent_referer = True
                    deferred.append(self)
            #@nonl
            #@-node:<< eval function >>
            #@nl
        finally:
            # always undo the "me" injections into foreign globals
            for fg in restore_globals:
                del fg["me"]
#@-node:__compile_function
#@-node:Compile Methods
#@+node:Setting methods
#@+node:_set_attrib
    def _set_attrib(self, name, value):
        """
        Central setter used by the compiled definition function: applies
        setting hooks, creates child tasks from nested functions,
        dispatches standard attributes to their _set_* methods and
        records user defined attributes in _properties.
        """
        if value is _NEVER_USED_: return
        try:
            value = self._setting_hooks[name](self, name, value)
        except KeyError: pass
        if name == "__constraint__":
            self._constraint = value
            return
        if type(value) == types.FunctionType:
            if value.func_code.co_argcount == 0:
                #@ << add child task >>
                #@+node:<< add child task >>
                # a nested zero-argument function defines a subtask
                try:
                    task = self.__dict__[value.func_name]
                except KeyError:
                    task = Task(value, value.func_name, self, len(self.children) + 1)
                    self.children.append(task)
                    setattr(self, value.func_name, task)
                return
                #@nonl
                #@-node:<< add child task >>
                #@nl
        if name[0] == "_":
            #private vars will not be set
            return
        if isinstance(value, _Path):
            value = value._task
        set_method = getattr(self, "_set_" + name, None)
        if set_method:
            #@ << set standard attribute >>
            #@+node:<< set standard attribute >>
            # a dict value maps scenario names to values
            if type(value) == types.DictionaryType:
                self.root.all_scenarios.update(value.keys())
                value = value.get(self.scenario, value["_default"])
            self.__set_sources(name, value)
            self._original_values[name] = value
            set_method(_val(value))
            #@nonl
            #@-node:<< set standard attribute >>
            #@nl
        else:
            #@ << set userdefined attribute >>
            #@+node:<< set userdefined attribute >>
            if callable( getattr(self.__class__, name, None)):
                raise NameError('You may not use "%s" as attribute' % name)
            setattr(self, name, value)
            self._properties[name] = True
            self.__set_sources(name, value)
            #@nonl
            #@-node:<< set userdefined attribute >>
            #@nl
#@-node:_set_attrib
#@+node:read only attributes
#@+node:_set_name
    # Guards that reject assignments to calculated attributes; each
    # raises AttributeError so the definition function fails loudly.
    def _set_name(self, value):
        raise AttributeError("The attribute 'name' is readonly.")
    #@nonl
    #@-node:_set_name
    #@+node:_set_done
    # NOTE(review): this readonly guard for 'done' is overridden by a
    # second _set_done definition further below, which makes 'done'
    # assignable although the class docstring documents it as readonly
    # -- confirm which behavior is intended.
    def _set_done(self, value):
        raise AttributeError("The attribute 'done' is readonly.")
    #@nonl
    #@-node:_set_done
    #@+node:_set_performed_work_time
    def _set_performed_work_time(self, value):
        raise AttributeError("The attribute 'performed_work_time' is readonly.")
    #@nonl
    #@-node:_set_performed_work_time
    #@+node:_set_booked_resource
    def _set_booked_resource(self, value):
        raise AttributeError("The attribute 'booked_resource' is readonly.")
    #@nonl
    #@-node:_set_booked_resource
    #@+node:_set_performed_effort
    def _set_performed_effort(self, value):
        raise AttributeError("The attribute 'performed_effort' is readonly.")
    #@nonl
    #@-node:_set_performed_effort
    #@+node:_set_children
    def _set_children(self, value):
        raise AttributeError("The attribute 'children' is readonly.")
    #@nonl
    #@-node:_set_children
    #@+node:_set_depth
    def _set_depth(self, value):
        raise AttributeError("The attribute 'depth' is readonly.")
    #@nonl
    #@-node:_set_depth
    #@+node:_set_index
    def _set_index(self, value):
        raise AttributeError("The attribute 'index' is readonly.")
    #@nonl
    #@-node:_set_index
    #@+node:_set_scenario
    def _set_scenario(self, value):
        raise AttributeError("The attribute 'scenario' is readonly.")
    #@nonl
    #@-node:_set_scenario
    #@+node:_set_buffer
    def _set_buffer(self, value):
        raise AttributeError("The attribute 'buffer' is readonly.")
#@nonl
#@-node:_set_buffer
#@-node:read only attributes
#@+node:_set_start
    def _set_start(self, value):
        # remember the literal's class for later reconstruction
        self.__start_class = value.__class__
        self.start = self._to_start(value).round()
    #@-node:_set_start
    #@+node:_set_end
    def _set_end(self, value):
        self.end = self._to_end(value)
    #@-node:_set_end
    #@+node:_set_max_load
    def _set_max_load(self, max_load):
        self.max_load = float(max_load)
    #@-node:_set_max_load
    #@+node:_set_load
    def _set_load(self, load):
        self.load = float(load)
    #@-node:_set_load
    #@+node:_set_length
    def _set_length(self, value):
        self.length = self._to_delta(value).round()
    #@-node:_set_length
    #@+node:_set_effort
    def _set_effort(self, value):
        self.effort = self._to_delta(value).round()
    #@-node:_set_effort
    #@+node:_set_duration
    def _set_duration(self, value):
        # durations count calendar time: the second argument selects
        # the "real time" conversion
        self.duration = self._to_delta(value, True).round()
    #@-node:_set_duration
    #@+node:_set_complete
    def _set_complete(self, value):
        self.complete = value
    #@-node:_set_complete
    #@+node:_set_done
    # NOTE(review): this definition overrides the readonly guard for
    # 'done' defined earlier in the class -- confirm it is intended.
    def _set_done(self, value):
        self.done = self._to_delta(value).round()
    #@-node:_set_done
    #@+node:_set_todo
    def _set_todo(self, value):
        self.todo = self._to_delta(value).round()
    #@-node:_set_todo
    #@+node:_set_milestone
    def _set_milestone(self, value):
        self.milestone = value
    #@-node:_set_milestone
    #@+node:_set_resource
    def _set_resource(self, value):
        if not value:
            self.resource = None
            return
        # several resources may be given; combine them with the "&"
        # resource operator before instantiating
        if isinstance(value, (tuple, list)):
            value = reduce(lambda a, b: a & b, value)
        self.resource = value()
#@-node:_set_resource
#@+node:_set_copy_src
    def _set_copy_src(self, value):
        """
        Copy (nearly) all attributes of the task given by *value* to
        this task; see the C{copy_src} attribute documentation.
        """
        if isinstance(value, _MeProxy):
            raise RuntimeError("Cannot copy me.")
        if not value._is_compiled:
            # the source is not ready yet: defer this task
            raise _IncompleteError(value, "copy_src")
        # local settings win over the copied ones
        if value.resource and not self.resource:
            self.resource = value.resource
        if value.balance and not self.balance:
            self.balance = value.balance
        copy_parms = ("priority", "todo", "complete",
                      "_constraint", "load", "length",
                      "effort", "duration")
        for p in copy_parms:
            v = value.__dict__.get(p)
            if v: setattr(self, p, v)
        self.copy_src = value
        # user defined attributes are copied as well
        self._properties.update(value._properties)
        for k in value._properties.iterkeys():
            setattr(self, k, getattr(value, k))
#@-node:_set_copy_src
#@+node:__set_sources
    def __set_sources(self, attrib_name, value):
        """
        Record which other task attributes the given value was derived
        from, and register the reverse dependencies on those tasks.
        """
        #@ << find references >>
        #@+node:<< find references >>
        def make_ref(val):
            if isinstance(val, _ValueWrapper):
                return val._ref
            if isinstance(val, Task):
                return [(val, "")]
            return []
        if isinstance(value, (list, tuple)):
            sources = _refsum(map(make_ref, value))
        else:
            sources = make_ref(value)
        #@nonl
        #@-node:<< find references >>
        #@nl
        if not sources: return
        #track only dependencies within the same project
        root = self.root
        sources = [ task.path + "." + attrib
                    for task, attrib in sources
                    if task and task.root is root ]
        self._sources[attrib_name] = tuple(sources)
        attr_path = self.path + "." + attrib_name
        #set dependencies of my sources
        for d in sources:
            path, attrib = _split_path(d)
            task = self.get_task(path)
            r_d = task._dependencies
            d_l = r_d.setdefault(attrib, {})
            d_l[attr_path] = True
#@-node:__set_sources
#@+node:Calendar Setters
#@+node:_set_calendar
    def _set_calendar(self, value):
        # switch to another calendar and reconvert all date/delta valued
        # attributes with the new calendar's converters
        self.calendar = value
        self._to_delta = value.Minutes
        self._to_start = value.StartDate
        self._to_end = value.EndDate
        self.__renew_dates()
#@-node:_set_calendar
#@+node:__renew_dates
    def __renew_dates(self):
        # reapply the originally given literals after a calendar change
        for attrib in ("effort", "start", "end", "length", "todo"):
            try:
                self._set_attrib(attrib, self._original_values[attrib])
            except KeyError:
                pass
#@-node:__renew_dates
#@+node:__make_calendar
    def __make_calendar(self):
        # give the task a private clone of the (inherited) calendar
        # before applying local modifications (vacation, working days, ...)
        if not "calendar" in self.__dict__:
            cal = self.calendar = self.calendar.clone()
            self._to_delta = cal.Minutes
            self._to_start = cal.StartDate
            self._to_end = cal.EndDate
#@nonl
#@-node:__make_calendar
#@+node:_set_vacation
    def _set_vacation(self, value):
        # vacations are calendar data: clone the calendar first
        self.__make_calendar()
        self.calendar.set_vacation(value)
        self._properties["vacation"] = True
        self.vacation = value
        self.__renew_dates()
    #@-node:_set_vacation
    #@+node:_set_extra_work
    def _set_extra_work(self, value):
        self.__make_calendar()
        self.calendar.set_extra_work(value)
        self._properties["extra_work"] = True
        self.extra_work = value
        self.__renew_dates()
#@-node:_set_extra_work
#@+node:_set_working_days
    def _set_working_days(self, value):
        # a single (day_range, time_range, ...) tuple may be given
        # without the enclosing sequence
        if type(value[0]) is str:
            value = (value, )
        self.working_days = value
        self._properties["working_days"] = True
        self.__make_calendar()
        for v in value:
            day_range = v[0]
            tranges = tuple(v[1:])
            self.calendar.set_working_days(day_range, *tranges)
        self.__renew_dates()
#@nonl
#@-node:_set_working_days
#@+node:_set_minimum_time_unit
    def _set_minimum_time_unit(self, value):
        self.__make_calendar()
        self.calendar.minimum_time_unit = value
        self._properties["minimum_time_unit"] = True
    #@-node:_set_minimum_time_unit
    #@+node:_get_minimum_time_unit
    def _get_minimum_time_unit(self):
        return self.calendar.minimum_time_unit
    # the scheduling resolution lives on the (possibly private) calendar
    minimum_time_unit = property(_get_minimum_time_unit)
#@-node:_get_minimum_time_unit
#@+node:_set_working_days_per_week
    # The working_days_per_* values are stored on the task's private
    # calendar clone; the getters delegate there.
    def _set_working_days_per_week(self, value):
        self.__make_calendar()
        self.calendar.working_days_per_week = value
        self._properties["working_days_per_week"] = True
    #@-node:_set_working_days_per_week
    #@+node:_get_working_days_per_week
    def _get_working_days_per_week(self):
        return self.calendar.working_days_per_week
    working_days_per_week = property(_get_working_days_per_week)
    #@-node:_get_working_days_per_week
    #@+node:_set_working_days_per_month
    def _set_working_days_per_month(self, value):
        self.__make_calendar()
        self.calendar.working_days_per_month = value
        self._properties["working_days_per_month"] = True
    #@-node:_set_working_days_per_month
    #@+node:_get_working_days_per_month
    def _get_working_days_per_month(self):
        return self.calendar.working_days_per_month
    working_days_per_month = property(_get_working_days_per_month)
    #@-node:_get_working_days_per_month
    #@+node:_set_working_days_per_year
    def _set_working_days_per_year(self, value):
        self.__make_calendar()
        self.calendar.working_days_per_year = value
        self._properties["working_days_per_year"] = True
    #@-node:_set_working_days_per_year
    #@+node:_get_working_days_per_year
    def _get_working_days_per_year(self):
        return self.calendar.working_days_per_year
    working_days_per_year = property(_get_working_days_per_year)
#@-node:_get_working_days_per_year
#@+node:_set_working_hours_per_day
def _set_working_hours_per_day(self, value):
self.__make_calendar()
self.calendar.working_hours_per_day = value
self._properties["set_working_hours_per_day"] = True
#@-node:_set_working_hours_per_day
#@+node:_get_working_hours_per_day
    def _get_working_hours_per_day(self):
        # delegate to the (possibly task private) calendar
        return self.calendar.working_hours_per_day
    working_hours_per_day = property(_get_working_hours_per_day)
#@-node:_get_working_hours_per_day
#@+node:_set_now
def _set_now(self, value):
proxy = weakref.proxy
self.calendar.now = _to_datetime(value)
#@-node:_set_now
#@-node:Calendar Setters
#@-node:Setting methods
#@+node:Freezer Methods
#@+node:_unfreeze
def _unfreeze(self, attrib_name):
if self.__dict__.has_key(attrib_name):
del self.__dict__[attrib_name]
#@-node:_unfreeze
    #@+node:_wrap_attrib
    def _wrap_attrib(self, method):
        """Evaluate a lazily calculated attribute with recursion protection.

        `method` is one of the __calc_* functions; its name minus the 7-char
        "__calc_" prefix is the public attribute name.  A marker attribute
        "_rec<name>" guards against re-entrant evaluation; when the task is
        frozen the computed value is cached on the instance.
        """
        attrib_name = method.__name__[7:]
        recursion_attrib = "_rec" + attrib_name
        try:
            # __at_compile is only present while another attribute of this
            # task is being compiled ==> mutually recursive definitions
            dest, dattr = self.__at_compile
            raise RecursionError("Recursive definition of %s(%s) and %s(%s)." \
                                 % (self.path, attrib_name, dest.path, dattr))
        except AttributeError: pass
        if not self._is_compiled:
            raise _IncompleteError(self, attrib_name)
        try:
            # marker already present ==> the calculation re-entered itself
            getattr(self, recursion_attrib)
            raise RecursionError(self, attrib_name)
        except AttributeError: pass
        setattr(self, recursion_attrib, True)
        try:
            result = method(self)
            if self._is_frozen:
                # freeze: cache the result as a plain instance attribute
                setattr(self, attrib_name, result)
            return result
        finally:
            delattr(self, recursion_attrib)
    #@-node:_wrap_attrib
#@+node:_find_frozen
def _find_frozen(self, attrib_name, default=None):
value = self.__dict__.get(attrib_name)
if value is not None:
return value
up = self.up
return up and up._find_frozen(attrib_name) or default
#@-node:_find_frozen
#@-node:Freezer Methods
#@+node:Calculation Methods
    #@+node:__calc_performed_effort
    def __calc_performed_effort(self):
        """Performed effort: sum of the children's values; a plain leaf
        defaults to zero minutes here (tracked leaves are frozen elsewhere)."""
        if self.children:
            return self._to_delta(sum([ t.performed_effort for t in self.children ]))
        return pcalendar.Minutes(0)
    performed_effort = _TaskProperty(__calc_performed_effort)
    #@-node:__calc_performed_effort
    #@+node:__calc_estimated_effort
    def __calc_estimated_effort(self):
        """Originally planned effort: children's sum, else this task's effort."""
        if self.children:
            return self._to_delta(sum([ t.estimated_effort for t in self.children ]))
        return self.effort
    estimated_effort = _TaskProperty(__calc_estimated_effort)
    #@-node:__calc_estimated_effort
    #@+node:__calc_start
    def __calc_start(self):
        """Start of the task.

        Parents: earliest child start.  Leaves: derived from end minus
        duration/length; on recursive definitions fall back to a frozen
        start, else raise RecursionError.
        """
        to_start = self._to_start
        if self.children:
            try:
                return min([ to_start(t.start) for t in self.children
                             if not t._is_parent_referer ])
            except ValueError:
                #@ << raise child recursion error >>
                #@+node:<< raise child recursion error >>
                # min() of an empty sequence: every child refers back to the parent
                self._raise(RecursionError("A child defines a "\
                                           "recursive definition at %s" % self.path))
                #@-node:<< raise child recursion error >>
                #@nl
        try:
            end = self.end
            duration = self.__dict__.get("duration")
            if duration is not None:
                # duration is a real-time span in minutes
                start = end.to_datetime() - datetime.timedelta(minutes=duration)
            else:
                start = end - self.length
            return to_start(start)
        except RecursionError:
            start = self._find_frozen("start")
            if start: return to_start(start)
            #@ << raise recursion error >>
            #@+node:<< raise recursion error >>
            raise RecursionError("you have to specify a "\
                                 "start or an end at %s" % self.path)
            #@nonl
            #@-node:<< raise recursion error >>
            #@nl
    start = _TaskProperty(__calc_start)
    #@-node:__calc_start
    #@+node:__calc_end
    def __calc_end(self):
        """End of the task.

        Parents: latest child end.  Leaves: derived from start plus
        duration/length; on recursive definitions fall back to a frozen
        end, else raise RecursionError.
        """
        to_end = self._to_end
        if self.children:
            try:
                return max([ to_end(t.end) for t in self.children
                             if not t._is_parent_referer ])
            except ValueError:
                #@ << raise child recursion error >>
                #@+node:<< raise child recursion error >>
                # max() of an empty sequence: every child refers back to the parent
                self._raise(RecursionError("A child defines a "\
                                           "recursive definition at %s" % self.path))
                #@-node:<< raise child recursion error >>
                #@nl
        try:
            start = self.start
            duration = self.__dict__.get("duration")
            if duration is not None:
                # duration is a real-time span in minutes
                end = start.to_datetime() + datetime.timedelta(minutes=duration)
            else:
                end = start + self.length
            return to_end(end)
        except RecursionError:
            end = self._find_frozen("end")
            if end: return to_end(end)
            #@ << raise recursion error >>
            #@+node:<< raise recursion error >>
            raise RecursionError("you have to specify a "\
                                 "start or an end at %s" % self.path)
            #@nonl
            #@-node:<< raise recursion error >>
            #@nl
    end = _TaskProperty(__calc_end)
    #@-node:__calc_end
    #@+node:__calc_load
    def __calc_load(self):
        """Load factor (effort per length unit).

        Derived from explicitly set effort and length when both exist,
        otherwise inherited from the nearest frozen ancestor value,
        defaulting to 1.0.
        """
        length = self.__dict__.get("length")
        effort = self.__dict__.get("effort")
        if length is not None and effort is not None:
            # `or 1.0` guards against a zero-length division
            return float(effort) / (float(length) or 1.0)
        load = self._find_frozen("load")
        if load is not None: return load
        return 1.0
    load = _TaskProperty(__calc_load)
    #@-node:__calc_load
    #@+node:__calc_length
    def __calc_length(self):
        """Working-time span of the task: end - start, or effort / load when
        an explicit effort was set."""
        effort = self.__dict__.get("effort")
        if effort is None:
            return self.end - self.start
        return self._to_delta(effort / self.load)
    length = _RoundingTaskProperty(__calc_length, "length")
    #@-node:__calc_length
    #@+node:__calc_duration
    def __calc_duration(self):
        """Real-time span between start and end.

        NOTE(review): the True flag to _to_delta presumably marks a
        wall-clock (not working-time) delta -- confirm in _to_delta.
        """
        return self._to_delta(self.end.to_datetime()\
                              - self.start.to_datetime(), True)
    duration = _TaskProperty(__calc_duration)
    #@-node:__calc_duration
    #@+node:__calc_effort
    def __calc_effort(self):
        """Planned effort: children's sum, else length * load."""
        if self.children:
            return self._to_delta(sum([ t.effort for t in self.children ]))
        return self._to_delta(self.length * self.load)
    effort = _RoundingTaskProperty(__calc_effort, "effort")
    #@-node:__calc_effort
    #@+node:__calc_done
    def __calc_done(self):
        """Effort already performed, from children or booked resources."""
        if self.children:
            dones = map(lambda t: t.done, self.children)
            return self._to_delta(sum(dones))
        res = self._iter_booked_resources()
        done = sum(map(lambda r: r.done_of(self), res))
        complete = self.__dict__.get("complete")
        todo = self.__dict__.get("todo")
        # NOTE(review): this parses as ((not done) and complete == 100)
        # or (todo == 0); if the intent was
        # `not done and (complete == 100 or todo == 0)` a pair of
        # parentheses is missing -- confirm.
        if not done and complete == 100 or todo == 0:
            # if now is not set
            done = self.effort
        return self._to_delta(done)
    done = _TaskProperty(__calc_done)
    #@-node:__calc_done
    #@+node:__calc_buffer
    def __calc_buffer(self):
        """Slack before this task delays any dependent task.

        For each task whose `start` depends on this task's `end`, a throwaway
        clone of the successor is scheduled with a simulated later end to
        measure the real (calendar-aware) delay.  Parents report the minimum
        of their children's buffers.
        """
        if self.children:
            return self._to_delta(min(map(lambda t: t.buffer, self.children)))
        scenario = self.scenario
        # NOTE(review): `scenario` is an unused local here.
        end = self.end
        old_end = self.__dict__.get("end")
        #@ << find all tasks, that depend on my end >>
        #@+node:<< find all tasks, that depend on my end >>
        # collect "end" dependencies of this task and of all ancestors
        deps = { }
        task = self
        while task:
            deps.update(task._dependencies.get("end", {}))
            task = task.up
        #@nonl
        #@-node:<< find all tasks, that depend on my end >>
        #@nl
        #@ << define unfreeze_parents >>
        #@+node:<< define unfreeze_parents >>
        def unfreeze_parents():
            # drop cached parent ends so they get recalculated after the
            # simulated move below
            task = self.up
            while task:
                task._unfreeze("end")
                task = task.up
        #@nonl
        #@-node:<< define unfreeze_parents >>
        #@nl
        buffers = [ ]
        for d in deps.keys():
            path, attrib = _split_path(d)
            if attrib != "start":
                continue
            #@ << calculate buffer to descendant 'd' >>
            #@+node:<< calculate buffer to descendant 'd' >>
            unfreeze_parents()
            # the following code considers an expression like
            # start = predecessor.end + Delta("1d"); the buffer
            # calculation must be aware of the 1d delay.
            # (therefore a simple succ_start - end would be
            # incorrect)
            # Solution: simulate a later end and calculate the
            # real delay
            succ_task = self.get_task(path)
            simulated_task = Task(succ_task._function,
                                  succ_task.name,
                                  succ_task.up, 1)
            current_start = succ_task.start
            simulated_end = current_start
            self.end = current_start
            simulated_task._generate()
            simulated_start = simulated_task.start
            unfreeze_parents()
            # restore the original end (explicit value or cached calculation)
            if old_end: self.end = old_end
            else: self._unfreeze("end")
            del simulated_task
            current_delay = current_start - end
            simulated_delay = simulated_start - simulated_end
            real_delay = current_delay - simulated_delay
            try:
                # a successor's buffer extends ours transitively
                buffer_ = real_delay + succ_task.buffer
            except RecursionError, err:
                self._raise(err)
            #@nonl
            #@-node:<< calculate buffer to descendant 'd' >>
            #@nl
            buffers.append(buffer_)
            if not buffer_:
                # zero buffer: we are on the critical path, stop searching
                break
        if buffers:
            return self._to_delta(min(buffers))
        # no successors: buffer runs until the project end (0 for milestones)
        return not self.milestone \
               and self.root.end - end \
               or self._to_delta(0)
    buffer = _TaskProperty(__calc_buffer)
    #@-node:__calc_buffer
#@+node:__calc_complete
def __calc_complete(self):
done = self.done
todo = self.todo
return int(100.0 * done / ((done + todo) or 1))
complete = _TaskProperty(__calc_complete)
#@-node:__calc_complete
    #@+node:__calc_todo
    def __calc_todo(self):
        """Remaining effort, derived from complete/done, children, or resources."""
        complete = self.__dict__.get("complete")
        if complete:
            # effort = done + todo
            #            done                    done
            # complete = ------  ==>  todo = -------- - done
            #            effort              complete
            complete = float(complete)
            done = self.done
            if done:
                done = float(done)
                return self._to_delta(done * 100.0 / complete - done)
            # NOTE(review): with nothing done yet this returns
            # effort * complete / 100 as the *remaining* work; the
            # expected formula would seem to be (100 - complete) -- confirm.
            return self._to_delta(self.effort * complete / 100.0)
        if self.children:
            todos = map(lambda t: t.todo, self.children)
            return self._to_delta(sum(todos))
        # leaf without explicit complete: ask the booked resources
        todo = sum(map(lambda r: r.todo_of(self), self.booked_resource))
        return self._to_delta(max(todo, self.effort - self.done))
    todo = _TaskProperty(__calc_todo)
    #@-node:__calc_todo
#@-node:Calculation Methods
#@+node:Check Methods
    #@+node:__check_task
    def __check_task(self):
        """Validate that a leaf task is sufficiently specified to be scheduled."""
        if self.children: return
        start = self._find_frozen("start")
        end = self._find_frozen("end")
        if not (start or end):
            self._raise(ValueError("You must specify either a"\
                                   " start or an end attribute"))
        # with both boundaries fixed, no extent attribute is needed
        if start and end: return
        length = self.__dict__.get("length")
        duration = self.__dict__.get("duration")
        effort = self.__dict__.get("effort")
        if not (effort or length or duration):
            # set a default value ("1d") instead of raising
            self._set_effort("1d")
            #self._raise(ValueError("You must specify either a"\
            #                       " length or a duration or "\
            #                       "an effort attribute"))
    #@-node:__check_task
#@+node:__check_milestone
def __check_milestone(self):
if not self.milestone: return
self.length = self._to_delta(0)
start = self.__dict__.get("start")
if not start:
self._raise(ValueError("Milestone must have start attribute"),
"milstone")
if self.__start_class.__name__ == "edt":
#the milestone is probably dependent on the end date of
#an other task (see edt in pcalendar) ==> start at the end date
self.start = self.end = self._to_end(self.start)
else:
self.start = self.end = self._to_start(self.start)
#@-node:__check_milestone
    #@+node:_check_completion
    def _check_completion(self):
        """For finished tasks, replace planned start/end/effort with the
        actually performed (tracked) values."""
        if not self.performed_effort: return
        if self.root.is_snapshot: return
        # allocation is not done yet ==> self.todo, self.done,
        # self.complete cannot be calculated
        if self._find_frozen("complete", 0) < 100 \
           and self.__dict__.get("todo", 1) > 0:
            return
        start = self.performed_start
        end = self.performed_end
        # ensure that self.start.to_datetime() < self.end.to_datetime()
        # (snap to working time, widening outward when snapping would cross
        # the tracked timestamps)
        cstart = self._to_start(start)
        if cstart.to_datetime() > start: cstart = self._to_end(start)
        cend = self._to_end(end)
        if cend.to_datetime() < end: cend = self._to_start(end)
        self.start = cstart
        self.end = cend
        if self.performed_effort != self.effort:
            # keep the original plan available as estimated_effort
            self.estimated_effort = self.effort
            self.effort = self.performed_effort
    #@-node:_check_completion
    #@+node:check
    def check(self):
        """Run the user supplied constraint function (if any), injecting the
        convenience globals me/up/root/assert_ first."""
        if self._constraint and self._is_compiled:
            # Python 2: func_globals is the function's global namespace
            globals_ = self._function.func_globals
            globals_["me"] = self
            globals_["up"] = self.up
            globals_["root"] = self.root
            globals_["assert_"] = self.__assert
            self._constraint()
    #@-node:check
#@-node:Check Methods
#@+node:Error Methods
#@+node:__assert
def __assert(self, value):
if not value:
warnings.warn('Assertion in scenario: "%s".' % self.scenario,
RuntimeWarning, 2)
#@-node:__assert
    #@+node:_warn
    def _warn(self, message, attrib=None, level=2):
        """Report a non-fatal problem, associated with `attrib` of this task
        (delivered through the compile machinery via _MeProxyWarn)."""
        self.__compile_function([], True, _MeProxyWarn(self, attrib, message))
    #@-node:_warn
    #@+node:_raise
    def _raise(self, exc, attrib=None):
        """Attach `exc` to this task/attribute for error reporting, then raise it."""
        self.__compile_function([], True, _MeProxyError(self, attrib, exc))
        raise exc
    #@-node:_raise
#@-node:Error Methods
#@-others
#@nonl
#@-node:class Task
#@-node:Task
#@+node:Projects
#@+node:class _ProjectBase
class _ProjectBase(Task):
    """
    Base class for all projects.

    A project is the root Task of a schedule; it owns the calendar and the
    (Python 2) function whose body defines the task tree.
    """
    #@ << class _ProjectBase declarations >>
    #@+node:<< class _ProjectBase declarations >>
    # completion hints inherited from Task, minus "milestone"
    __attrib_completions__ = { }
    __attrib_completions__.update(Task.__attrib_completions__)
    del __attrib_completions__["milestone"] #project cannot be milestones
    # scheduling defaults for the project root
    priority = 500
    efficiency = 1.0
    max_load = 1.0
    balance = 0
    resource = None
    copy_src = None
    has_actual_data = False
    is_snapshot = False
    #@-node:<< class _ProjectBase declarations >>
    #@nl
    #@ @+others
    #@+node:__init__
    def __init__(self, top_task, scenario="_default", id=""):
        # top_task is the project-defining function (Python 2 attributes
        # func_name/func_globals are used below)
        self.calendar = pcalendar.Calendar()
        Task.__init__(self, top_task, top_task.func_name)
        self.id = id or self.name
        self.scenario = scenario
        self.all_scenarios = set(("_default",))
        self.path = "root"
        # keep a copy of the function's globals; they are patched during
        # compilation and restored later by _restore_globals()
        self._globals = top_task.func_globals.copy()
        self._generate()
    #@-node:__init__
    #@+node:_idendity_
    # NOTE(review): method name keeps the historic misspelling ("idendity")
    # because it is referenced throughout the code base.
    def _idendity_(self): return self.id
    #@-node:_idendity_
    #@+node:_restore_globals
    def _restore_globals(self):
        """Undo the me/up/root/... injections made during compilation."""
        self._function.func_globals.clear()
        self._function.func_globals.update(self._globals)
        del self._globals
    #@-node:_restore_globals
    #@+node:free
    def free(self):
        """Unbook all resources of this project/scenario; returns the
        (weakref) list of all resources."""
        all_resources = self.all_resources()
        for r in all_resources:
            r().unbook_tasks_of_project(self.id, self.scenario)
        for t in self:
            t.booked_resource = ()
        return all_resources
    #@-node:free
    #@+node:_get_balancing_list
    def _get_balancing_list(self):
        """Tasks in allocation order, cached per project source code."""
        try:
            cached_list = balancing_cache[self._function.org_code]
            if len(cached_list) != len(tuple(self)):
                # different scenarios can have different tasks
                raise KeyError()
        except KeyError:
            cached_list = _build_balancing_list(self)
            balancing_cache[self._function.org_code] = cached_list
        else:
            # map the cached task objects onto this project's tasks
            cached_list = [ self.get_task(t.path) for t in cached_list ]
        return cached_list
    #@-node:_get_balancing_list
    #@+node:snapshot
    def snapshot(self, indent="", name=None):
        """Serialize the project as source text (with resources, the current
        time and is_snapshot = True injected after the def line)."""
        text = Task.snapshot(self, indent, name)
        lines = text.splitlines(True)
        indent += "    "
        def make_resource(r):
            return '%sclass %s(Resource): title = "%s"\n' \
                   % (indent, r.name, r.title)
        # NOTE(review): "%x" is a locale dependent date format -- confirm
        # that snapshots are always re-read with the same locale.
        now = datetime.datetime.now().strftime("%x %H:%M")
        resource_text = map(lambda r: make_resource(r), self.all_resources())
        lines.insert(1, "%sfrom faces import Resource\n" % indent)
        lines.insert(2, "".join(resource_text) + "\n")
        lines.insert(3, '%snow = "%s"\n' % (indent, now))
        lines.insert(4, '%sis_snapshot = True\n' % indent)
        return "".join(lines)
    #@-node:snapshot
    #@-others
#@-node:class _ProjectBase
#@+node:class Project
class Project(_ProjectBase):
    """
    Generates a Project without allocating resources.

    @param top_task: Specifies the highest function of a project definition.
    @param scenario: Specifies the name of the scenario which should be scheduled.
    @param id: Specifies a unique identification name to distinguish the project from
               other projects in the resource database. The default value for id
               is the name of top_task.
    """
    #@ << class Project declarations >>
    #@+node:<< class Project declarations >>
    __call_completion__ = 'Project(|top_task, scenario="_default", id=None)'
    #@-node:<< class Project declarations >>
    #@nl
    #@ @+others
    #@+node:__init__
    def __init__(self, top_task, scenario="_default", id=None):
        _ProjectBase.__init__(self, top_task, scenario, id)
        # snapshots contain frozen historic data; constraint checks only
        # apply to live projects
        run_checks = not self.is_snapshot
        for t in self:
            t._is_frozen = True
            t._recalc_properties()
            # idiom fix: explicit `if` instead of side effect via `and`
            if run_checks:
                t.check()
        self._restore_globals()
    #@-node:__init__
    #@-others
#@-node:class Project
#@+node:class _AllocationPoject
# NOTE(review): the class name keeps the historic misspelling ("Poject")
# because subclasses and callers reference it.
class _AllocationPoject(_ProjectBase):
    #@ @+others
    #@+node:unfreeze_parents
    def unfreeze_parents(self):
        """Drop cached start/end values of parent tasks so they are
        recalculated from the (re)allocated children."""
        if self.has_actual_data:
            for t in filter(lambda t: t.children, self):
                # idiom fix: `in` instead of deprecated dict.has_key;
                # only unfreeze values not explicitly set by the user
                if "start" not in t._original_values: t._unfreeze("start")
                if "end" not in t._original_values: t._unfreeze("end")
    #@-node:unfreeze_parents
    #@-others
#@-node:class _AllocationPoject
#@+node:class BalancedProject
class BalancedProject(_AllocationPoject):
    """
    Generates a project with allocated resources. The tasks are balanced
    to fit the resources load conditions.
    """
    #@ << class BalancedProject declarations >>
    #@+node:<< class BalancedProject declarations >>
    __call_completion__ = """BalancedProject(|top_task, scenario="_default",
                 id=None, balance=SMART, performed=None)"""
    #@-node:<< class BalancedProject declarations >>
    #@nl
    #@ @+others
    #@+node:__init__
    def __init__(self, top_task, scenario="_default",
                 id=None, balance=SMART, performed=None):
        _AllocationPoject.__init__(self, top_task, scenario, id)
        self.balance = balance
        if performed:
            # merge externally tracked (performed) data into the tasks
            self._distribute_performed(performed)
            self.has_actual_data = True
        no_snapshot = not self.is_snapshot
        if no_snapshot:
            self.allocate()
        else:
            self.allocate_snapshot()
        for t in self:
            t._is_frozen = True
            t._recalc_properties()
            no_snapshot and t.check()
        self._restore_globals()
    #@nonl
    #@-node:__init__
    #@+node:allocate_snapshot
    def allocate_snapshot(self):
        """Book only the recorded performance data (no balancing)."""
        all_resources = self.free()
        scenario = self.scenario
        # NOTE(review): `scenario` and `has_actual_data` are unused locals;
        # `self.has_actual_data = True` may have been intended -- confirm.
        has_actual_data = True
        for t in self:
            if not t.resource or t.milestone or t.children:
                continue
            t._convert_performed(all_resources)
            t._allocate_performed(t._performed)
    #@-node:allocate_snapshot
    #@+node:allocate
    def allocate(self):
        """Allocate resources task by task in balanced order, choosing the
        resource permutation that minimizes the allocator's criterion."""
        all_resources = self.free()
        balancing_list = self._get_balancing_list()
        scenario = self.scenario
        #for t in balancing_list:
        #    print t.path
        for t in balancing_list:
            t._compile([], True)
            if not t.resource or t.milestone or t.children:
                continue
            if t._convert_performed(all_resources):
                # NOTE(review): unused local, see allocate_snapshot above
                has_actual_data = True
            try:
                t._allocate_performed(t._performed)
            except AttributeError:
                # no performance data for this task
                pass
            allocator = _allocators[t.balance]
            min_val = None
            min_state = None
            for p in range(t.resource._permutation_count()):
                state = t._test_allocation(p, allocator)
                if not state: continue
                to_minimize = state[0]
                if not min_val or min_val > to_minimize:
                    min_val = to_minimize
                    min_state = state
            if min_state:
                t._allocate(min_state, allocator)
            elif t.performed_start:
                # t could not be allocated ==>
                # performance data holds all information
                t.start = t._to_start(t.performed_start)
                t.end = t._to_end(t.performed_end)
        self.unfreeze_parents()
    #@-node:allocate
    #@+node:_distribute_performed
    def _distribute_performed(self, performed):
        """Attach tracked items (path, ...) to their tasks; sub-activity
        suffixes are split off the path when no exact task matches."""
        project_id = self._idendity_()
        plen = len(project_id)
        performed = filter(lambda item: item[0].startswith(project_id),
                           performed)
        performed.sort()
        task = None
        for item in performed:
            path = item[0]
            rpath = "root" + path[plen:]
            task = self.get_task(rpath)
            if not task:
                #@ << extract task in activity path >>
                #@+node:<< extract task in activity path >>
                #@+at
                # A performed path can have sub activities appended to the
                # task path.
                # like:
                #
                #     root.parent1.parent2.task.subactivity
                #
                # here the correct task path is:
                #
                #     root.parent1.parent2.task
                #
                #@-at
                #@@code
                orpath = rpath
                while not task:
                    #path can specify a sub module
                    #find the correct path to the module
                    try:
                        last_dot = rpath.rindex(".", 0, len(rpath))
                    except ValueError:
                        break
                    rpath = rpath[:last_dot]
                    task = self.get_task(rpath)
                # keep the stripped sub-activity suffix with the item
                item = list(item)
                item.append(orpath[len(rpath):])
                #@nonl
                #@-node:<< extract task in activity path >>
                #@nl
            if not task or task.children:
                self._warn("The performance data contain "
                           "a task with id '%s'. But such "
                           "a task does not exist in your "
                           "project." % path)
                continue
            if not isinstance(task.performed, list):
                task.performed = list(task.performed)
            task.performed.append(item[1:])
    #@nonl
    #@-node:_distribute_performed
    #@-others
#@-node:class BalancedProject
#@+node:class AdjustedProject
class AdjustedProject(_AllocationPoject):
    """
    Generates a project with allocated resources. The tasks are
    adjusted to the actual tracking data and balanced to fit the
    resources load conditions.
    """
    #@ << class AdjustedProject declarations >>
    #@+node:<< class AdjustedProject declarations >>
    __call_completion__ = 'AdjustedProject(|base_project)'
    #@-node:<< class AdjustedProject declarations >>
    #@nl
    #@ @+others
    #@+node:__init__
    def __init__(self, base_project):
        # reschedule the same project function/scenario, folding in the
        # actual tracking data of base_project
        _AllocationPoject.__init__(self, base_project._function,
                                   base_project.scenario,
                                   base_project.id)
        self.balance = base_project.balance
        self.has_actual_data = base_project.has_actual_data
        self.allocate(base_project)
        for t in self:
            t._is_frozen = True
            t._recalc_properties()
            t.check()
        self._restore_globals()
    #@-node:__init__
    #@+node:allocate
    def allocate(self, base):
        """Copy finished tasks from `base` verbatim; re-allocate the
        unfinished ones from `now` onward."""
        balancing_list = self._get_balancing_list()
        scenario = self.scenario
        # NOTE(review): `scenario` is an unused local here.
        cal = self.calendar
        now = cal.now
        #for t in balancing_list:
        #    print t.path
        #@ << free the resources, we have to rebook >>
        #@+node:<< free the resources, we have to rebook >>
        for t in balancing_list:
            src = base.get_task(t.path)
            if src.end > now or src.complete < 100:
                for r in src._iter_booked_resources():
                    r.unbook_task(src)
        #@nonl
        #@-node:<< free the resources, we have to rebook >>
        #@nl
        for t in balancing_list:
            src = base.get_task(t.path)
            if src.end <= now and src.complete == 100:
                #@ << copy the attribs of complete tasks >>
                #@+node:<< copy the attribs of complete tasks >>
                t.effort = src.effort
                t.load = src.load
                t.start = src.start
                t.end = src.end
                t.done = src.done
                t.todo = src.todo
                t.booked_resource = src.booked_resource
                t.performed_resource = src.performed_resource
                # length/duration must be recalculated from the copied values
                t._unfreeze("length")
                t._unfreeze("duration")
                #@nonl
                #@-node:<< copy the attribs of complete tasks >>
                #@nl
                continue
            t._compile([], True)
            if not t.resource or t.milestone or t.children:
                continue
            # now allocate the uncomplete tasks
            #@ << allocate performed data >>
            #@+node:<< allocate performed data >>
            try:
                t._performed = src._performed
                t._allocate_performed(t._performed)
            except AttributeError:
                # src carries no performance data
                pass
            #@nonl
            #@-node:<< allocate performed data >>
            #@nl
            allocator = _allocators[t.balance]
            if src.start >= now:
                #@ << allocate tasks, that have not begun yet >>
                #@+node:<< allocate tasks, that have not begun yet >>
                # same permutation search as BalancedProject.allocate
                min_val = None
                min_state = None
                for p in range(t.resource._permutation_count()):
                    state = t._test_allocation(p, allocator)
                    if not state: continue
                    to_minimize = state[0]
                    if not min_val or min_val > to_minimize:
                        min_val = to_minimize
                        min_state = state
                if min_state:
                    t._allocate(min_state, allocator)
                elif t.performed_start:
                    t.start = t._to_start(t.performed_start)
                    t.end = t._to_end(t.performed_end)
                #@-node:<< allocate tasks, that have not begun yet >>
                #@nl
            else:
                #@ << allocate tasks, that are allready at work >>
                #@+node:<< allocate tasks, that are allready at work >>
                # re-estimate effort as done + todo and keep the resource
                # that is already working on the task
                if t.__dict__.has_key("effort"):
                    t.effort = t._to_delta(src.done + src.todo).round()
                resource = src.booked_resource or src.performed_resource
                state = allocator.test_allocation(t, resource)
                if state:
                    t._allocate(state, allocator)
                #@nonl
                #@-node:<< allocate tasks, that are allready at work >>
                #@nl
        self.unfreeze_parents()
    #@nonl
    #@-node:allocate
    #@-others
#@-node:class AdjustedProject
#@-node:Projects
#@-others
"""
Attribute mit Bedeutung:
calendar
--------
minimum_time_unit |int in minutes|
working_days_per_week |int in days |
working_days_per_month|int in days |
working_days_per_year |int in days |
working_hours_per_day |int in hours |
vacation | [ one_day, (from, to), .. ] |
working_days
now
Task
-----
load
start
end
length
effort
duration
resource
booked_resource
milestone
complete
done
todo
priority
efficiency
buffer
children
depth
index
path
dont_inherit
performed_effort
performed_end
performed_start
sum()
min()
max()
costs()
indent_name()
max_load
copy_src (set: copy all attributes of another task
get: reference of copy)
balance
for gantt
-----
line
accumulate
Resource
----------
efficiency
load
vacation
max_load
"""
#@-node:@file task.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
britcey/ansible | lib/ansible/modules/identity/ipa/ipa_sudorule.py | 49 | 16253 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Standard Ansible module metadata consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ipa_sudorule
author: Thomas Krahn (@Nosmoht)
short_description: Manage FreeIPA sudo rule
description:
- Add, modify or delete sudo rule within IPA server using IPA API.
options:
cn:
description:
- Canonical name.
- Can not be changed as it is the unique identifier.
required: true
aliases: ['name']
cmdcategory:
description:
- Command category the rule applies to.
choices: ['all']
required: false
cmd:
description:
- List of commands assigned to the rule.
- If an empty list is passed all commands will be removed from the rule.
- If option is omitted commands will not be checked or changed.
required: false
host:
description:
- List of hosts assigned to the rule.
- If an empty list is passed all hosts will be removed from the rule.
- If option is omitted hosts will not be checked or changed.
- Option C(hostcategory) must be omitted to assign hosts.
required: false
hostcategory:
description:
- Host category the rule applies to.
- If 'all' is passed one must omit C(host) and C(hostgroup).
- Option C(host) and C(hostgroup) must be omitted to assign 'all'.
choices: ['all']
required: false
hostgroup:
description:
- List of host groups assigned to the rule.
- If an empty list is passed all host groups will be removed from the rule.
- If option is omitted host groups will not be checked or changed.
- Option C(hostcategory) must be omitted to assign host groups.
required: false
user:
description:
- List of users assigned to the rule.
- If an empty list is passed all users will be removed from the rule.
- If option is omitted users will not be checked or changed.
required: false
usercategory:
description:
- User category the rule applies to.
choices: ['all']
required: false
usergroup:
description:
- List of user groups assigned to the rule.
- If an empty list is passed all user groups will be removed from the rule.
- If option is omitted user groups will not be checked or changed.
required: false
state:
description: State to ensure
required: false
default: present
choices: ['present', 'absent', 'enabled', 'disabled']
ipa_port:
description: Port of IPA server
required: false
default: 443
ipa_host:
description: IP or hostname of IPA server
required: false
default: "ipa.example.com"
ipa_user:
description: Administrative account used on IPA server
required: false
default: "admin"
ipa_pass:
description: Password of administrative user
required: true
ipa_prot:
description: Protocol used by IPA server
required: false
default: "https"
choices: ["http", "https"]
validate_certs:
description:
- This only applies if C(ipa_prot) is I(https).
- If set to C(no), the SSL certificates will not be validated.
- This should only set to C(no) used on personally controlled sites using self-signed certificates.
required: false
default: true
version_added: "2.3"
'''
EXAMPLES = '''
# Ensure a sudo rule is present that allows everybody to execute any command on any host without being asked for a password.
- ipa_sudorule:
name: sudo_all_nopasswd
cmdcategory: all
description: Allow to run every command with sudo without password
hostcategory: all
sudoopt:
- '!authenticate'
usercategory: all
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
# Ensure user group developers can run every command on host group db-server as well as on host db01.example.com.
- ipa_sudorule:
name: sudo_dev_dbserver
description: Allow developers to run every command with sudo on all database server
cmdcategory: all
host:
- db01.example.com
hostgroup:
- db-server
sudoopt:
- '!authenticate'
usergroup:
- developers
ipa_host: ipa.example.com
ipa_user: admin
ipa_pass: topsecret
'''
RETURN = '''
sudorule:
description: Sudorule as returned by IPA
returned: always
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.ipa import IPAClient
class SudoRuleIPAClient(IPAClient):
    """Thin wrapper around the FreeIPA JSON-RPC API for sudorule objects.

    Each method maps 1:1 onto an IPA ``sudorule_*`` command and delegates
    to IPAClient._post_json.
    """
    def __init__(self, module, host, port, protocol):
        super(SudoRuleIPAClient, self).__init__(module, host, port, protocol)
    def sudorule_find(self, name):
        # find-by-cn: the cn goes into the item payload, not the name slot
        return self._post_json(method='sudorule_find', name=None, item={'all': True, 'cn': name})
    def sudorule_add(self, name, item):
        return self._post_json(method='sudorule_add', name=name, item=item)
    def sudorule_mod(self, name, item):
        return self._post_json(method='sudorule_mod', name=name, item=item)
    def sudorule_del(self, name):
        return self._post_json(method='sudorule_del', name=name)
    # --- sudo options -------------------------------------------------
    def sudorule_add_option(self, name, item):
        return self._post_json(method='sudorule_add_option', name=name, item=item)
    def sudorule_add_option_ipasudoopt(self, name, item):
        return self.sudorule_add_option(name=name, item={'ipasudoopt': item})
    def sudorule_remove_option(self, name, item):
        return self._post_json(method='sudorule_remove_option', name=name, item=item)
    def sudorule_remove_option_ipasudoopt(self, name, item):
        return self.sudorule_remove_option(name=name, item={'ipasudoopt': item})
    # --- hosts / host groups ------------------------------------------
    def sudorule_add_host(self, name, item):
        return self._post_json(method='sudorule_add_host', name=name, item=item)
    def sudorule_add_host_host(self, name, item):
        return self.sudorule_add_host(name=name, item={'host': item})
    def sudorule_add_host_hostgroup(self, name, item):
        return self.sudorule_add_host(name=name, item={'hostgroup': item})
    def sudorule_remove_host(self, name, item):
        return self._post_json(method='sudorule_remove_host', name=name, item=item)
    def sudorule_remove_host_host(self, name, item):
        return self.sudorule_remove_host(name=name, item={'host': item})
    def sudorule_remove_host_hostgroup(self, name, item):
        return self.sudorule_remove_host(name=name, item={'hostgroup': item})
    # --- allowed commands ---------------------------------------------
    def sudorule_add_allow_command(self, name, item):
        return self._post_json(method='sudorule_add_allow_command', name=name, item=item)
    def sudorule_remove_allow_command(self, name, item):
        return self._post_json(method='sudorule_remove_allow_command', name=name, item=item)
    # --- users / user groups ------------------------------------------
    def sudorule_add_user(self, name, item):
        return self._post_json(method='sudorule_add_user', name=name, item=item)
    def sudorule_add_user_user(self, name, item):
        return self.sudorule_add_user(name=name, item={'user': item})
    def sudorule_add_user_group(self, name, item):
        return self.sudorule_add_user(name=name, item={'group': item})
    def sudorule_remove_user(self, name, item):
        return self._post_json(method='sudorule_remove_user', name=name, item=item)
    def sudorule_remove_user_user(self, name, item):
        return self.sudorule_remove_user(name=name, item={'user': item})
    def sudorule_remove_user_group(self, name, item):
        return self.sudorule_remove_user(name=name, item={'group': item})
def get_sudorule_dict(cmdcategory=None, description=None, hostcategory=None, ipaenabledflag=None, usercategory=None):
    """Build the sudorule item payload, including only fields that are not None."""
    fields = (('cmdcategory', cmdcategory),
              ('description', description),
              ('hostcategory', hostcategory),
              ('ipaenabledflag', ipaenabledflag),
              ('usercategory', usercategory))
    return dict((key, value) for key, value in fields if value is not None)
def get_sudorule_diff(ipa_sudorule, module_sudorule):
    """Return the keys whose desired value differs from the server's value.

    List values are compared order-insensitively; a scalar desired value is
    wrapped in a list when the server reports a list for that key.
    """
    changed_keys = []
    for key in module_sudorule.keys():
        desired = module_sudorule.get(key, None)
        current = ipa_sudorule.get(key, None)
        if isinstance(current, list):
            if not isinstance(desired, list):
                desired = [desired]
            current = sorted(current)
            desired = sorted(desired)
        if current != desired:
            changed_keys.append(key)
    return changed_keys
def category_changed(module, client, category_name, ipa_sudorule):
    """Clear a category currently set to 'all' so that explicit members can
    be assigned instead.  Returns True when the category had to be cleared.
    Respects check mode (no API call is made then)."""
    if ipa_sudorule.get(category_name, None) != ['all']:
        return False
    if not module.check_mode:
        # cn is returned as a list even when it holds a single value
        client.sudorule_mod(name=ipa_sudorule.get('cn')[0], item={category_name: None})
    return True
def ensure(module, client):
    """Reconcile the sudo rule described by the module parameters with the
    state on the IPA server.

    Returns a (changed, sudorule) tuple, where `sudorule` is the rule as
    reported by the server after all modifications.
    """
    state = module.params['state']
    # NOTE(review): argument_spec declares 'cn' with alias 'name'; upstream
    # ipa_sudorule reads module.params['cn'] -- confirm 'name' is populated.
    name = module.params['name']
    cmd = module.params['cmd']
    cmdcategory = module.params['cmdcategory']
    host = module.params['host']
    hostcategory = module.params['hostcategory']
    hostgroup = module.params['hostgroup']
    # 'present' and 'enabled' both mean the rule should be active.
    if state in ['present', 'enabled']:
        ipaenabledflag = 'TRUE'
    else:
        ipaenabledflag = 'FALSE'
    sudoopt = module.params['sudoopt']
    user = module.params['user']
    usercategory = module.params['usercategory']
    usergroup = module.params['usergroup']

    module_sudorule = get_sudorule_dict(cmdcategory=cmdcategory,
                                        description=module.params['description'],
                                        hostcategory=hostcategory,
                                        ipaenabledflag=ipaenabledflag,
                                        usercategory=usercategory)
    ipa_sudorule = client.sudorule_find(name=name)

    changed = False
    if state in ['present', 'disabled', 'enabled']:
        if not ipa_sudorule:
            changed = True
            if not module.check_mode:
                ipa_sudorule = client.sudorule_add(name=name, item=module_sudorule)
        else:
            # BUGFIX: get_sudorule_diff() takes (ipa_sudorule, module_sudorule);
            # the previous extra leading `client` argument raised a TypeError
            # whenever the rule already existed.
            diff = get_sudorule_diff(ipa_sudorule, module_sudorule)
            if len(diff) > 0:
                changed = True
                if not module.check_mode:
                    if 'hostcategory' in diff:
                        # hostcategory='all' cannot coexist with explicit host
                        # members, so drop them before modifying the rule.
                        if ipa_sudorule.get('memberhost_host', None) is not None:
                            client.sudorule_remove_host_host(name=name, item=ipa_sudorule.get('memberhost_host'))
                        if ipa_sudorule.get('memberhost_hostgroup', None) is not None:
                            client.sudorule_remove_host_hostgroup(name=name,
                                                                  item=ipa_sudorule.get('memberhost_hostgroup'))
                    client.sudorule_mod(name=name, item=module_sudorule)

        if cmd is not None:
            changed = category_changed(module, client, 'cmdcategory', ipa_sudorule) or changed
            if not module.check_mode:
                client.sudorule_add_allow_command(name=name, item=cmd)
        if host is not None:
            changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
            changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_host', []), host,
                                            client.sudorule_add_host_host,
                                            client.sudorule_remove_host_host) or changed
        if hostgroup is not None:
            changed = category_changed(module, client, 'hostcategory', ipa_sudorule) or changed
            changed = client.modify_if_diff(name, ipa_sudorule.get('memberhost_hostgroup', []), hostgroup,
                                            client.sudorule_add_host_hostgroup,
                                            client.sudorule_remove_host_hostgroup) or changed
        if sudoopt is not None:
            changed = client.modify_if_diff(name, ipa_sudorule.get('ipasudoopt', []), sudoopt,
                                            client.sudorule_add_option_ipasudoopt,
                                            client.sudorule_remove_option_ipasudoopt) or changed
        if user is not None:
            changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
            changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_user', []), user,
                                            client.sudorule_add_user_user,
                                            client.sudorule_remove_user_user) or changed
        if usergroup is not None:
            changed = category_changed(module, client, 'usercategory', ipa_sudorule) or changed
            changed = client.modify_if_diff(name, ipa_sudorule.get('memberuser_group', []), usergroup,
                                            client.sudorule_add_user_group,
                                            client.sudorule_remove_user_group) or changed
    else:
        # state == 'absent': delete the rule if it exists.
        if ipa_sudorule:
            changed = True
            if not module.check_mode:
                client.sudorule_del(name)

    return changed, client.sudorule_find(name)
def main():
    """Ansible module entry point: parse arguments, log into the IPA server
    and reconcile the requested sudo rule, reporting changed/failed."""
    module = AnsibleModule(
        argument_spec=dict(
            cmd=dict(type='list', required=False),
            cmdcategory=dict(type='str', required=False, choices=['all']),
            cn=dict(type='str', required=True, aliases=['name']),
            description=dict(type='str', required=False),
            host=dict(type='list', required=False),
            hostcategory=dict(type='str', required=False, choices=['all']),
            hostgroup=dict(type='list', required=False),
            sudoopt=dict(type='list', required=False),
            state=dict(type='str', required=False, default='present',
                       choices=['present', 'absent', 'enabled', 'disabled']),
            user=dict(type='list', required=False),
            usercategory=dict(type='str', required=False, choices=['all']),
            usergroup=dict(type='list', required=False),
            ipa_prot=dict(type='str', required=False, default='https', choices=['http', 'https']),
            ipa_host=dict(type='str', required=False, default='ipa.example.com'),
            ipa_port=dict(type='int', required=False, default=443),
            ipa_user=dict(type='str', required=False, default='admin'),
            ipa_pass=dict(type='str', required=True, no_log=True),
            validate_certs=dict(type='bool', required=False, default=True),
        ),
        # A category of 'all' and an explicit member list cannot coexist on
        # the IPA side, so reject such combinations up front.
        mutually_exclusive=[['cmdcategory', 'cmd'],
                            ['hostcategory', 'host'],
                            ['hostcategory', 'hostgroup'],
                            ['usercategory', 'user'],
                            ['usercategory', 'usergroup']],
        supports_check_mode=True,
    )

    client = SudoRuleIPAClient(module=module,
                               host=module.params['ipa_host'],
                               port=module.params['ipa_port'],
                               protocol=module.params['ipa_prot'])
    try:
        client.login(username=module.params['ipa_user'],
                     password=module.params['ipa_pass'])
        changed, sudorule = ensure(module, client)
        module.exit_json(changed=changed, sudorule=sudorule)
    except Exception:
        # get_exception() is the legacy Ansible idiom that retrieves the
        # active exception on both old Python 2 and Python 3.
        e = get_exception()
        module.fail_json(msg=str(e))


if __name__ == '__main__':
    main()
| gpl-3.0 |
aveshagarwal/openshift-ansible | roles/openshift_health_checker/test/zz_failure_summary_test.py | 53 | 2115 | from zz_failure_summary import deduplicate_failures
import pytest
@pytest.mark.parametrize('failures,deduplicated', [
    # A single failure: its host is normalized into a one-element tuple.
    (
        [
            {
                'host': 'master1',
                'msg': 'One or more checks failed',
            },
        ],
        [
            {
                'host': ('master1',),
                'msg': 'One or more checks failed',
            },
        ],
    ),
    # Identical failures on different hosts collapse into a single entry
    # listing both hosts.
    (
        [
            {
                'host': 'master1',
                'msg': 'One or more checks failed',
            },
            {
                'host': 'node1',
                'msg': 'One or more checks failed',
            },
        ],
        [
            {
                'host': ('master1', 'node1'),
                'msg': 'One or more checks failed',
            },
        ],
    ),
    # Failures are grouped by their full content (msg and checks); hosts
    # within a group come out sorted.
    (
        [
            {
                'host': 'node1',
                'msg': 'One or more checks failed',
                'checks': (('test_check', 'error message'),),
            },
            {
                'host': 'master2',
                'msg': 'Some error happened',
            },
            {
                'host': 'master1',
                'msg': 'One or more checks failed',
                'checks': (('test_check', 'error message'),),
            },
        ],
        [
            {
                'host': ('master1', 'node1'),
                'msg': 'One or more checks failed',
                'checks': (('test_check', 'error message'),),
            },
            {
                'host': ('master2',),
                'msg': 'Some error happened',
            },
        ],
    ),
    # if a failure contains an unhashable value, it will not be deduplicated
    (
        [
            {
                'host': 'master1',
                'msg': {'unhashable': 'value'},
            },
        ],
        [
            {
                'host': 'master1',
                'msg': {'unhashable': 'value'},
            },
        ],
    ),
])
def test_deduplicate_failures(failures, deduplicated):
    """deduplicate_failures groups identical failures across hosts."""
    assert deduplicate_failures(failures) == deduplicated
| apache-2.0 |
minglong-cse2016/stupidlang | stupidlang/skeleton.py | 1 | 3647 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a skeleton file that can serve as a starting point for a Python
console script. To run this script uncomment the following line in the
entry_points section in setup.cfg:
console_scripts =
fibonacci = stupidlang.skeleton:run
Then run `python setup.py install` which will install the command `fibonacci`
inside your current environment.
Besides console scripts, the header (i.e. until _logger...) of this file can
also be used as template for Python modules.
Note: This skeleton file can be safely removed if not needed!
"""
from __future__ import division, print_function, absolute_import
import argparse
import sys
import logging
from stupidlang import __version__
__author__ = "minglong-cse2016"
__copyright__ = "minglong-cse2016"
__license__ = "mit"
_logger = logging.getLogger(__name__)
#def fib(n):
# """
# Fibonacci example function
#
# :param n: integer
# :return: n-th Fibonacci number
# """
# assert n > 0
# a, b = 1, 1
# for i in range(n-1):
# a, b = b, a+b
# return a
import os
from .run import repl, run_program
from .env_dictimpl import Env
from .evaluator import global_env
def parse_args(args):
    """
    Parse command line parameters.

    :param args: command line parameters as list of strings
    :return: command line parameters as :obj:`argparse.Namespace`
    """
    parser = argparse.ArgumentParser(
        description="A stupid lispish language")
    parser.add_argument(
        '-v',
        '--version',
        action='version',
        # BUGFIX: report the real package version instead of the
        # hardcoded placeholder '0.0.0' (__version__ is imported above).
        version='stupidlang {ver}'.format(ver=__version__))
    parser.add_argument(
        '-t',
        '--talkative',
        dest="loglevel",
        help="set loglevel to INFO",
        action='store_const',
        const=logging.INFO)
    parser.add_argument(
        '-tt',
        '--very-talkative',
        dest="loglevel",
        help="set loglevel to DEBUG",
        action='store_const',
        const=logging.DEBUG)
    parser.add_argument(
        '-i',
        '--interactive',
        action='store_true',
        help='starts a REPL to run stupidlang code')
    parser.add_argument(
        '-l',
        '--load',
        nargs=1,
        help="a file to load before running the repl, implies -i")
    parser.add_argument(
        dest="programfile",
        nargs='?',
        help="the program to run. the last value will be printed")
    ns = parser.parse_args(args)
    # -i / -l imply an interactive session, which conflicts with running a
    # program file non-interactively.
    if (ns.interactive or bool(ns.load)) and bool(ns.programfile):
        parser.error('-i or -l cannot be given together with a program file')
    if len(args) == 0:
        parser.error('at least one of -i, -l file, or file must be specified')
    if bool(ns.load) and not os.path.isfile(ns.load[0]):
        parser.error("Loaded file must exist or be file")
    if bool(ns.programfile) and not os.path.isfile(ns.programfile):
        parser.error("Program file must exist or be file")
    return ns
def main(args):
    """Run the interpreter according to the parsed command-line options.

    Returns the last value of the executed program, or None for
    interactive sessions.
    """
    opts = parse_args(args)
    logging.basicConfig(level=opts.loglevel, stream=sys.stdout)
    _logger.debug("Starting lispy calculator...")
    env = global_env(Env)
    # Pure interactive session.
    if opts.interactive:
        repl(env)
        return None
    # Load a file first, then drop into the REPL (-l implies -i).
    if opts.load:
        with open(opts.load[0]) as f:
            run_program(f, env)
        repl(env)
        return None
    # Non-interactive: run the program file and hand back its last value.
    with open(opts.programfile) as f:
        output = run_program(f, env)
    _logger.info("Script ends here")
    return output
def run():
    """Console entry point: print a version banner, then the program result."""
    print("stupidlang version {}".format(__version__))
    result = main(sys.argv[1:])
    print(result)


if __name__ == "__main__":
    run()
| mit |
happy56/kivy | examples/demo/kivycatalog/main.py | 3 | 5747 | import kivy
kivy.require('1.4.2')
import os
import sys
from kivy.app import App
from kivy.factory import Factory
from kivy.lang import Builder, Parser, ParserException
from kivy.properties import ObjectProperty
from kivy.config import Config
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.popup import Popup
from kivy.uix.label import Label
from kivy.uix.codeinput import CodeInput
from kivy.animation import Animation
from kivy.clock import Clock
CATALOG_ROOT = os.path.dirname(__file__)
Config.set('graphics', 'width', '1024')
Config.set('graphics', 'height', '768')
'''List of classes that need to be instantiated in the factory from .kv files.
'''
CONTAINER_KVS = os.path.join(CATALOG_ROOT, 'container_kvs')
CONTAINER_CLASSES = [c[:-3] for c in os.listdir(CONTAINER_KVS)
if c.endswith('.kv')]
class Container(BoxLayout):
    '''A container is essentially a class that loads its root from a known
    .kv file.

    The name of the .kv file is taken from the Container's class.
    We can't just use kv rules because the class may be edited
    in the interface and reloaded by the user.

    See :meth: change_kv where this happens.
    '''

    def __init__(self, **kwargs):
        super(Container, self).__init__(**kwargs)
        # Use a context manager so the .kv file handle is closed promptly;
        # the previous code used the deprecated file() builtin and never
        # closed the handle.
        with open(self.kv_file) as kv_handle:
            parser = Parser(content=kv_handle.read())
        widget = Factory.get(parser.root.name)()
        Builder._apply_rule(widget, parser.root, parser.root)
        self.add_widget(widget)

    @property
    def kv_file(self):
        '''Get the name of the kv file, a lowercase version of the class
        name.
        '''
        return os.path.join(CONTAINER_KVS, self.__class__.__name__ + '.kv')
# Dynamically create one Container subclass per .kv file found in
# container_kvs, so each widget can be referenced by name from
# kivycatalog.kv.
for class_name in CONTAINER_CLASSES:
    globals()[class_name] = type(class_name, (Container,), {})
class KivyRenderTextInput(CodeInput):
    '''CodeInput that triggers a catalog re-render on ctrl-s (cmd-s on OSX)
    instead of inserting the character.'''

    def _keyboard_on_key_down(self, window, keycode, text, modifiers):
        is_osx = sys.platform == 'darwin'
        # Keycodes on OSX:
        ctrl, cmd = 64, 1024
        key, key_str = keycode
        # Idiom fix: `key not in ...` instead of `not key in ...`.
        if text and key not in self.interesting_keys.keys() + [27]:
            # This allows *either* ctrl *or* cmd, but not both.
            if modifiers == ['ctrl'] or (is_osx and modifiers == ['meta']):
                if key == ord('s'):
                    # Save shortcut: re-render the current kv widget.
                    self.catalog.change_kv(True)
                    return

        super(KivyRenderTextInput, self)._keyboard_on_key_down(
            window, keycode, text, modifiers)
class Catalog(BoxLayout):
    '''Catalog of widgets. This is the root widget of the app. It contains
    a tabbed pain of widgets that can be displayed and a textbox where .kv
    language files for widgets being demoed can be edited.

    The entire interface for the Catalog is defined in kivycatalog.kv,
    although individual containers are defined in the container_kvs
    directory.

    To add a container to the catalog,
    first create the .kv file in container_kvs
    The name of the file (sans .kv) will be the name of the widget available
    inside the kivycatalog.kv
    Finally modify kivycatalog.kv to add an AccordionItem
    to hold the new widget.
    Follow the examples in kivycatalog.kv to ensure the item
    has an appropriate id and the class has been referenced.

    You do not need to edit any python code, just .kv language files!
    '''
    # References populated from ids declared in kivycatalog.kv.
    language_box = ObjectProperty()
    screen_manager = ObjectProperty()

    def __init__(self, **kwargs):
        super(Catalog, self).__init__(**kwargs)
        # Load the kv source of the initially shown container.
        self.show_kv(None)

    def show_kv(self, object):
        '''Called when an accordionitem is collapsed or expanded. If it
        was expanded, we need to show the .kv language file associated with
        the newly revealed container.'''
        # if object is not passed, it's initialization, we just need to load
        # the file
        if object:
            # one button must always be pressed, even if user presses it again
            if object.state == "normal":
                object.state = "down"

            self.screen_manager.current = object.text

        with open(self.screen_manager.current_screen.content.children[
                0].kv_file) as file:
            self.language_box.text = file.read()
        # reset undo/redo history
        self.language_box.reset_undo()

    def schedule_reload(self):
        # NOTE(review): self.auto_reload is not defined in this class;
        # presumably a property declared in kivycatalog.kv -- confirm.
        if self.auto_reload:
            # Debounce: reset the pending reload and fire 2s after the
            # last edit.
            Clock.unschedule(self.change_kv)
            Clock.schedule_once(self.change_kv, 2)

    def change_kv(self, *largs):
        '''Called when the update button is clicked. Needs to update the
        interface for the currently active kv widget, if there is one based
        on the kv file the user entered. If there is an error in their kv
        syntax, show a nice popup.'''
        kv_container = self.screen_manager.current_screen.content.children[0]
        try:
            parser = Parser(content=self.language_box.text.encode('utf8'))
            kv_container.clear_widgets()
            widget = Factory.get(parser.root.name)()
            Builder._apply_rule(widget, parser.root, parser.root)
            kv_container.add_widget(widget)
        except (SyntaxError, ParserException) as e:
            self.show_error(e)
        except Exception, e:
            # Catch-all: user-entered kv can fail in arbitrary ways.
            self.show_error(e)

    def show_error(self, e):
        # Slide the error label in, hold it, then fade it out.
        # NOTE(review): self.info_label is presumably an id from
        # kivycatalog.kv -- confirm.
        self.info_label.text = str(e)
        self.anim = Animation(top=190.0, opacity=1, d=2, t='in_back') +\
            Animation(top=190.0, d=3) +\
            Animation(top=0, opacity=0, d=2)
        self.anim.start(self.info_label)
class KivyCatalogApp(App):
    '''The kivy App that runs the main root. All we do is build a catalog
    widget into the root.'''

    def build(self):
        # Root widget of the application.
        return Catalog()
# Script entry point: launch the catalog application.
if __name__ == "__main__":
    KivyCatalogApp().run()
| lgpl-3.0 |
fabiobatalha/doaj_client | doaj/applications.py | 1 | 1316 | # coding: utf-8
from doaj.client import Client, PAGESIZE, must_have_token
class Applications(Client):
    """DOAJ API client for the application record endpoints."""

    endpoint = "application/"
    search_endpoint = "search/applications/"

    @must_have_token
    def search(self, query, sort=None, pagesize=PAGESIZE):
        """
        Yield every search result for `query`, transparently paging.

        query must be a valid lucene query
        sort must be field:[asc|desc]
        (NOTE: `sort` is currently accepted but not forwarded to the API.)
        """
        # Normalize and validate pagesize; previously a numeric string
        # passed the int() check but then broke the range comparison, and
        # the bare `except:` swallowed even KeyboardInterrupt.
        try:
            pagesize = int(pagesize)
        except (TypeError, ValueError):
            raise ValueError('pagesize must be integer')

        if pagesize > 100 or pagesize < 10:
            raise ValueError('pagesize must be between 10 and 100')

        page = 1
        payload = {'pageSize': pagesize, 'page': page, 'api_key': self.token}

        while True:
            url = self.api_url + self.search_endpoint + query
            response = self.request_get(url, payload=payload)
            results = response.get('results', [])
            if len(results) == 0:
                break
            for item in results:
                yield item
            payload['page'] += 1

    @must_have_token
    def get(self, journal_id):
        """
        Retrieve one journal related to the given journal identification.
        """
        url = self.api_url + self.endpoint + journal_id
        payload = {'api_key': self.token}
        return self.request_get(url, payload=payload)
dvliman/jaikuengine | .google_appengine/google/appengine/tools/devappserver2/python/request_state_test.py | 18 | 3331 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.appengine.tools.devappserver2.python.request_state."""
import ctypes
import threading
import unittest
import google
import mox
from google.appengine.tools.devappserver2.python import request_state
class CtypesComparator(mox.Comparator):
  """mox comparator that matches two ctypes instances by their .value,
  since distinct ctypes objects never compare equal directly."""

  def __init__(self, lhs):
    self.lhs = lhs.value

  def equals(self, rhs):
    return self.lhs == rhs.value
class RequestStateTest(unittest.TestCase):
  """Unit tests for request_state.RequestState and its module helpers."""

  def setUp(self):
    self.mox = mox.Mox()
    # Stub the CPython C API used to inject exceptions into threads.
    self.mox.StubOutWithMock(ctypes.pythonapi, 'PyThreadState_SetAsyncExc')
    self.request_state = request_state.RequestState('id')

  def tearDown(self):
    self.mox.UnsetStubs()
    # Reset the module-level registry so tests remain independent.
    request_state._request_states = {}

  def test_start_and_end_thread(self):
    self.request_state._threads = set()
    self.request_state.start_thread()
    self.assertEquals(set([threading.current_thread().ident]),
                      self.request_state._threads)
    self.request_state.end_thread()
    self.assertEquals(set(), self.request_state._threads)

  def test_inject_exception(self):
    # Expect the async exception to be raised in the current thread.
    ctypes.pythonapi.PyThreadState_SetAsyncExc(
        CtypesComparator(ctypes.c_long(threading.current_thread().ident)),
        CtypesComparator(ctypes.py_object(Exception)))
    self.mox.ReplayAll()
    self.request_state.inject_exception(Exception)
    self.mox.VerifyAll()

  def test_end_request(self):
    def remove_fake_thread():
      self.request_state._threads.remove('fake thread id')

    self.mox.StubOutWithMock(self.request_state._condition, 'wait')
    self.request_state._threads.add('fake thread id')
    # end_request() must wait until all request threads have finished.
    self.request_state._condition.wait().WithSideEffects(remove_fake_thread)
    self.mox.ReplayAll()
    self.request_state.end_request()
    self.mox.VerifyAll()

  def test_start_request_function(self):
    request_state.start_request('id')
    self.assertEqual(1, len(request_state.get_request_states()))
    self.assertEqual('id', request_state.get_request_state('id').request_id)

  def test_end_request_function(self):
    request_state._request_states = {'id': self.request_state}
    self.mox.StubOutWithMock(self.request_state, 'end_request')
    self.request_state.end_request()
    self.mox.ReplayAll()
    request_state.end_request('id')
    self.mox.VerifyAll()
    # Ending the request must remove it from the registry.
    self.assertEqual([], request_state.get_request_states())

  def test_get_request_states(self):
    request_state.start_request('1')
    request_state.start_request('2')
    request_state.start_request('3')
    self.assertEqual(3, len(request_state.get_request_states()))
    self.assertItemsEqual(
        [request_state.get_request_state(request_id) for
         request_id in ['1', '2', '3']], request_state.get_request_states())
# Test-runner entry point.
if __name__ == '__main__':
  unittest.main()
| apache-2.0 |
decaf-emu/huehuetenango | static/node_modules/node-forge/tests/policyserver.py | 171 | 3551 | #!/usr/bin/env python
"""
Flash Socket Policy Server.
- Starts Flash socket policy file server.
- Defaults to port 843.
- NOTE: Most operating systems require administrative privileges to use
ports under 1024.
$ ./policyserver.py [options]
"""
"""
Also consider Adobe's solutions:
http://www.adobe.com/devnet/flashplayer/articles/socket_policy_files.html
"""
from multiprocessing import Process
from optparse import OptionParser
import SocketServer
import logging
# Set address reuse for all TCPServers
SocketServer.TCPServer.allow_reuse_address = True
# Static socket policy file string.
# NOTE: This format is very strict. Edit with care.
socket_policy_file = """\
<?xml version="1.0"?>\
<!DOCTYPE cross-domain-policy\
SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd">\
<cross-domain-policy>\
<allow-access-from domain="*" to-ports="*"/>\
</cross-domain-policy>\0"""
class PolicyHandler(SocketServer.BaseRequestHandler):
    """
    The RequestHandler class for our server.

    Returns a policy file when requested.
    """

    def handle(self):
        """Send policy string if proper request string is received."""
        # get some data
        # TODO: make this more robust (while loop, etc)
        self.data = self.request.recv(1024).rstrip('\0')
        logging.debug("%s wrote:%s" % (self.client_address[0], repr(self.data)))

        # if policy file request, send the file.
        if self.data == "<policy-file-request/>":
            logging.info("Policy server request from %s." % (self.client_address[0]))
            # sendall() retries until the whole policy is transmitted;
            # plain send() may perform a partial write and silently drop
            # the rest of the policy.
            self.request.sendall(socket_policy_file)
        else:
            logging.info("Policy server received junk from %s: \"%s\"" % \
                (self.client_address[0], repr(self.data)))
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    """TCP server that handles each connection in its own thread."""

    def serve_forever(self):
        """Handle one request at a time until shutdown or keyboard interrupt."""
        try:
            SocketServer.BaseServer.serve_forever(self)
        except KeyboardInterrupt:
            # Allow ctrl-c to stop the serving loop cleanly.
            return
def main():
    """Run socket policy file servers."""
    usage = "Usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option("", "--host", dest="host", metavar="HOST",
                      default="localhost", help="bind to HOST")
    parser.add_option("-p", "--port", dest="port", metavar="PORT",
                      default=843, type="int", help="serve on PORT")
    parser.add_option("-d", "--debug", dest="debug", action="store_true",
                      default=False, help="debugging output")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      default=False, help="verbose output")
    (options, args) = parser.parse_args()

    # setup logging: -d beats -v, default is warnings only
    if options.debug:
        lvl = logging.DEBUG
    elif options.verbose:
        lvl = logging.INFO
    else:
        lvl = logging.WARNING
    logging.basicConfig(level=lvl, format="%(levelname)-8s %(message)s")

    # log basic info
    logging.info("Flash Socket Policy Server. Use ctrl-c to exit.")

    # create policy server
    logging.info("Socket policy serving on %s:%d." % (options.host, options.port))
    policyd = ThreadedTCPServer((options.host, options.port), PolicyHandler)

    # start server in a child process
    policy_p = Process(target=policyd.serve_forever)
    policy_p.start()
    # wait for the server process; ctrl-c interrupts the 1s join and is
    # retried until the child actually exits
    while policy_p.is_alive():
        try:
            policy_p.join(1)
        except KeyboardInterrupt:
            logging.info("Stopping test server...")
main()
| apache-2.0 |
google/eclipse2017 | profile/app/app/backend/app_module.py | 1 | 2118 | #
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import flask
from google.cloud import datastore
class AppModule(object):
    """
    Base class for application module.

    All collaborators (flask helpers, datastore, datetime, json) are
    injected via the constructor so tests can substitute fakes.
    """

    def __init__(self, Blueprint=flask.Blueprint, current_app=flask.current_app,
                 redirect=flask.redirect, render_template=flask.render_template,
                 request=flask.request, Response=flask.Response,
                 session=flask.session, url_for=flask.url_for,
                 datastore=datastore, datetime=datetime.datetime, json=json):
        # Dependency injection
        self.Blueprint = Blueprint
        self.current_app = current_app
        self.redirect = redirect
        self.render_template = render_template
        self.request = request
        self.Response = Response
        self.session = session
        self.url_for = url_for
        self.datastore = datastore
        self.datetime = datetime
        self.json = json

        self.name = 'appmodule'
        self.import_name = __name__
        # NOTE(review): subclasses are expected to set _routes to an
        # iterable of (route, name, method, rest_methods) tuples before
        # create_blueprint() is called; it is None here and iterating it
        # as-is would raise TypeError -- confirm.
        self._routes = None

    def create_blueprint(self):
        """
        Creates and returns a flask blueprint for the profile module.
        """
        bp = self.Blueprint(self.name, self.import_name)
        for route, name, method, rest_methods in self._routes:
            bp.add_url_rule(route, name, method, methods=rest_methods)
        return bp

    def _get_datastore_client(self):
        """
        Returns a datastore client.
        """
        return self.datastore.Client(self.current_app.config['PROJECT_ID'])
| apache-2.0 |
MounirMesselmeni/django | django/test/html.py | 220 | 7928 | """
Comparing two html documents.
"""
from __future__ import unicode_literals
import re
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html_parser import HTMLParseError, HTMLParser
WHITESPACE = re.compile('\s+')
def normalize_whitespace(string):
    """Collapse every run of whitespace in `string` into a single space."""
    return re.sub('\s+', ' ', string)
@python_2_unicode_compatible
class Element(object):
    """A parsed HTML element: tag name, sorted attributes, and children
    (child Elements and normalized text nodes), comparable semantically."""

    def __init__(self, name, attributes):
        self.name = name
        # Sorted so comparisons ignore attribute order.
        self.attributes = sorted(attributes)
        self.children = []

    def append(self, element):
        """Append a child element or text node, merging adjacent text."""
        if isinstance(element, six.string_types):
            element = force_text(element)
            element = normalize_whitespace(element)
            if self.children:
                if isinstance(self.children[-1], six.string_types):
                    # Merge consecutive text nodes into one.
                    self.children[-1] += element
                    self.children[-1] = normalize_whitespace(self.children[-1])
                    return
        elif self.children:
            # removing last children if it is only whitespace
            # this can result in incorrect dom representations since
            # whitespace between inline tags like <span> is significant
            if isinstance(self.children[-1], six.string_types):
                if self.children[-1].isspace():
                    self.children.pop()
        if element:
            self.children.append(element)

    def finalize(self):
        """Strip trailing/leading whitespace from text children, recursively."""
        def rstrip_last_element(children):
            if children:
                if isinstance(children[-1], six.string_types):
                    children[-1] = children[-1].rstrip()
                    if not children[-1]:
                        # Drop emptied trailing text and repeat.
                        children.pop()
                        children = rstrip_last_element(children)
            return children

        rstrip_last_element(self.children)
        for i, child in enumerate(self.children):
            if isinstance(child, six.string_types):
                self.children[i] = child.strip()
            elif hasattr(child, 'finalize'):
                child.finalize()

    def __eq__(self, element):
        if not hasattr(element, 'name'):
            return False
        if hasattr(element, 'name') and self.name != element.name:
            return False
        if len(self.attributes) != len(element.attributes):
            return False
        if self.attributes != element.attributes:
            # attributes without a value is same as attribute with value that
            # equals the attributes name:
            # <input checked> == <input checked="checked">
            for i in range(len(self.attributes)):
                attr, value = self.attributes[i]
                other_attr, other_value = element.attributes[i]
                if value is None:
                    value = attr
                if other_value is None:
                    other_value = other_attr
                if attr != other_attr or value != other_value:
                    return False
        if self.children != element.children:
            return False
        return True

    def __hash__(self):
        # Hash on name and attributes only (children are mutable).
        return hash((self.name,) + tuple(a for a in self.attributes))

    def __ne__(self, element):
        return not self.__eq__(element)

    def _count(self, element, count=True):
        """Count (or just detect, when count=False) occurrences of
        `element` -- a subtree or a text fragment -- in this tree."""
        if not isinstance(element, six.string_types):
            if self == element:
                return 1
        i = 0
        for child in self.children:
            # child is text content and element is also text content, then
            # make a simple "text" in "text"
            if isinstance(child, six.string_types):
                if isinstance(element, six.string_types):
                    if count:
                        i += child.count(element)
                    elif element in child:
                        return 1
            else:
                i += child._count(element, count=count)
                if not count and i:
                    return i
        return i

    def __contains__(self, element):
        return self._count(element, count=False) > 0

    def count(self, element):
        return self._count(element, count=True)

    def __getitem__(self, key):
        return self.children[key]

    def __str__(self):
        # Render as normalized HTML; childless elements self-close.
        output = '<%s' % self.name
        for key, value in self.attributes:
            if value:
                output += ' %s="%s"' % (key, value)
            else:
                output += ' %s' % key
        if self.children:
            output += '>\n'
            output += ''.join(six.text_type(c) for c in self.children)
            output += '\n</%s>' % self.name
        else:
            output += ' />'
        return output

    def __repr__(self):
        return six.text_type(self)
@python_2_unicode_compatible
class RootElement(Element):
    """Implicit document root: no tag of its own, renders children only."""

    def __init__(self):
        super(RootElement, self).__init__(None, ())

    def __str__(self):
        return ''.join(six.text_type(c) for c in self.children)
class Parser(HTMLParser):
    """HTMLParser subclass that builds an Element tree rooted at
    RootElement, tracking open tags and source positions for errors."""

    # Tags that never receive a closing tag.
    SELF_CLOSING_TAGS = ('br', 'hr', 'input', 'img', 'meta', 'spacer',
        'link', 'frame', 'base', 'col')

    def __init__(self):
        HTMLParser.__init__(self)
        self.root = RootElement()
        self.open_tags = []
        self.element_positions = {}

    def error(self, msg):
        raise HTMLParseError(msg, self.getpos())

    def format_position(self, position=None, element=None):
        # Resolve a position from the element map, an explicit value, or
        # the parser's current location, and render it human-readably.
        if not position and element:
            position = self.element_positions[element]
        if position is None:
            position = self.getpos()
        if hasattr(position, 'lineno'):
            position = position.lineno, position.offset
        return 'Line %d, Column %d' % position

    @property
    def current(self):
        # Innermost open element, or the document root when none is open.
        if self.open_tags:
            return self.open_tags[-1]
        else:
            return self.root

    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        if tag not in self.SELF_CLOSING_TAGS:
            self.handle_endtag(tag)

    def handle_starttag(self, tag, attrs):
        # Special case handling of 'class' attribute, so that comparisons of DOM
        # instances are not sensitive to ordering of classes.
        attrs = [
            (name, " ".join(sorted(value.split(" "))))
            if name == "class"
            else (name, value)
            for name, value in attrs
        ]
        element = Element(tag, attrs)
        self.current.append(element)
        if tag not in self.SELF_CLOSING_TAGS:
            self.open_tags.append(element)
        self.element_positions[element] = self.getpos()

    def handle_endtag(self, tag):
        if not self.open_tags:
            self.error("Unexpected end tag `%s` (%s)" % (
                tag, self.format_position()))
        element = self.open_tags.pop()
        # Pop until the matching start tag is found (implicitly closing
        # any unclosed intermediate elements).
        while element.name != tag:
            if not self.open_tags:
                self.error("Unexpected end tag `%s` (%s)" % (
                    tag, self.format_position()))
            element = self.open_tags.pop()

    def handle_data(self, data):
        self.current.append(data)

    def handle_charref(self, name):
        # Preserve character references verbatim.
        self.current.append('&%s;' % name)

    def handle_entityref(self, name):
        # Preserve entity references verbatim.
        self.current.append('&%s;' % name)
def parse_html(html):
    """
    Takes a string that contains *valid* HTML and turns it into a Python object
    structure that can be easily compared against other HTML on semantic
    equivalence. Syntactical differences like which quotation is used on
    arguments will be ignored.
    """
    parser = Parser()
    parser.feed(html)
    parser.close()
    document = parser.root
    # Normalize whitespace in text nodes before comparisons happen.
    document.finalize()
    # Removing ROOT element if it's not necessary
    if len(document.children) == 1:
        if not isinstance(document.children[0], six.string_types):
            document = document.children[0]
    return document
| bsd-3-clause |
gmoberg/rqt_launch_editor | lib/rqt_launchtree-master/src/rqt_launchtree/launchtree_plugin.py | 2 | 2796 | #!/usr/bin/env python
import rospy
from rqt_gui_py.plugin import Plugin
from python_qt_binding.QtCore import Qt
from python_qt_binding.QtGui import QInputDialog
from rqt_launchtree.launchtree_widget import LaunchtreeWidget
class LaunchtreePlugin(Plugin):
    """rqt plugin wrapper hosting a LaunchtreeWidget and persisting its UI
    state (editor command, last package, launch file and arguments)."""

    # Keys used in the rqt instance-settings store.
    _SETTING_LASTPKG = 'last_pkg'
    _SETTING_LASTLAUNCHFILE = 'last_launch'
    _SETTING_LASTLAUNCHARGS = 'last_args'

    def __init__(self, context):
        super(LaunchtreePlugin, self).__init__(context)
        self._widget = LaunchtreeWidget(context)
        if context.serial_number() > 1:
            # Disambiguate multiple plugin instances in the title bar.
            self._widget.setWindowTitle(self._widget.windowTitle() +
                (' (%d)' % context.serial_number()))
        context.add_widget(self._widget)

    def shutdown_plugin(self):
        self._widget.shutdown()

    def save_settings(self, plugin_settings, instance_settings):
        """Persist editor command and current selections."""
        instance_settings.set_value('editor', self._widget.editor)
        _curr_pkg = self._widget.package_select.currentText()
        rospy.logdebug('save_settings) currentIndex={}'.format(_curr_pkg))
        instance_settings.set_value(self._SETTING_LASTPKG, _curr_pkg)
        instance_settings.set_value(self._SETTING_LASTLAUNCHFILE, self._widget.launchfile_select.currentText())
        instance_settings.set_value(self._SETTING_LASTLAUNCHARGS, self._widget.args_input.text())

    def restore_settings(self, plugin_settings, instance_settings):
        """Restore editor command and re-select the last package/launch file."""
        self._widget.editor = instance_settings.value('editor', 'gedit')
        self._widget.args_input.setText(instance_settings.value(self._SETTING_LASTLAUNCHARGS, ''))
        pkg_idx = self._widget.package_select.findText(instance_settings.value(self._SETTING_LASTPKG))
        if pkg_idx >= 0:
            # Block signals so the programmatic selection does not trigger a
            # redundant reload; refresh the launch file list manually.
            self._widget.package_select.blockSignals(True)
            self._widget.package_select.setCurrentIndex(pkg_idx)
            self._widget.package_select.blockSignals(False)
            self._widget.update_launchfiles(pkg_idx)
            # only set launch file if pkg was restored
            launch_idx = self._widget.launchfile_select.findText(instance_settings.value(self._SETTING_LASTLAUNCHFILE))
            if launch_idx >= 0:
                self._widget.launchfile_select.blockSignals(True)
                self._widget.launchfile_select.setCurrentIndex(launch_idx)
                self._widget.launchfile_select.blockSignals(False)
                self._widget.block_load(False)
                self._widget.load_launchfile()

    def trigger_configuration(self):
        """Ask the user for the external editor command (gear-icon dialog)."""
        (text, ok) = QInputDialog.getText(self._widget,
            'Settings for %s' % self._widget.windowTitle(),
            'Command to edit launch files (vim, gedit, ...), can accept args:',
            text=self._widget.editor
        )
        if ok:
            self._widget.editor = text
self._widget.editor = text | bsd-3-clause |
msabramo/PyHamcrest | tests/hamcrest_unit_test/string_description_test.py | 2 | 2266 | import six
from hamcrest.core.string_description import *
from hamcrest.core.selfdescribing import SelfDescribing
import re
import pytest
try:
import unittest2 as unittest
except ImportError:
import unittest
__author__ = "Jon Reid"
__copyright__ = "Copyright 2011 hamcrest.org"
__license__ = "BSD, see License.txt"
class FakeSelfDescribing(SelfDescribing):
    """Test double whose description is always the literal text 'DESCRIPTION'."""

    def describe_to(self, description):
        # Contribute a fixed marker so the tests below can assert on it.
        description.append_text('DESCRIPTION')
class StringDescriptionTest(unittest.TestCase):
    """Behavioral tests for StringDescription's rendering of value types."""

    def setUp(self):
        self.description = StringDescription()

    def _rendered(self):
        # The description accumulated so far, as a plain string.
        return str(self.description)

    def testLetsSelfDescribingObjectDescribeItself(self):
        self.description.append_description_of(FakeSelfDescribing())
        self.assertEqual('DESCRIPTION', self._rendered())

    def testDescribesStringInQuotes(self):
        self.description.append_description_of('FOO')
        self.assertEqual("'FOO'", self._rendered())

    def testWrapsNonSelfDescribingObjectInAngleBrackets(self):
        self.description.append_description_of(42)
        self.assertEqual('<42>', self._rendered())

    def testShouldNotAddAngleBracketsIfObjectDescriptionAlreadyHasThem(self):
        self.description.append_description_of(object())
        pattern = re.compile("<object object at 0x[0-9a-fA-F]+>")
        self.assertTrue(pattern.match(self._rendered()))

    @unittest.skip("Describe unicode strings doesn't malform in Python 3. Six makes this go away anyway :/")
    def testDescribeUnicodeStringAsBytes(self):
        self.description.append_description_of(six.u('\u05d0'))
        self.assertEqual(six.u('\\u05d0'), self._rendered())

    @unittest.skipUnless(six.PY3, "Describe unicode strings only malforms in Python 2")
    def testDescribeUnicodeStringAsUnicode(self):
        self.description.append_description_of(six.u('\u05d0'))
        self.assertEqual(six.u("'\u05d0'"), self._rendered())
# below is a set of things that should append without error to string
# descriptions
@pytest.mark.parametrize('valid_input', ('native', six.b('bytes'), six.u('unicode')))
def test_description_append_valid_input(valid_input):
    """append() must accept native, byte, and unicode strings without error."""
    description = StringDescription()
    description.append(valid_input)
    # Rendering must also succeed after the append.
    str(description)
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
tscohen/chainer | tests/chainer_tests/functions_tests/loss_tests/test_softmax_cross_entropy.py | 13 | 4589 | import math
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
class TestSoftmaxCrossEntropy(unittest.TestCase):
    """Checks softmax_cross_entropy forward/backward on CPU and GPU.

    GPU variants run both with and without cuDNN.
    """

    def setUp(self):
        # 4 samples over 3 classes with integer class labels.
        self.x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
        self.t = numpy.random.randint(0, 3, (4,)).astype(numpy.int32)

    def check_forward(self, x_data, t_data, use_cudnn=True):
        # The loss must be a float32 scalar matching a naive NumPy version.
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(x, t, use_cudnn)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))
        # Compute expected value: mean negative log-softmax of the true class.
        y = numpy.exp(self.x)
        loss_expect = 0.0
        for i in six.moves.range(y.shape[0]):
            loss_expect -= math.log(y[i, self.t[i]] / y[i].sum())
        loss_expect /= y.shape[0]
        self.assertAlmostEqual(loss_expect, loss_value, places=5)

    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.x, self.t)

    @attr.cudnn
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu_no_cudnn(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)

    def check_backward(self, x_data, t_data, use_cudnn=True):
        # Gradient w.r.t. x must match a numerical estimate; t gets no grad.
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(x, t, use_cudnn)
        loss.backward()
        self.assertEqual(None, t.grad)
        func = loss.creator
        f = lambda: func.forward((x.data, t.data))
        gx, = gradient_check.numerical_grad(f, (x.data,), (1,), eps=0.02)
        gradient_check.assert_allclose(gx, x.grad, atol=1e-4)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.t)

    @attr.cudnn
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_no_cudnn(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)
class TestReplicatedSoftmaxCrossEntropy1(TestSoftmaxCrossEntropy):
    """Same checks with one extra replication axis and normalize=True."""

    def setUp(self):
        # (batch, classes, replication) layout; one label per location.
        self.x = numpy.random.uniform(-1, 1, (4, 3, 2)).astype(numpy.float32)
        self.t = numpy.random.randint(0, 3, (4, 2)).astype(numpy.int32)

    def check_forward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(
            x, t, use_cudnn, normalize=True)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))
        # Compute expected value
        y = numpy.exp(self.x)
        loss_expect = 0.0
        for i in six.moves.range(y.shape[0]):
            for k in six.moves.range(y.shape[2]):
                loss_expect -= math.log(
                    y[i, self.t[i, k], k] / y[i, :, k].sum())
        # normalize=True averages over every replicated location.
        loss_expect /= y.shape[0] * y.shape[2]
        self.assertAlmostEqual(loss_expect, loss_value, places=4)
class TestReplicatedSoftmaxCrossEntropy2(TestSoftmaxCrossEntropy):
    """Two replication axes with normalize=False (mean over batch only)."""

    def setUp(self):
        self.x = numpy.random.uniform(
            -1, 1, (4, 3, 2, 5)).astype(numpy.float32)
        self.t = numpy.random.randint(0, 3, (4, 2, 5)).astype(numpy.int32)

    def check_forward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(
            x, t, use_cudnn, normalize=False)
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))
        # Compute expected value
        y = numpy.exp(self.x)
        loss_expect = 0.0
        for i in six.moves.range(y.shape[0]):
            for k in six.moves.range(y.shape[2]):
                for l in six.moves.range(y.shape[3]):
                    loss_expect -= math.log(
                        y[i, self.t[i, k, l], k, l] / y[i, :, k, l].sum())
        # normalize=False divides by the batch size only.
        loss_expect /= y.shape[0]
        self.assertAlmostEqual(loss_expect, loss_value, places=4)
testing.run_module(__name__, __file__)
| mit |
nextgenusfs/amptk | amptk/remove_samples.py | 2 | 4077 | #!/usr/bin/env python
from __future__ import print_function
import sys
import argparse
import os
import pyfastx
from amptk import amptklib
class MyFormatter(argparse.ArgumentDefaultsHelpFormatter):
    """Argparse help formatter showing defaults, with a wider help column."""

    # Column at which option help text may start.
    _HELP_POSITION = 50

    def __init__(self, prog):
        super(MyFormatter, self).__init__(
            prog, max_help_position=self._HELP_POSITION)
def countBarcodes(file):
    """Tally reads per sample in a demultiplexed FASTQ.

    Sample names are parsed from read headers, which carry a ``label=``,
    ``sample=``, or generic ``key=value;`` tag depending on pipeline version.

    Parameters
    ----------
    file : str
        Path to the demultiplexed FASTQ file.

    Returns
    -------
    dict
        Mapping of sample name -> number of reads.
    """
    # now loop through data and find barcoded samples, counting each.....
    BarcodeCount = {}
    for title, seq, qual in pyfastx.Fastq(file, build_index=False):
        if 'label=' in title:
            ID = title.split("label=", 1)[-1].split(";")[0]
        elif 'sample=' in title:
            ID = title.split("sample=", 1)[-1].split(";")[0]
        else:
            ID = title.split("=", 1)[-1].split(";")[0]
        # dict.get replaces the original membership-test-then-assign pattern.
        BarcodeCount[ID] = BarcodeCount.get(ID, 0) + 1
    return BarcodeCount
def filter_sample(file, keep_list, output, format='fastq'):
    """Filter a demultiplexed FASTQ, dropping reads from listed samples.

    Note: despite the name, ``keep_list`` holds the sample names to EXCLUDE;
    a read is written only when its sample is *not* in it (the caller in
    main() passes the removal set here).

    Parameters
    ----------
    file : str
        Input FASTQ path.
    keep_list : set
        Sample names to drop (a set keeps membership tests O(1)).
    output : str
        Output file path.
    format : str
        'fastq' or 'fasta' output records.

    Returns
    -------
    tuple(int, int)
        (reads written, total reads seen)
    """
    keep_count = 0
    total_count = 0
    with open(output, 'w') as out:
        for title, seq, qual in pyfastx.Fastq(file, build_index=False):
            total_count += 1
            if 'label=' in title:
                sample = title.split('label=', 1)[1].split(';')[0]
            elif 'sample=' in title:
                sample = title.split('sample=', 1)[1].split(';')[0]
            else:
                sample = title.split('=', 1)[1].split(';')[0]
            # Idiomatic 'not in' (was 'if not sample in keep_list').
            if sample not in keep_list:
                keep_count += 1
                if format == 'fastq':
                    out.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
                elif format == 'fasta':
                    out.write(">%s\n%s\n" % (title, seq))
    return keep_count, total_count
def main(args):
    """CLI entry point: remove reads of unwanted samples from a demux FASTQ.

    Samples may be selected for removal by read-count threshold (-t), an
    explicit list (-l), and/or a file of names (-f); the union is removed.
    Output is gzip-compressed in place when -o ends in '.gz'.
    """
    parser = argparse.ArgumentParser(prog='amptk-remove_samples.py',
        description='''Script parses AMPtk de-multiplexed FASTQ file and keeps those sequences with barocde names in list ''',
        epilog="""Written by Jon Palmer (2015) nextgenusfs@gmail.com""",
        formatter_class=MyFormatter)
    parser.add_argument('-i','--input', required=True, help='Input AMPtk demux FASTQ')
    parser.add_argument('-l','--list', nargs='+', help='Input list of (BC) names to remove')
    parser.add_argument('-t','--threshold', type=int, help='Keep samples with more reads than threshold')
    parser.add_argument('-f','--file', help='File containing list of names to remove')
    parser.add_argument('-o','--out', required=True, help='Output name')
    parser.add_argument('--format', default='fastq', choices=['fastq','fasta'], help='format of output file')
    args = parser.parse_args(args)

    remove = []
    if args.threshold:
        print("Finding samples with less than %i reads" % args.threshold)
        BC_counts = countBarcodes(args.input)
        for k, v in list(BC_counts.items()):
            if int(v) <= args.threshold:
                if k not in remove:
                    remove.append(k)
        print("Removing samples: %s" % ','.join(remove))
    if args.file:
        # load in list of sample names to remove; 'fh' avoids shadowing the
        # builtin input() that the original 'as input' clobbered
        with open(args.file, 'r') as fh:
            lines = [line.rstrip('\n') for line in fh]
        print("Removing samples from file: %s" % ','.join(lines))
        remove += lines
    if args.list:
        lines = args.list
        print("Removing samples from list: %s" % ','.join(lines))
        remove += lines
    # make sure it is a set, faster lookup
    keep_list = set(remove)
    count = len(keep_list)
    # rename to base
    if args.out.endswith('.gz'):
        outfile = args.out.replace('.gz', '')
    else:
        outfile = args.out
    # run filtering
    keep_count, total_count = filter_sample(args.input, keep_list, outfile,
                                            format=args.format)
    # compress and clean
    if args.out.endswith('.gz'):  # compress in place
        amptklib.Fzip_inplace(outfile)
    print("Removed %i samples" % count)
    print("Kept %i reads out of %i total reads" % (keep_count, total_count))
if __name__ == "__main__":
main(sys.argv[1:])
| bsd-2-clause |
zyq001/ryu | ryu/log.py | 36 | 3413 | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2011 Isaku Yamahata <yamahata at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ryu import cfg
import inspect
import platform
import logging
import logging.config
import logging.handlers
import os
import sys
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
# Global ConfigOpts instance shared across ryu modules.
CONF = cfg.CONF
# Logging-related command-line options; consumed by init_log() below.
CONF.register_cli_opts([
    cfg.IntOpt('default-log-level', default=None, help='default log level'),
    cfg.BoolOpt('verbose', default=False, help='show debug output'),
    cfg.BoolOpt('use-stderr', default=True, help='log to standard error'),
    cfg.BoolOpt('use-syslog', default=False, help='output to syslog'),
    cfg.StrOpt('log-dir', default=None, help='log file directory'),
    cfg.StrOpt('log-file', default=None, help='log file name'),
    cfg.StrOpt('log-file-mode', default='0644',
               help='default log file permission'),
    cfg.StrOpt('log-config-file', default=None,
               help='Path to a logging config file to use')
])
# Temporary stderr handler installed before options are parsed;
# init_log() removes it once the real handlers are configured.
_EARLY_LOG_HANDLER = None


def early_init_log(level=None):
    """Attach a provisional stderr handler to the root logger.

    :param level: optional log level to set on the root logger; when None
                  the current level is left untouched.
    """
    global _EARLY_LOG_HANDLER
    handler = logging.StreamHandler(sys.stderr)
    _EARLY_LOG_HANDLER = handler
    root = logging.getLogger()
    root.addHandler(handler)
    if level is not None:
        root.setLevel(level)
def _get_log_file():
    """Resolve the log file path from configuration.

    Precedence: explicit --log-file; otherwise --log-dir combined with the
    entry script's basename plus '.log'; otherwise None (no file logging).
    """
    if CONF.log_file:
        return CONF.log_file
    if not CONF.log_dir:
        return None
    # Name the file after the outermost frame's script on the call stack.
    script_name = os.path.basename(inspect.stack()[-1][1])
    return os.path.join(CONF.log_dir, script_name) + '.log'
def init_log():
    """Configure the root logger from the parsed CONF options.

    Replaces the provisional stderr handler installed by early_init_log()
    with the configured handlers (config file, stderr, syslog, log file).
    Exits with status 2 if --log-config-file cannot be parsed.
    """
    global _EARLY_LOG_HANDLER
    log = logging.getLogger()
    if CONF.log_config_file:
        try:
            logging.config.fileConfig(CONF.log_config_file,
                                      disable_existing_loggers=False)
        except ConfigParser.Error as e:
            print('Failed to parse %s: %s' % (CONF.log_config_file, e),
                  file=sys.stderr)
            sys.exit(2)
        # A config file fully defines logging; ignore the other options.
        return
    if CONF.use_stderr:
        log.addHandler(logging.StreamHandler(sys.stderr))
    # Drop the temporary handler from early_init_log(), if any.
    if _EARLY_LOG_HANDLER is not None:
        log.removeHandler(_EARLY_LOG_HANDLER)
        _EARLY_LOG_HANDLER = None
    if CONF.use_syslog:
        # macOS exposes the syslog socket at a non-standard path.
        if platform.system() == 'Darwin':
            address = '/var/run/syslog'
        else:
            address = '/dev/log'
        syslog = logging.handlers.SysLogHandler(address=address)
        log.addHandler(syslog)
    log_file = _get_log_file()
    if log_file is not None:
        log.addHandler(logging.handlers.WatchedFileHandler(log_file))
        # log-file-mode is an octal permission string, e.g. '0644'.
        mode = int(CONF.log_file_mode, 8)
        os.chmod(log_file, mode)
    if CONF.default_log_level is not None:
        log.setLevel(CONF.default_log_level)
    elif CONF.verbose:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.INFO)
| apache-2.0 |
vFense/vFenseAgent-nix | agent/deps/rpm/Python-2.7.5/lib/python2.7/encodings/cp852.py | 593 | 35258 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP852.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless CP852 codec backed by the module-level charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_map)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Charmap encoding is stateless, so 'final' needs no special handling.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Charmap decoding is stateless, so 'final' needs no special handling.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream wrapper; encoding behavior comes entirely from Codec.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream wrapper; decoding behavior comes entirely from Codec.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this module as codec 'cp852'."""
    return codecs.CodecInfo(
        name='cp852',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x00c7, # LATIN CAPITAL LETTER C WITH CEDILLA
0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
0x0083: 0x00e2, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
0x0085: 0x016f, # LATIN SMALL LETTER U WITH RING ABOVE
0x0086: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
0x0087: 0x00e7, # LATIN SMALL LETTER C WITH CEDILLA
0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
0x0089: 0x00eb, # LATIN SMALL LETTER E WITH DIAERESIS
0x008a: 0x0150, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x008b: 0x0151, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x008c: 0x00ee, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x008f: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
0x0091: 0x0139, # LATIN CAPITAL LETTER L WITH ACUTE
0x0092: 0x013a, # LATIN SMALL LETTER L WITH ACUTE
0x0093: 0x00f4, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
0x0095: 0x013d, # LATIN CAPITAL LETTER L WITH CARON
0x0096: 0x013e, # LATIN SMALL LETTER L WITH CARON
0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x009b: 0x0164, # LATIN CAPITAL LETTER T WITH CARON
0x009c: 0x0165, # LATIN SMALL LETTER T WITH CARON
0x009d: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
0x009e: 0x00d7, # MULTIPLICATION SIGN
0x009f: 0x010d, # LATIN SMALL LETTER C WITH CARON
0x00a0: 0x00e1, # LATIN SMALL LETTER A WITH ACUTE
0x00a1: 0x00ed, # LATIN SMALL LETTER I WITH ACUTE
0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
0x00a3: 0x00fa, # LATIN SMALL LETTER U WITH ACUTE
0x00a4: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
0x00a5: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
0x00a6: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
0x00a7: 0x017e, # LATIN SMALL LETTER Z WITH CARON
0x00a8: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
0x00a9: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
0x00aa: 0x00ac, # NOT SIGN
0x00ab: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
0x00ac: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
0x00ad: 0x015f, # LATIN SMALL LETTER S WITH CEDILLA
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x00c1, # LATIN CAPITAL LETTER A WITH ACUTE
0x00b6: 0x00c2, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00b7: 0x011a, # LATIN CAPITAL LETTER E WITH CARON
0x00b8: 0x015e, # LATIN CAPITAL LETTER S WITH CEDILLA
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x00be: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x0102, # LATIN CAPITAL LETTER A WITH BREVE
0x00c7: 0x0103, # LATIN SMALL LETTER A WITH BREVE
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x0111, # LATIN SMALL LETTER D WITH STROKE
0x00d1: 0x0110, # LATIN CAPITAL LETTER D WITH STROKE
0x00d2: 0x010e, # LATIN CAPITAL LETTER D WITH CARON
0x00d3: 0x00cb, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00d4: 0x010f, # LATIN SMALL LETTER D WITH CARON
0x00d5: 0x0147, # LATIN CAPITAL LETTER N WITH CARON
0x00d6: 0x00cd, # LATIN CAPITAL LETTER I WITH ACUTE
0x00d7: 0x00ce, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d8: 0x011b, # LATIN SMALL LETTER E WITH CARON
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x0162, # LATIN CAPITAL LETTER T WITH CEDILLA
0x00de: 0x016e, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S
0x00e2: 0x00d4, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
0x00e4: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
0x00e5: 0x0148, # LATIN SMALL LETTER N WITH CARON
0x00e6: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
0x00e7: 0x0161, # LATIN SMALL LETTER S WITH CARON
0x00e8: 0x0154, # LATIN CAPITAL LETTER R WITH ACUTE
0x00e9: 0x00da, # LATIN CAPITAL LETTER U WITH ACUTE
0x00ea: 0x0155, # LATIN SMALL LETTER R WITH ACUTE
0x00eb: 0x0170, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x00ec: 0x00fd, # LATIN SMALL LETTER Y WITH ACUTE
0x00ed: 0x00dd, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00ee: 0x0163, # LATIN SMALL LETTER T WITH CEDILLA
0x00ef: 0x00b4, # ACUTE ACCENT
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x02dd, # DOUBLE ACUTE ACCENT
0x00f2: 0x02db, # OGONEK
0x00f3: 0x02c7, # CARON
0x00f4: 0x02d8, # BREVE
0x00f5: 0x00a7, # SECTION SIGN
0x00f6: 0x00f7, # DIVISION SIGN
0x00f7: 0x00b8, # CEDILLA
0x00f8: 0x00b0, # DEGREE SIGN
0x00f9: 0x00a8, # DIAERESIS
0x00fa: 0x02d9, # DOT ABOVE
0x00fb: 0x0171, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x00fc: 0x0158, # LATIN CAPITAL LETTER R WITH CARON
0x00fd: 0x0159, # LATIN SMALL LETTER R WITH CARON
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
u'\x00' # 0x0000 -> NULL
u'\x01' # 0x0001 -> START OF HEADING
u'\x02' # 0x0002 -> START OF TEXT
u'\x03' # 0x0003 -> END OF TEXT
u'\x04' # 0x0004 -> END OF TRANSMISSION
u'\x05' # 0x0005 -> ENQUIRY
u'\x06' # 0x0006 -> ACKNOWLEDGE
u'\x07' # 0x0007 -> BELL
u'\x08' # 0x0008 -> BACKSPACE
u'\t' # 0x0009 -> HORIZONTAL TABULATION
u'\n' # 0x000a -> LINE FEED
u'\x0b' # 0x000b -> VERTICAL TABULATION
u'\x0c' # 0x000c -> FORM FEED
u'\r' # 0x000d -> CARRIAGE RETURN
u'\x0e' # 0x000e -> SHIFT OUT
u'\x0f' # 0x000f -> SHIFT IN
u'\x10' # 0x0010 -> DATA LINK ESCAPE
u'\x11' # 0x0011 -> DEVICE CONTROL ONE
u'\x12' # 0x0012 -> DEVICE CONTROL TWO
u'\x13' # 0x0013 -> DEVICE CONTROL THREE
u'\x14' # 0x0014 -> DEVICE CONTROL FOUR
u'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x0016 -> SYNCHRONOUS IDLE
u'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x0018 -> CANCEL
u'\x19' # 0x0019 -> END OF MEDIUM
u'\x1a' # 0x001a -> SUBSTITUTE
u'\x1b' # 0x001b -> ESCAPE
u'\x1c' # 0x001c -> FILE SEPARATOR
u'\x1d' # 0x001d -> GROUP SEPARATOR
u'\x1e' # 0x001e -> RECORD SEPARATOR
u'\x1f' # 0x001f -> UNIT SEPARATOR
u' ' # 0x0020 -> SPACE
u'!' # 0x0021 -> EXCLAMATION MARK
u'"' # 0x0022 -> QUOTATION MARK
u'#' # 0x0023 -> NUMBER SIGN
u'$' # 0x0024 -> DOLLAR SIGN
u'%' # 0x0025 -> PERCENT SIGN
u'&' # 0x0026 -> AMPERSAND
u"'" # 0x0027 -> APOSTROPHE
u'(' # 0x0028 -> LEFT PARENTHESIS
u')' # 0x0029 -> RIGHT PARENTHESIS
u'*' # 0x002a -> ASTERISK
u'+' # 0x002b -> PLUS SIGN
u',' # 0x002c -> COMMA
u'-' # 0x002d -> HYPHEN-MINUS
u'.' # 0x002e -> FULL STOP
u'/' # 0x002f -> SOLIDUS
u'0' # 0x0030 -> DIGIT ZERO
u'1' # 0x0031 -> DIGIT ONE
u'2' # 0x0032 -> DIGIT TWO
u'3' # 0x0033 -> DIGIT THREE
u'4' # 0x0034 -> DIGIT FOUR
u'5' # 0x0035 -> DIGIT FIVE
u'6' # 0x0036 -> DIGIT SIX
u'7' # 0x0037 -> DIGIT SEVEN
u'8' # 0x0038 -> DIGIT EIGHT
u'9' # 0x0039 -> DIGIT NINE
u':' # 0x003a -> COLON
u';' # 0x003b -> SEMICOLON
u'<' # 0x003c -> LESS-THAN SIGN
u'=' # 0x003d -> EQUALS SIGN
u'>' # 0x003e -> GREATER-THAN SIGN
u'?' # 0x003f -> QUESTION MARK
u'@' # 0x0040 -> COMMERCIAL AT
u'A' # 0x0041 -> LATIN CAPITAL LETTER A
u'B' # 0x0042 -> LATIN CAPITAL LETTER B
u'C' # 0x0043 -> LATIN CAPITAL LETTER C
u'D' # 0x0044 -> LATIN CAPITAL LETTER D
u'E' # 0x0045 -> LATIN CAPITAL LETTER E
u'F' # 0x0046 -> LATIN CAPITAL LETTER F
u'G' # 0x0047 -> LATIN CAPITAL LETTER G
u'H' # 0x0048 -> LATIN CAPITAL LETTER H
u'I' # 0x0049 -> LATIN CAPITAL LETTER I
u'J' # 0x004a -> LATIN CAPITAL LETTER J
u'K' # 0x004b -> LATIN CAPITAL LETTER K
u'L' # 0x004c -> LATIN CAPITAL LETTER L
u'M' # 0x004d -> LATIN CAPITAL LETTER M
u'N' # 0x004e -> LATIN CAPITAL LETTER N
u'O' # 0x004f -> LATIN CAPITAL LETTER O
u'P' # 0x0050 -> LATIN CAPITAL LETTER P
u'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
u'R' # 0x0052 -> LATIN CAPITAL LETTER R
u'S' # 0x0053 -> LATIN CAPITAL LETTER S
u'T' # 0x0054 -> LATIN CAPITAL LETTER T
u'U' # 0x0055 -> LATIN CAPITAL LETTER U
u'V' # 0x0056 -> LATIN CAPITAL LETTER V
u'W' # 0x0057 -> LATIN CAPITAL LETTER W
u'X' # 0x0058 -> LATIN CAPITAL LETTER X
u'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
u'Z' # 0x005a -> LATIN CAPITAL LETTER Z
u'[' # 0x005b -> LEFT SQUARE BRACKET
u'\\' # 0x005c -> REVERSE SOLIDUS
u']' # 0x005d -> RIGHT SQUARE BRACKET
u'^' # 0x005e -> CIRCUMFLEX ACCENT
u'_' # 0x005f -> LOW LINE
u'`' # 0x0060 -> GRAVE ACCENT
u'a' # 0x0061 -> LATIN SMALL LETTER A
u'b' # 0x0062 -> LATIN SMALL LETTER B
u'c' # 0x0063 -> LATIN SMALL LETTER C
u'd' # 0x0064 -> LATIN SMALL LETTER D
u'e' # 0x0065 -> LATIN SMALL LETTER E
u'f' # 0x0066 -> LATIN SMALL LETTER F
u'g' # 0x0067 -> LATIN SMALL LETTER G
u'h' # 0x0068 -> LATIN SMALL LETTER H
u'i' # 0x0069 -> LATIN SMALL LETTER I
u'j' # 0x006a -> LATIN SMALL LETTER J
u'k' # 0x006b -> LATIN SMALL LETTER K
u'l' # 0x006c -> LATIN SMALL LETTER L
u'm' # 0x006d -> LATIN SMALL LETTER M
u'n' # 0x006e -> LATIN SMALL LETTER N
u'o' # 0x006f -> LATIN SMALL LETTER O
u'p' # 0x0070 -> LATIN SMALL LETTER P
u'q' # 0x0071 -> LATIN SMALL LETTER Q
u'r' # 0x0072 -> LATIN SMALL LETTER R
u's' # 0x0073 -> LATIN SMALL LETTER S
u't' # 0x0074 -> LATIN SMALL LETTER T
u'u' # 0x0075 -> LATIN SMALL LETTER U
u'v' # 0x0076 -> LATIN SMALL LETTER V
u'w' # 0x0077 -> LATIN SMALL LETTER W
u'x' # 0x0078 -> LATIN SMALL LETTER X
u'y' # 0x0079 -> LATIN SMALL LETTER Y
u'z' # 0x007a -> LATIN SMALL LETTER Z
u'{' # 0x007b -> LEFT CURLY BRACKET
u'|' # 0x007c -> VERTICAL LINE
u'}' # 0x007d -> RIGHT CURLY BRACKET
u'~' # 0x007e -> TILDE
u'\x7f' # 0x007f -> DELETE
u'\xc7' # 0x0080 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
u'\xe2' # 0x0083 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u016f' # 0x0085 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\u0107' # 0x0086 -> LATIN SMALL LETTER C WITH ACUTE
u'\xe7' # 0x0087 -> LATIN SMALL LETTER C WITH CEDILLA
u'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
u'\xeb' # 0x0089 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\u0150' # 0x008a -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\u0151' # 0x008b -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\xee' # 0x008c -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0106' # 0x008f -> LATIN CAPITAL LETTER C WITH ACUTE
u'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0139' # 0x0091 -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u013a' # 0x0092 -> LATIN SMALL LETTER L WITH ACUTE
u'\xf4' # 0x0093 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u013d' # 0x0095 -> LATIN CAPITAL LETTER L WITH CARON
u'\u013e' # 0x0096 -> LATIN SMALL LETTER L WITH CARON
u'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
u'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\u0164' # 0x009b -> LATIN CAPITAL LETTER T WITH CARON
u'\u0165' # 0x009c -> LATIN SMALL LETTER T WITH CARON
u'\u0141' # 0x009d -> LATIN CAPITAL LETTER L WITH STROKE
u'\xd7' # 0x009e -> MULTIPLICATION SIGN
u'\u010d' # 0x009f -> LATIN SMALL LETTER C WITH CARON
u'\xe1' # 0x00a0 -> LATIN SMALL LETTER A WITH ACUTE
u'\xed' # 0x00a1 -> LATIN SMALL LETTER I WITH ACUTE
u'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
u'\xfa' # 0x00a3 -> LATIN SMALL LETTER U WITH ACUTE
u'\u0104' # 0x00a4 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\u0105' # 0x00a5 -> LATIN SMALL LETTER A WITH OGONEK
u'\u017d' # 0x00a6 -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017e' # 0x00a7 -> LATIN SMALL LETTER Z WITH CARON
u'\u0118' # 0x00a8 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\u0119' # 0x00a9 -> LATIN SMALL LETTER E WITH OGONEK
u'\xac' # 0x00aa -> NOT SIGN
u'\u017a' # 0x00ab -> LATIN SMALL LETTER Z WITH ACUTE
u'\u010c' # 0x00ac -> LATIN CAPITAL LETTER C WITH CARON
u'\u015f' # 0x00ad -> LATIN SMALL LETTER S WITH CEDILLA
u'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2591' # 0x00b0 -> LIGHT SHADE
u'\u2592' # 0x00b1 -> MEDIUM SHADE
u'\u2593' # 0x00b2 -> DARK SHADE
u'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
u'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
u'\xc1' # 0x00b5 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0x00b6 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\u011a' # 0x00b7 -> LATIN CAPITAL LETTER E WITH CARON
u'\u015e' # 0x00b8 -> LATIN CAPITAL LETTER S WITH CEDILLA
u'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
u'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
u'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
u'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
u'\u017b' # 0x00bd -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u017c' # 0x00be -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
u'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
u'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
u'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
u'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
u'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
u'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
u'\u0102' # 0x00c6 -> LATIN CAPITAL LETTER A WITH BREVE
u'\u0103' # 0x00c7 -> LATIN SMALL LETTER A WITH BREVE
u'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
u'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
u'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
u'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
u'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
u'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
u'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
u'\xa4' # 0x00cf -> CURRENCY SIGN
u'\u0111' # 0x00d0 -> LATIN SMALL LETTER D WITH STROKE
u'\u0110' # 0x00d1 -> LATIN CAPITAL LETTER D WITH STROKE
u'\u010e' # 0x00d2 -> LATIN CAPITAL LETTER D WITH CARON
u'\xcb' # 0x00d3 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\u010f' # 0x00d4 -> LATIN SMALL LETTER D WITH CARON
u'\u0147' # 0x00d5 -> LATIN CAPITAL LETTER N WITH CARON
u'\xcd' # 0x00d6 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x00d7 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\u011b' # 0x00d8 -> LATIN SMALL LETTER E WITH CARON
u'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
u'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
u'\u2588' # 0x00db -> FULL BLOCK
u'\u2584' # 0x00dc -> LOWER HALF BLOCK
u'\u0162' # 0x00dd -> LATIN CAPITAL LETTER T WITH CEDILLA
u'\u016e' # 0x00de -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\u2580' # 0x00df -> UPPER HALF BLOCK
u'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S
u'\xd4' # 0x00e2 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\u0144' # 0x00e4 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0148' # 0x00e5 -> LATIN SMALL LETTER N WITH CARON
u'\u0160' # 0x00e6 -> LATIN CAPITAL LETTER S WITH CARON
u'\u0161' # 0x00e7 -> LATIN SMALL LETTER S WITH CARON
u'\u0154' # 0x00e8 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\xda' # 0x00e9 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u0155' # 0x00ea -> LATIN SMALL LETTER R WITH ACUTE
u'\u0170' # 0x00eb -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\xfd' # 0x00ec -> LATIN SMALL LETTER Y WITH ACUTE
u'\xdd' # 0x00ed -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0163' # 0x00ee -> LATIN SMALL LETTER T WITH CEDILLA
u'\xb4' # 0x00ef -> ACUTE ACCENT
u'\xad' # 0x00f0 -> SOFT HYPHEN
u'\u02dd' # 0x00f1 -> DOUBLE ACUTE ACCENT
u'\u02db' # 0x00f2 -> OGONEK
u'\u02c7' # 0x00f3 -> CARON
u'\u02d8' # 0x00f4 -> BREVE
u'\xa7' # 0x00f5 -> SECTION SIGN
u'\xf7' # 0x00f6 -> DIVISION SIGN
u'\xb8' # 0x00f7 -> CEDILLA
u'\xb0' # 0x00f8 -> DEGREE SIGN
u'\xa8' # 0x00f9 -> DIAERESIS
u'\u02d9' # 0x00fa -> DOT ABOVE
u'\u0171' # 0x00fb -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\u0158' # 0x00fc -> LATIN CAPITAL LETTER R WITH CARON
u'\u0159' # 0x00fd -> LATIN SMALL LETTER R WITH CARON
u'\u25a0' # 0x00fe -> BLACK SQUARE
u'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00f5, # SECTION SIGN
0x00a8: 0x00f9, # DIAERESIS
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ac: 0x00aa, # NOT SIGN
0x00ad: 0x00f0, # SOFT HYPHEN
0x00b0: 0x00f8, # DEGREE SIGN
0x00b4: 0x00ef, # ACUTE ACCENT
0x00b8: 0x00f7, # CEDILLA
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00c1: 0x00b5, # LATIN CAPITAL LETTER A WITH ACUTE
0x00c2: 0x00b6, # LATIN CAPITAL LETTER A WITH CIRCUMFLEX
0x00c4: 0x008e, # LATIN CAPITAL LETTER A WITH DIAERESIS
0x00c7: 0x0080, # LATIN CAPITAL LETTER C WITH CEDILLA
0x00c9: 0x0090, # LATIN CAPITAL LETTER E WITH ACUTE
0x00cb: 0x00d3, # LATIN CAPITAL LETTER E WITH DIAERESIS
0x00cd: 0x00d6, # LATIN CAPITAL LETTER I WITH ACUTE
0x00ce: 0x00d7, # LATIN CAPITAL LETTER I WITH CIRCUMFLEX
0x00d3: 0x00e0, # LATIN CAPITAL LETTER O WITH ACUTE
0x00d4: 0x00e2, # LATIN CAPITAL LETTER O WITH CIRCUMFLEX
0x00d6: 0x0099, # LATIN CAPITAL LETTER O WITH DIAERESIS
0x00d7: 0x009e, # MULTIPLICATION SIGN
0x00da: 0x00e9, # LATIN CAPITAL LETTER U WITH ACUTE
0x00dc: 0x009a, # LATIN CAPITAL LETTER U WITH DIAERESIS
0x00dd: 0x00ed, # LATIN CAPITAL LETTER Y WITH ACUTE
0x00df: 0x00e1, # LATIN SMALL LETTER SHARP S
0x00e1: 0x00a0, # LATIN SMALL LETTER A WITH ACUTE
0x00e2: 0x0083, # LATIN SMALL LETTER A WITH CIRCUMFLEX
0x00e4: 0x0084, # LATIN SMALL LETTER A WITH DIAERESIS
0x00e7: 0x0087, # LATIN SMALL LETTER C WITH CEDILLA
0x00e9: 0x0082, # LATIN SMALL LETTER E WITH ACUTE
0x00eb: 0x0089, # LATIN SMALL LETTER E WITH DIAERESIS
0x00ed: 0x00a1, # LATIN SMALL LETTER I WITH ACUTE
0x00ee: 0x008c, # LATIN SMALL LETTER I WITH CIRCUMFLEX
0x00f3: 0x00a2, # LATIN SMALL LETTER O WITH ACUTE
0x00f4: 0x0093, # LATIN SMALL LETTER O WITH CIRCUMFLEX
0x00f6: 0x0094, # LATIN SMALL LETTER O WITH DIAERESIS
0x00f7: 0x00f6, # DIVISION SIGN
0x00fa: 0x00a3, # LATIN SMALL LETTER U WITH ACUTE
0x00fc: 0x0081, # LATIN SMALL LETTER U WITH DIAERESIS
0x00fd: 0x00ec, # LATIN SMALL LETTER Y WITH ACUTE
0x0102: 0x00c6, # LATIN CAPITAL LETTER A WITH BREVE
0x0103: 0x00c7, # LATIN SMALL LETTER A WITH BREVE
0x0104: 0x00a4, # LATIN CAPITAL LETTER A WITH OGONEK
0x0105: 0x00a5, # LATIN SMALL LETTER A WITH OGONEK
0x0106: 0x008f, # LATIN CAPITAL LETTER C WITH ACUTE
0x0107: 0x0086, # LATIN SMALL LETTER C WITH ACUTE
0x010c: 0x00ac, # LATIN CAPITAL LETTER C WITH CARON
0x010d: 0x009f, # LATIN SMALL LETTER C WITH CARON
0x010e: 0x00d2, # LATIN CAPITAL LETTER D WITH CARON
0x010f: 0x00d4, # LATIN SMALL LETTER D WITH CARON
0x0110: 0x00d1, # LATIN CAPITAL LETTER D WITH STROKE
0x0111: 0x00d0, # LATIN SMALL LETTER D WITH STROKE
0x0118: 0x00a8, # LATIN CAPITAL LETTER E WITH OGONEK
0x0119: 0x00a9, # LATIN SMALL LETTER E WITH OGONEK
0x011a: 0x00b7, # LATIN CAPITAL LETTER E WITH CARON
0x011b: 0x00d8, # LATIN SMALL LETTER E WITH CARON
0x0139: 0x0091, # LATIN CAPITAL LETTER L WITH ACUTE
0x013a: 0x0092, # LATIN SMALL LETTER L WITH ACUTE
0x013d: 0x0095, # LATIN CAPITAL LETTER L WITH CARON
0x013e: 0x0096, # LATIN SMALL LETTER L WITH CARON
0x0141: 0x009d, # LATIN CAPITAL LETTER L WITH STROKE
0x0142: 0x0088, # LATIN SMALL LETTER L WITH STROKE
0x0143: 0x00e3, # LATIN CAPITAL LETTER N WITH ACUTE
0x0144: 0x00e4, # LATIN SMALL LETTER N WITH ACUTE
0x0147: 0x00d5, # LATIN CAPITAL LETTER N WITH CARON
0x0148: 0x00e5, # LATIN SMALL LETTER N WITH CARON
0x0150: 0x008a, # LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
0x0151: 0x008b, # LATIN SMALL LETTER O WITH DOUBLE ACUTE
0x0154: 0x00e8, # LATIN CAPITAL LETTER R WITH ACUTE
0x0155: 0x00ea, # LATIN SMALL LETTER R WITH ACUTE
0x0158: 0x00fc, # LATIN CAPITAL LETTER R WITH CARON
0x0159: 0x00fd, # LATIN SMALL LETTER R WITH CARON
0x015a: 0x0097, # LATIN CAPITAL LETTER S WITH ACUTE
0x015b: 0x0098, # LATIN SMALL LETTER S WITH ACUTE
0x015e: 0x00b8, # LATIN CAPITAL LETTER S WITH CEDILLA
0x015f: 0x00ad, # LATIN SMALL LETTER S WITH CEDILLA
0x0160: 0x00e6, # LATIN CAPITAL LETTER S WITH CARON
0x0161: 0x00e7, # LATIN SMALL LETTER S WITH CARON
0x0162: 0x00dd, # LATIN CAPITAL LETTER T WITH CEDILLA
0x0163: 0x00ee, # LATIN SMALL LETTER T WITH CEDILLA
0x0164: 0x009b, # LATIN CAPITAL LETTER T WITH CARON
0x0165: 0x009c, # LATIN SMALL LETTER T WITH CARON
0x016e: 0x00de, # LATIN CAPITAL LETTER U WITH RING ABOVE
0x016f: 0x0085, # LATIN SMALL LETTER U WITH RING ABOVE
0x0170: 0x00eb, # LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
0x0171: 0x00fb, # LATIN SMALL LETTER U WITH DOUBLE ACUTE
0x0179: 0x008d, # LATIN CAPITAL LETTER Z WITH ACUTE
0x017a: 0x00ab, # LATIN SMALL LETTER Z WITH ACUTE
0x017b: 0x00bd, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
0x017c: 0x00be, # LATIN SMALL LETTER Z WITH DOT ABOVE
0x017d: 0x00a6, # LATIN CAPITAL LETTER Z WITH CARON
0x017e: 0x00a7, # LATIN SMALL LETTER Z WITH CARON
0x02c7: 0x00f3, # CARON
0x02d8: 0x00f4, # BREVE
0x02d9: 0x00fa, # DOT ABOVE
0x02db: 0x00f2, # OGONEK
0x02dd: 0x00f1, # DOUBLE ACUTE ACCENT
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| lgpl-3.0 |
Apstra/aeon-venos | tests/test_opx.py | 1 | 2524 | import mock
import pytest
from pylib.aeon.opx import device
from pylib.aeon.cumulus import connector
# Expected device facts for the mocked OPX VM; test_opx_device compares
# Device.facts against this dict verbatim.
g_facts = {
    'hw_version': None,
    'hw_part_number': None,
    'hostname': 'opx221_vm',
    'serial_number': '525400A5EC36',
    'fqdn': 'opx221_vm',
    'os_version': '2.2.1',
    'virtual': True,
    'hw_model': 'S6000-VM',
    'vendor': 'OPX',
    'mac_address': '52:54:00:A5:EC:36',
    'os_name': 'OPX',
    'service_tag': None
}

# Canned command output replayed by the mocked SSH connector below.
ip_link_show_out = '''
1: lo: <LOOPBACK> mtu 65536 qdisc noop state DOWN mode DEFAULT group default
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP mode DEFAULT group default qlen 1000
link/ether 52:54:00:a5:ec:36 brd ff:ff:ff:ff:ff:ff
'''
hostname_out = "opx221_vm"
grep_version_out = '2.2.1'
grep_platform_out = 'S6000-VM'
@mock.patch('pylib.aeon.opx.connector.paramiko.SSHClient')
@pytest.fixture()
def opx_connector(mock_ssh):
    """Build a Connector to a fake host with paramiko's SSHClient mocked out."""
    # NOTE(review): decorator order is unusual -- pytest.fixture() is applied
    # first (innermost) and mock.patch then wraps the fixture object; the
    # conventional stacking puts @pytest.fixture on top.  Confirm pytest
    # actually collects this as a fixture before relying on it.
    hostname = '1.1.1.1'
    port = 22
    proto = 'ssh'
    user = 'test_user'
    passwd = 'test_passwd'
    con = connector.Connector(hostname, port=port, proto=proto, user=user, passwd=passwd)
    return con
@mock.patch('pylib.aeon.opx.device.BaseDevice.probe')
@mock.patch('pylib.aeon.opx.device.Connector')
@pytest.fixture()
def opx_device(mock_connector, mock_probe, request):
    """Build a Device whose Connector.execute() replays canned CLI output.

    ``request`` is accepted for pytest's fixture protocol but is unused.
    """

    def mock_execute(args, **kwargs):
        # Map each remote command string to the canned stdout captured above,
        # mimicking Connector.execute()'s (ok, results) return shape.
        results = []
        for arg in args:
            # os_version
            if arg == """grep -oP '^OS_VERSION=[\"]?\K.*\d' /etc/OPX-release-version""":
                results.append({'stdout': grep_version_out})
            # platform -- note this is a second independent `if`, not `elif`,
            # so the version and platform checks never mask each other
            if arg == """grep -oP '^PLATFORM=[\"]?\K.*\w' /etc/OPX-release-version""":
                results.append({'stdout': grep_platform_out})
            # hostname
            elif arg == 'hostname':
                results.append({'stdout': hostname_out})
            elif arg == 'ip link show':
                results.append({'stdout': ip_link_show_out})
        return True, results

    mock_connector.return_value.execute.side_effect = mock_execute
    # probe() reports (reachable, timeout)
    mock_probe.return_value = True, 10
    target = '1.1.1.1'
    user = 'test_user'
    passwd = 'test_passwd'
    dev = device.Device(target, user=user, passwd=passwd)
    return dev
def test_opx_device(opx_device):
    """Verify the facts a Device gathers through the mocked connector."""
    dev = opx_device
    assert dev.OS_NAME == 'OPX'
    assert dev.DEFAULT_PROBE_TIMEOUT == 10
    assert dev.user == 'test_user'
    assert dev.passwd == 'test_passwd'
    # Compares the entire facts dict built from the canned CLI output.
    assert dev.facts == g_facts
| apache-2.0 |
bbbenja/SickRage | lib/unrar2/unix.py | 13 | 11433 | # Copyright (c) 2003-2005 Jimmy Retzlaff, 2008 Konstantin Yegupov
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Unix version uses unrar command line executable
import subprocess
import gc
import os, os.path
import time, re
from rar_exceptions import *
class UnpackerNotInstalled(Exception):
    """Raised when neither an 'unrar' nor a 'rar' executable can be found."""
    pass

# Lazily probed name of the CLI unpacker ('unrar' or 'rar') and the major
# version (4 or 5) parsed from its banner; both are filled in on first use.
rar_executable_cached = None
rar_executable_version = None
def call_unrar(params):
    """Call the rar/unrar command line executable.

    On first use, probes for an installed ``unrar`` (preferred) or ``rar``
    binary and caches the result in ``rar_executable_cached``.

    :param params: list of command-line arguments passed after the executable
    :returns: a ``subprocess.Popen`` with stdout and stderr piped
    :raises UnpackerNotInstalled: when neither ``unrar`` nor ``rar`` is found
    :raises TypeError: when ``params`` is not a list
    """
    global rar_executable_cached
    if rar_executable_cached is None:
        for command in ('unrar', 'rar'):
            try:
                # Probe only: pipe stderr too so the usage text does not leak
                # to the console, and reap the child so it is not left as a
                # zombie process.
                probe = subprocess.Popen([command], stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                probe.communicate()
                rar_executable_cached = command
                break
            except OSError:
                pass
        if rar_executable_cached is None:
            raise UnpackerNotInstalled("No suitable RAR unpacker installed")

    # Explicit check instead of `assert` (asserts vanish under python -O).
    if not isinstance(params, list):
        raise TypeError("params must be list, got %r" % type(params))
    args = [rar_executable_cached] + params
    try:
        gc.disable()  # See http://bugs.python.org/issue1336
        return subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    finally:
        gc.enable()
class RarFileImplementation(object):
    """Backend that drives the external rar/unrar CLI on Unix.

    Relies on state provided by the shared base class -- notably
    ``self.archiveName`` is assumed to be set before ``init()`` runs
    (TODO confirm against the base class in rar_common).
    """

    def init(self, password=None):
        """Open the archive: validate it, detect the unrar major version
        (4 or 5) from the banner, and read the archive comment.

        :raises FileOpenError: the archive file cannot be opened
        :raises IncorrectRARPassword: CRC failure reported while listing
        :raises InvalidRARArchive: the file is not a RAR archive
        :raises UnpackerNotInstalled: banner is neither RAR 4.x nor 5.x
        """
        global rar_executable_version
        self.password = password
        proc = self.call('v', [])
        stdoutdata, stderrdata = proc.communicate()
        # Use unrar return code if available
        self._check_returncode(proc.returncode)
        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
            if line.find("CRC failed") >= 0:
                raise IncorrectRARPassword
        accum = []
        source = iter(stdoutdata.splitlines())
        line = ''
        # Skip everything up to the "RAR x.y ..." banner line.
        while (line.find('RAR ') == -1):
            line = source.next()
        signature = line
        # The code below is mighty flaky
        # and will probably crash on localized versions of RAR
        # but I see no safe way to rewrite it using a CLI tool
        if signature.find("RAR 4") > -1:
            rar_executable_version = 4
            while not (line.startswith('Comment:') or line.startswith('Pathname/Comment')):
                if line.strip().endswith('is not RAR archive'):
                    raise InvalidRARArchive
                line = source.next()
            # Everything between the banner and the listing header is the
            # archive comment.
            while not line.startswith('Pathname/Comment'):
                accum.append(line.rstrip('\n'))
                line = source.next()
            if len(accum):
                accum[0] = accum[0][9:]  # strip out "Comment:" part
                self.comment = '\n'.join(accum[:-1])
            else:
                self.comment = None
        elif signature.find("RAR 5") > -1:
            rar_executable_version = 5
            line = source.next()
            while not line.startswith('Archive:'):
                if line.strip().endswith('is not RAR archive'):
                    raise InvalidRARArchive
                accum.append(line.rstrip('\n'))
                line = source.next()
            if len(accum):
                self.comment = '\n'.join(accum[:-1]).strip()
            else:
                self.comment = None
        else:
            raise UnpackerNotInstalled("Unsupported RAR version, expected 4.x or 5.x, found: "
                                       + signature.split(" ")[1])

    def escaped_password(self):
        # '-' is rar's convention for "no password".
        return '-' if self.password == None else self.password

    def call(self, cmd, options=[], files=[]):
        """Run unrar command ``cmd`` against this archive.

        The list defaults are never mutated, so sharing them is safe.
        The password switch is always appended.
        """
        options2 = options + ['p' + self.escaped_password()]
        soptions = ['-' + x for x in options2]
        return call_unrar([cmd] + soptions + ['--', self.archiveName] + files)

    def infoiter(self):
        """Yield a metadata dict per archived entry with keys: index,
        filename, size, isdir, datetime, comment, volume.
        """
        # RAR 4 needs 'v' (verbose) for this layout; RAR 5's 'l' suffices.
        command = "v" if rar_executable_version == 4 else "l"
        proc = self.call(command, ['c-'])
        stdoutdata, stderrdata = proc.communicate()
        # Use unrar return code if available
        self._check_returncode(proc.returncode)
        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
        accum = []
        source = iter(stdoutdata.splitlines())
        line = ''
        # Skip forward to the '-----------' separator that opens the listing.
        while not line.startswith('-----------'):
            if line.strip().endswith('is not RAR archive'):
                raise InvalidRARArchive
            if line.startswith("CRC failed") or line.startswith("Checksum error") or line.startswith("checksum error"):
                raise IncorrectRARPassword
            line = source.next()
        line = source.next()
        i = 0
        re_spaces = re.compile(r"\s+")
        if rar_executable_version == 4:
            # RAR 4 prints two lines per entry: filename, then attributes.
            while not line.startswith('-----------'):
                accum.append(line)
                if len(accum) == 2:
                    data = {}
                    data['index'] = i
                    # asterisks mark password-encrypted files
                    data['filename'] = accum[0].strip().lstrip("*")
                    fields = re_spaces.split(accum[1].strip())
                    data['size'] = int(fields[0])
                    attr = fields[5]
                    data['isdir'] = 'd' in attr.lower()
                    data['datetime'] = time.strptime(fields[3] + " " + fields[4], '%d-%m-%y %H:%M')
                    data['comment'] = None
                    data['volume'] = None
                    yield data
                    accum = []
                    i += 1
                line = source.next()
        elif rar_executable_version == 5:
            # RAR 5 prints one line per entry: attrs size date time name.
            while not line.startswith('-----------'):
                fields = line.strip().lstrip("*").split()
                data = {}
                data['index'] = i
                # Filename may contain spaces; it is everything past field 3.
                data['filename'] = " ".join(fields[4:])
                data['size'] = int(fields[1])
                attr = fields[0]
                data['isdir'] = 'd' in attr.lower()
                data['datetime'] = time.strptime(fields[2] + " " + fields[3], '%d-%m-%y %H:%M')
                data['comment'] = None
                data['volume'] = None
                yield data
                i += 1
                line = source.next()

    def read_files(self, checker):
        """Return [(info, contents)] for each non-directory entry accepted by
        ``checker``; contents are read via 'unrar p' (print to stdout)."""
        res = []
        for info in self.infoiter():
            checkres = checker(info)
            if checkres == True and not info.isdir:
                pipe = self.call('p', ['inul'], [info.filename]).stdout
                res.append((info, pipe.read()))
        return res

    def extract(self, checker, path, withSubpath, overwrite):
        """Extract entries accepted by ``checker`` into ``path``.

        :param withSubpath: keep archive directory structure ('x') or
            flatten ('e')
        :param overwrite: overwrite existing files ('o+') or skip ('o-')
        :returns: list of info dicts for the extracted entries
        """
        res = []
        command = 'x'
        if not withSubpath:
            command = 'e'
        options = []
        if overwrite:
            options.append('o+')
        else:
            options.append('o-')
        if not path.endswith(os.sep):
            path += os.sep
        names = []
        for info in self.infoiter():
            checkres = checker(info)
            if type(checkres) in [str, unicode]:
                raise NotImplementedError("Condition callbacks returning strings are deprecated and only supported in Windows")
            if checkres == True and not info.isdir:
                names.append(info.filename)
                res.append(info)
        # unrar takes the destination directory as the final argument.
        names.append(path)
        proc = self.call(command, options, names)
        stdoutdata, stderrdata = proc.communicate()
        # Use unrar return code if available
        self._check_returncode(proc.returncode)
        if stderrdata.find("CRC failed") >= 0 or stderrdata.find("Checksum error") >= 0 or stderrdata.find("checksum error") >= 0:
            raise CRCRARError
        if stderrdata.find("No files to extract") >= 0:
            raise NoFileToExtract
        if stderrdata.find("Bad archive") >= 0:
            raise FatalRARError
        return res

    def _check_returncode(self, returncode):
        """Map unrar exit codes to library exceptions.

        Exit codes taken from unrarsrc-5.2.1.tar.gz/errhnd.hpp.
        """
        RARX_SUCCESS = 0
        RARX_WARNING = 1
        RARX_FATAL = 2
        RARX_CRC = 3
        RARX_LOCK = 4
        RARX_WRITE = 5
        RARX_OPEN = 6
        RARX_USERERROR = 7
        RARX_MEMORY = 8
        RARX_CREATE = 9
        RARX_NOFILES = 10
        RARX_BADPWD = 11
        RARX_USERBREAK = 255
        if returncode != RARX_SUCCESS:
            if returncode == RARX_FATAL:
                raise FatalRARError
            elif returncode == RARX_CRC:
                raise CRCRARError
            elif returncode == RARX_BADPWD:
                raise IncorrectRARPassword
            elif returncode == RARX_NOFILES:
                raise NoFileToExtract
            else:
                # Warnings and all other non-zero codes fall through here.
                raise GenericRARError

    def destruct(self):
        # Nothing to clean up: every CLI invocation is self-contained.
        pass

    def get_volume(self):
        """Return the zero-based volume number of this archive part, or
        None for a single-volume archive."""
        command = "v" if rar_executable_version == 4 else "l"
        stdoutdata, stderrdata = self.call(command, ['c-']).communicate()
        for line in stderrdata.splitlines():
            if line.strip().startswith("Cannot open"):
                raise FileOpenError
        source = iter(stdoutdata.splitlines())
        line = ''
        while not line.startswith('-----------'):
            if line.strip().endswith('is not RAR archive'):
                raise InvalidRARArchive
            if line.startswith("CRC failed") or line.startswith("Checksum error"):
                raise IncorrectRARPassword
            line = source.next()
        line = source.next()
        if rar_executable_version == 4:
            # Skip past the listing to the trailer line after the second
            # '-----------' separator, which carries the volume number.
            while not line.startswith('-----------'):
                line = source.next()
            line = source.next()
            items = line.strip().split()
            if len(items) > 4 and items[4] == "volume":
                return int(items[5]) - 1
            else:
                return None
        elif rar_executable_version == 5:
            while not line.startswith('-----------'):
                line = source.next()
            line = source.next()
            items = line.strip().split()
            if items[1] == "volume":
                return int(items[2]) - 1
            else:
                return None
| gpl-3.0 |
JizhouZhang/SDR | gr-analog/python/analog/qa_rail_ff.py | 47 | 2030 | #!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, analog, blocks
def clip(x, lo, hi):
    """Clamp *x* into [lo, hi], checking the lower bound first."""
    if x < lo:
        return lo
    return hi if x > hi else x
class test_rail(gr_unittest.TestCase):
    """QA tests for the analog.rail_ff hard-clipping block."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_rail_001(self):
        # Test set/gets
        hi1 = 1
        hi2 = 2
        lo1 = -1
        lo2 = -2
        op = analog.rail_ff(lo1, hi1)
        op.set_hi(hi2)
        h = op.hi()
        self.assertEqual(hi2, h)
        op.set_lo(lo2)
        l = op.lo()
        self.assertEqual(lo2, l)

    def test_rail_002(self):
        # Stream a ramp through rail_ff and compare element-wise against the
        # pure-Python clip() reference above.
        lo = -0.75
        hi = 0.90
        src_data = [-2, -1, -0.5, -0.25, 0, 0.25, 0.5, 1, 2]
        expected_result = map(lambda x: clip(x, lo, hi), src_data)
        src = blocks.vector_source_f(src_data)
        op = analog.rail_ff(lo, hi)
        dst = blocks.vector_sink_f()
        self.tb.connect(src, op)
        self.tb.connect(op, dst)
        self.tb.run()
        result_data = dst.data()
        # 4 decimal places of float tolerance
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 4)
if __name__ == '__main__':
    # Run the QA suite and emit the XML report GNU Radio's harness expects.
    gr_unittest.run(test_rail, "test_rail.xml")
| gpl-3.0 |
xq262144/hue | desktop/core/ext-py/rsa-3.4.2/rsa/varblock.py | 82 | 5406 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""VARBLOCK file support
.. deprecated:: 3.4
The VARBLOCK format is NOT recommended for general use, has been deprecated since
Python-RSA 3.4, and will be removed in a future release. It's vulnerable to a
number of attacks:
1. decrypt/encrypt_bigfile() does not implement `Authenticated encryption`_ nor
uses MACs to verify messages before decrypting public key encrypted messages.
2. decrypt/encrypt_bigfile() does not use hybrid encryption (it uses plain RSA)
and has no method for chaining, so block reordering is possible.
See `issue #19 on Github`_ for more information.
.. _Authenticated encryption: https://en.wikipedia.org/wiki/Authenticated_encryption
.. _issue #19 on Github: https://github.com/sybrenstuvel/python-rsa/issues/13
The VARBLOCK file format is as follows, where || denotes byte concatenation:
FILE := VERSION || BLOCK || BLOCK ...
BLOCK := LENGTH || DATA
LENGTH := varint-encoded length of the subsequent data. Varint comes from
Google Protobuf, and encodes an integer into a variable number of bytes.
Each byte uses the 7 lowest bits to encode the value. The highest bit set
to 1 indicates the next byte is also part of the varint. The last byte will
have this bit set to 0.
This file format is called the VARBLOCK format, in line with the varint format
used to denote the block sizes.
"""
import warnings

from rsa._compat import byte, b

# Single NUL byte written for the varint value 0.
ZERO_BYTE = b('\x00')
# Version byte written at the start of every VARBLOCK file.
VARBLOCK_VERSION = 1

# Warn at import time: this whole module is deprecated (see module docstring).
warnings.warn("The 'rsa.varblock' module was deprecated in Python-RSA version "
              "3.4 due to security issues in the VARBLOCK format. See "
              "https://github.com/sybrenstuvel/python-rsa/issues/13 for more information.",
              DeprecationWarning)
def read_varint(infile):
    """Reads a varint from the file.

    When the first byte to be read indicates EOF, (0, 0) is returned. When an
    EOF occurs when at least one byte has been read, an EOFError exception is
    raised.

    :param infile: the file-like object to read from. It should have a read()
        method.
    :returns: (varint, length), the read varint and the number of read bytes.
    :raises EOFError: when the stream ends in the middle of a varint.
    """
    varint = 0
    read_bytes = 0

    while True:
        char = infile.read(1)
        if len(char) == 0:
            if read_bytes == 0:
                return 0, 0
            raise EOFError('EOF while reading varint, value is %i so far' %
                           varint)

        # Named byte_value so it does not shadow rsa._compat.byte, which is
        # imported at module level and used by write_varint.
        byte_value = ord(char)
        # Low 7 bits carry the payload, least-significant group first.
        varint += (byte_value & 0x7F) << (7 * read_bytes)
        read_bytes += 1

        # High bit clear means this was the final byte of the varint.
        if not byte_value & 0x80:
            return varint, read_bytes
def write_varint(outfile, value):
    """Writes a varint to a file.

    :param outfile: the file-like object to write to. It should have a write()
        method.
    :returns: the number of written bytes.
    """
    # Local import keeps the function self-contained; struct.pack('>B', x)
    # produces exactly the same single byte as the private rsa._compat.byte()
    # helper on both Python 2 and 3.
    import struct

    # there is a big difference between 'write the value 0' (this case) and
    # 'there is nothing left to write' (the false-case of the while loop)
    if value == 0:
        outfile.write(struct.pack('>B', 0))
        return 1

    written_bytes = 0
    while value > 0:
        to_write = value & 0x7f
        value >>= 7

        # Set the continuation bit when more groups follow.
        if value > 0:
            to_write |= 0x80

        outfile.write(struct.pack('>B', to_write))
        written_bytes += 1

    return written_bytes
def yield_varblocks(infile):
    """Generator, yields each block in the input file.

    :param infile: file to read, is expected to have the VARBLOCK format as
        described in the module's docstring.
    @yields the contents of each block.
    :raises EOFError: when the version byte is missing or a block is truncated
    :raises ValueError: on an unsupported VARBLOCK version
    """

    # Check the version number
    first_char = infile.read(1)
    if len(first_char) == 0:
        raise EOFError('Unable to read VARBLOCK version number')

    version = ord(first_char)
    if version != VARBLOCK_VERSION:
        raise ValueError('VARBLOCK version %i not supported' % version)

    while True:
        # Each block is prefixed with its varint-encoded length.
        (block_size, read_bytes) = read_varint(infile)

        # EOF at block boundary, that's fine.
        if read_bytes == 0 and block_size == 0:
            break

        block = infile.read(block_size)

        read_size = len(block)
        if read_size != block_size:
            raise EOFError('Block size is %i, but could read only %i bytes' %
                           (block_size, read_size))

        yield block
def yield_fixedblocks(infile, blocksize):
    """Generator, yields successive ``blocksize``-byte chunks of *infile*.

    The final chunk may be shorter; iteration stops at end of file.

    :param infile: file to read and separate in blocks.
    :returns: a generator that yields the contents of each block
    """
    while True:
        chunk = infile.read(blocksize)
        if not chunk:
            return
        yield chunk
        if len(chunk) < blocksize:
            # A short read means EOF was reached; no need to read again.
            return
| apache-2.0 |
simbha/mAngE-Gin | lib/Django 1.7/django/conf/locale/eo/formats.py | 115 | 2335 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# Esperanto locale formats.  Backslash-escaped letters (\-, \a, \d, \e, ...)
# are literal characters in Django's date-format syntax.
DATE_FORMAT = r'j\-\a \d\e F Y'  # '26-a de julio 1887'
TIME_FORMAT = 'H:i'  # '18:59'
DATETIME_FORMAT = r'j\-\a \d\e F Y\, \j\e H:i'  # '26-a de julio 1887, je 18:59'
YEAR_MONTH_FORMAT = r'F \d\e Y'  # 'julio de 1887'
MONTH_DAY_FORMAT = r'j\-\a \d\e F'  # '26-a de julio'
SHORT_DATE_FORMAT = 'Y-m-d'  # '1887-07-26'
SHORT_DATETIME_FORMAT = 'Y-m-d H:i'  # '1887-07-26 18:59'
FIRST_DAY_OF_WEEK = 1  # Monday (lundo)

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%Y-%m-%d',  # '1887-07-26'
    '%y-%m-%d',  # '87-07-26'
    '%Y %m %d',  # '1887 07 26'
    '%d-a de %b %Y',  # '26-a de jul 1887'
    '%d %b %Y',  # '26 jul 1887'
    '%d-a de %B %Y',  # '26-a de julio 1887'
    '%d %B %Y',  # '26 julio 1887'
    '%d %m %Y',  # '26 07 1887'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',  # '18:59:00'
    '%H:%M',  # '18:59'
)
DATETIME_INPUT_FORMATS = (
    '%Y-%m-%d %H:%M:%S',  # '1887-07-26 18:59:00'
    '%Y-%m-%d %H:%M',  # '1887-07-26 18:59'
    '%Y-%m-%d',  # '1887-07-26'
    '%Y.%m.%d %H:%M:%S',  # '1887.07.26 18:59:00'
    '%Y.%m.%d %H:%M',  # '1887.07.26 18:59'
    '%Y.%m.%d',  # '1887.07.26'
    '%d/%m/%Y %H:%M:%S',  # '26/07/1887 18:59:00'
    '%d/%m/%Y %H:%M',  # '26/07/1887 18:59'
    '%d/%m/%Y',  # '26/07/1887'
    '%y-%m-%d %H:%M:%S',  # '87-07-26 18:59:00'
    '%y-%m-%d %H:%M',  # '87-07-26 18:59'
    '%y-%m-%d',  # '87-07-26'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0'  # non-breaking space
NUMBER_GROUPING = 3
| mit |
thodoris/djangoPharma | djangoPharma/env/Lib/site-packages/django/contrib/gis/gdal/geomtype.py | 100 | 3310 | from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class OGRGeomType(object):
    "Encapsulates OGR Geometry Types."

    # OGR flags 2.5D (Z-coordinate) geometries by OR-ing this bit into the
    # base type number.
    wkb25bit = -2147483648

    # Dictionary of acceptable OGRwkbGeometryType s and their string names.
    _types = {0: 'Unknown',
              1: 'Point',
              2: 'LineString',
              3: 'Polygon',
              4: 'MultiPoint',
              5: 'MultiLineString',
              6: 'MultiPolygon',
              7: 'GeometryCollection',
              100: 'None',
              101: 'LinearRing',
              102: 'PointZ',
              1 + wkb25bit: 'Point25D',
              2 + wkb25bit: 'LineString25D',
              3 + wkb25bit: 'Polygon25D',
              4 + wkb25bit: 'MultiPoint25D',
              5 + wkb25bit: 'MultiLineString25D',
              6 + wkb25bit: 'MultiPolygon25D',
              7 + wkb25bit: 'GeometryCollection25D',
              }
    # Reverse type dictionary, keyed by lower-case of the name.
    _str_types = {v.lower(): k for k, v in _types.items()}

    def __init__(self, type_input):
        "Figures out the correct OGR Type based upon the input."
        if isinstance(type_input, OGRGeomType):
            num = type_input.num
        elif isinstance(type_input, six.string_types):
            type_input = type_input.lower()
            if type_input == 'geometry':
                # 'geometry' is accepted as an alias for the 'unknown' type.
                type_input = 'unknown'
            num = self._str_types.get(type_input)
            if num is None:
                raise GDALException('Invalid OGR String Type "%s"' % type_input)
        elif isinstance(type_input, int):
            if type_input not in self._types:
                raise GDALException('Invalid OGR Integer Type: %d' % type_input)
            num = type_input
        else:
            raise TypeError('Invalid OGR input type given.')

        # Setting the OGR geometry type number.
        self.num = num

    def __str__(self):
        "Returns the value of the name property."
        return self.name

    def __eq__(self, other):
        """
        Does an equivalence test on the OGR type with the given
        other OGRGeomType, the short-hand string, or the integer.
        """
        if isinstance(other, OGRGeomType):
            return self.num == other.num
        elif isinstance(other, six.string_types):
            return self.name.lower() == other.lower()
        elif isinstance(other, int):
            return self.num == other
        else:
            return False

    def __ne__(self, other):
        # Required on Python 2, which does not derive != from __eq__.
        return not (self == other)

    @property
    def name(self):
        "Returns a short-hand string form of the OGR Geometry type."
        return self._types[self.num]

    @property
    def django(self):
        "Returns the Django GeometryField for this OGR Type."
        s = self.name.replace('25D', '')
        if s in ('LinearRing', 'None'):
            # No Django model field corresponds to these OGR types.
            return None
        elif s == 'Unknown':
            s = 'Geometry'
        elif s == 'PointZ':
            s = 'Point'
        return s + 'Field'

    def to_multi(self):
        """
        Transform Point, LineString, Polygon, and their 25D equivalents
        to their Multi... counterpart.
        """
        # The Multi* type numbers sit exactly 3 above their single
        # counterparts (see _types above).
        if self.name.startswith(('Point', 'LineString', 'Polygon')):
            self.num += 3
| apache-2.0 |
zstackorg/zstack-woodpecker | integrationtest/vm/multizones/suite_setup.py | 2 | 2233 | '''
@author: Frank
'''
import os.path
import zstacklib.utils.linux as linux
import zstacklib.utils.http as http
import zstacktestagent.plugins.host as host_plugin
import zstacktestagent.testagent as testagent
import zstackwoodpecker.operations.deploy_operations as deploy_operations
import zstackwoodpecker.operations.config_operations as config_operations
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_util as test_util
USER_PATH = os.path.expanduser('~')
# Optional user hook script: executed via bash at the end of suite setup.
EXTRA_SUITE_SETUP_SCRIPT = '%s/.zstackwoodpecker/extra_suite_setup_config.sh' % USER_PATH
def test():
    """Suite setup: create test VLAN interfaces, deploy the test agent to
    every host in the plan, create matching VLAN devices on each host,
    deploy the initial database, and run any user-provided extra script.

    Calls test_util.test_pass() on success; failures propagate to the
    woodpecker test runner as exceptions.
    """
    nic_name = "eth0"
    # Under a scenario (nested-virt) deployment the hosts are wired through
    # the zsn0 bridge NIC instead of the physical eth0.
    # (Fixed: compare against None with `is not None`, not `!=`.)
    if (test_lib.scenario_config is not None
            and test_lib.scenario_file is not None
            and os.path.exists(test_lib.scenario_file)):
        nic_name = "zsn0"

    # This vlan creation is not a must, if testing is under nested virt env.
    # But it is required on a physical host without enough physical network
    # devices when your test execution machine is not the same one as the
    # Host machine.
    linux.create_vlan_eth(nic_name, 10)
    linux.create_vlan_eth(nic_name, 11)

    # If the test execution machine is not the same one as the Host machine,
    # deploy work needs to be separated into 2 steps (deploy_test_agent,
    # execute_plan_without_deploy_test_agent) and cannot directly call
    # SetupAction.run().
    test_lib.setup_plan.deploy_test_agent()

    cmd = host_plugin.CreateVlanDeviceCmd()
    cmd.ethname = nic_name
    cmd.vlan = 10

    cmd2 = host_plugin.CreateVlanDeviceCmd()
    cmd2.ethname = nic_name
    cmd2.vlan = 11

    test_hosts = test_lib.lib_get_all_hosts_from_plan()
    # The plan helper may return a single host object instead of a list.
    # (Fixed: use isinstance instead of comparing type() objects.)
    if not isinstance(test_hosts, list):
        test_hosts = [test_hosts]
    for test_host in test_hosts:
        http.json_dump_post(testagent.build_http_path(
            test_host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd)
        http.json_dump_post(testagent.build_http_path(
            test_host.managementIp_, host_plugin.CREATE_VLAN_DEVICE_PATH), cmd2)

    test_lib.setup_plan.execute_plan_without_deploy_test_agent()
    deploy_operations.deploy_initial_database(test_lib.deploy_config)

    # User-supplied extra setup steps, if present.
    if os.path.exists(EXTRA_SUITE_SETUP_SCRIPT):
        os.system("bash %s" % EXTRA_SUITE_SETUP_SCRIPT)
    test_util.test_pass('Suite Setup Success')
| apache-2.0 |
theguardian/vanilla2tender | cherrypy/lib/static.py | 40 | 14510 | try:
from io import UnsupportedOperation
except ImportError:
UnsupportedOperation = object()
import logging
import mimetypes
mimetypes.init()
mimetypes.types_map['.dwg']='image/x-dwg'
mimetypes.types_map['.ico']='image/x-icon'
mimetypes.types_map['.bz2']='application/x-bzip2'
mimetypes.types_map['.gz']='application/x-gzip'
import os
import re
import stat
import time
import cherrypy
from cherrypy._cpcompat import ntob, unquote
from cherrypy.lib import cptools, httputil, file_generator_limited
def serve_file(path, content_type=None, disposition=None, name=None, debug=False):
    """Set status, headers, and body in order to serve the given path.

    The Content-Type header will be set to the content_type arg, if provided.
    If not provided, the Content-Type will be guessed by the file extension
    of the 'path' argument.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, it will be set
    to the basename of path. If disposition is None, no Content-Disposition
    header will be written.
    """
    response = cherrypy.serving.response

    # If path is relative, users should fix it by making path absolute.
    # That is, CherryPy should not guess where the application root is.
    # It certainly should *not* use cwd (since CP may be invoked from a
    # variety of paths). If using tools.staticdir, you can make your relative
    # paths become absolute by supplying a value for "tools.staticdir.root".
    if not os.path.isabs(path):
        msg = "'%s' is not an absolute path." % path
        if debug:
            cherrypy.log(msg, 'TOOLS.STATICFILE')
        raise ValueError(msg)

    try:
        st = os.stat(path)
    except OSError:
        # Missing or unreadable file becomes a 404 for the caller to handle.
        if debug:
            cherrypy.log('os.stat(%r) failed' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Check if path is a directory.
    if stat.S_ISDIR(st.st_mode):
        # Let the caller deal with it as they like.
        if debug:
            cherrypy.log('%r is a directory' % path, 'TOOLS.STATIC')
        raise cherrypy.NotFound()

    # Set the Last-Modified response header, so that
    # modified-since validation code can work.
    # NOTE: this must happen before validate_since(), which compares the
    # header against the request's If-(Un)Modified-Since values.
    response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
    cptools.validate_since()

    if content_type is None:
        # Set content-type based on filename extension
        ext = ""
        i = path.rfind('.')
        if i != -1:
            ext = path[i:].lower()
        content_type = mimetypes.types_map.get(ext, None)
    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        if name is None:
            name = os.path.basename(path)
        cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    content_length = st.st_size
    fileobj = open(path, 'rb')
    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def serve_fileobj(fileobj, content_type=None, disposition=None, name=None,
                  debug=False):
    """Set status, headers, and body in order to serve the given file object.

    The Content-Type header will be set to the content_type arg, if provided.

    If disposition is not None, the Content-Disposition header will be set
    to "<disposition>; filename=<name>". If name is None, 'filename' will
    not be set. If disposition is None, no Content-Disposition header will
    be written.

    CAUTION: If the request contains a 'Range' header, one or more seek()s will
    be performed on the file object. This may cause undesired behavior if
    the file object is not seekable. It could also produce undesired results
    if the caller set the read position of the file object prior to calling
    serve_fileobj(), expecting that the data would be served starting from that
    position.
    """
    response = cherrypy.serving.response

    try:
        st = os.fstat(fileobj.fileno())
    except AttributeError:
        # Not an OS-level file (e.g. no fileno()): size unknown, so stream
        # without a Content-Length and skip modified-since handling.
        if debug:
            cherrypy.log('os has no fstat attribute', 'TOOLS.STATIC')
        content_length = None
    except UnsupportedOperation:
        # io objects that expose fileno() but don't support it raise this.
        content_length = None
    else:
        # Set the Last-Modified response header, so that
        # modified-since validation code can work.
        response.headers['Last-Modified'] = httputil.HTTPDate(st.st_mtime)
        cptools.validate_since()
        content_length = st.st_size

    if content_type is not None:
        response.headers['Content-Type'] = content_type
    if debug:
        cherrypy.log('Content-Type: %r' % content_type, 'TOOLS.STATIC')

    cd = None
    if disposition is not None:
        if name is None:
            cd = disposition
        else:
            cd = '%s; filename="%s"' % (disposition, name)
        response.headers["Content-Disposition"] = cd
    if debug:
        cherrypy.log('Content-Disposition: %r' % cd, 'TOOLS.STATIC')

    return _serve_fileobj(fileobj, content_type, content_length, debug=debug)
def _serve_fileobj(fileobj, content_type, content_length, debug=False):
    """Internal. Set response.body to the given file object, perhaps ranged."""
    response = cherrypy.serving.response

    # HTTP/1.0 didn't have Range/Accept-Ranges headers, or the 206 code
    request = cherrypy.serving.request
    if request.protocol >= (1, 1):
        response.headers["Accept-Ranges"] = "bytes"
        # r is None when no Range header was sent, [] when the header was
        # present but unsatisfiable, otherwise a list of (start, stop) pairs.
        r = httputil.get_ranges(request.headers.get('Range'), content_length)
        if r == []:
            response.headers['Content-Range'] = "bytes */%s" % content_length
            message = "Invalid Range (first-byte-pos greater than Content-Length)"
            if debug:
                cherrypy.log(message, 'TOOLS.STATIC')
            raise cherrypy.HTTPError(416, message)

        if r:
            if len(r) == 1:
                # Return a single-part response.
                start, stop = r[0]
                if stop > content_length:
                    stop = content_length
                r_len = stop - start
                if debug:
                    cherrypy.log('Single part; start: %r, stop: %r' % (start, stop),
                                 'TOOLS.STATIC')
                response.status = "206 Partial Content"
                # Content-Range's last-byte-pos is inclusive, hence stop - 1.
                response.headers['Content-Range'] = (
                    "bytes %s-%s/%s" % (start, stop - 1, content_length))
                response.headers['Content-Length'] = r_len
                fileobj.seek(start)
                response.body = file_generator_limited(fileobj, r_len)
            else:
                # Return a multipart/byteranges response.
                response.status = "206 Partial Content"
                try:
                    # Python 3
                    from email.generator import _make_boundary as choose_boundary
                except ImportError:
                    # Python 2
                    from mimetools import choose_boundary
                boundary = choose_boundary()
                ct = "multipart/byteranges; boundary=%s" % boundary
                response.headers['Content-Type'] = ct
                if "Content-Length" in response.headers:
                    # Delete Content-Length header so finalize() recalcs it.
                    del response.headers["Content-Length"]

                def file_ranges():
                    # Generator that emits each requested range as its own
                    # MIME part; fileobj is seek()ed lazily as the body is
                    # iterated by the server.
                    # Apache compatibility:
                    yield ntob("\r\n")

                    for start, stop in r:
                        if debug:
                            cherrypy.log('Multipart; start: %r, stop: %r' % (start, stop),
                                         'TOOLS.STATIC')
                        yield ntob("--" + boundary, 'ascii')
                        yield ntob("\r\nContent-type: %s" % content_type, 'ascii')
                        yield ntob("\r\nContent-range: bytes %s-%s/%s\r\n\r\n"
                                   % (start, stop - 1, content_length), 'ascii')
                        fileobj.seek(start)
                        for chunk in file_generator_limited(fileobj, stop-start):
                            yield chunk
                        yield ntob("\r\n")
                    # Final boundary
                    yield ntob("--" + boundary + "--", 'ascii')

                    # Apache compatibility:
                    yield ntob("\r\n")
                response.body = file_ranges()
            return response.body
        else:
            if debug:
                cherrypy.log('No byteranges requested', 'TOOLS.STATIC')

    # Set Content-Length and use an iterable (file object)
    # this way CP won't load the whole file in memory
    response.headers['Content-Length'] = content_length
    response.body = fileobj
    return response.body
def serve_download(path, name=None):
    """Serve 'path' as an application/x-download attachment."""
    # This is such a common idiom I felt it deserved its own wrapper.
    content_type = "application/x-download"
    disposition = "attachment"
    return serve_file(path, content_type, disposition, name)
def _attempt(filename, content_types, debug=False):
    # Try to serve `filename` statically.  Returns True if it was served,
    # False if the file was not found (so a dynamic handler can take over).
    if debug:
        cherrypy.log('Attempting %r (content_types %r)' %
                     (filename, content_types), 'TOOLS.STATICDIR')
    try:
        # you can set the content types for a
        # complete directory per extension
        content_type = None
        if content_types:
            r, ext = os.path.splitext(filename)
            # content_types keys omit the leading dot, hence ext[1:].
            content_type = content_types.get(ext[1:], None)
        serve_file(filename, content_type=content_type, debug=debug)
        return True
    except cherrypy.NotFound:
        # If we didn't find the static file, continue handling the
        # request. We might find a dynamic handler instead.
        if debug:
            cherrypy.log('NotFound', 'TOOLS.STATICFILE')
        return False
def staticdir(section, dir, root="", match="", content_types=None, index="",
              debug=False):
    """Serve a static resource from the given (root +) dir.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").

    index
        If provided, it should be the (relative) name of a file to
        serve for directory requests. For example, if the dir argument is
        '/home/me', the Request-URI is 'myapp', and the index arg is
        'index.html', the file '/home/me/myapp/index.html' will be sought.
    """
    request = cherrypy.serving.request
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICDIR')
        return False

    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' %
                         (request.path_info, match), 'TOOLS.STATICDIR')
        return False

    # Allow the use of '~' to refer to a user's home directory.
    dir = os.path.expanduser(dir)

    # If dir is relative, make absolute using "root".
    if not os.path.isabs(dir):
        if not root:
            msg = "Static dir requires an absolute dir (or root)."
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICDIR')
            raise ValueError(msg)
        dir = os.path.join(root, dir)

    # Determine where we are in the object tree relative to 'section'
    # (where the static tool was defined).
    if section == 'global':
        section = "/"
    section = section.rstrip(r"\/")
    branch = request.path_info[len(section) + 1:]
    branch = unquote(branch.lstrip(r"\/"))

    # If branch is "", filename will end in a slash
    filename = os.path.join(dir, branch)
    if debug:
        cherrypy.log('Checking file %r to fulfill %r' %
                     (filename, request.path_info), 'TOOLS.STATICDIR')

    # There's a chance that the branch pulled from the URL might
    # have ".." or similar uplevel attacks in it. Check that the final
    # filename is a child of dir.
    # FIX: a bare prefix test is not enough -- '/static-evil' starts with
    # '/static' -- so require either an exact match or a path-separator
    # boundary immediately after the base directory.
    normdir = os.path.normpath(dir)
    normfile = os.path.normpath(filename)
    if not (normfile == normdir or
            normfile.startswith(normdir + os.path.sep)):
        raise cherrypy.HTTPError(403)  # Forbidden

    # FIX: forward the debug flag so _attempt logs when staticdir debugging
    # is enabled (it was previously dropped here, unlike in staticfile).
    handled = _attempt(filename, content_types, debug=debug)
    if not handled:
        # Check for an index file if a folder was requested.
        if index:
            handled = _attempt(os.path.join(filename, index), content_types,
                               debug=debug)
            if handled:
                # NOTE: (r"\/") is a 2-char string, not a tuple; this tests
                # whether the last character is a backslash or a slash.
                request.is_index = filename[-1] in (r"\/")
    return handled
def staticfile(filename, root=None, match="", content_types=None, debug=False):
    """Serve a static resource from the given (root +) filename.

    match
        If given, request.path_info will be searched for the given
        regular expression before attempting to serve static content.

    content_types
        If given, it should be a Python dictionary of
        {file-extension: content-type} pairs, where 'file-extension' is
        a string (e.g. "gif") and 'content-type' is the value to write
        out in the Content-Type response header (e.g. "image/gif").
    """
    request = cherrypy.serving.request
    # Static files are only served for safe, read-only methods.
    if request.method not in ('GET', 'HEAD'):
        if debug:
            cherrypy.log('request.method not GET or HEAD', 'TOOLS.STATICFILE')
        return False

    # Optional regex gate on the request path.
    if match and not re.search(match, request.path_info):
        if debug:
            cherrypy.log('request.path_info %r does not match pattern %r' %
                         (request.path_info, match), 'TOOLS.STATICFILE')
        return False

    # If filename is relative, make absolute using "root".
    if not os.path.isabs(filename):
        if not root:
            msg = "Static tool requires an absolute filename (got '%s')." % filename
            if debug:
                cherrypy.log(msg, 'TOOLS.STATICFILE')
            raise ValueError(msg)
        filename = os.path.join(root, filename)

    return _attempt(filename, content_types, debug=debug)
| gpl-2.0 |
eteq/bokeh | sphinx/source/docs/tutorials/exercises/les_mis.py | 23 | 2382 | import numpy as np
from bokeh.plotting import figure, output_file, show
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.sampledata.les_mis import data
# EXERCISE: try out different sort orders for the names
nodes = data['nodes']
names = [node['name'] for node in sorted(data['nodes'], key=lambda x: x['group'])]

# Store the symmetric co-occurrence counts in a dense numpy matrix.
# BUG FIX: the matrix must start zero-filled.  np.empty() returns
# uninitialized memory, and only pairs present in data['links'] are
# assigned below -- every other cell would otherwise hold garbage that
# later feeds the alpha computation (counts[i, j] / 4.0).
N = len(nodes)
counts = np.zeros((N, N))
for link in data['links']:
    counts[link['source'], link['target']] = link['value']
    counts[link['target'], link['source']] = link['value']
# We will use these colors to color each group by a different color
colormap = [
    "#444444", "#a6cee3", "#1f78b4", "#b2df8a", "#33a02c", "#fb9a99",
    "#e31a1c", "#fdbf6f", "#ff7f00", "#cab2d6", "#6a3d9a"
]

# Set up some data to plot! We need a value for every pair of names.  The
# co-occurrence count for a pair is counts[i, j].  Each rect is colored by
# group (grey when the two characters belong to different groups) and its
# alpha scales with the count.
xname, yname, color, alpha = [], [], [], []
for i, node_a in enumerate(nodes):
    for j, node_b in enumerate(nodes):
        xname.append(node_a['name'])
        yname.append(node_b['name'])
        alpha.append(min(counts[i, j] / 4.0, 0.9) + 0.1)
        if node_a['group'] == node_b['group']:
            color.append(colormap[node_a['group']])
        else:
            color.append('lightgrey')
# EXERCISE: output static HTML file

# EXERCISE: create a ColumnDataSource to hold the xnames, ynames, colors, alphas,
# and counts. NOTE: the counts array is 2D and will need to be flattened

# create a new figure (categorical ranges on both axes; the x range is
# reversed so the matrix reads top-left to bottom-right)
p = figure(title="Les Mis Occurrences (one at a time)",
           x_axis_location="above", tools="resize,hover",
           x_range=list(reversed(names)), y_range=names,
           plot_width=800, plot_height=800)

# EXERCISE: use the `p.rect` renderer to render a categorical heatmap of all the
# data. Experiment with the widths and heights (use categorical percentage
# unite) as well as colors and alphas.

# EXERCISE: use p.grid, p.axis, etc. to style the plot. Some suggestions:
# - remove the axis and grid lines
# - remove the major ticks
# - make the tick labels smaller
# - set the x-axis orientation to vertical, or angled

# EXERCISE: configure the hover tool to display both names as well as
# the count value as tooltips

# EXERCISE: show the plot
OshynSong/scikit-learn | sklearn/utils/sparsetools/tests/test_traversal.py | 315 | 2001 | from __future__ import division, print_function, absolute_import
from nose import SkipTest
import numpy as np
from numpy.testing import assert_array_almost_equal
try:
    from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\
        csgraph_to_dense, csgraph_from_dense
except ImportError:
    # Oldish versions of scipy don't have that; the tests below check for
    # `csgraph_from_dense is None` and skip themselves in that case.
    csgraph_from_dense = None
def test_graph_breadth_first():
    """BFS tree of a small undirected graph matches the known answer."""
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")

    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    graph = csgraph_from_dense(dense, null_value=0)

    expected = np.array([[0, 1, 2, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 7, 0],
                         [0, 0, 0, 0, 0],
                         [0, 0, 0, 0, 0]])

    for directed in (True, False):
        tree = breadth_first_tree(graph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
def test_graph_depth_first():
    """DFS tree of a small undirected graph matches the known answer."""
    if csgraph_from_dense is None:
        raise SkipTest("Old version of scipy, doesn't have csgraph.")

    dense = np.array([[0, 1, 2, 0, 0],
                      [1, 0, 0, 0, 3],
                      [2, 0, 0, 7, 0],
                      [0, 0, 7, 0, 1],
                      [0, 3, 0, 1, 0]])
    graph = csgraph_from_dense(dense, null_value=0)

    expected = np.array([[0, 1, 0, 0, 0],
                         [0, 0, 0, 0, 3],
                         [0, 0, 0, 0, 0],
                         [0, 0, 7, 0, 0],
                         [0, 0, 0, 1, 0]])

    for directed in (True, False):
        tree = depth_first_tree(graph, 0, directed)
        assert_array_almost_equal(csgraph_to_dense(tree), expected)
| bsd-3-clause |
JoinTheRealms/CopyCat-Kernel-P880 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys

# Make the perf trace helper library (Core, perf_trace_context) importable;
# PERF_EXEC_PATH is exported by perf when it executes this script.
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from Core import *
from perf_trace_context import *

# Per-event-name counters for events with no dedicated handler.
# `autodict` comes from Core via the star import.
unhandled = autodict()
def trace_begin():
    # Called once by perf before any trace events are delivered.
    print "trace_begin"
    pass
def trace_end():
    # Called once by perf after the last event; dump any events that had
    # no dedicated handler.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    vec):
    # irq:softirq_entry handler: print the common fields, then the softirq
    # vector decoded to its symbolic name via symbol_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "vec=%s\n" % \
    (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    call_site, ptr, bytes_req, bytes_alloc,
    gfp_flags):
    # kmem:kmalloc handler: print the common fields, then the allocation
    # details with gfp_flags decoded to flag names via flag_str().
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)
    print_uncommon(context)
    print "call_site=%u, ptr=%u, bytes_req=%u, " \
    "bytes_alloc=%u, gfp_flags=%s\n" % \
    (call_site, ptr, bytes_req, bytes_alloc,
    flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler.  `unhandled` is an
    # autodict: a missing key materializes as an empty dict, so += raises
    # TypeError, which is used here to initialize the counter to 1.
    try:
        unhandled[event_name] += 1
    except TypeError:
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # Common per-event prefix: event name, cpu, timestamp, pid, comm.
    # Trailing comma suppresses the newline (Python 2 print statement).
    print "%-20s %5u %05u.%09u %8u %-20s " % \
    (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # Pull the remaining common fields back out of the perf context object.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
    % (common_pc(context), trace_flag_str(common_flags(context)), \
    common_lock_depth(context))
def print_unhandled():
    # Print a name/count table of every event that lacked a handler;
    # silent when all events were handled.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s %10s\n" % ("event", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
    "-----------"),

    for event_name in keys:
        print "%-40s %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
dstroppa/openstack-smartos-nova-grizzly | nova/api/openstack/compute/contrib/hosts.py | 2 | 14323 | # Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The hosts admin extension."""
import webob.exc
from xml.parsers import expat
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common import log as logging
from nova import utils
LOG = logging.getLogger(__name__)
# Policy-check helper for this extension ('compute:hosts' rules).
authorize = extensions.extension_authorizer('compute', 'hosts')
class HostIndexTemplate(xmlutil.TemplateBuilder):
    """XML template for the hosts index: a <hosts> root with one <host>
    element (host_name / service attributes) per entry."""

    def construct(self):
        root = xmlutil.TemplateElement('hosts')
        host_elem = xmlutil.SubTemplateElement(root, 'host', selector='hosts')
        for attribute in ('host_name', 'service'):
            host_elem.set(attribute)
        return xmlutil.MasterTemplate(root, 1)
class HostUpdateTemplate(xmlutil.TemplateBuilder):
    """XML template for host update responses (host / status /
    maintenance_mode attributes on a <host> root)."""

    def construct(self):
        root = xmlutil.TemplateElement('host')
        for attribute in ('host', 'status', 'maintenance_mode'):
            root.set(attribute)
        return xmlutil.MasterTemplate(root, 1)
class HostActionTemplate(xmlutil.TemplateBuilder):
    """XML template for host power-action responses (host / power_action
    attributes on a <host> root)."""

    def construct(self):
        root = xmlutil.TemplateElement('host')
        for attribute in ('host', 'power_action'):
            root.set(attribute)
        return xmlutil.MasterTemplate(root, 1)
class HostShowTemplate(xmlutil.TemplateBuilder):
    """XML template for the host 'show' response: a <host> root holding a
    flat <resource> dict per resource entry."""

    def construct(self):
        root = xmlutil.TemplateElement('host')
        resource_elem = xmlutil.make_flat_dict('resource', selector='host',
                                               subselector='resource')
        root.append(resource_elem)
        return xmlutil.MasterTemplate(root, 1)
class HostUpdateDeserializer(wsgi.XMLDeserializer):
    """Deserialize a host-update XML body into the dict form that the
    controller's update() method expects."""

    def default(self, string):
        # Parse defensively: a malformed body becomes MalformedRequestBody
        # instead of leaking an ExpatError to the WSGI stack.
        try:
            node = utils.safe_minidom_parse_string(string)
        except expat.ExpatError:
            msg = _("cannot understand XML")
            raise exception.MalformedRequestBody(reason=msg)

        updates = {}
        # Only <maintenance_mode> and <status> children of <updates> are
        # recognized; everything else is ignored (update() validates keys).
        updates_node = self.find_first_child_named(node, 'updates')
        if updates_node is not None:
            maintenance = self.find_first_child_named(updates_node,
                                                      'maintenance_mode')
            if maintenance is not None:
                updates[maintenance.tagName] = self.extract_text(maintenance)

            status = self.find_first_child_named(updates_node, 'status')
            if status is not None:
                updates[status.tagName] = self.extract_text(status)

        return dict(body=updates)
class HostController(object):
    """The Hosts API controller for the OpenStack API."""

    def __init__(self):
        self.api = compute.HostAPI()
        super(HostController, self).__init__()

    @wsgi.serializers(xml=HostIndexTemplate)
    def index(self, req):
        """
        :returns: A dict in the format:

            {'hosts': [{'host_name': 'some.host.name',
               'service': 'cells'},
              {'host_name': 'some.other.host.name',
               'service': 'cells'},
              {'host_name': 'some.celly.host.name',
               'service': 'cells'},
              {'host_name': 'console1.host.com',
               'service': 'consoleauth'},
              {'host_name': 'network1.host.com',
               'service': 'network'},
              {'host_name': 'netwwork2.host.com',
               'service': 'network'},
              {'host_name': 'sched1.host.com',
               'service': 'scheduler'},
              {'host_name': 'sched2.host.com',
               'service': 'scheduler'},
              {'host_name': 'vol1.host.com',
               'service': 'volume'}]}
        """
        context = req.environ['nova.context']
        authorize(context)
        # Only enabled services are listed; optionally filter by zone.
        filters = {'disabled': False}
        zone = req.GET.get('zone', None)
        if zone:
            filters['availability_zone'] = zone
        services = self.api.service_get_all(context, filters=filters,
                                            set_zones=True)
        hosts = []
        for service in services:
            hosts.append({'host_name': service['host'],
                          'service': service['topic'],
                          'zone': service['availability_zone']})
        return {'hosts': hosts}

    @wsgi.serializers(xml=HostUpdateTemplate)
    @wsgi.deserializers(xml=HostUpdateDeserializer)
    def update(self, req, id, body):
        """
        :param body: example format {'status': 'enable',
                                     'maintenance_mode': 'enable'}
        :returns: dict merging the results of the requested changes
        """
        def read_enabled(orig_val, msg):
            """
            :param orig_val: A string with either 'enable' or 'disable'. May
                             be surrounded by whitespace, and case doesn't
                             matter
            :param msg: The message to be passed to HTTPBadRequest. A single
                        %s will be replaced with orig_val.
            :returns: True for 'enabled' and False for 'disabled'
            """
            val = orig_val.strip().lower()
            if val == "enable":
                return True
            elif val == "disable":
                return False
            else:
                raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)

        context = req.environ['nova.context']
        authorize(context)
        # See what the user wants to 'update'
        params = dict([(k.strip().lower(), v) for k, v in body.iteritems()])
        orig_status = status = params.pop('status', None)
        orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
        # Validate the request
        if len(params) > 0:
            # Some extra param was passed. Fail.
            explanation = _("Invalid update setting: '%s'") % params.keys()[0]
            raise webob.exc.HTTPBadRequest(explanation=explanation)
        if orig_status is not None:
            status = read_enabled(orig_status, _("Invalid status: '%s'"))
        if orig_maint_mode is not None:
            maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
        if status is None and maint_mode is None:
            explanation = _("'status' or 'maintenance_mode' needed for "
                            "host update")
            raise webob.exc.HTTPBadRequest(explanation=explanation)
        # Make the calls and merge the results
        result = {'host': id}
        if status is not None:
            result['status'] = self._set_enabled_status(context, id, status)
        if maint_mode is not None:
            result['maintenance_mode'] = self._set_host_maintenance(context,
                    id, maint_mode)
        return result

    def _set_host_maintenance(self, context, host_name, mode=True):
        """Start/Stop host maintenance window. On start, it triggers
        guest VMs evacuation."""
        # NOTE: the audit message interpolates host_name/mode via locals().
        LOG.audit(_("Putting host %(host_name)s in maintenance "
                    "mode %(mode)s.") % locals())
        try:
            result = self.api.set_host_maintenance(context, host_name, mode)
        except NotImplementedError:
            msg = _("Virt driver does not implement host maintenance mode.")
            raise webob.exc.HTTPNotImplemented(explanation=msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.message)
        # Drivers must report one of these two canonical states.
        if result not in ("on_maintenance", "off_maintenance"):
            raise webob.exc.HTTPBadRequest(explanation=result)
        return result

    def _set_enabled_status(self, context, host_name, enabled):
        """Sets the specified host's ability to accept new instances.

        :param enabled: a boolean - if False no new VMs will be able to start
                        on the host
        """
        if enabled:
            LOG.audit(_("Enabling host %s.") % host_name)
        else:
            LOG.audit(_("Disabling host %s.") % host_name)
        try:
            result = self.api.set_host_enabled(context, host_name=host_name,
                                               enabled=enabled)
        except NotImplementedError:
            msg = _("Virt driver does not implement host disabled status.")
            raise webob.exc.HTTPNotImplemented(explanation=msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.message)
        if result not in ("enabled", "disabled"):
            raise webob.exc.HTTPBadRequest(explanation=result)
        return result

    def _host_power_action(self, req, host_name, action):
        """Reboots, shuts down or powers up the host."""
        context = req.environ['nova.context']
        authorize(context)
        try:
            result = self.api.host_power_action(context, host_name=host_name,
                                                action=action)
        except NotImplementedError:
            msg = _("Virt driver does not implement host power management.")
            raise webob.exc.HTTPNotImplemented(explanation=msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.message)
        return {"host": host_name, "power_action": result}

    @wsgi.serializers(xml=HostActionTemplate)
    def startup(self, req, id):
        # Power up the host; thin wrapper over _host_power_action.
        return self._host_power_action(req, host_name=id, action="startup")

    @wsgi.serializers(xml=HostActionTemplate)
    def shutdown(self, req, id):
        # Shut the host down.
        return self._host_power_action(req, host_name=id, action="shutdown")

    @wsgi.serializers(xml=HostActionTemplate)
    def reboot(self, req, id):
        # Reboot the host.
        return self._host_power_action(req, host_name=id, action="reboot")

    @staticmethod
    def _get_total_resources(host_name, compute_node):
        # Total physical capacity reported by the compute node.
        return {'resource': {'host': host_name,
                             'project': '(total)',
                             'cpu': compute_node['vcpus'],
                             'memory_mb': compute_node['memory_mb'],
                             'disk_gb': compute_node['local_gb']}}

    @staticmethod
    def _get_used_now_resources(host_name, compute_node):
        # Resources the hypervisor currently reports as in use.
        return {'resource': {'host': host_name,
                             'project': '(used_now)',
                             'cpu': compute_node['vcpus_used'],
                             'memory_mb': compute_node['memory_mb_used'],
                             'disk_gb': compute_node['local_gb_used']}}

    @staticmethod
    def _get_resource_totals_from_instances(host_name, instances):
        # Sum of resources requested by all instances on the host --
        # the maximum possible usage, hence the '(used_max)' label.
        cpu_sum = 0
        mem_sum = 0
        hdd_sum = 0
        for instance in instances:
            cpu_sum += instance['vcpus']
            mem_sum += instance['memory_mb']
            hdd_sum += instance['root_gb'] + instance['ephemeral_gb']

        return {'resource': {'host': host_name,
                             'project': '(used_max)',
                             'cpu': cpu_sum,
                             'memory_mb': mem_sum,
                             'disk_gb': hdd_sum}}

    @staticmethod
    def _get_resources_by_project(host_name, instances):
        # Getting usage resource per project
        project_map = {}
        for instance in instances:
            resource = project_map.setdefault(instance['project_id'],
                    {'host': host_name,
                     'project': instance['project_id'],
                     'cpu': 0,
                     'memory_mb': 0,
                     'disk_gb': 0})
            resource['cpu'] += instance['vcpus']
            resource['memory_mb'] += instance['memory_mb']
            resource['disk_gb'] += (instance['root_gb'] +
                                    instance['ephemeral_gb'])
        return project_map

    @wsgi.serializers(xml=HostShowTemplate)
    def show(self, req, id):
        """Shows the physical/usage resource given by hosts.

        :param id: hostname
        :returns: expected to use HostShowTemplate.
            ex.::

                {'host': {'resource':D},..}
                D: {'host': 'hostname','project': 'admin',
                    'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
        """
        context = req.environ['nova.context']
        host_name = id
        try:
            service = self.api.service_get_by_compute_host(context, host_name)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.message)
        except exception.AdminRequired:
            msg = _("Describe-resource is admin only functionality")
            raise webob.exc.HTTPForbidden(explanation=msg)
        compute_node = service['compute_node'][0]
        instances = self.api.instance_get_all_by_host(context, host_name)
        # Aggregate four views: total capacity, current hypervisor usage,
        # summed instance requests, and a per-project breakdown.
        resources = [self._get_total_resources(host_name, compute_node)]
        resources.append(self._get_used_now_resources(host_name,
                                                      compute_node))
        resources.append(self._get_resource_totals_from_instances(host_name,
                                                                  instances))
        by_proj_resources = self._get_resources_by_project(host_name,
                                                           instances)
        for resource in by_proj_resources.itervalues():
            resources.append({'resource': resource})
        return {'host': resources}
class Hosts(extensions.ExtensionDescriptor):
    """Admin-only host administration."""

    # Extension metadata consumed by the OpenStack API extension framework.
    name = "Hosts"
    alias = "os-hosts"
    namespace = "http://docs.openstack.org/compute/ext/hosts/api/v1.1"
    updated = "2011-06-29T00:00:00+00:00"

    def get_resources(self):
        # Expose /os-hosts with a PUT 'update' collection action plus GET
        # startup/shutdown/reboot member actions (GET for power actions is
        # historical API behavior, preserved for compatibility).
        resources = [extensions.ResourceExtension('os-hosts',
                HostController(),
                collection_actions={'update': 'PUT'},
                member_actions={"startup": "GET", "shutdown": "GET",
                        "reboot": "GET"})]
        return resources
pschmitt/home-assistant | homeassistant/components/homematicip_cloud/cover.py | 5 | 5751 | """Support for HomematicIP Cloud cover devices."""
import logging
from typing import Optional
from homematicip.aio.device import (
AsyncFullFlushBlind,
AsyncFullFlushShutter,
AsyncGarageDoorModuleTormatic,
AsyncHoermannDrivesModule,
)
from homematicip.aio.group import AsyncExtendedLinkedShutterGroup
from homematicip.base.enums import DoorCommand, DoorState
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
CoverEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericDevice
from .hap import HomematicipHAP
_LOGGER = logging.getLogger(__name__)

# HomematicIP level convention: 0 = fully open, 1 = fully closed — the
# inverse of Home Assistant's 0..100 "percent open" cover positions.
HMIP_COVER_OPEN = 0
HMIP_COVER_CLOSED = 1
HMIP_SLATS_OPEN = 0
HMIP_SLATS_CLOSED = 1
async def async_setup_entry(
    hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
    """Set up the HomematicIP cover from a config entry."""
    hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]

    covers = []
    for dev in hap.home.devices:
        # NOTE(review): the blind check must come before the plain shutter
        # check — presumably a full-flush blind is also a full-flush
        # shutter, so the isinstance order is significant; confirm against
        # the homematicip library class hierarchy.
        if isinstance(dev, AsyncFullFlushBlind):
            covers.append(HomematicipCoverSlats(hap, dev))
        elif isinstance(dev, AsyncFullFlushShutter):
            covers.append(HomematicipCoverShutter(hap, dev))
        elif isinstance(
            dev, (AsyncHoermannDrivesModule, AsyncGarageDoorModuleTormatic)
        ):
            covers.append(HomematicipGarageDoorModule(hap, dev))

    covers.extend(
        HomematicipCoverShutterGroup(hap, grp)
        for grp in hap.home.groups
        if isinstance(grp, AsyncExtendedLinkedShutterGroup)
    )

    if covers:
        async_add_entities(covers)
class HomematicipCoverShutter(HomematicipGenericDevice, CoverEntity):
    """Representation of a HomematicIP Cloud cover shutter device."""

    @property
    def current_cover_position(self) -> Optional[int]:
        """Return current position of cover (0 = closed, 100 = open).

        HmIP reports shutterLevel as 0 (open) .. 1 (closed), the inverse
        of Home Assistant's convention, hence the ``1 - x`` mapping.
        Returns None when the device has not reported a level yet.
        """
        if self._device.shutterLevel is not None:
            return int((1 - self._device.shutterLevel) * 100)
        return None

    async def async_set_cover_position(self, **kwargs) -> None:
        """Move the cover to a specific position."""
        position = kwargs[ATTR_POSITION]
        # HmIP cover is closed:1 -> open:0
        level = 1 - position / 100.0
        await self._device.set_shutter_level(level)

    @property
    def is_closed(self) -> Optional[bool]:
        """Return if the cover is closed, or None when the level is unknown."""
        if self._device.shutterLevel is not None:
            return self._device.shutterLevel == HMIP_COVER_CLOSED
        return None

    async def async_open_cover(self, **kwargs) -> None:
        """Open the cover."""
        await self._device.set_shutter_level(HMIP_COVER_OPEN)

    async def async_close_cover(self, **kwargs) -> None:
        """Close the cover."""
        await self._device.set_shutter_level(HMIP_COVER_CLOSED)

    async def async_stop_cover(self, **kwargs) -> None:
        """Stop the device if in motion."""
        await self._device.set_shutter_stop()
class HomematicipCoverSlats(HomematicipCoverShutter, CoverEntity):
    """Representation of a HomematicIP Cloud cover slats device."""

    @property
    def current_cover_tilt_position(self) -> Optional[int]:
        """Return current tilt position of cover (0 = closed, 100 = open).

        Returns None when the device has not reported a slats level yet.
        """
        if self._device.slatsLevel is not None:
            return int((1 - self._device.slatsLevel) * 100)
        return None

    async def async_set_cover_tilt_position(self, **kwargs) -> None:
        """Move the cover to a specific tilt position."""
        position = kwargs[ATTR_TILT_POSITION]
        # HmIP slats is closed:1 -> open:0
        level = 1 - position / 100.0
        await self._device.set_slats_level(level)

    async def async_open_cover_tilt(self, **kwargs) -> None:
        """Open the slats."""
        await self._device.set_slats_level(HMIP_SLATS_OPEN)

    async def async_close_cover_tilt(self, **kwargs) -> None:
        """Close the slats."""
        await self._device.set_slats_level(HMIP_SLATS_CLOSED)

    async def async_stop_cover_tilt(self, **kwargs) -> None:
        """Stop the device if in motion."""
        # The stop command is shared with the shutter drive; there is no
        # separate slats-only stop call on the device.
        await self._device.set_shutter_stop()
class HomematicipGarageDoorModule(HomematicipGenericDevice, CoverEntity):
    """Representation of a HomematicIP Garage Door Module."""

    # Static DoorState -> Home Assistant position mapping, built once at
    # class creation instead of on every property access.
    _DOOR_STATE_TO_POSITION = {
        DoorState.CLOSED: 0,
        DoorState.OPEN: 100,
        DoorState.VENTILATION_POSITION: 10,
        DoorState.POSITION_UNKNOWN: None,
    }

    @property
    def current_cover_position(self) -> Optional[int]:
        """Return current position of cover (0 = closed, 100 = open).

        Returns None when the door state is unknown or unmapped.
        """
        return self._DOOR_STATE_TO_POSITION.get(self._device.doorState)

    @property
    def is_closed(self) -> Optional[bool]:
        """Return if the cover is closed."""
        return self._device.doorState == DoorState.CLOSED

    async def async_open_cover(self, **kwargs) -> None:
        """Open the cover."""
        await self._device.send_door_command(DoorCommand.OPEN)

    async def async_close_cover(self, **kwargs) -> None:
        """Close the cover."""
        await self._device.send_door_command(DoorCommand.CLOSE)

    async def async_stop_cover(self, **kwargs) -> None:
        """Stop the cover."""
        await self._device.send_door_command(DoorCommand.STOP)
class HomematicipCoverShutterGroup(HomematicipCoverSlats, CoverEntity):
    """Representation of a HomematicIP Cloud cover shutter group."""

    def __init__(self, hap: HomematicipHAP, device, post: str = "ShutterGroup") -> None:
        """Initialize switching group.

        ``device`` is a HmIP group object rather than a physical device.
        """
        # NOTE(review): groups carry no modelType of their own; presumably
        # it is set here so the generic device base class can display a
        # model name — confirm against HomematicipGenericDevice.
        device.modelType = f"HmIP-{post}"
        super().__init__(hap, device, post)
| apache-2.0 |
SnabbCo/neutron | neutron/tests/unit/vmware/test_dhcpmeta.py | 8 | 62666 | # Copyright 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo.config import cfg
from neutron.common import constants as n_consts
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.db import api as db
from neutron.plugins.vmware.api_client import exception
from neutron.plugins.vmware.common import exceptions as p_exc
from neutron.plugins.vmware.dbexts import lsn_db
from neutron.plugins.vmware.dhcp_meta import constants
from neutron.plugins.vmware.dhcp_meta import lsnmanager as lsn_man
from neutron.plugins.vmware.dhcp_meta import migration as mig_man
from neutron.plugins.vmware.dhcp_meta import nsx
from neutron.plugins.vmware.dhcp_meta import rpc
from neutron.tests import base
class DhcpMetadataBuilderTestCase(base.BaseTestCase):
    """Unit tests for mig_man.DhcpMetadataBuilder.

    The builder's plugin and notifier collaborators are plain Mocks, so
    these tests only verify call counts/arguments and returned values.
    """

    def setUp(self):
        super(DhcpMetadataBuilderTestCase, self).setUp()
        self.builder = mig_man.DhcpMetadataBuilder(mock.Mock(), mock.Mock())
        self.network_id = 'foo_network_id'
        self.subnet_id = 'foo_subnet_id'
        self.router_id = 'foo_router_id'

    def test_dhcp_agent_get_all(self):
        expected = []
        self.builder.plugin.list_dhcp_agents_hosting_network.return_value = (
            {'agents': expected})
        agents = self.builder.dhcp_agent_get_all(mock.ANY, self.network_id)
        self.assertEqual(expected, agents)

    def test_dhcp_port_get_all(self):
        expected = []
        self.builder.plugin.get_ports.return_value = expected
        ports = self.builder.dhcp_port_get_all(mock.ANY, self.network_id)
        self.assertEqual(expected, ports)

    def test_router_id_get(self):
        # A router-interface port on the subnet implies the router id.
        port = {
            'device_id': self.router_id,
            'network_id': self.network_id,
            'fixed_ips': [{'subnet_id': self.subnet_id}]
        }
        subnet = {
            'id': self.subnet_id,
            'network_id': self.network_id
        }
        self.builder.plugin.get_ports.return_value = [port]
        # NOTE(review): the ``context`` *module* is passed where a request
        # context is expected; harmless here because the plugin is mocked.
        result = self.builder.router_id_get(context, subnet)
        self.assertEqual(self.router_id, result)

    def test_router_id_get_none_subnet(self):
        self.assertIsNone(self.builder.router_id_get(mock.ANY, None))

    def test_router_id_get_none_no_router(self):
        self.builder.plugin.get_ports.return_value = []
        subnet = {'network_id': self.network_id}
        self.assertIsNone(self.builder.router_id_get(mock.ANY, subnet))

    def test_metadata_deallocate(self):
        self.builder.metadata_deallocate(
            mock.ANY, self.router_id, self.subnet_id)
        self.assertTrue(self.builder.plugin.remove_router_interface.call_count)

    def test_metadata_allocate(self):
        self.builder.metadata_allocate(
            mock.ANY, self.router_id, self.subnet_id)
        self.assertTrue(self.builder.plugin.add_router_interface.call_count)

    def test_dhcp_deallocate(self):
        agents = [{'id': 'foo_agent_id'}]
        ports = [{'id': 'foo_port_id'}]
        self.builder.dhcp_deallocate(mock.ANY, self.network_id, agents, ports)
        self.assertTrue(
            self.builder.plugin.remove_network_from_dhcp_agent.call_count)
        self.assertTrue(self.builder.plugin.delete_port.call_count)

    def _test_dhcp_allocate(self, subnet, expected_notify_count):
        # Helper: dhcp_allocate should always hand off to the NSX DHCP
        # access handler; notifications fire only when a subnet is given.
        with mock.patch.object(mig_man.nsx, 'handle_network_dhcp_access') as f:
            self.builder.dhcp_allocate(mock.ANY, self.network_id, subnet)
            self.assertTrue(f.call_count)
            self.assertEqual(expected_notify_count,
                             self.builder.notifier.notify.call_count)

    def test_dhcp_allocate(self):
        subnet = {'network_id': self.network_id, 'id': self.subnet_id}
        self._test_dhcp_allocate(subnet, 2)

    def test_dhcp_allocate_none_subnet(self):
        self._test_dhcp_allocate(None, 0)
class MigrationManagerTestCase(base.BaseTestCase):
    """Unit tests for mig_man.MigrationManager.

    The manager's builder collaborator is patched with a Mock so the
    migrate tests can assert the exact order of builder calls.
    """

    def setUp(self):
        super(MigrationManagerTestCase, self).setUp()
        self.manager = mig_man.MigrationManager(mock.Mock(),
                                                mock.Mock(),
                                                mock.Mock())
        self.network_id = 'foo_network_id'
        self.router_id = 'foo_router_id'
        self.subnet_id = 'foo_subnet_id'
        self.mock_builder_p = mock.patch.object(self.manager, 'builder')
        self.mock_builder = self.mock_builder_p.start()

    def _test_validate(self, lsn_exists=False, ext_net=False, subnets=None):
        # Helper: validate() returns the single subnet, or None when the
        # network has no subnets; conflict cases raise from validate().
        network = {'router:external': ext_net}
        self.manager.manager.lsn_exists.return_value = lsn_exists
        self.manager.plugin.get_network.return_value = network
        self.manager.plugin.get_subnets.return_value = subnets
        result = self.manager.validate(mock.ANY, self.network_id)
        if len(subnets):
            self.assertEqual(subnets[0], result)
        else:
            self.assertIsNone(result)

    def test_validate_no_subnets(self):
        self._test_validate(subnets=[])

    def test_validate_with_one_subnet(self):
        self._test_validate(subnets=[{'cidr': '0.0.0.0/0'}])

    def test_validate_raise_conflict_many_subnets(self):
        self.assertRaises(p_exc.LsnMigrationConflict,
                          self._test_validate,
                          subnets=[{'id': 'sub1'}, {'id': 'sub2'}])

    def test_validate_raise_conflict_lsn_exists(self):
        self.assertRaises(p_exc.LsnMigrationConflict,
                          self._test_validate,
                          lsn_exists=True)

    def test_validate_raise_badrequest_external_net(self):
        self.assertRaises(n_exc.BadRequest,
                          self._test_validate,
                          ext_net=True)

    def test_validate_raise_badrequest_metadata_net(self):
        self.assertRaises(n_exc.BadRequest,
                          self._test_validate,
                          ext_net=False,
                          subnets=[{'cidr': rpc.METADATA_SUBNET_CIDR}])

    def _test_migrate(self, router, subnet, expected_calls):
        self.mock_builder.router_id_get.return_value = router
        self.manager.migrate(mock.ANY, self.network_id, subnet)
        # testing the exact order of calls is important
        self.assertEqual(expected_calls, self.mock_builder.mock_calls)

    def test_migrate(self):
        subnet = {
            'id': self.subnet_id,
            'network_id': self.network_id
        }
        call_sequence = [
            mock.call.router_id_get(mock.ANY, subnet),
            mock.call.metadata_deallocate(
                mock.ANY, self.router_id, self.subnet_id),
            mock.call.dhcp_agent_get_all(mock.ANY, self.network_id),
            mock.call.dhcp_port_get_all(mock.ANY, self.network_id),
            mock.call.dhcp_deallocate(
                mock.ANY, self.network_id, mock.ANY, mock.ANY),
            mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet),
            mock.call.metadata_allocate(
                mock.ANY, self.router_id, self.subnet_id)
        ]
        self._test_migrate(self.router_id, subnet, call_sequence)

    def test_migrate_no_router_uplink(self):
        # Without a router uplink the metadata (de)allocate steps are skipped.
        subnet = {
            'id': self.subnet_id,
            'network_id': self.network_id
        }
        call_sequence = [
            mock.call.router_id_get(mock.ANY, subnet),
            mock.call.dhcp_agent_get_all(mock.ANY, self.network_id),
            mock.call.dhcp_port_get_all(mock.ANY, self.network_id),
            mock.call.dhcp_deallocate(
                mock.ANY, self.network_id, mock.ANY, mock.ANY),
            mock.call.dhcp_allocate(mock.ANY, self.network_id, subnet),
        ]
        self._test_migrate(None, subnet, call_sequence)

    def test_migrate_no_subnet(self):
        call_sequence = [
            mock.call.router_id_get(mock.ANY, None),
            mock.call.dhcp_allocate(mock.ANY, self.network_id, None),
        ]
        self._test_migrate(None, None, call_sequence)

    def _test_report(self, lsn_attrs, expected):
        self.manager.manager.lsn_port_get.return_value = lsn_attrs
        report = self.manager.report(mock.ANY, self.network_id, self.subnet_id)
        self.assertEqual(expected, report)

    def test_report_for_lsn(self):
        self._test_report(('foo_lsn_id', 'foo_lsn_port_id'),
                          {'ports': ['foo_lsn_port_id'],
                           'services': ['foo_lsn_id'], 'type': 'lsn'})

    def test_report_for_lsn_without_lsn_port(self):
        self._test_report(('foo_lsn_id', None),
                          {'ports': [],
                           'services': ['foo_lsn_id'], 'type': 'lsn'})

    def _test_report_for_lsn_without_subnet(self, validated_subnet):
        with mock.patch.object(self.manager.plugin, 'get_subnets',
                               return_value=validated_subnet):
            self.manager.manager.lsn_port_get.return_value = (
                ('foo_lsn_id', 'foo_lsn_port_id'))
            # NOTE(review): the ``context`` *module* is passed where a
            # request context is expected; harmless with mocked plugin.
            report = self.manager.report(context, self.network_id)
            expected = {
                'ports': ['foo_lsn_port_id'] if validated_subnet else [],
                'services': ['foo_lsn_id'], 'type': 'lsn'
            }
            self.assertEqual(expected, report)

    def test_report_for_lsn_without_subnet_subnet_found(self):
        self._test_report_for_lsn_without_subnet([{'id': self.subnet_id}])

    def test_report_for_lsn_without_subnet_subnet_not_found(self):
        self.manager.manager.lsn_get.return_value = 'foo_lsn_id'
        self._test_report_for_lsn_without_subnet(None)

    def test_report_for_dhcp_agent(self):
        # When no LSN port exists the report falls back to agent mode.
        self.manager.manager.lsn_port_get.return_value = (None, None)
        self.mock_builder.dhcp_agent_get_all.return_value = (
            [{'id': 'foo_agent_id'}])
        self.mock_builder.dhcp_port_get_all.return_value = (
            [{'id': 'foo_dhcp_port_id'}])
        result = self.manager.report(mock.ANY, self.network_id, self.subnet_id)
        expected = {
            'ports': ['foo_dhcp_port_id'],
            'services': ['foo_agent_id'],
            'type': 'agent'
        }
        self.assertEqual(expected, result)
class LsnManagerTestCase(base.BaseTestCase):
    """Unit tests for lsn_man.LsnManager.

    The low-level LSN API (lsn_man.lsn_api) and NSX utils are patched, so
    each test checks how the manager translates backend results/errors
    into plugin-level behavior and exceptions.
    """

    def setUp(self):
        super(LsnManagerTestCase, self).setUp()
        self.net_id = 'foo_network_id'
        self.sub_id = 'foo_subnet_id'
        self.port_id = 'foo_port_id'
        self.lsn_id = 'foo_lsn_id'
        self.mac = 'aa:bb:cc:dd:ee:ff'
        self.switch_id = 'foo_switch_id'
        self.lsn_port_id = 'foo_lsn_port_id'
        self.tenant_id = 'foo_tenant_id'
        self.manager = lsn_man.LsnManager(mock.Mock())
        self.context = context.get_admin_context()
        # Patch out the backend API modules so no NSX controller is needed.
        self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api')
        self.mock_lsn_api = self.mock_lsn_api_p.start()
        self.mock_nsx_utils_p = mock.patch.object(lsn_man, 'nsx_utils')
        self.mock_nsx_utils = self.mock_nsx_utils_p.start()
        nsx.register_dhcp_opts(cfg)
        nsx.register_metadata_opts(cfg)

    def test_lsn_get(self):
        self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id
        expected = self.manager.lsn_get(mock.ANY, self.net_id)
        self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
            mock.ANY, self.net_id)
        self.assertEqual(expected, self.lsn_id)

    def _test_lsn_get_raise_not_found_with_exc(self, exc):
        # Backend errors are translated into LsnNotFound.
        self.mock_lsn_api.lsn_for_network_get.side_effect = exc
        self.assertRaises(p_exc.LsnNotFound,
                          self.manager.lsn_get,
                          mock.ANY, self.net_id)
        self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
            mock.ANY, self.net_id)

    def test_lsn_get_raise_not_found_with_not_found(self):
        self._test_lsn_get_raise_not_found_with_exc(n_exc.NotFound)

    def test_lsn_get_raise_not_found_with_api_error(self):
        self._test_lsn_get_raise_not_found_with_exc(exception.NsxApiException)

    def _test_lsn_get_silent_raise_with_exc(self, exc):
        # With raise_on_err=False errors become a None result instead.
        self.mock_lsn_api.lsn_for_network_get.side_effect = exc
        expected = self.manager.lsn_get(
            mock.ANY, self.net_id, raise_on_err=False)
        self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
            mock.ANY, self.net_id)
        self.assertIsNone(expected)

    def test_lsn_get_silent_raise_with_not_found(self):
        self._test_lsn_get_silent_raise_with_exc(n_exc.NotFound)

    def test_lsn_get_silent_raise_with_api_error(self):
        self._test_lsn_get_silent_raise_with_exc(exception.NsxApiException)

    def test_lsn_create(self):
        self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id
        self.manager.lsn_create(mock.ANY, self.net_id)
        self.mock_lsn_api.lsn_for_network_create.assert_called_once_with(
            mock.ANY, self.net_id)

    def test_lsn_create_raise_api_error(self):
        self.mock_lsn_api.lsn_for_network_create.side_effect = (
            exception.NsxApiException)
        self.assertRaises(p_exc.NsxPluginException,
                          self.manager.lsn_create,
                          mock.ANY, self.net_id)
        self.mock_lsn_api.lsn_for_network_create.assert_called_once_with(
            mock.ANY, self.net_id)

    def test_lsn_delete(self):
        self.manager.lsn_delete(mock.ANY, self.lsn_id)
        self.mock_lsn_api.lsn_delete.assert_called_once_with(
            mock.ANY, self.lsn_id)

    def _test_lsn_delete_with_exc(self, exc):
        # Deletion errors are swallowed: delete is best-effort.
        self.mock_lsn_api.lsn_delete.side_effect = exc
        self.manager.lsn_delete(mock.ANY, self.lsn_id)
        self.mock_lsn_api.lsn_delete.assert_called_once_with(
            mock.ANY, self.lsn_id)

    def test_lsn_delete_with_not_found(self):
        self._test_lsn_delete_with_exc(n_exc.NotFound)

    def test_lsn_delete_api_exception(self):
        self._test_lsn_delete_with_exc(exception.NsxApiException)

    def test_lsn_delete_by_network(self):
        self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id
        with mock.patch.object(self.manager, 'lsn_delete') as f:
            self.manager.lsn_delete_by_network(mock.ANY, self.net_id)
            self.mock_lsn_api.lsn_for_network_get.assert_called_once_with(
                mock.ANY, self.net_id)
            f.assert_called_once_with(mock.ANY, self.lsn_id)

    def _test_lsn_delete_by_network_with_exc(self, exc):
        # Lookup failures are logged (one warn call), not raised.
        self.mock_lsn_api.lsn_for_network_get.side_effect = exc
        with mock.patch.object(lsn_man.LOG, 'warn') as l:
            self.manager.lsn_delete_by_network(mock.ANY, self.net_id)
            self.assertEqual(1, l.call_count)

    def test_lsn_delete_by_network_with_not_found(self):
        self._test_lsn_delete_by_network_with_exc(n_exc.NotFound)

    def test_lsn_delete_by_network_with_not_api_error(self):
        self._test_lsn_delete_by_network_with_exc(exception.NsxApiException)

    def test_lsn_port_get(self):
        self.mock_lsn_api.lsn_port_by_subnet_get.return_value = (
            self.lsn_port_id)
        with mock.patch.object(
            self.manager, 'lsn_get', return_value=self.lsn_id):
            expected = self.manager.lsn_port_get(
                mock.ANY, self.net_id, self.sub_id)
            self.assertEqual(expected, (self.lsn_id, self.lsn_port_id))

    def test_lsn_port_get_lsn_not_found_on_raise(self):
        with mock.patch.object(
            self.manager, 'lsn_get',
            side_effect=p_exc.LsnNotFound(entity='network',
                                          entity_id=self.net_id)):
            self.assertRaises(p_exc.LsnNotFound,
                              self.manager.lsn_port_get,
                              mock.ANY, self.net_id, self.sub_id)

    def test_lsn_port_get_lsn_not_found_silent_raise(self):
        with mock.patch.object(self.manager, 'lsn_get', return_value=None):
            expected = self.manager.lsn_port_get(
                mock.ANY, self.net_id, self.sub_id, raise_on_err=False)
            self.assertEqual(expected, (None, None))

    def test_lsn_port_get_port_not_found_on_raise(self):
        self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound
        with mock.patch.object(
            self.manager, 'lsn_get', return_value=self.lsn_id):
            self.assertRaises(p_exc.LsnPortNotFound,
                              self.manager.lsn_port_get,
                              mock.ANY, self.net_id, self.sub_id)

    def test_lsn_port_get_port_not_found_silent_raise(self):
        self.mock_lsn_api.lsn_port_by_subnet_get.side_effect = n_exc.NotFound
        with mock.patch.object(
            self.manager, 'lsn_get', return_value=self.lsn_id):
            expected = self.manager.lsn_port_get(
                mock.ANY, self.net_id, self.sub_id, raise_on_err=False)
            self.assertEqual(expected, (self.lsn_id, None))

    def test_lsn_port_create(self):
        self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
        expected = self.manager.lsn_port_create(mock.ANY, mock.ANY, mock.ANY)
        self.assertEqual(expected, self.lsn_port_id)

    def _test_lsn_port_create_with_exc(self, exc, expected):
        self.mock_lsn_api.lsn_port_create.side_effect = exc
        self.assertRaises(expected,
                          self.manager.lsn_port_create,
                          mock.ANY, mock.ANY, mock.ANY)

    def test_lsn_port_create_with_not_found(self):
        self._test_lsn_port_create_with_exc(n_exc.NotFound, p_exc.LsnNotFound)

    def test_lsn_port_create_api_exception(self):
        self._test_lsn_port_create_with_exc(exception.NsxApiException,
                                            p_exc.NsxPluginException)

    def test_lsn_port_delete(self):
        self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY)
        self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count)

    def _test_lsn_port_delete_with_exc(self, exc):
        # Port deletion errors are logged, not raised (best-effort).
        self.mock_lsn_api.lsn_port_delete.side_effect = exc
        with mock.patch.object(lsn_man.LOG, 'warn') as l:
            self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY)
            self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count)
            self.assertEqual(1, l.call_count)

    def test_lsn_port_delete_with_not_found(self):
        self._test_lsn_port_delete_with_exc(n_exc.NotFound)

    def test_lsn_port_delete_api_exception(self):
        self._test_lsn_port_delete_with_exc(exception.NsxApiException)

    def _test_lsn_port_dhcp_setup(self, ret_val, sub):
        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
        self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
        with mock.patch.object(
            self.manager, 'lsn_get', return_value=self.lsn_id):
            with mock.patch.object(lsn_man.switch_api,
                                   'get_port_by_neutron_tag'):
                expected = self.manager.lsn_port_dhcp_setup(
                    mock.Mock(), mock.ANY, mock.ANY,
                    mock.ANY, subnet_config=sub)
                self.assertEqual(
                    1, self.mock_lsn_api.lsn_port_create.call_count)
                self.assertEqual(
                    1, self.mock_lsn_api.lsn_port_plug_network.call_count)
                self.assertEqual(expected, ret_val)

    def test_lsn_port_dhcp_setup(self):
        self._test_lsn_port_dhcp_setup((self.lsn_id, self.lsn_port_id), None)

    def test_lsn_port_dhcp_setup_with_config(self):
        with mock.patch.object(self.manager, 'lsn_port_dhcp_configure') as f:
            self._test_lsn_port_dhcp_setup(None, mock.ANY)
            self.assertEqual(1, f.call_count)

    def test_lsn_port_dhcp_setup_with_not_found(self):
        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
        with mock.patch.object(lsn_man.switch_api,
                               'get_port_by_neutron_tag') as f:
            f.side_effect = n_exc.NotFound
            self.assertRaises(p_exc.PortConfigurationError,
                              self.manager.lsn_port_dhcp_setup,
                              mock.Mock(), mock.ANY, mock.ANY, mock.ANY)

    def test_lsn_port_dhcp_setup_with_conflict(self):
        # A plug-network conflict must roll back the created LSN port.
        self.mock_lsn_api.lsn_port_plug_network.side_effect = (
            p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id))
        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
        with mock.patch.object(lsn_man.switch_api, 'get_port_by_neutron_tag'):
            with mock.patch.object(self.manager, 'lsn_port_delete') as g:
                self.assertRaises(p_exc.PortConfigurationError,
                                  self.manager.lsn_port_dhcp_setup,
                                  mock.Mock(), mock.ANY, mock.ANY, mock.ANY)
                self.assertEqual(1, g.call_count)

    def _test_lsn_port_dhcp_configure_with_subnet(
        self, expected, dns=None, gw=None, routes=None):
        # Helper: check the DHCP option dict handed to the backend for a
        # subnet with the given DNS servers, gateway and host routes.
        subnet = {
            'enable_dhcp': True,
            'dns_nameservers': dns or [],
            'gateway_ip': gw,
            'host_routes': routes
        }
        self.manager.lsn_port_dhcp_configure(mock.ANY, self.lsn_id,
                                             self.lsn_port_id, subnet)
        self.mock_lsn_api.lsn_port_dhcp_configure.assert_called_once_with(
            mock.ANY, self.lsn_id, self.lsn_port_id, subnet['enable_dhcp'],
            expected)

    def test_lsn_port_dhcp_configure(self):
        expected = {
            'routers': '127.0.0.1',
            'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
            'domain_name': cfg.CONF.NSX_DHCP.domain_name
        }
        self._test_lsn_port_dhcp_configure_with_subnet(
            expected, dns=[], gw='127.0.0.1', routes=[])

    def test_lsn_port_dhcp_configure_gatewayless(self):
        expected = {
            'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
            'domain_name': cfg.CONF.NSX_DHCP.domain_name
        }
        self._test_lsn_port_dhcp_configure_with_subnet(expected, gw=None)

    def test_lsn_port_dhcp_configure_with_extra_dns_servers(self):
        expected = {
            'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
            'domain_name_servers': '8.8.8.8,9.9.9.9',
            'domain_name': cfg.CONF.NSX_DHCP.domain_name
        }
        self._test_lsn_port_dhcp_configure_with_subnet(
            expected, dns=['8.8.8.8', '9.9.9.9'])

    def test_lsn_port_dhcp_configure_with_host_routes(self):
        expected = {
            'default_lease_time': cfg.CONF.NSX_DHCP.default_lease_time,
            'domain_name': cfg.CONF.NSX_DHCP.domain_name,
            'classless_static_routes': '8.8.8.8,9.9.9.9'
        }
        self._test_lsn_port_dhcp_configure_with_subnet(
            expected, routes=['8.8.8.8', '9.9.9.9'])

    def _test_lsn_metadata_configure(self, is_enabled):
        with mock.patch.object(self.manager, 'lsn_port_dispose') as f:
            self.manager.plugin.get_subnet.return_value = (
                {'network_id': self.net_id})
            self.manager.lsn_metadata_configure(mock.ANY,
                                                self.sub_id, is_enabled)
            expected = {
                'metadata_server_port': 8775,
                'metadata_server_ip': '127.0.0.1',
                'metadata_proxy_shared_secret': ''
            }
            self.mock_lsn_api.lsn_metadata_configure.assert_called_once_with(
                mock.ANY, mock.ANY, is_enabled, expected)
            if is_enabled:
                self.assertEqual(
                    1, self.mock_lsn_api.lsn_port_by_subnet_get.call_count)
            else:
                self.assertEqual(1, f.call_count)

    def test_lsn_metadata_configure_enabled(self):
        self._test_lsn_metadata_configure(True)

    def test_lsn_metadata_configure_disabled(self):
        self._test_lsn_metadata_configure(False)

    def test_lsn_metadata_configure_not_found(self):
        self.mock_lsn_api.lsn_metadata_configure.side_effect = (
            p_exc.LsnNotFound(entity='lsn', entity_id=self.lsn_id))
        self.manager.plugin.get_subnet.return_value = (
            {'network_id': self.net_id})
        self.assertRaises(p_exc.NsxPluginException,
                          self.manager.lsn_metadata_configure,
                          mock.ANY, self.sub_id, True)

    def test_lsn_port_metadata_setup(self):
        subnet = {
            'cidr': '0.0.0.0/0',
            'id': self.sub_id,
            'network_id': self.net_id,
            'tenant_id': self.tenant_id
        }
        expected_data = {
            'subnet_id': subnet['id'],
            'ip_address': subnet['cidr'],
            'mac_address': constants.METADATA_MAC
        }
        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
        with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
            with mock.patch.object(self.manager, 'lsn_port_create') as g:
                f.return_value = {'uuid': self.port_id}
                self.manager.lsn_port_metadata_setup(
                    self.context, self.lsn_id, subnet)
                (self.mock_lsn_api.lsn_port_plug_network.
                 assert_called_once_with(mock.ANY, self.lsn_id,
                                         mock.ANY, self.port_id))
                g.assert_called_once_with(
                    self.context, self.lsn_id, expected_data)

    def test_lsn_port_metadata_setup_raise_not_found(self):
        subnet = {
            'cidr': '0.0.0.0/0',
            'id': self.sub_id,
            'network_id': self.net_id,
            'tenant_id': self.tenant_id
        }
        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
        with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
            f.side_effect = n_exc.NotFound
            self.assertRaises(p_exc.PortConfigurationError,
                              self.manager.lsn_port_metadata_setup,
                              mock.Mock(), self.lsn_id, subnet)

    def test_lsn_port_metadata_setup_raise_conflict(self):
        # Conflict on plug must delete both the LSN port and the lport.
        subnet = {
            'cidr': '0.0.0.0/0',
            'id': self.sub_id,
            'network_id': self.net_id,
            'tenant_id': self.tenant_id
        }
        self.mock_nsx_utils.get_nsx_switch_ids.return_value = [self.switch_id]
        with mock.patch.object(lsn_man.switch_api, 'create_lport') as f:
            with mock.patch.object(lsn_man.switch_api, 'delete_port') as g:
                f.return_value = {'uuid': self.port_id}
                self.mock_lsn_api.lsn_port_plug_network.side_effect = (
                    p_exc.LsnConfigurationConflict(lsn_id=self.lsn_id))
                self.assertRaises(p_exc.PortConfigurationError,
                                  self.manager.lsn_port_metadata_setup,
                                  mock.Mock(), self.lsn_id, subnet)
                self.assertEqual(1,
                                 self.mock_lsn_api.lsn_port_delete.call_count)
                self.assertEqual(1, g.call_count)

    def _test_lsn_port_dispose_with_values(self, lsn_id, lsn_port_id, count):
        with mock.patch.object(self.manager,
                               'lsn_port_get_by_mac',
                               return_value=(lsn_id, lsn_port_id)):
            self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac)
            self.assertEqual(count,
                             self.mock_lsn_api.lsn_port_delete.call_count)

    def test_lsn_port_dispose(self):
        self._test_lsn_port_dispose_with_values(
            self.lsn_id, self.lsn_port_id, 1)

    def test_lsn_port_dispose_meta_mac(self):
        # Disposing the metadata MAC also removes the metadata lport.
        self.mac = constants.METADATA_MAC
        with mock.patch.object(lsn_man.switch_api,
                               'get_port_by_neutron_tag') as f:
            with mock.patch.object(lsn_man.switch_api, 'delete_port') as g:
                f.return_value = {'uuid': self.port_id}
                self._test_lsn_port_dispose_with_values(
                    self.lsn_id, self.lsn_port_id, 1)
                f.assert_called_once_with(
                    mock.ANY, self.net_id, constants.METADATA_PORT_ID)
                g.assert_called_once_with(mock.ANY, self.net_id, self.port_id)

    def test_lsn_port_dispose_lsn_not_found(self):
        self._test_lsn_port_dispose_with_values(None, None, 0)

    def test_lsn_port_dispose_lsn_port_not_found(self):
        self._test_lsn_port_dispose_with_values(self.lsn_id, None, 0)

    def test_lsn_port_dispose_api_error(self):
        self.mock_lsn_api.lsn_port_delete.side_effect = (
            exception.NsxApiException)
        with mock.patch.object(lsn_man.LOG, 'warn') as l:
            self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac)
            self.assertEqual(1, l.call_count)

    def test_lsn_port_host_conf(self):
        with mock.patch.object(self.manager,
                               'lsn_port_get',
                               return_value=(self.lsn_id, self.lsn_port_id)):
            f = mock.Mock()
            self.manager._lsn_port_host_conf(mock.ANY, self.net_id,
                                             self.sub_id, mock.ANY, f)
            self.assertEqual(1, f.call_count)

    def test_lsn_port_host_conf_lsn_port_not_found(self):
        with mock.patch.object(
            self.manager, 'lsn_port_get', return_value=(None, None)) as f:
            self.manager._lsn_port_host_conf(
                mock.ANY, self.net_id, self.sub_id, mock.ANY, mock.Mock())
            self.assertEqual(1, f.call_count)

    def _test_lsn_port_update(self, dhcp=None, meta=None):
        # One host-entries update per non-None section (dhcp/meta).
        self.manager.lsn_port_update(
            mock.ANY, self.net_id, self.sub_id, dhcp, meta)
        count = 1 if dhcp else 0
        count = count + 1 if meta else count
        self.assertEqual(count, (self.mock_lsn_api.
                                 lsn_port_host_entries_update.call_count))

    def test_lsn_port_update(self):
        self._test_lsn_port_update()

    def test_lsn_port_update_dhcp_meta(self):
        self._test_lsn_port_update(mock.ANY, mock.ANY)

    def test_lsn_port_update_dhcp_and_nometa(self):
        self._test_lsn_port_update(mock.ANY, None)

    def test_lsn_port_update_nodhcp_and_nmeta(self):
        self._test_lsn_port_update(None, mock.ANY)

    def test_lsn_port_update_raise_error(self):
        self.mock_lsn_api.lsn_port_host_entries_update.side_effect = (
            exception.NsxApiException)
        self.assertRaises(p_exc.PortConfigurationError,
                          self.manager.lsn_port_update,
                          mock.ANY, mock.ANY, mock.ANY, mock.ANY)
class PersistentLsnManagerTestCase(base.BaseTestCase):
    def setUp(self):
        """Create a PersistentLsnManager backed by a fresh in-memory DB."""
        super(PersistentLsnManagerTestCase, self).setUp()
        self.net_id = 'foo_network_id'
        self.sub_id = 'foo_subnet_id'
        self.port_id = 'foo_port_id'
        self.lsn_id = 'foo_lsn_id'
        self.mac = 'aa:bb:cc:dd:ee:ff'
        self.lsn_port_id = 'foo_lsn_port_id'
        self.tenant_id = 'foo_tenant_id'
        db.configure_db()
        nsx.register_dhcp_opts(cfg)
        nsx.register_metadata_opts(cfg)
        lsn_man.register_lsn_opts(cfg)
        self.manager = lsn_man.PersistentLsnManager(mock.Mock())
        self.context = context.get_admin_context()
        # Only the remote LSN API is mocked; the DB layer is exercised.
        self.mock_lsn_api_p = mock.patch.object(lsn_man, 'lsn_api')
        self.mock_lsn_api = self.mock_lsn_api_p.start()
        self.addCleanup(db.clear_db)
    def test_lsn_get(self):
        """A DB-stored LSN mapping is returned by lsn_get."""
        lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
        result = self.manager.lsn_get(self.context, self.net_id)
        self.assertEqual(self.lsn_id, result)
    def test_lsn_get_raise_not_found(self):
        """lsn_get raises LsnNotFound when no mapping exists."""
        self.assertRaises(p_exc.LsnNotFound,
                          self.manager.lsn_get, self.context, self.net_id)
    def test_lsn_get_silent_not_found(self):
        """With raise_on_err=False a missing mapping yields None."""
        result = self.manager.lsn_get(
            self.context, self.net_id, raise_on_err=False)
        self.assertIsNone(result)
    def test_lsn_get_sync_on_missing(self):
        """With sync_on_missing_data on, lsn_get backfills from the backend."""
        cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN')
        # Re-create the manager so it picks up the overridden option.
        self.manager = lsn_man.PersistentLsnManager(mock.Mock())
        with mock.patch.object(self.manager, 'lsn_save') as f:
            self.manager.lsn_get(self.context, self.net_id, raise_on_err=True)
            self.assertTrue(self.mock_lsn_api.lsn_for_network_get.call_count)
            self.assertTrue(f.call_count)
    def test_lsn_save(self):
        """lsn_save persists a mapping that lsn_get can read back."""
        self.manager.lsn_save(self.context, self.net_id, self.lsn_id)
        result = self.manager.lsn_get(self.context, self.net_id)
        self.assertEqual(self.lsn_id, result)
    def test_lsn_create(self):
        """lsn_create calls the backend and saves the mapping."""
        self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id
        with mock.patch.object(self.manager, 'lsn_save') as f:
            result = self.manager.lsn_create(self.context, self.net_id)
            self.assertTrue(
                self.mock_lsn_api.lsn_for_network_create.call_count)
            self.assertTrue(f.call_count)
            self.assertEqual(self.lsn_id, result)
    def test_lsn_create_failure(self):
        """A failed save rolls back by deleting the backend LSN."""
        with mock.patch.object(
            self.manager, 'lsn_save',
            side_effect=p_exc.NsxPluginException(err_msg='')):
            self.assertRaises(p_exc.NsxPluginException,
                              self.manager.lsn_create,
                              self.context, self.net_id)
            self.assertTrue(self.mock_lsn_api.lsn_delete.call_count)
    def test_lsn_delete(self):
        """lsn_delete removes the persisted mapping."""
        self.mock_lsn_api.lsn_for_network_create.return_value = self.lsn_id
        self.manager.lsn_create(self.context, self.net_id)
        self.manager.lsn_delete(self.context, self.lsn_id)
        self.assertIsNone(self.manager.lsn_get(
            self.context, self.net_id, raise_on_err=False))
    def test_lsn_delete_not_existent(self):
        """Deleting an unknown LSN still calls the backend and is silent."""
        self.manager.lsn_delete(self.context, self.lsn_id)
        self.assertTrue(self.mock_lsn_api.lsn_delete.call_count)
def test_lsn_port_get(self):
lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id,
self.sub_id, self.mac, self.lsn_id)
res = self.manager.lsn_port_get(self.context, self.net_id, self.sub_id)
self.assertEqual((self.lsn_id, self.lsn_port_id), res)
def test_lsn_port_get_raise_not_found(self):
self.assertRaises(p_exc.LsnPortNotFound,
self.manager.lsn_port_get,
self.context, self.net_id, self.sub_id)
def test_lsn_port_get_silent_not_found(self):
result = self.manager.lsn_port_get(
self.context, self.net_id, self.sub_id, raise_on_err=False)
self.assertEqual((None, None), result)
def test_lsn_port_get_sync_on_missing(self):
return
cfg.CONF.set_override('sync_on_missing_data', True, 'NSX_LSN')
self.manager = lsn_man.PersistentLsnManager(mock.Mock())
self.mock_lsn_api.lsn_for_network_get.return_value = self.lsn_id
self.mock_lsn_api.lsn_port_by_subnet_get.return_value = (
self.lsn_id, self.lsn_port_id)
with mock.patch.object(self.manager, 'lsn_save') as f:
with mock.patch.object(self.manager, 'lsn_port_save') as g:
self.manager.lsn_port_get(
self.context, self.net_id, self.sub_id)
self.assertTrue(
self.mock_lsn_api.lsn_port_by_subnet_get.call_count)
self.assertTrue(
self.mock_lsn_api.lsn_port_info_get.call_count)
self.assertTrue(f.call_count)
self.assertTrue(g.call_count)
def test_lsn_port_get_by_mac(self):
lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id,
self.sub_id, self.mac, self.lsn_id)
res = self.manager.lsn_port_get_by_mac(
self.context, self.net_id, self.mac)
self.assertEqual((self.lsn_id, self.lsn_port_id), res)
def test_lsn_port_get_by_mac_raise_not_found(self):
self.assertRaises(p_exc.LsnPortNotFound,
self.manager.lsn_port_get_by_mac,
self.context, self.net_id, self.sub_id)
def test_lsn_port_get_by_mac_silent_not_found(self):
result = self.manager.lsn_port_get_by_mac(
self.context, self.net_id, self.sub_id, raise_on_err=False)
self.assertEqual((None, None), result)
def test_lsn_port_create(self):
lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
self.mock_lsn_api.lsn_port_create.return_value = self.lsn_port_id
subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac}
with mock.patch.object(self.manager, 'lsn_port_save') as f:
result = self.manager.lsn_port_create(
self.context, self.net_id, subnet)
self.assertTrue(
self.mock_lsn_api.lsn_port_create.call_count)
self.assertTrue(f.call_count)
self.assertEqual(self.lsn_port_id, result)
def test_lsn_port_create_failure(self):
subnet = {'subnet_id': self.sub_id, 'mac_address': self.mac}
with mock.patch.object(
self.manager, 'lsn_port_save',
side_effect=p_exc.NsxPluginException(err_msg='')):
self.assertRaises(p_exc.NsxPluginException,
self.manager.lsn_port_create,
self.context, self.net_id, subnet)
self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count)
def test_lsn_port_delete(self):
lsn_db.lsn_add(self.context, self.net_id, self.lsn_id)
lsn_db.lsn_port_add_for_lsn(self.context, self.lsn_port_id,
self.sub_id, self.mac, self.lsn_id)
self.manager.lsn_port_delete(
self.context, self.lsn_id, self.lsn_port_id)
self.assertEqual((None, None), self.manager.lsn_port_get(
self.context, self.lsn_id, self.sub_id, raise_on_err=False))
def test_lsn_port_delete_not_existent(self):
self.manager.lsn_port_delete(
self.context, self.lsn_id, self.lsn_port_id)
self.assertTrue(self.mock_lsn_api.lsn_port_delete.call_count)
def test_lsn_port_save(self):
self.manager.lsn_save(self.context, self.net_id, self.lsn_id)
self.manager.lsn_port_save(self.context, self.lsn_port_id,
self.sub_id, self.mac, self.lsn_id)
result = self.manager.lsn_port_get(
self.context, self.net_id, self.sub_id, raise_on_err=False)
self.assertEqual((self.lsn_id, self.lsn_port_id), result)
class DhcpAgentNotifyAPITestCase(base.BaseTestCase):
    """Exercise nsx.DhcpAgentNotifyAPI event dispatch.

    The plugin and LSN manager are replaced with mocks; each test feeds a
    resource event payload to ``notifier.notify`` and verifies which LSN
    manager / plugin calls result.
    """
    def setUp(self):
        super(DhcpAgentNotifyAPITestCase, self).setUp()
        self.notifier = nsx.DhcpAgentNotifyAPI(mock.Mock(), mock.Mock())
        # Convenience handles to the two mocks held by the notifier.
        self.plugin = self.notifier.plugin
        self.lsn_manager = self.notifier.lsn_manager
    def _test_notify_port_update(
            self, ports, expected_count, expected_args=None):
        """Fire a port.update.end event and verify lsn_port_update args.

        ``ports`` is what the plugin's get_ports returns for the network.
        NOTE(review): ``expected_count`` is currently unused by this
        helper; only ``expected_args`` is asserted.
        """
        port = {
            'id': 'foo_port_id',
            'network_id': 'foo_network_id',
            'fixed_ips': [{'subnet_id': 'foo_subnet_id'}]
        }
        self.notifier.plugin.get_ports.return_value = ports
        self.notifier.notify(mock.ANY, {'port': port}, 'port.update.end')
        self.lsn_manager.lsn_port_update.assert_has_calls(expected_args)
    def test_notify_ports_update_no_ports(self):
        # Both "no result" shapes (None and empty list) must be tolerated.
        self._test_notify_port_update(None, 0, [])
        self._test_notify_port_update([], 0, [])
    def test_notify_ports_update_one_port(self):
        ports = [{
            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
                           'ip_address': '1.2.3.4'}],
            'device_id': 'foo_device_id',
            'device_owner': 'foo_device_owner',
            'mac_address': 'fa:16:3e:da:1d:46'
        }]
        call_args = mock.call(
            mock.ANY, 'foo_network_id', 'foo_subnet_id',
            dhcp=[{'ip_address': '1.2.3.4',
                   'mac_address': 'fa:16:3e:da:1d:46'}],
            meta=[{'instance_id': 'foo_device_id',
                   'ip_address': '1.2.3.4'}])
        self._test_notify_port_update(ports, 1, call_args)
    def test_notify_ports_update_ports_with_empty_device_id(self):
        # No device id -> DHCP entry is pushed but no metadata entry.
        ports = [{
            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
                           'ip_address': '1.2.3.4'}],
            'device_id': '',
            'device_owner': 'foo_device_owner',
            'mac_address': 'fa:16:3e:da:1d:46'
        }]
        call_args = mock.call(
            mock.ANY, 'foo_network_id', 'foo_subnet_id',
            dhcp=[{'ip_address': '1.2.3.4',
                   'mac_address': 'fa:16:3e:da:1d:46'}],
            meta=[]
        )
        self._test_notify_port_update(ports, 1, call_args)
    def test_notify_ports_update_ports_with_no_fixed_ips(self):
        # No fixed IPs -> empty dhcp and meta payloads are still sent.
        ports = [{
            'fixed_ips': [],
            'device_id': 'foo_device_id',
            'device_owner': 'foo_device_owner',
            'mac_address': 'fa:16:3e:da:1d:46'
        }]
        call_args = mock.call(
            mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[])
        self._test_notify_port_update(ports, 1, call_args)
    def test_notify_ports_update_ports_with_no_fixed_ips_and_no_device(self):
        ports = [{
            'fixed_ips': [],
            'device_id': '',
            'device_owner': 'foo_device_owner',
            'mac_address': 'fa:16:3e:da:1d:46'
        }]
        call_args = mock.call(
            mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[])
        self._test_notify_port_update(ports, 0, call_args)
    def test_notify_ports_update_with_special_ports(self):
        # DHCP-owned and router-gateway ports must be ignored entirely.
        ports = [{'fixed_ips': [],
                  'device_id': '',
                  'device_owner': n_consts.DEVICE_OWNER_DHCP,
                  'mac_address': 'fa:16:3e:da:1d:46'},
                 {'fixed_ips': [{'subnet_id': 'foo_subnet_id',
                                 'ip_address': '1.2.3.4'}],
                  'device_id': 'foo_device_id',
                  'device_owner': n_consts.DEVICE_OWNER_ROUTER_GW,
                  'mac_address': 'fa:16:3e:da:1d:46'}]
        call_args = mock.call(
            mock.ANY, 'foo_network_id', 'foo_subnet_id', dhcp=[], meta=[])
        self._test_notify_port_update(ports, 0, call_args)
    def test_notify_ports_update_many_ports(self):
        # Only the port with fixed IPs and a device contributes entries.
        ports = [{'fixed_ips': [],
                  'device_id': '',
                  'device_owner': 'foo_device_owner',
                  'mac_address': 'fa:16:3e:da:1d:46'},
                 {'fixed_ips': [{'subnet_id': 'foo_subnet_id',
                                 'ip_address': '1.2.3.4'}],
                  'device_id': 'foo_device_id',
                  'device_owner': 'foo_device_owner',
                  'mac_address': 'fa:16:3e:da:1d:46'}]
        call_args = mock.call(
            mock.ANY, 'foo_network_id', 'foo_subnet_id',
            dhcp=[{'ip_address': '1.2.3.4',
                   'mac_address': 'fa:16:3e:da:1d:46'}],
            meta=[{'instance_id': 'foo_device_id',
                   'ip_address': '1.2.3.4'}])
        self._test_notify_port_update(ports, 1, call_args)
    def _test_notify_subnet_action(self, action):
        """Check a subnet.<action>.end event dispatches to its handler."""
        with mock.patch.object(self.notifier, '_subnet_%s' % action) as f:
            self.notifier._handle_subnet_dhcp_access[action] = f
            subnet = {'subnet': mock.ANY}
            self.notifier.notify(
                mock.ANY, subnet, 'subnet.%s.end' % action)
            f.assert_called_once_with(mock.ANY, subnet)
    def test_notify_subnet_create(self):
        self._test_notify_subnet_action('create')
    def test_notify_subnet_update(self):
        self._test_notify_subnet_action('update')
    def test_notify_subnet_delete(self):
        self._test_notify_subnet_action('delete')
    def _test_subnet_create(self, enable_dhcp, exc=None,
                            exc_obj=None, call_notify=True):
        """Drive subnet.create.end and verify the DHCP port handling.

        :param enable_dhcp: whether the subnet requests DHCP; only then
            must a DHCP port be created.
        :param exc: expected exception class when port creation fails;
            the subnet must then be rolled back via delete_subnet.
        :param exc_obj: optional concrete exception instance to raise.
        :param call_notify: allow callers to skip firing the event and
            only run the assertions.
        """
        subnet = {
            'id': 'foo_subnet_id',
            'enable_dhcp': enable_dhcp,
            'network_id': 'foo_network_id',
            'tenant_id': 'foo_tenant_id',
            'cidr': '0.0.0.0/0'
        }
        if exc:
            self.plugin.create_port.side_effect = exc_obj or exc
            self.assertRaises(exc,
                              self.notifier.notify,
                              mock.ANY,
                              {'subnet': subnet},
                              'subnet.create.end')
            self.plugin.delete_subnet.assert_called_with(
                mock.ANY, subnet['id'])
        else:
            if call_notify:
                self.notifier.notify(
                    mock.ANY, {'subnet': subnet}, 'subnet.create.end')
            if enable_dhcp:
                # The notifier must create exactly this DHCP-owner port.
                dhcp_port = {
                    'name': '',
                    'admin_state_up': True,
                    'network_id': 'foo_network_id',
                    'tenant_id': 'foo_tenant_id',
                    'device_owner': n_consts.DEVICE_OWNER_DHCP,
                    'mac_address': mock.ANY,
                    'fixed_ips': [{'subnet_id': 'foo_subnet_id'}],
                    'device_id': ''
                }
                self.plugin.create_port.assert_called_once_with(
                    mock.ANY, {'port': dhcp_port})
            else:
                self.assertEqual(0, self.plugin.create_port.call_count)
    def test_subnet_create_enabled_dhcp(self):
        self._test_subnet_create(True)
    def test_subnet_create_disabled_dhcp(self):
        self._test_subnet_create(False)
    def test_subnet_create_raise_port_config_error(self):
        with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2,
                               'delete_port') as d:
            self._test_subnet_create(
                True,
                exc=n_exc.Conflict,
                exc_obj=p_exc.PortConfigurationError(lsn_id='foo_lsn_id',
                                                     net_id='foo_net_id',
                                                     port_id='foo_port_id'))
            # The misconfigured port must be removed via the base plugin.
            d.assert_called_once_with(self.plugin, mock.ANY, 'foo_port_id')
    def test_subnet_update(self):
        subnet = {
            'id': 'foo_subnet_id',
            'network_id': 'foo_network_id',
        }
        self.lsn_manager.lsn_port_get.return_value = ('foo_lsn_id',
                                                      'foo_lsn_port_id')
        self.notifier.notify(
            mock.ANY, {'subnet': subnet}, 'subnet.update.end')
        self.lsn_manager.lsn_port_dhcp_configure.assert_called_once_with(
            mock.ANY, 'foo_lsn_id', 'foo_lsn_port_id', subnet)
    def test_subnet_update_raise_lsn_not_found(self):
        # A missing LSN is not recoverable on update: the error bubbles up.
        subnet = {
            'id': 'foo_subnet_id',
            'network_id': 'foo_network_id',
        }
        self.lsn_manager.lsn_port_get.side_effect = (
            p_exc.LsnNotFound(entity='network',
                              entity_id=subnet['network_id']))
        self.assertRaises(p_exc.LsnNotFound,
                          self.notifier.notify,
                          mock.ANY, {'subnet': subnet}, 'subnet.update.end')
    def _test_subnet_update_lsn_port_not_found(self, dhcp_port):
        """A missing LSN port on update triggers (re)provisioning.

        If a DHCP port already exists it is re-plugged via
        handle_port_dhcp_access; otherwise a new one is created (checked
        by re-running the create-path assertions).
        """
        subnet = {
            'id': 'foo_subnet_id',
            'enable_dhcp': True,
            'network_id': 'foo_network_id',
            'tenant_id': 'foo_tenant_id'
        }
        self.lsn_manager.lsn_port_get.side_effect = (
            p_exc.LsnPortNotFound(lsn_id='foo_lsn_id',
                                  entity='subnet',
                                  entity_id=subnet['id']))
        self.notifier.plugin.get_ports.return_value = dhcp_port
        count = 0 if dhcp_port is None else 1
        with mock.patch.object(nsx, 'handle_port_dhcp_access') as h:
            self.notifier.notify(
                mock.ANY, {'subnet': subnet}, 'subnet.update.end')
            self.assertEqual(count, h.call_count)
            if not dhcp_port:
                self._test_subnet_create(enable_dhcp=True,
                                         exc=None, call_notify=False)
    def test_subnet_update_lsn_port_not_found_without_dhcp_port(self):
        self._test_subnet_update_lsn_port_not_found(None)
    def test_subnet_update_lsn_port_not_found_with_dhcp_port(self):
        self._test_subnet_update_lsn_port_not_found([mock.ANY])
    def _test_subnet_delete(self, ports=None):
        """subnet.delete.end removes the network's DHCP port, if any."""
        subnet = {
            'id': 'foo_subnet_id',
            'network_id': 'foo_network_id',
            'cidr': '0.0.0.0/0'
        }
        self.plugin.get_ports.return_value = ports
        self.notifier.notify(mock.ANY, {'subnet': subnet}, 'subnet.delete.end')
        filters = {
            'network_id': [subnet['network_id']],
            'device_owner': [n_consts.DEVICE_OWNER_DHCP]
        }
        self.plugin.get_ports.assert_called_once_with(
            mock.ANY, filters=filters)
        if ports:
            self.plugin.delete_port.assert_called_once_with(
                mock.ANY, ports[0]['id'])
        else:
            self.assertEqual(0, self.plugin.delete_port.call_count)
    def test_subnet_delete_enabled_dhcp_no_ports(self):
        self._test_subnet_delete()
    def test_subnet_delete_enabled_dhcp_with_dhcp_port(self):
        self._test_subnet_delete([{'id': 'foo_port_id'}])
class DhcpTestCase(base.BaseTestCase):
    """Tests for nsx.handle_network_dhcp_access / handle_port_dhcp_access.

    A fully mocked plugin (including its lsn_manager) records which LSN
    operations each network/port lifecycle event triggers.
    """
    def setUp(self):
        super(DhcpTestCase, self).setUp()
        self.plugin = mock.Mock()
        self.plugin.lsn_manager = mock.Mock()
    def test_handle_create_network(self):
        network = {'id': 'foo_network_id'}
        nsx.handle_network_dhcp_access(
            self.plugin, mock.ANY, network, 'create_network')
        self.plugin.lsn_manager.lsn_create.assert_called_once_with(
            mock.ANY, network['id'])
    def test_handle_create_network_router_external(self):
        # External networks must not get an LSN.
        network = {'id': 'foo_network_id', 'router:external': True}
        nsx.handle_network_dhcp_access(
            self.plugin, mock.ANY, network, 'create_network')
        self.assertFalse(self.plugin.lsn_manager.lsn_create.call_count)
    def test_handle_delete_network(self):
        # delete_network receives a bare network id, not a network dict.
        network_id = 'foo_network_id'
        self.plugin.lsn_manager.lsn_delete_by_network.return_value = (
            'foo_lsn_id')
        nsx.handle_network_dhcp_access(
            self.plugin, mock.ANY, network_id, 'delete_network')
        self.plugin.lsn_manager.lsn_delete_by_network.assert_called_once_with(
            mock.ANY, 'foo_network_id')
    def _test_handle_create_dhcp_owner_port(self, exc=None):
        """A DHCP-owner port create must configure DHCP on its LSN port.

        With ``exc`` set on lsn_port_dhcp_setup, the failure must surface
        as a NeutronException.
        """
        subnet = {
            'cidr': '0.0.0.0/0',
            'id': 'foo_subnet_id'
        }
        port = {
            'id': 'foo_port_id',
            'device_owner': n_consts.DEVICE_OWNER_DHCP,
            'mac_address': 'aa:bb:cc:dd:ee:ff',
            'network_id': 'foo_network_id',
            'fixed_ips': [{'subnet_id': subnet['id']}]
        }
        expected_data = {
            'subnet_id': subnet['id'],
            'ip_address': subnet['cidr'],
            'mac_address': port['mac_address']
        }
        self.plugin.get_subnet.return_value = subnet
        if exc is None:
            nsx.handle_port_dhcp_access(
                self.plugin, mock.ANY, port, 'create_port')
            (self.plugin.lsn_manager.lsn_port_dhcp_setup.
             assert_called_once_with(mock.ANY, port['network_id'],
                                     port['id'], expected_data, subnet))
        else:
            self.plugin.lsn_manager.lsn_port_dhcp_setup.side_effect = exc
            self.assertRaises(n_exc.NeutronException,
                              nsx.handle_port_dhcp_access,
                              self.plugin, mock.ANY, port, 'create_port')
    def test_handle_create_dhcp_owner_port(self):
        self._test_handle_create_dhcp_owner_port()
    def test_handle_create_dhcp_owner_port_raise_port_config_error(self):
        config_error = p_exc.PortConfigurationError(lsn_id='foo_lsn_id',
                                                    net_id='foo_net_id',
                                                    port_id='foo_port_id')
        self._test_handle_create_dhcp_owner_port(exc=config_error)
    def test_handle_delete_dhcp_owner_port(self):
        # Deleting the DHCP port disposes the LSN port keyed by MAC.
        port = {
            'id': 'foo_port_id',
            'device_owner': n_consts.DEVICE_OWNER_DHCP,
            'network_id': 'foo_network_id',
            'fixed_ips': [],
            'mac_address': 'aa:bb:cc:dd:ee:ff'
        }
        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, 'delete_port')
        self.plugin.lsn_manager.lsn_port_dispose.assert_called_once_with(
            mock.ANY, port['network_id'], port['mac_address'])
    def _test_handle_user_port(self, action, handler):
        """A tenant port event adds/removes its DHCP host entry.

        ``handler`` is the lsn_manager mock method expected to be hit for
        the given ``action``.
        """
        port = {
            'id': 'foo_port_id',
            'device_owner': 'foo_device_owner',
            'network_id': 'foo_network_id',
            'mac_address': 'aa:bb:cc:dd:ee:ff',
            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
                           'ip_address': '1.2.3.4'}]
        }
        expected_data = {
            'ip_address': '1.2.3.4',
            'mac_address': 'aa:bb:cc:dd:ee:ff'
        }
        self.plugin.get_subnet.return_value = {'enable_dhcp': True}
        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
        handler.assert_called_once_with(
            mock.ANY, port['network_id'], 'foo_subnet_id', expected_data)
    def test_handle_create_user_port(self):
        self._test_handle_user_port(
            'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)
    def test_handle_delete_user_port(self):
        self._test_handle_user_port(
            'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)
    def _test_handle_user_port_disabled_dhcp(self, action, handler):
        """No host entry is touched when the subnet has DHCP disabled."""
        port = {
            'id': 'foo_port_id',
            'device_owner': 'foo_device_owner',
            'network_id': 'foo_network_id',
            'mac_address': 'aa:bb:cc:dd:ee:ff',
            'fixed_ips': [{'subnet_id': 'foo_subnet_id',
                           'ip_address': '1.2.3.4'}]
        }
        self.plugin.get_subnet.return_value = {'enable_dhcp': False}
        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
        self.assertEqual(0, handler.call_count)
    def test_handle_create_user_port_disabled_dhcp(self):
        self._test_handle_user_port_disabled_dhcp(
            'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)
    def test_handle_delete_user_port_disabled_dhcp(self):
        self._test_handle_user_port_disabled_dhcp(
            'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)
    def _test_handle_user_port_no_fixed_ips(self, action, handler):
        """Ports without fixed IPs must be ignored for DHCP handling."""
        port = {
            'id': 'foo_port_id',
            'device_owner': 'foo_device_owner',
            'network_id': 'foo_network_id',
            'fixed_ips': []
        }
        nsx.handle_port_dhcp_access(self.plugin, mock.ANY, port, action)
        self.assertEqual(0, handler.call_count)
    def test_handle_create_user_port_no_fixed_ips(self):
        self._test_handle_user_port_no_fixed_ips(
            'create_port', self.plugin.lsn_manager.lsn_port_dhcp_host_add)
    def test_handle_delete_user_port_no_fixed_ips(self):
        self._test_handle_user_port_no_fixed_ips(
            'delete_port', self.plugin.lsn_manager.lsn_port_dhcp_host_remove)
class MetadataTestCase(base.BaseTestCase):
    """Tests for nsx.handle_port_metadata_access and
    handle_router_metadata_access using a fully mocked plugin.
    """
    def setUp(self):
        super(MetadataTestCase, self).setUp()
        self.plugin = mock.Mock()
        self.plugin.lsn_manager = mock.Mock()
    def _test_handle_port_metadata_access_special_owners(
            self, owner, dev_id='foo_device_id', ips=None):
        """Ports with special owners / no device / no IPs are ignored."""
        port = {
            'id': 'foo_port_id',
            'device_owner': owner,
            'device_id': dev_id,
            'fixed_ips': ips or []
        }
        nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY)
        self.assertFalse(
            self.plugin.lsn_manager.lsn_port_meta_host_add.call_count)
        self.assertFalse(
            self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count)
    def test_handle_port_metadata_access_external_network(self):
        port = {
            'id': 'foo_port_id',
            'device_owner': 'foo_device_owner',
            'device_id': 'foo_device_id',
            'network_id': 'foo_network_id',
            'fixed_ips': [{'subnet_id': 'foo_subnet'}]
        }
        self.plugin.get_network.return_value = {'router:external': True}
        nsx.handle_port_metadata_access(self.plugin, mock.ANY, port, mock.ANY)
        self.assertFalse(
            self.plugin.lsn_manager.lsn_port_meta_host_add.call_count)
        self.assertFalse(
            self.plugin.lsn_manager.lsn_port_meta_host_remove.call_count)
    def test_handle_port_metadata_access_dhcp_port(self):
        # NOTE(review): the list is passed as the second positional arg,
        # i.e. ``dev_id``, not ``ips`` — likely meant ``ips=[...]``.
        # The owner check still makes the test pass; confirm intent.
        self._test_handle_port_metadata_access_special_owners(
            n_consts.DEVICE_OWNER_DHCP, [{'subnet_id': 'foo_subnet'}])
    def test_handle_port_metadata_access_router_port(self):
        # NOTE(review): same positional slip as the DHCP-port test above.
        self._test_handle_port_metadata_access_special_owners(
            n_consts.DEVICE_OWNER_ROUTER_INTF, [{'subnet_id': 'foo_subnet'}])
    def test_handle_port_metadata_access_no_device_id(self):
        self._test_handle_port_metadata_access_special_owners(
            n_consts.DEVICE_OWNER_DHCP, '')
    def test_handle_port_metadata_access_no_fixed_ips(self):
        self._test_handle_port_metadata_access_special_owners(
            'foo', 'foo', None)
    def _test_handle_port_metadata_access(self, is_delete, raise_exc=False):
        """A regular port add/remove updates its LSN metadata host entry.

        :param is_delete: exercise the remove path instead of add.
        :param raise_exc: make the LSN call fail; on the add path the
            port must then be deleted through the base plugin.
        """
        port = {
            'id': 'foo_port_id',
            'device_owner': 'foo_device_id',
            'network_id': 'foo_network_id',
            'device_id': 'foo_device_id',
            'tenant_id': 'foo_tenant_id',
            'fixed_ips': [
                {'subnet_id': 'foo_subnet_id', 'ip_address': '1.2.3.4'}
            ]
        }
        # Metadata entry expected to be pushed for this port.
        meta = {
            'instance_id': port['device_id'],
            'tenant_id': port['tenant_id'],
            'ip_address': port['fixed_ips'][0]['ip_address']
        }
        self.plugin.get_network.return_value = {'router:external': False}
        if is_delete:
            mock_func = self.plugin.lsn_manager.lsn_port_meta_host_remove
        else:
            mock_func = self.plugin.lsn_manager.lsn_port_meta_host_add
        if raise_exc:
            mock_func.side_effect = p_exc.PortConfigurationError(
                lsn_id='foo_lsn_id', net_id='foo_net_id', port_id=None)
            with mock.patch.object(nsx.db_base_plugin_v2.NeutronDbPluginV2,
                                   'delete_port') as d:
                self.assertRaises(p_exc.PortConfigurationError,
                                  nsx.handle_port_metadata_access,
                                  self.plugin, mock.ANY, port,
                                  is_delete=is_delete)
                if not is_delete:
                    # Failed add -> the port is rolled back.
                    d.assert_called_once_with(mock.ANY, mock.ANY, port['id'])
                else:
                    self.assertFalse(d.call_count)
        else:
            nsx.handle_port_metadata_access(
                self.plugin, mock.ANY, port, is_delete=is_delete)
        mock_func.assert_called_once_with(mock.ANY, mock.ANY, mock.ANY, meta)
    def test_handle_port_metadata_access_on_delete_true(self):
        self._test_handle_port_metadata_access(True)
    def test_handle_port_metadata_access_on_delete_false(self):
        self._test_handle_port_metadata_access(False)
    def test_handle_port_metadata_access_on_delete_true_raise(self):
        self._test_handle_port_metadata_access(True, raise_exc=True)
    def test_handle_port_metadata_access_on_delete_false_raise(self):
        self._test_handle_port_metadata_access(False, raise_exc=True)
    def _test_handle_router_metadata_access(
            self, is_port_found, raise_exc=False):
        """Router interface changes (re)configure LSN metadata access.

        On failure the interface is removed again via the base L3 mixin.
        """
        subnet = {
            'id': 'foo_subnet_id',
            'network_id': 'foo_network_id'
        }
        interface = {
            'subnet_id': subnet['id'],
            'port_id': 'foo_port_id'
        }
        mock_func = self.plugin.lsn_manager.lsn_metadata_configure
        if not is_port_found:
            self.plugin.get_port.side_effect = n_exc.NotFound
        if raise_exc:
            with mock.patch.object(nsx.l3_db.L3_NAT_db_mixin,
                                   'remove_router_interface') as d:
                mock_func.side_effect = p_exc.NsxPluginException(err_msg='')
                self.assertRaises(p_exc.NsxPluginException,
                                  nsx.handle_router_metadata_access,
                                  self.plugin, mock.ANY, 'foo_router_id',
                                  interface)
                d.assert_called_once_with(mock.ANY, mock.ANY, 'foo_router_id',
                                          interface)
        else:
            nsx.handle_router_metadata_access(
                self.plugin, mock.ANY, 'foo_router_id', interface)
            mock_func.assert_called_once_with(
                mock.ANY, subnet['id'], is_port_found)
    def test_handle_router_metadata_access_add_interface(self):
        self._test_handle_router_metadata_access(True)
    def test_handle_router_metadata_access_delete_interface(self):
        self._test_handle_router_metadata_access(False)
    def test_handle_router_metadata_access_raise_error_on_add(self):
        self._test_handle_router_metadata_access(True, raise_exc=True)
    def test_handle_router_metadata_access_raise_error_on_delete(self):
        # NOTE(review): these arguments duplicate the add-interface test;
        # the name suggests (False, raise_exc=True) was intended — confirm.
        self._test_handle_router_metadata_access(True, raise_exc=False)
| apache-2.0 |
msmolens/VTK | ThirdParty/Twisted/twisted/words/test/test_basesupport.py | 67 | 3056 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from twisted.trial import unittest
from twisted.words.im import basesupport
from twisted.internet import error, defer
class DummyAccount(basesupport.AbstractAccount):
    """
    An account object that will do nothing when asked to start to log on.
    """
    # Flags recording which login path was taken, inspected by the tests.
    loginHasFailed = False
    loginCallbackCalled = False
    def _startLogOn(self, *args):
        """
        Set self.loginDeferred to the same as the deferred returned, allowing a
        testcase to .callback or .errback.
        @return: A deferred.
        """
        self.loginDeferred = defer.Deferred()
        return self.loginDeferred
    def _loginFailed(self, result):
        # Record the failure, then delegate to the base implementation.
        self.loginHasFailed = True
        return basesupport.AbstractAccount._loginFailed(self, result)
    def _cb_logOn(self, result):
        # Record the success, then delegate to the base implementation.
        self.loginCallbackCalled = True
        return basesupport.AbstractAccount._cb_logOn(self, result)
class DummyUI(object):
    """
    Provide just the interface required to be passed to AbstractAccount.logOn.
    """
    # Set to True once registerAccountClient has been invoked.
    clientRegistered = False
    def registerAccountClient(self, result):
        """Record that the account client was registered with the UI."""
        self.clientRegistered = True
class ClientMsgTests(unittest.TestCase):
    """
    Tests for the connection logic of L{basesupport.AbstractAccount.logOn}.

    Uses C{assertTrue}/C{fail} instead of the deprecated C{assert_} alias,
    and gives C{test_failedConnect} its own docstring (it previously
    carried a copy-paste of the success-case docstring).
    """
    def makeUI(self):
        """Create a minimal UI double accepted by logOn."""
        return DummyUI()
    def makeAccount(self):
        """Create a DummyAccount pointed at a local endpoint."""
        return DummyAccount('la', False, 'la', None, 'localhost', 6667)
    def test_connect(self):
        """
        Test that account.logOn works, and it calls the right callback when a
        connection is established.
        """
        account = self.makeAccount()
        ui = self.makeUI()
        d = account.logOn(ui)
        account.loginDeferred.callback(None)
        def check(result):
            self.assertTrue(not account.loginHasFailed,
                            "Login shouldn't have failed")
            self.assertTrue(account.loginCallbackCalled,
                            "We should be logged in")
        d.addCallback(check)
        return d
    def test_failedConnect(self):
        """
        Test that when the connection attempt fails, the errback path is
        taken and the client is not registered with the UI.
        """
        account = self.makeAccount()
        ui = self.makeUI()
        d = account.logOn(ui)
        account.loginDeferred.errback(Exception())
        def err(reason):
            self.assertTrue(account.loginHasFailed,
                            "Login should have failed")
            self.assertTrue(not account.loginCallbackCalled,
                            "We shouldn't be logged in")
            self.assertTrue(not ui.clientRegistered,
                            "Client shouldn't be registered in the UI")
        cb = lambda r: self.fail("Shouldn't get called back")
        d.addCallbacks(cb, err)
        return d
    def test_alreadyConnecting(self):
        """
        Test that it can fail sensibly when someone tried to connect before
        we did.
        """
        account = self.makeAccount()
        ui = self.makeUI()
        account.logOn(ui)
        self.assertRaises(error.ConnectError, account.logOn, ui)
| bsd-3-clause |
anudeepsharma/autorest | src/generator/AutoRest.Python.Tests/Expected/AcceptanceTests/BodyBoolean/setup.py | 28 | 1097 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "autorestbooltestservice"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.2.0"]
setup(
name=NAME,
version=VERSION,
description="AutoRestBoolTestService",
author_email="",
url="",
keywords=["Swagger", "AutoRestBoolTestService"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Test Infrastructure for AutoRest
"""
)
| mit |
plotly/plotly.py | packages/python/plotly/plotly/tests/test_core/test_graph_objs/test_graph_objs.py | 1 | 5401 | from unittest import TestCase
import plotly.graph_objs as go
# Graph object class names that have historically been importable from
# plotly.graph_objs; they must stay available for backwards compatibility.
OLD_CLASS_NAMES = [
    "AngularAxis",
    "Annotation",
    "Annotations",
    "Bar",
    "Box",
    "ColorBar",
    "Contour",
    "Contours",
    "Data",
    "ErrorX",
    "ErrorY",
    "ErrorZ",
    "Figure",
    "Font",
    "Frame",
    "Frames",
    "Heatmap",
    "Histogram",
    "Histogram2d",
    "Histogram2dContour",
    "Layout",
    "Legend",
    "Line",
    "Margin",
    "Marker",
    "RadialAxis",
    "Scatter",
    "Scatter3d",
    "Scene",
    "Stream",
    "Surface",
    "Trace",
    "XAxis",
    "XBins",
    "YAxis",
    "YBins",
    "ZAxis",
]
class TestBackwardsCompat(TestCase):
    """Checkpoint tests for legacy plotly.graph_objs APIs."""
    def test_old_class_names(self):
        # these were all defined at one point, we want to maintain backwards
        # compat, so we basically just create a checkpoint with this test.
        for class_name in OLD_CLASS_NAMES:
            self.assertIsNotNone(getattr(go, class_name, None))
    def test_title_as_string_layout(self):
        """
        Prior to plotly.js 1.43.0 title properties were strings, in 1.43.0
        these title properties became compound objects with a text property.
        For backwards compatibility, we still need to support setting this
        title object as a string or number
        """
        # Every object carrying a title property that used to be a string.
        layout_title_parents = [
            go.Layout(),
            go.layout.XAxis(),
            go.layout.YAxis(),
            go.layout.ternary.Aaxis(),
            go.layout.ternary.Baxis(),
            go.layout.ternary.Caxis(),
            go.layout.scene.XAxis(),
            go.layout.scene.YAxis(),
            go.layout.scene.ZAxis(),
            go.layout.polar.RadialAxis(),
            go.scatter.marker.ColorBar(),
            go.cone.ColorBar(),
        ]
        for obj in layout_title_parents:
            obj.title = "A title"
            self.assertEqual(obj.title.text, "A title")
            self.assertEqual(obj.to_plotly_json(), {"title": {"text": "A title"}})
            # And update
            obj.update(title="A title 2")
            self.assertEqual(obj.title.text, "A title 2")
            self.assertEqual(obj.to_plotly_json(), {"title": {"text": "A title 2"}})
            # Update titlefont
            obj.update(titlefont={"size": 23})
            self.assertEqual(obj.title.font.size, 23)
            self.assertEqual(
                obj.to_plotly_json(),
                {"title": {"text": "A title 2", "font": {"size": 23}}},
            )
        # Pie traces serialize with an extra "type" key, so test separately.
        obj = go.Pie()
        obj.title = "A title"
        self.assertEqual(obj.title.text, "A title")
        self.assertEqual(
            obj.to_plotly_json(), {"title": {"text": "A title"}, "type": "pie"}
        )
        # And update
        obj.update(title="A title 2")
        self.assertEqual(obj.title.text, "A title 2")
        self.assertEqual(
            obj.to_plotly_json(), {"type": "pie", "title": {"text": "A title 2"}}
        )
        # Update titlefont
        obj.update(titlefont={"size": 23})
        self.assertEqual(obj.title.font.size, 23)
        self.assertEqual(
            obj.to_plotly_json(),
            {"type": "pie", "title": {"text": "A title 2", "font": {"size": 23}}},
        )
    def test_legacy_title_props_remapped(self):
        """The legacy ``titlefont`` property must alias ``title.font``."""
        # plain Layout
        obj = go.Layout()
        self.assertIs(obj.titlefont, obj.title.font)
        self.assertIsNone(obj.title.font.family)
        # Set titlefont in constructor
        obj = go.Layout(titlefont={"family": "Courier"})
        self.assertIs(obj.titlefont, obj.title.font)
        self.assertEqual(obj.titlefont.family, "Courier")
        self.assertEqual(obj.title.font.family, "Courier")
        # Property assignment
        obj = go.Layout()
        obj.titlefont.family = "Courier"
        self.assertIs(obj.titlefont, obj.title.font)
        self.assertEqual(obj["titlefont.family"], "Courier")
        self.assertEqual(obj.title.font.family, "Courier")
        # In/Iter
        self.assertIn("titlefont", obj)
        self.assertIn("titlefont.family", obj)
        self.assertIn("titlefont", iter(obj))
class TestPop(TestCase):
    """Tests for dict-style ``pop`` on graph objects."""
    def setUp(self):
        # A layout with one of each property kind: simple, compound,
        # array, and subplot.
        self.layout = go.Layout(
            width=1000,
            title={"text": "the title", "font": {"size": 20}},
            annotations=[{}, {}],
            xaxis2={"range": [1, 2]},
        )
    def test_pop_valid_simple_prop(self):
        self.assertEqual(self.layout.width, 1000)
        self.assertEqual(self.layout.pop("width"), 1000)
        self.assertIsNone(self.layout.width)
    def test_pop_valid_compound_prop(self):
        # Popping a compound property resets it to an empty instance.
        val = self.layout.title
        self.assertEqual(self.layout.pop("title"), val)
        self.assertEqual(self.layout.title, go.layout.Title())
    def test_pop_valid_array_prop(self):
        # Popping an array property resets it to an empty tuple.
        val = self.layout.annotations
        self.assertEqual(self.layout.pop("annotations"), val)
        self.assertEqual(self.layout.annotations, ())
    def test_pop_valid_subplot_prop(self):
        val = self.layout.xaxis2
        self.assertEqual(self.layout.pop("xaxis2"), val)
        self.assertEqual(self.layout.xaxis2, go.layout.XAxis())
    def test_pop_invalid_prop_key_error(self):
        # Without a default, popping an unknown key raises KeyError.
        with self.assertRaises(KeyError):
            self.layout.pop("bogus")
    def test_pop_invalid_prop_with_default(self):
        self.assertEqual(self.layout.pop("bogus", 42), 42)
ddico/odoo | addons/test_event_full/tests/common.py | 1 | 3967 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo.addons.event_crm.tests.common import TestEventCrmCommon
class TestEventFullCommon(TestEventCrmCommon):
    """Shared fixtures for full-featured event tests: a sellable event
    product, two tickets, three registration questions, a draft sale
    order and four fake website attendees with prefilled answers.
    """
    @classmethod
    def setUpClass(cls):
        super(TestEventFullCommon, cls).setUpClass()
        cls.event_product = cls.env['product.product'].create({
            'name': 'Test Registration Product',
            'description_sale': 'Mighty Description',
            'list_price': 10,
            'event_ok': True,
            'standard_price': 30.0,
            'type': 'service',
        })
        # Replace any existing tickets ((5, 0) clears the o2m) with one
        # seat-capped ticket and one unlimited ticket.
        cls.event_0.write({
            'event_ticket_ids': [
                (5, 0),
                (0, 0, {
                    'name': 'First Ticket',
                    'product_id': cls.event_product.id,
                    'seats_max': 30,
                }), (0, 0, {
                    'name': 'Second Ticket',
                    'product_id': cls.event_product.id,
                })
            ],
        })
        # Question 1: per-attendee single choice.
        cls.event_question_1 = cls.env['event.question'].create({
            'title': 'Question1',
            'question_type': 'simple_choice',
            'event_id': cls.event_0.id,
            'once_per_order': False,
            'answer_ids': [
                (0, 0, {'name': 'Q1-Answer1'}),
                (0, 0, {'name': 'Q1-Answer2'})
            ],
        })
        # Question 2: per-order single choice.
        cls.event_question_2 = cls.env['event.question'].create({
            'title': 'Question2',
            'question_type': 'simple_choice',
            'event_id': cls.event_0.id,
            'once_per_order': True,
            'answer_ids': [
                (0, 0, {'name': 'Q2-Answer1'}),
                (0, 0, {'name': 'Q2-Answer2'})
            ],
        })
        # Question 3: per-order free text.
        cls.event_question_3 = cls.env['event.question'].create({
            'title': 'Question3',
            'question_type': 'text_box',
            'event_id': cls.event_0.id,
            'once_per_order': True,
        })
        # make a SO for a customer, selling some tickets
        cls.customer_so = cls.env['sale.order'].with_user(cls.user_sales_salesman).create({
            'partner_id': cls.event_customer.id,
        })
        # Four anonymous website attendees; answers alternate between the
        # two choices of each simple-choice question.
        cls.website_customer_data = [{
            'name': 'My Customer %02d' % x,
            'partner_id': cls.env.ref('base.public_partner').id,
            'email': 'email.%02d@test.example.com' % x,
            'phone': '04560000%02d' % x,
            'registration_answer_ids': [
                (0, 0, {
                    'question_id': cls.event_question_1.id,
                    'value_answer_id': cls.event_question_1.answer_ids[(x % 2)].id,
                }), (0, 0, {
                    'question_id': cls.event_question_2.id,
                    'value_answer_id': cls.event_question_2.answer_ids[(x % 2)].id,
                }), (0, 0, {
                    'question_id': cls.event_question_3.id,
                    'value_text_box': 'CustomerAnswer%s' % x,
                })
            ],
        } for x in range(0, 4)]
    def assertLeadConvertion(self, rule, registrations, partner=None, **expected):
        """Extend the CRM assertion: the generated lead's description must
        also mention every question title and the given answers.
        """
        super(TestEventFullCommon, self).assertLeadConvertion(rule, registrations, partner=partner, **expected)
        lead = self.env['crm.lead'].sudo().search([
            ('registration_ids', 'in', registrations.ids),
            ('event_lead_rule_id', '=', rule.id)
        ])
        for registration in registrations:
            if not registration.registration_answer_ids:
                continue
            for answer in registration.registration_answer_ids:
                self.assertIn(answer.question_id.title, lead.description)
                if answer.question_type == 'simple_choice':
                    self.assertIn(answer.value_answer_id.name, lead.description)
                else:
                    self.assertIn(answer.value_text_box, lead.description)  # better: check multi line
| agpl-3.0 |
embray/numpy | numpy/distutils/conv_template.py | 165 | 9681 | #!/usr/bin/python
"""
takes templated file .xxx.src and produces .xxx file where .xxx is
.i or .c or .h, using the following template rules
/**begin repeat -- on a line by itself marks the start of a repeated code
segment
/**end repeat**/ -- on a line by itself marks it's end
After the /**begin repeat and before the */, all the named templates are placed
these should all have the same number of replacements
Repeat blocks can be nested, with each nested block labeled with its depth,
i.e.
/**begin repeat1
*....
*/
/**end repeat1**/
When using nested loops, you can optionally exlude particular
combinations of the variables using (inside the comment portion of the inner loop):
:exclude: var1=value1, var2=value2, ...
This will exlude the pattern where var1 is value1 and var2 is value2 when
the result is being generated.
In the main body each replace will use one entry from the list of named replacements
Note that all #..# forms in a block must have the same number of
comma-separated entries.
Example:
An input file containing
/**begin repeat
* #a = 1,2,3#
* #b = 1,2,3#
*/
/**begin repeat1
* #c = ted, jim#
*/
@a@, @b@, @c@
/**end repeat1**/
/**end repeat**/
produces
line 1 "template.c.src"
/*
*********************************************************************
** This file was autogenerated from a template DO NOT EDIT!!**
** Changes should be made to the original source (.src) file **
*********************************************************************
*/
#line 9
1, 1, ted
#line 9
1, 1, jim
#line 9
2, 2, ted
#line 9
2, 2, jim
#line 9
3, 3, ted
#line 9
3, 3, jim
"""
from __future__ import division, absolute_import, print_function
__all__ = ['process_str', 'process_file']
import os
import sys
import re
from numpy.distutils.compat import get_exception
# names for replacement that are already global.
global_names = {}
# header placed at the front of head processed file
header =\
"""
/*
*****************************************************************************
** This file was autogenerated from a template DO NOT EDIT!!!! **
** Changes should be made to the original source (.src) file **
*****************************************************************************
*/
"""
# Parse string for repeat loops
def parse_structure(astr, level):
    """
    Locate every repeat block of the given nesting *level* in *astr*.

    Returns a (sorted) list of tuples
    ``(begin, head_end, body_end, block_end, lineno)`` where the first four
    entries are character offsets delimiting the block header and body, and
    ``lineno`` is the zero-based line number of the first body line counted
    from the start of the string.  An empty list means no loops were found.
    """
    if level == 0:
        begin_tag = "/**begin repeat"
        end_tag = "/**end repeat**/"
    else:
        begin_tag = "/**begin repeat%d" % level
        end_tag = "/**end repeat%d**/" % level

    spans = []
    pos = 0
    lineno = 0
    while True:
        begin = astr.find(begin_tag, pos)
        if begin < 0:
            break
        # header runs up to the end of the line holding the closing "*/"
        head_close = astr.find("\n", astr.find("*/", begin))
        end_mark = astr.find(end_tag, head_close)
        end_line = astr.find("\n", end_mark)
        lineno += astr.count("\n", pos, head_close + 1)
        spans.append((begin, head_close + 1, end_mark, end_line + 1, lineno))
        lineno += astr.count("\n", head_close + 1, end_line)
        pos = end_line
    spans.sort()
    return spans
def paren_repl(obj):
    """Regex substitution callback: expand ``(body)*N`` into *body*
    repeated N times, comma separated (``()*4`` therefore yields ``,,,``).
    """
    body, count = obj.group(1), obj.group(2)
    return ','.join(body for _ in range(int(count)))
# '(a,b,c)*4' style repetition (see paren_repl / parse_values)
parenrep = re.compile(r"[(]([^)]*)[)]\*(\d+)")
# bare 'xxx*3' style repetition for a single unparenthesised value
plainrep = re.compile(r"([^*]+)\*(\d+)")
def parse_values(astr):
    """Expand repetition shorthand in a comma separated value list.

    ``(a,b,c)*4`` becomes ``a,b,c`` repeated four times and ``xxx*3``
    becomes ``xxx,xxx,xxx``; empty parentheses produce empty entries
    (``()*4`` -> ``,,,``).  Returns the final list of values split at
    commas.
    """
    # first expand parenthesised groups, then bare 'value*N' entries
    expanded = parenrep.sub(paren_repl, astr)
    pieces = [plainrep.sub(paren_repl, chunk.strip())
              for chunk in expanded.split(',')]
    return ','.join(pieces).split(',')
# newline + optional leading '*' of a comment continuation line
stripast = re.compile(r"\n\s*\*?")
# '#name = v1, v2, ...#' named replacement lists in a loop header
named_re = re.compile(r"#\s*(\w*)\s*=([^#]*)#")
# 'var=value' pairs on an ':exclude:' line
exclude_vars_re = re.compile(r"(\w*)=(\w*)")
exclude_re = re.compile(":exclude:")
def parse_loop_header(loophead) :
    """Find all named replacements in the header

    Returns a list of dictionaries, one for each loop iteration,
    where each key is a name to be substituted and the corresponding
    value is the replacement string.

    Also return a list of exclusions.  The exclusions are dictionaries
    of key value pairs. There can be more than one exclusion.
    [{'var1':'value1', 'var2', 'value2'[,...]}, ...]

    NOTE(review): despite the docstring, ``excludes`` is collected below
    but never returned or applied — confirm whether exclusion filtering
    was meant to happen here.
    """
    # Strip out '\n' and leading '*', if any, in continuation lines.
    # This should not effect code previous to this change as
    # continuation lines were not allowed.
    loophead = stripast.sub("", loophead)
    # parse out the names and lists of values
    names = []
    reps = named_re.findall(loophead)
    nsub = None
    for rep in reps:
        name = rep[0]
        vals = parse_values(rep[1])
        size = len(vals)
        # every '#name = ...#' list in one header must have the same length
        if nsub is None :
            nsub = size
        elif nsub != size :
            msg = "Mismatch in number of values:\n%s = %s" % (name, vals)
            raise ValueError(msg)
        names.append((name, vals))

    # Find any exclude variables
    excludes = []

    for obj in exclude_re.finditer(loophead):
        span = obj.span()
        # find next newline
        endline = loophead.find('\n', span[1])
        substr = loophead[span[1]:endline]
        ex_names = exclude_vars_re.findall(substr)
        excludes.append(dict(ex_names))

    # generate list of dictionaries, one for each template iteration
    dlist = []
    if nsub is None :
        raise ValueError("No substitution variables found")
    for i in range(nsub) :
        tmp = {}
        for name, vals in names :
            tmp[name] = vals[i]
        dlist.append(tmp)
    return dlist
# '@name@' placeholders to substitute inside a repeat body
replace_re = re.compile(r"@([\w]+)@")
def parse_string(astr, env, level, line) :
    """Recursively expand the repeat blocks of nesting depth *level* in
    *astr*, substituting '@name@' placeholders from the *env* mapping.

    *line* is the 1-based line number of *astr* within the original file;
    it is used for the generated ``#line`` markers and error messages.
    Returns the expanded text as a single string.
    """
    lineno = "#line %d\n" % line

    # local function for string replacement, uses env
    def replace(match):
        name = match.group(1)
        try :
            val = env[name]
        except KeyError:
            msg = 'line %d: no definition of key "%s"'%(line, name)
            raise ValueError(msg)
        return val

    code = [lineno]
    struct = parse_structure(astr, level)
    if struct :
        # recurse over inner loops
        oldend = 0
        newlevel = level + 1
        for sub in struct:
            # sub = (begin, head_end, body_end, block_end, line_offset)
            pref = astr[oldend:sub[0]]
            head = astr[sub[0]:sub[1]]
            text = astr[sub[1]:sub[2]]
            oldend = sub[3]
            newline = line + sub[4]
            code.append(replace_re.sub(replace, pref))
            try :
                envlist = parse_loop_header(head)
            except ValueError:
                e = get_exception()
                msg = "line %d: %s" % (newline, e)
                raise ValueError(msg)
            for newenv in envlist :
                # outer-loop values take precedence over this iteration's
                newenv.update(env)
                newcode = parse_string(text, newenv, newlevel, newline)
                # NOTE: newcode is a str, so extend() adds it char by char;
                # harmless since everything is joined below.
                code.extend(newcode)
        suff = astr[oldend:]
        code.append(replace_re.sub(replace, suff))
    else :
        # replace keys
        code.append(replace_re.sub(replace, astr))
    code.append('\n')
    return ''.join(code)
def process_str(astr):
    """Expand all repeat-template loops in *astr* and return the generated
    source text, prefixed with the autogeneration warning ``header``.
    """
    code = [header]
    # parse_string returns one string; append it as a single piece (the
    # previous code used list.extend, which split the string into
    # individual characters before the final join).
    code.append(parse_string(astr, global_names, 0, 1))
    return ''.join(code)
# Matches '#include "something.src"' directives (case-insensitive) so that
# .src files can be textually included before template expansion.
include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
                            r"(?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
    """Return the lines of *source* with ``#include "*.src"`` directives
    replaced, recursively, by the contents of the referenced files.

    Relative include paths are resolved against the directory of *source*.
    A directive whose target does not exist is kept verbatim.
    """
    d = os.path.dirname(source)
    lines = []
    # 'with' guarantees the file is closed even when a nested include or
    # read raises (the previous open()/close() pair leaked the handle on
    # error).
    with open(source) as fid:
        for line in fid:
            m = include_src_re.match(line)
            if m:
                fn = m.group('name')
                if not os.path.isabs(fn):
                    fn = os.path.join(d, fn)
                if os.path.isfile(fn):
                    print('Including file', fn)
                    lines.extend(resolve_includes(fn))
                else:
                    lines.append(line)
            else:
                lines.append(line)
    return lines
def process_file(source):
    """Fully process the templated file *source*: resolve ``#include``
    directives, expand all repeat loops, and return the generated text
    prefixed with a ``#line 1 "<source>"`` marker.

    Raises ValueError (with the file name prepended) if template
    expansion fails.
    """
    lines = resolve_includes(source)
    # double the backslashes so the name survives inside the C string
    # literal of the #line directive (Windows paths)
    sourcefile = os.path.normcase(source).replace("\\", "\\\\")
    try:
        code = process_str(''.join(lines))
    except ValueError:
        e = get_exception()
        raise ValueError('In "%s" loop at %s' % (sourcefile, e))
    return '#line 1 "%s"\n%s' % (sourcefile, code)
def unique_key(adict):
    """Return a key that does not collide with any key already in *adict*.

    Builds a candidate by concatenating the first ``n`` characters of every
    existing key and grows ``n`` until the candidate is unique -- not
    particularly quick, but only used on small dictionaries.
    """
    allkeys = list(adict.keys())
    longest = max([len(k) for k in allkeys]) if allkeys else 0
    n = 1
    while True:
        newkey = "".join([x[:n] for x in allkeys])
        if newkey not in allkeys:
            return newkey
        if n >= longest:
            # Growing n further cannot change the candidate any more (e.g.
            # a dict with the single one-character key 'a'); the previous
            # implementation looped forever here.  Extend with a suffix
            # until the key is unique.
            while newkey in allkeys:
                newkey += "_"
            return newkey
        n += 1
if __name__ == "__main__":
    # Command-line usage: conv_template.py [infile.xxx.src]
    # With no argument, read the template from stdin and write to stdout;
    # otherwise write the output next to the input file with the trailing
    # .src extension stripped.
    try:
        file = sys.argv[1]
    except IndexError:
        fid = sys.stdin
        outfile = sys.stdout
    else:
        fid = open(file, 'r')
        (base, ext) = os.path.splitext(file)
        newname = base
        outfile = open(newname, 'w')

    allstr = fid.read()
    try:
        writestr = process_str(allstr)
    except ValueError:
        # NOTE(review): in the stdin branch 'file' is never bound, so this
        # error path would raise NameError instead — confirm intent.
        e = get_exception()
        raise ValueError("In %s loop at %s" % (file, e))
    outfile.write(writestr)
| bsd-3-clause |
maninator/manimediaserver | setup/lib/mani_controller.py | 1 | 2431 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: Mani
# @Date: 2017-08-28 19:20:58
# @Last Modified time: 2017-09-27 12:54:55
#
##############################################
import mani_config
import docker_config
import emby_config
# Maps a configurable service name to the Controller method name that
# handles its "configure" commands (dispatched via getattr in
# Controller.configure()).
SERVICE_CONFIG_OPTIONS = {
    "embyserver" : {
        "module":"emby_config"
    }
}
class Controller(object):
    """Dispatch start/stop/status/configure commands for the managed media
    services (docker-based containers and the Emby server handler).
    """

    def __init__(self, con=None, cache=None, debugging=None):
        # con / cache / debugging are opaque collaborators passed through
        # to the service handlers; their concrete types are defined by the
        # caller — not constrained here.
        self.con = con
        self.cache = cache
        self.debugging = debugging
        self.services = mani_config.Services()
        self.emby_handle = emby_config.EmbyHandle(con, cache, debugging);
        self.doc = docker_config.DockerHandler(self.con,self.cache,self.debugging)

    def start_stop_handle(self,meta,start=False,stop=False):
        """Start or stop the service described by *meta*.

        meta[0] is the service identifier, meta[1] its descriptor dict;
        only "docker" type services are handled, everything else (and the
        start=stop=False case) falls through returning None.
        """
        if meta[1]["type"] == "docker":
            if start:
                return self.doc.start_container(meta[0])
            elif stop:
                return self.doc.stop_container(meta[0])

    def start_service(self,command):
        """Start the service named by *command*; False if unknown."""
        res = False
        meta = self.services.find_service_meta(command)
        if meta: # We know how to handle this command
            res = self.start_stop_handle(meta, start=True)
        return res

    def stop_service(self,command):
        """Stop the service named by *command*; False if unknown."""
        res = False
        meta = self.services.find_service_meta(command)
        if meta:
            res = self.start_stop_handle(meta, stop=True)
        return res

    def service_status(self,command):
        """Report status for a known docker service; True when handled."""
        meta = self.services.find_service_meta(command)
        if meta:
            if meta[1]["type"] == "docker":
                data = self.doc.find_installed_container(meta[0])
                if self.cache:
                    self.cache.docker_start_progress(data)
                return True
        return False

    def configure(self,command):
        """Route a configure *command* (a dict with at least a "service"
        key) to the handler method registered in SERVICE_CONFIG_OPTIONS.

        NOTE(review): ``result`` is initialised but unused here — an
        unknown service raises KeyError instead of returning it.
        """
        result = {"error":True}
        # Run the function to handle this command
        _object = SERVICE_CONFIG_OPTIONS[command["service"]]["module"]
        return getattr(self, _object)(command=command);

    def emby_config(self,command):
        """Invoke the Emby handler function named in command["function"]
        with command["params"], wrapping the outcome in a result dict.
        """
        result = {"error":True}
        res = getattr(self.emby_handle, command["function"])(params=command["params"]);
        try:
            if res:
                result["error"] = False
                result["result"] = res
        except:
            result["result"] = "SOMETHING BAD HAPPENED"
        return result
| gpl-3.0 |
psrthegreat/heroku-buildpack-python-sklearn | vendor/pip-1.3.1/pip/vcs/subversion.py | 63 | 10620 | import os
import re
from pip.backwardcompat import urlparse
from pip import InstallationError
from pip.index import Link
from pip.util import rmtree, display_path, call_subprocess
from pip.log import logger
from pip.vcs import vcs, VersionControl
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile('committed-rev="(\d+)"')
_svn_url_re = re.compile(r'URL: (.+)')
_svn_revision_re = re.compile(r'Revision: (.+)')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
class Subversion(VersionControl):
    """pip VCS backend for Subversion checkouts (``svn+...`` URLs)."""
    name = 'svn'
    dirname = '.svn'
    repo_name = 'checkout'
    schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
    bundle_file = 'svn-checkout.txt'
    guide = ('# This was an svn checkout; to make it a checkout again run:\n'
             'svn checkout --force -r %(rev)s %(url)s .\n')

    def get_info(self, location):
        """Returns (url, revision), where both are strings"""
        assert not location.rstrip('/').endswith(self.dirname), 'Bad directory: %s' % location
        # LANG=C forces untranslated 'svn info' output so the regexes match
        output = call_subprocess(
            [self.cmd, 'info', location], show_stdout=False, extra_environ={'LANG': 'C'})
        match = _svn_url_re.search(output)
        if not match:
            logger.warn('Cannot determine URL of svn checkout %s' % display_path(location))
            logger.info('Output that cannot be parsed: \n%s' % output)
            return None, None
        url = match.group(1).strip()
        match = _svn_revision_re.search(output)
        if not match:
            logger.warn('Cannot determine revision of svn checkout %s' % display_path(location))
            logger.info('Output that cannot be parsed: \n%s' % output)
            return url, None
        return url, match.group(1)

    def parse_vcs_bundle_file(self, content):
        """Parse a pip bundle description file, returning (url, rev) from
        the first non-comment line, or (None, None).

        NOTE(review): the pattern captures ``([^ ])?`` — a single
        character — so only one-digit revisions are extracted; confirm
        whether ``([^ ]*)`` was intended.
        """
        for line in content.splitlines():
            if not line.strip() or line.strip().startswith('#'):
                continue
            match = re.search(r'^-r\s*([^ ])?', line)
            if not match:
                return None, None
            rev = match.group(1)
            rest = line[match.end():].strip().split(None, 1)[0]
            return rest, rev
        return None, None

    def export(self, location):
        """Export the svn repository at the url to the destination location"""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        logger.notify('Exporting svn repository %s to %s' % (url, location))
        logger.indent += 2
        try:
            if os.path.exists(location):
                # Subversion doesn't like to check out over an existing directory
                # --force fixes this, but was only added in svn 1.5
                rmtree(location)
            call_subprocess(
                [self.cmd, 'export'] + rev_options + [url, location],
                filter_stdout=self._filter, show_stdout=False)
        finally:
            logger.indent -= 2

    def switch(self, dest, url, rev_options):
        """Run 'svn switch' to point the checkout in *dest* at *url*."""
        call_subprocess(
            [self.cmd, 'switch'] + rev_options + [url, dest])

    def update(self, dest, rev_options):
        """Run 'svn update' on the checkout in *dest*."""
        call_subprocess(
            [self.cmd, 'update'] + rev_options + [dest])

    def obtain(self, dest):
        """Check the repository out into *dest* (base class decides whether
        an existing directory is switched, updated or replaced)."""
        url, rev = self.get_url_rev()
        rev_options = get_rev_options(url, rev)
        if rev:
            rev_display = ' (to revision %s)' % rev
        else:
            rev_display = ''
        if self.check_destination(dest, url, rev_options, rev_display):
            logger.notify('Checking out %s%s to %s'
                          % (url, rev_display, display_path(dest)))
            call_subprocess(
                [self.cmd, 'checkout', '-q'] + rev_options + [url, dest])

    def get_location(self, dist, dependency_links):
        """Return the dependency link whose #egg= fragment matches *dist*,
        stripped of its fragment, or None."""
        for url in dependency_links:
            egg_fragment = Link(url).egg_fragment
            if not egg_fragment:
                continue
            if '-' in egg_fragment:
                ## FIXME: will this work when a package has - in the name?
                key = '-'.join(egg_fragment.split('-')[:-1]).lower()
            else:
                key = egg_fragment
            if key == dist.key:
                return url.split('#', 1)[0]
        return None

    def get_revision(self, location):
        """
        Return the maximum revision for all files under a given location
        """
        # Note: taken from setuptools.command.egg_info
        revision = 0

        for base, dirs, files in os.walk(location):
            if self.dirname not in dirs:
                dirs[:] = []
                continue    # no sense walking uncontrolled subdirs
            dirs.remove(self.dirname)
            entries_fn = os.path.join(base, self.dirname, 'entries')
            if not os.path.exists(entries_fn):
                ## FIXME: should we warn?
                continue

            dirurl, localrev = self._get_svn_url_rev(base)

            if base == location:
                base_url = dirurl + '/'   # save the root url
            elif not dirurl or not dirurl.startswith(base_url):
                dirs[:] = []
                continue    # not part of the same svn tree, skip it
            revision = max(revision, localrev)
        return revision

    def get_url_rev(self):
        # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
        url, rev = super(Subversion, self).get_url_rev()
        if url.startswith('ssh://'):
            url = 'svn+' + url
        return url, rev

    def get_url(self, location):
        # In cases where the source is in a subdirectory, not alongside setup.py
        # we have to look up in the location until we find a real setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without finding setup.py
                logger.warn("Could not find setup.py for directory %s (tried all parent directories)"
                            % orig_location)
                return None
        return self._get_svn_url_rev(location)[0]

    def _get_svn_url_rev(self, location):
        """Return (url, max_revision) for *location* by reading the
        .svn/entries file (working-copy formats 8/9/10 and older XML),
        falling back to 'svn info --xml' for svn >= 1.7 layouts."""
        f = open(os.path.join(location, self.dirname, 'entries'))
        data = f.read()
        f.close()

        if data.startswith('8') or data.startswith('9') or data.startswith('10'):
            # plain-text entries format: records separated by \n\x0c\n
            data = list(map(str.splitlines, data.split('\n\x0c\n')))
            del data[0][0]  # get rid of the '8'
            url = data[0][3]
            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
        elif data.startswith('<?xml'):
            match = _svn_xml_url_re.search(data)
            if not match:
                raise ValueError('Badly formatted data: %r' % data)
            url = match.group(1)    # get repository URL
            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
        else:
            try:
                # subversion >= 1.7
                xml = call_subprocess([self.cmd, 'info', '--xml', location], show_stdout=False)
                url = _svn_info_xml_url_re.search(xml).group(1)
                revs = [int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)]
            except InstallationError:
                url, revs = None, []

        if revs:
            rev = max(revs)
        else:
            rev = 0

        return url, rev

    def get_tag_revs(self, svn_tag_url):
        """Return [(tag_name, revision), ...] from 'svn ls -v' output."""
        stdout = call_subprocess(
            [self.cmd, 'ls', '-v', svn_tag_url], show_stdout=False)
        results = []
        for line in stdout.splitlines():
            parts = line.split()
            rev = int(parts[0])
            tag = parts[-1].strip('/')
            results.append((tag, rev))
        return results

    def find_tag_match(self, rev, tag_revs):
        """Return the tag with the smallest revision greater than *rev*
        (i.e. the first tag cut after that revision), or None."""
        best_match_rev = None
        best_tag = None
        for tag, tag_rev in tag_revs:
            if (tag_rev > rev and
                (best_match_rev is None or best_match_rev > tag_rev)):
                # FIXME: Is best_match > tag_rev really possible?
                # or is it a sign something is wacky?
                best_match_rev = tag_rev
                best_tag = tag
        return best_tag

    def get_src_requirement(self, dist, location, find_tags=False):
        """Build an 'svn+URL@rev#egg=name' requirement string describing
        the checkout at *location*; tags/branches/trunk layouts get
        distinct egg-name suffixes."""
        repo = self.get_url(location)
        if repo is None:
            return None
        parts = repo.split('/')
        ## FIXME: why not project name?
        egg_project_name = dist.egg_name().split('-', 1)[0]
        rev = self.get_revision(location)
        if parts[-2] in ('tags', 'tag'):
            # It's a tag, perfect!
            full_egg_name = '%s-%s' % (egg_project_name, parts[-1])
        elif parts[-2] in ('branches', 'branch'):
            # It's a branch :(
            full_egg_name = '%s-%s-r%s' % (dist.egg_name(), parts[-1], rev)
        elif parts[-1] == 'trunk':
            # Trunk :-/
            full_egg_name = '%s-dev_r%s' % (dist.egg_name(), rev)
            if find_tags:
                tag_url = '/'.join(parts[:-1]) + '/tags'
                tag_revs = self.get_tag_revs(tag_url)
                match = self.find_tag_match(rev, tag_revs)
                if match:
                    logger.notify('trunk checkout %s seems to be equivalent to tag %s' % match)
                    repo = '%s/%s' % (tag_url, match)
                    full_egg_name = '%s-%s' % (egg_project_name, match)
        else:
            # Don't know what it is
            logger.warn('svn URL does not fit normal structure (tags/branches/trunk): %s' % repo)
            full_egg_name = '%s-dev_r%s' % (egg_project_name, rev)
        return 'svn+%s@%s#egg=%s' % (repo, rev, full_egg_name)
def get_rev_options(url, rev):
    """Build the command-line option list for an svn invocation:
    ``-r <rev>`` when a revision is given, plus ``--username`` /
    ``--password`` when credentials are embedded in *url*.
    """
    options = ['-r', rev] if rev else []

    split = urlparse.urlsplit(url)
    if hasattr(split, 'username'):
        # SplitResult exposes credential attributes (Python >= 2.5)
        user = split.username
        secret = split.password
    else:
        # very old urlparse: pull the auth part out of the netloc by hand
        netloc = split[1]
        user = secret = None
        if '@' in netloc:
            auth = netloc.split('@')[0]
            if ':' in auth:
                user, secret = auth.split(':', 1)
            else:
                user = auth

    if user:
        options += ['--username', user]
    if secret:
        options += ['--password', secret]
    return options
vcs.register(Subversion)
| mit |
flwh/KK_mt6589_iq451 | prebuilts/python/linux-x86/2.7.5/lib/python2.7/sqlite3/__init__.py | 239 | 1037 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/__init__.py: the pysqlite2 package.
#
# Copyright (C) 2005 Gerhard Häring <gh@ghaering.de>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
from dbapi2 import *
| gpl-2.0 |
flwh/KK_mt6589_iq451 | prebuilts/python/linux-x86/2.7.5/lib/python2.7/test/test_strptime.py | 50 | 25849 | """PyUnit testing against strptime"""
import unittest
import time
import locale
import re
import sys
from test import test_support
from datetime import date as datetime_date
import _strptime
class getlang_Tests(unittest.TestCase):
    """Test _getlang"""
    def test_basic(self):
        # _getlang() must mirror the interpreter's current LC_TIME locale.
        self.assertEqual(_strptime._getlang(), locale.getlocale(locale.LC_TIME))
class LocaleTime_Tests(unittest.TestCase):
    """Tests for _strptime.LocaleTime.

    All values are lower-cased when stored in LocaleTime, so make sure to
    compare values after running ``lower`` on them.

    """

    def setUp(self):
        """Create time tuple based on current time."""
        self.time_tuple = time.localtime()
        self.LT_ins = _strptime.LocaleTime()

    def compare_against_time(self, testing, directive, tuple_position,
                             error_msg):
        """Helper method that tests testing against directive based on the
        tuple_position of time_tuple.  Uses error_msg as error message.

        """
        # strftime output is the locale's rendering; LocaleTime stores
        # everything lower-cased, hence the .lower() before comparing.
        strftime_output = time.strftime(directive, self.time_tuple).lower()
        comparison = testing[self.time_tuple[tuple_position]]
        self.assertIn(strftime_output, testing,
                      "%s: not found in tuple" % error_msg)
        self.assertEqual(comparison, strftime_output,
                         "%s: position within tuple incorrect; %s != %s" %
                         (error_msg, comparison, strftime_output))

    def test_weekday(self):
        # Make sure that full and abbreviated weekday names are correct in
        # both string and position with tuple
        self.compare_against_time(self.LT_ins.f_weekday, '%A', 6,
                                  "Testing of full weekday name failed")
        self.compare_against_time(self.LT_ins.a_weekday, '%a', 6,
                                  "Testing of abbreviated weekday name failed")

    def test_month(self):
        # Test full and abbreviated month names; both string and position
        # within the tuple
        self.compare_against_time(self.LT_ins.f_month, '%B', 1,
                                  "Testing against full month name failed")
        self.compare_against_time(self.LT_ins.a_month, '%b', 1,
                                  "Testing against abbreviated month name failed")

    def test_am_pm(self):
        # Make sure AM/PM representation done properly
        strftime_output = time.strftime("%p", self.time_tuple).lower()
        self.assertIn(strftime_output, self.LT_ins.am_pm,
                      "AM/PM representation not in tuple")
        # am_pm is a two-element sequence: index 0 for AM, 1 for PM
        if self.time_tuple[3] < 12: position = 0
        else: position = 1
        self.assertEqual(self.LT_ins.am_pm[position], strftime_output,
                         "AM/PM representation in the wrong position within the tuple")

    def test_timezone(self):
        # Make sure timezone is correct
        timezone = time.strftime("%Z", self.time_tuple).lower()
        if timezone:
            # LocaleTime.timezone is a pair of frozensets:
            # (non-DST names, DST names)
            self.assertTrue(timezone in self.LT_ins.timezone[0] or
                            timezone in self.LT_ins.timezone[1],
                            "timezone %s not found in %s" %
                            (timezone, self.LT_ins.timezone))

    def test_date_time(self):
        # Check that LC_date_time, LC_date, and LC_time are correct
        # the magic date is used so as to not have issues with %c when day of
        # the month is a single digit and has a leading space.  This is not an
        # issue since strptime still parses it correctly.  The problem is
        # testing these directives for correctness by comparing strftime
        # output.
        magic_date = (1999, 3, 17, 22, 44, 55, 2, 76, 0)
        strftime_output = time.strftime("%c", magic_date)
        self.assertEqual(time.strftime(self.LT_ins.LC_date_time, magic_date),
                         strftime_output, "LC_date_time incorrect")
        strftime_output = time.strftime("%x", magic_date)
        self.assertEqual(time.strftime(self.LT_ins.LC_date, magic_date),
                         strftime_output, "LC_date incorrect")
        strftime_output = time.strftime("%X", magic_date)
        self.assertEqual(time.strftime(self.LT_ins.LC_time, magic_date),
                         strftime_output, "LC_time incorrect")
        # Even with empty AM/PM strings the derived LC_time format must
        # remain non-empty.
        LT = _strptime.LocaleTime()
        LT.am_pm = ('', '')
        self.assertTrue(LT.LC_time, "LocaleTime's LC directives cannot handle "
                                    "empty strings")

    def test_lang(self):
        # Make sure lang is set to what _getlang() returns
        # Assuming locale has not changed between now and when self.LT_ins was created
        self.assertEqual(self.LT_ins.lang, _strptime._getlang())
class TimeRETests(unittest.TestCase):
    """Tests for TimeRE."""

    def setUp(self):
        """Construct generic TimeRE object."""
        self.time_re = _strptime.TimeRE()
        self.locale_time = _strptime.LocaleTime()

    def test_pattern(self):
        # Test TimeRE.pattern
        pattern_string = self.time_re.pattern(r"%a %A %d")
        self.assertTrue(pattern_string.find(self.locale_time.a_weekday[2]) != -1,
                        "did not find abbreviated weekday in pattern string '%s'" %
                         pattern_string)
        self.assertTrue(pattern_string.find(self.locale_time.f_weekday[4]) != -1,
                        "did not find full weekday in pattern string '%s'" %
                         pattern_string)
        self.assertTrue(pattern_string.find(self.time_re['d']) != -1,
                        "did not find 'd' directive pattern string '%s'" %
                         pattern_string)

    def test_pattern_escaping(self):
        # Make sure any characters in the format string that might be taken as
        # regex syntax is escaped.
        pattern_string = self.time_re.pattern("\d+")
        self.assertIn(r"\\d\+", pattern_string,
                      "%s does not have re characters escaped properly" %
                      pattern_string)

    def test_compile(self):
        # Check that compiled regex is correct
        found = self.time_re.compile(r"%A").match(self.locale_time.f_weekday[6])
        self.assertTrue(found and found.group('A') == self.locale_time.f_weekday[6],
                        "re object for '%A' failed")
        compiled = self.time_re.compile(r"%a %b")
        found = compiled.match("%s %s" % (self.locale_time.a_weekday[4],
                               self.locale_time.a_month[4]))
        self.assertTrue(found,
            "Match failed with '%s' regex and '%s' string" %
             (compiled.pattern, "%s %s" % (self.locale_time.a_weekday[4],
                                           self.locale_time.a_month[4])))
        self.assertTrue(found.group('a') == self.locale_time.a_weekday[4] and
                         found.group('b') == self.locale_time.a_month[4],
                        "re object couldn't find the abbreviated weekday month in "
                         "'%s' using '%s'; group 'a' = '%s', group 'b' = %s'" %
                         (found.string, found.re.pattern, found.group('a'),
                          found.group('b')))
        # Every supported directive must produce a regex that matches the
        # corresponding strftime output for the current time.
        for directive in ('a','A','b','B','c','d','H','I','j','m','M','p','S',
                          'U','w','W','x','X','y','Y','Z','%'):
            compiled = self.time_re.compile("%" + directive)
            found = compiled.match(time.strftime("%" + directive))
            self.assertTrue(found, "Matching failed on '%s' using '%s' regex" %
                                    (time.strftime("%" + directive),
                                     compiled.pattern))

    def test_blankpattern(self):
        # Make sure when tuple or something has no values no regex is generated.
        # Fixes bug #661354
        test_locale = _strptime.LocaleTime()
        test_locale.timezone = (frozenset(), frozenset())
        self.assertEqual(_strptime.TimeRE(test_locale).pattern("%Z"), '',
                         "with timezone == ('',''), TimeRE().pattern('%Z') != ''")

    def test_matching_with_escapes(self):
        # Make sure a format that requires escaping of characters works
        compiled_re = self.time_re.compile("\w+ %m")
        found = compiled_re.match("\w+ 10")
        self.assertTrue(found, "Escaping failed of format '\w+ 10'")

    def test_locale_data_w_regex_metacharacters(self):
        # Check that if locale data contains regex metacharacters they are
        # escaped properly.
        # Discovered by bug #1039270 .
        locale_time = _strptime.LocaleTime()
        locale_time.timezone = (frozenset(("utc", "gmt",
                                           "Tokyo (standard time)")),
                                frozenset("Tokyo (daylight time)"))
        time_re = _strptime.TimeRE(locale_time)
        self.assertTrue(time_re.compile("%Z").match("Tokyo (standard time)"),
                        "locale data that contains regex metacharacters is not"
                        " properly escaped")

    def test_whitespace_substitution(self):
        # When pattern contains whitespace, make sure it is taken into account
        # so as to not allow to subpatterns to end up next to each other and
        # "steal" characters from each other.
        pattern = self.time_re.pattern('%j %H')
        self.assertFalse(re.match(pattern, "180"))
        self.assertTrue(re.match(pattern, "18 0"))
class StrptimeTests(unittest.TestCase):
"""Tests for _strptime.strptime."""
def setUp(self):
"""Create testing time tuple."""
self.time_tuple = time.gmtime()
def test_ValueError(self):
# Make sure ValueError is raised when match fails or format is bad
self.assertRaises(ValueError, _strptime._strptime_time, data_string="%d",
format="%A")
for bad_format in ("%", "% ", "%e"):
try:
_strptime._strptime_time("2005", bad_format)
except ValueError:
continue
except Exception, err:
self.fail("'%s' raised %s, not ValueError" %
(bad_format, err.__class__.__name__))
else:
self.fail("'%s' did not raise ValueError" % bad_format)
def test_unconverteddata(self):
# Check ValueError is raised when there is unconverted data
self.assertRaises(ValueError, _strptime._strptime_time, "10 12", "%m")
def helper(self, directive, position):
"""Helper fxn in testing."""
strf_output = time.strftime("%" + directive, self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%" + directive)
self.assertTrue(strp_output[position] == self.time_tuple[position],
"testing of '%s' directive failed; '%s' -> %s != %s" %
(directive, strf_output, strp_output[position],
self.time_tuple[position]))
def test_year(self):
# Test that the year is handled properly
for directive in ('y', 'Y'):
self.helper(directive, 0)
# Must also make sure %y values are correct for bounds set by Open Group
for century, bounds in ((1900, ('69', '99')), (2000, ('00', '68'))):
for bound in bounds:
strp_output = _strptime._strptime_time(bound, '%y')
expected_result = century + int(bound)
self.assertTrue(strp_output[0] == expected_result,
"'y' test failed; passed in '%s' "
"and returned '%s'" % (bound, strp_output[0]))
def test_month(self):
# Test for month directives
for directive in ('B', 'b', 'm'):
self.helper(directive, 1)
def test_day(self):
# Test for day directives
self.helper('d', 2)
def test_hour(self):
# Test hour directives
self.helper('H', 3)
strf_output = time.strftime("%I %p", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%I %p")
self.assertTrue(strp_output[3] == self.time_tuple[3],
"testing of '%%I %%p' directive failed; '%s' -> %s != %s" %
(strf_output, strp_output[3], self.time_tuple[3]))
def test_minute(self):
# Test minute directives
self.helper('M', 4)
def test_second(self):
# Test second directives
self.helper('S', 5)
def test_fraction(self):
# Test microseconds
import datetime
d = datetime.datetime(2012, 12, 20, 12, 34, 56, 78987)
tup, frac = _strptime._strptime(str(d), format="%Y-%m-%d %H:%M:%S.%f")
self.assertEqual(frac, d.microsecond)
def test_weekday(self):
# Test weekday directives
for directive in ('A', 'a', 'w'):
self.helper(directive,6)
def test_julian(self):
# Test julian directives
self.helper('j', 7)
def test_timezone(self):
# Test timezone directives.
# When gmtime() is used with %Z, entire result of strftime() is empty.
# Check for equal timezone names deals with bad locale info when this
# occurs; first found in FreeBSD 4.4.
strp_output = _strptime._strptime_time("UTC", "%Z")
self.assertEqual(strp_output.tm_isdst, 0)
strp_output = _strptime._strptime_time("GMT", "%Z")
self.assertEqual(strp_output.tm_isdst, 0)
time_tuple = time.localtime()
strf_output = time.strftime("%Z") #UTC does not have a timezone
strp_output = _strptime._strptime_time(strf_output, "%Z")
locale_time = _strptime.LocaleTime()
if time.tzname[0] != time.tzname[1] or not time.daylight:
self.assertTrue(strp_output[8] == time_tuple[8],
"timezone check failed; '%s' -> %s != %s" %
(strf_output, strp_output[8], time_tuple[8]))
else:
self.assertTrue(strp_output[8] == -1,
"LocaleTime().timezone has duplicate values and "
"time.daylight but timezone value not set to -1")
def test_bad_timezone(self):
# Explicitly test possibility of bad timezone;
# when time.tzname[0] == time.tzname[1] and time.daylight
tz_name = time.tzname[0]
if tz_name.upper() in ("UTC", "GMT"):
return
try:
original_tzname = time.tzname
original_daylight = time.daylight
time.tzname = (tz_name, tz_name)
time.daylight = 1
tz_value = _strptime._strptime_time(tz_name, "%Z")[8]
self.assertEqual(tz_value, -1,
"%s lead to a timezone value of %s instead of -1 when "
"time.daylight set to %s and passing in %s" %
(time.tzname, tz_value, time.daylight, tz_name))
finally:
time.tzname = original_tzname
time.daylight = original_daylight
def test_date_time(self):
    """%c must fill every date/time position (year through second)."""
    for tuple_position in (0, 1, 2, 3, 4, 5):
        self.helper('c', tuple_position)
def test_date(self):
    """%x must fill the date positions (year, month, day)."""
    for tuple_position in (0, 1, 2):
        self.helper('x', tuple_position)
def test_time(self):
    """%X must fill the time positions (hour, minute, second)."""
    for tuple_position in (3, 4, 5):
        self.helper('X', tuple_position)
def test_percent(self):
# Make sure % signs are handled properly
strf_output = time.strftime("%m %% %Y", self.time_tuple)
strp_output = _strptime._strptime_time(strf_output, "%m %% %Y")
self.assertTrue(strp_output[0] == self.time_tuple[0] and
strp_output[1] == self.time_tuple[1],
"handling of percent sign failed")
def test_caseinsensitive(self):
# Should handle names case-insensitively.
strf_output = time.strftime("%B", self.time_tuple)
self.assertTrue(_strptime._strptime_time(strf_output.upper(), "%B"),
"strptime does not handle ALL-CAPS names properly")
self.assertTrue(_strptime._strptime_time(strf_output.lower(), "%B"),
"strptime does not handle lowercase names properly")
self.assertTrue(_strptime._strptime_time(strf_output.capitalize(), "%B"),
"strptime does not handle capword names properly")
def test_defaults(self):
# Default return value should be (1900, 1, 1, 0, 0, 0, 0, 1, 0)
defaults = (1900, 1, 1, 0, 0, 0, 0, 1, -1)
strp_output = _strptime._strptime_time('1', '%m')
self.assertTrue(strp_output == defaults,
"Default values for strptime() are incorrect;"
" %s != %s" % (strp_output, defaults))
def test_escaping(self):
# Make sure all characters that have regex significance are escaped.
# Parentheses are in a purposeful order; will cause an error of
# unbalanced parentheses when the regex is compiled if they are not
# escaped.
# Test instigated by bug #796149 .
need_escaping = ".^$*+?{}\[]|)("
self.assertTrue(_strptime._strptime_time(need_escaping, need_escaping))
def test_feb29_on_leap_year_without_year(self):
    # Must not raise: with no %Y given, "Feb 29" has to be accepted even
    # though the nominal default year (1900) is not a leap year.
    time.strptime("Feb 29", "%b %d")
def test_mar1_comes_after_feb29_even_when_omitting_the_year(self):
    # Ordering across the Feb-29 boundary must still hold when the year
    # is omitted from the format.
    self.assertLess(
        time.strptime("Feb 29", "%b %d"),
        time.strptime("Mar 1", "%b %d"))
class Strptime12AMPMTests(unittest.TestCase):
    """Regression tests for '%I %p' handling of 12 noon and 12 midnight."""

    def test_twelve_noon_midnight(self):
        # '12 PM' is noon (hour 12); '12 AM' is midnight (hour 0) — both
        # through the public time.strptime and the private fast path.
        for hour_string, expected_hour in (('12 PM', 12), ('12 AM', 0)):
            self.assertEqual(time.strptime(hour_string, '%I %p')[3],
                             expected_hour)
            self.assertEqual(_strptime._strptime_time(hour_string, '%I %p')[3],
                             expected_hour)
class JulianTests(unittest.TestCase):
    """Every julian day number in 1..366 must be accepted by %j."""

    def test_all_julian_days(self):
        # 2004 is a leap year, so all 366 day numbers are valid.
        for julian_day in range(1, 367):
            parsed = _strptime._strptime_time('%d 2004' % julian_day, '%j %Y')
            self.assertEqual(parsed[7], julian_day)
class CalculationTests(unittest.TestCase):
    """Test that strptime() fills in missing info correctly."""

    def setUp(self):
        # A fresh reference tuple per test; all tests format it with
        # strftime and then check what _strptime reconstructs.
        self.time_tuple = time.gmtime()

    def test_julian_calculation(self):
        # Make sure that when the Julian day is missing it is calculated
        # from year/month/day.
        format_string = "%Y %m %d %H %M %S %w %Z"
        result = _strptime._strptime_time(
            time.strftime(format_string, self.time_tuple), format_string)
        self.assertTrue(result.tm_yday == self.time_tuple.tm_yday,
                        "Calculation of tm_yday failed; %s != %s" %
                        (result.tm_yday, self.time_tuple.tm_yday))

    def test_gregorian_calculation(self):
        # Test that the Gregorian date can be calculated from the Julian day.
        format_string = "%Y %H %M %S %w %j %Z"
        result = _strptime._strptime_time(
            time.strftime(format_string, self.time_tuple), format_string)
        self.assertTrue(result.tm_year == self.time_tuple.tm_year and
                        result.tm_mon == self.time_tuple.tm_mon and
                        result.tm_mday == self.time_tuple.tm_mday,
                        "Calculation of Gregorian date failed;"
                        "%s-%s-%s != %s-%s-%s" %
                        (result.tm_year, result.tm_mon, result.tm_mday,
                         self.time_tuple.tm_year, self.time_tuple.tm_mon,
                         self.time_tuple.tm_mday))

    def test_day_of_week_calculation(self):
        # Test that the day of the week is calculated as needed.
        format_string = "%Y %m %d %H %S %j %Z"
        result = _strptime._strptime_time(
            time.strftime(format_string, self.time_tuple), format_string)
        self.assertTrue(result.tm_wday == self.time_tuple.tm_wday,
                        "Calculation of day of the week failed;"
                        "%s != %s" % (result.tm_wday, self.time_tuple.tm_wday))

    def test_week_of_year_and_day_of_week_calculation(self):
        # Should be able to infer date if given year, week of year (%U or %W)
        # and day of the week.
        def test_helper(ymd_tuple, test_reason):
            for directive in ('W', 'U'):
                format_string = "%%Y %%%s %%w" % directive
                dt_date = datetime_date(*ymd_tuple)
                strp_input = dt_date.strftime(format_string)
                strp_output = _strptime._strptime_time(strp_input, format_string)
                self.assertTrue(strp_output[:3] == ymd_tuple,
                                "%s(%s) test failed w/ '%s': %s != %s (%s != %s)" %
                                (test_reason, directive, strp_input,
                                 strp_output[:3], ymd_tuple,
                                 strp_output[7], dt_date.timetuple()[7]))
        test_helper((1901, 1, 3), "week 0")
        test_helper((1901, 1, 8), "common case")
        test_helper((1901, 1, 13), "day on Sunday")
        test_helper((1901, 1, 14), "day on Monday")
        test_helper((1905, 1, 1), "Jan 1 on Sunday")
        test_helper((1906, 1, 1), "Jan 1 on Monday")
        test_helper((1906, 1, 7), "first Sunday in a year starting on Monday")
        test_helper((1905, 12, 31), "Dec 31 on Sunday")
        test_helper((1906, 12, 31), "Dec 31 on Monday")
        test_helper((2008, 12, 29), "Monday in the last week of the year")
        test_helper((2008, 12, 22), "Monday in the second-to-last week of the "
                                    "year")
        test_helper((1978, 10, 23), "randomly chosen date")
        test_helper((2004, 12, 18), "randomly chosen date")
        test_helper((1978, 10, 23), "year starting and ending on Monday while "
                                    "date not on Sunday or Monday")
        test_helper((1917, 12, 17), "year starting and ending on Monday with "
                                    "a Monday not at the beginning or end "
                                    "of the year")
        test_helper((1917, 12, 31), "Dec 31 on Monday with year starting and "
                                    "ending on Monday")
        # The original used the zero-prefixed literals 01 and 07 here;
        # those are a SyntaxError on Python 3 (PEP 3127) and denoted the
        # same decimal values anyway, so they are written plainly now.
        test_helper((2007, 1, 7), "First Sunday of 2007")
        test_helper((2007, 1, 14), "Second Sunday of 2007")
        test_helper((2006, 12, 31), "Last Sunday of 2006")
        test_helper((2006, 12, 24), "Second to last Sunday of 2006")
class CacheTests(unittest.TestCase):
    """Test that caching works properly."""

    def test_time_re_recreation(self):
        # Make sure cache is recreated when current locale does not match what
        # cached object was created with.
        _strptime._strptime_time("10", "%d")
        _strptime._strptime_time("2005", "%Y")
        # Poison the cached LocaleTime's language ("Ni" is deliberately
        # bogus) so the cache no longer matches the current locale.
        _strptime._TimeRE_cache.locale_time.lang = "Ni"
        original_time_re = _strptime._TimeRE_cache
        _strptime._strptime_time("10", "%d")
        # The poisoned cache must have been replaced, and the regex cache
        # flushed down to the single freshly-compiled entry.
        self.assertIsNot(original_time_re, _strptime._TimeRE_cache)
        self.assertEqual(len(_strptime._regex_cache), 1)

    def test_regex_cleanup(self):
        # Make sure cached regexes are discarded when cache becomes "full".
        try:
            del _strptime._regex_cache['%d']
        except KeyError:
            pass
        bogus_key = 0
        # Overfill the cache with dummy entries so the next parse forces
        # a cleanup.
        while len(_strptime._regex_cache) <= _strptime._CACHE_MAX_SIZE:
            _strptime._regex_cache[bogus_key] = None
            bogus_key += 1
        _strptime._strptime_time("10", "%d")
        self.assertEqual(len(_strptime._regex_cache), 1)

    def test_new_localetime(self):
        # A new LocaleTime instance should be created when a new TimeRE object
        # is created.
        locale_time_id = _strptime._TimeRE_cache.locale_time
        _strptime._TimeRE_cache.locale_time.lang = "Ni"
        _strptime._strptime_time("10", "%d")
        self.assertIsNot(locale_time_id, _strptime._TimeRE_cache.locale_time)

    def test_TimeRE_recreation(self):
        # The TimeRE instance should be recreated upon changing the locale.
        locale_info = locale.getlocale(locale.LC_TIME)
        try:
            locale.setlocale(locale.LC_TIME, ('en_US', 'UTF8'))
        except locale.Error:
            # Platform lacks the test locale; nothing to verify.
            return
        try:
            _strptime._strptime_time('10', '%d')
            # Get id of current cache object.
            first_time_re = _strptime._TimeRE_cache
            try:
                # Change the locale and force a recreation of the cache.
                locale.setlocale(locale.LC_TIME, ('de_DE', 'UTF8'))
                _strptime._strptime_time('10', '%d')
                # Get the new cache object's id.
                second_time_re = _strptime._TimeRE_cache
                # They should not be equal.
                self.assertIsNot(first_time_re, second_time_re)
            # Possible test locale is not supported while initial locale is.
            # If this is the case just suppress the exception and fall-through
            # to the resetting to the original locale.
            except locale.Error:
                pass
        # Make sure we don't trample on the locale setting once we leave the
        # test.
        finally:
            locale.setlocale(locale.LC_TIME, locale_info)
def test_main():
    """Run the whole _strptime test suite via the Python 2 test_support runner."""
    test_support.run_unittest(
        getlang_Tests,
        LocaleTime_Tests,
        TimeRETests,
        StrptimeTests,
        Strptime12AMPMTests,
        JulianTests,
        CalculationTests,
        CacheTests
    )

if __name__ == '__main__':
    test_main()
| gpl-2.0 |
rodoviario/Tomb | extras/tomber/tomber/test.py | 5 | 3293 | import os
import unittest
from tomber import *
from random import randrange
from shutil import rmtree, copyfile
class tomberTester(unittest.TestCase):
    """End-to-end tests for the tomber wrappers around the Tomb CLI.

    The tests are intentionally order-dependent (hence the numeric
    prefixes): the tomb dug in test_01 is reused by every later test.
    Scratch files are created once per class and removed at the end.
    """

    @classmethod
    def setUpClass(cls):
        # Conventional classmethod argument name is `cls`, not `self`.
        # Embed the PID in every scratch file name so parallel runs in
        # the same directory cannot clash.
        cls.pid = str(os.getpid())
        cls.tombfile = '.'.join([cls.pid, 'tomb'])
        cls.keyfile = '.'.join([cls.pid, 'key'])
        cls.keyfile2 = '.'.join([cls.pid, '2ndkey'])
        cls.exhumedkey = '.'.join([cls.pid, 'exhumed'])
        cls.mountpath = './tmptomb'
        # Generate a passphrase with spaces: replace("", " ") interleaves
        # a space between every character and [1:-1] trims the outer pair.
        cls.passphrase = str(randrange(2 ** 64)).replace("", " ")[1:-1]
        cls.passphrase2 = str(randrange(2 ** 64))
        cls.imagefile = '.'.join([cls.pid, 'jpg'])
        copyfile(
            '/'.join([os.path.dirname(__file__), 'test.jpg']),
            cls.imagefile)

    @classmethod
    def tearDownClass(cls):
        # Remove every scratch artifact created by the run.
        os.unlink(cls.tombfile)
        os.unlink(cls.keyfile)
        os.unlink(cls.keyfile2)
        os.unlink(cls.imagefile)
        os.unlink(cls.exhumedkey)
        rmtree(cls.mountpath)

    def test_01_dig(self):
        """ Dig a tomb of 10mb"""
        self.assertTrue(tdig(self.tombfile, 10)[0])

    def test_02_forge(self):
        """ Forge a keyfile and set a passphrase """
        self.assertTrue(tforge(self.keyfile, self.passphrase)[0])

    def test_03_lock(self):
        """ Lock created tomb with forged keyfile """
        self.assertTrue(tlock(self.tombfile, self.keyfile, self.passphrase)[0])

    def test_04_open(self):
        """ Open the created tomb with forged keyfile and passhrase """
        self.assertTrue(topen(
            self.tombfile, self.keyfile, self.passphrase, self.mountpath
            )[0]
        )

    def test_05_close(self):
        """ Close the created tomb """
        self.assertTrue(tclose(self.tombfile.split('.')[0])[0])

    def test_06_resize(self):
        """ Resize created tomb to 12mb """
        self.assertTrue(tresize(
            self.tombfile, self.keyfile, self.passphrase, 12
            )[0]
        )

    def test_07_passwd(self):
        """ Change password in keyfile """
        self.assertTrue(tpasswd(
            self.keyfile, self.passphrase2, self.passphrase
            )[0]
        )

    def test_08_bury(self):
        """ Bury keyfile in a image file """
        self.assertTrue(tbury(
            self.keyfile, self.passphrase2, self.imagefile
            )[0]
        )

    def test_09_exhume(self):
        """ Exhume a key from an image """
        self.assertTrue(texhume(
            self.exhumedkey, self.passphrase2, self.imagefile
            )[0]
        )

    def test_10_setkey(self):
        """ Forge a new key and and set different keyfile to created tomb """
        tforge(self.keyfile2, self.passphrase)
        self.assertTrue(tsetkey(
            self.keyfile,
            self.tombfile,
            self.keyfile2,
            self.passphrase,
            self.passphrase2
            )[0]
        )

    def test_11_slam(self):
        """ Slam open tombs """
        topen(self.tombfile, self.keyfile, self.passphrase2, self.mountpath)
        self.assertTrue(tslam()[0])

if __name__ == '__main__':
    unittest.main()
StefanRijnhart/odoo | openerp/addons/base/ir/ir_model.py | 1 | 61185 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2014 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from collections import defaultdict
import logging
import re
import time
import types
import openerp
from openerp import SUPERUSER_ID
from openerp import models, tools, api
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.osv.orm import BaseModel, Model, MAGIC_COLUMNS, except_orm
from openerp.tools import config
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
MODULE_UNINSTALL_FLAG = '_force_unlink'
def _get_fields_type(self, cr, uid, context=None):
    """Return the selectable field types as a sorted list of (name, name) pairs.

    The abstract ``_column`` base class, deprecated column types and
    function fields are all excluded.
    """
    # Collected with a plain loop of guard clauses instead of one big
    # comprehension: avoids the deeply nested condition that broke on
    # RedHat's Python 2.6 (see bug 939653) and reads more easily.
    field_types = []
    for name, candidate in fields.__dict__.iteritems():
        if type(candidate) != types.TypeType:
            continue
        if not issubclass(candidate, fields._column):
            continue
        if candidate == fields._column or candidate._deprecated:
            continue
        if issubclass(candidate, fields.function):
            continue
        field_types.append((name, name))
    return sorted(field_types)
def _in_modules(self, cr, uid, ids, field_name, arg, context=None):
    """Pseudo-method used by fields.function in ir.model / ir.model.fields.

    For each record id, return a comma-separated, sorted string of the
    *installed* modules that define an XML id for that record.
    """
    module_pool = self.pool["ir.module.module"]
    installed_ids = module_pool.search(cr, uid, [('state', '=', 'installed')])
    installed_modules = set(
        rec['name']
        for rec in module_pool.read(cr, uid, installed_ids, ['name'],
                                    context=context))
    xml_ids = osv.osv._get_xml_ids(self, cr, uid, ids)
    result = {}
    for record_id, ext_ids in xml_ids.iteritems():
        # The module is the part of the XML id before the first dot.
        defining = set(ext_id.split('.')[0] for ext_id in ext_ids)
        result[record_id] = ', '.join(sorted(installed_modules & defining))
    return result
class ir_model(osv.osv):
    """Registry of all ORM models known to the database (base and custom)."""
    _name = 'ir.model'
    _description = "Models"
    _order = 'model'

    def _is_osv_memory(self, cr, uid, ids, field_name, arg, context=None):
        # fields.function getter for 'osv_memory': True when the model is
        # registered as a TransientModel in the pool.
        models = self.browse(cr, uid, ids, context=context)
        res = dict.fromkeys(ids)
        for model in models:
            if model.model in self.pool:
                res[model.id] = self.pool[model.model].is_transient()
            else:
                # Database and registry can disagree (e.g. half-uninstalled
                # module); log instead of crashing the whole read.
                _logger.error('Missing model %s' % (model.model, ))
        return res

    def _search_osv_memory(self, cr, uid, model, name, domain, context=None):
        # fields.function search for 'osv_memory'; only '=' and '!=' make
        # sense on a computed boolean, so anything else is rejected.
        if not domain:
            return []
        __, operator, value = domain[0]
        if operator not in ['=', '!=']:
            raise osv.except_osv(_("Invalid Search Criteria"), _('The osv_memory field can only be compared with = and != operator.'))
        value = bool(value) if operator == '=' else not bool(value)
        all_model_ids = self.search(cr, uid, [], context=context)
        is_osv_mem = self._is_osv_memory(cr, uid, all_model_ids, 'osv_memory', arg=None, context=context)
        return [('id', 'in', [id for id in is_osv_mem if bool(is_osv_mem[id]) == value])]

    def _view_ids(self, cr, uid, ids, field_name, arg, context=None):
        # fields.function getter: ids of the ir.ui.view records whose model
        # is this one.
        models = self.browse(cr, uid, ids)
        res = {}
        for model in models:
            res[model.id] = self.pool["ir.ui.view"].search(cr, uid, [('model', '=', model.model)])
        return res

    def _inherited_models(self, cr, uid, ids, field_name, arg, context=None):
        # fields.function getter: ir.model ids of the models listed in each
        # model's _inherits mapping.
        res = {}
        for model in self.browse(cr, uid, ids, context=context):
            res[model.id] = []
            inherited_models = [model_name for model_name in self.pool[model.model]._inherits]
            if inherited_models:
                res[model.id] = self.search(cr, uid, [('model', 'in', inherited_models)], context=context)
        return res

    _columns = {
        'name': fields.char('Model Description', translate=True, required=True),
        'model': fields.char('Model', required=True, select=1),
        'info': fields.text('Information'),
        'field_id': fields.one2many('ir.model.fields', 'model_id', 'Fields', required=True, copy=True),
        'inherited_model_ids': fields.function(_inherited_models, type="many2many", obj="ir.model", string="Inherited models",
            help="The list of models that extends the current model."),
        'state': fields.selection([('manual','Custom Object'),('base','Base Object')],'Type', readonly=True),
        'access_ids': fields.one2many('ir.model.access', 'model_id', 'Access'),
        'osv_memory': fields.function(_is_osv_memory, string='Transient Model', type='boolean',
            fnct_search=_search_osv_memory,
            help="This field specifies whether the model is transient or not (i.e. if records are automatically deleted from the database or not)"),
        'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the object is defined or inherited'),
        'view_ids': fields.function(_view_ids, type='one2many', obj='ir.ui.view', string='Views'),
    }

    _defaults = {
        'model': 'x_',
        # Records created from the UI (context carries 'manual') are custom.
        'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
    }

    def _check_model_name(self, cr, uid, ids, context=None):
        # Constraint: custom models must be 'x_'-prefixed, and every model
        # name is limited to letters, digits, underscores and dots.
        for model in self.browse(cr, uid, ids, context=context):
            if model.state=='manual':
                if not model.model.startswith('x_'):
                    return False
            if not re.match('^[a-z_A-Z0-9.]+$',model.model):
                return False
        return True

    def _model_name_msg(self, cr, uid, ids, context=None):
        return _('The Object name must start with x_ and not contain any special character !')

    _constraints = [
        (_check_model_name, _model_name_msg, ['model']),
    ]

    _sql_constraints = [
        ('obj_name_uniq', 'unique (model)', 'Each model must be unique!'),
    ]

    # overridden to allow searching both on model name (model field)
    # and model description (name field)
    def _name_search(self, cr, uid, name='', args=None, operator='ilike', context=None, limit=100, name_get_uid=None):
        if args is None:
            args = []
        domain = args + ['|', ('model', operator, name), ('name', operator, name)]
        return self.name_get(cr, name_get_uid or uid,
                             super(ir_model, self).search(cr, uid, domain, limit=limit, context=context),
                             context=context)

    def _drop_table(self, cr, uid, ids, context=None):
        # Drop the backing SQL relation of each model: plain DROP for a
        # view, DROP ... CASCADE for a real table.
        for model in self.browse(cr, uid, ids, context):
            model_pool = self.pool[model.model]
            cr.execute('select relkind from pg_class where relname=%s', (model_pool._table,))
            result = cr.fetchone()
            if result and result[0] == 'v':
                cr.execute('DROP view %s' % (model_pool._table,))
            elif result and result[0] == 'r':
                cr.execute('DROP TABLE %s CASCADE' % (model_pool._table,))
        return True

    def unlink(self, cr, user, ids, context=None):
        # Prevent manual deletion of module tables
        if context is None: context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not context.get(MODULE_UNINSTALL_FLAG):
            for model in self.browse(cr, user, ids, context):
                if model.state != 'manual':
                    raise except_orm(_('Error'), _("Model '%s' contains module data and cannot be removed!") % (model.name,))

        self._drop_table(cr, user, ids, context)
        res = super(ir_model, self).unlink(cr, user, ids, context)
        if not context.get(MODULE_UNINSTALL_FLAG):
            # only reload pool for normal unlink. For module uninstall the
            # reload is done independently in openerp.modules.loading
            cr.commit() # must be committed before reloading registry in new cursor
            api.Environment.reset()
            RegistryManager.new(cr.dbname)
            RegistryManager.signal_registry_change(cr.dbname)

        return res

    def write(self, cr, user, ids, vals, context=None):
        if context:
            context = dict(context)
            context.pop('__last_update', None)
        # Filter out operations 4 link from field id, because openerp-web
        # always write (4,id,False) even for non dirty items
        if 'field_id' in vals:
            vals['field_id'] = [op for op in vals['field_id'] if op[0] != 4]
        return super(ir_model,self).write(cr, user, ids, vals, context)

    def create(self, cr, user, vals, context=None):
        if context is None:
            context = {}
        if context and context.get('manual'):
            vals['state']='manual'
        res = super(ir_model,self).create(cr, user, vals, context)
        if vals.get('state','base')=='manual':
            # Custom model: build its Python class, create its table and
            # foreign keys, then re-setup and signal the registry.
            self.instanciate(cr, user, vals['model'], context)

            model = self.pool[vals['model']]
            ctx = dict(context,
                field_name=vals['name'],
                field_state='manual',
                select=vals.get('select_level', '0'),
                update_custom_fields=True)
            model._auto_init(cr, ctx)
            model._auto_end(cr, ctx) # actually create FKs!
            self.pool.setup_models(cr, partial=(not self.pool.ready))
            RegistryManager.signal_registry_change(cr.dbname)

        return res

    def instanciate(self, cr, user, model, context=None):
        # Define and register an in-memory model class for a custom model.
        if isinstance(model, unicode):
            model = model.encode('utf-8')

        class CustomModel(models.Model):
            _name = model
            _module = False
            _custom = True

        obj = CustomModel._build_model(self.pool, cr)
        # Display name: prefer x_name, else the first column, else id.
        obj._rec_name = CustomModel._rec_name = (
            'x_name' if 'x_name' in obj._columns else
            list(obj._columns)[0] if obj._columns else
            'id'
        )
class ir_model_fields(osv.osv):
    """Registry of all model fields, base and custom ('x_'-prefixed)."""
    _name = 'ir.model.fields'
    _description = "Fields"
    _rec_name = 'field_description'

    _columns = {
        'name': fields.char('Name', required=True, select=1),
        'complete_name': fields.char('Complete Name', select=1),
        'model': fields.char('Object Name', required=True, select=1,
            help="The technical name of the model this field belongs to"),
        'relation': fields.char('Object Relation',
            help="For relationship fields, the technical name of the target model"),
        'relation_field': fields.char('Relation Field',
            help="For one2many fields, the field on the target model that implement the opposite many2one relationship"),
        'model_id': fields.many2one('ir.model', 'Model', required=True, select=True, ondelete='cascade',
            help="The model this field belongs to"),
        'field_description': fields.char('Field Label', required=True),
        'ttype': fields.selection(_get_fields_type, 'Field Type', required=True),
        'selection': fields.char('Selection Options', help="List of options for a selection field, "
            "specified as a Python expression defining a list of (key, label) pairs. "
            "For example: [('blue','Blue'),('yellow','Yellow')]"),
        'required': fields.boolean('Required'),
        'readonly': fields.boolean('Readonly'),
        'select_level': fields.selection([('0','Not Searchable'),('1','Always Searchable'),('2','Advanced Search (deprecated)')],'Searchable', required=True),
        'translate': fields.boolean('Translatable', help="Whether values for this field can be translated (enables the translation mechanism for that field)"),
        'size': fields.integer('Size'),
        'state': fields.selection([('manual','Custom Field'),('base','Base Field')],'Type', required=True, readonly=True, select=1),
        'on_delete': fields.selection([('cascade', 'Cascade'), ('set null', 'Set NULL'), ('restrict', 'Restrict')],
                                      'On Delete', help='On delete property for many2one fields'),
        'domain': fields.char('Domain', help="The optional domain to restrict possible values for relationship fields, "
            "specified as a Python expression defining a list of triplets. "
            "For example: [('color','=','red')]"),
        'groups': fields.many2many('res.groups', 'ir_model_fields_group_rel', 'field_id', 'group_id', 'Groups'),
        'selectable': fields.boolean('Selectable'),
        'modules': fields.function(_in_modules, type='char', string='In Modules', help='List of modules in which the field is defined'),
        'serialization_field_id': fields.many2one('ir.model.fields', 'Serialization Field', domain = "[('ttype','=','serialized')]",
                                                  ondelete='cascade', help="If set, this field will be stored in the sparse "
                                                                           "structure of the serialization field, instead "
                                                                           "of having its own database column. This cannot be "
                                                                           "changed after creation."),
    }
    # NOTE: a redundant duplicate `_rec_name = 'field_description'` that
    # followed _columns in the original has been dropped; the attribute is
    # already set above.

    _defaults = {
        'selection': "",
        'domain': "[]",
        'name': 'x_',
        'state': lambda self,cr,uid,ctx=None: (ctx and ctx.get('manual',False)) and 'manual' or 'base',
        'on_delete': 'set null',
        'select_level': '0',
        'field_description': '',
        'selectable': 1,
    }
    _order = "name"

    def _check_selection(self, cr, uid, selection, context=None):
        """Validate that `selection` evals to a non-empty list of 2-item pairs."""
        try:
            selection_list = eval(selection)
        except Exception:
            _logger.warning('Invalid selection list definition for fields.selection', exc_info=True)
            raise except_orm(_('Error'),
                    _("The Selection Options expression is not a valid Pythonic expression."
                      "Please provide an expression in the [('key','Label'), ...] format."))

        check = True
        if not (isinstance(selection_list, list) and selection_list):
            check = False
        else:
            for item in selection_list:
                if not (isinstance(item, (tuple,list)) and len(item) == 2):
                    check = False
                    break

        if not check:
            # Fixed grammar of the user-facing message ("is must be").
            raise except_orm(_('Error'),
                _("The Selection Options expression must be in the [('key','Label'), ...] format!"))
        return True

    def _size_gt_zero_msg(self, cr, user, ids, context=None):
        return _('Size of the field can never be less than 0 !')

    _sql_constraints = [
        ('size_gt_zero', 'CHECK (size>=0)',_size_gt_zero_msg ),
    ]

    def _drop_column(self, cr, uid, ids, context=None):
        """Drop the database column (and m2m relation table) backing each field."""
        for field in self.browse(cr, uid, ids, context):
            if field.name in MAGIC_COLUMNS:
                continue
            model = self.pool[field.model]
            cr.execute('select relkind from pg_class where relname=%s', (model._table,))
            result = cr.fetchone()
            # Parameterized query: the original interpolated the table and
            # column names straight into the SQL string.
            cr.execute("SELECT column_name FROM information_schema.columns "
                       "WHERE table_name=%s AND column_name=%s",
                       (model._table, field.name))
            column_name = cr.fetchone()
            if column_name and (result and result[0] == 'r'):
                cr.execute('ALTER table "%s" DROP column "%s" cascade' % (model._table, field.name))
            # remove m2m relation table for custom fields
            # we consider the m2m relation is only one way as it's not possible
            # to specify the relation table in the interface for custom fields
            # TODO master: maybe use ir.model.relations for custom fields
            if field.state == 'manual' and field.ttype == 'many2many':
                rel_name = model._fields[field.name].relation
                cr.execute('DROP table "%s"' % (rel_name))
            model._pop_field(field.name)

        return True

    def unlink(self, cr, user, ids, context=None):
        # Prevent manual deletion of module columns
        if context is None: context = {}
        if isinstance(ids, (int, long)):
            ids = [ids]
        if not context.get(MODULE_UNINSTALL_FLAG) and \
                any(field.state != 'manual' for field in self.browse(cr, user, ids, context)):
            raise except_orm(_('Error'), _("This column contains module data and cannot be removed!"))

        self._drop_column(cr, user, ids, context)
        res = super(ir_model_fields, self).unlink(cr, user, ids, context)
        if not context.get(MODULE_UNINSTALL_FLAG):
            # The registry must be rebuilt so the dropped field disappears
            # from the in-memory models; commit first so the new cursor
            # used by the reload sees the change.
            cr.commit()
            self.pool.setup_models(cr, partial=(not self.pool.ready))
            RegistryManager.signal_registry_change(cr.dbname)
        return res

    def create(self, cr, user, vals, context=None):
        """Create a field; for manual fields also create the db column."""
        if 'model_id' in vals:
            model_data = self.pool['ir.model'].browse(cr, user, vals['model_id'])
            vals['model'] = model_data.model
        if context is None:
            context = {}
        if context and context.get('manual',False):
            vals['state'] = 'manual'
        if vals.get('ttype', False) == 'selection':
            if not vals.get('selection',False):
                raise except_orm(_('Error'), _('For selection fields, the Selection Options must be given!'))
            self._check_selection(cr, user, vals['selection'], context=context)
        res = super(ir_model_fields,self).create(cr, user, vals, context)
        if vals.get('state','base') == 'manual':
            if not vals['name'].startswith('x_'):
                raise except_orm(_('Error'), _("Custom fields must have a name that starts with 'x_' !"))

            if vals.get('relation',False) and not self.pool['ir.model'].search(cr, user, [('model','=',vals['relation'])]):
                raise except_orm(_('Error'), _("Model %s does not exist!") % vals['relation'])

            if vals['model'] in self.pool:
                model = self.pool[vals['model']]
                if vals['model'].startswith('x_') and vals['name'] == 'x_name':
                    model._rec_name = 'x_name'

                if self.pool.fields_by_model is not None:
                    cr.execute('SELECT * FROM ir_model_fields WHERE id=%s', (res,))
                    self.pool.fields_by_model.setdefault(vals['model'], []).append(cr.dictfetchone())
                model.__init__(self.pool, cr)
                #Added context to _auto_init for special treatment to custom field for select_level
                ctx = dict(context,
                    field_name=vals['name'],
                    field_state='manual',
                    select=vals.get('select_level', '0'),
                    update_custom_fields=True)
                model._auto_init(cr, ctx)
                model._auto_end(cr, ctx) # actually create FKs!
                self.pool.setup_models(cr, partial=(not self.pool.ready))
                RegistryManager.signal_registry_change(cr.dbname)

        return res

    def write(self, cr, user, ids, vals, context=None):
        """Write field definitions; may rename a column and patch live models."""
        if context is None:
            context = {}
        if context and context.get('manual',False):
            vals['state'] = 'manual'

        #For the moment renaming a sparse field or changing the storing system is not allowed. This may be done later
        if 'serialization_field_id' in vals or 'name' in vals:
            for field in self.browse(cr, user, ids, context=context):
                if 'serialization_field_id' in vals and field.serialization_field_id.id != vals['serialization_field_id']:
                    raise except_orm(_('Error!'),  _('Changing the storing system for field "%s" is not allowed.')%field.name)
                if field.serialization_field_id and (field.name != vals['name']):
                    raise except_orm(_('Error!'),  _('Renaming sparse field "%s" is not allowed')%field.name)

        column_rename = None # if set, *one* column can be renamed here
        models_patch = {}    # structs of (obj, [(field, prop, change_to),..])
                             # data to be updated on the orm model

        # static table of properties
        model_props = [ # (our-name, fields.prop, set_fn)
            ('field_description', 'string', tools.ustr),
            ('required', 'required', bool),
            ('readonly', 'readonly', bool),
            ('domain', '_domain', eval),
            ('size', 'size', int),
            ('on_delete', 'ondelete', str),
            ('translate', 'translate', bool),
            ('selectable', 'selectable', bool),
            ('select_level', 'select', int),
            ('selection', 'selection', eval),
            ]

        if vals and ids:
            checked_selection = False # need only check it once, so defer
            for item in self.browse(cr, user, ids, context=context):
                obj = self.pool.get(item.model)

                if item.state != 'manual':
                    raise except_orm(_('Error!'),
                        _('Properties of base fields cannot be altered in this manner! '
                          'Please modify them through Python code, '
                          'preferably through a custom addon!'))

                if item.ttype == 'selection' and 'selection' in vals \
                        and not checked_selection:
                    self._check_selection(cr, user, vals['selection'], context=context)
                    checked_selection = True

                final_name = item.name
                if 'name' in vals and vals['name'] != item.name:
                    # We need to rename the column
                    if column_rename:
                        raise except_orm(_('Error!'), _('Can only rename one column at a time!'))
                    if vals['name'] in obj._columns:
                        raise except_orm(_('Error!'), _('Cannot rename column to %s, because that column already exists!') % vals['name'])
                    if vals.get('state', 'base') == 'manual' and not vals['name'].startswith('x_'):
                        raise except_orm(_('Error!'), _('New column name must still start with x_ , because it is a custom field!'))
                    if '\'' in vals['name'] or '"' in vals['name'] or ';' in vals['name']:
                        raise ValueError('Invalid character in column name')
                    column_rename = (obj, (obj._table, item.name, vals['name']))
                    final_name = vals['name']

                if 'model_id' in vals and vals['model_id'] != item.model_id.id:
                    raise except_orm(_("Error!"), _("Changing the model of a field is forbidden!"))

                if 'ttype' in vals and vals['ttype'] != item.ttype:
                    raise except_orm(_("Error!"), _("Changing the type of a column is not yet supported. "
                                                    "Please drop it and create it again!"))

                # We don't check the 'state', because it might come from the context
                # (thus be set for multiple fields) and will be ignored anyway.
                if obj is not None:
                    models_patch.setdefault(obj._name, (obj,[]))
                    # find out which properties (per model) we need to update
                    for field_name, field_property, set_fn in model_props:
                        if field_name in vals:
                            property_value = set_fn(vals[field_name])
                            if getattr(obj._columns[item.name], field_property) != property_value:
                                models_patch[obj._name][1].append((final_name, field_property, property_value))
                            # our dict is ready here, but no properties are changed so far

        # These shall never be written (modified)
        for column_name in ('model_id', 'model', 'state'):
            if column_name in vals:
                del vals[column_name]

        res = super(ir_model_fields,self).write(cr, user, ids, vals, context=context)

        if column_rename:
            obj, rename = column_rename
            cr.execute('ALTER TABLE "%s" RENAME COLUMN "%s" TO "%s"' % rename)
            # This is VERY risky, but let us have this feature:
            # we want to change the key of field in obj._fields and obj._columns
            field = obj._pop_field(rename[1])
            obj._add_field(rename[2], field)

        if models_patch:
            # We have to update _columns of the model(s) and then call their
            # _auto_init to sync the db with the model. Hopefully, since write()
            # was called earlier, they will be in-sync before the _auto_init.
            # Anything we don't update in _columns now will be reset from
            # the model into ir.model.fields (db).
            ctx = dict(context, select=vals.get('select_level', '0'),
                       update_custom_fields=True)

            for __, patch_struct in models_patch.items():
                obj = patch_struct[0]
                # TODO: update new-style fields accordingly
                for col_name, col_prop, val in patch_struct[1]:
                    setattr(obj._columns[col_name], col_prop, val)
                obj._auto_init(cr, ctx)
                obj._auto_end(cr, ctx) # actually create FKs!

        if column_rename or models_patch:
            self.pool.setup_models(cr, partial=(not self.pool.ready))
            RegistryManager.signal_registry_change(cr.dbname)

        return res
class ir_model_constraint(Model):
"""
This model tracks PostgreSQL foreign keys and constraints used by OpenERP
models, so they can be dropped again when the defining module is
uninstalled.
"""
_name = 'ir.model.constraint'
_columns = {
# PostgreSQL-level identifier of the constraint or foreign key.
'name': fields.char('Constraint', required=True, select=1,
help="PostgreSQL constraint or foreign key name."),
'definition': fields.char('Definition', help="PostgreSQL constraint definition"),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
# Single-character discriminator: 'f' = foreign key, 'u' = other constraint.
'type': fields.char('Constraint Type', required=True, size=1, select=1,
help="Type of the constraint: `f` for a foreign key, "
"`u` for other constraints."),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
# Each (name, module) pair may only be tracked once.
_sql_constraints = [
('module_name_uniq', 'unique(name, module)',
'Constraints with the same name are unique per module.'),
]
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL foreign keys and constraints tracked by this model.

A schema element is only dropped when every ir.model.constraint row
referring to it is part of ``ids`` (i.e. no still-installed module
owns it). Requires SUPERUSER or base.group_system membership.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
ids_set = set(ids)
# Process most recent records first; note this mutates the caller's list.
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model.model
model_obj = self.pool[model]
name = openerp.tools.ustr(data.name)
typ = data.type
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_constraint where name=%s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
if typ == 'f':
# test if FK exists on this table (it could be on a related m2m table, in which case we ignore it)
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('f', name, model_obj._table))
if cr.fetchone():
# Identifiers cannot be bound as SQL parameters; both values come
# from DB metadata written by the ORM, not from user input.
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped FK CONSTRAINT %s@%s', name, model)
if typ == 'u':
# test if constraint exists
cr.execute("""SELECT 1 from pg_constraint cs JOIN pg_class cl ON (cs.conrelid = cl.oid)
WHERE cs.contype=%s and cs.conname=%s and cl.relname=%s""", ('u', name, model_obj._table))
if cr.fetchone():
cr.execute('ALTER TABLE "%s" DROP CONSTRAINT "%s"' % (model_obj._table, name),)
_logger.info('Dropped CONSTRAINT %s@%s', name, model)
# Finally remove the tracking records themselves.
self.unlink(cr, uid, ids, context)
class ir_model_relation(Model):
"""
This model tracks PostgreSQL tables used to implement OpenERP many2many
relations, so they can be dropped when the defining module is uninstalled.
"""
_name = 'ir.model.relation'
_columns = {
'name': fields.char('Relation Name', required=True, select=1,
help="PostgreSQL table name implementing a many2many relation."),
'model': fields.many2one('ir.model', string='Model',
required=True, select=1),
'module': fields.many2one('ir.module.module', string='Module',
required=True, select=1),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Initialization Date')
}
def _module_data_uninstall(self, cr, uid, ids, context=None):
"""
Delete PostgreSQL many2many relations tracked by this model.

A relation table is only dropped when every ir.model.relation row
referring to it is part of ``ids``. Requires SUPERUSER or
base.group_system membership.
"""
if uid != SUPERUSER_ID and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
ids_set = set(ids)
to_drop_table = []
# Process most recent records first; mutates the caller's list in place.
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
name = openerp.tools.ustr(data.name)
# double-check we are really going to delete all the owners of this schema element
cr.execute("""SELECT id from ir_model_relation where name = %s""", (data.name,))
external_ids = [x[0] for x in cr.fetchall()]
if set(external_ids)-ids_set:
# as installed modules have defined this element we must not delete it!
continue
# Only schedule tables that actually exist, and only once each.
cr.execute("SELECT 1 FROM information_schema.tables WHERE table_name=%s", (name,))
if cr.fetchone() and not name in to_drop_table:
to_drop_table.append(name)
self.unlink(cr, uid, ids, context)
# drop m2m relation tables
for table in to_drop_table:
# Table names cannot be bound as parameters; they originate from the
# ORM's own tracking records, not from user input.
cr.execute('DROP TABLE %s CASCADE'% table,)
_logger.info('Dropped table %s', table)
cr.commit()
class ir_model_access(osv.osv):
"""Access-control lists (ACLs): per-model, per-group CRUD permissions."""
_name = 'ir.model.access'
_columns = {
'name': fields.char('Name', required=True, select=True),
'active': fields.boolean('Active', help='If you uncheck the active field, it will disable the ACL without deleting it (if you delete a native ACL, it will be re-created when you reload the module.'),
'model_id': fields.many2one('ir.model', 'Object', required=True, domain=[('osv_memory','=', False)], select=True, ondelete='cascade'),
# A NULL group means the ACL applies to all users (the "generic" rule).
'group_id': fields.many2one('res.groups', 'Group', ondelete='cascade', select=True),
'perm_read': fields.boolean('Read Access'),
'perm_write': fields.boolean('Write Access'),
'perm_create': fields.boolean('Create Access'),
'perm_unlink': fields.boolean('Delete Access'),
}
_defaults = {
'active': True,
}
def check_groups(self, cr, uid, group):
"""Return True if user ``uid`` belongs to the group identified by the
external id ``group`` (expected form: "module.xml_id")."""
grouparr = group.split('.')
# NOTE(review): str.split always returns at least one element, so this
# guard is dead; a group string without a '.' would raise IndexError
# on grouparr[1] below — confirm callers always pass "module.name".
if not grouparr:
return False
cr.execute("select 1 from res_groups_users_rel where uid=%s and gid IN (select res_id from ir_model_data where module=%s and name=%s)", (uid, grouparr[0], grouparr[1],))
return bool(cr.fetchone())
def check_group(self, cr, uid, model, mode, group_ids):
""" Check if a specific group has the access mode to the specified model"""
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
# NOTE(review): this uses model.name while check() below uses
# model.model for the same case — verify which attribute is intended.
model_name = model.name
else:
model_name = model
if isinstance(group_ids, (int, long)):
group_ids = [group_ids]
for group_id in group_ids:
# "mode" is constrained by the assert above, so this string
# concatenation cannot inject arbitrary SQL.
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id = %s", (model_name, group_id)
)
r = cr.fetchone()
if r is None:
# No group-specific ACL: fall back to the generic (group-less) rule.
cr.execute("SELECT perm_" + mode + " "
" FROM ir_model_access a "
" JOIN ir_model m ON (m.id = a.model_id) "
" WHERE m.model = %s AND a.active IS True "
" AND a.group_id IS NULL", (model_name, )
)
r = cr.fetchone()
access = bool(r and r[0])
if access:
return True
# pass no groups -> no access
return False
def group_names_with_access(self, cr, model_name, access_mode):
"""Returns the names of visible groups which have been granted ``access_mode`` on
the model ``model_name``.
:rtype: list
"""
# access_mode is validated by the assert, making the concatenation safe.
assert access_mode in ['read','write','create','unlink'], 'Invalid access mode: %s' % access_mode
cr.execute('''SELECT
c.name, g.name
FROM
ir_model_access a
JOIN ir_model m ON (a.model_id=m.id)
JOIN res_groups g ON (a.group_id=g.id)
LEFT JOIN ir_module_category c ON (c.id=g.category_id)
WHERE
m.model=%s AND
a.active IS True AND
a.perm_''' + access_mode, (model_name,))
# Prefix each group with its category when it has one: "Category/Group".
return [('%s/%s' % x) if x[0] else x[1] for x in cr.fetchall()]
@tools.ormcache()
def check(self, cr, uid, model, mode='read', raise_exception=True, context=None):
"""Main ACL entry point; result is memoized until ACLs change
(see call_cache_clearing_methods)."""
if uid==1:
# User root have all accesses
# TODO: exclude xml-rpc requests
return True
assert mode in ['read','write','create','unlink'], 'Invalid access mode'
if isinstance(model, BaseModel):
assert model._name == 'ir.model', 'Invalid model object'
model_name = model.model
else:
model_name = model
# TransientModel records have no access rights, only an implicit access rule
if model_name not in self.pool:
_logger.error('Missing model %s' % (model_name, ))
elif self.pool[model_name].is_transient():
return True
# We check if a specific rule exists
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' JOIN res_groups_users_rel gu ON (gu.gid = a.group_id) '
' WHERE m.model = %s '
' AND gu.uid = %s '
' AND a.active IS True '
, (model_name, uid,)
)
r = cr.fetchone()[0]
if r is None:
# there is no specific rule. We check the generic rule
cr.execute('SELECT MAX(CASE WHEN perm_' + mode + ' THEN 1 ELSE 0 END) '
' FROM ir_model_access a '
' JOIN ir_model m ON (m.id = a.model_id) '
' WHERE a.group_id IS NULL '
' AND m.model = %s '
' AND a.active IS True '
, (model_name,)
)
r = cr.fetchone()[0]
if not r and raise_exception:
# Build a helpful error listing the groups that DO have access.
groups = '\n\t'.join('- %s' % g for g in self.group_names_with_access(cr, model_name, mode))
msg_heads = {
# Messages are declared in extenso so they are properly exported in translation terms
'read': _("Sorry, you are not allowed to access this document."),
'write': _("Sorry, you are not allowed to modify this document."),
'create': _("Sorry, you are not allowed to create this kind of document."),
'unlink': _("Sorry, you are not allowed to delete this document."),
}
if groups:
msg_tail = _("Only users with the following access level are currently allowed to do that") + ":\n%s\n\n(" + _("Document model") + ": %s)"
msg_params = (groups, model_name)
else:
msg_tail = _("Please contact your system administrator if you think this is an error.") + "\n\n(" + _("Document model") + ": %s)"
msg_params = (model_name,)
_logger.warning('Access Denied by ACLs for operation: %s, uid: %s, model: %s', mode, uid, model_name)
msg = '%s %s' % (msg_heads[mode], msg_tail)
raise openerp.exceptions.AccessError(msg % msg_params)
return bool(r)
# Callbacks registered by other models, invoked whenever ACLs change.
__cache_clearing_methods = []
def register_cache_clearing_method(self, model, method):
self.__cache_clearing_methods.append((model, method))
def unregister_cache_clearing_method(self, model, method):
try:
i = self.__cache_clearing_methods.index((model, method))
del self.__cache_clearing_methods[i]
except ValueError:
# Silently ignore unknown (model, method) pairs on unregistration.
pass
def call_cache_clearing_methods(self, cr):
"""Invalidate the ACL cache and every registered dependent cache."""
self.invalidate_cache(cr, SUPERUSER_ID)
self.check.clear_cache(self) # clear the cache of check function
for model, method in self.__cache_clearing_methods:
if model in self.pool:
getattr(self.pool[model], method)()
#
# Check rights on actions
#
def write(self, cr, uid, ids, values, context=None):
# Any ACL mutation must flush the cached results of check().
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).write(cr, uid, ids, values, context=context)
return res
def create(self, cr, uid, values, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).create(cr, uid, values, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self.call_cache_clearing_methods(cr)
res = super(ir_model_access, self).unlink(cr, uid, ids, context=context)
return res
class ir_model_data(osv.osv):
"""Holds external identifier keys for records in the database.
This has two main uses:
* allows easy data integration with third-party systems,
making import/export/sync of data possible, as records
can be uniquely identified across multiple systems
* allows tracking the origin of data installed by OpenERP
modules themselves, thus making it possible to later
update them seamlessly.
"""
_name = 'ir.model.data'
_order = 'module,model,name'
def name_get(self, cr, uid, ids, context=None):
# Display each entry using the target record's display name when the
# target model can resolve it, otherwise fall back to the complete
# external id ("module.name").
bymodel = defaultdict(dict)
names = {}
for res in self.browse(cr, uid, ids, context=context):
bymodel[res.model][res.res_id] = res
names[res.id] = res.complete_name
#result[res.model][res.res_id] = res.id
for model, id_map in bymodel.iteritems():
try:
ng = dict(self.pool[model].name_get(cr, uid, id_map.keys(), context=context))
except Exception:
# Best-effort: keep the complete_name fallback if the target
# model's name_get fails for any reason.
pass
else:
for r in id_map.itervalues():
names[r.id] = ng.get(r.res_id, r.complete_name)
return [(i, names[i]) for i in ids]
def _complete_name_get(self, cr, uid, ids, prop, unknow_none, context=None):
# Function field: "module.name", or just "name" when module is empty.
result = {}
for res in self.browse(cr, uid, ids, context=context):
result[res.id] = (res.module and (res.module + '.') or '')+res.name
return result
_columns = {
'name': fields.char('External Identifier', required=True, select=1,
help="External Key/Identifier that can be used for "
"data integration with third-party systems"),
'complete_name': fields.function(_complete_name_get, type='char', string='Complete ID'),
'model': fields.char('Model Name', required=True, select=1),
'module': fields.char('Module', required=True, select=1),
'res_id': fields.integer('Record ID', select=1,
help="ID of the target record in the database"),
'noupdate': fields.boolean('Non Updatable'),
'date_update': fields.datetime('Update Date'),
'date_init': fields.datetime('Init Date')
}
_defaults = {
'date_init': fields.datetime.now,
'date_update': fields.datetime.now,
'noupdate': False,
'module': ''
}
_sql_constraints = [
('module_name_uniq', 'unique(name, module)', 'You cannot have multiple records with the same external ID in the same module!'),
]
def __init__(self, pool, cr):
osv.osv.__init__(self, pool, cr)
# also stored in pool to avoid being discarded along with this osv instance
if getattr(pool, 'model_data_reference_ids', None) is None:
self.pool.model_data_reference_ids = {}
# put loads on the class, in order to share it among all instances
type(self).loads = self.pool.model_data_reference_ids
def _auto_init(self, cr, context=None):
# Create a composite (module, name) index used by xmlid lookups, in
# addition to the columns' own indexes.
super(ir_model_data, self)._auto_init(cr, context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'ir_model_data_module_name_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX ir_model_data_module_name_index ON ir_model_data (module, name)')
# NEW V8 API
@tools.ormcache(skiparg=3)
def xmlid_lookup(self, cr, uid, xmlid):
"""Low level xmlid lookup
Return (id, res_model, res_id) or raise ValueError if not found
"""
module, name = xmlid.split('.', 1)
ids = self.search(cr, uid, [('module','=',module), ('name','=', name)])
if not ids:
raise ValueError('External ID not found in the system: %s' % (xmlid))
# the sql constraints ensure us we have only one result
res = self.read(cr, uid, ids[0], ['model', 'res_id'])
if not res['res_id']:
raise ValueError('External ID not found in the system: %s' % (xmlid))
return ids[0], res['model'], res['res_id']
def xmlid_to_res_model_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Return (res_model, res_id)"""
try:
return self.xmlid_lookup(cr, uid, xmlid)[1:3]
except ValueError:
if raise_if_not_found:
raise
return (False, False)
def xmlid_to_res_id(self, cr, uid, xmlid, raise_if_not_found=False):
""" Returns res_id """
return self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)[1]
def xmlid_to_object(self, cr, uid, xmlid, raise_if_not_found=False, context=None):
""" Return a browse_record
if not found and raise_if_not_found is True return None
"""
t = self.xmlid_to_res_model_res_id(cr, uid, xmlid, raise_if_not_found)
res_model, res_id = t
if res_model and res_id:
record = self.pool[res_model].browse(cr, uid, res_id, context=context)
if record.exists():
return record
if raise_if_not_found:
raise ValueError('No record found for unique ID %s. It may have been deleted.' % (xmlid))
return None
# OLD API
def _get_id(self, cr, uid, module, xml_id):
"""Returns the id of the ir.model.data record corresponding to a given module and xml_id (cached) or raise a ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[0]
def get_object_reference(self, cr, uid, module, xml_id):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached) or raise ValueError if not found"""
return self.xmlid_lookup(cr, uid, "%s.%s" % (module, xml_id))[1:3]
def check_object_reference(self, cr, uid, module, xml_id, raise_on_access_error=False):
"""Returns (model, res_id) corresponding to a given module and xml_id (cached), if and only if the user has the necessary access rights
to see that object, otherwise raise a ValueError if raise_on_access_error is True or returns a tuple (model found, False)"""
model, res_id = self.get_object_reference(cr, uid, module, xml_id)
#search on id found in result to check if current user has read access right
check_right = self.pool.get(model).search(cr, uid, [('id', '=', res_id)])
if check_right:
return model, res_id
if raise_on_access_error:
raise ValueError('Not enough access rights on the external ID: %s.%s' % (module, xml_id))
return model, False
def get_object(self, cr, uid, module, xml_id, context=None):
""" Returns a browsable record for the given module name and xml_id.
If not found, raise a ValueError or return None, depending
on the value of `raise_exception`.
"""
return self.xmlid_to_object(cr, uid, "%s.%s" % (module, xml_id), raise_if_not_found=True, context=context)
def _update_dummy(self,cr, uid, model, module, xml_id=False, store=True):
# Record a (module, xml_id) -> (model, id) mapping in self.loads without
# writing anything; used during module loading bookkeeping.
if not xml_id:
return False
try:
id = self.read(cr, uid, [self._get_id(cr, uid, module, xml_id)], ['res_id'])[0]['res_id']
self.loads[(module,xml_id)] = (model,id)
except:
# NOTE(review): bare except silently maps any failure (including
# programming errors) to "not found" — deliberate best-effort here.
id = False
return id
def clear_caches(self):
""" Clears all orm caches on the object's methods
:returns: itself
"""
self.xmlid_lookup.clear_cache(self)
return self
def unlink(self, cr, uid, ids, context=None):
""" Regular unlink method, but make sure to clear the caches. """
self.clear_caches()
return super(ir_model_data,self).unlink(cr, uid, ids, context=context)
def _update(self,cr, uid, model, module, values, xml_id=False, store=True, noupdate=False, mode='init', res_id=False, context=None):
# Create or update the record identified by (module, xml_id):
# - existing target record -> write `values` and refresh date_update,
# - dangling ir.model.data (target deleted) -> drop the stale entry,
# - no record -> create it (in 'init' mode, or 'update' mode with an
# xml_id), together with ir.model.data rows for _inherits parents.
# Honors `noupdate` flags in 'update' mode. Returns the target res_id.
model_obj = self.pool[model]
if not context:
context = {}
# records created during module install should not display the messages of OpenChatter
context = dict(context, install_mode=True)
if xml_id and ('.' in xml_id):
assert len(xml_id.split('.'))==2, _("'%s' contains too many dots. XML ids should not contain dots ! These are used to refer to other modules data, as in module.reference_id") % xml_id
module, xml_id = xml_id.split('.')
action_id = False
if xml_id:
# Table name interpolation is safe: model_obj._table comes from the
# registry, not from user input.
cr.execute('''SELECT imd.id, imd.res_id, md.id, imd.model, imd.noupdate
FROM ir_model_data imd LEFT JOIN %s md ON (imd.res_id = md.id)
WHERE imd.module=%%s AND imd.name=%%s''' % model_obj._table,
(module, xml_id))
results = cr.fetchall()
for imd_id2,res_id2,real_id2,real_model,noupdate_imd in results:
# In update mode, do not update a record if it's ir.model.data is flagged as noupdate
if mode == 'update' and noupdate_imd:
return res_id2
if not real_id2:
# Target record is gone: purge the stale external id.
self.clear_caches()
cr.execute('delete from ir_model_data where id=%s', (imd_id2,))
res_id = False
else:
assert model == real_model, "External ID conflict, %s already refers to a `%s` record,"\
" you can't define a `%s` record with this ID." % (xml_id, real_model, model)
res_id,action_id = res_id2,imd_id2
if action_id and res_id:
# Both the record and its external id exist: update both.
model_obj.write(cr, uid, [res_id], values, context=context)
self.write(cr, uid, [action_id], {
'date_update': time.strftime('%Y-%m-%d %H:%M:%S'),
},context=context)
elif res_id:
# Record exists but without a tracked external id for this xml_id.
model_obj.write(cr, uid, [res_id], values, context=context)
if xml_id:
if model_obj._inherits:
# Also register external ids for the delegated parents.
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module':module,
'res_id':res_id,
'noupdate': noupdate,
},context=context)
else:
if mode=='init' or (mode=='update' and xml_id):
res_id = model_obj.create(cr, uid, values, context=context)
if xml_id:
if model_obj._inherits:
for table in model_obj._inherits:
inherit_id = model_obj.browse(cr, uid,
res_id,context=context)[model_obj._inherits[table]]
self.create(cr, uid, {
'name': xml_id + '_' + table.replace('.', '_'),
'model': table,
'module': module,
'res_id': inherit_id.id,
'noupdate': noupdate,
},context=context)
self.create(cr, uid, {
'name': xml_id,
'model': model,
'module': module,
'res_id': res_id,
'noupdate': noupdate
},context=context)
if xml_id and res_id:
# Remember everything we touched so _process_end can detect records
# removed from the module's data files.
self.loads[(module, xml_id)] = (model, res_id)
for table, inherit_field in model_obj._inherits.iteritems():
inherit_id = model_obj.read(cr, uid, [res_id],
[inherit_field])[0][inherit_field]
self.loads[(module, xml_id + '_' + table.replace('.', '_'))] = (table, inherit_id)
return res_id
def ir_set(self, cr, uid, key, key2, name, models, value, replace=True, isobject=False, meta=None, xml_id=False):
# Upsert into ir_values: delegate creation to ir.values.set, only
# update in place when xml_id is provided.
if isinstance(models[0], (list, tuple)):
model,res_id = models[0]
else:
res_id=None
model = models[0]
if res_id:
where = ' and res_id=%s' % (res_id,)
else:
where = ' and (res_id is null)'
if key2:
# NOTE(review): key2 is interpolated into SQL with manual quoting
# rather than bound as a parameter — safe only while key2 comes
# from trusted module data; flagging, not changing, in this pass.
where += ' and key2=\'%s\'' % (key2,)
else:
where += ' and (key2 is null)'
cr.execute('select * from ir_values where model=%s and key=%s and name=%s'+where,(model, key, name))
res = cr.fetchone()
ir_values_obj = openerp.registry(cr.dbname)['ir.values']
if not res:
ir_values_obj.set(cr, uid, key, key2, name, models, value, replace, isobject, meta)
elif xml_id:
cr.execute('UPDATE ir_values set value=%s WHERE model=%s and key=%s and name=%s'+where,(value, model, key, name))
ir_values_obj.invalidate_cache(cr, uid, ['value'])
return True
def _module_data_uninstall(self, cr, uid, modules_to_remove, context=None):
"""Deletes all the records referenced by the ir.model.data entries
``ids`` along with their corresponding database backed (including
dropping tables, columns, FKs, etc, as long as there is no other
ir.model.data entry holding a reference to them (which indicates that
they are still owned by another module).
Attempts to perform the deletion in an appropriate order to maximize
the chance of gracefully deleting all records.
This step is performed as part of the full uninstallation of a module.
"""
ids = self.search(cr, uid, [('module', 'in', modules_to_remove)])
if uid != 1 and not self.pool['ir.model.access'].check_groups(cr, uid, "base.group_system"):
raise except_orm(_('Permission Denied'), (_('Administrator access is required to uninstall a module')))
context = dict(context or {})
context[MODULE_UNINSTALL_FLAG] = True # enable model/field deletion
ids_set = set(ids)
wkf_todo = []
to_unlink = []
# Process most recent external ids first (best-effort dependency order).
ids.sort()
ids.reverse()
for data in self.browse(cr, uid, ids, context):
model = data.model
res_id = data.res_id
pair_to_unlink = (model, res_id)
if pair_to_unlink not in to_unlink:
to_unlink.append(pair_to_unlink)
if model == 'workflow.activity':
# Special treatment for workflow activities: temporarily revert their
# incoming transition and trigger an update to force all workflow items
# to move out before deleting them
cr.execute('select res_type,res_id from wkf_instance where id IN (select inst_id from wkf_workitem where act_id=%s)', (res_id,))
wkf_todo.extend(cr.fetchall())
cr.execute("update wkf_transition set condition='True', group_id=NULL, signal=NULL,act_to=act_from,act_from=%s where act_to=%s", (res_id,res_id))
self.invalidate_cache(cr, uid, context=context)
for model,res_id in wkf_todo:
try:
openerp.workflow.trg_write(uid, model, res_id, cr)
except Exception:
_logger.info('Unable to force processing of workflow for item %s@%s in order to leave activity to be deleted', res_id, model, exc_info=True)
def unlink_if_refcount(to_unlink):
# Delete each (model, res_id) pair unless another (surviving)
# module still references it; each deletion runs in a savepoint so
# one failure does not abort the whole uninstall.
for model, res_id in to_unlink:
external_ids = self.search(cr, uid, [('model', '=', model),('res_id', '=', res_id)])
if set(external_ids)-ids_set:
# if other modules have defined this record, we must not delete it
continue
if model == 'ir.model.fields':
# Don't remove the LOG_ACCESS_COLUMNS unless _log_access
# has been turned off on the model.
field = self.pool[model].browse(cr, uid, [res_id], context=context)[0]
if not field.exists():
_logger.info('Deleting orphan external_ids %s', external_ids)
self.unlink(cr, uid, external_ids)
continue
if field.name in openerp.models.LOG_ACCESS_COLUMNS and self.pool[field.model]._log_access:
continue
if field.name == 'id':
continue
_logger.info('Deleting %s@%s', res_id, model)
try:
cr.execute('SAVEPOINT record_unlink_save')
self.pool[model].unlink(cr, uid, [res_id], context=context)
except Exception:
_logger.info('Unable to delete %s@%s', res_id, model, exc_info=True)
cr.execute('ROLLBACK TO SAVEPOINT record_unlink_save')
else:
cr.execute('RELEASE SAVEPOINT record_unlink_save')
# Remove non-model records first, then model fields, and finish with models
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model not in ('ir.model','ir.model.fields','ir.model.constraint'))
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.constraint')
ir_module_module = self.pool['ir.module.module']
ir_model_constraint = self.pool['ir.model.constraint']
modules_to_remove_ids = ir_module_module.search(cr, uid, [('name', 'in', modules_to_remove)], context=context)
constraint_ids = ir_model_constraint.search(cr, uid, [('module', 'in', modules_to_remove_ids)], context=context)
ir_model_constraint._module_data_uninstall(cr, uid, constraint_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model.fields')
ir_model_relation = self.pool['ir.model.relation']
relation_ids = ir_model_relation.search(cr, uid, [('module', 'in', modules_to_remove_ids)])
ir_model_relation._module_data_uninstall(cr, uid, relation_ids, context)
unlink_if_refcount((model, res_id) for model, res_id in to_unlink
if model == 'ir.model')
cr.commit()
self.unlink(cr, uid, ids, context)
def _process_end(self, cr, uid, modules):
""" Clear records removed from updated module data.
This method is called at the end of the module loading process.
It is meant to removed records that are no longer present in the
updated data. Such records are recognised as the one with an xml id
and a module in ir_model_data and noupdate set to false, but not
present in self.loads.
"""
if not modules:
return True
to_unlink = []
cr.execute("""SELECT id,name,model,res_id,module FROM ir_model_data
WHERE module IN %s AND res_id IS NOT NULL AND noupdate=%s ORDER BY id DESC""",
(tuple(modules), False))
for (id, name, model, res_id, module) in cr.fetchall():
if (module,name) not in self.loads:
to_unlink.append((model,res_id))
if not config.get('import_partial'):
for (model, res_id) in to_unlink:
if model in self.pool:
_logger.info('Deleting %s@%s', res_id, model)
self.pool[model].unlink(cr, uid, [res_id])
class wizard_model_menu(osv.osv_memory):
    """Wizard that creates a menu entry (plus window action) for a model."""
    _name = 'wizard.ir.model.menu.create'
    _columns = {
        'menu_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
        'name': fields.char('Menu Name', required=True),
    }

    def menu_create(self, cr, uid, ids, context=None):
        """For each wizard record, create an act_window on the model taken
        from context['model_id'] and attach a menu item for it under the
        chosen parent menu. Returns the client action closing the wizard.
        """
        context = context or {}
        ir_model = self.pool.get('ir.model')
        act_window_obj = self.pool.get('ir.actions.act_window')
        menu_obj = self.pool.get('ir.ui.menu')
        for wizard in self.browse(cr, uid, ids, context):
            model = ir_model.browse(cr, uid, context.get('model_id'), context=context)
            action_id = act_window_obj.create(cr, uid, {
                'name': wizard.name,
                'res_model': model.model,
                'view_type': 'form',
                'view_mode': 'tree,form'
            })
            menu_obj.create(cr, uid, {
                'name': wizard.name,
                'parent_id': wizard.menu_id.id,
                'action': 'ir.actions.act_window,%d' % (action_id,),
                'icon': 'STOCK_INDENT'
            }, context)
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
erdincay/youtube-dl | youtube_dl/extractor/wayofthemaster.py | 154 | 1527 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
class WayOfTheMasterIE(InfoExtractor):
    _VALID_URL = r'https?://www\.wayofthemaster\.com/([^/?#]*/)*(?P<id>[^/?#]+)\.s?html(?:$|[?#])'

    _TEST = {
        'url': 'http://www.wayofthemaster.com/hbks.shtml',
        'md5': '5316b57487ada8480606a93cb3d18d24',
        'info_dict': {
            'id': 'hbks',
            'ext': 'mp4',
            'title': 'Intelligent Design vs. Evolution',
        },
    }

    def _real_extract(self, url):
        # The page slug doubles as the video id.
        video_id = re.match(self._VALID_URL, url).group('id')
        webpage = self._download_webpage(url, video_id)

        # Prefer the title baked into the header image; fall back to <title>.
        title = self._search_regex(
            r'<img src="images/title_[^"]+".*?alt="([^"]+)"',
            webpage, 'title', default=None)
        if title is None:
            title = self._html_search_regex(
                r'<title>(.*?)</title>', webpage, 'page title')

        url_base = self._search_regex(
            r'<param\s+name="?movie"?\s+value=".*?/wotm_videoplayer_highlow[0-9]*\.swf\?vid=([^"]+)"',
            webpage, 'URL base')

        # The Flash player exposes exactly two renditions, named by suffix.
        formats = [{
            'format_id': rendition,
            'quality': quality,
            'url': url_base + '_%s.mp4' % rendition,
        } for quality, rendition in enumerate(('low', 'high'), 1)]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
        }
| unlicense |
Arelle/Arelle | arelle/plugin/validate/EBA/__init__.py | 2 | 47269 | '''
Created on Dec 12, 2013
@author: Mark V Systems Limited
(c) Copyright 2013 Mark V Systems Limited, All rights reserved.
'''
import os, sys, re
from arelle import PluginManager
from arelle import ModelDocument, XbrlConst, XmlUtil, UrlUtil, LeiUtil
from arelle.HashUtil import md5hash, Md5Sum
from arelle.ModelDtsObject import ModelConcept, ModelType, ModelLocator, ModelResource
from arelle.ModelFormulaObject import Aspect
from arelle.ModelObject import ModelObject
from arelle.ModelRelationshipSet import ModelRelationshipSet
from arelle.ModelValue import qname, qnameEltPfxName
from arelle.ValidateUtr import ValidateUtr
from arelle.XbrlConst import qnEnumerationItemTypes
from arelle.ModelInstanceObject import ModelFact
try:
import regex as re
except ImportError:
import re
from lxml import etree
from collections import defaultdict
# QNames of the Eurofiling filing-indicator elements and a few item types /
# metrics referenced by the validation rules below.
qnFIndicators = qname("{http://www.eurofiling.info/xbrl/ext/filing-indicators}find:fIndicators")
qnFilingIndicator = qname("{http://www.eurofiling.info/xbrl/ext/filing-indicators}find:filingIndicator")
qnPercentItemType = qname("{http://www.xbrl.org/dtr/type/numeric}num:percentItemType")
qnPureItemType = qname("{http://www.xbrl.org/2003/instance}xbrli:pureItemType")
qnMetReportingCurrency = qname("{http://eiopa.europa.eu/xbrl/s2md/dict/met}met:ei1930")
# Local names of all XBRL integer-derived item types.
integerItemTypes = {"integerItemType", "nonPositiveIntegerItemType", "negativeIntegerItemType",
"longItemType", "intItemType", "shortItemType", "byteItemType",
"nonNegativeIntegerItemType", "unsignedLongItemType", "unsignedIntItemType",
"unsignedShortItemType", "unsignedByteItemType", "positiveIntegerItemType"}
# Extracts the YYYY-MM-DD date segment embedded in an entry-point schemaRef URL.
schemaRefDatePattern = re.compile(r".*/([0-9]{4}-[01][0-9]-[0-3][0-9])/.*")
# Metrics subject to EIOPA rule S.2.18.c.a (monetary values needing >= 2 decimals).
# NOTE(review): the triple-quoted SQL text below is an adjacent string literal,
# so Python concatenates it with "s2md_met:mi1088" into a single (never-matched)
# set element; it effectively serves as embedded documentation of how the
# member list was derived.
s_2_18_c_a_met = {
"""
in templates S.06.02, SE.06.02, S.08.01, S.08.02,S.11.01 and E.01.01,
data points with the data type 'monetary' shall be expressed in units
with at least two decimals:
select distinct mem.MemberXBRLCode from mOrdinateCategorisation oc
inner join mAxisOrdinate ao on ao.OrdinateID = oc.OrdinateID
inner join mTableAxis ta on ta.AxisID = ao.AxisID
inner join mTable t on t.TableID = ta.TableID
inner join mMember mem on mem.MemberID = oc.MemberID
inner join mMetric met on met.CorrespondingMemberID = mem.MemberID and met.DataType = 'Monetary'
where (t.TableCode like 'S.06.02%' or t.TableCode like 'SE.06.02%' or t.TableCode like 'S.08.01%' or t.TableCode like 'S.08.02%' or t.TableCode like 'S.11.01%' or t.TableCode like 'E.01.01%') and mem.MemberXBRLCode not like 's2hd_met%'
order by t.TableCode;
"""
"s2md_met:mi1088", "s2md_met:mi1096", "s2md_met:mi1101", "s2md_met:mi1110",
"s2md_met:mi1112", "s2md_met:mi1115", "s2md_met:mi1117", "s2md_met:mi1126",
"s2md_met:mi1127", "s2md_met:mi1128", "s2md_met:mi1131"}
# Canonical namespace-prefix bindings expected in filer instances.
CANONICAL_PREFIXES = {
"http://www.xbrl.org/2003/iso4217": "iso4217",
"http://www.xbrl.org/2003/linkbase": "link",
"http://xbrl.org/2006/xbrldi": "xbrldi",
"http://www.xbrl.org/2003/instance": "xbrli",
"http://www.w3.org/1999/xlink": "xlink"}
def dislosureSystemTypes(disclosureSystem, *args, **kwargs):
    """Plugin hook: declare the disclosure-system types handled here.

    Returns a tuple of (disclosure system name, validation attribute name)
    pairs; for this plugin both members of each pair are identical.
    (The misspelled function name is the established hook name — keep it.)
    """
    return tuple((code, code) for code in ("EBA", "EIOPA"))
def disclosureSystemConfigURL(disclosureSystem, *args, **kwargs):
    """Plugin hook: return the path of this plugin's disclosure-system
    configuration file (config.xml, stored next to this module)."""
    plugin_dir = os.path.dirname(__file__)
    return os.path.join(plugin_dir, "config.xml")
def validateSetup(val, parameters=None, *args, **kwargs):
    """Plugin hook (Validate.XBRL.Start): initialize EBA/EIOPA validation state.

    Sets val.validateEBA / val.validateEIOPA from the active disclosure system,
    infers the EIOPA taxonomy version from the entry-point schemaRef URL date,
    and creates the accumulators that validateFacts() fills and final() reports.
    """
    val.validateEBA = val.validateDisclosureSystem and getattr(val.disclosureSystem, "EBA", False)
    val.validateEIOPA = val.validateDisclosureSystem and getattr(val.disclosureSystem, "EIOPA", False)
    if not (val.validateEBA or val.validateEIOPA):
        return  # neither disclosure system selected: nothing to set up
    val.validateUTR = False # do not use default UTR validation, it's at error level and not streamable
    val.utrValidator = ValidateUtr(val.modelXbrl,
                                   "WARNING", # EBA specifies SHOULD on UTR validation
                                   "EBA.2.23") # override utre error-severity message code
    val.isEIOPAfullVersion = val.isEIOPA_2_0_1 = False
    modelDocument = val.modelXbrl.modelDocument
    if modelDocument.type == ModelDocument.Type.INSTANCE:
        # The taxonomy version is inferred from the YYYY-MM-DD date embedded in
        # the first schemaRef entry-point URL.
        for doc, docRef in modelDocument.referencesDocument.items():
            if "href" in docRef.referenceTypes:
                if docRef.referringModelObject.localName == "schemaRef":
                    _match = schemaRefDatePattern.match(doc.uri)
                    if _match:
                        val.isEIOPAfullVersion = _match.group(1) > "2015-02-28"
                        val.isEIOPA_2_0_1 = _match.group(1) >= "2015-10-21"
                        break
                    else:
                        val.modelXbrl.error( ("EBA.S.1.5.a/EBA.S.1.5.b", "EIOPA.S.1.5.a/EIOPA.S.1.5.b"),
                            _('The link:schemaRef element in submitted instances MUST resolve to the full published entry point URL, this schemaRef is missing date portion: %(schemaRef)s.'),
                            modelObject=modelDocument, schemaRef=doc.uri)
    val.qnDimAF = val.qnDimOC = val.qnCAx1 = None
    _nsmap = val.modelXbrl.modelDocument.xmlRootElement.nsmap
    if val.isEIOPA_2_0_1:
        # EIOPA 2.0.1: an "instance-generator" processing instruction is
        # recommended (EIOPA.S.2.23) and must carry id/version/creationdate.
        _hasPiInstanceGenerator = False
        for pi in modelDocument.processingInstructions:
            if pi.target == "instance-generator":
                _hasPiInstanceGenerator = True
                if not all(pi.get(attr) for attr in ("id", "version", "creationdate")):
                    val.modelXbrl.warning("EIOPA.S.2.23",
                        _('The processing instruction instance-generator SHOULD contain attributes "id", "version" and "creationdate".'),
                        modelObject=modelDocument)
        if not _hasPiInstanceGenerator:
            val.modelXbrl.warning("EIOPA.S.2.23",
                _('The instance SHOULD include a processing instruction "instance-generator".'),
                modelObject=modelDocument)
        # dimension/member qnames used by the EIOPA.3.1 currency consistency check
        val.qnDimAF = qname("s2c_dim:AF", _nsmap)
        val.qnDimOC = qname("s2c_dim:OC", _nsmap)
        val.qnCAx1 = qname("s2c_CA:x1", _nsmap)
    elif val.validateEBA:
        # dimension/member qnames used by the EBA.3.1 currency consistency check
        val.eba_qnDimCUS = qname("eba_dim:CUS", _nsmap)
        val.eba_qnDimCCA = qname("eba_dim:CCA", _nsmap)
        val.eba_qnCAx1 = qname("eba_CA:x1", _nsmap)
    val.prefixNamespace = {}
    val.namespacePrefix = {}
    val.idObjects = {}
    # collect typed dimension domain elements so their qnames are known
    val.typedDomainQnames = set()
    val.typedDomainElements = set()
    for modelConcept in val.modelXbrl.qnameConcepts.values():
        if modelConcept.isTypedDimension:
            typedDomainElement = modelConcept.typedDomainElement
            if isinstance(typedDomainElement, ModelConcept):
                val.typedDomainQnames.add(typedDomainElement.qname)
                val.typedDomainElements.add(typedDomainElement)
    # accumulators filled by validateFacts() (possibly over several streaming
    # batches) and reported on by final()
    val.filingIndicators = {}
    val.numFilingIndicatorTuples = 0
    val.cntxEntities = set()
    val.cntxDates = defaultdict(set)
    val.unusedCntxIDs = set()
    val.unusedUnitIDs = set()
    val.currenciesUsed = {}
    val.reportingCurrency = None
    val.namespacePrefixesUsed = defaultdict(set)
    val.prefixesUnused = set()
    for prefix, ns in _nsmap.items():
        val.prefixesUnused.add(prefix)
        val.namespacePrefixesUsed[ns].add(prefix)
    val.firstFactObjectIndex = sys.maxsize
    val.firstFact = None
    val.footnotesRelationshipSet = ModelRelationshipSet(val.modelXbrl, "XBRL-footnotes")
    # re-init batch flag to enable more than one context/unit validation sessions for the same instance.
    # (note that this monkey-patching would give trouble on two concurrent validation sessions of the same instance)
    for cntx in val.modelXbrl.contexts.values():
        if hasattr(cntx, "_batchChecked"):
            cntx._batchChecked = False
    for unit in val.modelXbrl.units.values():
        if hasattr(unit, "_batchChecked"):
            unit._batchChecked = False
def prefixUsed(val, ns, prefix):
    """Record that *prefix* was used for namespace *ns*, and mark every prefix
    declared for that namespace as used by removing it from val.prefixesUnused."""
    used = val.namespacePrefixesUsed[ns]
    used.add(prefix)
    val.prefixesUnused.difference_update(used)
def validateStreamingFacts(val, factsToCheck, *args, **kwargs):
    """Plugin hook (Streaming.ValidateFacts): validate one streaming batch of facts.

    Returns True (nothing to do) when neither EBA nor EIOPA validation is active;
    otherwise delegates the batch to validateFacts().
    """
    if val.validateEBA or val.validateEIOPA:
        validateFacts(val, factsToCheck)
    else:
        return True
def validateFacts(val, factsToCheck):
    """Validate a batch of facts against the EBA/EIOPA filing rules.

    May be called once per batch when streaming, or once with all facts from
    final() when not streaming.  Accumulates state on *val* (filing indicators,
    unused context/unit ids, currencies used, namespace prefixes used) which
    final() reports on.
    """
    modelXbrl = val.modelXbrl
    modelDocument = modelXbrl.modelDocument

    # note EBA 2.1 is in ModelDocument.py
    timelessDatePattern = re.compile(r"\s*([0-9]{4})-([0-9]{2})-([0-9]{2})\s*$")

    # --- context checks (EBA/EIOPA 2.10 - 2.15) -----------------------------
    for cntx in modelXbrl.contexts.values():
        if getattr(cntx, "_batchChecked", False):
            continue # prior streaming batch already checked
        cntx._batchChecked = True
        val.cntxEntities.add(cntx.entityIdentifier)
        dateElts = XmlUtil.descendants(cntx, XbrlConst.xbrli, ("startDate","endDate","instant"))
        if any(not timelessDatePattern.match(e.textValue) for e in dateElts):
            modelXbrl.error(("EBA.2.10","EIOPA.2.10"),
                _('Period dates must be whole dates without time or timezone: %(dates)s.'),
                modelObject=cntx, dates=", ".join(e.text for e in dateElts))
        if cntx.isForeverPeriod:
            modelXbrl.error(("EBA.2.11","EIOPA.N.2.11"),
                _('Forever context period is not allowed.'),
                modelObject=cntx)
        elif cntx.isStartEndPeriod:
            modelXbrl.error(("EBA.2.13","EIOPA.N.2.11"),
                _('Start-End (flow) context period is not allowed.'),
                modelObject=cntx)
        elif cntx.isInstantPeriod:
            # cannot pass context object to final() below, for error logging, if streaming mode
            val.cntxDates[cntx.instantDatetime].add(modelXbrl if getattr(val.modelXbrl, "isStreamingMode", False)
                                                    else cntx)
        if cntx.hasSegment:
            modelXbrl.error(("EBA.2.14","EIOPA.N.2.14"),
                _("Contexts MUST NOT contain xbrli:segment values: %(cntx)s.'"),
                modelObject=cntx, cntx=cntx.id)
        if cntx.nonDimValues("scenario"):
            modelXbrl.error(("EBA.2.15","EIOPA.S.2.15" if val.isEIOPAfullVersion else "EIOPA.N.2.15"),
                _("Contexts MUST NOT contain non-dimensional xbrli:scenario values: %(cntx)s.'"),
                modelObject=cntx, cntx=cntx.id,
                messageCodes=("EBA.2.15","EIOPA.N.2.15","EIOPA.S.2.15"))
        val.unusedCntxIDs.add(cntx.id)
        if val.isEIOPA_2_0_1 and len(cntx.id) > 128:
            modelXbrl.warning("EIOPA.S.2.6",
                _("Contexts IDs SHOULD be short: %(cntx)s.'"),
                modelObject=cntx, cntx=cntx.id)

    for unit in modelXbrl.units.values():
        if getattr(unit, "_batchChecked", False):
            continue # prior streaming batch already checked
        unit._batchChecked = True
        val.unusedUnitIDs.add(unit.id)

    # index this batch's facts by qname; record first fact in document order
    factsByQname = defaultdict(set) # top level for this
    for f in factsToCheck:
        factsByQname[f.qname].add(f)
        val.unusedCntxIDs.discard(f.contextID)
        val.unusedUnitIDs.discard(f.unitID)
        if f.objectIndex < val.firstFactObjectIndex:
            val.firstFactObjectIndex = f.objectIndex
            val.firstFact = f

    # --- filing indicator tuples (EBA/EIOPA 1.6.x) --------------------------
    for fIndicators in factsByQname[qnFIndicators]:
        val.numFilingIndicatorTuples += 1
        for fIndicator in fIndicators.modelTupleFacts:
            _value = (getattr(fIndicator, "xValue", None) or fIndicator.value) # use validated xValue if DTS else value for skipDTS
            _filed = fIndicator.get("{http://www.eurofiling.info/xbrl/ext/filing-indicators}filed", "true") in ("true", "1")
            if _value in val.filingIndicators:
                modelXbrl.error(("EBA.1.6.1", "EIOPA.1.6.1"),
                    _('Multiple filing indicators facts for indicator %(filingIndicator)s.'),
                    modelObject=(fIndicator, val.filingIndicators[_value]), filingIndicator=_value)
                if _filed and not val.filingIndicators[_value]:
                    val.filingIndicators[_value] = _filed #set to filed if any of the multiple indicators are filed=true
            else: # not a duplicate filing indicator
                val.filingIndicators[_value] = _filed
            val.unusedCntxIDs.discard(fIndicator.contextID)
            cntx = fIndicator.context
            if cntx is not None and (cntx.hasSegment or cntx.hasScenario):
                modelXbrl.error("EIOPA.N.1.6.d" if val.isEIOPAfullVersion else "EIOPA.S.1.6.d",
                    _('Filing indicators must not contain segment or scenario elements %(filingIndicator)s.'),
                    modelObject=fIndicator, filingIndicator=_value)
        # Using model object id's is not accurate in case of edition
        prevObj = fIndicators.getprevious()
        while prevObj is not None:
            if isinstance(prevObj, ModelFact) and prevObj.qname != qnFIndicators:
                modelXbrl.warning("EIOPA.1.6.2",
                    _('Filing indicators should precede first fact %(firstFact)s.'),
                    modelObject=(fIndicators, val.firstFact), firstFact=val.firstFact.qname)
                break
            prevObj = prevObj.getprevious()

    if val.isEIOPAfullVersion:
        # bare (non-tuple) filing indicators directly under xbrli:xbrl are illegal
        for fIndicator in factsByQname[qnFilingIndicator]:
            if fIndicator.getparent().qname == XbrlConst.qnXbrliXbrl:
                _isPos = fIndicator.get("{http://www.eurofiling.info/xbrl/ext/filing-indicators}filed", "true") in ("true", "1")
                _value = (getattr(fIndicator, "xValue", None) or fIndicator.value) # use validated xValue if DTS else value for skipDTS
                modelXbrl.error("EIOPA.1.6.a" if _isPos else "EIOPA.1.6.b",
                    _('Filing indicators must be in a tuple %(filingIndicator)s.'),
                    modelObject=fIndicator, filingIndicator=_value,
                    messageCodes=("EIOPA.1.6.a", "EIOPA.1.6.b"))

    # --- per-fact checks (duplicates, decimals, units, languages) -----------
    otherFacts = {} # (contextHash, unitHash, xmlLangHash) : fact
    nilFacts = []
    stringFactsWithXmlLang = []
    nonMonetaryNonPureFacts = []
    # loop variable renamed from "qname" which shadowed the qname() function
    for _qname, facts in factsByQname.items():
        for f in facts:
            # FIX: second comparison was qnFIndicators again, so bare filing
            # indicator facts were never skipped here as the comment intends
            if f.qname == qnFIndicators or f.qname == qnFilingIndicator:
                continue # skip root-level and non-root-level filing indicators
            if modelXbrl.skipDTS:
                # without a DTS, infer the data type from the metric name's
                # leading character (DPM naming convention)
                c = f.qname.localName[0]
                isNumeric = c in ('m', 'p', 'r', 'i')
                isMonetary = c == 'm'
                isInteger = c == 'i'
                isPercent = c == 'p'
                isString = c == 's'
                isEnum = c == 'e'
            else:
                concept = f.concept
                if concept is not None:
                    isNumeric = concept.isNumeric
                    isMonetary = concept.isMonetary
                    isInteger = concept.baseXbrliType in integerItemTypes
                    isPercent = concept.typeQname in (qnPercentItemType, qnPureItemType)
                    isString = concept.baseXbrliType in ("stringItemType", "normalizedStringItemType")
                    isEnum = concept.typeQname in qnEnumerationItemTypes
                else:
                    isNumeric = isString = isEnum = False # error situation
            k = (f.getparent().objectIndex,
                 f.qname,
                 f.context.contextDimAwareHash if f.context is not None else None,
                 f.unit.hash if f.unit is not None else None,
                 hash(f.xmlLang))
            if k not in otherFacts:
                otherFacts[k] = {f}
            else:
                matches = [o
                           for o in otherFacts[k]
                           if (f.getparent().objectIndex == o.getparent().objectIndex and
                               f.qname == o.qname and
                               f.context.isEqualTo(o.context) if f.context is not None and o.context is not None else True) and
                              # (f.unit.isEqualTo(o.unit) if f.unit is not None and o.unit is not None else True) and
                              (f.xmlLang == o.xmlLang)]
                if matches:
                    contexts = [f.contextID] + [o.contextID for o in matches]
                    modelXbrl.error(("EBA.2.16", "EIOPA.S.2.16" if val.isEIOPAfullVersion else "EIOPA.S.2.16.a"),
                        _('Facts are duplicates %(fact)s contexts %(contexts)s.'),
                        modelObject=[f] + matches, fact=f.qname, contexts=', '.join(contexts),
                        messageCodes=("EBA.2.16", "EIOPA.S.2.16", "EIOPA.S.2.16.a"))
                else:
                    otherFacts[k].add(f)
            if isNumeric:
                if f.precision:
                    modelXbrl.error(("EBA.2.17", "EIOPA.2.18.a"),
                        _("Numeric fact %(fact)s of context %(contextID)s has a precision attribute '%(precision)s'"),
                        modelObject=f, fact=f.qname, contextID=f.contextID, precision=f.precision)
                if f.decimals and not f.isNil: # in XbrlDpmSqlDB for 2_0_1
                    if f.decimals == "INF":
                        if not val.isEIOPAfullVersion:
                            modelXbrl.error("EIOPA.S.2.18.f",
                                _("Monetary fact %(fact)s of context %(contextID)s has a decimal attribute INF: '%(decimals)s'"),
                                modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
                    else:
                        try:
                            xValue = f.xValue
                            dec = int(f.decimals)
                            if isMonetary:
                                if val.isEIOPA_2_0_1:
                                    # minimum decimals depends on magnitude (EIOPA S.2.18.c)
                                    _absXvalue = abs(xValue)
                                    if str(f.qname) in s_2_18_c_a_met:
                                        dMin = 2
                                    elif _absXvalue >= 100000000:
                                        dMin = -4
                                    elif 100000000 > _absXvalue >= 1000000:
                                        dMin = -3
                                    elif 1000000 > _absXvalue >= 1000:
                                        dMin = -2
                                    else:
                                        dMin = -1
                                    if dMin > dec:
                                        modelXbrl.error("EIOPA.S.2.18.c",
                                            _("Monetary fact %(fact)s of context %(contextID)s has a decimals attribute less than minimum %(minimumDecimals)s: '%(decimals)s'"),
                                            modelObject=f, fact=f.qname, contextID=f.contextID, minimumDecimals=dMin, decimals=f.decimals)
                                elif dec < -3:
                                    modelXbrl.error(("EBA.2.18","EIOPA.S.2.18.c"),
                                        _("Monetary fact %(fact)s of context %(contextID)s has a decimals attribute < -3: '%(decimals)s'"),
                                        modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
                                else: # apply dynamic decimals check
                                    if  -.1 < xValue < .1: dMin = 2
                                    elif -1 < xValue < 1: dMin = 1
                                    elif -10 < xValue < 10: dMin = 0
                                    elif -100 < xValue < 100: dMin = -1
                                    elif -1000 < xValue < 1000: dMin = -2
                                    else: dMin = -3
                                    if dMin > dec:
                                        modelXbrl.warning("EIOPA:factDecimalsWarning",
                                            _("Monetary fact %(fact)s of context %(contextID)s value %(value)s has an imprecise decimals attribute: %(decimals)s, minimum is %(mindec)s"),
                                            modelObject=f, fact=f.qname, contextID=f.contextID, value=xValue, decimals=f.decimals, mindec=dMin)
                            elif isInteger:
                                if dec != 0:
                                    modelXbrl.error(("EBA.2.18","EIOPA.S.2.18.d"),
                                        _("Integer fact %(fact)s of context %(contextID)s has a decimals attribute \u2260 0: '%(decimals)s'"),
                                        modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
                            elif isPercent:
                                if dec < 4:
                                    modelXbrl.error(("EBA.2.18","EIOPA.S.2.18.e"),
                                        _("Percent fact %(fact)s of context %(contextID)s has a decimals attribute < 4: '%(decimals)s'"),
                                        modelObject=f, fact=f.qname, contextID=f.contextID, decimals=f.decimals)
                                if val.isEIOPA_2_0_1 and xValue > 1:
                                    modelXbrl.warning(("EIOPA.3.2.b"),
                                        _("Percent fact %(fact)s of context %(contextID)s appears to be over 100% = 1.0: '%(value)s'"),
                                        modelObject=f, fact=f.qname, contextID=f.contextID, value=xValue)
                            else:
                                if  -.001 < xValue < .001: dMin = 4
                                elif -.01 < xValue < .01: dMin = 3
                                elif -.1 < xValue < .1: dMin = 2
                                elif  -1 < xValue < 1: dMin = 1
                                else: dMin = 0
                                if dMin > dec:
                                    modelXbrl.warning("EIOPA:factDecimalsWarning",
                                        _("Numeric fact %(fact)s of context %(contextID)s value %(value)s has an imprecise decimals attribute: %(decimals)s, minimum is %(mindec)s"),
                                        modelObject=f, fact=f.qname, contextID=f.contextID, value=xValue, decimals=f.decimals, mindec=dMin)
                        except (AttributeError, ValueError):
                            pass # should have been reported as a schema error by loader
                '''' (not intended by EBA 2.18, paste here is from EFM)
                if not f.isNil and getattr(f,"xValid", 0) == 4:
                    try:
                        insignificance = insignificantDigits(f.xValue, decimals=f.decimals)
                        if insignificance: # if not None, returns (truncatedDigits, insiginficantDigits)
                            modelXbrl.error(("EFM.6.05.37", "GFM.1.02.26"),
                                _("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s has nonzero digits in insignificant portion %(insignificantDigits)s."),
                                modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals,
                                value=f1.xValue, truncatedDigits=insignificance[0], insignificantDigits=insignificance[1])
                    except (ValueError,TypeError):
                        modelXbrl.error(("EBA.2.18"),
                            _("Fact %(fact)s of context %(contextID)s decimals %(decimals)s value %(value)s causes Value Error exception."),
                            modelObject=f1, fact=f1.qname, contextID=f1.contextID, decimals=f1.decimals, value=f1.value)
                '''
                unit = f.unit
                if unit is not None:
                    if isMonetary:
                        if unit.measures[0]:
                            _currencyMeasure = unit.measures[0][0]
                            if val.isEIOPA_2_0_1 and f.context is not None:
                                if f.context.dimMemberQname(val.qnDimAF) == val.qnCAx1 and val.qnDimOC in f.context.qnameDims:
                                    _ocCurrency = f.context.dimMemberQname(val.qnDimOC).localName
                                    if _currencyMeasure.localName != _ocCurrency:
                                        modelXbrl.error("EIOPA.3.1",
                                            _("There MUST be only one currency but metric %(metric)s reported OC dimension currency %(ocCurrency)s differs from unit currency: %(unitCurrency)s."),
                                            modelObject=f, metric=f.qname, ocCurrency=_ocCurrency, unitCurrency=_currencyMeasure.localName)
                                else:
                                    val.currenciesUsed[_currencyMeasure] = unit
                            elif val.validateEBA and f.context is not None:
                                if f.context.dimMemberQname(val.eba_qnDimCCA) == val.eba_qnCAx1 and val.eba_qnDimCUS in f.context.qnameDims:
                                    currency = f.context.dimMemberQname(val.eba_qnDimCUS).localName
                                    if _currencyMeasure.localName != currency:
                                        modelXbrl.error("EBA.3.1",
                                            _("There MUST be only one currency but metric %(metric)s reported CCA dimension currency %(currency)s differs from unit currency: %(unitCurrency)s."),
                                            modelObject=f, metric=f.qname, currency=currency, unitCurrency=_currencyMeasure.localName)
                                else:
                                    val.currenciesUsed[_currencyMeasure] = unit
                            else:
                                val.currenciesUsed[_currencyMeasure] = unit
                    elif not unit.isSingleMeasure or unit.measures[0][0] != XbrlConst.qnXbrliPure:
                        nonMonetaryNonPureFacts.append(f)
            if isEnum:
                _eQn = getattr(f,"xValue", None) or qnameEltPfxName(f, f.value)
                if _eQn:
                    prefixUsed(val, _eQn.namespaceURI, _eQn.prefix)
                    if val.isEIOPA_2_0_1 and f.qname.localName == "ei1930":
                        val.reportingCurrency = _eQn.localName # remember reporting currency for final() EIOPA.3.1 check
            elif isString:
                if f.xmlLang: # requires disclosureSystem to NOT specify default language
                    stringFactsWithXmlLang.append(f)
            if f.isNil:
                nilFacts.append(f)
            if val.footnotesRelationshipSet.fromModelObject(f):
                modelXbrl.warning("EIOPA.S.19",
                    _("Fact %(fact)s of context %(contextID)s has footnotes.'"),
                    modelObject=f, fact=f.qname, contextID=f.contextID)

    if nilFacts:
        modelXbrl.error(("EBA.2.19", "EIOPA.S.2.19"),
            _('Nil facts MUST NOT be present in the instance: %(nilFacts)s.'),
            modelObject=nilFacts, nilFacts=", ".join(str(f.qname) for f in nilFacts))
    if stringFactsWithXmlLang:
        modelXbrl.warning("EIOPA.2.20", # not reported for EBA
            _("String facts reporting xml:lang (not saved by T4U, not round-tripped): '%(factsWithLang)s'"),
            modelObject=stringFactsWithXmlLang, factsWithLang=", ".join(set(str(f.qname) for f in stringFactsWithXmlLang)))
    if nonMonetaryNonPureFacts:
        modelXbrl.error(("EBA.3.2","EIOPA.3.2.a"),
            _("Non monetary (numeric) facts MUST use the pure unit: '%(langLessFacts)s'"),
            modelObject=nonMonetaryNonPureFacts, langLessFacts=", ".join(set(str(f.qname) for f in nonMonetaryNonPureFacts)))

    val.utrValidator.validateFacts() # validate facts for UTR at logLevel WARNING

    # --- duplicate units (EBA.2.21 / EIOPA.2.21) ----------------------------
    unitHashes = {}
    for unit in modelXbrl.units.values():
        h = unit.hash
        if h in unitHashes and unit.isEqualTo(unitHashes[h]):
            modelXbrl.warning("EBA.2.21",
                _("Duplicate units SHOULD NOT be reported, units %(unit1)s and %(unit2)s have same measures.'"),
                modelObject=(unit, unitHashes[h]), unit1=unit.id, unit2=unitHashes[h].id)
            if not getattr(modelXbrl, "isStreamingMode", False):
                modelXbrl.error("EIOPA.2.21",
                    _("Duplicate units MUST NOT be reported, units %(unit1)s and %(unit2)s have same measures.'"),
                    modelObject=(unit, unitHashes[h]), unit1=unit.id, unit2=unitHashes[h].id)
        else:
            unitHashes[h] = unit
        for _measures in unit.measures:
            for _measure in _measures:
                prefixUsed(val, _measure.namespaceURI, _measure.prefix)
    del unitHashes

    # --- duplicate contexts (EIOPA S.2.7.b) ---------------------------------
    cntxHashes = {}
    for cntx in modelXbrl.contexts.values():
        h = cntx.contextDimAwareHash
        if h in cntxHashes and cntx.isEqualTo(cntxHashes[h]):
            if not getattr(modelXbrl, "isStreamingMode", False):
                modelXbrl.log("WARNING" if val.isEIOPAfullVersion else "ERROR",
                    "EIOPA.S.2.7.b",
                    _("Duplicate contexts MUST NOT be reported, contexts %(cntx1)s and %(cntx2)s are equivalent.'"),
                    modelObject=(cntx, cntxHashes[h]), cntx1=cntx.id, cntx2=cntxHashes[h].id)
        else:
            cntxHashes[h] = cntx
        for _dim in cntx.qnameDims.values():
            _dimQn = _dim.dimensionQname
            prefixUsed(val, _dimQn.namespaceURI, _dimQn.prefix)
            if _dim.isExplicit:
                _memQn = _dim.memberQname
            else:
                _memQn = _dim.typedMember.qname
            if _memQn:
                prefixUsed(val, _memQn.namespaceURI, _memQn.prefix)

    # --- prefix usage and disallowed document content (EIOPA.2.5) -----------
    for elt in modelDocument.xmlRootElement.iter():
        if isinstance(elt, ModelObject): # skip comments and processing instructions
            prefixUsed(val, elt.qname.namespaceURI, elt.qname.prefix)
            for attrTag in elt.keys():
                if attrTag.startswith("{"):
                    _prefix, _NS, _localName = XmlUtil.clarkNotationToPrefixNsLocalname(elt, attrTag, isAttribute=True)
                    if _prefix:
                        prefixUsed(val, _NS, _prefix)
        elif val.isEIOPA_2_0_1:
            if elt.tag in ("{http://www.w3.org/2001/XMLSchema}documentation", "{http://www.w3.org/2001/XMLSchema}annotation"):
                modelXbrl.error("EIOPA.2.5",
                    _("xs:documentation element found, all relevant business data MUST only be contained in contexts, units, schemaRef and facts."),
                    modelObject=modelDocument)
            elif isinstance(elt, etree._Comment):
                modelXbrl.error("EIOPA.2.5",
                    _("XML comment found, all relevant business data MUST only be contained in contexts, units, schemaRef and facts: %(comment)s"),
                    modelObject=modelDocument, comment=elt.text)
def validateNonStreamingFinish(val, *args, **kwargs):
    """Plugin hook (Validate.XBRL.Finally): run final EBA/EIOPA checks.

    Skipped when the instance was validated in streaming mode, because then
    validateStreamingFinish() runs final() instead.
    """
    if getattr(val.modelXbrl, "isStreamingMode", False):
        return
    final(val)
def validateStreamingFinish(val, *args, **kwargs):
    """Plugin hook (Streaming.ValidateFinish): always run final checks when streaming."""
    return final(val)
def final(val):
    """Run the document-level EBA/EIOPA checks and report accumulated results.

    Called once per validation session — by validateNonStreamingFinish() (after
    validateFacts() over all facts) or by validateStreamingFinish() (after all
    streaming batches).  Reports on the accumulators created in validateSetup()
    and then tears them down.
    """
    if not (val.validateEBA or val.validateEIOPA):
        return
    modelXbrl = val.modelXbrl
    modelDocument = modelXbrl.modelDocument
    _statusMsg = _("validating {0} filing rules").format(val.disclosureSystem.name)
    modelXbrl.profileActivity()
    modelXbrl.modelManager.showStatus(_statusMsg)

    if modelDocument.type == ModelDocument.Type.INSTANCE and (val.validateEBA or val.validateEIOPA):
        if not modelDocument.uri.endswith(".xbrl"):
            # NOTE: both messages are emitted whichever disclosure system is
            # active — preserves original behavior
            modelXbrl.warning("EBA.1.1",
                _('XBRL instance documents SHOULD use the extension ".xbrl" but it is "%(extension)s"'),
                modelObject=modelDocument, extension=os.path.splitext(modelDocument.basename)[1])
            modelXbrl.error("EIOPA.S.1.1.a",
                _('XBRL instance documents MUST use the extension ".xbrl" but it is "%(extension)s"'),
                modelObject=modelDocument, extension=os.path.splitext(modelDocument.basename)[1])
        if val.isEIOPA_2_0_1: _encodings = ("UTF-8", "utf-8-sig")
        else: _encodings = ("utf-8", "UTF-8", "utf-8-sig")
        if modelDocument.documentEncoding not in _encodings:
            modelXbrl.error(("EBA.1.4", "EIOPA.1.4"),
                _('XBRL instance documents MUST use "UTF-8" encoding but is "%(xmlEncoding)s"'),
                modelObject=modelDocument, xmlEncoding=modelDocument.documentEncoding)

        # --- entry point schemaRef checks (EBA 1.5/2.2/2.3, EIOPA S.1.5) ----
        schemaRefElts = []
        schemaRefFileNames = []
        for doc, docRef in modelDocument.referencesDocument.items():
            if "href" in docRef.referenceTypes:
                if docRef.referringModelObject.localName == "schemaRef":
                    schemaRefElts.append(docRef.referringModelObject)
                    schemaRefFileNames.append(doc.basename)
                    if not UrlUtil.isAbsolute(doc.uri):
                        modelXbrl.error(("EBA.2.2", "EIOPA.S.1.5.a" if val.isEIOPAfullVersion else "EIOPA.S.1.5.b"),
                            _('The link:schemaRef element in submitted instances MUST resolve to the full published entry point URL: %(url)s.'),
                            modelObject=docRef.referringModelObject, url=doc.uri,
                            messageCodes=("EBA.2.2", "EIOPA.S.1.5.a","EIOPA.S.1.5.b"))
                elif docRef.referringModelObject.localName == "linkbaseRef":
                    modelXbrl.error(("EBA.2.3","EIOPA.S.1.5.a"),
                        _('The link:linkbaseRef element is not allowed: %(fileName)s.'),
                        modelObject=docRef.referringModelObject, fileName=doc.basename)
        _numSchemaRefs = len(XmlUtil.children(modelDocument.xmlRootElement, XbrlConst.link, "schemaRef"))
        if _numSchemaRefs > 1:
            modelXbrl.error(("EIOPA.S.1.5.a", "EBA.1.5"),
                _('XBRL instance documents MUST reference only one entry point schema but %(numEntryPoints)s were found: %(entryPointNames)s'),
                modelObject=modelDocument, numEntryPoints=_numSchemaRefs, entryPointNames=', '.join(sorted(schemaRefFileNames)))
        ### check entry point names appropriate for filing indicator (DPM DB?)
        if len(schemaRefElts) != 1:
            modelXbrl.error("EBA.2.3",
                _('Any reported XBRL instance document MUST contain only one xbrli:xbrl/link:schemaRef node, but %(entryPointCount)s.'),
                modelObject=schemaRefElts, entryPointCount=len(schemaRefElts))

        # non-streaming EBA checks (when streaming, validateFacts already ran per batch)
        if not getattr(modelXbrl, "isStreamingMode", False):
            val.qnReportedCurrency = None
            if val.isEIOPA_2_0_1 and qnMetReportingCurrency in modelXbrl.factsByQname:
                for _multiCurrencyFact in modelXbrl.factsByQname[qnMetReportingCurrency]:
                    # multi-currency fact
                    val.qnReportedCurrency = _multiCurrencyFact.xValue
                    break
            validateFacts(val, modelXbrl.facts)

            # check sum of fact md5s (otherwise checked in streaming process)
            xbrlFactsCheckVersion = None
            expectedSumOfFactMd5s = None
            # getchildren() is deprecated (removed in recent lxml/ElementTree);
            # iterating the element yields the same children
            for pi in modelDocument.xmlRootElement:
                if isinstance(pi, etree._ProcessingInstruction) and pi.target == "xbrl-facts-check":
                    # FIX: a single re.search only captured the FIRST attr="value"
                    # pair of the PI, so sum-of-fact-md5s was missed whenever the
                    # version attribute preceded it; scan all pairs instead
                    for _match in re.finditer("([\\w-]+)=[\"']([^\"']+)[\"']", pi.text):
                        _attrName, _attrValue = _match.groups()
                        if _attrName == "version":
                            xbrlFactsCheckVersion = _attrValue
                        elif _attrName == "sum-of-fact-md5s":
                            try:
                                expectedSumOfFactMd5s = Md5Sum(_attrValue)
                            except ValueError:
                                modelXbrl.error("EIOPA:xbrlFactsCheckError",
                                    _("Invalid sum-of-md5s %(sumOfMd5)s"),
                                    modelObject=modelXbrl, sumOfMd5=_attrValue)
            if xbrlFactsCheckVersion and expectedSumOfFactMd5s:
                sumOfFactMd5s = Md5Sum()
                for f in modelXbrl.factsInInstance:
                    sumOfFactMd5s += f.md5sum
                if sumOfFactMd5s != expectedSumOfFactMd5s:
                    modelXbrl.warning("EIOPA:xbrlFactsCheckWarning",
                        _("XBRL facts sum of md5s expected %(expectedMd5)s not matched to actual sum %(actualMd5Sum)s"),
                        modelObject=modelXbrl, expectedMd5=expectedSumOfFactMd5s, actualMd5Sum=sumOfFactMd5s)
                else:
                    modelXbrl.info("info",
                        _("Successful XBRL facts sum of md5s."),
                        modelObject=modelXbrl)

        # --- filing indicator presence (EBA/EIOPA 1.6) ----------------------
        if any(badError in modelXbrl.errors
               for badError in ("EBA.2.1", "EIOPA.2.1", "EIOPA.S.1.5.a/EIOPA.S.1.5.b")):
            pass # skip checking filingIndicators if bad errors
        elif not val.filingIndicators:
            modelXbrl.error(("EBA.1.6", "EIOPA.1.6.a"),
                _('Missing filing indicators. Reported XBRL instances MUST include appropriate (positive) filing indicator elements'),
                modelObject=modelDocument)
        elif not any(val.filingIndicators.values()): # idiomatic for: all values are False
            modelXbrl.error(("EBA.1.6", "EIOPA.1.6.a"),
                _('All filing indicators are filed="false". Reported XBRL instances MUST include appropriate (positive) filing indicator elements'),
                modelObject=modelDocument)
        if val.numFilingIndicatorTuples > 1:
            modelXbrl.warning(("EBA.1.6.2", "EIOPA.1.6.2"),
                _('Multiple filing indicators tuples when not in streaming mode (info).'),
                modelObject=modelXbrl.factsByQname[qnFIndicators])

        if len(val.cntxDates) > 1:
            modelXbrl.error(("EBA.2.13","EIOPA.2.13"),
                _('Contexts must have the same date: %(dates)s.'),
                # when streaming values are no longer available, but without streaming they can be logged
                modelObject=set(_cntx for _cntxs in val.cntxDates.values() for _cntx in _cntxs),
                dates=', '.join(XmlUtil.dateunionValue(_dt, subtractOneDay=True)
                                for _dt in val.cntxDates.keys()))
        if val.unusedCntxIDs:
            if val.isEIOPA_2_0_1:
                modelXbrl.error("EIOPA.2.7",
                    _('Unused xbrli:context nodes MUST NOT be present in the instance: %(unusedContextIDs)s.'),
                    modelObject=[modelXbrl.contexts[unusedCntxID] for unusedCntxID in val.unusedCntxIDs if unusedCntxID in modelXbrl.contexts],
                    unusedContextIDs=", ".join(sorted(val.unusedCntxIDs)))
            else:
                modelXbrl.warning(("EBA.2.7", "EIOPA.2.7"),
                    _('Unused xbrli:context nodes SHOULD NOT be present in the instance: %(unusedContextIDs)s.'),
                    modelObject=[modelXbrl.contexts[unusedCntxID] for unusedCntxID in val.unusedCntxIDs if unusedCntxID in modelXbrl.contexts],
                    unusedContextIDs=", ".join(sorted(val.unusedCntxIDs)))

        # --- entity identifier and LEI checks (EBA/EIOPA 2.8/2.9) -----------
        if len(val.cntxEntities) > 1:
            modelXbrl.error(("EBA.2.9", "EIOPA.2.9"),
                _('All entity identifiers and schemes MUST be the same, %(count)s found: %(entities)s.'),
                modelObject=modelDocument, count=len(val.cntxEntities),
                entities=", ".join(sorted(str(cntxEntity) for cntxEntity in val.cntxEntities)))
        for _scheme, _LEI in val.cntxEntities:
            if (_scheme in ("http://standards.iso.org/iso/17442", "http://standard.iso.org/iso/17442", "LEI") or
                (not val.isEIOPAfullVersion and _scheme == "PRE-LEI")):
                if _scheme == "http://standard.iso.org/iso/17442":
                    modelXbrl.warning(("EBA.3.6", "EIOPA.S.2.8.c"),
                        _("Warning, context has entity scheme %(scheme)s should be plural: http://standards.iso.org/iso/17442."),
                        modelObject=modelDocument, scheme=_scheme)
                result = LeiUtil.checkLei(_LEI)
                if result == LeiUtil.LEI_INVALID_LEXICAL:
                    modelXbrl.error("EIOPA.S.2.8.c",
                        _("Context has lexically invalid LEI %(lei)s."),
                        modelObject=modelDocument, lei=_LEI)
                elif result == LeiUtil.LEI_INVALID_CHECKSUM:
                    modelXbrl.error("EIOPA.S.2.8.c",
                        _("Context has LEI checksum error in %(lei)s."),
                        modelObject=modelDocument, lei=_LEI)
            elif _scheme == "SC":
                pass # anything is ok for Specific Code
            else:
                modelXbrl.error("EIOPA.S.2.8.c",
                    _("Context has unrecognized entity scheme %(scheme)s."),
                    modelObject=modelDocument, scheme=_scheme)

        if val.unusedUnitIDs:
            if val.isEIOPA_2_0_1:
                modelXbrl.error("EIOPA.2.22",
                    _('Unused xbrli:unit nodes MUST NOT be present in the instance: %(unusedUnitIDs)s.'),
                    modelObject=[modelXbrl.units[unusedUnitID] for unusedUnitID in val.unusedUnitIDs if unusedUnitID in modelXbrl.units],
                    unusedUnitIDs=", ".join(sorted(val.unusedUnitIDs)))
            else:
                modelXbrl.warning(("EBA.2.22", "EIOPA.2.22"),
                    _('Unused xbrli:unit nodes SHOULD NOT be present in the instance: %(unusedUnitIDs)s.'),
                    modelObject=[modelXbrl.units[unusedUnitID] for unusedUnitID in val.unusedUnitIDs if unusedUnitID in modelXbrl.units],
                    unusedUnitIDs=", ".join(sorted(val.unusedUnitIDs)))

        # --- single-currency rule (EBA/EIOPA 3.1) ---------------------------
        if len(val.currenciesUsed) > 1:
            modelXbrl.error(("EBA.3.1","EIOPA.3.1"),
                _("There MUST be only one currency but %(numCurrencies)s were found: %(currencies)s.'"),
                modelObject=val.currenciesUsed.values(), numCurrencies=len(val.currenciesUsed), currencies=", ".join(str(c) for c in val.currenciesUsed.keys()))
        elif val.isEIOPA_2_0_1 and any(_measure.localName != val.reportingCurrency for _measure in val.currenciesUsed.keys()):
            modelXbrl.error("EIOPA.3.1",
                _("There MUST be only one currency but reporting currency %(reportingCurrency)s differs from unit currencies: %(currencies)s.'"),
                modelObject=val.currenciesUsed.values(), reportingCurrency=val.reportingCurrency, currencies=", ".join(str(c) for c in val.currenciesUsed.keys()))

        # --- namespace prefix hygiene (EBA/EIOPA 3.4, 3.5) ------------------
        if val.prefixesUnused:
            modelXbrl.warning(("EBA.3.4", "EIOPA.3.4"),
                _("There SHOULD be no unused prefixes but these were declared: %(unusedPrefixes)s.'"),
                modelObject=modelDocument, unusedPrefixes=', '.join(sorted(val.prefixesUnused)))
        for ns, prefixes in val.namespacePrefixesUsed.items():
            nsDocs = modelXbrl.namespaceDocs.get(ns)
            if nsDocs:
                for nsDoc in nsDocs:
                    nsDocPrefix = XmlUtil.xmlnsprefix(nsDoc.xmlRootElement, ns)
                    if any(prefix != nsDocPrefix for prefix in prefixes if prefix is not None):
                        modelXbrl.warning(("EBA.3.5", "EIOPA.3.5"),
                            _("Prefix for namespace %(namespace)s is %(declaredPrefix)s but these were found %(foundPrefixes)s"),
                            modelObject=modelDocument, namespace=ns, declaredPrefix=nsDocPrefix, foundPrefixes=', '.join(sorted(prefixes - {None})))
            elif ns in CANONICAL_PREFIXES and any(prefix != CANONICAL_PREFIXES[ns] for prefix in prefixes if prefix is not None):
                modelXbrl.warning(("EBA.3.5", "EIOPA.3.5"),
                    _("Prefix for namespace %(namespace)s is %(declaredPrefix)s but these were found %(foundPrefixes)s"),
                    modelObject=modelDocument, namespace=ns, declaredPrefix=CANONICAL_PREFIXES[ns], foundPrefixes=', '.join(sorted(prefixes - {None})))

    modelXbrl.profileActivity(_statusMsg, minTimeToShow=0.0)
    modelXbrl.modelManager.showStatus(None)

    # tear down per-session state created in validateSetup()
    del val.prefixNamespace, val.namespacePrefix, val.idObjects, val.typedDomainElements
    del val.utrValidator, val.firstFact, val.footnotesRelationshipSet
# Arelle plugin registration: maps plugin mount-point names to the hook
# functions defined above.
__pluginInfo__ = {
    # Do not use _( ) in pluginInfo itself (it is applied later, after loading)
    'name': 'Validate EBA, EIOPA',
    'version': '1.2',
    'description': 'EBA (2.3), EIOPA (2.0.0) Filing Rules Validation.',
    'license': 'Apache-2',
    'author': 'Mark V Systems',
    'copyright': '(c) Copyright 2015 Mark V Systems Limited, All rights reserved.',
    # classes of mount points (required)
    'DisclosureSystem.Types': dislosureSystemTypes,
    'DisclosureSystem.ConfigURL': disclosureSystemConfigURL,
    'Validate.XBRL.Start': validateSetup,
    'Validate.XBRL.Finally': validateNonStreamingFinish,
    # streaming hooks (used when the streaming extension plugin is active)
    'Streaming.ValidateFacts': validateStreamingFacts,
    'Streaming.ValidateFinish': validateStreamingFinish,
}
| apache-2.0 |
retomerz/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/tests/test_geos.py | 76 | 43164 | import ctypes, random, unittest, sys
from django.contrib.gis.geos import *
from django.contrib.gis.geos.base import gdal, numpy, GEOSBase
from django.contrib.gis.geos.libgeos import GEOS_PREPARE
from django.contrib.gis.geometry.test_data import TestDataMixin
class GEOSTest(unittest.TestCase, TestDataMixin):
@property
def null_srid(self):
    """
    Return the proper "null" SRID for this GEOS version: GEOS 3.0.0
    release candidates report -1 for an undefined SRID, while other
    versions report None.  (See the comments in `test15_srid`.)
    """
    info = geos_version_info()
    is_300_rc = info['version'] == '3.0.0' and info['release_candidate']
    return -1 if is_300_rc else None
def test00_base(self):
"Tests out the GEOSBase class."
# Testing out GEOSBase class, which provides a `ptr` property
# that abstracts out access to underlying C pointers.
class FakeGeom1(GEOSBase):
pass
# This one only accepts pointers to floats
c_float_p = ctypes.POINTER(ctypes.c_float)
class FakeGeom2(GEOSBase):
ptr_type = c_float_p
# Default ptr_type is `c_void_p`.
fg1 = FakeGeom1()
# Default ptr_type is C float pointer
fg2 = FakeGeom2()
# These assignments are OK -- None is allowed because
# it's equivalent to the NULL pointer.
fg1.ptr = ctypes.c_void_p()
fg1.ptr = None
fg2.ptr = c_float_p(ctypes.c_float(5.23))
fg2.ptr = None
# Because pointers have been set to NULL, an exception should be
# raised when we try to access it. Raising an exception is
# preferrable to a segmentation fault that commonly occurs when
# a C method is given a NULL memory reference.
for fg in (fg1, fg2):
# Equivalent to `fg.ptr`
self.assertRaises(GEOSException, fg._get_ptr)
# Anything that is either not None or the acceptable pointer type will
# result in a TypeError when trying to assign it to the `ptr` property.
# Thus, memmory addresses (integers) and pointers of the incorrect type
# (in `bad_ptrs`) will not be allowed.
bad_ptrs = (5, ctypes.c_char_p('foobar'))
for bad_ptr in bad_ptrs:
# Equivalent to `fg.ptr = bad_ptr`
self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)
def test01a_wkt(self):
"Testing WKT output."
for g in self.geometries.wkt_out:
geom = fromstr(g.wkt)
self.assertEqual(g.ewkt, geom.wkt)
def test01b_hex(self):
"Testing HEX output."
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
self.assertEqual(g.hex, geom.hex)
def test01b_hexewkb(self):
"Testing (HEX)EWKB output."
from binascii import a2b_hex
# For testing HEX(EWKB).
ogc_hex = '01010000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
hexewkb_2d = '0101000020E61000000000000000000000000000000000F03F'
# `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
hexewkb_3d = '01010000A0E61000000000000000000000000000000000F03F0000000000000040'
pnt_2d = Point(0, 1, srid=4326)
pnt_3d = Point(0, 1, 2, srid=4326)
# OGC-compliant HEX will not have SRID nor Z value.
self.assertEqual(ogc_hex, pnt_2d.hex)
self.assertEqual(ogc_hex, pnt_3d.hex)
# HEXEWKB should be appropriate for its dimension -- have to use an
# a WKBWriter w/dimension set accordingly, else GEOS will insert
# garbage into 3D coordinate if there is none. Also, GEOS has a
# a bug in versions prior to 3.1 that puts the X coordinate in
# place of Z; an exception should be raised on those versions.
self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
if GEOS_PREPARE:
self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
else:
try:
hexewkb = pnt_3d.hexewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException.')
# Same for EWKB.
self.assertEqual(buffer(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
if GEOS_PREPARE:
self.assertEqual(buffer(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
else:
try:
ewkb = pnt_3d.ewkb
except GEOSException:
pass
else:
self.fail('Should have raised GEOSException')
# Redundant sanity check.
self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)
def test01c_kml(self):
"Testing KML output."
for tg in self.geometries.wkt_out:
geom = fromstr(tg.wkt)
kml = getattr(tg, 'kml', False)
if kml: self.assertEqual(kml, geom.kml)
def test01d_errors(self):
"Testing the Error handlers."
# string-based
print "\nBEGIN - expecting GEOS_ERROR; safe to ignore.\n"
for err in self.geometries.errors:
try:
g = fromstr(err.wkt)
except (GEOSException, ValueError):
pass
# Bad WKB
self.assertRaises(GEOSException, GEOSGeometry, buffer('0'))
print "\nEND - expecting GEOS_ERROR; safe to ignore.\n"
class NotAGeometry(object):
pass
# Some other object
self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
# None
self.assertRaises(TypeError, GEOSGeometry, None)
def test01e_wkb(self):
"Testing WKB output."
from binascii import b2a_hex
for g in self.geometries.hex_wkt:
geom = fromstr(g.wkt)
wkb = geom.wkb
self.assertEqual(b2a_hex(wkb).upper(), g.hex)
def test01f_create_hex(self):
"Testing creation from HEX."
for g in self.geometries.hex_wkt:
geom_h = GEOSGeometry(g.hex)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01g_create_wkb(self):
"Testing creation from WKB."
from binascii import a2b_hex
for g in self.geometries.hex_wkt:
wkb = buffer(a2b_hex(g.hex))
geom_h = GEOSGeometry(wkb)
# we need to do this so decimal places get normalised
geom_t = fromstr(g.wkt)
self.assertEqual(geom_t.wkt, geom_h.wkt)
def test01h_ewkt(self):
"Testing EWKT."
srid = 32140
for p in self.geometries.polygons:
ewkt = 'SRID=%d;%s' % (srid, p.wkt)
poly = fromstr(ewkt)
self.assertEqual(srid, poly.srid)
self.assertEqual(srid, poly.shell.srid)
self.assertEqual(srid, fromstr(poly.ewkt).srid) # Checking export
def test01i_json(self):
"Testing GeoJSON input/output (via GDAL)."
if not gdal or not gdal.GEOJSON: return
for g in self.geometries.json_geoms:
geom = GEOSGeometry(g.wkt)
if not hasattr(g, 'not_equal'):
self.assertEqual(g.json, geom.json)
self.assertEqual(g.json, geom.geojson)
self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))
def test01k_fromfile(self):
"Testing the fromfile() factory."
from StringIO import StringIO
ref_pnt = GEOSGeometry('POINT(5 23)')
wkt_f = StringIO()
wkt_f.write(ref_pnt.wkt)
wkb_f = StringIO()
wkb_f.write(str(ref_pnt.wkb))
# Other tests use `fromfile()` on string filenames so those
# aren't tested here.
for fh in (wkt_f, wkb_f):
fh.seek(0)
pnt = fromfile(fh)
self.assertEqual(ref_pnt, pnt)
def test01k_eq(self):
"Testing equivalence."
p = fromstr('POINT(5 23)')
self.assertEqual(p, p.wkt)
self.assertNotEqual(p, 'foo')
ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
self.assertEqual(ls, ls.wkt)
self.assertNotEqual(p, 'bar')
# Error shouldn't be raise on equivalence testing with
# an invalid type.
for g in (p, ls):
self.assertNotEqual(g, None)
self.assertNotEqual(g, {'foo' : 'bar'})
self.assertNotEqual(g, False)
def test02a_points(self):
"Testing Point objects."
prev = fromstr('POINT(0 0)')
for p in self.geometries.points:
# Creating the point from the WKT
pnt = fromstr(p.wkt)
self.assertEqual(pnt.geom_type, 'Point')
self.assertEqual(pnt.geom_typeid, 0)
self.assertEqual(p.x, pnt.x)
self.assertEqual(p.y, pnt.y)
self.assertEqual(True, pnt == fromstr(p.wkt))
self.assertEqual(False, pnt == prev)
# Making sure that the point's X, Y components are what we expect
self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
self.assertAlmostEqual(p.y, pnt.tuple[1], 9)
# Testing the third dimension, and getting the tuple arguments
if hasattr(p, 'z'):
self.assertEqual(True, pnt.hasz)
self.assertEqual(p.z, pnt.z)
self.assertEqual(p.z, pnt.tuple[2], 9)
tup_args = (p.x, p.y, p.z)
set_tup1 = (2.71, 3.14, 5.23)
set_tup2 = (5.23, 2.71, 3.14)
else:
self.assertEqual(False, pnt.hasz)
self.assertEqual(None, pnt.z)
tup_args = (p.x, p.y)
set_tup1 = (2.71, 3.14)
set_tup2 = (3.14, 2.71)
# Centroid operation on point should be point itself
self.assertEqual(p.centroid, pnt.centroid.tuple)
# Now testing the different constructors
pnt2 = Point(tup_args) # e.g., Point((1, 2))
pnt3 = Point(*tup_args) # e.g., Point(1, 2)
self.assertEqual(True, pnt == pnt2)
self.assertEqual(True, pnt == pnt3)
# Now testing setting the x and y
pnt.y = 3.14
pnt.x = 2.71
self.assertEqual(3.14, pnt.y)
self.assertEqual(2.71, pnt.x)
# Setting via the tuple/coords property
pnt.tuple = set_tup1
self.assertEqual(set_tup1, pnt.tuple)
pnt.coords = set_tup2
self.assertEqual(set_tup2, pnt.coords)
prev = pnt # setting the previous geometry
def test02b_multipoints(self):
"Testing MultiPoint objects."
for mp in self.geometries.multipoints:
mpnt = fromstr(mp.wkt)
self.assertEqual(mpnt.geom_type, 'MultiPoint')
self.assertEqual(mpnt.geom_typeid, 4)
self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)
self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
self.assertEqual(mp.centroid, mpnt.centroid.tuple)
self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
for p in mpnt:
self.assertEqual(p.geom_type, 'Point')
self.assertEqual(p.geom_typeid, 0)
self.assertEqual(p.empty, False)
self.assertEqual(p.valid, True)
def test03a_linestring(self):
"Testing LineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.linestrings:
ls = fromstr(l.wkt)
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertEqual(ls.ring, False)
if hasattr(l, 'centroid'):
self.assertEqual(l.centroid, ls.centroid.tuple)
if hasattr(l, 'tup'):
self.assertEqual(l.tup, ls.tuple)
self.assertEqual(True, ls == fromstr(l.wkt))
self.assertEqual(False, ls == prev)
self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
prev = ls
# Creating a LineString from a tuple, list, and numpy array
self.assertEqual(ls, LineString(ls.tuple)) # tuple
self.assertEqual(ls, LineString(*ls.tuple)) # as individual arguments
self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple])) # as list
self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt) # Point individual arguments
if numpy: self.assertEqual(ls, LineString(numpy.array(ls.tuple))) # as numpy array
def test03b_multilinestring(self):
"Testing MultiLineString objects."
prev = fromstr('POINT(0 0)')
for l in self.geometries.multilinestrings:
ml = fromstr(l.wkt)
self.assertEqual(ml.geom_type, 'MultiLineString')
self.assertEqual(ml.geom_typeid, 5)
self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)
self.assertEqual(True, ml == fromstr(l.wkt))
self.assertEqual(False, ml == prev)
prev = ml
for ls in ml:
self.assertEqual(ls.geom_type, 'LineString')
self.assertEqual(ls.geom_typeid, 1)
self.assertEqual(ls.empty, False)
self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))
def test04_linearring(self):
"Testing LinearRing objects."
for rr in self.geometries.linearrings:
lr = fromstr(rr.wkt)
self.assertEqual(lr.geom_type, 'LinearRing')
self.assertEqual(lr.geom_typeid, 2)
self.assertEqual(rr.n_p, len(lr))
self.assertEqual(True, lr.valid)
self.assertEqual(False, lr.empty)
# Creating a LinearRing from a tuple, list, and numpy array
self.assertEqual(lr, LinearRing(lr.tuple))
self.assertEqual(lr, LinearRing(*lr.tuple))
self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
if numpy: self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))
def test05a_polygons(self):
"Testing Polygon objects."
# Testing `from_bbox` class method
bbox = (-180, -90, 180, 90)
p = Polygon.from_bbox( bbox )
self.assertEqual(bbox, p.extent)
prev = fromstr('POINT(0 0)')
for p in self.geometries.polygons:
# Creating the Polygon, testing its properties.
poly = fromstr(p.wkt)
self.assertEqual(poly.geom_type, 'Polygon')
self.assertEqual(poly.geom_typeid, 3)
self.assertEqual(poly.empty, False)
self.assertEqual(poly.ring, False)
self.assertEqual(p.n_i, poly.num_interior_rings)
self.assertEqual(p.n_i + 1, len(poly)) # Testing __len__
self.assertEqual(p.n_p, poly.num_points)
# Area & Centroid
self.assertAlmostEqual(p.area, poly.area, 9)
self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)
# Testing the geometry equivalence
self.assertEqual(True, poly == fromstr(p.wkt))
self.assertEqual(False, poly == prev) # Should not be equal to previous geometry
self.assertEqual(True, poly != prev)
# Testing the exterior ring
ring = poly.exterior_ring
self.assertEqual(ring.geom_type, 'LinearRing')
self.assertEqual(ring.geom_typeid, 2)
if p.ext_ring_cs:
self.assertEqual(p.ext_ring_cs, ring.tuple)
self.assertEqual(p.ext_ring_cs, poly[0].tuple) # Testing __getitem__
# Testing __getitem__ and __setitem__ on invalid indices
self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)
# Testing __iter__
for r in poly:
self.assertEqual(r.geom_type, 'LinearRing')
self.assertEqual(r.geom_typeid, 2)
# Testing polygon construction.
self.assertRaises(TypeError, Polygon.__init__, 0, [1, 2, 3])
self.assertRaises(TypeError, Polygon.__init__, 'foo')
# Polygon(shell, (hole1, ... holeN))
rings = tuple(r for r in poly)
self.assertEqual(poly, Polygon(rings[0], rings[1:]))
# Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
ring_tuples = tuple(r.tuple for r in poly)
self.assertEqual(poly, Polygon(*ring_tuples))
# Constructing with tuples of LinearRings.
self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)
def test05b_multipolygons(self):
"Testing MultiPolygon objects."
print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"
prev = fromstr('POINT (0 0)')
for mp in self.geometries.multipolygons:
mpoly = fromstr(mp.wkt)
self.assertEqual(mpoly.geom_type, 'MultiPolygon')
self.assertEqual(mpoly.geom_typeid, 6)
self.assertEqual(mp.valid, mpoly.valid)
if mp.valid:
self.assertEqual(mp.num_geom, mpoly.num_geom)
self.assertEqual(mp.n_p, mpoly.num_coords)
self.assertEqual(mp.num_geom, len(mpoly))
self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
for p in mpoly:
self.assertEqual(p.geom_type, 'Polygon')
self.assertEqual(p.geom_typeid, 3)
self.assertEqual(p.valid, True)
self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)
print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def test06a_memory_hijinks(self):
"Testing Geometry __del__() on rings and polygons."
#### Memory issues with rings and polygons
# These tests are needed to ensure sanity with writable geometries.
# Getting a polygon with interior rings, and pulling out the interior rings
poly = fromstr(self.geometries.polygons[1].wkt)
ring1 = poly[0]
ring2 = poly[1]
# These deletes should be 'harmless' since they are done on child geometries
del ring1
del ring2
ring1 = poly[0]
ring2 = poly[1]
# Deleting the polygon
del poly
# Access to these rings is OK since they are clones.
s1, s2 = str(ring1), str(ring2)
def test08_coord_seq(self):
"Testing Coordinate Sequence objects."
for p in self.geometries.polygons:
if p.ext_ring_cs:
# Constructing the polygon and getting the coordinate sequence
poly = fromstr(p.wkt)
cs = poly.exterior_ring.coord_seq
self.assertEqual(p.ext_ring_cs, cs.tuple) # done in the Polygon test too.
self.assertEqual(len(p.ext_ring_cs), len(cs)) # Making sure __len__ works
# Checks __getitem__ and __setitem__
for i in xrange(len(p.ext_ring_cs)):
c1 = p.ext_ring_cs[i] # Expected value
c2 = cs[i] # Value from coordseq
self.assertEqual(c1, c2)
# Constructing the test value to set the coordinate sequence with
if len(c1) == 2: tset = (5, 23)
else: tset = (5, 23, 8)
cs[i] = tset
# Making sure every set point matches what we expect
for j in range(len(tset)):
cs[i] = tset
self.assertEqual(tset[j], cs[i][j])
def test09_relate_pattern(self):
"Testing relate() and relate_pattern()."
g = fromstr('POINT (0 0)')
self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
for rg in self.geometries.relate_geoms:
a = fromstr(rg.wkt_a)
b = fromstr(rg.wkt_b)
self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
self.assertEqual(rg.pattern, a.relate(b))
def test10_intersection(self):
"Testing intersects() and intersection()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
self.assertEqual(True, a.intersects(b))
i2 = a.intersection(b)
self.assertEqual(i1, i2)
self.assertEqual(i1, a & b) # __and__ is intersection operator
a &= b # testing __iand__
self.assertEqual(i1, a)
def test11_union(self):
"Testing union()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
u1 = fromstr(self.geometries.union_geoms[i].wkt)
u2 = a.union(b)
self.assertEqual(u1, u2)
self.assertEqual(u1, a | b) # __or__ is union operator
a |= b # testing __ior__
self.assertEqual(u1, a)
def test12_difference(self):
"Testing difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.diff_geoms[i].wkt)
d2 = a.difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a - b) # __sub__ is difference operator
a -= b # testing __isub__
self.assertEqual(d1, a)
def test13_symdifference(self):
"Testing sym_difference()."
for i in xrange(len(self.geometries.topology_geoms)):
a = fromstr(self.geometries.topology_geoms[i].wkt_a)
b = fromstr(self.geometries.topology_geoms[i].wkt_b)
d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
d2 = a.sym_difference(b)
self.assertEqual(d1, d2)
self.assertEqual(d1, a ^ b) # __xor__ is symmetric difference operator
a ^= b # testing __ixor__
self.assertEqual(d1, a)
def test14_buffer(self):
"Testing buffer()."
for bg in self.geometries.buffer_geoms:
g = fromstr(bg.wkt)
# The buffer we expect
exp_buf = fromstr(bg.buffer_wkt)
quadsegs = bg.quadsegs
width = bg.width
# Can't use a floating-point for the number of quadsegs.
self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))
# Constructing our buffer
buf = g.buffer(width, quadsegs)
self.assertEqual(exp_buf.num_coords, buf.num_coords)
self.assertEqual(len(exp_buf), len(buf))
# Now assuring that each point in the buffer is almost equal
for j in xrange(len(exp_buf)):
exp_ring = exp_buf[j]
buf_ring = buf[j]
self.assertEqual(len(exp_ring), len(buf_ring))
for k in xrange(len(exp_ring)):
# Asserting the X, Y of each point are almost equal (due to floating point imprecision)
self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)
def test15_srid(self):
"Testing the SRID property and keyword."
# Testing SRID keyword on Point
pnt = Point(5, 23, srid=4326)
self.assertEqual(4326, pnt.srid)
pnt.srid = 3084
self.assertEqual(3084, pnt.srid)
self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')
# Testing SRID keyword on fromstr(), and on Polygon rings.
poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
self.assertEqual(4269, poly.srid)
for ring in poly: self.assertEqual(4269, ring.srid)
poly.srid = 4326
self.assertEqual(4326, poly.shell.srid)
# Testing SRID keyword on GeometryCollection
gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
self.assertEqual(32021, gc.srid)
for i in range(len(gc)): self.assertEqual(32021, gc[i].srid)
# GEOS may get the SRID from HEXEWKB
# 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
# using `SELECT GeomFromText('POINT (5 23)', 4326);`.
hex = '0101000020E610000000000000000014400000000000003740'
p1 = fromstr(hex)
self.assertEqual(4326, p1.srid)
# In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
# the SRID information is lost and set to -1 -- this is not a
# problem on the 3.0.0 version (another reason to upgrade).
exp_srid = self.null_srid
p2 = fromstr(p1.hex)
self.assertEqual(exp_srid, p2.srid)
p3 = fromstr(p1.hex, srid=-1) # -1 is intended.
self.assertEqual(-1, p3.srid)
def test16_mutable_geometries(self):
"Testing the mutability of Polygons and Geometry Collections."
### Testing the mutability of Polygons ###
for p in self.geometries.polygons:
poly = fromstr(p.wkt)
# Should only be able to use __setitem__ with LinearRing geometries.
self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))
# Constructing the new shell by adding 500 to every point in the old shell.
shell_tup = poly.shell.tuple
new_coords = []
for point in shell_tup: new_coords.append((point[0] + 500., point[1] + 500.))
new_shell = LinearRing(*tuple(new_coords))
# Assigning polygon's exterior ring w/the new shell
poly.exterior_ring = new_shell
s = str(new_shell) # new shell is still accessible
self.assertEqual(poly.exterior_ring, new_shell)
self.assertEqual(poly[0], new_shell)
### Testing the mutability of Geometry Collections
for tg in self.geometries.multipoints:
mp = fromstr(tg.wkt)
for i in range(len(mp)):
# Creating a random point.
pnt = mp[i]
new = Point(random.randint(1, 100), random.randint(1, 100))
# Testing the assignment
mp[i] = new
s = str(new) # what was used for the assignment is still accessible
self.assertEqual(mp[i], new)
self.assertEqual(mp[i].wkt, new.wkt)
self.assertNotEqual(pnt, mp[i])
# MultiPolygons involve much more memory management because each
# Polygon w/in the collection has its own rings.
for tg in self.geometries.multipolygons:
mpoly = fromstr(tg.wkt)
for i in xrange(len(mpoly)):
poly = mpoly[i]
old_poly = mpoly[i]
# Offsetting the each ring in the polygon by 500.
for j in xrange(len(poly)):
r = poly[j]
for k in xrange(len(r)): r[k] = (r[k][0] + 500., r[k][1] + 500.)
poly[j] = r
self.assertNotEqual(mpoly[i], poly)
# Testing the assignment
mpoly[i] = poly
s = str(poly) # Still accessible
self.assertEqual(mpoly[i], poly)
self.assertNotEqual(mpoly[i], old_poly)
# Extreme (!!) __setitem__ -- no longer works, have to detect
# in the first object that __setitem__ is called in the subsequent
# objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
#mpoly[0][0][0] = (3.14, 2.71)
#self.assertEqual((3.14, 2.71), mpoly[0][0][0])
# Doing it more slowly..
#self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
#del mpoly
def test17_threed(self):
"Testing three-dimensional geometries."
# Testing a 3D Point
pnt = Point(2, 3, 8)
self.assertEqual((2.,3.,8.), pnt.coords)
self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
pnt.coords = (1.,2.,3.)
self.assertEqual((1.,2.,3.), pnt.coords)
# Testing a 3D LineString
ls = LineString((2., 3., 8.), (50., 250., -117.))
self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
ls[0] = (1.,2.,3.)
self.assertEqual((1.,2.,3.), ls[0])
def test18_distance(self):
"Testing the distance() function."
# Distance to self should be 0.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.distance(Point(0, 0)))
# Distance should be 1
self.assertEqual(1.0, pnt.distance(Point(0, 1)))
# Distance should be ~ sqrt(2)
self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)
# Distances are from the closest vertex in each geometry --
# should be 3 (distance from (2, 2) to (5, 2)).
ls1 = LineString((0, 0), (1, 1), (2, 2))
ls2 = LineString((5, 2), (6, 1), (7, 0))
self.assertEqual(3, ls1.distance(ls2))
def test19_length(self):
"Testing the length property."
# Points have 0 length.
pnt = Point(0, 0)
self.assertEqual(0.0, pnt.length)
# Should be ~ sqrt(2)
ls = LineString((0, 0), (1, 1))
self.assertAlmostEqual(1.41421356237, ls.length, 11)
# Should be circumfrence of Polygon
poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
self.assertEqual(4.0, poly.length)
# Should be sum of each element's length in collection.
mpoly = MultiPolygon(poly.clone(), poly)
self.assertEqual(8.0, mpoly.length)
def test20a_emptyCollections(self):
"Testing empty geometries and collections."
gc1 = GeometryCollection([])
gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
pnt = fromstr('POINT EMPTY')
ls = fromstr('LINESTRING EMPTY')
poly = fromstr('POLYGON EMPTY')
mls = fromstr('MULTILINESTRING EMPTY')
mpoly1 = fromstr('MULTIPOLYGON EMPTY')
mpoly2 = MultiPolygon(())
for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
self.assertEqual(True, g.empty)
# Testing len() and num_geom.
if isinstance(g, Polygon):
self.assertEqual(1, len(g)) # Has one empty linear ring
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g[0]))
elif isinstance(g, (Point, LineString)):
self.assertEqual(1, g.num_geom)
self.assertEqual(0, len(g))
else:
self.assertEqual(0, g.num_geom)
self.assertEqual(0, len(g))
# Testing __getitem__ (doesn't work on Point or Polygon)
if isinstance(g, Point):
self.assertRaises(GEOSIndexError, g.get_x)
elif isinstance(g, Polygon):
lr = g.shell
self.assertEqual('LINEARRING EMPTY', lr.wkt)
self.assertEqual(0, len(lr))
self.assertEqual(True, lr.empty)
self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
else:
self.assertRaises(GEOSIndexError, g.__getitem__, 0)
def test20b_collections_of_collections(self):
"Testing GeometryCollection handling of other collections."
# Creating a GeometryCollection WKT string composed of other
# collections and polygons.
coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
coll.extend([mls.wkt for mls in self.geometries.multilinestrings])
coll.extend([p.wkt for p in self.geometries.polygons])
coll.extend([mp.wkt for mp in self.geometries.multipoints])
gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)
# Should construct ok from WKT
gc1 = GEOSGeometry(gc_wkt)
# Should also construct ok from individual geometry arguments.
gc2 = GeometryCollection(*tuple(g for g in gc1))
# And, they should be equal.
self.assertEqual(gc1, gc2)
def test21_test_gdal(self):
"Testing `ogr` and `srs` properties."
if not gdal.HAS_GDAL: return
g1 = fromstr('POINT(5 23)')
self.assertEqual(True, isinstance(g1.ogr, gdal.OGRGeometry))
self.assertEqual(g1.srs, None)
g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
self.assertEqual(True, isinstance(g2.ogr, gdal.OGRGeometry))
self.assertEqual(True, isinstance(g2.srs, gdal.SpatialReference))
self.assertEqual(g2.hex, g2.ogr.hex)
self.assertEqual('WGS 84', g2.srs.name)
def test22_copy(self):
"Testing use with the Python `copy` module."
import django.utils.copycompat as copy
poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
cpy1 = copy.copy(poly)
cpy2 = copy.deepcopy(poly)
self.assertNotEqual(poly._ptr, cpy1._ptr)
self.assertNotEqual(poly._ptr, cpy2._ptr)
def test23_transform(self):
"Testing `transform` method."
if not gdal.HAS_GDAL: return
orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)
# Using a srid, a SpatialReference object, and a CoordTransform object
# for transformations.
t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
t1.transform(trans.srid)
t2.transform(gdal.SpatialReference('EPSG:2774'))
ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
t3.transform(ct)
# Testing use of the `clone` keyword.
k1 = orig.clone()
k2 = k1.transform(trans.srid, clone=True)
self.assertEqual(k1, orig)
self.assertNotEqual(k1, k2)
prec = 3
for p in (t1, t2, t3, k2):
self.assertAlmostEqual(trans.x, p.x, prec)
self.assertAlmostEqual(trans.y, p.y, prec)
def test23_transform_noop(self):
""" Testing `transform` method (SRID match) """
# transform() should no-op if source & dest SRIDs match,
# regardless of whether GDAL is available.
if gdal.HAS_GDAL:
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assert_(g1 is not g, "Clone didn't happen")
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
gt = g.tuple
g.transform(4326)
self.assertEqual(g.tuple, gt)
self.assertEqual(g.srid, 4326)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
g1 = g.transform(4326, clone=True)
self.assertEqual(g1.tuple, g.tuple)
self.assertEqual(g1.srid, 4326)
self.assert_(g1 is not g, "Clone didn't happen")
finally:
gdal.HAS_GDAL = old_has_gdal
def test23_transform_nosrid(self):
""" Testing `transform` method (no SRID) """
# raise a warning if SRID <0/None
import warnings
print "\nBEGIN - expecting Warnings; safe to ignore.\n"
# test for do-nothing behaviour.
try:
# Keeping line-noise down by only printing the relevant
# warnings once.
warnings.simplefilter('once', UserWarning)
warnings.simplefilter('once', FutureWarning)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
g.transform(2774)
self.assertEqual(g.tuple, (-104.609, 38.255))
self.assertEqual(g.srid, None)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
g1 = g.transform(2774, clone=True)
self.assert_(g1 is None)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
g.transform(2774)
self.assertEqual(g.tuple, (-104.609, 38.255))
self.assertEqual(g.srid, -1)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
g1 = g.transform(2774, clone=True)
self.assert_(g1 is None)
finally:
warnings.simplefilter('default', UserWarning)
warnings.simplefilter('default', FutureWarning)
print "\nEND - expecting Warnings; safe to ignore.\n"
# test warning is raised
try:
warnings.simplefilter('error', FutureWarning)
warnings.simplefilter('ignore', UserWarning)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(FutureWarning, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
self.assertRaises(FutureWarning, g.transform, 2774, clone=True)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(FutureWarning, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
self.assertRaises(FutureWarning, g.transform, 2774, clone=True)
finally:
warnings.simplefilter('default', FutureWarning)
warnings.simplefilter('default', UserWarning)
def test23_transform_nogdal(self):
""" Testing `transform` method (GDAL not available) """
old_has_gdal = gdal.HAS_GDAL
try:
gdal.HAS_GDAL = False
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774)
g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
self.assertRaises(GEOSException, g.transform, 2774, clone=True)
finally:
gdal.HAS_GDAL = old_has_gdal
def test24_extent(self):
"Testing `extent` method."
# The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
pnt = Point(5.23, 17.8)
# Extent of points is just the point itself repeated.
self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
# Testing on the 'real world' Polygon.
poly = fromstr(self.geometries.polygons[3].wkt)
ring = poly.shell
x, y = ring.x, ring.y
xmin, ymin = min(x), min(y)
xmax, ymax = max(x), max(y)
self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)
def test25_pickle(self):
"Testing pickling and unpickling support."
# Using both pickle and cPickle -- just 'cause.
import pickle, cPickle
# Creating a list of test geometries for pickling,
# and setting the SRID on some of them.
def get_geoms(lst, srid=None):
return [GEOSGeometry(tg.wkt, srid) for tg in lst]
tgeoms = get_geoms(self.geometries.points)
tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))
# The SRID won't be exported in GEOS 3.0 release candidates.
no_srid = self.null_srid == -1
for geom in tgeoms:
s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
g1, g2 = cPickle.loads(s1), pickle.loads(s2)
for tmpg in (g1, g2):
self.assertEqual(geom, tmpg)
if not no_srid: self.assertEqual(geom.srid, tmpg.srid)
def test26_prepared(self):
"Testing PreparedGeometry support."
if not GEOS_PREPARE: return
# Creating a simple multipolygon and getting a prepared version.
mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
prep = mpoly.prepared
# A set of test points.
pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
covers = [True, True, False] # No `covers` op for regular GEOS geoms.
for pnt, c in zip(pnts, covers):
# Results should be the same (but faster)
self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
self.assertEqual(c, prep.covers(pnt))
def test26_line_merge(self):
    "Testing line merge support"
    # Pairs of (input geometry, expected value of the `merged` property).
    cases = (
        (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
         fromstr('LINESTRING(1 1, 3 3)')),
        (fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
         fromstr('LINESTRING (1 1, 3 3, 4 2)')),
    )
    for geom, expected in cases:
        self.assertEqual(expected, geom.merged)
def test27_valid_reason(self):
    "Testing IsValidReason support"
    # Skipping tests if GEOS < v3.1.
    if not GEOS_PREPARE: return

    # A simple point is trivially valid.
    g = GEOSGeometry("POINT(0 0)")
    self.assert_(g.valid)
    self.assert_(isinstance(g.valid_reason, basestring))
    self.assertEqual(g.valid_reason, "Valid Geometry")

    # A degenerate linestring is invalid; GEOS prints a notice for it,
    # hence the BEGIN/END markers to reassure whoever reads the output.
    print "\nBEGIN - expecting GEOS_NOTICE; safe to ignore.\n"

    g = GEOSGeometry("LINESTRING(0 0, 0 0)")
    self.assert_(not g.valid)
    self.assert_(isinstance(g.valid_reason, basestring))
    self.assert_(g.valid_reason.startswith("Too few points in geometry component"))

    print "\nEND - expecting GEOS_NOTICE; safe to ignore.\n"
def suite():
    """Build and return the test suite holding all GEOS geometry tests."""
    return unittest.TestSuite([unittest.makeSuite(GEOSTest)])
def run(verbosity=2):
    """Run the GEOS test suite with the given unittest verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| apache-2.0 |
txm/potato | django/contrib/staticfiles/storage.py | 154 | 2080 | import os
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.files.storage import FileSystemStorage
from django.utils.importlib import import_module
from django.contrib.staticfiles import utils
class StaticFilesStorage(FileSystemStorage):
    """
    Standard file system storage for static files.

    ``location`` defaults to ``STATIC_ROOT`` and ``base_url`` to
    ``STATIC_URL``, so those settings must be configured whenever the
    corresponding argument is omitted.
    """
    def __init__(self, location=None, base_url=None, *args, **kwargs):
        # Fall back to the project-wide settings when not given explicitly.
        location = settings.STATIC_ROOT if location is None else location
        base_url = settings.STATIC_URL if base_url is None else base_url
        if not location:
            raise ImproperlyConfigured("You're using the staticfiles app "
                                       "without having set the STATIC_ROOT setting.")
        # check for None since we might use a root URL (``/``)
        if base_url is None:
            raise ImproperlyConfigured("You're using the staticfiles app "
                                       "without having set the STATIC_URL setting.")
        utils.check_settings()
        super(StaticFilesStorage, self).__init__(location, base_url, *args, **kwargs)
class AppStaticStorage(FileSystemStorage):
    """
    A file system storage backend serving the ``static`` directory of a
    single app module.
    """
    # URL prefix under which this app's files are served (None = no prefix).
    prefix = None
    # Name of the subdirectory inside the app that holds static files.
    source_dir = 'static'

    def __init__(self, app, *args, **kwargs):
        """
        Returns a static file storage if available in the given app.
        """
        # app is the actual app module
        self.app_module = app
        # The admin app historically ships its static files in 'media',
        # so it is special-cased with an explicit URL prefix.
        if self.app_module == 'django.contrib.admin':
            self.prefix = 'admin'
            self.source_dir = 'media'
        mod = import_module(self.app_module)
        location = os.path.join(os.path.dirname(mod.__file__), self.source_dir)
        super(AppStaticStorage, self).__init__(location, *args, **kwargs)
| bsd-3-clause |
arhik/nupic | tests/unit/nupic/research/inhibition_object_test.py | 5 | 6367 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Test if the firing number of coincidences after inhibition equals spatial pooler
numActiveColumnsPerInhArea.
TODO: Fix this up to be more unit testy.
"""
import numpy
import unittest2 as unittest
from nupic.research.spatial_pooler import SpatialPooler
numpy.random.seed(100)
class InhibitionObjectTest(unittest.TestCase):
    # Verifies that after the inhibition step exactly
    # numActiveColumnsPerInhArea columns remain active on every iteration.

    @unittest.skip("Currently fails due to switch from FDRCSpatial2 to SpatialPooler."
                   "The new SP doesn't have explicit methods to get inhibition.")
    # TODO: See https://github.com/numenta/nupic/issues/2071
    def testInhibition(self):
        """
        Test if the firing number of coincidences after inhibition
        equals spatial pooler numActiveColumnsPerInhArea.
        """
        # Miscellaneous variables:
        # n, w: n, w of encoders
        # inputLen: Length of binary input
        # synPermConnected: Spatial pooler synPermConnected
        # synPermActiveInc: Spatial pooler synPermActiveInc
        # connectPct: Initial connect percentage of permanences
        # columnDimensions: Number of spatial pooler coincidences
        # numActiveColumnsPerInhArea: Spatial pooler numActiveColumnsPerInhArea
        # stimulusThreshold: Spatial pooler stimulusThreshold
        # spSeed: Spatial pooler for initial permanences
        # stimulusThresholdInh: Parameter for inhibition, default value 0.00001
        # kDutyCycleFactor: kDutyCycleFactor for dutyCycleTieBreaker in
        #                   Inhibition
        # spVerbosity: Verbosity to print other sp initial parameters
        # testIter: Testing iterations
        n = 100
        w = 15
        inputLen = 300
        columnDimensions = 2048
        numActiveColumnsPerInhArea = 40
        stimulusThreshold = 0
        spSeed = 1956
        stimulusThresholdInh = 0.00001
        kDutyCycleFactor = 0.01
        spVerbosity = 0
        testIter = 100

        spTest = SpatialPooler(
            columnDimensions=(columnDimensions, 1),
            inputDimensions=(1, inputLen),
            potentialRadius=inputLen / 2,
            numActiveColumnsPerInhArea=numActiveColumnsPerInhArea,
            spVerbosity=spVerbosity,
            stimulusThreshold=stimulusThreshold,
            seed=spSeed
        )
        # NOTE(review): these private SP hooks belonged to FDRCSpatial2 and
        # no longer exist on SpatialPooler -- the reason the test is skipped.
        initialPermanence = spTest._initialPermanence()
        spTest._masterPotentialM, spTest._masterPermanenceM = (
            spTest._makeMasterCoincidences(spTest.numCloneMasters,
                                           spTest._coincRFShape,
                                           spTest.potentialPct,
                                           initialPermanence,
                                           spTest.random))

        spTest._updateInhibitionObj()
        boostFactors = numpy.ones(columnDimensions)

        for i in range(testIter):
            spTest._iterNum = i
            # random binary input
            input_ = numpy.zeros((1, inputLen))
            nonzero = numpy.random.random(inputLen)
            input_[0][numpy.where (nonzero < float(w)/float(n))] = 1

            # overlap step
            spTest._computeOverlapsFP(input_,
                                      stimulusThreshold=spTest.stimulusThreshold)
            spTest._overlaps *= boostFactors
            onCellIndices = numpy.where(spTest._overlaps > 0)
            spTest._onCells.fill(0)
            spTest._onCells[onCellIndices] = 1
            denseOn = spTest._onCells

            # update _dutyCycleBeforeInh (running average over dutyCyclePeriod)
            spTest.dutyCyclePeriod = min(i + 1, 1000)
            spTest._dutyCycleBeforeInh = (
                (spTest.dutyCyclePeriod - 1) *
                spTest._dutyCycleBeforeInh +denseOn) / spTest.dutyCyclePeriod
            dutyCycleTieBreaker = spTest._dutyCycleAfterInh.copy()
            dutyCycleTieBreaker *= kDutyCycleFactor

            # inhibition step
            numOn = spTest._inhibitionObj.compute(
                spTest._overlaps + dutyCycleTieBreaker, spTest._onCellIndices,
                stimulusThresholdInh,  # stimulusThresholdInh
                max(spTest._overlaps)/1000,  # addToWinners
            )

            # update _dutyCycleAfterInh
            spTest._onCells.fill(0)
            onCellIndices = spTest._onCellIndices[0:numOn]
            spTest._onCells[onCellIndices] = 1
            denseOn = spTest._onCells
            spTest._dutyCycleAfterInh = (((spTest.dutyCyclePeriod-1) *
                                          spTest._dutyCycleAfterInh + denseOn) /
                                         spTest.dutyCyclePeriod)

            # learning step
            spTest._adaptSynapses(onCellIndices, [], input_)

            # update boostFactor
            spTest._updateBoostFactors()
            boostFactors = spTest._firingBoostFactors

            # update dutyCycle and boost every 50 iterations
            if ((spTest._iterNum+1) % 50) == 0:
                spTest._updateInhibitionObj()
                spTest._updateMinDutyCycles(
                    spTest._dutyCycleBeforeInh,
                    spTest.minPctDutyCycleBeforeInh,
                    spTest._minDutyCycleBeforeInh)
                spTest._updateMinDutyCycles(
                    spTest._dutyCycleAfterInh,
                    spTest.minPctDutyCycleAfterInh,
                    spTest._minDutyCycleAfterInh)

            # test numOn and spTest.numActiveColumnsPerInhArea
            self.assertEqual(numOn, spTest.numActiveColumnsPerInhArea,
                             "Error at input %s, actual numOn are: %i, "
                             "numActivePerInhAre is: %s" % (
                                 i, numOn, numActiveColumnsPerInhArea))
# Run the tests when this module is executed directly.
if __name__=="__main__":
    unittest.main()
| agpl-3.0 |
sudheesh001/oh-mainline | vendor/packages/python-social-auth/social/tests/backends/oauth.py | 77 | 4538 | import requests
from httpretty import HTTPretty
from social.p3 import urlparse
from social.utils import parse_qs, url_add_parameters
from social.tests.models import User
from social.tests.backends.base import BaseBackendTest
class BaseOAuthTest(BaseBackendTest):
    """Base test case for OAuth backends.

    Mocks each provider HTTP endpoint (authorization redirect, access
    token exchange, user data) with HTTPretty so a full auth flow can be
    exercised offline.
    """
    # Subclasses configure the backend under test and the canned provider
    # responses via these class attributes.
    backend = None
    backend_path = None
    user_data_body = None
    user_data_url = ''
    user_data_content_type = 'application/json'
    access_token_body = None
    access_token_status = 200
    expected_username = ''

    def extra_settings(self):
        # Every OAuth backend needs a client key/secret pair in settings.
        return {'SOCIAL_AUTH_' + self.name + '_KEY': 'a-key',
                'SOCIAL_AUTH_' + self.name + '_SECRET': 'a-secret-key'}

    def _method(self, method):
        # Map an HTTP verb name to the matching HTTPretty constant.
        return {'GET': HTTPretty.GET,
                'POST': HTTPretty.POST}[method]

    def handle_state(self, start_url, target_url):
        # Propagate the OAuth2 `state` / `redirect_state` parameters from
        # the start URL onto the callback URL, when the backend uses them.
        start_query = parse_qs(urlparse(start_url).query)
        redirect_uri = start_query.get('redirect_uri')

        if getattr(self.backend, 'STATE_PARAMETER', False):
            if start_query.get('state'):
                target_url = url_add_parameters(target_url, {
                    'state': start_query['state']
                })

        if redirect_uri and getattr(self.backend, 'REDIRECT_STATE', False):
            redirect_query = parse_qs(urlparse(redirect_uri).query)
            if redirect_query.get('redirect_state'):
                target_url = url_add_parameters(target_url, {
                    'redirect_state': redirect_query['redirect_state']
                })
        return target_url

    def auth_handlers(self, start_url):
        # Register all mocked provider endpoints; returns the callback URL.
        target_url = self.handle_state(start_url,
                                       self.strategy.build_absolute_uri(
                                           self.complete_url
                                       ))
        # Authorization endpoint redirects back to our callback URL...
        HTTPretty.register_uri(HTTPretty.GET,
                               start_url,
                               status=301,
                               location=target_url)
        # ...which serves a dummy page checked by do_start().
        HTTPretty.register_uri(HTTPretty.GET,
                               target_url,
                               status=200,
                               body='foobar')
        # Access-token exchange endpoint.
        HTTPretty.register_uri(self._method(self.backend.ACCESS_TOKEN_METHOD),
                               uri=self.backend.access_token_url(),
                               status=self.access_token_status,
                               body=self.access_token_body or '',
                               content_type='text/json')
        # Optional user-details endpoint.
        if self.user_data_url:
            HTTPretty.register_uri(HTTPretty.GET,
                                   self.user_data_url,
                                   body=self.user_data_body or '',
                                   content_type=self.user_data_content_type)
        return target_url

    def do_start(self):
        # Run the whole mocked flow and return the authenticated result.
        start_url = self.backend.start().url
        target_url = self.auth_handlers(start_url)
        response = requests.get(start_url)
        self.assertEqual(response.url, target_url)
        self.assertEqual(response.text, 'foobar')
        self.strategy.set_request_data(parse_qs(urlparse(target_url).query),
                                       self.backend)
        return self.backend.complete()
class OAuth1Test(BaseOAuthTest):
    """Base test case for OAuth1 backends: adds the request-token step."""
    # Body returned by the mocked request-token endpoint.
    request_token_body = None
    raw_complete_url = ('/complete/{0}/?oauth_verifier=bazqux&'
                        'oauth_token=foobar')

    def request_token_handler(self):
        """Register the mocked request-token endpoint with HTTPretty."""
        http_method = self._method(self.backend.REQUEST_TOKEN_METHOD)
        HTTPretty.register_uri(http_method,
                               self.backend.REQUEST_TOKEN_URL,
                               status=200,
                               body=self.request_token_body)

    def do_start(self):
        """Mock the request-token endpoint, then run the usual flow."""
        self.request_token_handler()
        return super(OAuth1Test, self).do_start()
class OAuth2Test(BaseOAuthTest):
    """Base test case for OAuth2 backends: adds refresh-token support."""
    raw_complete_url = '/complete/{0}/?code=foobar'
    # Body returned by the mocked refresh-token endpoint.
    refresh_token_body = ''

    def refresh_token_arguments(self):
        """Extra kwargs forwarded to ``refresh_token`` (none by default)."""
        return {}

    def do_refresh_token(self):
        """Log in, mock the refresh endpoint, and refresh the stored token."""
        self.do_login()
        http_method = self._method(self.backend.REFRESH_TOKEN_METHOD)
        HTTPretty.register_uri(http_method,
                               self.backend.refresh_token_url(),
                               status=200,
                               body=self.refresh_token_body)
        user = list(User.cache.values())[0]
        social = user.social[0]
        social.refresh_token(strategy=self.strategy,
                             **self.refresh_token_arguments())
        return user, social
| agpl-3.0 |
ZENGXH/scikit-learn | sklearn/neighbors/regression.py | 106 | 10572 | """Nearest Neighbor Regression"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import numpy as np
from .base import _get_weights, _check_weights, NeighborsBase, KNeighborsMixin
from .base import RadiusNeighborsMixin, SupervisedFloatMixin
from ..base import RegressorMixin
from ..utils import check_array
class KNeighborsRegressor(NeighborsBase, KNeighborsMixin,
                          SupervisedFloatMixin,
                          RegressorMixin):
    """Regression based on k-nearest neighbors.

    The target is predicted by local interpolation of the targets
    associated of the nearest neighbors in the training set.

    Read more in the :ref:`User Guide <regression>`.

    Parameters
    ----------
    n_neighbors : int, optional (default = 5)
        Number of neighbors to use by default for :meth:`k_neighbors` queries.

    weights : str or callable
        weight function used in prediction.  Possible values:

        - 'uniform' : uniform weights.  All points in each neighborhood
          are weighted equally.
        - 'distance' : weight points by the inverse of their distance.
          in this case, closer neighbors of a query point will have a
          greater influence than neighbors which are further away.
        - [callable] : a user-defined function which accepts an
          array of distances, and returns an array of the same shape
          containing the weights.

        Uniform weights are used by default.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of the construction and query, as well as the memory
        required to store the tree.  The optimal value depends on the
        nature of the problem.

    metric : string or DistanceMetric object (default='minkowski')
        the distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric. See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric. When p = 1, this is
        equivalent to using manhattan_distance (l1), and euclidean_distance
        (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, optional (default = None)
        additional keyword arguments for the metric function.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import KNeighborsRegressor
    >>> neigh = KNeighborsRegressor(n_neighbors=2)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    KNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]

    See also
    --------
    NearestNeighbors
    RadiusNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    .. warning::

       Regarding the Nearest Neighbors algorithms, if it is found that two
       neighbors, neighbor `k+1` and `k`, have identical distances but
       different labels, the results will depend on the ordering of the
       training data.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, n_neighbors=5, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        self._init_params(n_neighbors=n_neighbors,
                          algorithm=algorithm,
                          leaf_size=leaf_size, metric=metric, p=p,
                          metric_params=metric_params, **kwargs)
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data

        Parameters
        ----------
        X : array or matrix, shape = [n_samples, n_features]

        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.kneighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        _y = self._y
        if _y.ndim == 1:
            # View single-output targets as one column so the weighted and
            # unweighted branches below share the same code path.
            _y = _y.reshape((-1, 1))

        if weights is None:
            # Uniform weights: plain mean over each query's k neighbors.
            y_pred = np.mean(_y[neigh_ind], axis=1)
        else:
            # Weighted average, one output column at a time.  Use the
            # builtin `float` dtype here: `np.float` was merely a
            # deprecated alias for it and was removed in NumPy 1.24.
            y_pred = np.empty((X.shape[0], _y.shape[1]), dtype=float)
            denom = np.sum(weights, axis=1)

            for j in range(_y.shape[1]):
                num = np.sum(_y[neigh_ind, j] * weights, axis=1)
                y_pred[:, j] = num / denom

        if self._y.ndim == 1:
            y_pred = y_pred.ravel()

        return y_pred
class RadiusNeighborsRegressor(NeighborsBase, RadiusNeighborsMixin,
                               SupervisedFloatMixin,
                               RegressorMixin):
    """Regression based on neighbors within a fixed radius.

    The prediction for a query point is the (optionally weighted) average
    of the targets of all training points within ``radius`` of it.

    Read more in the :ref:`User Guide <regression>`.

    Parameters
    ----------
    radius : float, optional (default = 1.0)
        Range of parameter space to use by default for
        :meth:`radius_neighbors` queries.

    weights : str or callable
        Weight function used in prediction.  Possible values:

        - 'uniform' : all points in each neighborhood are weighted
          equally (the default).
        - 'distance' : weight points by the inverse of their distance,
          so closer neighbors of a query point have a greater influence
          than neighbors which are further away.
        - [callable] : a user-defined function which accepts an array of
          distances, and returns an array of the same shape containing
          the weights.

    algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
        Algorithm used to compute the nearest neighbors:

        - 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDtree`
        - 'brute' will use a brute-force search.
        - 'auto' will attempt to decide the most appropriate algorithm
          based on the values passed to :meth:`fit` method.

        Note: fitting on sparse input will override the setting of
        this parameter, using brute force.

    leaf_size : int, optional (default = 30)
        Leaf size passed to BallTree or KDTree.  This can affect the
        speed of construction and query, as well as the memory required
        to store the tree.  The optimal value depends on the problem.

    metric : string or DistanceMetric object (default='minkowski')
        The distance metric to use for the tree.  The default metric is
        minkowski, and with p=2 is equivalent to the standard Euclidean
        metric.  See the documentation of the DistanceMetric class for a
        list of available metrics.

    p : integer, optional (default = 2)
        Power parameter for the Minkowski metric: p = 1 is
        manhattan_distance (l1), p = 2 is euclidean_distance (l2), and
        for arbitrary p, minkowski_distance (l_p) is used.

    metric_params : dict, optional (default = None)
        Additional keyword arguments for the metric function.

    Examples
    --------
    >>> X = [[0], [1], [2], [3]]
    >>> y = [0, 0, 1, 1]
    >>> from sklearn.neighbors import RadiusNeighborsRegressor
    >>> neigh = RadiusNeighborsRegressor(radius=1.0)
    >>> neigh.fit(X, y) # doctest: +ELLIPSIS
    RadiusNeighborsRegressor(...)
    >>> print(neigh.predict([[1.5]]))
    [ 0.5]

    See also
    --------
    NearestNeighbors
    KNeighborsRegressor
    KNeighborsClassifier
    RadiusNeighborsClassifier

    Notes
    -----
    See :ref:`Nearest Neighbors <neighbors>` in the online documentation
    for a discussion of the choice of ``algorithm`` and ``leaf_size``.

    http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
    """

    def __init__(self, radius=1.0, weights='uniform',
                 algorithm='auto', leaf_size=30,
                 p=2, metric='minkowski', metric_params=None, **kwargs):
        self._init_params(radius=radius,
                          algorithm=algorithm,
                          leaf_size=leaf_size,
                          p=p, metric=metric, metric_params=metric_params,
                          **kwargs)
        self.weights = _check_weights(weights)

    def predict(self, X):
        """Predict the target for the provided data

        Parameters
        ----------
        X : array or matrix, shape = [n_samples, n_features]

        Returns
        -------
        y : array of int, shape = [n_samples] or [n_samples, n_outputs]
            Target values
        """
        X = check_array(X, accept_sparse='csr')

        neigh_dist, neigh_ind = self.radius_neighbors(X)

        weights = _get_weights(neigh_dist, self.weights)

        # Work on a 2-d view of the targets so single- and multi-output
        # targets share the same averaging code below.
        _y = self._y if self._y.ndim != 1 else self._y.reshape((-1, 1))

        if weights is None:
            rows = [np.mean(_y[ind, :], axis=0) for ind in neigh_ind]
        else:
            rows = [np.average(_y[ind, :], axis=0, weights=w)
                    for ind, w in zip(neigh_ind, weights)]
        y_pred = np.array(rows)

        # Collapse back to 1-d for single-output targets.
        return y_pred.ravel() if self._y.ndim == 1 else y_pred
| bsd-3-clause |
jag1g13/lammps | python/examples/pizza/gl.py | 22 | 43829 | # Pizza.py toolkit, www.cs.sandia.gov/~sjplimp/pizza.html
# Steve Plimpton, sjplimp@sandia.gov, Sandia National Laboratories
#
# Copyright (2005) Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
# certain rights in this software. This software is distributed under
# the GNU General Public License.
# for python3 compatibility
from __future__ import print_function
# gl tool
oneline = "3d interactive visualization via OpenGL"
docstr = """
g = gl(d) create OpenGL display for data in d
d = atom snapshot object (dump, data)
g.bg("black") set background color (def = "black")
g.size(N) set image size to NxN
g.size(N,M) set image size to NxM
g.rotate(60,135) view from z theta and azimuthal phi (def = 60,30)
g.shift(x,y) translate by x,y pixels in view window (def = 0,0)
g.zoom(0.5) scale image by factor (def = 1)
g.box(0/1/2) 0/1/2 = none/variable/fixed box
g.box(0/1/2,"green") set box color
g.box(0/1/2,"red",4) set box edge thickness
g.file = "image" file prefix for created images (def = "image")
g.show(N) show image of snapshot at timestep N
g.all() make images of all selected snapshots
g.all(P) images of all, start file label at P
g.all(N,M,P) make M images of snapshot N, start label at P
g.pan(60,135,1.0,40,135,1.5) pan during all() operation
g.pan() no pan during all() (default)
args = z theta, azimuthal phi, zoom factor at beginning and end
values at each step are interpolated between beginning and end values
g.select = "$x > %g*3.0" string to pass to d.aselect.test() during all()
g.select = "" no extra aselect (default)
%g varies from 0.0 to 1.0 from beginning to end of all()
g.acol(2,"green") set atom colors by atom type (1-N)
g.acol([2,4],["red","blue"]) 1st arg = one type or list of types
g.acol(0,"blue") 2nd arg = one color or list of colors
g.acol(range(20),["red","blue"]) if list lengths unequal, interpolate
g.acol(range(10),"loop") assign colors in loop, randomly ordered
if 1st arg is 0, set all types to 2nd arg
if list of types has a 0 (e.g. range(10)), +1 is added to each value
interpolate means colors blend smoothly from one value to the next
g.arad([1,2],[0.5,0.3]) set atom radii, same rules as acol()
g.bcol() set bond color, same args as acol()
g.brad() set bond thickness, same args as arad()
g.tcol() set triangle color, same args as acol()
g.tfill() set triangle fill, 0 fill, 1 line, 2 both
g.lcol() set line color, same args as acol()
g.lrad() set line thickness, same args as arad()
g.adef() set atom/bond/tri/line properties to default
g.bdef() default = "loop" for colors, 0.45 for radii
g.tdef() default = 0.25 for bond/line thickness
g.ldef() default = 0 fill
by default 100 types are assigned
if atom/bond/tri/line has type > # defined properties, is an error
from vizinfo import colors access color list
print(colors) list defined color names and RGB values
colors["nickname"] = [R,G,B] set new RGB values from 0 to 255
140 pre-defined colors: red, green, blue, purple, yellow, black, white, etc
Settings specific to gl tool:
g.q(10) set quality of image (def = 5)
g.axis(0/1) turn xyz axes off/on
g.ortho(0/1) perspective (0) vs orthographic (1) view
g.clip('xlo',0.25) clip in xyz from lo/hi at box fraction (0-1)
g.reload() force all data to be reloaded
g.cache = 0/1 turn off/on GL cache lists (def = on)
theta,phi,x,y,scale,up = g.gview() grab all current view parameters
g.sview(theta,phi,x,y,scale,up) set all view parameters
data reload is necessary if dump selection is used to change the data
cache lists usually improve graphics performance
gview returns values to use in other commands:
theta,phi are args to rotate()
x,y are args to shift()
scale is arg to zoom()
up is a 3-vector arg to sview()
"""
# History
# 9/05, Steve Plimpton (SNL): original version
# ToDo list
# when do aselect with select str while looping N times on same timestep
# would not let you grow # of atoms selected
# Variables
# ztheta = vertical angle from z-azis of viewpoint
# azphi = azimuthal angle of viewpoint
# xshift,yshift = xy translation of scene (in pixels)
# distance = size of simulation box (largest dim)
# eye = viewpoint distance from center of scene
# file = filename prefix to use for images produced
# boxflag = 0/1/2 for drawing simulation box: none/variable/fixed
# bxcol = color of box
# bxthick = thickness of box lines
# bgcol = color of background
# vizinfo = scene attributes
# center[3] = center point of simulation box
# view[3] = direction towards eye in simulation box (unit vector)
# up[3] = screen up direction in simulation box (unit vector)
# right[3] = screen right direction in simulation box (unit vector)
# Imports and external programs
from math import sin,cos,sqrt,pi,acos
from OpenGL.Tk import *
from OpenGL.GLUT import *
import Image
from vizinfo import vizinfo
# Class definition
class gl:
# --------------------------------------------------------------------
def __init__(self,data):
    """Create an OpenGL viewer window for the snapshot object *data*."""
    self.data = data
    self.root = None
    # image size in pixels
    self.xpixels = 512
    self.ypixels = 512
    # initial viewpoint: z-axis angle, azimuthal angle, zoom, pan offsets
    self.ztheta = 60
    self.azphi = 30
    self.scale = 1.0
    self.xshift = self.yshift = 0
    # prefix for image files written by show()/all()
    self.file = "image"
    # simulation-box drawing: 0 = none, 1 = variable, 2 = fixed
    self.boxflag = 0
    self.bxcol = [1,1,0]
    self.bxthick = 0.3
    self.bgcol = [0,0,0]
    self.labels = []
    self.panflag = 0
    self.select = ""
    self.axisflag = 0
    self.orthoflag = 1
    # sphere/cylinder tessellation quality (see q())
    self.nslices = 5
    self.nstacks = 5
    self.nsides = 10
    # trackball rotation sensitivity and surface shininess
    self.theta_amplify = 2
    self.shiny = 2
    # clip planes stored as box fractions 0-1 on each axis (see clip())
    self.clipflag = 0
    self.clipxlo = self.clipylo = self.clipzlo = 0.0
    self.clipxhi = self.clipyhi = self.clipzhi = 1.0
    # GL display-list bookkeeping
    self.nclist = 0
    self.calllist = [0]    # indexed by 1-Ntype, so start with 0 index
    self.cache = 1
    self.cachelist = 0
    self.boxdraw = []
    self.atomdraw = []
    self.bonddraw = []
    self.tridraw = []
    self.linedraw = []
    self.ready = 0
    self.create_window()
    # per-type display attributes: colors, radii, fills
    self.vizinfo = vizinfo()
    self.adef()
    self.bdef()
    self.tdef()
    self.ldef()
    # view geometry: box center plus orthonormal view/up/right vectors
    self.center = 3*[0]
    self.view = 3*[0]
    self.up = 3*[0]
    self.right = 3*[0]
    self.viewupright()
# --------------------------------------------------------------------
def bg(self, color):
    """Set the background color by name and redraw the scene."""
    from vizinfo import colors
    # Convert the 0-255 RGB triple into the 0.0-1.0 floats OpenGL expects.
    self.bgcol = [component/255.0 for component in colors[color][:3]]
    self.w.tkRedraw()
# --------------------------------------------------------------------
def size(self, xnew, ynew=None):
    """Resize the image to xnew x ynew pixels (square when ynew omitted)."""
    self.xpixels = xnew
    self.ypixels = xnew if not ynew else ynew
    # the Tk/GL window must be rebuilt at the new size
    self.create_window()
# --------------------------------------------------------------------
def axis(self,value):
self.axisflag = value
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def create_window(self):
    """(Re)build the Tk/OpenGL window at the current pixel size."""
    # destroy any previous window before creating a new one
    if self.root: self.root.destroy()

    from __main__ import tkroot
    self.root = Toplevel(tkroot)
    self.root.title('Pizza.py gl tool')

    # double-buffered, depth-tested GL widget
    self.w = MyOpengl(self.root,width=self.xpixels,height=self.ypixels,
                      double=1,depth=1)
    self.w.pack(expand=YES)
    # self.w.pack(expand=YES,fill=BOTH)

    glViewport(0,0,self.xpixels,self.ypixels)
    glEnable(GL_LIGHTING);
    glEnable(GL_LIGHT0);
    glEnable(GL_DEPTH_TEST);
    glLightModeli(GL_LIGHT_MODEL_TWO_SIDE,GL_TRUE);
    glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)

    # trackball radius = larger of the two window dimensions
    self.rtrack = self.xpixels
    if self.ypixels > self.xpixels: self.rtrack = self.ypixels

    # hook the widget's redraw callback back to this object
    self.w.redraw = self.redraw
    self.w.parent = self
    self.w.tkRedraw()
    tkroot.update_idletasks()    # force window to appear
# --------------------------------------------------------------------
def clip(self,which,value):
if which == "xlo":
self.clipxlo = value
if value > self.clipxhi: self.clipxlo = self.clipxhi
elif which == "xhi":
self.clipxhi = value
if value < self.clipxlo: self.clipxhi = self.clipxlo
elif which == "ylo":
self.clipylo = value
if value > self.clipyhi: self.clipylo = self.clipyhi
elif which == "yhi":
self.clipyhi = value
if value < self.clipylo: self.clipyhi = self.clipylo
elif which == "zlo":
self.clipzlo = value
if value > self.clipzhi: self.clipzlo = self.clipzhi
elif which == "zhi":
self.clipzhi = value
if value < self.clipzlo: self.clipzhi = self.clipzlo
oldflag = self.clipflag
if self.clipxlo > 0 or self.clipylo > 0 or self.clipzlo > 0 or \
self.clipxhi < 1 or self.clipyhi < 1 or self.clipzhi < 1:
self.clipflag = 1
else: self.clipflag = 0
if oldflag == 0 and self.clipflag == 0: return
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def q(self,value):
self.nslices = value
self.nstacks = value
self.make_atom_calllist()
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def ortho(self,value):
    """Select orthographic (1) vs perspective (0) projection and redraw."""
    self.orthoflag = value
    self.w.tkRedraw()
# --------------------------------------------------------------------
# set unit vectors for view,up,right from ztheta,azphi
# assume +z in scene should be up on screen (unless looking down z-axis)
# right = up x view
def viewupright(self):
    """Recompute the view/up/right unit vectors from ztheta and azphi.

    Assumes scene +z should point up on screen, except when looking
    straight down (or up) the z-axis, where up is picked in the xy plane.
    """
    self.view[0] = cos(pi*self.azphi/180) * sin(pi*self.ztheta/180)
    self.view[1] = sin(pi*self.azphi/180) * sin(pi*self.ztheta/180)
    self.view[2] = cos(pi*self.ztheta/180)

    if self.ztheta == 0.0:
        # looking straight down +z: choose an up vector in the xy plane
        self.up[0] = cos(pi*self.azphi/180)
        self.up[1] = -sin(pi*self.azphi/180)
        self.up[2] = 0.0
    elif self.ztheta == 180.0:
        # looking straight up -z: mirrored xy-plane up vector
        self.up[0] = cos(pi*self.azphi/180)
        self.up[1] = sin(pi*self.azphi/180)
        self.up[2] = 0.0
    else:
        # project world +z perpendicular to the view direction
        dot = self.view[2]                # dot = (0,0,1) . view
        self.up[0] = -dot*self.view[0]    # up projected onto v = dot * v
        self.up[1] = -dot*self.view[1]    # up perp to v = up - dot * v
        self.up[2] = 1.0 - dot*self.view[2]
    self.up = vecnorm(self.up)

    # right = up x view completes the orthonormal frame
    self.right = veccross(self.up,self.view)
# --------------------------------------------------------------------
# reset ztheta,azphi and thus view,up.right
# called as function from Pizza.py
def rotate(self,ztheta,azphi):
    """Set the viewpoint angles (degrees from z-axis, azimuth) and redraw.

    Resets ztheta/azphi and hence the view/up/right vectors; this is the
    rotate() command documented in the tool's docstring.
    """
    self.ztheta = ztheta
    self.azphi = azphi
    self.viewupright()
    self.setview()
    self.w.tkRedraw()
# --------------------------------------------------------------------
# return all view params to reproduce current display via sview()
def gview(self):
    """Return the full current view as (ztheta, azphi, xshift, yshift, scale, up),
    suitable for passing back to sview()."""
    return self.ztheta,self.azphi,self.xshift,self.yshift,self.scale,self.up
# --------------------------------------------------------------------
# set current view, called by user with full set of view params
# up is not settable via any other call, all other params are
def sview(self,ztheta,azphi,xshift,yshift,scale,up):
    """Set every view parameter at once (tuple order matches gview()).

    This is the only call through which `up` can be set directly; the
    up vector is re-normalized before use.
    """
    self.ztheta = ztheta
    self.azphi = azphi
    self.xshift = xshift
    self.yshift = yshift
    self.scale = scale
    self.up[0] = up[0]
    self.up[1] = up[1]
    self.up[2] = up[2]
    self.up = vecnorm(self.up)
    # rebuild the view direction from the angles, then right = up x view
    self.view[0] = cos(pi*self.azphi/180) * sin(pi*self.ztheta/180)
    self.view[1] = sin(pi*self.azphi/180) * sin(pi*self.ztheta/180)
    self.view[2] = cos(pi*self.ztheta/180)
    self.right = veccross(self.up,self.view)
    self.setview()
    self.w.tkRedraw()
# --------------------------------------------------------------------
# rotation triggered by mouse trackball
# project old,new onto unit trackball surf
# rotate view,up around axis of rotation = old x new
# right = up x view
# reset ztheta,azphi from view
  def mouse_rotate(self,xnew,ynew,xold,yold):
    """Trackball rotation from a mouse drag (xold,yold) -> (xnew,ynew).

    Both screen points are projected onto a virtual trackball sphere of
    radius self.rtrack; view and up are rotated about the axis old x new,
    up is re-orthogonalized against view, right is recomputed, and
    ztheta/azphi are rederived from the new view vector.
    Returns early (no rotation) if either point misses the trackball.
    """
    # change y pixels to measure from bottom of window instead of top
    yold = self.ypixels - yold
    ynew = self.ypixels - ynew
    # vold = unit vector to (xold,yold) projected onto trackball
    # vnew = unit vector to (xnew,ynew) projected onto trackball
    # return (no rotation) if either projection point is outside rtrack
    vold = [0,0,0]
    vold[0] = xold - (0.5*self.xpixels + self.xshift)
    vold[1] = yold - (0.5*self.ypixels + self.yshift)
    vold[2] = self.rtrack*self.rtrack - vold[0]*vold[0] - vold[1]*vold[1]
    if vold[2] < 0: return
    vold[2] = sqrt(vold[2])
    vold = vecnorm(vold)
    vnew = [0,0,0]
    vnew[0] = xnew - (0.5*self.xpixels + self.xshift)
    vnew[1] = ynew - (0.5*self.ypixels + self.yshift)
    vnew[2] = self.rtrack*self.rtrack - vnew[0]*vnew[0] - vnew[1]*vnew[1]
    if vnew[2] < 0: return
    vnew[2] = sqrt(vnew[2])
    vnew = vecnorm(vnew)
    # rot = trackball rotation axis in screen ref frame = vold x vnew
    # theta = angle of rotation = sin(theta) for small theta
    # axis = rotation axis in body ref frame described by right,up,view
    rot = veccross(vold,vnew)
    theta = sqrt(rot[0]*rot[0] + rot[1]*rot[1] + rot[2]*rot[2])
    theta *= self.theta_amplify
    axis = [0,0,0]
    axis[0] = rot[0]*self.right[0] + rot[1]*self.up[0] + rot[2]*self.view[0]
    axis[1] = rot[0]*self.right[1] + rot[1]*self.up[1] + rot[2]*self.view[1]
    axis[2] = rot[0]*self.right[2] + rot[1]*self.up[2] + rot[2]*self.view[2]
    axis = vecnorm(axis)
    # view is changed by (axis x view) scaled by theta
    # up is changed by (axis x up) scaled by theta
    # force up to be perp to view via up_perp = up - (up . view) view
    # right = up x view
    delta = veccross(axis,self.view)
    self.view[0] -= theta*delta[0]
    self.view[1] -= theta*delta[1]
    self.view[2] -= theta*delta[2]
    self.view = vecnorm(self.view)
    delta = veccross(axis,self.up)
    self.up[0] -= theta*delta[0]
    self.up[1] -= theta*delta[1]
    self.up[2] -= theta*delta[2]
    dot = vecdot(self.up,self.view)
    self.up[0] -= dot*self.view[0]
    self.up[1] -= dot*self.view[1]
    self.up[2] -= dot*self.view[2]
    self.up = vecnorm(self.up)
    self.right = veccross(self.up,self.view)
    # convert new view to ztheta,azphi
    # NOTE(review): exact float compare -- ztheta is 0.0 only when view is
    # exactly +z; otherwise azphi is rederived from view[0] and view[1]
    self.ztheta = acos(self.view[2])/pi * 180.0
    if (self.ztheta == 0.0): self.azphi = 0.0
    else: self.azphi = acos(self.view[0]/sin(pi*self.ztheta/180.0))/pi * 180.0
    if self.view[1] < 0: self.azphi = 360.0 - self.azphi
    self.setview()
    self.w.tkRedraw()
# --------------------------------------------------------------------
def shift(self,x,y):
self.xshift = x;
self.yshift = y;
self.setview()
self.w.tkRedraw()
# --------------------------------------------------------------------
def zoom(self,scale):
self.scale = scale
self.setview()
self.w.tkRedraw()
# --------------------------------------------------------------------
# set view params needed by redraw
# input: center = center of box
# distance = size of scene (longest box length)
# scale = zoom factor (1.0 = no zoom)
# xshift,yshift = translation factor in pixels
# view = unit vector from center to viewpoint
# up = unit vector in up direction in scene
# right = unit vector in right direction in scene
# output: eye = distance to view scene from
# xto,yto,zto = point to look to
# xfrom,yfrom,zfrom = point to look from
def setview(self):
if not self.ready: return # no distance since no scene yet
self.eye = 3 * self.distance / self.scale
xfactor = 0.5*self.eye*self.xshift/self.xpixels
yfactor = 0.5*self.eye*self.yshift/self.ypixels
self.xto = self.center[0] - xfactor*self.right[0] - yfactor*self.up[0]
self.yto = self.center[1] - xfactor*self.right[1] - yfactor*self.up[1]
self.zto = self.center[2] - xfactor*self.right[2] - yfactor*self.up[2]
self.xfrom = self.xto + self.eye*self.view[0]
self.yfrom = self.yto + self.eye*self.view[1]
self.zfrom = self.zto + self.eye*self.view[2]
# --------------------------------------------------------------------
# box attributes, also used for triangle lines
def box(self,*args):
self.boxflag = args[0]
if len(args) > 1:
from vizinfo import colors
self.bxcol = [colors[args[1]][0]/255.0,colors[args[1]][1]/255.0,
colors[args[1]][2]/255.0]
if len(args) > 2: self.bxthick = args[2]
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
# grab all selected snapshots from data object
# add GL-specific info to each bond
  def reload(self):
    """Load every selected snapshot from the data object into frame caches.

    Fills timeframes/boxframes/atomframes/bondframes/triframes/lineframes,
    augments bonds with GL orientation info, then derives scene distance and
    center from the first frame's box and marks the tool ready.
    Progress: each timestep is printed (no separator) as it loads.
    """
    print("Loading data into gl tool ...")
    data = self.data
    self.timeframes = []
    self.boxframes = []
    self.atomframes = []
    self.bondframes = []
    self.triframes = []
    self.lineframes = []
    box = []
    # boxflag == 2: one fixed box big enough for all selected steps
    if self.boxflag == 2: box = data.maxbox()
    flag = 0
    # data.iterator protocol: returns (index, time, flag); flag == -1 ends
    while 1:
      which,time,flag = data.iterator(flag)
      if flag == -1: break
      time,boxone,atoms,bonds,tris,lines = data.viz(which)
      if self.boxflag < 2: box = boxone   # per-snapshot box otherwise
      if bonds: self.bonds_augment(bonds)
      self.timeframes.append(time)
      self.boxframes.append(box)
      self.atomframes.append(atoms)
      self.bondframes.append(bonds)
      self.triframes.append(tris)
      self.lineframes.append(lines)
      print(time,end='')
      sys.stdout.flush()
    print()
    self.nframes = len(self.timeframes)
    # scene geometry comes from the first cached frame's box
    self.distance = compute_distance(self.boxframes[0])
    self.center = compute_center(self.boxframes[0])
    self.ready = 1
    self.setview()
# --------------------------------------------------------------------
def nolabel(self):
self.cachelist = -self.cachelist
self.labels = []
# --------------------------------------------------------------------
# show a single snapshot
# distance from snapshot box or max box for all selected steps
def show(self,ntime):
data = self.data
which = data.findtime(ntime)
time,box,atoms,bonds,tris,lines = data.viz(which)
if self.boxflag == 2: box = data.maxbox()
self.distance = compute_distance(box)
self.center = compute_center(box)
if bonds: self.bonds_augment(bonds)
self.boxdraw = box
self.atomdraw = atoms
self.bonddraw = bonds
self.tridraw = tris
self.linedraw = lines
self.ready = 1
self.setview()
self.cachelist = -self.cachelist
self.w.tkRedraw()
self.save()
# --------------------------------------------------------------------
def pan(self,*list):
if len(list) == 0: self.panflag = 0
else:
self.panflag = 1
self.ztheta_start = list[0]
self.azphi_start = list[1]
self.scale_start = list[2]
self.ztheta_stop = list[3]
self.azphi_stop = list[4]
self.scale_stop = list[5]
# --------------------------------------------------------------------
def all(self,*list):
data = self.data
if len(list) == 0:
nstart = 0
ncount = data.nselect
elif len(list) == 1:
nstart = list[0]
ncount = data.nselect
else:
ntime = list[0]
nstart = list[2]
ncount = list[1]
if self.boxflag == 2: box = data.maxbox()
# loop over all selected steps
# distance from 1st snapshot box or max box for all selected steps
# recompute box center on 1st step or if panning
if len(list) <= 1:
n = nstart
i = flag = 0
while 1:
which,time,flag = data.iterator(flag)
if flag == -1: break
fraction = float(i) / (ncount-1)
if self.select != "":
newstr = self.select % fraction
data.aselect.test(newstr,time)
time,boxone,atoms,bonds,tris,lines = data.viz(which)
if self.boxflag < 2: box = boxone
if n == nstart: self.distance = compute_distance(box)
if n < 10: file = self.file + "000" + str(n)
elif n < 100: file = self.file + "00" + str(n)
elif n < 1000: file = self.file + "0" + str(n)
else: file = self.file + str(n)
if self.panflag:
self.ztheta = self.ztheta_start + \
fraction*(self.ztheta_stop - self.ztheta_start)
self.azphi = self.azphi_start + \
fraction*(self.azphi_stop - self.azphi_start)
self.scale = self.scale_start + \
fraction*(self.scale_stop - self.scale_start)
self.viewupright()
if n == nstart or self.panflag: self.center = compute_center(box)
if bonds: self.bonds_augment(bonds)
self.boxdraw = box
self.atomdraw = atoms
self.bonddraw = bonds
self.tridraw = tris
self.linedraw = lines
self.ready = 1
self.setview()
self.cachelist = -self.cachelist
self.w.tkRedraw()
self.save(file)
print(time,end='')
sys.stdout.flush()
i += 1
n += 1
# loop ncount times on same step
# distance from 1st snapshot box or max box for all selected steps
# recompute box center on 1st step or if panning
else:
which = data.findtime(ntime)
n = nstart
for i in range(ncount):
fraction = float(i) / (ncount-1)
if self.select != "":
newstr = self.select % fraction
data.aselect.test(newstr,ntime)
time,boxone,atoms,bonds,tris,lines = data.viz(which)
if self.boxflag < 2: box = boxone
if n == nstart: self.distance = compute_distance(box)
if n < 10: file = self.file + "000" + str(n)
elif n < 100: file = self.file + "00" + str(n)
elif n < 1000: file = self.file + "0" + str(n)
else: file = self.file + str(n)
if self.panflag:
self.ztheta = self.ztheta_start + \
fraction*(self.ztheta_stop - self.ztheta_start)
self.azphi = self.azphi_start + \
fraction*(self.azphi_stop - self.azphi_start)
self.scale = self.scale_start + \
fraction*(self.scale_stop - self.scale_start)
self.viewupright()
if n == nstart or self.panflag: self.center = compute_center(box)
if bonds: self.bonds_augment(bonds)
self.boxdraw = box
self.atomdraw = atoms
self.bonddraw = bonds
self.tridraw = tris
self.linedraw = lines
self.ready = 1
self.setview()
self.cachelist = -self.cachelist
self.w.tkRedraw()
self.save(file)
print(n,end='')
sys.stdout.flush()
n += 1
print("\n%d images" % ncount)
# --------------------------------------------------------------------
def display(self,index):
self.boxdraw = self.boxframes[index]
self.atomdraw = self.atomframes[index]
self.bonddraw = self.bondframes[index]
self.tridraw = self.triframes[index]
self.linedraw = self.lineframes[index]
self.ready = 1
self.cachelist = -self.cachelist
self.w.tkRedraw()
return (self.timeframes[index],len(self.atomdraw))
# --------------------------------------------------------------------
# draw the GL scene
def redraw(self,o):
# clear window to background color
glClearColor(self.bgcol[0],self.bgcol[1],self.bgcol[2],0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# not ready if no scene yet
if not self.ready: return
# set view from eye, distance, 3 lookat vectors (from,to,up)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
if self.orthoflag:
glOrtho(-0.25*self.eye,0.25*self.eye,-0.25*self.eye,0.25*self.eye,
self.eye-2*self.distance,self.eye+2*self.distance)
else:
gluPerspective(30.0,1.0,0.01,10000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
gluLookAt(self.xfrom,self.yfrom,self.zfrom,self.xto,self.yto,self.zto,
self.up[0],self.up[1],self.up[2])
# draw scene from display list if caching allowed and list hasn't changed
# else redraw and store as new display list if caching allowed
if self.cache and self.cachelist > 0: glCallList(self.cachelist);
else:
if self.cache:
if self.cachelist < 0: glDeleteLists(-self.cachelist,1)
self.cachelist = glGenLists(1)
glNewList(self.cachelist,GL_COMPILE_AND_EXECUTE)
# draw box, clip-box, xyz axes, lines
glDisable(GL_LIGHTING)
if self.boxflag:
self.draw_box(0)
if self.clipflag: self.draw_box(1)
if self.axisflag: self.draw_axes()
ncolor = self.vizinfo.nlcolor
for line in self.linedraw:
itype = int(line[1])
if itype > ncolor: raise StandardError("line type too big")
red,green,blue = self.vizinfo.lcolor[itype]
glColor3f(red,green,blue)
thick = self.vizinfo.lrad[itype]
glLineWidth(thick)
glBegin(GL_LINES)
glVertex3f(line[2],line[3],line[4])
glVertex3f(line[5],line[6],line[7])
glEnd()
glEnable(GL_LIGHTING)
# draw non-clipped scene = atoms, bonds, triangles
# draw atoms as collection of points
# cannot put PointSize inside glBegin
# so probably need to group atoms by type for best performance
# or just allow one radius
# need to scale radius appropriately with box size
# or could leave it at absolute value
# use POINT_SMOOTH to enable anti-aliasing and round points
# multiple timesteps via vcr::play() is still not fast
# caching makes it fast for single frame, but multiple frames is slow
# need to enable clipping
# if not self.clipflag:
# glDisable(GL_LIGHTING)
# glEnable(GL_POINT_SMOOTH)
# glPointSize(self.vizinfo.arad[int(self.atomdraw[0][1])])
# glBegin(GL_POINTS)
# for atom in self.atomdraw:
# red,green,blue = self.vizinfo.acolor[int(atom[1])]
# glColor(red,green,blue)
# glVertex3d(atom[2],atom[3],atom[4])
# glEnd()
# glEnable(GL_LIGHTING)
if not self.clipflag:
for atom in self.atomdraw:
glTranslatef(atom[2],atom[3],atom[4]);
glCallList(self.calllist[int(atom[1])]);
glTranslatef(-atom[2],-atom[3],-atom[4]);
if self.bonddraw:
bound = 0.25 * self.distance
ncolor = self.vizinfo.nbcolor
for bond in self.bonddraw:
if bond[10] > bound: continue
itype = int(bond[1])
if itype > ncolor: raise StandardError("bond type too big")
red,green,blue = self.vizinfo.bcolor[itype]
rad = self.vizinfo.brad[itype]
glPushMatrix()
glTranslatef(bond[2],bond[3],bond[4])
glRotatef(bond[11],bond[12],bond[13],0.0)
glMaterialfv(GL_FRONT_AND_BACK,GL_EMISSION,[red,green,blue,1.0]);
glMaterialf(GL_FRONT_AND_BACK,GL_SHININESS,self.shiny);
obj = gluNewQuadric()
gluCylinder(obj,rad,rad,bond[10],self.nsides,self.nsides)
glPopMatrix()
if self.tridraw:
fillflag = self.vizinfo.tfill[int(self.tridraw[0][1])]
if fillflag != 1:
if fillflag:
glEnable(GL_POLYGON_OFFSET_FILL)
glPolygonOffset(1.0,1.0)
glBegin(GL_TRIANGLES)
ncolor = self.vizinfo.ntcolor
for tri in self.tridraw:
itype = int(tri[1])
if itype > ncolor: raise StandardError("tri type too big")
red,green,blue = self.vizinfo.tcolor[itype]
glMaterialfv(GL_FRONT_AND_BACK,GL_EMISSION,[red,green,blue,1.0]);
glMaterialf(GL_FRONT_AND_BACK,GL_SHININESS,self.shiny);
glNormal3f(tri[11],tri[12],tri[13])
glVertex3f(tri[2],tri[3],tri[4])
glVertex3f(tri[5],tri[6],tri[7])
glVertex3f(tri[8],tri[9],tri[10])
glEnd()
if fillflag: glDisable(GL_POLYGON_OFFSET_FILL)
if fillflag:
glDisable(GL_LIGHTING)
glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)
glLineWidth(self.bxthick)
glColor3f(self.bxcol[0],self.bxcol[1],self.bxcol[2])
glBegin(GL_TRIANGLES)
for tri in self.tridraw:
glVertex3f(tri[2],tri[3],tri[4])
glVertex3f(tri[5],tri[6],tri[7])
glVertex3f(tri[8],tri[9],tri[10])
glEnd()
glEnable(GL_LIGHTING)
glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)
# draw clipped scene = atoms, bonds, triangles
else:
box = self.boxdraw
xlo = box[0] + self.clipxlo*(box[3] - box[0])
xhi = box[0] + self.clipxhi*(box[3] - box[0])
ylo = box[1] + self.clipylo*(box[4] - box[1])
yhi = box[1] + self.clipyhi*(box[4] - box[1])
zlo = box[2] + self.clipzlo*(box[5] - box[2])
zhi = box[2] + self.clipzhi*(box[5] - box[2])
for atom in self.atomdraw:
x,y,z = atom[2],atom[3],atom[4]
if x >= xlo and x <= xhi and y >= ylo and y <= yhi and \
z >= zlo and z <= zhi:
glTranslatef(x,y,z);
glCallList(self.calllist[int(atom[1])]);
glTranslatef(-x,-y,-z);
if self.bonddraw:
bound = 0.25 * self.distance
ncolor = self.vizinfo.nbcolor
for bond in self.bonddraw:
xmin = min2(bond[2],bond[5])
xmax = max2(bond[2],bond[5])
ymin = min2(bond[3],bond[6])
ymax = max2(bond[3],bond[6])
zmin = min2(bond[4],bond[7])
zmax = max2(bond[4],bond[7])
if xmin >= xlo and xmax <= xhi and \
ymin >= ylo and ymax <= yhi and zmin >= zlo and zmax <= zhi:
if bond[10] > bound: continue
itype = int(bond[1])
if itype > ncolor: raise StandardError("bond type too big")
red,green,blue = self.vizinfo.bcolor[itype]
rad = self.vizinfo.brad[itype]
glPushMatrix()
glTranslatef(bond[2],bond[3],bond[4])
glRotatef(bond[11],bond[12],bond[13],0.0)
glMaterialfv(GL_FRONT_AND_BACK,GL_EMISSION,[red,green,blue,1.0]);
glMaterialf(GL_FRONT_AND_BACK,GL_SHININESS,self.shiny);
obj = gluNewQuadric()
gluCylinder(obj,rad,rad,bond[10],self.nsides,self.nsides)
glPopMatrix()
if self.tridraw:
fillflag = self.vizinfo.tfill[int(self.tridraw[0][1])]
if fillflag != 1:
if fillflag:
glEnable(GL_POLYGON_OFFSET_FILL)
glPolygonOffset(1.0,1.0)
glBegin(GL_TRIANGLES)
ncolor = self.vizinfo.ntcolor
for tri in self.tridraw:
xmin = min3(tri[2],tri[5],tri[8])
xmax = max3(tri[2],tri[5],tri[8])
ymin = min3(tri[3],tri[6],tri[9])
ymax = max3(tri[3],tri[6],tri[9])
zmin = min3(tri[4],tri[7],tri[10])
zmax = max3(tri[4],tri[7],tri[10])
if xmin >= xlo and xmax <= xhi and \
ymin >= ylo and ymax <= yhi and \
zmin >= zlo and zmax <= zhi:
itype = int(tri[1])
if itype > ncolor: raise StandardError("tri type too big")
red,green,blue = self.vizinfo.tcolor[itype]
glMaterialfv(GL_FRONT_AND_BACK,GL_EMISSION,
[red,green,blue,1.0]);
glMaterialf(GL_FRONT_AND_BACK,GL_SHININESS,self.shiny);
glNormal3f(tri[11],tri[12],tri[13])
glVertex3f(tri[2],tri[3],tri[4])
glVertex3f(tri[5],tri[6],tri[7])
glVertex3f(tri[8],tri[9],tri[10])
glEnd()
if fillflag: glDisable(GL_POLYGON_OFFSET_FILL)
if fillflag:
glDisable(GL_LIGHTING)
glPolygonMode(GL_FRONT_AND_BACK,GL_LINE)
glLineWidth(self.bxthick)
glColor3f(self.bxcol[0],self.bxcol[1],self.bxcol[2])
glBegin(GL_TRIANGLES)
for tri in self.tridraw:
xmin = min3(tri[2],tri[5],tri[8])
xmax = max3(tri[2],tri[5],tri[8])
ymin = min3(tri[3],tri[6],tri[9])
ymax = max3(tri[3],tri[6],tri[9])
zmin = min3(tri[4],tri[7],tri[10])
zmax = max3(tri[4],tri[7],tri[10])
if xmin >= xlo and xmax <= xhi and \
ymin >= ylo and ymax <= yhi and \
zmin >= zlo and zmax <= zhi:
glVertex3f(tri[2],tri[3],tri[4])
glVertex3f(tri[5],tri[6],tri[7])
glVertex3f(tri[8],tri[9],tri[10])
glEnd()
glEnable(GL_LIGHTING)
glPolygonMode(GL_FRONT_AND_BACK,GL_FILL)
if self.cache: glEndList()
glFlush()
# --------------------------------------------------------------------
# make new call list for each atom type
# called when atom color/rad/quality is changed
def make_atom_calllist(self):
# extend calllist array if necessary
if self.vizinfo.nacolor > self.nclist:
for i in range(self.vizinfo.nacolor-self.nclist): self.calllist.append(0)
self.nclist = self.vizinfo.nacolor
# create new calllist for each atom type
for itype in xrange(1,self.vizinfo.nacolor+1):
if self.calllist[itype]: glDeleteLists(self.calllist[itype],1)
ilist = glGenLists(1)
self.calllist[itype] = ilist
glNewList(ilist,GL_COMPILE)
red,green,blue = self.vizinfo.acolor[itype]
rad = self.vizinfo.arad[itype]
glColor3f(red,green,blue);
# glPointSize(10.0*rad)
# glBegin(GL_POINTS)
# glVertex3f(0.0,0.0,0.0)
# glEnd()
glMaterialfv(GL_FRONT,GL_EMISSION,[red,green,blue,1.0]);
glMaterialf(GL_FRONT,GL_SHININESS,self.shiny);
glutSolidSphere(rad,self.nslices,self.nstacks)
glEndList()
# --------------------------------------------------------------------
# augment bond info returned by viz() with info needed for GL draw
# info = length, theta, -dy, dx for bond orientation
def bonds_augment(self,bonds):
for bond in bonds:
dx = bond[5] - bond[2]
dy = bond[6] - bond[3]
dz = bond[7] - bond[4]
length = sqrt(dx*dx + dy*dy + dz*dz)
dx /= length
dy /= length
dz /= length
theta = acos(dz)*180.0/pi
bond += [length,theta,-dy,dx]
# --------------------------------------------------------------------
  def draw_box(self,flag):
    """Draw the simulation box as a GL wireframe.

    flag = 0 draws the full box from self.boxdraw =
    [xlo,ylo,zlo,xhi,yhi,zhi]; flag = 1 draws the clip sub-box defined by
    the fractional clip bounds (clipxlo..clipzhi) of each dimension.
    """
    xlo,ylo,zlo,xhi,yhi,zhi = self.boxdraw
    if flag:
      # shrink each dimension to its [cliplo,cliphi] fraction of the edge
      tmp = xlo + self.clipxlo*(xhi - xlo)
      xhi = xlo + self.clipxhi*(xhi - xlo)
      xlo = tmp
      tmp = ylo + self.clipylo*(yhi - ylo)
      yhi = ylo + self.clipyhi*(yhi - ylo)
      ylo = tmp
      tmp = zlo + self.clipzlo*(zhi - zlo)
      zhi = zlo + self.clipzhi*(zhi - zlo)
      zlo = tmp
    glLineWidth(self.bxthick)
    glColor3f(self.bxcol[0],self.bxcol[1],self.bxcol[2])
    # bottom face (z = zlo)
    glBegin(GL_LINE_LOOP)
    glVertex3f(xlo,ylo,zlo)
    glVertex3f(xhi,ylo,zlo)
    glVertex3f(xhi,yhi,zlo)
    glVertex3f(xlo,yhi,zlo)
    glEnd()
    # top face (z = zhi)
    glBegin(GL_LINE_LOOP)
    glVertex3f(xlo,ylo,zhi)
    glVertex3f(xhi,ylo,zhi)
    glVertex3f(xhi,yhi,zhi)
    glVertex3f(xlo,yhi,zhi)
    glEnd()
    # 4 vertical edges connecting the faces
    glBegin(GL_LINES)
    glVertex3f(xlo,ylo,zlo)
    glVertex3f(xlo,ylo,zhi)
    glVertex3f(xhi,ylo,zlo)
    glVertex3f(xhi,ylo,zhi)
    glVertex3f(xhi,yhi,zlo)
    glVertex3f(xhi,yhi,zhi)
    glVertex3f(xlo,yhi,zlo)
    glVertex3f(xlo,yhi,zhi)
    glEnd()
# --------------------------------------------------------------------
def draw_axes(self):
xlo,ylo,zlo,xhi,yhi,zhi = self.boxdraw
delta = xhi-xlo
if yhi-ylo > delta: delta = yhi-ylo
if zhi-zlo > delta: delta = zhi-zlo
delta *= 0.1
glLineWidth(self.bxthick)
glBegin(GL_LINES)
glColor3f(1,0,0)
glVertex3f(xlo-delta,ylo-delta,zlo-delta)
glVertex3f(xhi-delta,ylo-delta,zlo-delta)
glColor3f(0,1,0)
glVertex3f(xlo-delta,ylo-delta,zlo-delta)
glVertex3f(xlo-delta,yhi-delta,zlo-delta)
glColor3f(0,0,1)
glVertex3f(xlo-delta,ylo-delta,zlo-delta)
glVertex3f(xlo-delta,ylo-delta,zhi-delta)
glEnd()
# --------------------------------------------------------------------
def save(self,file=None):
self.w.update() # force image on screen to be current before saving it
pstring = glReadPixels(0,0,self.xpixels,self.ypixels,
GL_RGBA,GL_UNSIGNED_BYTE)
snapshot = Image.fromstring("RGBA",(self.xpixels,self.ypixels),pstring)
snapshot = snapshot.transpose(Image.FLIP_TOP_BOTTOM)
if not file: file = self.file
snapshot.save(file + ".png")
# --------------------------------------------------------------------
def adef(self):
self.vizinfo.setcolors("atom",range(100),"loop")
self.vizinfo.setradii("atom",range(100),0.45)
self.make_atom_calllist()
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def bdef(self):
self.vizinfo.setcolors("bond",range(100),"loop")
self.vizinfo.setradii("bond",range(100),0.25)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def tdef(self):
self.vizinfo.setcolors("tri",range(100),"loop")
self.vizinfo.setfills("tri",range(100),0)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def ldef(self):
self.vizinfo.setcolors("line",range(100),"loop")
self.vizinfo.setradii("line",range(100),0.25)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def acol(self,atypes,colors):
self.vizinfo.setcolors("atom",atypes,colors)
self.make_atom_calllist()
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def arad(self,atypes,radii):
self.vizinfo.setradii("atom",atypes,radii)
self.make_atom_calllist()
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def bcol(self,btypes,colors):
self.vizinfo.setcolors("bond",btypes,colors)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def brad(self,btypes,radii):
self.vizinfo.setradii("bond",btypes,radii)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def tcol(self,ttypes,colors):
self.vizinfo.setcolors("tri",ttypes,colors)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def tfill(self,ttypes,flags):
self.vizinfo.setfills("tri",ttypes,flags)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def lcol(self,ltypes,colors):
self.vizinfo.setcolors("line",ltypes,colors)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
def lrad(self,ltypes,radii):
self.vizinfo.setradii("line",ltypes,radii)
self.cachelist = -self.cachelist
self.w.tkRedraw()
# --------------------------------------------------------------------
# derived class from Togl's Opengl
# overwrite redraw, translate, rotate, scale methods
# latter 3 are mouse-motion methods
class MyOpengl(Opengl):
  """Togl Opengl widget subclass used by the gl tool.

  Overrides redraw plus the three mouse-motion handlers (translate,
  rotate, scale); each handler delegates to the owning gl tool, which is
  expected to be available as self.parent.
  """
  def __init__(self, master, cnf={}, **kw):
    # NOTE(review): mutable default cnf={} is only passed through to the
    # base class, never mutated here, so the shared default is harmless.
    args = (self,master,cnf)
    Opengl.__init__(*args,**kw)
    # NOTE(review): this assigns a *class* attribute on Opengl, disabling
    # autospin for every Opengl widget -- presumably intentional; confirm.
    Opengl.autospin_allowed = 0
  # redraw Opengl scene
  # call parent redraw() method
  def tkRedraw(self,*dummy):
    if not self.initialised: return
    self.tk.call(self._w,'makecurrent')
    self.redraw(self)
    self.tk.call(self._w,'swapbuffers')
  # left button translate
  # access parent xshift/yshift and call parent trans() method
  def tkTranslate(self,event):
    dx = event.x - self.xmouse
    dy = event.y - self.ymouse
    # y grows downward in screen coords, hence the sign flip on yshift
    x = self.parent.xshift + dx
    y = self.parent.yshift - dy
    self.parent.shift(x,y)
    self.tkRedraw()
    self.tkRecordMouse(event)
  # middle button trackball
  # call parent mouse_rotate() method
  def tkRotate(self,event):
    self.parent.mouse_rotate(event.x,event.y,self.xmouse,self.ymouse)
    self.tkRedraw()
    self.tkRecordMouse(event)
  # right button zoom
  # access parent scale and call parent zoom() method
  def tkScale(self,event):
    # 1% zoom per pixel of vertical drag, clamped to [0.001, 1000]
    scale = 1 - 0.01 * (event.y - self.ymouse)
    if scale < 0.001: scale = 0.001
    elif scale > 1000: scale = 1000
    scale *= self.parent.scale
    self.parent.zoom(scale)
    self.tkRedraw()
    self.tkRecordMouse(event)
# --------------------------------------------------------------------
# draw a line segment
def segment(p1,p2):
  """Emit the two endpoints of a line segment as GL vertices.

  Caller is responsible for the surrounding glBegin(GL_LINES)/glEnd().
  """
  glVertex3f(p1[0],p1[1],p1[2])
  glVertex3f(p2[0],p2[1],p2[2])
# --------------------------------------------------------------------
# normalize a 3-vector to unit length
def vecnorm(v):
  """Return a new 3-vector: v scaled to unit length.

  NOTE: no guard against a zero-length input (division by zero).
  """
  length = sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])
  return [c/length for c in v]
# --------------------------------------------------------------------
# dot product of two 3-vectors
def vecdot(v1,v2):
  """Return the dot product of two 3-vectors."""
  return sum(a*b for a,b in zip(v1,v2))
# --------------------------------------------------------------------
# cross product of two 3-vectors
def veccross(v1,v2):
  """Return the cross product v1 x v2 as a new 3-element list."""
  x1,y1,z1 = v1[0],v1[1],v1[2]
  x2,y2,z2 = v2[0],v2[1],v2[2]
  return [y1*z2 - z1*y2,
          z1*x2 - x1*z2,
          x1*y2 - y1*x2]
# --------------------------------------------------------------------
# return characteristic distance of simulation domain = max dimension
def compute_distance(box):
  """Return the characteristic scene size: the longest box edge of
  box = [xlo,ylo,zlo,xhi,yhi,zhi]."""
  return max(box[3]-box[0], box[4]-box[1], box[5]-box[2])
# --------------------------------------------------------------------
# return center of box as 3 vector
def compute_center(box):
  """Return the center of box = [xlo,ylo,zlo,xhi,yhi,zhi] as a 3-vector."""
  return [0.5*(box[i] + box[i+3]) for i in range(3)]
# --------------------------------------------------------------------
# return min of 2 values
def min2(a,b):
  """Return the smaller of a and b."""
  return b if b < a else a
# --------------------------------------------------------------------
# return max of 2 values
def max2(a,b):
  """Return the larger of a and b."""
  return b if b > a else a
# --------------------------------------------------------------------
# return min of 3 values
def min3(a,b,c):
  """Return the smallest of a, b, c."""
  return min(a, b, c)
# --------------------------------------------------------------------
# return max of 3 values
def max3(a,b,c):
  """Return the largest of a, b, c."""
  return max(a, b, c)
| gpl-2.0 |
DeltaEpsilon-HackFMI2/FMICalendar-REST | venv/lib/python2.7/site-packages/django/contrib/auth/admin.py | 62 | 7740 | from django.db import transaction
from django.conf import settings
from django.contrib import admin
from django.contrib.auth.forms import (UserCreationForm, UserChangeForm,
AdminPasswordChangeForm)
from django.contrib.auth.models import User, Group
from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.html import escape
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext, ugettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
# Adapters so the function-based decorators can wrap admin view *methods*.
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
class GroupAdmin(admin.ModelAdmin):
    """Admin options for django.contrib.auth Group objects."""
    search_fields = ('name',)
    ordering = ('name',)
    filter_horizontal = ('permissions',)
    def formfield_for_manytomany(self, db_field, request=None, **kwargs):
        # NOTE(review): db_field.rel.to is the pre-Django-2.0 relation API,
        # consistent with the Django version this module targets.
        if db_field.name == 'permissions':
            qs = kwargs.get('queryset', db_field.rel.to.objects)
            # Avoid a major performance hit resolving permission names which
            # triggers a content_type load:
            kwargs['queryset'] = qs.select_related('content_type')
        return super(GroupAdmin, self).formfield_for_manytomany(
            db_field, request=request, **kwargs)
class UserAdmin(admin.ModelAdmin):
    """Admin options for the built-in User model.

    Provides a dedicated creation form, a separate password-change view,
    and permission handling that prevents add-without-change privilege
    escalation (see add_view).
    """
    add_form_template = 'admin/auth/user/add_form.html'
    change_user_password_template = None
    fieldsets = (
        (None, {'fields': ('username', 'password')}),
        (_('Personal info'), {'fields': ('first_name', 'last_name', 'email')}),
        (_('Permissions'), {'fields': ('is_active', 'is_staff', 'is_superuser',
                                       'groups', 'user_permissions')}),
        (_('Important dates'), {'fields': ('last_login', 'date_joined')}),
    )
    # Minimal fieldset shown on the add page (see get_fieldsets).
    add_fieldsets = (
        (None, {
            'classes': ('wide',),
            'fields': ('username', 'password1', 'password2')}
        ),
    )
    form = UserChangeForm
    add_form = UserCreationForm
    change_password_form = AdminPasswordChangeForm
    list_display = ('username', 'email', 'first_name', 'last_name', 'is_staff')
    list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups')
    search_fields = ('username', 'first_name', 'last_name', 'email')
    ordering = ('username',)
    filter_horizontal = ('groups', 'user_permissions',)
    def get_fieldsets(self, request, obj=None):
        # No obj means the add page: show only the creation fields.
        if not obj:
            return self.add_fieldsets
        return super(UserAdmin, self).get_fieldsets(request, obj)
    def get_form(self, request, obj=None, **kwargs):
        """
        Use special form during user creation
        """
        defaults = {}
        if obj is None:
            # NOTE(review): admin.util was renamed admin.utils in later
            # Django versions; this matches the version targeted here.
            defaults.update({
                'form': self.add_form,
                'fields': admin.util.flatten_fieldsets(self.add_fieldsets),
            })
        defaults.update(kwargs)
        return super(UserAdmin, self).get_form(request, obj, **defaults)
    def get_urls(self):
        # Prepend the password-change URL so it wins over the default
        # object-detail patterns.
        from django.conf.urls import patterns
        return patterns('',
            (r'^(\d+)/password/$',
             self.admin_site.admin_view(self.user_change_password))
        ) + super(UserAdmin, self).get_urls()
    def lookup_allowed(self, lookup, value):
        # See #20078: we don't want to allow any lookups involving passwords.
        if lookup.startswith('password'):
            return False
        return super(UserAdmin, self).lookup_allowed(lookup, value)
    @sensitive_post_parameters_m
    @csrf_protect_m
    @transaction.commit_on_success
    def add_view(self, request, form_url='', extra_context=None):
        # It's an error for a user to have add permission but NOT change
        # permission for users. If we allowed such users to add users, they
        # could create superusers, which would mean they would essentially have
        # the permission to change users. To avoid the problem entirely, we
        # disallow users from adding users if they don't have change
        # permission.
        if not self.has_change_permission(request):
            if self.has_add_permission(request) and settings.DEBUG:
                # Raise Http404 in debug mode so that the user gets a helpful
                # error message.
                raise Http404(
                    'Your user does not have the "Change user" permission. In '
                    'order to add users, Django requires that your user '
                    'account have both the "Add user" and "Change user" '
                    'permissions set.')
            raise PermissionDenied
        if extra_context is None:
            extra_context = {}
        username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
        defaults = {
            'auto_populated_fields': (),
            'username_help_text': username_field.help_text,
        }
        extra_context.update(defaults)
        return super(UserAdmin, self).add_view(request, form_url,
                                               extra_context)
    @sensitive_post_parameters_m
    def user_change_password(self, request, id, form_url=''):
        """Admin view letting a privileged user set another user's password."""
        if not self.has_change_permission(request):
            raise PermissionDenied
        user = get_object_or_404(self.queryset(request), pk=id)
        if request.method == 'POST':
            form = self.change_password_form(user, request.POST)
            if form.is_valid():
                form.save()
                msg = ugettext('Password changed successfully.')
                messages.success(request, msg)
                return HttpResponseRedirect('..')
        else:
            form = self.change_password_form(user)
        fieldsets = [(None, {'fields': list(form.base_fields)})]
        adminForm = admin.helpers.AdminForm(form, fieldsets, {})
        context = {
            'title': _('Change password: %s') % escape(user.get_username()),
            'adminForm': adminForm,
            'form_url': form_url,
            'form': form,
            'is_popup': '_popup' in request.REQUEST,
            'add': True,
            'change': False,
            'has_delete_permission': False,
            'has_change_permission': True,
            'has_absolute_url': False,
            'opts': self.model._meta,
            'original': user,
            'save_as': False,
            'show_save': True,
        }
        return TemplateResponse(request,
            self.change_user_password_template or
            'admin/auth/user/change_password.html',
            context, current_app=self.admin_site.name)
    def response_add(self, request, obj, post_url_continue=None):
        """
        Determines the HttpResponse for the add_view stage. It mostly defers to
        its superclass implementation but is customized because the User model
        has a slightly different workflow.
        """
        # We should allow further modification of the user just added i.e. the
        # 'Save' button should behave like the 'Save and continue editing'
        # button except in two scenarios:
        # * The user has pressed the 'Save and add another' button
        # * We are adding a user in a popup
        if '_addanother' not in request.POST and '_popup' not in request.POST:
            request.POST['_continue'] = 1
        return super(UserAdmin, self).response_add(request, obj,
                                                   post_url_continue)
# Hook the auth models into the default admin site.
admin.site.register(Group, GroupAdmin)
admin.site.register(User, UserAdmin)
| mit |
subutai/htmresearch | projects/sdr_paper/pytorch_experiments/test_score_table.py | 2 | 3491 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2019, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
from __future__ import print_function
import os
import numpy as np
from tabulate import tabulate
from htmresearch.frameworks.pytorch.mnist_sparse_experiment import \
MNISTSparseExperiment
def bestScore(scores):
  """
  Given a single repetition of a single experiment, return the test score,
  best epoch, and total noise score from the epoch with maximum test accuracy.
  """
  testScores, totalCorrect = scores[0], scores[1]
  # The "best" epoch is the one with the highest test accuracy.
  epoch = np.argmax(testScores)
  return testScores[epoch], epoch, totalCorrect[epoch]
def getErrorBars(expPath, suite):
  """
  Go through each epoch in each repetition in this path. For each repetition
  select the epoch with the best test score as the best epoch, and collect the
  test score and noise score for that epoch as the optimum for the repetition.

  Return a dict with the mean and stdev of test accuracy ("test_score") and
  noise accuracy ("noise_score") across the per-repetition optima.
  """
  # Full per-epoch histories for every repetition of this experiment.
  results = suite.get_all_histories_over_repetitions(
    exp=expPath,
    tags=["testerror", "totalCorrect"])
  testScores = []
  noiseScores = []
  for scoresForRepetition in zip(results["testerror"], results["totalCorrect"]):
    repTest, _, repNoise = bestScore(scoresForRepetition)
    testScores.append(repTest)
    noiseScores.append(repNoise)
  testScores = np.asarray(testScores)
  noiseScores = np.asarray(noiseScores)
  return {
    "test_score": (testScores.mean(), testScores.std()),
    "noise_score": (noiseScores.mean(), noiseScores.std())
  }
if __name__ == '__main__':
  # Build a table of best test/noise scores (with error bars) for every
  # experiment configured in the suite, then print it in grid and LaTeX form.
  suite = MNISTSparseExperiment()
  suite.parse_opt()
  suite.parse_cfg()
  # Fall back to every section of the config when no experiments were named.
  experiments = suite.options.experiments or suite.cfgparser.sections()
  testScoresTable = [["Network", "Test Score", "Noise Score"]]
  for expName in experiments:
    results = suite.get_exp(expName)
    for exp in results:
      # Skip experiments whose result directory is missing.
      if not os.path.exists(exp):
        continue
      errorBars = getErrorBars(exp, suite)
      test_score = u"{0:.2f} ± {1:.2f}".format(*errorBars["test_score"])
      noise_score = u"{0:,.0f} ± {1:.2f}".format(*errorBars["noise_score"])
      params = suite.get_params(exp=exp)
      testScoresTable.append([params["name"], test_score, noise_score])
  print(tabulate(testScoresTable, headers="firstrow", tablefmt="grid"))
  print()
  print(tabulate(testScoresTable, headers="firstrow", tablefmt="latex"))
| agpl-3.0 |
pizzapanther/GAE-Bulk-Mailer | django/contrib/staticfiles/storage.py | 29 | 12194 | from __future__ import unicode_literals
import hashlib
import os
import posixpath
import re
try:
from urllib.parse import unquote, urlsplit, urlunsplit, urldefrag
except ImportError: # Python 2
from urllib import unquote
from urlparse import urlsplit, urlunsplit, urldefrag
from django.conf import settings
from django.core.cache import (get_cache, InvalidCacheBackendError,
cache as default_cache)
from django.core.exceptions import ImproperlyConfigured
from django.core.files.base import ContentFile
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.utils.datastructures import SortedDict
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.utils._os import upath
from django.contrib.staticfiles.utils import check_settings, matches_patterns
class StaticFilesStorage(FileSystemStorage):
    """
    Standard file system storage for static files.

    The defaults for ``location`` and ``base_url`` are
    ``STATIC_ROOT`` and ``STATIC_URL``.
    """
    def __init__(self, location=None, base_url=None, *args, **kwargs):
        # Fall back to the project-wide settings when not given explicitly.
        location = settings.STATIC_ROOT if location is None else location
        base_url = settings.STATIC_URL if base_url is None else base_url
        check_settings(base_url)
        super(StaticFilesStorage, self).__init__(location, base_url,
                                                 *args, **kwargs)

    def path(self, name):
        if self.location:
            return super(StaticFilesStorage, self).path(name)
        raise ImproperlyConfigured("You're using the staticfiles app "
                                   "without having set the STATIC_ROOT "
                                   "setting to a filesystem path.")
class CachedFilesMixin(object):
    """
    Storage mixin that renames files to include a content hash (for
    cache-busting) and rewrites URL references inside matching files
    (CSS) so they point at the hashed names.
    """
    # Template used to write a rewritten URL back into the file content.
    default_template = """url("%s")"""
    # (glob, (regex[, template])) pairs: which references inside which
    # files get rewritten. A bare regex uses default_template.
    patterns = (
        ("*.css", (
            r"""(url\(['"]{0,1}\s*(.*?)["']{0,1}\))""",
            (r"""(@import\s*["']\s*(.*?)["'])""", """@import url("%s")"""),
        )),
    )

    def __init__(self, *args, **kwargs):
        super(CachedFilesMixin, self).__init__(*args, **kwargs)
        try:
            # Prefer a dedicated 'staticfiles' cache when configured.
            self.cache = get_cache('staticfiles')
        except InvalidCacheBackendError:
            # Use the default backend
            self.cache = default_cache
        self._patterns = SortedDict()
        # Pre-compile every pattern, grouped by the file glob it applies to.
        for extension, patterns in self.patterns:
            for pattern in patterns:
                if isinstance(pattern, (tuple, list)):
                    pattern, template = pattern
                else:
                    template = self.default_template
                compiled = re.compile(pattern)
                self._patterns.setdefault(extension, []).append((compiled, template))

    def file_hash(self, name, content=None):
        """
        Returns a hash (first 12 hex digits of the MD5) of the file with
        the given name and optional content, or None without content.
        """
        if content is None:
            return None
        md5 = hashlib.md5()
        for chunk in content.chunks():
            md5.update(chunk)
        return md5.hexdigest()[:12]

    def hashed_name(self, name, content=None):
        """
        Return `name` with the content hash inserted before the extension,
        e.g. "css/base.css" -> "css/base.27e20196a850.css". Query strings
        and fragments in `name` are preserved.
        """
        parsed_name = urlsplit(unquote(name))
        clean_name = parsed_name.path.strip()
        opened = False
        if content is None:
            if not self.exists(clean_name):
                raise ValueError("The file '%s' could not be found with %r." %
                                 (clean_name, self))
            try:
                content = self.open(clean_name)
            except IOError:
                # Handle directory paths and fragments
                return name
            opened = True
        try:
            file_hash = self.file_hash(clean_name, content)
        finally:
            # Only close files we opened ourselves.
            if opened:
                content.close()
        path, filename = os.path.split(clean_name)
        root, ext = os.path.splitext(filename)
        if file_hash is not None:
            file_hash = ".%s" % file_hash
        hashed_name = os.path.join(path, "%s%s%s" %
                                   (root, file_hash, ext))
        unparsed_name = list(parsed_name)
        unparsed_name[2] = hashed_name
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        if '?#' in name and not unparsed_name[3]:
            unparsed_name[2] += '?'
        return urlunsplit(unparsed_name)

    def cache_key(self, name):
        # Stable cache key for the hashed name of `name`.
        return 'staticfiles:%s' % hashlib.md5(force_bytes(name)).hexdigest()

    def url(self, name, force=False):
        """
        Returns the real URL in DEBUG mode (unless force=True); otherwise
        the URL of the hashed file, consulting/updating the cache.
        """
        if settings.DEBUG and not force:
            hashed_name, fragment = name, ''
        else:
            clean_name, fragment = urldefrag(name)
            if urlsplit(clean_name).path.endswith('/'): # don't hash paths
                hashed_name = name
            else:
                cache_key = self.cache_key(name)
                hashed_name = self.cache.get(cache_key)
                if hashed_name is None:
                    hashed_name = self.hashed_name(clean_name).replace('\\', '/')
                    # set the cache if there was a miss
                    # (e.g. if cache server goes down)
                    self.cache.set(cache_key, hashed_name)
        final_url = super(CachedFilesMixin, self).url(hashed_name)
        # Special casing for a @font-face hack, like url(myfont.eot?#iefix")
        # http://www.fontspring.com/blog/the-new-bulletproof-font-face-syntax
        query_fragment = '?#' in name # [sic!]
        if fragment or query_fragment:
            urlparts = list(urlsplit(final_url))
            if fragment and not urlparts[4]:
                urlparts[4] = fragment
            if query_fragment and not urlparts[3]:
                urlparts[2] += '?'
            final_url = urlunsplit(urlparts)
        return unquote(final_url)

    def url_converter(self, name, template=None):
        """
        Returns the custom URL converter for the given file name.
        """
        if template is None:
            template = self.default_template

        def converter(matchobj):
            """
            Converts the matched URL depending on the parent level (`..`)
            and returns the normalized and hashed URL using the url method
            of the storage.
            """
            matched, url = matchobj.groups()
            # Completely ignore http(s) prefixed URLs,
            # fragments and data-uri URLs
            if url.startswith(('#', 'http:', 'https:', 'data:', '//')):
                return matched
            name_parts = name.split(os.sep)
            # Using posix normpath here to remove duplicates
            url = posixpath.normpath(url)
            url_parts = url.split('/')
            parent_level, sub_level = url.count('..'), url.count('/')
            if url.startswith('/'):
                sub_level -= 1
                url_parts = url_parts[1:]
            # Work out how many trailing parts of the referencing file's
            # path to drop (start) and how many leading parts of the
            # referenced url to keep (end) when joining the two.
            if parent_level or not url.startswith('/'):
                start, end = parent_level + 1, parent_level
            else:
                if sub_level:
                    if sub_level == 1:
                        parent_level -= 1
                    start, end = parent_level, 1
                else:
                    start, end = 1, sub_level - 1
            joined_result = '/'.join(name_parts[:-start] + url_parts[end:])
            hashed_url = self.url(unquote(joined_result), force=True)
            file_name = hashed_url.split('/')[-1:]
            relative_url = '/'.join(url.split('/')[:-1] + file_name)
            # Return the hashed version to the file
            return template % unquote(relative_url)
        return converter

    def post_process(self, paths, dry_run=False, **options):
        """
        Post process the given list of files (called from collectstatic).

        Processing is actually two separate operations:

        1. renaming files to include a hash of their content for cache-busting,
           and copying those files to the target storage.
        2. adjusting files which contain references to other files so they
           refer to the cache-busting filenames.

        If either of these are performed on a file, then that file is considered
        post-processed. Yields (name, hashed_name, processed) tuples.
        """
        # don't even dare to process the files if we're in dry run mode
        if dry_run:
            return
        # where to store the new paths
        hashed_paths = {}
        # build a list of adjustable files
        matches = lambda path: matches_patterns(path, self._patterns.keys())
        adjustable_paths = [path for path in paths if matches(path)]
        # then sort the files by the directory level
        path_level = lambda name: len(name.split(os.sep))
        for name in sorted(paths.keys(), key=path_level, reverse=True):
            # use the original, local file, not the copied-but-unprocessed
            # file, which might be somewhere far away, like S3
            storage, path = paths[name]
            with storage.open(path) as original_file:
                # generate the hash with the original content, even for
                # adjustable files.
                hashed_name = self.hashed_name(name, original_file)
                # then get the original's file content..
                if hasattr(original_file, 'seek'):
                    original_file.seek(0)
                hashed_file_exists = self.exists(hashed_name)
                processed = False
                # ..to apply each replacement pattern to the content
                if name in adjustable_paths:
                    content = original_file.read().decode(settings.FILE_CHARSET)
                    for patterns in self._patterns.values():
                        for pattern, template in patterns:
                            converter = self.url_converter(name, template)
                            content = pattern.sub(converter, content)
                    if hashed_file_exists:
                        self.delete(hashed_name)
                    # then save the processed result
                    content_file = ContentFile(force_bytes(content))
                    saved_name = self._save(hashed_name, content_file)
                    hashed_name = force_text(saved_name.replace('\\', '/'))
                    processed = True
                else:
                    # or handle the case in which neither processing nor
                    # a change to the original file happened
                    if not hashed_file_exists:
                        processed = True
                        saved_name = self._save(hashed_name, original_file)
                        hashed_name = force_text(saved_name.replace('\\', '/'))
                # and then set the cache accordingly
                hashed_paths[self.cache_key(name.replace('\\', '/'))] = hashed_name
                yield name, hashed_name, processed
        # Finally set the cache
        self.cache.set_many(hashed_paths)
class CachedStaticFilesStorage(CachedFilesMixin, StaticFilesStorage):
    """
    A static file system storage backend which also saves
    hashed copies of the files it saves.
    """
    # All behaviour comes from combining the mixin with the base storage.
    pass
class AppStaticStorage(FileSystemStorage):
    """
    A file system storage backend that takes an app module and works
    for the ``static`` directory of it.
    """
    prefix = None
    source_dir = 'static'

    def __init__(self, app, *args, **kwargs):
        """
        Returns a static file storage if available in the given app.
        """
        # `app` is the dotted path of the actual app module; serve files
        # from its static/ subdirectory.
        app_module = import_module(app)
        app_dir = os.path.dirname(upath(app_module.__file__))
        static_dir = os.path.join(app_dir, self.source_dir)
        super(AppStaticStorage, self).__init__(static_dir, *args, **kwargs)
class ConfiguredStorage(LazyObject):
    # Lazily instantiate whichever backend STATICFILES_STORAGE names,
    # deferring settings access until the storage is first used.
    def _setup(self):
        self._wrapped = get_storage_class(settings.STATICFILES_STORAGE)()

# Module-level singleton used throughout the staticfiles app.
staticfiles_storage = ConfiguredStorage()
| bsd-2-clause |
guewen/OpenUpgrade | addons/mrp_operations/report/mrp_wc_barcode.py | 381 | 1501 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class workcenter_code(report_sxw.rml_parse):
    # RML parser for the work-centre barcode report; only adds the `time`
    # module to the template rendering context.
    def __init__(self, cr, uid, name, context):
        super(workcenter_code, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
        })

# Register the barcode report against the mrp.workcenter model.
report_sxw.report_sxw('report.mrp.wc.barcode', 'mrp.workcenter', 'addons/mrp_operations/report/mrp_wc_barcode.rml',parser=workcenter_code,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
landier/imdb-crawler | crawler/libs/sqlalchemy/dialects/mysql/pyodbc.py | 18 | 2739 | # mysql/pyodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MySQL database via the pyodbc adapter.
pyodbc is available at:
http://pypi.python.org/pypi/pyodbc/
Connecting
----------
Connect string::
mysql+pyodbc://<username>:<password>@<dsnname>
Limitations
-----------
The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.
"""
from sqlalchemy.dialects.mysql.base import MySQLDialect, MySQLExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy.engine import base as engine_base
from sqlalchemy import util
import re
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):

    def get_lastrowid(self):
        """Fetch MySQL's LAST_INSERT_ID() through a short-lived cursor."""
        cursor = self.create_cursor()
        cursor.execute("SELECT LAST_INSERT_ID()")
        row = cursor.fetchone()
        cursor.close()
        return row[0]
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
    """MySQL dialect speaking ODBC via pyodbc."""

    # The pyodbc MySQL driver mishandles unicode statements; send bytes.
    supports_unicode_statements = False
    execution_ctx_cls = MySQLExecutionContext_pyodbc

    pyodbc_driver_name = "MySQL"

    def __init__(self, **kw):
        # deal with http://code.google.com/p/pyodbc/issues/detail?id=25
        kw.setdefault('convert_unicode', True)
        super(MySQLDialect_pyodbc, self).__init__(**kw)

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        # Prefer 'character_set_results' for the current connection over the
        # value in the driver. SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]

        util.warn("Could not detect the connection character set. Assuming latin1.")
        return 'latin1'

    def _extract_error_code(self, exception):
        """Extract the numeric MySQL error code from a DBAPI exception.

        Returns None when the exception message carries no "(NNNN)" code.
        """
        m = re.compile(r"\((\d+)\)").search(str(exception.args))
        # BUGFIX: ``search`` returns None when the pattern is absent; the
        # previous code called ``m.group(1)`` unconditionally and raised
        # AttributeError instead of returning None.
        if m:
            return int(m.group(1))
        return None

dialect = MySQLDialect_pyodbc
| gpl-3.0 |
stefanseefeld/qmtest | qm/test/resource.py | 2 | 4158 | ########################################################################
#
# File: resource.py
# Author: Mark Mitchell
# Date: 2001-10-10
#
# Contents:
# QMTest Resource class.
#
# Copyright (c) 2001, 2002, 2003 by CodeSourcery, LLC. All rights reserved.
#
# For license terms see the file COPYING.
#
########################################################################
########################################################################
# Imports
########################################################################
import qm
import qm.test.runnable
########################################################################
# Classes
########################################################################
class Resource(qm.test.runnable.Runnable):
    """A 'Resource' sets up before a test and cleans up afterwards.

    Some tests take a lot of work to set up.  For example, a database
    test that checks the result of SQL queries may require that the
    database first be populated with a substantial number of records.
    If there are many tests that all use the same set of records, it
    would be wasteful to set up the database for each test.  It would
    be more efficient to set up the database once, run all of the
    tests, and then remove the databases upon completion.

    You can use a 'Resource' to gain this efficiency.  If a test
    depends on a resource, QMTest will ensure that the resource is
    available before the test runs.  Once all tests that depend on the
    resource have been run QMTest will destroy the resource.

    Each resource class (i.e., class derived from 'Resource')
    describes a set of "arguments".  Each argument has a name and a
    type.  The values of these arguments determine the design-time
    parameters for the resource.  See the documentation for the 'Test'
    class for more complete information.

    Each resource class also defines a 'SetUp' method that indicates how
    to set up the resource, and a 'CleanUp' method that indicates how
    to clean up afterwards.

    'Resource' is an abstract class.

    You can extend QMTest by providing your own resource class
    implementation.  If the resource classes that come with QMTest
    cannot be used conveniently with your application domain, you may
    wish to create a new resource class.

    To create your own resource class, you must create a Python class
    derived (directly or indirectly) from 'Resource'.  The
    documentation for each method of 'Resource' indicates whether you
    must override it in your resource class implementation.  Some
    methods may be overridden, but do not need to be.  You might want
    to override such a method to provide a more efficient
    implementation, but QMTest will work fine if you just use the
    default version.

    If QMTest calls a method on a resource and that method raises an
    exception that is not caught within the method itself, QMTest will
    catch the exception and continue processing."""

    # Distinguishes resources from other Runnable kinds (e.g. tests).
    kind = "resource"

    def SetUp(self, context, result):
        """Set up the resource.

        'context' -- A 'Context' giving run-time parameters to the
        resource.  The resource may place additional variables into
        the 'context'; these variables will be visible to tests that
        depend on the resource.

        'result' -- A 'Result' object.  The outcome will be
        'Result.PASS' when this method is called.  The 'result' may be
        modified by this method to indicate outcomes other than
        'Result.PASS' or to add annotations.

        This method should not return a value.

        Derived classes must override this method."""

        raise NotImplementedError

    def CleanUp(self, result):
        """Clean up the resource.

        'result' -- A 'Result' object.  The outcome will be
        'Result.PASS' when this method is called.  The 'result' may be
        modified by this method to indicate outcomes other than
        'Result.PASS' or to add annotations.

        This method should not return a value.

        Derived classes may override this method."""

        # Default clean-up is a no-op.
        pass
| gpl-2.0 |
HUGG/NGWM2016-modelling-course | Lessons/04-Basic-fluid-mechanics/scripts/1D-asthenospheric-counterflow.py | 1 | 1766 | # -*- coding: utf-8 -*-
"""
1D-asthenospheric-counterflow.py
A script for plotting velocity magnitudes for 1D counterflow in the
asthenosphere.
dwhipp 01.16
"""
#--- User-defined input variables
hl = 100.0 # Thickness of lithosphere [km]
h = 200.0 # Thickness of asthenosphere [km]
u0 = 15.0 # Lithospheric plate velocity [cm/a]
# NOTE(review): u0 is labelled [cm/a] above but the conversion below divides
# by 1000, i.e. treats it as [mm/a] -- confirm which unit is intended.
numpts = 101 # Number of points to calculate velocity across channel
#--- End user-defined input

# Import libraries
import numpy as np
import matplotlib.pyplot as plt

# Convert inputs to SI values
h = h * 1000.0 # [km] -> [m]
hl = hl * 1000.0 # [km] -> [m]
u0 = u0 / 1000.0 / 365.25 / 24.0 / 3600.0 # [mm/a] -> [m/s]

# Define channel arrays: y spans the asthenospheric channel, u holds velocities
y = np.linspace(0.0,h,numpts)
u = np.zeros(numpts)

# Loop across all values of y and define velocity
for i in range(numpts):
    # Insert equation for asthenospheric counterflow below
    # (exercise placeholder -- the script will not run until ???? is replaced)
    u[i] = ????

# Rescale values of y and u for plotting ([m] -> [km], [m/s] -> [mm/a])
y = y / 1000.0
u = u * 1000.0 * 365.25 * 24.0 * 3600.0

# Create figure for plotting
plt.figure()

# Make plot
plt.plot(u,y,'ko-')

# Invert y axis so depth increases downward
plt.gca().invert_yaxis()

# Add text label with thickness of lithospheric plate
# (exercise placeholder -- fill in the plt.text arguments)
plt.text(????)

# Label axes and add title
plt.xlabel("Flow velocity [mm/a]")
plt.ylabel("Distance across channel [km]")
plt.title("Asthenospheric counterflow")

# Show plot
plt.show()
cliftbar/FlaPyDisaster | FlaPyDisaster/explosion/explosion_math.py | 1 | 2734 | import math
def hello():
    """
    Print a short greeting/help message for the explosion math package.

    :returns: The message string (also printed), so callers can reuse it.
    """
    ret_string = "This is the explosion math package! Some help info is below."
    print(ret_string)
    # Return the message as well; previously it was built but discarded.
    return ret_string
def general_bomb_equation(mass_kg, radius_m):
    """
    General Sadovsky bomb overpressure equation, surface explosion at standard
    atmospheric conditions.

    :param mass_kg: Mass in kg TNT
    :param radius_m: Distance from explosion in meters
    :returns: Overpressure in atmospheres, or -1 when mass or radius is zero
    :Reference: BlastEffectCalculation.pdf, Equation 2
    """
    # Guard against division by zero (and the degenerate zero-mass case).
    if radius_m == 0 or mass_kg == 0:
        return -1
    near_term = 0.95 * (math.pow(mass_kg, .33333) / radius_m)
    mid_term = 3.9 * math.pow((mass_kg * mass_kg), .33333) / (radius_m * radius_m)
    far_term = 13.0 * mass_kg / (radius_m ** 3.0)
    return near_term + mid_term + far_term
def newmark_overpressure(energy_mttnt, radius_m):
    """
    Newmark-Hansen overpressure formula. Intended for surface blasts, but
    adapted to air-bursts.

    :param energy_mttnt: Energy in Megatons TNT
    :param radius_m: Actual distance from blast in m (hypotenuse distance for
        airburst events).
    :returns: Overpressure in bar
    :Reference: NuclearBlastOverpressure.pdf, Equation 3
    """
    # Convert megatons to tons of TNT and form the cube-scaled energy term.
    energy_tnt = energy_mttnt * 1000000
    scaled = energy_tnt / (radius_m ** 3)
    return (6784 * scaled) + (93 * (math.sqrt(scaled)))
def radius_from_overpressure(overpressure_bar, energy_tnt, radius_upper_bound_km=1000, error_threshold=0.0001, max_iterations=100):
    """
    Find the radius at which a given overpressure occurs for a given event
    energy, using a bisection search on the Newmark-Hansen overpressure
    formula (which decreases monotonically with radius). Lower limit of 0.

    :param overpressure_bar: Overpressure in Bars
    :param energy_tnt: Energy in Megatons TNT
    :param radius_upper_bound_km: Upper bound for radius in kilometers. Default value of 1000 km
    :param error_threshold: Error threshold (percentage) to stop bisection search at. Default value of 0.0001
    :param max_iterations: Maximum number of bisection search iterations to run. Default value of 100
    :returns: Radius in km and calculation error in a tuple, in that order
    """
    # Search bounds in meters; lower bound kept slightly above zero to avoid
    # dividing by zero in the overpressure formula.
    x_upper = radius_upper_bound_km * 1000
    x_lower = 0.1
    x_old = 1
    i = 0
    error_val = 100
    while True:
        # BUGFIX: midpoint was computed as (x_upper + x_lower / 2), which put
        # x_mid outside the bracket; parentheses were missing.
        x_mid = (x_upper + x_lower) / 2
        y_mid = newmark_overpressure(energy_tnt, x_mid)
        if x_mid != 0:
            error_val = math.fabs((x_mid - x_old) / x_mid) * 100
        # Overpressure falls with radius: a value below the target means the
        # radius is too large, so tighten the upper bound (and vice versa).
        if y_mid < overpressure_bar:
            x_upper = x_mid
        elif y_mid > overpressure_bar:
            x_lower = x_mid
        else:
            return [x_mid / 1000, error_val]
        # BUGFIX: x_old was never updated, so the relative-error convergence
        # test compared every midpoint against the constant 1.
        x_old = x_mid
        i += 1
        if error_val <= error_threshold or i > max_iterations:
            return [x_mid / 1000, error_val]
| mit |
jnobre/lxmls-toolkit-2017 | lxmls/parsing/dependency_writer.py | 2 | 1427 | import sys
import numpy as np
import os
from os import path
class DependencyWriter:
"""
Dependency writer class
"""
def __init__(self):
pass
def save(self, language, heads_pred):
"""Saves predicted dependency trees."""
base_deppars_dir = path.join(path.dirname(__file__), "..", "..", "data", "deppars")
languages = ["danish", "dutch", "portuguese", "english"]
i = 0
word_dict = {}
pos_dict = {}
feat_counts = {}
if language not in languages:
print "Language does not exist: \"%s\": Available are: %s" % (language, languages)
return
# Load test data
n_toks = 0
n_sents = 0
conll_file = open(path.join(base_deppars_dir, language + "_test.conll"))
conll_file_out = open(path.join(base_deppars_dir, language + "_test.conll.pred"), 'w')
for line in conll_file:
line = line.rstrip()
if len(line) == 0:
n_toks = 0
n_sents += 1
conll_file_out.write("\n")
continue
fields = line.split("\t")
fields[6] = "{0}".format(heads_pred[n_sents][n_toks+1])
line_out = "\t".join(fields)
n_toks += 1
conll_file_out.write(line_out)
conll_file_out.write("\n")
conll_file_out.close()
conll_file.close()
| mit |
LCOGT/citsciportal | app/agentex/datareduc.py | 1 | 37132 | '''
Citizen Science Portal: App containing Agent Exoplanet and Show Me Stars for Las Cumbres Observatory Global Telescope Network
Copyright (C) 2014-2015 LCOGT
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
All non render/page based views are stored here, rather than views.py.
'''
from astropy.io import fits
from calendar import timegm
from datetime import datetime,timedelta
from django.conf import settings
from django.contrib import messages
from django.contrib.admin.models import LogEntry, ADDITION
from django.contrib.auth.decorators import login_required
from django.contrib.contenttypes.models import ContentType
from django.core.cache import cache
from django.core.serializers import serialize
from django.templatetags.static import static
from django.db import connection
from django.db.models import Count, Avg, Min, Max, Variance, Q, Sum
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.shortcuts import render_to_response, render
from django.template import RequestContext
from django.urls import reverse
from itertools import chain
from rest_framework import status
from rest_framework.response import Response
from time import mktime
import numpy as np
from agentex.models import *
from agentex.forms import DataEntryForm, RegisterForm, CommentForm,RegistrationEditForm
import agentex.dataset as ds
from agentex.utils import achievementscheck
from agentex.agentex_settings import planet_level
from agentex.utils import dictconv
import logging
logger = logging.getLogger(__name__)
def personcheck(request):
    """Return the user associated with this request."""
    return request.user
def calibrator_data(calid,code):
    """
    Build calibrated light curves for every user who marked calibrator star
    `calid` as a decision ('D') for event `code`.

    Returns (data, timestamps, usernames) where `data` holds one list of
    calibrated values per user, aligned with the unix `timestamps`.
    """
    data = []
    # All frames (data sources) for this event, in time order.
    sources, times = zip(*DataSource.objects.filter(event__slug=code).values_list('id','timestamp').order_by('timestamp'))
    points = Datapoint.objects.filter(data__in=sources)
    #points.filter(pointtype='C').values('data__id','user','value')
    people = Decision.objects.filter(source__id=calid,planet__name=code,value='D',current=True).values_list('person__username',flat=True).distinct()
    # Zero-filled defaults so each per-user dict covers every frame.
    norm = dict((key,0) for key in sources)
    for pid in people:
        cal = []
        # Per-frame source (S), background (B) and calibrator (C) counts.
        sc = dict(points.filter(user__username=pid,pointtype='S').values_list('data__id','value'))
        bg = dict(points.filter(user__username=pid,pointtype='B').values_list('data__id','value'))
        c = dict(points.filter(user__username=pid,pointtype='C',coorder__source__id=calid).values_list('data__id','value'))
        # Python 2 idiom: merge defaults with measurements (measurements win).
        sc_norm = dict(norm.items() + sc.items())
        bg_norm = dict(norm.items() + bg.items())
        c_norm = dict(norm.items() + c.items())
        for v in sources:
            try:
                # Background-subtracted source flux relative to calibrator.
                cal.append((sc_norm[v]- bg_norm[v])/(c_norm[v] - bg_norm[v]))
            except:
                # Missing data or zero denominator: pad with 0 for this frame.
                cal.append(0)
        data.append(cal)
    return data,[timegm(s.timetuple())+1e-6*s.microsecond for s in times],list(people)
def average_combine(measurements, averages, ids, star, category, progress, admin=False):
    """
    Merge one user's measurements with the stored average set for a star.

    When the user's dataset is incomplete (done < total), start from the
    stored averages and overwrite the slots the user has measured; when it
    is complete, just return the user's own values.

    :param measurements: Datapoint queryset for this user/category
    :param averages: AverageSet queryset for the event
    :param ids: ordered list of DataSource ids (defines array positions)
    :param star: calibrator DataCollection (or None for source/background)
    :param category: 'S', 'B' or 'C' settype to merge
    :param progress: dict with 'done' and 'total' measurement counts
    :returns: numpy array of merged values (empty array when unavailable)
    """
    # BUGFIX: this guard used to sit after progress['done'] was subscripted,
    # so it could never fire; check it first.
    if not progress:
        logger.debug("No progress was passed")
        return np.array([])
    if progress['done'] < progress['total']:
        ave_measurement = averages.filter(star=star, settype=category)
        if ave_measurement.count() > 0:
            ## Find the array indices of my values and replace these averages
            ave = np.array(ave_measurement[0].data)
            mine = measurements.values_list('data', 'value')
            for vals in mine:
                # Fit my values in with the average values because dataset is not complete
                try:
                    my_id = ids.index(vals[0])
                    ave[my_id] = vals[1]
                except Exception as e:
                    logger.error(e)
            return ave
        else:
            return np.array([])
    elif progress['done'] == progress['total']:
        # Complete dataset: the user's own values stand alone.
        mine = np.array(measurements.values_list('value', flat=True))
        return mine
    else:
        # BUGFIX: the log line referenced the undefined name ``numobs``
        # (NameError); log the expected total from ``progress`` instead.
        logger.debug("Error - too many measurements: %s %s" % (measurements.count(), progress['total']))
        return np.array([])
def calibrator_averages(code,person=None,progress=False):
    """
    Collect (per-calibrator, source and background) count series for event
    `code`, either for a single `person` (their values merged into the
    stored averages) or for the crowd averages when `person` is None.

    Returns (cals, sc, bg, stamps, ids, cats): calibrator arrays, source and
    background arrays, frame timestamps, frame ids and calibrator metadata.
    """
    cals = []
    cats = []
    planet = Event.objects.get(slug=code)
    # All frames for this event in time order; ids define array positions.
    sources = list(DataSource.objects.filter(event=planet).order_by('timestamp').values_list('id','timestamp'))
    ids,stamps = zip(*sources)
    if person:
        ## select calibrator stars used, excluding ones where ID == None, i.e. non-catalogue stars
        dc = DataCollection.objects.filter(~Q(source=None),person=person,planet=planet).order_by('calid')
        ## Measurement values only for selected 'person'
        dps = Datapoint.objects.filter(data__event=planet,user=person).order_by('data__timestamp')
    else:
        # select calibrator stars used, excluding ones where ID == None, i.e. non-catalogue stars
        dc = DataCollection.objects.filter(~Q(source=None),planet=planet).order_by('calid')
        ## Measurement values for every user of this event
        dps = Datapoint.objects.filter(data__event=planet).order_by('data__timestamp')
    averages = AverageSet.objects.filter(planet=planet)
    if person:
        # Make a combined list of source values
        measurements = dps.filter(pointtype='S')
        sc = average_combine(measurements,averages,ids,None,'S',progress)
        # Make a combined list of background values
        measurements = dps.filter(pointtype='B')
        bg = average_combine(measurements,averages,ids,None,'B',progress)
    else:
        # Crowd mode: use the stored averages directly.
        sc = np.array(averages.filter(star=None,settype='S')[0].data)
        bg = np.array(averages.filter(star=None,settype='B')[0].data)
    # Make a combined list of all calibration stars used by 'person'
    for calibrator in dc:
        if person:
            measurements = dps.filter(pointtype='C',coorder=calibrator)
            ave = average_combine(measurements,averages,ids,calibrator.source,'C',progress)
        else:
            ave_cal = averages.filter(star=calibrator,settype='C')
            if ave_cal.count() > 0:
                ave = np.array(ave_cal[0].data)
            else:
                ave = np.array([])
        if ave.size > 0:
            cals.append(ave)
            # Attach the (latest) decision for this calibrator, 'X' if none.
            try:
                if person:
                    decvalue = Decision.objects.filter(source=calibrator.source,person=person,planet=planet,current=True)[0].value
                else:
                    decvalue = Decision.objects.filter(source=calibrator.source, planet=planet,current=True)[0].value
            except:
                decvalue ='X'
            cat_item = {'sourcename':calibrator.source.name,'catalogue':calibrator.source.catalogue}
            cat_item['decision'] = decvalue
            cat_item['order'] = str(calibrator.calid)
            cats.append(cat_item)
    return cals,sc,bg,stamps,ids,cats
def photometry(code,person,progress=False,admin=False):
    """Build normalised light curves from the calibrator averages of event ``code``.

    For each calibrator set, the background-subtracted source flux is divided
    by the background-subtracted calibrator flux, then normalised by the mean
    of the first three and last three samples (the assumed out-of-transit
    level).

    Returns (normcals, stamps, indexes, cats) when ``admin`` is true,
    otherwise the full 8-tuple used by the measurement views.

    NOTE(review): ``progress`` is indexed as progress['total'] below, so the
    default of False would raise if any calibrator list is non-empty --
    callers presumably always pass a dict; confirm.
    """
    # Empty lists to store normalised calibrators and maximum values
    normcals = []
    maxvals = []
    dates = []
    stamps = []
    # Call in averages
    cals,sc,bg,times,ids,cats = calibrator_averages(code,person,progress)
    indexes = [int(i) for i in ids]
    #sc = np.array(sc)
    #bg = np.array(bg)
    # Iterate over every calibrator
    for cal in cals:
        if len(cal) == progress['total']:
            #### Do not attempt to do the photmetry where the number of calibrators does not match the total
            # Determine calibrated flux from source
            val = (sc - bg)/(cal-bg)
            # Determine maximum flux from source
            maxval = np.mean(np.r_[val[:3],val[-3:]])
            # Append to maxvals
            maxvals.append(maxval)
            # Normalise the maxval
            norm = val/maxval
            #Append the normalised value
            normcals.append(list(norm))
    # Find my data and create unix timestamps
    unixt = lambda x: timegm(x.timetuple())+1e-6*x.microsecond
    iso = lambda x: x.isoformat(" ")
    # NOTE(review): under Python 3 these are lazy map objects; the admin
    # branch returns `stamps` without list(), so it is single-use -- confirm.
    stamps = map(unixt,times)
    dates = map(iso,times)
    if admin:
        return normcals,stamps,indexes,cats
    return cals,normcals,list(sc),list(bg),list(dates),list(stamps),indexes,cats
def measure_offset(d, person, basiccoord):
    """Return the mean (x, y) shift between ``basiccoord`` and the calibrator
    positions this person marked on the event's finder frame.

    Falls back to (0., 0.) when the person has no calibrator points on the
    finder frame, since no offset can then be estimated.
    """
    finder_frame = d.event.finder
    marked = Datapoint.objects.values_list('xpos', 'ypos').filter(
        user=person, data__id=finder_frame, pointtype='C').order_by('coorder__calid')
    if not marked.count():
        logger.debug('not enough measurements for accurate offset - {}'.format(person.username))
        return 0., 0.
    # Per-calibrator displacement of the reference coordinates from the
    # user's marks; averaging down the columns gives one (x, y) offset.
    shifts = basiccoord - np.array(marked)
    xmean, ymean = np.mean(shifts, axis=0)
    return xmean, ymean
def updatedisplay(request,code):
    """Reset which of the user's completed data collections are displayed
    for event ``code``, then re-enable only the ones ticked in the POST.

    Returns True when the form selected nothing (everything stays hidden),
    False otherwise.
    """
    # Wipe all the validations for user and event
    o = personcheck(request)
    dc = DataCollection.objects.filter(person=o.user,planet=Event.objects.get(name=code),complete=True)
    dc.update(display = False)
    empty = True
    formdata = request.POST
    for i,val in formdata.items():
        # NOTE(review): matches fields whose name ends with their own value;
        # i[4:] assumes a fixed 4-character prefix on the field name --
        # confirm against the form template.
        if i[4:] == val:
            # Add validations back one by one
            col = dc.filter(calid=val)
            col.update(display= True)
            empty = False
    return empty
def addvalidset(request,slug):
    """Store a user's dip decision(s) for one calibrator of event ``slug``.

    POST must supply 'calid' and 'choice1' (optionally 'choice2'); choices
    are translated through the module-level ``decisions`` map.  Any previous
    Decision for the same source is deleted first; when a second choice is
    given it is saved as the current one, otherwise the first becomes
    current.

    Returns False on success, True when required input is missing.
    """
    o = personcheck(request)
    calid = request.POST.get('calid','')
    choice1 = request.POST.get('choice1','')
    choice2 = request.POST.get('choice2','')
    planet = Event.objects.get(slug=slug)
    # NOTE(review): other views here filter with person=o.user; this one
    # uses the raw personcheck() result -- confirm which is intended.
    point = DataCollection.objects.filter(person=o,calid=calid,planet=planet)
    if choice1 and point and calid:
        value = decisions[choice1]
        source = point[0].source
        # Replace any earlier decision for this source
        old = Decision.objects.filter(person=o,planet=planet,source=source)
        old.delete()
        decision1 = Decision(source=source,
                             value=value,
                             person=o,
                             planet=planet)
        if choice2:
            value2 = decisions[choice2]
            # The second choice supersedes the first as 'current'
            decision2 = Decision(source=source,
                                 value=value2,
                                 person=o,
                                 planet=planet,
                                 current=True)
            decision2.save()
        else:
            decision1.current = True
        decision1.save()
        return False
    else:
        return True
@login_required
def my_data(o,code):
    """Collect this user's raw measurements for every frame of event ``code``.

    Each row carries the frame id, ISO date, unix timestamp and the user's
    source/background/calibrator values for that frame.

    Returns (rows, this-user's Datapoint queryset).

    NOTE(review): decorated with @login_required although the first
    parameter is ``o`` rather than a request object -- confirm the
    decorator is applicable here.
    """
    data = []
    sources = DataSource.objects.filter(event__slug=code).order_by('timestamp')
    points = Datapoint.objects.filter(data__event__slug=code,user=o.user)
    for s in sources:
        ps = points.filter(data=s)
        myp = ps.filter(pointtype='S')
        # NOTE(review): mypoint is computed but never used below -- the
        # 'source' entry re-queries instead; possibly leftover code.
        try:
            mypoint = '%f' % myp[0].value
        except:
            mypoint = 'null'
        cals = ps.filter(pointtype='C').values_list('value',flat=True).order_by('coorder')
        line = {
            'id' : "%i" % s.id,
            'date' : s.timestamp.isoformat(" "),
            'datestamp' : timegm(s.timestamp.timetuple())+1e-6*s.timestamp.microsecond,
            'data' : { 'source' : list(ps.filter(pointtype='S').values_list('value',flat=True)),
                       'background' : list(ps.filter(pointtype='B').values_list('value',flat=True)),
                       'calibrator' : list(cals),
                       },
            }
        data.append(line)
    return data,points
def fitsanalyse(data):
    """Do simple aperture photometry on the FITS frame behind ``data['id']``.

    ``data`` supplies marker coordinates as parallel 'x'/'y' sequences in
    the order [background, source, calibrator, ...].  For each marker the
    pixel values inside a circle of the event's radius are summed, and a
    horizontal and a vertical cut through the aperture are captured for
    profile plots.

    Returns a dict with the coordinates, the raw per-aperture sums
    (background/source as formatted strings, calibrators as floats) and
    the line cuts, shaped for the measurement UI.
    """
    coords = list(zip(data['x'], data['y']))
    datasource = DataSource.objects.get(pk=data['id'])
    # Grab a fits file (S3 storage serves by URL, local storage by path)
    if settings.USE_S3:
        file_path = datasource.fits.url
    else:
        file_path = datasource.fits.path
    dc = fits.getdata(file_path, header=False)
    r = datasource.event.radius
    linex = list()
    liney = list()
    counts = list()
    # Find all the pixels a radial distance r from x0,y0
    for co in coords:
        x0 = int(np.floor(co[0]))
        y0 = int(np.floor(co[1]))
        # Running total and pixel count for this aperture
        # (renamed from `sum`, which shadowed the builtin)
        aperture_sum = 0
        numpix = 0
        y = y0 - r
        ye = y0 + r
        vline = list()
        hline = list()
        while (y < ye):
            # Half-width of the circular aperture at this row
            angle = np.fabs(1.*(y-y0)/r)
            dx = int(np.sin(np.arccos(angle))*r)
            x = x0 - dx
            xe = x0 + dx
            while (x < xe):
                aperture_sum += float(dc[y][x])
                x += 1
                # NOTE(review): these cuts sample dc[y][x] *after* the
                # increment, i.e. one pixel right of the one just summed --
                # looks like an off-by-one; preserved as-is, confirm intent.
                if (x == x0):
                    hline.append(float(dc[y][x]))
                if (y == y0):
                    vline.append(float(dc[y][x]))
                numpix += 1
            y += 1
        linex.append(hline)
        liney.append(vline)
        counts.append(aperture_sum)
    #logger.debug(datetime.now() - now)
    # Send back the raw total counts. Analysis can be done when the graph is produced.
    pointsum = {'bg' : '%.2f' % counts[0], 'sc' : '%.2f' % counts[1], 'cal' : counts[2:]}
    lines = {'data' : {
        'coords' : {'xy' : coords,'r':r},
        'sum' : pointsum,
        'points' : {'bg':
                    {'horiz' : linex[0],
                     'vert' : liney[0],
                     },
                    'sc':
                    {'horiz' : linex[1],
                     'vert' : liney[1],
                     },
                    'cal':
                    {'horiz' : linex[2:],
                     'vert' : liney[2:],
                     },
                    },
        #'quality' : flag,
        # NOTE: numpix is the pixel count of the *last* aperture only
        'pixelcount' : numpix,
        },
    }
    return lines
def savemeasurement(person, lines, dataid, mode):
    """Persist one frame's aperture measurements for ``person``.

    ``lines`` is the dict produced by fitsanalyse(); ``dataid`` is the
    DataSource (frame) id and ``mode`` the entry mode recorded on the
    source/background points.  Validates marker alignment (strictly within
    3px on the finder frame, within 20px elsewhere), replaces the user's
    previous points for the frame, creates/reuses one DataCollection per
    calibrator, and finally runs the achievements check.

    Returns a DRF Response: 400 with a message on misalignment or a save
    failure, otherwise 200 with ``lines`` plus a status message.
    """
    pointsum = lines['data']['sum']
    coordsxy = lines['data']['coords']
    # Only update the user's preference if they change it
    pointtype = {'sc':'S','bg':'B'}
    coords = list(coordsxy['xy']).copy()
    d = DataSource.objects.get(id=dataid)
    # Marker order is [background, source, calibrators...]; index 1 = source
    s_x = float(coords[1][0])
    s_y = float(coords[1][1])
    if d.id == d.event.finder:
        # On the finder frame the source marker must sit on the known position
        xvar = np.abs(s_x - d.event.xpos)
        yvar = np.abs(s_y - d.event.ypos)
        if (xvar > 3 or yvar > 3):
            # Remove previous values for this point
            return Response(data={'msg': 'Target marker not correctly aligned'}, status=status.HTTP_400_BAD_REQUEST)
    xmean = 0
    ymean = 0
    # Remove previous values for this point
    oldpoints = Datapoint.objects.filter(data=d,user=person)
    oldpoints.delete()
    numpoints = Datapoint.objects.filter(data__event=d.event,user=person).count()
    datestamp = datetime.now()
    reduced = 0
    calave = 0.
    error = ''
    ### Add a datacollection for the current user
    r = d.event.radius
    for k,value in pointtype.items():
        # Background and source
        data = Datapoint(ident=d.event.slug,
                         user=person,
                         pointtype = value,
                         data=d,
                         radius=r,
                         entrymode=mode,
                         tstamp=mktime(d.timestamp.timetuple())
                         )
        if k == 'sc':
            coord = coords[1]
            data.offset = 0
        elif k == 'bg':
            coord = coords[0]
            data.offset = int(np.sqrt((s_x - float(coord[0]))**2 + (s_y - float(coord[1]))**2))
        data.value= float(pointsum[k])
        data.xpos = int(float(coord[0]))
        data.ypos = int(float(coord[1]))
        data.taken=datestamp
        try:
            data.save()
        except:
            logger.error("save error")
            return Response(data={'msg': 'Error saving data point'}, status=status.HTTP_400_BAD_REQUEST)
    # Slice coord data so we only have calibration stars
    coord = coords[2:]
    # Slice to get source and sky
    basiccoord = np.array(coords[:3])
    nocals = len(coord)
    sc_cal = float(pointsum['sc']) - float(pointsum['bg'])
    # Find out if means have been calculated already, if not do it for the source
    # This step can only happen if we are not at the finder frame
    if numpoints != 0 and d.event.finder != d.id:
        # NOTE(review): passes the calibrator coords (coords[2:]) as the
        # 'basiccoord' argument of measure_offset -- confirm this matches
        # its expectation (basiccoord above is coords[:3]).
        xmean, ymean = measure_offset(d,person,coord)
        # check the source is within this tolerance too
        sc_xpos = d.event.xpos
        sc_ypos = d.event.ypos
        xvar = np.abs(np.abs(sc_xpos-s_x)-np.abs(xmean))
        yvar = np.abs(np.abs(sc_ypos-s_y)-np.abs(ymean))
        if (xvar > 20 or yvar > 20):
            # Remove previous values for this point
            oldpoints = Datapoint.objects.filter(data__id=int(dataid),user=person)
            oldpoints.delete()
            return Response(data={'msg':'Markers not correctly aligned'}, status=status.HTTP_400_BAD_REQUEST)
    for i,value in enumerate(pointsum['cal']):
        xpos = int(float(coord[i][0]))
        ypos = int(float(coord[i][1]))
        newcoord = coord
        nocolls = DataCollection.objects.filter(planet=d.event,person=person,calid=i).count()
        if (nocolls == 0):
            ## Find closest catalogue sources
            if i > 2:
                # Add more datacollections if i is > 2 i.e. after basic 3 have been entered
                cats = CatSource.objects.filter(xpos__lt=xpos-xmean+5,ypos__lt=ypos-ymean+5,xpos__gt=xpos-xmean-5,ypos__gt=ypos-ymean-5,data__event=d.event)
            else:
                cats = CatSource.objects.filter(xpos__lt=xpos+5,ypos__lt=ypos+5,xpos__gt=xpos-5,ypos__gt=ypos-5,data__event=d.event)
            if cats:
                dcoll = DataCollection(person=person,planet=d.event,complete=False,calid=i,source=cats[0])
            else:
                dcoll = DataCollection(person=person,planet=d.event,complete=False,calid=i)
            dcoll.display = True
            dcoll.save()
        else:
            dcoll = DataCollection.objects.filter(person=person,planet=d.event,calid=i)[0]
        data = Datapoint(ident=d.event.slug,
                         user=person,
                         pointtype = 'C',
                         data=d,
                         radius=r,
                         entrymode='W',
                         tstamp=mktime(d.timestamp.timetuple())
                         )
        data.value= float(value)
        data.xpos = xpos
        data.ypos = ypos
        data.offset = int(np.sqrt((s_x - float(coord[i][0]))**2 + (s_y - float(coord[i][1]))**2))
        data.taken=datestamp
        data.coorder = dcoll
        try:
            data.save()
        except:
            return Response(data={'msg': 'Error saving'}, status=status.HTTP_400_BAD_REQUEST)
        # NOTE(review): calave is accumulated but never used afterwards
        calave = calave +sc_cal/(value - float(pointsum['bg']))/float(nocals)
    if Datapoint.objects.filter(user=person, pointtype='S', data__event=d.event).count() == d.event.numobs:
        # User has now measured every frame: mark their collections complete
        num_rows = DataCollection.objects.filter(person=person,planet=d.event).update(complete = True)
        logger.debug("{} Data Collections for {} complete".format(num_rows, d.event))
    # Counters feeding the achievements (badges) check
    nomeas = Datapoint.objects.filter(user=person).values('taken').annotate(Count('taken')).count()
    noplanet = DataCollection.objects.filter(person=person).values('planet').annotate(Count('person')).count()
    ndecs = Decision.objects.filter(person=person,current=True).count() # filter: ,planet=d.event
    unlock = False
    nunlock = 0
    resp = achievementscheck(person,d.event,nomeas,noplanet,nocals,ndecs,0)
    msg = '<br />'
    for item in resp:
        if messages.SUCCESS == item['code'] :
            msg += "<img src=\""+static(item['image'])+"\" style=\"width:96px;height:96px;\" alt=\"Badge\" />"
    if resp:
        lines['msg'] = 'Achievement unlocked {}'.format(msg)
    else:
        lines['msg'] = 'Measurements saved'
    return Response(data=lines, status=status.HTTP_200_OK)
def datagen(slug, user):
    """Assemble the per-frame crowd-average light-curve rows for event ``slug``.

    Each row carries the frame id, ISO date, unix timestamp and the crowd
    mean/stddev produced by supercaldata(); the per-user value is currently
    emitted as the literal string 'null'.
    """
    frames = DataSource.objects.filter(event__slug=slug).order_by('timestamp')
    numsuper, fz, mycals, std, nodata = supercaldata(user, slug)
    return [
        {
            'id': "%i" % frame.id,
            'date': frame.timestamp.isoformat(" "),
            'datestamp': timegm(frame.timestamp.timetuple()) + 1e-6 * frame.timestamp.microsecond,
            'data': {
                'mean': fz[idx],
                'std': std[idx],
                'mine': 'null',  # per-user values not wired in yet
            },
        }
        for idx, frame in enumerate(frames)
    ]
def supercaldata(user,slug):
    """Combine every qualifying user's calibrator curves for event ``slug``.

    For each person with a current 'D' (dip) decision, each complete
    calibrator series is turned into a (source-bg)/(cal-bg) curve; all
    curves are stacked, normalised by the mean of their first/last three
    samples, and averaged across contributors.

    Returns (numsuper, fz, mycals, std, nodata) where fz/std are the mean
    and standard deviation per frame, or (None, [], [], [], None) on
    failure.
    """
    # Extract the name of the planet being analysed
    planet = Event.objects.get(slug=slug)
    # Pull all of the decisions into an object
    decs = Decision.objects.values_list('person','source').filter(value='D', current=True, planet=planet, source__datacollection__display=True).annotate(Count('source'))
    numobs = planet.numobs
    # Create empty list to store calibrators and datapoints
    calibs = []
    mypoints = []
    # Count number of decisions
    numsuper = decs.count()
    # Lists are created here
    peoplelst,sourcelst,tmp = zip(*decs)
    # Organise list of people and sources
    people = set(peoplelst)
    sources = set(sourcelst)
    # Import entire Datapoint database and sort by timestamp
    cache_name = '{}_datapoints'.format(slug)
    db = cache.get(cache_name)
    if not db:
        db = Datapoint.objects.filter(ident=slug).values_list('user_id','coorder__source','value','pointtype').order_by('tstamp')
        cache.set(cache_name, db, 120)
    # Convert to numpy np.array
    dp_array = np.array(db)
    # Read in all values of calibrators
    calvals_data = Datapoint.objects.values_list('user_id','coorder__source','value').filter(coorder__source__in=sources,pointtype='C',coorder__source__final=True,coorder__complete=True,coorder__display=True).order_by('tstamp')
    # Convert to numpy np.array
    calvals_array = np.array(np.vstack(calvals_data))
    # Iterate over each person
    for p in people:
        # Empty list to store calibrators
        calslist = []
        # Query datapoints to extract all values for given planet
        # Both dp_array[:,1] and calvals_array[:,0] extract entries for user_id==p from column 0
        vals = dp_array[dp_array[:,0]==p]
        if vals.size == 0:
            # Jump to the next person if we don't have any values
            continue
        calvals = calvals_array[calvals_array[:,0]==p]
        if calvals.size == 0:
            # Jump to the next person if we don't have any calibrator values
            continue
        # Query vals to extract average values
        # vals[:,6]=='S' and vals[:,6]=='B' extract the entries from vals that have pointtype=='S' and 'B' in column 6. sc_extract[:,4] and bg_extract[:,4] pulls the exact source and background values for those entries from column 4
        sc_extract = vals[vals[:,3]=='S']
        sc = sc_extract[:,2]
        bg_extract = vals[vals[:,3]=='B']
        bg = bg_extract[:,2]
        # Iterates over the number of sources defined earlier
        for c in sources:
            # Determines their associated averages
            # Performs similar routine to above to extract the source type from column 2, and then the values from column 3
            calpoints_extract = calvals[calvals[:,1]==c]
            calpoints = calpoints_extract[:,2]
            # If there are more calibrator points than observations
            if len(calpoints) == numobs:
                # Append calpoints
                calslist.append(list(calpoints))
        # Loops through calslist
        if len(calslist) > 0:
            # Stacks the values
            calstack = np.array([])
            calstack = np.vstack(calslist)
            #logger.debug('calstack=',calstack)
            # This throws a wobbly sometimes
            cc = (sc-bg)/(calstack-bg)
            calibs.append(cc.tolist())
    # Create normalisation function
    norm_a = lambda a: np.mean(np.r_[a[:3],a[-3:]])
    mycals = []
    #logger.debug('calibs=', calibs)
    try:
        # Stacks all of the calibrators
        cala = np.vstack(calibs)
        #logger.debug('cala=', cala)
        # Normalises stacked calibrators
        norms = np.apply_along_axis(norm_a, 1, cala)
        #logger.debug('norms=', norms)
        # Determines the length of the stacked calibrators
        dim = len(cala)
        #logger.debug('dim=', dim)
        # Normalises the calibrators
        norm1 = cala/norms.reshape(dim,1)
        #logger.debug('norms.reshape(dim,1)=', norms.reshape(dim,1))
        #logger.debug('norm1=', norm1)
        # Empty list to store calibrators
        mynorm1=[]
        # If mypoints is not an empty list
        # NOTE(review): mypoints is never appended to above, so this branch
        # is currently dead code (and `average` below is unqualified --
        # would NameError under the module's `np` import style). Confirm.
        if mypoints != []:
            #mynorms = apply_along_axis(norm_a, 1, mypoints)
            # Averages the datapoints
            myaves = average(mypoints,0)
            # Averages the normalised points
            mynorm_val = norm_a(myaves)
            # Normalises the averages
            mycals = list(myaves/mynorm_val)
    except Exception as e:
        logger.error(e)
        logger.error("\033[1;35mHave you started again but not removed all the data?\033[1;m")
        return None,[],[],[],None
    #if dim != len(mycals):
    # check if I have a full set of data, if not we need to do all the calibrator averages manually
    # Performs mean statistics (normalise, variance, standard dev.)
    norm_alt = np.mean(norm1,axis=0)
    variance = np.var(norm1,axis=0)
    std = np.sqrt(variance)
    fz = list(norm_alt)
    # Final return statements
    nodata = False
    # NOTE(review): because the mypoints branch above is dead, mycals is
    # always [], so the else branch below appears unreachable; if mycals
    # were ever non-empty this would discard the computed fz/std. Confirm.
    if mycals == []:
        mycals = myaverages(planet.slug,user)
        nodata = True
        return numsuper,fz,mycals,list(std),nodata
    else:
        return None,[],[],[],None
def myaverages(code,person):
    """Build this person's own mean normalised light curve for event ``code``.

    Uses only calibrators the person marked as having a dip ('D' decision)
    and for which a complete set of datapoints exists (possibly including
    other people's measurements).  Gaps in the person's own series are
    filled via dictconv() keyed on the frame ids.

    Returns the peak-normalised mean curve, or a list of zeros when the
    person has no usable 'D' decisions.
    """
    ds = DataSource.objects.filter(event__slug=code).order_by('timestamp').values_list('id',flat=True)
    now = datetime.now()
    cals = []
    mycals = []
    dates = []
    stamps = []
    timestamps = []
    normcals = []
    maxvals = []
    cats = []
    # Find which Cat Sources I have observed and there is a complete set of (including other people's data)
    # Unlike CalibrateMyData it only includes set where there are full sets
    e = Event.objects.get(slug=code)
    dc = DataCollection.objects.filter(~Q(source=None),person=person,planet=e).order_by('calid')
    cs = CatSource.objects.filter(id__in=[c.source.id for c in dc]).annotate(count=Count('datacollection__datapoint')).filter(count__gte=e.numobs).values_list('id',flat=True)
    mydecisions = Decision.objects.filter(person=person,current=True,planet=e,value='D').values_list('source__id',flat=True)
    if cs.count() > 0:
        # Only use ones where we have more than numobs
        for c in dc:
            # make sure these are in the mydecision list (i.e. I've said they have a Dip)
            if c.source.id in mydecisions:
                v = Datapoint.objects.filter(coorder__source=c.source.id,pointtype='C',user=person).order_by('data__timestamp').values_list('data__id','value')
                cals.append(dict(v))
    if cals:
        # Only proceed if we have calibrators in the list (i.e. np.arrays of numobs)
        points = Datapoint.objects.filter(user=person,data__event__slug=code).order_by('data__timestamp')
        scA = points.filter(pointtype='S').values_list('data__id','value')
        bgA = points.filter(pointtype='B').values_list('data__id','value')
        # Create a list of normalised values with gaps if I haven't done the full dataset but have contributed to a 'Dip' classification
        sc=dict(scA)
        bg=dict(bgA)
        sc = dictconv(sc,ds)
        sc = np.array(sc)
        bg = dictconv(bg,ds)
        bg = np.array(bg)
        for cal in cals:
            # Calibrated curve; nan_to_num papers over frames with gaps
            val = (sc - bg)/(np.array(dictconv(cal,ds))-bg)
            val = np.nan_to_num(val)
            normcals.append(val)
        normmean = np.mean(normcals,axis=0)
        return list(normmean/max(normmean))
    # If they have no 'D' decisions
    return [0.]*ds.count()
def admin_averagecals(code,person):
    """Average the crowd's calibrator light curves for event ``code``.

    Uses aggregate SQL queries to speed up the averaging of data points.
    With ``person == 0`` every calibrator that has a complete set of
    observations is averaged across all users (preferring users with a
    current 'D' decision) -- for problem solving; otherwise only the given
    person's collections and measurements are used.

    Returns (normcals, stamps, ids, cats) when person == 0, else the
    8-tuple (cals, normcals, sc, bg, dates, stamps, ids, cats); empty
    lists when there is nothing to average.
    """
    cals = []
    dates = []
    stamps = []
    normcals = []
    maxvals = []
    callist = []
    cats = []
    # Find which Cat Sources I have observed and there is a complete set of (including other people's data)
    # Unlike CalibrateMyData it only includes set where there are full sets
    e = Event.objects.filter(name=code)[0]
    if person == 0:
        dc = DataCollection.objects.filter(~Q(source=None),planet__slug=code).values_list('source__id',flat=True).distinct()
        cs = CatSource.objects.filter(id__in=[c for c in dc]).annotate(count=Count('datacollection__datapoint')).filter(count__gte=e.numobs).values_list('id',flat=True).distinct()
        dcall = DataCollection.objects.filter(planet=e,source__in=cs).values_list('id',flat=True)
        logger.debug("** Collections %s" % dcall.count())
        if cs.count() > 0:
            # Only use ones where we have more than numobs
            for c in dc:
                # make sure these are in the CatSource list (can't use cs because the order isn't right)
                if c in cs:
                    # Prefer averaging over users who recorded a 'Dip' decision
                    people = Decision.objects.filter(source__id=c,current=True,value='D').values_list('person',flat=True)
                    if people:
                        v = Datapoint.objects.filter(coorder__source=c,pointtype='C',user__id__in=people).order_by('data__timestamp').values_list('data__id').annotate(Avg('value'))
                    else:
                        v = Datapoint.objects.filter(coorder__source=c,pointtype='C').order_by('data__timestamp').values_list('data__id').annotate(Avg('value'))
                    # Double check we have same number of obs and cals
                    if v.count() == e.numobs:
                        ids,b = zip(*v)
                        cals.append(list(b))
                        decvalue_full = Decision.objects.filter(source=c,planet__slug=code,current=True).values_list('value').annotate(total=Count('id'))
                        decvalue = dict((str(key),value) for key,value in decvalue_full)
                        source = CatSource.objects.get(id=c)
                        cat_item = {'sourcename':str(source.name),'catalogue':str(source.catalogue),'sourceid': str(c),'include':source.final}
                        cat_item['decisions'] = decvalue
                        cats.append(cat_item)
                        callist.append(c)
    else:
        dc = DataCollection.objects.filter(~Q(source=None),person=person,planet__slug=code).order_by('calid')
        cs = CatSource.objects.filter(id__in=[c.source.id for c in dc]).annotate(count=Count('datacollection__datapoint')).filter(count__gte=e.numobs).values_list('id',flat=True).distinct()
        dcall = DataCollection.objects.filter(planet=e,source__in=cs).values_list('id',flat=True)
        logger.debug("** Collections %s" % dcall.count())
        if cs.count() > 0:
            # Only use ones where we have more than numobs
            for c in dc:
                # make sure these are in the CatSource list (can't use cs because the order isn't right)
                if c.source.id in cs:
                    v = Datapoint.objects.filter(coorder__source=c.source.id,pointtype='C').order_by('data__timestamp').values_list('data__id').annotate(Avg('value'))
                    # Double check we have same number of obs and cals
                    if v.count() == e.numobs:
                        ids,b = zip(*v)
                        cals.append(list(b))
                        try:
                            decvalue = Decision.objects.filter(source=c.source,person=person,planet__slug=code,current=True)[0].value
                        except:
                            decvalue ='X'
                        cat_item = {'sourcename':c.source.name,'catalogue':c.source.catalogue}
                        cat_item['decision'] = decvalue
                        cat_item['order'] = str(c.calid)
                        cats.append(cat_item)
                        callist.append(c.source.id)
    if callist:
        # Only proceed if we have calibrators in the list (i.e. np.arrays of numobs)
        ds = DataSource.objects.filter(event=e).order_by('timestamp')
        maxnum = ds.count()
        ds = ds.values_list('id',flat=True)
        if person == 0:
            # Per-frame crowd averages of source and background
            people = Decision.objects.filter(planet=e,value='D',current=True).values_list('person',flat=True).distinct()
            dp = Datapoint.objects.filter(data__event=e,user__id__in=people)
            sc = []
            bg = []
            for d in ds:
                sc_ave = dp.filter(pointtype='S',data__id=d).aggregate(val=Avg('value'))
                bg_ave = dp.filter(pointtype='B',data__id=d).aggregate(val=Avg('value'))
                sc.append(sc_ave['val'])
                bg.append(bg_ave['val'])
        else:
            sc_my = ds.filter(datapoint__pointtype='S',datapoint__user=person).annotate(value=Sum('datapoint__value')).values_list('id','value')
            bg_my = ds.filter(datapoint__pointtype='B',datapoint__user=person).annotate(value=Sum('datapoint__value')).values_list('id','value')
            if sc_my.count() < maxnum:
                # Incomplete personal dataset: bail out with what we have
                return cals,normcals,[],[],dates,stamps,[],cats
            else:
                tmp,sc=zip(*sc_my)
                tmp,bg=zip(*bg_my)
        # Convert to numpy np.arrays to allow simple calibrations
        sc = np.array(sc)
        bg = np.array(bg)
        for cal in cals:
            val = (sc - bg)/(np.array(cal)-bg)
            # Bug fix: qualify mean/r_ with np. -- the bare names are not
            # defined under this module's `np` import style (cf. photometry()).
            maxval = np.mean(np.r_[val[:3],val[-3:]])
            maxvals.append(maxval)
            norm = val/maxval
            normcals.append(list(norm))
        # Find my data and create unix timestamps
        unixt = lambda x: timegm(x.timetuple())+1e-6*x.microsecond
        iso = lambda x: x.isoformat(" ")
        times = ds.values_list('timestamp',flat=True)
        stamps = map(unixt,times)
        dates = map(iso,times)
        if person == 0:
            return normcals,stamps,[int(i) for i in ids],cats
        return cals,normcals,list(sc),list(bg),dates,stamps,[int(i) for i in ids],cats
    if person == 0:
        return normcals,stamps,[],[]
    return cals,normcals,[],[],dates,stamps,[],cats
def averagecals_async(e):
    """Recompute the stored AverageSet rows for event ``e``.

    For each catalogue source with displayed collections, and then for the
    target star ('S') and background ('B'), the per-frame mean of all
    users' datapoints is stored as a ';'-joined string -- but only when
    the number of averaged frames matches e.numobs.  Intended to run
    asynchronously (e.g. from a task queue).
    """
    catsource = DataCollection.objects.values_list('source').filter(planet=e, display=True).annotate(Count('source'))
    for cat in catsource:
        if cat[0] != None:
            dps = Datapoint.objects.filter(data__event=e, coorder__source__id=cat[0], pointtype='C').order_by('data__timestamp').values_list('data').annotate(Avg('value'))
            # Double check we have same number of obs and cals
            if dps.count() == e.numobs:
                # NOTE(review): unused flag named 'create' here but
                # 'created' below -- inconsistent but harmless.
                av, create = AverageSet.objects.get_or_create(star=CatSource.objects.get(id=cat[0]),planet=e,settype='C')
                av.values = ";".join([str(i[1]) for i in dps])
                av.save()
                logger.debug("Updated average sets on planet %s for %s" % (e.title,CatSource.objects.get(id=cat[0])))
    # Make averages for Source star and Background
    for category in ['S','B']:
        dps = Datapoint.objects.filter(data__event=e, pointtype=category).order_by('data__timestamp').values_list('data').annotate(Avg('value'))
        # Double check we have same number of obs and cals
        if dps.count() == e.numobs:
            av, created = AverageSet.objects.get_or_create(planet=e,settype=category)
            av.values = ";".join([str(i[1]) for i in dps])
            av.save()
            logger.debug("Updated average sets on planet %s for %s" % (e.title,category))
    return
def leastmeasured(code):
    """Return marker dicts (x, y, aperture radius) for the four catalogue
    sources of event ``code`` with the fewest recorded data collections.
    """
    event = Event.objects.get(slug=code)
    undermeasured = DataCollection.objects.values('source').filter(
        ~Q(source=None), planet=event).annotate(
        count=Count('source')).order_by('count')[:4]
    radius = int(event.radius)
    return [
        {'x': int(source.xpos), 'y': int(source.ypos), 'r': radius}
        for source in (CatSource.objects.get(id=entry['source'])
                       for entry in undermeasured)
    ]
| gpl-3.0 |
o5k/openerp-oemedical-v0.1 | openerp/addons/oemedical/oemedical_gynecology_and_obstetrics/__init__.py | 4 | 1169 | # -*- coding: utf-8 -*-
#/#############################################################################
#
# Tech-Receptives Solutions Pvt. Ltd.
# Copyright (C) 2004-TODAY Tech-Receptives(<http://www.techreceptives.com>)
# Special Credit and Thanks to Thymbra Latinoamericana S.A.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#/#############################################################################
import oemedical_gynecology_and_obstetrics
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
sciCloud/OLiMS | lims/browser/widgets/referenceresultswidget.py | 2 | 9295 | from dependencies.dependency import ClassSecurityInfo
from dependencies.dependency import registerWidget
from dependencies.dependency import TypesWidget
from dependencies.dependency import getToolByName
from lims.browser import BrowserView
from lims import bikaMessageFactory as _
from lims.utils import t
from lims.browser.bika_listing import BikaListingView
class ReferenceResultsView(BikaListingView):
    """ bika listing to display reference results for a
        Reference Sample's widget
        referenceresults parameter must be list of
        dict(ReferenceResultsField value)
    """
    def __init__(self, context, request, fieldvalue, allow_edit):
        """Configure a flat, non-selectable listing of all analysis
        services, pre-populated with the field's current reference
        results (keyed by service UID).
        """
        BikaListingView.__init__(self, context, request)
        self.context_actions = {}
        # No catalog contents wanted: folderitems() below builds rows itself
        self.contentFilter = {'review_state': 'impossible_state'}
        self.base_url = self.context.absolute_url()
        self.view_url = self.base_url
        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_all_checkbox = False
        self.show_select_column = False
        self.pagesize = 999999
        self.allow_edit = allow_edit
        self.show_categories = True
        # self.expand_all_categories = False
        self.referenceresults = {}
        # we want current field value as a dict
        # key:uid, value:{dictionary from field list of dict}
        for refres in fieldvalue:
            self.referenceresults[refres['uid']] = refres
        self.columns = {
            'service': {'title': _('Service')},
            'result': {'title': _('Expected Result')},
            'error': {'title': _('Permitted Error %')},
            'min': {'title': _('Min')},
            'max': {'title': _('Max')}
        }
        self.review_states = [
            {'id': 'default',
             'title': _('All'),
             'contentFilter': {},
             'transitions': [],
             'columns': ['service', 'result', 'error', 'min', 'max'],
             },
        ]
    def folderitems(self):
        """Build one editable listing row per AnalysisService, merging in
        any existing reference result for that service and decorating the
        service title with its status icons.
        """
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        self.categories = []
        services = bsc(portal_type='AnalysisService',
                       sort_on='sortable_title')
        items = []
        for service in services:
            service = service.getObject()
            cat = service.getCategoryTitle()
            if cat not in self.categories:
                self.categories.append(cat)
            # Use the stored reference result when one exists, else blanks
            if service.UID() in self.referenceresults:
                refres = self.referenceresults[service.UID()]
            else:
                refres = {'uid': service.UID(),
                          'result': '',
                          'min': '',
                          'max': ''}
            after_icons = '<span class="discreet">(%s)</span> ' % \
                service.getKeyword()
            if service.getAccredited():
                after_icons += "<img\
                src='%s/++resource++bika.lims.images/accredited.png'\
                title='%s'>" % (self.context.absolute_url(),
                                _("Accredited"))
            if service.getReportDryMatter():
                after_icons += "<img\
                src='%s/++resource++bika.lims.images/dry.png'\
                title='%s'>" % (self.context.absolute_url(),
                                _("Can be reported as dry matter"))
            if service.getAttachmentOption() == 'r':
                after_icons += "<img\
                src='%s/++resource++bika.lims.images/attach_reqd.png'\
                title='%s'>" % (self.context.absolute_url(),
                                _("Attachment required"))
            if service.getAttachmentOption() == 'n':
                after_icons += "<img\
                src='%s/++resource++bika.lims.images/attach_no.png'\
                title='%s'>" % (self.context.absolute_url(),
                                _('Attachment not permitted'))
            workflow = getToolByName(self.context, 'portal_workflow')
            state = workflow.getInfoFor(service, 'inactive_state', '')
            unit = service.getUnit()
            unitspan = unit and "<span class='discreet'>%s</span>" % unit or ''
            percspan = "<span class='discreet'>%</span>"
            # this folderitems doesn't subclass from the bika_listing.py
            # so we create items from scratch
            item = {
                'obj': service,
                'id': service.getId(),
                'uid': service.UID(),
                'title': service.Title(),
                'category': cat,
                'selected': service.UID() in self.referenceresults.keys(),
                'type_class': 'contenttype-ReferenceResult',
                'url': service.absolute_url(),
                'relative_url': service.absolute_url(),
                'view_url': service.absolute_url(),
                'service': service.Title(),
                'result': refres['result'],
                'error': '',
                'min': refres['min'],
                'max': refres['max'],
                'replace': {},
                'before': {},
                'after': {'service': after_icons,
                          'result': unitspan,
                          'min': unitspan,
                          'max': unitspan,
                          'error': percspan},
                'choices': {},
                'class': "state-%s" % state,
                'state_class': 'state-%s' % state,
                'allow_edit': ['result', 'error', 'min', 'max'],
                'required': [],
            }
            items.append(item)
        self.categories.sort()
        return items
class TableRenderShim(BrowserView):
    """ This view renders the actual table.
        It's in it's own view so that we can tie it to a URL
        for javascript to re-render the table during ReferenceSample edit.
    """
    def __init__(self, context, request, fieldvalue=None, allow_edit=True):
        """ If uid is in request, we use that reference definition's reference
            results value. Otherwise the parameter specified here.

            Bug fix: the previous default of a mutable ``{}`` literal is
            replaced with a ``None`` sentinel (shared-mutable-default trap);
            behaviour for all callers is unchanged.
        """
        super(TableRenderShim, self).__init__(context, request)
        self.allow_edit = allow_edit
        if 'uid' in request:
            uc = getToolByName(context, 'uid_catalog')
            refres = uc(UID=request['uid'])[
                0].getObject().getReferenceResults()
            self.fieldvalue = refres
        else:
            self.fieldvalue = fieldvalue if fieldvalue is not None else {}
    def __call__(self):
        """ Prints a bika listing with categorized services.
        field contains the archetypes field with a list of services in it
        """
        view = ReferenceResultsView(self.context,
                                    self.request,
                                    fieldvalue=self.fieldvalue,
                                    allow_edit=self.allow_edit)
        return view.contents_table(table_only=True)
class ReferenceResultsWidget(TypesWidget):
    """Archetypes widget that renders and parses a ReferenceResultsField."""
    _properties = TypesWidget._properties.copy()
    _properties.update({
        'macro': "bika_widgets/referenceresultswidget",
        'helper_js': ("bika_widgets/referenceresultswidget.js",),
        'helper_css': ("bika_widgets/referenceresultswidget.css",)
    })
    security = ClassSecurityInfo()
    security.declarePublic('process_form')
    def process_form(self, instance, field, form,
                     empty_marker=None, emptyReturnsMarker=False):
        """ Return a list of dictionaries fit for ReferenceResultsField
            consumption. Only services which have float()able entries in
            result,min and max field will be included.
            If any of min, max, or result fields are blank, the row value
            is ignored here.
        """
        value = []
        if 'service' in form:
            # Form fields are dicts keyed by service UID
            for uid, service in form['service'][0].items():
                result = form['result'][0][uid]
                result = result if result else False
                Min = form['min'][0][uid]
                Min = Min if Min else False
                Max = form['max'][0][uid]
                Max = Max if Max else False
                # big old false check because these could be zeroes
                if Min is not False \
                        and Max is not False \
                        and result is not False:
                    value.append({'uid': uid,
                                  'result': result,
                                  'min': Min,
                                  'max': Max})
        return value, {}
    security.declarePublic('ReferenceResults')
    def ReferenceResults(self, field, allow_edit=False):
        """ Prints a bika listing with categorized services.
            field contains the archetypes field with a list of services in it
        """
        fieldvalue = getattr(field, field.accessor)()
        view = TableRenderShim(self,
                               self.REQUEST,
                               fieldvalue=fieldvalue,
                               allow_edit=allow_edit)
        return view()
# Register the widget with the Archetypes registry so schemas can use it.
registerWidget(ReferenceResultsWidget,
               title='Reference definition results',
               description=('Reference definition results.'),
               )
| agpl-3.0 |
memtoko/django | tests/gis_tests/geo3d/tests.py | 17 | 12625 | from __future__ import unicode_literals
import os
import re
from unittest import skipUnless
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.geos import HAS_GEOS
from django.test import TestCase, ignore_warnings, skipUnlessDBFeature
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
if HAS_GEOS:
from django.contrib.gis.db.models import Union, Extent3D
from django.contrib.gis.geos import GEOSGeometry, LineString, Point, Polygon
from .models import (City3D, Interstate2D, Interstate3D, InterstateProj2D,
InterstateProj3D, Point2D, Point3D, MultiPoint3D, Polygon2D, Polygon3D)
if HAS_GDAL:
from django.contrib.gis.utils import LayerMapping, LayerMapError
data_path = os.path.realpath(os.path.join(os.path.dirname(upath(__file__)), '..', 'data'))
city_file = os.path.join(data_path, 'cities', 'cities.shp')
vrt_file = os.path.join(data_path, 'test_vrt', 'test_vrt.vrt')
# The coordinates of each city, with Z values corresponding to their
# altitude in meters.
city_data = (
('Houston', (-95.363151, 29.763374, 18)),
('Dallas', (-96.801611, 32.782057, 147)),
('Oklahoma City', (-97.521157, 34.464642, 380)),
('Wellington', (174.783117, -41.315268, 14)),
('Pueblo', (-104.609252, 38.255001, 1433)),
('Lawrence', (-95.235060, 38.971823, 251)),
('Chicago', (-87.650175, 41.850385, 181)),
('Victoria', (-123.305196, 48.462611, 15)),
)
# Reference mapping of city name to its altitude (Z value).
city_dict = {name: coords for name, coords in city_data}
# 3D freeway data derived from the National Elevation Dataset:
# http://seamless.usgs.gov/products/9arc.php
interstate_data = (
('I-45',
'LINESTRING(-95.3708481 29.7765870 11.339,-95.3694580 29.7787980 4.536,'
'-95.3690305 29.7797359 9.762,-95.3691886 29.7812450 12.448,'
'-95.3696447 29.7850144 10.457,-95.3702511 29.7868518 9.418,'
'-95.3706724 29.7881286 14.858,-95.3711632 29.7896157 15.386,'
'-95.3714525 29.7936267 13.168,-95.3717848 29.7955007 15.104,'
'-95.3717719 29.7969804 16.516,-95.3717305 29.7982117 13.923,'
'-95.3717254 29.8000778 14.385,-95.3719875 29.8013539 15.160,'
'-95.3720575 29.8026785 15.544,-95.3721321 29.8040912 14.975,'
'-95.3722074 29.8050998 15.688,-95.3722779 29.8060430 16.099,'
'-95.3733818 29.8076750 15.197,-95.3741563 29.8103686 17.268,'
'-95.3749458 29.8129927 19.857,-95.3763564 29.8144557 15.435)',
(11.339, 4.536, 9.762, 12.448, 10.457, 9.418, 14.858,
15.386, 13.168, 15.104, 16.516, 13.923, 14.385, 15.16,
15.544, 14.975, 15.688, 16.099, 15.197, 17.268, 19.857,
15.435),
),
)
# Bounding box polygon for inner-loop of Houston (in projected coordinate
# system 32140), with elevation values from the National Elevation Dataset
# (see above).
bbox_data = (
'POLYGON((941527.97 4225693.20,962596.48 4226349.75,963152.57 4209023.95,'
'942051.75 4208366.38,941527.97 4225693.20))',
(21.71, 13.21, 9.12, 16.40, 21.71)
)
@skipUnless(HAS_GDAL, "GDAL is required for Geo3DTest.")
@skipUnlessDBFeature("gis_enabled", "supports_3d_functions")
class Geo3DTest(TestCase):
    """
    Only a subset of the PostGIS routines are 3D-enabled, and this TestCase
    tries to test the features that can handle 3D and that are also
    available within GeoDjango. For more information, see the PostGIS docs
    on the routines that support 3D:
    http://postgis.net/docs/PostGIS_Special_Functions_Index.html#PostGIS_3D_Functions
    """

    # NOTE(review): fixtures are loaded per-test via the _load_* helpers
    # below rather than setUpTestData, so each test starts from an empty DB.

    def _load_interstate_data(self):
        # Interstate (2D / 3D and Geographic/Projected variants)
        for name, line, exp_z in interstate_data:
            line_3d = GEOSGeometry(line, srid=4269)
            # Strip the Z coordinate to derive the 2D twin of the same line.
            line_2d = LineString([l[:2] for l in line_3d.coords], srid=4269)
            # Creating a geographic and projected version of the
            # interstate in both 2D and 3D.
            Interstate3D.objects.create(name=name, line=line_3d)
            InterstateProj3D.objects.create(name=name, line=line_3d)
            Interstate2D.objects.create(name=name, line=line_2d)
            InterstateProj2D.objects.create(name=name, line=line_2d)

    def _load_city_data(self):
        # city_data tuples are (name, (lon, lat, z-altitude)).
        for name, pnt_data in city_data:
            City3D.objects.create(name=name, point=Point(*pnt_data, srid=4326))

    def _load_polygon_data(self):
        bbox_wkt, bbox_z = bbox_data
        bbox_2d = GEOSGeometry(bbox_wkt, srid=32140)
        # Pair the exterior ring's 2D coordinates with the reference Z
        # values to build the 3D polygon.
        bbox_3d = Polygon(tuple((x, y, z) for (x, y), z in zip(bbox_2d[0].coords, bbox_z)), srid=32140)
        Polygon2D.objects.create(name='2D BBox', poly=bbox_2d)
        Polygon3D.objects.create(name='3D BBox', poly=bbox_3d)

    def test_3d_hasz(self):
        """
        Make sure data is 3D and has expected Z values -- shouldn't change
        because of coordinate system.
        """
        self._load_interstate_data()
        for name, line, exp_z in interstate_data:
            interstate = Interstate3D.objects.get(name=name)
            interstate_proj = InterstateProj3D.objects.get(name=name)
            for i in [interstate, interstate_proj]:
                self.assertTrue(i.line.hasz)
                self.assertEqual(exp_z, tuple(i.line.z))
        self._load_city_data()
        for name, pnt_data in city_data:
            city = City3D.objects.get(name=name)
            z = pnt_data[2]
            self.assertTrue(city.point.hasz)
            self.assertEqual(z, city.point.z)

    def test_3d_polygons(self):
        """
        Test the creation of polygon 3D models.
        """
        self._load_polygon_data()
        p3d = Polygon3D.objects.get(name='3D BBox')
        self.assertTrue(p3d.poly.hasz)
        self.assertIsInstance(p3d.poly, Polygon)
        self.assertEqual(p3d.poly.srid, 32140)

    def test_3d_layermapping(self):
        """
        Testing LayerMapping on 3D models.
        """
        point_mapping = {'point': 'POINT'}
        mpoint_mapping = {'mpoint': 'MULTIPOINT'}
        # The VRT is 3D, but should still be able to map sans the Z.
        lm = LayerMapping(Point2D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point2D.objects.count())
        # The city shapefile is 2D, and won't be able to fill the coordinates
        # in the 3D model -- thus, a LayerMapError is raised.
        self.assertRaises(LayerMapError, LayerMapping,
                          Point3D, city_file, point_mapping, transform=False)
        # 3D model should take 3D data just fine.
        lm = LayerMapping(Point3D, vrt_file, point_mapping, transform=False)
        lm.save()
        self.assertEqual(3, Point3D.objects.count())
        # Making sure LayerMapping.make_multi works right, by converting
        # a Point25D into a MultiPoint25D.
        lm = LayerMapping(MultiPoint3D, vrt_file, mpoint_mapping, transform=False)
        lm.save()
        self.assertEqual(3, MultiPoint3D.objects.count())

    def test_kml(self):
        """
        Test GeoQuerySet.kml() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.kml(precision=6).get(name='Houston')
        # KML should be 3D.
        # `SELECT ST_AsKML(point, 6) FROM geo3d_city3d WHERE name = 'Houston';`
        ref_kml_regex = re.compile(r'^<Point><coordinates>-95.363\d+,29.763\d+,18</coordinates></Point>$')
        self.assertTrue(ref_kml_regex.match(h.kml))

    def test_geojson(self):
        """
        Test GeoQuerySet.geojson() with Z values.
        """
        self._load_city_data()
        h = City3D.objects.geojson(precision=6).get(name='Houston')
        # GeoJSON should be 3D
        # `SELECT ST_AsGeoJSON(point, 6) FROM geo3d_city3d WHERE name='Houston';`
        ref_json_regex = re.compile(r'^{"type":"Point","coordinates":\[-95.363151,29.763374,18(\.0+)?\]}$')
        self.assertTrue(ref_json_regex.match(h.geojson))

    def test_union(self):
        """
        Testing the Union aggregate of 3D models.
        """
        # PostGIS query that returned the reference EWKT for this test:
        # `SELECT ST_AsText(ST_Union(point)) FROM geo3d_city3d;`
        self._load_city_data()
        ref_ewkt = (
            'SRID=4326;MULTIPOINT(-123.305196 48.462611 15,-104.609252 38.255001 1433,'
            '-97.521157 34.464642 380,-96.801611 32.782057 147,-95.363151 29.763374 18,'
            '-95.23506 38.971823 251,-87.650175 41.850385 181,174.783117 -41.315268 14)'
        )
        ref_union = GEOSGeometry(ref_ewkt)
        union = City3D.objects.aggregate(Union('point'))['point__union']
        self.assertTrue(union.hasz)
        # Ordering of points in the resulting geometry may vary between implementations
        self.assertSetEqual({p.ewkt for p in ref_union}, {p.ewkt for p in union})

    @ignore_warnings(category=RemovedInDjango20Warning)
    def test_extent(self):
        """
        Testing the Extent3D aggregate for 3D models.
        """
        self._load_city_data()
        # `SELECT ST_Extent3D(point) FROM geo3d_city3d;`
        ref_extent3d = (-123.305196, -41.315268, 14, 174.783117, 48.462611, 1433)
        extent1 = City3D.objects.aggregate(Extent3D('point'))['point__extent3d']
        extent2 = City3D.objects.extent3d()

        def check_extent3d(extent3d, tol=6):
            for ref_val, ext_val in zip(ref_extent3d, extent3d):
                self.assertAlmostEqual(ref_val, ext_val, tol)

        for e3d in [extent1, extent2]:
            check_extent3d(e3d)
        self.assertIsNone(City3D.objects.none().extent3d())
        self.assertIsNone(City3D.objects.none().aggregate(Extent3D('point'))['point__extent3d'])

    def test_perimeter(self):
        """
        Testing GeoQuerySet.perimeter() on 3D fields.
        """
        self._load_polygon_data()
        # Reference query for values below:
        # `SELECT ST_Perimeter3D(poly), ST_Perimeter2D(poly) FROM geo3d_polygon3d;`
        ref_perim_3d = 76859.2620451
        ref_perim_2d = 76859.2577803
        tol = 6
        self.assertAlmostEqual(ref_perim_2d,
                               Polygon2D.objects.perimeter().get(name='2D BBox').perimeter.m,
                               tol)
        self.assertAlmostEqual(ref_perim_3d,
                               Polygon3D.objects.perimeter().get(name='3D BBox').perimeter.m,
                               tol)

    def test_length(self):
        """
        Testing GeoQuerySet.length() on 3D fields.
        """
        # ST_Length_Spheroid Z-aware, and thus does not need to use
        # a separate function internally.
        # `SELECT ST_Length_Spheroid(line, 'SPHEROID["GRS 1980",6378137,298.257222101]')
        # FROM geo3d_interstate[2d|3d];`
        self._load_interstate_data()
        tol = 3
        ref_length_2d = 4368.1721949481
        ref_length_3d = 4368.62547052088
        self.assertAlmostEqual(ref_length_2d,
                               Interstate2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               Interstate3D.objects.length().get(name='I-45').length.m,
                               tol)
        # Making sure `ST_Length3D` is used on for a projected
        # and 3D model rather than `ST_Length`.
        # `SELECT ST_Length(line) FROM geo3d_interstateproj2d;`
        ref_length_2d = 4367.71564892392
        # `SELECT ST_Length3D(line) FROM geo3d_interstateproj3d;`
        ref_length_3d = 4368.16897234101
        self.assertAlmostEqual(ref_length_2d,
                               InterstateProj2D.objects.length().get(name='I-45').length.m,
                               tol)
        self.assertAlmostEqual(ref_length_3d,
                               InterstateProj3D.objects.length().get(name='I-45').length.m,
                               tol)

    def test_scale(self):
        """
        Testing GeoQuerySet.scale() on Z values.
        """
        self._load_city_data()
        # Mapping of City name to reference Z values.
        zscales = (-3, 4, 23)
        for zscale in zscales:
            for city in City3D.objects.scale(1.0, 1.0, zscale):
                self.assertEqual(city_dict[city.name][2] * zscale, city.scale.z)

    def test_translate(self):
        """
        Testing GeoQuerySet.translate() on Z values.
        """
        self._load_city_data()
        ztranslations = (5.23, 23, -17)
        for ztrans in ztranslations:
            for city in City3D.objects.translate(0, 0, ztrans):
                self.assertEqual(city_dict[city.name][2] + ztrans, city.translate.z)
| bsd-3-clause |
sbuss/voteswap | lib/sendgrid/helpers/mail/mail.py | 5 | 21263 | """v3/mail/send response body builder"""
import json
class Mail(object):
    """Builder for the SendGrid v3 ``mail/send`` request body.

    Every attribute defaults to None; :meth:`get` serializes only the
    fields that were populated, so the resulting dict mirrors exactly
    what the caller configured.
    """

    def __init__(self, from_email=None, subject=None, to_email=None, content=None):
        """Optionally wire up the minimum fields needed to send an email.

        :param from_email: Email object for the sender.
        :param subject: subject line string.
        :param to_email: Email object for the recipient.
        :param content: Content object (MIME type + value).
        """
        self.from_email = None
        self.subject = None
        self.personalizations = None
        self.contents = None
        self.attachments = None
        self.template_id = None
        self.sections = None
        self.headers = None
        self.categories = None
        self.custom_args = None
        self.send_at = None
        self.batch_id = None
        self.asm = None
        self.ip_pool_name = None
        self.mail_settings = None
        self.tracking_settings = None
        self.reply_to = None

        # Minimum required to send an email: only configure when all four
        # pieces are present (a partial set would build an invalid body).
        if from_email and subject and to_email and content:
            self.set_from(from_email)
            personalization = Personalization()
            personalization.add_to(to_email)
            self.add_personalization(personalization)
            self.set_subject(subject)
            self.add_content(content)

    def __str__(self):
        # BUG FIX: the original called self.get() without returning it, so
        # str(mail) raised "TypeError: __str__ returned non-string (None)".
        return str(self.get())

    def get(self):
        """Return the request body as a dict ready for JSON encoding."""
        mail = {}
        if self.from_email is not None:
            mail["from"] = self.from_email.get()
        if self.subject is not None:
            mail["subject"] = self.subject
        if self.personalizations is not None:
            mail["personalizations"] = [p.get() for p in self.personalizations]
        if self.contents is not None:
            mail["content"] = [ob.get() for ob in self.contents]
        if self.attachments is not None:
            mail["attachments"] = [ob.get() for ob in self.attachments]
        if self.template_id is not None:
            mail["template_id"] = self.template_id
        if self.sections is not None:
            # Each Section serializes to a one-entry dict; merge them.
            sections = {}
            for key in self.sections:
                sections.update(key.get())
            mail["sections"] = sections
        if self.headers is not None:
            headers = {}
            for key in self.headers:
                headers.update(key.get())
            mail["headers"] = headers
        if self.categories is not None:
            mail["categories"] = [category.get() for category in self.categories]
        if self.custom_args is not None:
            custom_args = {}
            for key in self.custom_args:
                custom_args.update(key.get())
            mail["custom_args"] = custom_args
        if self.send_at is not None:
            mail["send_at"] = self.send_at
        if self.batch_id is not None:
            mail["batch_id"] = self.batch_id
        if self.asm is not None:
            # set_asm() already stored the serialized dict (see below).
            mail["asm"] = self.asm
        if self.ip_pool_name is not None:
            mail["ip_pool_name"] = self.ip_pool_name
        if self.mail_settings is not None:
            mail["mail_settings"] = self.mail_settings.get()
        if self.tracking_settings is not None:
            mail["tracking_settings"] = self.tracking_settings.get()
        if self.reply_to is not None:
            mail["reply_to"] = self.reply_to.get()
        return mail

    def set_from(self, email):
        self.from_email = email

    def set_subject(self, subject):
        self.subject = subject

    def add_personalization(self, personalizations):
        if self.personalizations is None:
            self.personalizations = []
        self.personalizations.append(personalizations)

    def add_content(self, content):
        if self.contents is None:
            self.contents = []
        self.contents.append(content)

    def add_attachment(self, attachment):
        if self.attachments is None:
            self.attachments = []
        self.attachments.append(attachment)

    def set_template_id(self, template_id):
        self.template_id = template_id

    def add_section(self, section):
        if self.sections is None:
            self.sections = []
        self.sections.append(section)

    def add_header(self, header):
        if self.headers is None:
            self.headers = []
        self.headers.append(header)

    def add_category(self, category):
        if self.categories is None:
            self.categories = []
        self.categories.append(category)

    def add_custom_arg(self, custom_arg):
        if self.custom_args is None:
            self.custom_args = []
        self.custom_args.append(custom_arg)

    def set_send_at(self, send_at):
        self.send_at = send_at

    def set_batch_id(self, batch_id):
        self.batch_id = batch_id

    def set_asm(self, asm):
        # NOTE: unlike the other setters this stores the serialized dict,
        # not the ASM object itself; get() relies on that.
        self.asm = asm.get()

    def set_mail_settings(self, mail_settings):
        self.mail_settings = mail_settings

    def set_tracking_settings(self, tracking_settings):
        self.tracking_settings = tracking_settings

    def set_ip_pool_name(self, ip_pool_name):
        self.ip_pool_name = ip_pool_name

    def set_reply_to(self, reply_to):
        self.reply_to = reply_to
################################################################
# The following objects are meant to be extended with validation
################################################################
class Email(object):
    """A sender or recipient address with an optional display name."""

    def __init__(self, email=None, name=None):
        self.name = name
        self.email = email

    def set_name(self, name):
        self.name = name

    def set_email(self, email):
        self.email = email

    def get(self):
        """Serialize to a dict, omitting unset fields."""
        payload = {}
        if self.name is not None:
            payload["name"] = self.name
        if self.email is not None:
            payload["email"] = self.email
        return payload
class Content(object):
    """A body part of the message: a MIME type plus its value."""

    def __init__(self, type=None, value=None):
        self.type = type
        self.value = value

    def set_type(self, type):
        self.type = type

    def set_value(self, value):
        self.value = value

    def get(self):
        """Serialize to a dict, omitting unset fields."""
        payload = {}
        if self.type is not None:
            payload["type"] = self.type
        if self.value is not None:
            payload["value"] = self.value
        return payload
class Header(object):
    """A single SMTP header, serialized as a one-entry {key: value} dict."""

    def __init__(self, key=None, value=None):
        self.key = key
        self.value = value

    def set_key(self, key):
        self.key = key

    def set_value(self, value):
        self.value = value

    def get(self):
        # Both parts must be present for the header to serialize at all.
        if self.key is not None and self.value is not None:
            return {self.key: self.value}
        return {}
class Substitution(object):
def __init__(self, key=None, value=None):
self.key = key if key != None else None
self.value = value if value != None else None
def set_key(self, key):
self.key = key
def set_value(self, value):
self.value = value
def get(self):
substitution = {}
if self.key != None and self.value != None:
substitution[self.key] = self.value
return substitution
class Section(object):
def __init__(self, key=None, value=None):
self.key = key if key != None else None
self.value = value if value != None else None
def set_key(self, key):
self.key = key
def set_value(self, value):
self.value = value
def get(self):
section = {}
if self.key != None and self.value != None:
section[self.key] = self.value
return section
class CustomArg(object):
def __init__(self, key=None, value=None):
self.key = key if key != None else None
self.value = value if value != None else None
def set_key(self, key):
self.key = key
def set_value(self, value):
self.value = value
def get(self):
custom_arg = {}
if self.key != None and self.value != None:
custom_arg[self.key] = self.value
return custom_arg
class Personalization(object):
def __init__(self):
self.tos = None
self.ccs = None
self.bccs = None
self.subject = None
self.headers = None
self.substitutions = None
self.custom_args = None
self.send_at = None
def add_to(self, email):
if self.tos is None:
self.tos = []
self.tos.append(email.get())
def add_cc(self, email):
if self.ccs is None:
self.ccs = []
self.ccs.append(email.get())
def add_bcc(self, email):
if self.bccs is None:
self.bccs = []
self.bccs.append(email.get())
def set_subject(self, subject):
self.subject = subject
def add_header(self, header):
if self.headers is None:
self.headers = []
self.headers.append(header.get())
def add_substitution(self, substitution):
if self.substitutions is None:
self.substitutions = []
self.substitutions.append(substitution.get())
def add_custom_arg(self, custom_arg):
if self.custom_args is None:
self.custom_args = []
self.custom_args.append(custom_arg.get())
def set_send_at(self, send_at):
self.send_at = send_at
def get(self):
personalization = {}
if self.tos != None:
personalization["to"] = self.tos
if self.ccs != None:
personalization["cc"] = self.ccs
if self.bccs != None:
personalization["bcc"] = self.bccs
if self.subject != None:
personalization["subject"] = self.subject
if self.headers != None:
headers = {}
for key in self.headers:
headers.update(key)
personalization["headers"] = headers
if self.substitutions != None:
substitutions = {}
for key in self.substitutions:
substitutions.update(key)
personalization["substitutions"] = substitutions
if self.custom_args != None:
custom_args = {}
for key in self.custom_args:
custom_args.update(key)
personalization["custom_args"] = custom_args
if self.send_at != None:
personalization["send_at"] = self.send_at
return personalization
class Attachment(object):
def __init__(self):
self.content = None
self.type = None
self.filename = None
self.disposition = None
self.content_id = None
def set_content(self, content):
self.content = content
def set_type(self, type):
self.type = type
def set_filename(self, filename):
self.filename = filename
def set_disposition(self, disposition):
self.disposition = disposition
def set_content_id(self, content_id):
self.content_id = content_id
def get(self):
attachment = {}
if self.content != None:
attachment["content"] = self.content
if self.type != None:
attachment["type"] = self.type
if self.filename != None:
attachment["filename"] = self.filename
if self.disposition != None:
attachment["disposition"] = self.disposition
if self.content_id != None:
attachment["content_id"] = self.content_id
return attachment
class Category(object):
def __init__(self, name=None):
self.name = name if name != None else None
def get(self):
return self.name
class ASM(object):
def __init__(self, group_id=None, groups_to_display=None):
self.group_id = group_id if group_id != None else None
self.groups_to_display = groups_to_display if groups_to_display != None else None
def get(self):
asm = {}
if self.group_id != None:
asm["group_id"] = self.group_id
if self.groups_to_display != None:
asm["groups_to_display"] = self.groups_to_display
return asm
class BCCSettings(object):
def __init__(self, enable=None, email=None):
self.enable = enable if enable != None else None
self.email = email if email != None else None
def get(self):
bcc_settings = {}
if self.enable != None:
bcc_settings["enable"] = self.enable
if self.email != None:
email = self.email.get()
bcc_settings["email"] = email["email"]
return bcc_settings
class BypassListManagement(object):
def __init__(self, enable=None):
self.enable = enable if enable != None else None
def get(self):
bypass_list_management = {}
if self.enable != None:
bypass_list_management["enable"] = self.enable
return bypass_list_management
class FooterSettings(object):
    """Footer appended to every message: an on/off flag plus text/html bodies."""

    def __init__(self, enable=None, text=None, html=None):
        # Cleaned up: the original used the no-op conditionals
        # ``text if text else text`` / ``html if html else html``, which
        # always evaluate to the argument itself.  Plain assignment is
        # behavior-identical and matches the sibling settings classes.
        self.enable = enable
        self.text = text
        self.html = html

    def set_enable(self, enable):
        self.enable = enable

    def set_text(self, text):
        self.text = text

    def set_html(self, html):
        self.html = html

    def get(self):
        """Serialize to a dict, omitting unset fields."""
        footer_settings = {}
        if self.enable is not None:
            footer_settings["enable"] = self.enable
        if self.text is not None:
            footer_settings["text"] = self.text
        if self.html is not None:
            footer_settings["html"] = self.html
        return footer_settings
class SandBoxMode(object):
    """Sandbox-mode toggle for the mail settings block.

    NOTE: unlike the other settings objects, a falsy/unset ``enable`` is
    coerced to False (not None), so get() always emits the flag.
    """

    def __init__(self, enable=None):
        self.enable = enable or False

    def get(self):
        payload = {}
        if self.enable is not None:
            payload["enable"] = self.enable
        return payload
class SpamCheck(object):
def __init__(self, enable=None, threshold=None, post_to_url=None):
self.enable = enable if enable != None else None
self.threshold = threshold if threshold != None else None
self.post_to_url = post_to_url if post_to_url != None else None
def set_enable(self, enable):
self.enable = enable
def set_threshold(self, threshold):
self.threshold = threshold
def set_post_to_url(self, post_to_url):
self.post_to_url = post_to_url
def get(self):
spam_check = {}
if self.enable != None:
spam_check["enable"] = self.enable
if self.threshold != None:
spam_check["threshold"] = self.threshold
if self.post_to_url != None:
spam_check["post_to_url"] = self.post_to_url
return spam_check
class MailSettings(object):
def __init__(self):
self.bcc_settings = None
self.bypass_list_management = None
self.footer_settings = None
self.sandbox_mode = None
self.spam_check = None
def set_bcc_settings(self, bcc_settings):
self.bcc_settings = bcc_settings
def set_bypass_list_management(self, bypass_list_management):
self.bypass_list_management = bypass_list_management
def set_footer_settings(self, footer_settings):
self.footer_settings = footer_settings
def set_sandbox_mode(self, sandbox_mode):
self.sandbox_mode = sandbox_mode
def set_spam_check(self, spam_check):
self.spam_check = spam_check
def get(self):
mail_settings = {}
if self.bcc_settings != None:
mail_settings["bcc"] = self.bcc_settings.get()
if self.bypass_list_management != None:
mail_settings["bypass_list_management"] = self.bypass_list_management.get()
if self.footer_settings != None:
mail_settings["footer"] = self.footer_settings.get()
if self.sandbox_mode != None:
mail_settings["sandbox_mode"] = self.sandbox_mode.get()
if self.spam_check != None:
mail_settings["spam_check"] = self.spam_check.get()
return mail_settings
class ClickTracking(object):
    """Click-tracking settings for the tracking_settings block."""

    def __init__(self, enable=None, enable_text=None):
        # BUG FIX: the original used ``enable if enable else None``, which
        # silently discarded an explicit ``enable=False`` so the payload
        # omitted ``"enable": false`` (i.e. you could never turn click
        # tracking off via the constructor).  Every sibling class uses the
        # None-check; do the same here.
        self.enable = enable
        self.enable_text = enable_text

    def set_enable(self, enable):
        self.enable = enable

    def set_enable_text(self, enable_text):
        self.enable_text = enable_text

    def get(self):
        """Serialize to a dict, omitting unset fields."""
        click_tracking = {}
        if self.enable is not None:
            click_tracking["enable"] = self.enable
        if self.enable_text is not None:
            click_tracking["enable_text"] = self.enable_text
        return click_tracking
class OpenTracking(object):
def __init__(self, enable=None, substitution_tag=None):
self.enable = enable if enable != None else None
self.substitution_tag = substitution_tag if substitution_tag !=None else None
def set_enable(self, enable):
self.enable = enable
def set_substitution_tag(self, substitution_tag):
self.substitution_tag = substitution_tag
def get(self):
open_tracking = {}
if self.enable != None:
open_tracking["enable"] = self.enable
if self.substitution_tag != None:
open_tracking["substitution_tag"] = self.substitution_tag
return open_tracking
class SubscriptionTracking(object):
def __init__(self, enable=None, text=None, html=None, substitution_tag=None):
self.enable = enable if enable != None else None
self.text = text if text != None else None
self.html = html if html != None else None
self.substitution_tag = substitution_tag if substitution_tag != None else None
def set_enable(self, enable):
self.enable = enable
def set_text(self, text):
self.text = text
def set_html(self, html):
self.html = html
def set_substitution_tag(self, substitution_tag):
self.substitution_tag = substitution_tag
def get(self):
subscription_tracking = {}
if self.enable != None:
subscription_tracking["enable"] = self.enable
if self.text != None:
subscription_tracking["text"] = self.text
if self.html != None:
subscription_tracking["html"] = self.html
if self.substitution_tag != None:
subscription_tracking["substitution_tag"] = self.substitution_tag
return subscription_tracking
class Ganalytics(object):
def __init__(self,
enable=None,
utm_source=None,
utm_medium=None,
utm_term=None,
utm_content=None,
utm_campaign=None):
self.enable = enable if enable != None else None
self.utm_source = utm_source if utm_source != None else None
self.utm_medium = utm_medium if utm_medium != None else None
self.utm_term = utm_term if utm_term != None else None
self.utm_content = utm_content if utm_content != None else None
self.utm_campaign = utm_campaign if utm_campaign != None else None
def set_enable(self, enable):
self.enable = enable
def set_utm_source(self, utm_source):
self.utm_source = utm_source
def set_utm_medium(self, utm_medium):
self.utm_medium = utm_medium
def set_utm_term(self, utm_term):
self.utm_term = utm_term
def set_utm_content(self, utm_content):
self.utm_content = utm_content
def set_utm_campaign(self, utm_campaign):
self.utm_campaign = utm_campaign
def get(self):
ganalytics = {}
if self.enable != None:
ganalytics["enable"] = self.enable
if self.utm_source != None:
ganalytics["utm_source"] = self.utm_source
if self.utm_medium != None:
ganalytics["utm_medium"] = self.utm_medium
if self.utm_term != None:
ganalytics["utm_term"] = self.utm_term
if self.utm_content != None:
ganalytics["utm_content"] = self.utm_content
if self.utm_campaign != None:
ganalytics["utm_campaign"] = self.utm_campaign
return ganalytics
class TrackingSettings(object):
def __init__(self):
self.click_tracking = None
self.open_tracking = None
self.subscription_tracking = None
self.ganalytics = None
def set_click_tracking(self, click_tracking):
self.click_tracking = click_tracking
def set_open_tracking(self, open_tracking):
self.open_tracking = open_tracking
def set_subscription_tracking(self, subscription_tracking):
self.subscription_tracking = subscription_tracking
def set_ganalytics(self, ganalytics):
self.ganalytics = ganalytics
def get(self):
tracking_settings = {}
if self.click_tracking != None:
tracking_settings["click_tracking"] = self.click_tracking.get()
if self.open_tracking != None:
tracking_settings["open_tracking"] = self.open_tracking.get()
if self.subscription_tracking != None:
tracking_settings["subscription_tracking"] = self.subscription_tracking.get()
if self.ganalytics != None:
tracking_settings["ganalytics"] = self.ganalytics.get()
return tracking_settings
| mit |
TrafficLab/nox11oflib | src/utilities/switch_command.py | 16 | 2438 | #!/usr/bin/python
#
# Send arbitrary command to a switch
#
import getopt,sys,os
import httplib
import simplejson
import urllib
# TODO: need to set the path for this
from nox.webapps.webserviceclient.simple import PersistentLogin, NOXWSClient
def usage():
print """
Usage:
switch_command.py -d <directory name> -s <switch name> -c <command>
[-u <admin username>] [-p <admin passwd>]
[args]
e.g. switch_command -d Built-in -s foo -c restart
Note: accepts mangled switch names
"""
if __name__ == '__main__':
sys.path.append('/opt/nox/bin')
try:
opts, args = getopt.getopt(sys.argv[1:], "hd:s:c:u:p:")
except getopt.GetoptError, err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
usage()
sys.exit(2)
directory = None
switch = None
command = None
adminu = "admin"
adminp = "admin"
for o, a in opts:
if o == "-h":
usage()
sys.exit()
elif o == '-d':
directory = a
elif o == '-s':
switch = a
try:
if switch.find(';') != -1:
directory = switch.split(';')[0]
switch = switch.split(';')[0]
except Exception, e:
print 'Format error in mangled name',switch
sys.exit()
elif o == '-c':
command = a
elif o == '-u':
adminu = a
elif o == '-p':
adminp = a
else:
assert False, "unhandled option"
if not directory or not switch or not command:
usage()
sys.exit()
print ' Logging into web service.. ',
loginmgr = PersistentLogin("admin","admin")
# currently only support localhost
wsc = NOXWSClient("127.0.0.1", 443, True, loginmgr)
print 'done'
urlstr = '/ws.v1/switch/'+directory+'/'+switch+'/command'
print ' Issuing:'
print '\t',urlstr
url = urllib.quote(urlstr)
d = {}
d['command'] = command
d['args'] = args
headers = {}
headers["content-type"] = "application/json"
response = wsc.put(url, headers, simplejson.dumps(d))
body = response.getBody()
if body == '0':
print 'Command sent succesfully'
else:
print 'Error: ',body
| gpl-3.0 |
Jaimenms/celldeposit-condenser | models/external_film_condensation.py | 1 | 11523 | __doc__="""
DaetTools model that describes the behavior of a water flowing in a pipe with the effect of biofim formation.
"""
from daetools.pyDAE import *
from daetools_extended.daemodel_extended import daeModelExtended
from pyUnits import m, kg, s, K, Pa, J, W, rad
from water_properties import density, viscosity, conductivity, heat_capacity
from daetools_extended.tools import daeVariable_wrapper, distribute_on_domains
from water_at_saturation_properties import saturation_temperature, vapour_density, vapour_total_compressibility,vaporization_enthalpy
class ExternalFilmCondensation(daeModelExtended):
def __init__(self, Name, Parent=None, Description="", data={}, node_tree={}):
daeModelExtended.__init__(self, Name, Parent=Parent, Description=Description, data=data, node_tree=node_tree)
def define_parameters(self):
    """Declare the daetools parameters of the shell-side condensation model.

    Units are the pyUnits objects imported at module level; ``fNtub`` is
    distributed over ``self.YDomains`` (inherited from daeModelExtended).
    """
    self.Do = daeParameter("Do", m, self, "Outside pipe diameter")
    self.kwall = daeParameter("kwall", (K ** (-1))*(J ** (1))*(s ** (-1))*(m ** (-1)), self, "Wall conductivity")
    self.Pext0 = daeParameter("Pext0", Pa, self, "Initial External Pressure")
    self.Vext = daeParameter("Vext", m**3, self, "External Volume")
    self.PextSP = daeParameter("PextSP", Pa, self, "External Setpoint Pressure")
    self.PextH = daeParameter("PextH", Pa, self, "External High Pressure")
    self.fNtub = daeParameter("fNtub", unit(), self, "Factor for number of pipes over the actual pipe, including it", self.YDomains)
    self.kvap = daeParameter("kvap", kg/s, self, "Vapour Inlet Flowrate")
def define_variables(self):
# Variable types
temperature_t = daeVariableType("temperature_t", (K ** (1)), 273.0, 473.0, 310.0, 1e-5)
thermal_resistance_t = daeVariableType("thermal_resistance_t", (K ** (1))*(W ** (-1))*(m ** (1)), 1e-6, 1e3, 1e-3, 1e-5)
heat_transfer_coefficient_t = daeVariableType("heat_transfer_coefficient_t",
(K ** (-1)) * (W ** (1)) * (m ** (-2)), 1e-3,
1e6, 1e3, 1e-5)
flowrate_t = daeVariableType("flowrate_t", (kg ** (1)) * (s ** (-1)), 0., 200.0, 1.0, 1e-09,
eValueGT)
self.To = daeVariable("To", temperature_t, self, "Outside Wall Temperature", self.Domains)
self.Ti = daeVariable("Ti", temperature_t, self, "Inside Wall Temperature", self.Domains)
self.hint = daeVariable("hint", heat_transfer_coefficient_t, self, "Internal Convection coefficient", self.Domains)
self.hext = daeVariable("hext", heat_transfer_coefficient_t, self, "External Convection coefficient", self.Domains)
self.Pext = daeVariable("Pext", pressure_t, self, "External Pressure")
self.Text = daeVariable("Text", temperature_t, self, "External Temperature")
self.kcond = daeVariable("kcond", flowrate_t, self, "Condensate Outlet Flowrate")
self.wext = daeVariable("wext", flowrate_t, self, "Vapour Outlet Flowrate")
def eq_calculate_kcond(self):
eq = self.CreateEquation("CondensateFlowrate", "CondensateFlowrate")
Past = self.Pext() / Constant(1 * Pa)
hvap = vaporization_enthalpy(Past, simplified = True) * Constant(1 * J * (kg**-1) )
kcond = self.kcond()
if self.YDomains:
Q = Sum(self.Qtotal.array('*'))
else:
Q = self.Qtotal()
eq.Residual = kcond - Q / hvap
    def eq_ext_relief(self):
        """Relief-valve state machine for the shell side (STN "Regulator").

        States:
          - Closed:  wext = 0 (nothing vented)
          - Openned: wext = kvap (all inlet vapour vented)

        NOTE(review): both ON_CONDITION transitions are commented out below,
        so the STN never switches state at runtime - confirm this is
        intentional.
        """
        self.stnRegulator = self.STN("Regulator")
        self.STATE("Closed")
        eq = self.CreateEquation("ReliefSystem1", "ReliefSystem1")
        wext = self.wext()
        Pext = self.Pext()    # read but unused in this state's residual
        PextH = self.PextH()  # read but unused
        kvap = self.kvap()    # read but unused
        eq.Residual = wext
        #self.ON_CONDITION(self.Pext() >= self.PextSP() , switchToStates=[('Regulator', 'Openned')],
        #                  setVariableValues=[],
        #                  triggerEvents=[],
        #                  userDefinedActions=[])
        self.STATE("Openned")
        eq = self.CreateEquation("ReliefSystem2", "ReliefSystem2")
        wext = self.wext()
        kvap = self.kvap()
        eq.Residual = wext - kvap
        #self.ON_CONDITION(self.Pext() <= self.Pext0() , switchToStates=[('Regulator', 'Closed')],
        #                  setVariableValues=[],
        #                  triggerEvents=[],
        #                  userDefinedActions=[])
        self.END_STN()
    def eq_calculate_Pext(self):
        """Shell-side pressure state machine (STN "ShellPressure").

        States:
          - Fixed:       Pext = Pext0 (held at the initial pressure)
          - PreVariable: Pext = Pext0, switching to Variable once Time() > 0
                         (active ON_CONDITION below)
          - Variable:    vapour mass balance on the shell volume:
                         dPext/dt * Vext * (drho/dP) = kvap - wext - kcond
          - Constant:    Pext pinned at 25600 Pa
        """
        self.stnShellPressure = self.STN("ShellPressure")
        self.STATE("Fixed")
        eq = self.CreateEquation("ExternalPressureInitial", "ExternalPressureInitial")
        Pext = self.Pext()
        Pext0 = self.Pext0()
        eq.Residual = Pext - Pext0
        self.STATE("PreVariable")
        eq = self.CreateEquation("ExternalPressureInitial", "ExternalPressureInitial")
        Pext = self.Pext()
        Pext0 = self.Pext0()
        eq.Residual = Pext - Pext0
        self.ON_CONDITION(Time() > Constant(0*s), switchToStates = [ ('ShellPressure', 'Variable') ],
                          setVariableValues = [],
                          triggerEvents = [],
                          userDefinedActions = [] )
        self.STATE("Variable")
        eq = self.CreateEquation("ExternalPressure", "ExternalPressure")
        Pext = self.Pext()
        Vext = self.Vext()
        kcond = self.kcond()
        kvap = self.kvap()
        wext = self.wext()
        Past = Pext / Constant(1 * Pa)
        # Vapour compressibility converts the pressure derivative into a
        # mass accumulation rate in the shell volume.
        drhodPshell = vapour_total_compressibility(Past) * Constant(1 * kg * (m**-3) * (Pa**-1) )
        eq.Residual = self.dt_day(Pext)* (Vext * drhodPshell) - (kvap - wext - kcond)
        #self.ON_CONDITION(Time() > Constant(400000*s), switchToStates = [ ('ShellPressure', 'Constant') ],
        #                  setVariableValues = [],
        #                  triggerEvents = [],
        #                  userDefinedActions = [] )
        self.STATE("Constant")
        eq = self.CreateEquation("ExternalPressure", "ExternalPressure")
        Pext = self.Pext()
        Vext = self.Vext()    # NOTE(review): unused in this state
        kcond = self.kcond()  # NOTE(review): unused in this state
        kvap = self.kvap()    # NOTE(review): unused in this state
        wext = self.wext()    # NOTE(review): unused in this state
        Past = Pext / Constant(1 * Pa)  # NOTE(review): unused in this state
        eq.Residual = Pext - Constant(25600 * Pa)
        self.END_STN()
def eq_calculate_Text(self):
eq = self.CreateEquation("ExternalTemperature", "ExternalTemperature")
Pext = self.Pext()
Past = Pext / Constant(1 * Pa)
eq.Residual = self.Text() - saturation_temperature(Past, simplified=True) * Constant(1 * K)
def eq_calculate_hint(self):
eq = self.CreateEquation("InternalConvection", "Internal convection - hint")
domains = distribute_on_domains(self.Domains, eq, eClosedClosed)
T = daeVariable_wrapper(self.T, domains)
P = daeVariable_wrapper(self.P, domains)
Ti = daeVariable_wrapper(self.Ti, domains)
fD = daeVariable_wrapper(self.fD, domains)
Re = daeVariable_wrapper(self.Re, domains)
hint = daeVariable_wrapper(self.hint, domains)
D = daeVariable_wrapper(self.D, domains)
# Calculates the Nussel dimensionless number using Petukhov correlation modified by Gnielinski. See Incropera 4th Edition [8.63]
Tm = 0.5 * (T + Ti)
Tast = Tm / Constant(1 * K)
Past = P / Constant(1 * Pa)
mu = viscosity( Tast, Past, simplified = True)
kappa = conductivity( Tast, Past, simplified = True)
cp = heat_capacity( Tast, Past, simplified = True)
prandtl = cp * mu / kappa
kappa_i = kappa * Constant(1 * (K ** (-1))*(W ** (1))*(m ** (-1)))
nusselt = (fD / 8.) * (Re - 1000.) * prandtl / (1. + 12.7 * Sqrt(fD / 8.) * (prandtl ** (2 / 3)) - 1.)
hint_calc = nusselt * kappa_i / D
eq.Residual = hint - hint_calc
    def eq_calculate_hext(self):
        """External (shell-side) condensation heat-transfer coefficient.

        Nusselt-type film condensation on a horizontal tube: the 0.729
        constant multiplies (g*rho*(rho-rhov)*k^3*hvap / (mu*|dT|*Do))^(1/4).
        fNtub scales the single-tube result (per its parameter description,
        a factor for the number of pipes above the actual pipe). Properties
        are evaluated at the film temperature (Text + To)/2 and the shell
        pressure Pext.
        """
        eq = self.CreateEquation("Hext", "Heat balance - Hext")
        xdomains = distribute_on_domains(self.XDomains, eq, eClosedClosed)
        ydomains = distribute_on_domains(self.YDomains, eq, eClosedClosed)
        domains = xdomains + ydomains
        g = self.g
        Text = self.Text()
        Do = self.Do()
        To = daeVariable_wrapper(self.To, domains)
        hext = daeVariable_wrapper(self.hext, domains)
        fNtub = daeVariable_wrapper(self.fNtub, ydomains)
        # Film temperature and dimensionless property arguments.
        Tf = 0.5 * (Text + To)
        Pf = self.Pext()
        Tast = Tf / Constant(1 * K)
        Past = Pf / Constant(1 * Pa)
        hvap = vaporization_enthalpy(Past, simplified = True) * Constant(1 * J * (kg**-1) )
        rhov = vapour_density( Past, simplified = True) * Constant(1 * (kg ** (1))*(m ** (-3)))
        rho_o = density( Tast, Past, simplified = True) * Constant(1 * (kg ** (1))*(m ** (-3)))
        mu_o = viscosity( Tast, Past, simplified = True) * Constant(1 * (Pa ** (1))*(s ** (1)))
        kappa_o = conductivity( Tast, Past, simplified = True) * Constant(1 * (K ** (-1))*(W ** (1))*(m ** (-1)))
        num = (g * rho_o * (rho_o - rhov) * kappa_o ** 3 * hvap)
        # Abs() keeps the 1/4-power argument positive regardless of dT sign.
        den = mu_o * Abs(Text - To) * Do
        hd1 = (0.729 * (num / den) ** 0.25)
        eq.Residual = hext - fNtub * hd1
def eq_total_he(self):
eq = self.CreateEquation("TotalHeat", "Heat balance - Qout")
domains = distribute_on_domains(self.Domains, eq, eClosedClosed)
Text = self.Text()
T = daeVariable_wrapper(self.T, domains)
Qout = daeVariable_wrapper(self.Qout, domains)
hext = daeVariable_wrapper(self.hext, domains)
hint = daeVariable_wrapper(self.hint, domains)
D = daeVariable_wrapper(self.D, domains)
kwall = self.kwall()
Do = self.Do()
Di = self.Di()
pi = self.pi
Resext = 1 / (pi * Do * hext)
Resint = 1 / (pi * D * hint)
Reswall = Log(Do / Di) / (2 * pi * kwall)
eq.Residual = Qout * (Resint + Reswall + Resext) - (Text - T )
def eq_calculate_To(self):
eq = self.CreateEquation("WallHeat1", "Heat balance - wall")
domains = distribute_on_domains(self.Domains, eq, eClosedClosed)
Do = self.Do()
Text = self.Text()
pi = self.pi
To = daeVariable_wrapper(self.To, domains)
hext = daeVariable_wrapper(self.hext, domains)
Qout = daeVariable_wrapper(self.Qout, domains)
eq.Residual = Qout - (Text - To) * (pi * Do * hext)
def eq_calculate_Ti(self):
eq = self.CreateEquation("WallHeat2", "Heat balance - wall")
domains = distribute_on_domains(self.Domains, eq, eClosedClosed)
kwall = self.kwall()
Do = self.Do()
Di = self.Di()
pi = self.pi
Ti = daeVariable_wrapper(self.Ti, domains)
To = daeVariable_wrapper(self.To, domains)
Qout = daeVariable_wrapper(self.Qout, domains)
eq.Residual = Qout * Log(Do / Di) - (To - Ti) * (2 * pi * kwall)
    def DeclareEquations(self):
        """Register this model's equations with daetools.

        Calls the base-class declaration first, then the shell-side pressure
        and temperature, wall temperatures, film coefficients and the
        condensate balance.
        """
        daeModelExtended.DeclareEquations(self)
        self.eq_calculate_Pext()
        self.eq_calculate_Text()
        self.eq_calculate_To()
        self.eq_calculate_Ti()
        self.eq_calculate_hint()
        self.eq_calculate_hext()
        self.eq_calculate_kcond()
self.eq_ext_relief() | gpl-3.0 |
Intel-tensorflow/tensorflow | tensorflow/python/keras/legacy_tf_layers/base.py | 4 | 24294 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=g-classes-have-attributes
"""Contains the base Layer class, from which all layers inherit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import warnings
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.legacy_tf_layers import variable_scope_shim
from tensorflow.python.keras.mixed_precision import policy
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.training.tracking import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = base_layer.InputSpec # pylint: disable=invalid-name
_KERAS_STYLE_SCOPE = False
@keras_export(
    v1=['keras.__internal__.legacy.layers.experimental.keras_style_scope'])
@tf_export(v1=['layers.experimental.keras_style_scope'])
@tf_contextlib.contextmanager
def keras_style_scope():
  """Use Keras-style variable management.

  All tf.layers and tf RNN cells created inside this context manager use
  Keras-style variable management: creating such layers with a scope=
  argument is disallowed, and reuse=True is disallowed. This lets users of
  existing layers (e.g. TF RNN cells inside Keras Models, which otherwise
  may accidentally share variable scopes between two models or raise errors
  about already-existing variables) transition gradually to the Keras
  layers API without breaking existing functionality.

  The previous setting is restored on exit, so scopes may be nested.

  Yields:
    A keras layer style scope.
  """
  global _KERAS_STYLE_SCOPE
  previous_value = _KERAS_STYLE_SCOPE
  _KERAS_STYLE_SCOPE = True
  try:
    yield
  finally:
    _KERAS_STYLE_SCOPE = previous_value
@keras_export(
    v1=['keras.__internal__.legacy.layers.experimental.set_keras_style'])
@tf_export(v1=['layers.experimental.set_keras_style'])
def set_keras_style():
  """Globally enable Keras-style variable management.

  Equivalent to entering `keras_style_scope()` for the remainder of the
  program: all tf.layers and tf RNN cells created afterwards use
  Keras-style variable management, and creating such layers with a scope=
  argument or reuse=True is disallowed. Unlike the scoped variant, this
  setting is global to the entire program and cannot be unset.

  See the documentation of `keras_style_scope` for details and examples.
  """
  global _KERAS_STYLE_SCOPE
  _KERAS_STYLE_SCOPE = True
def _is_in_keras_style_scope():
  """Return True if Keras-style variable management is currently enabled."""
  return _KERAS_STYLE_SCOPE
@keras_export(v1=['keras.__internal__.legacy.layers.Layer'])
@tf_export(v1=['layers.Layer'])
class Layer(base_layer.Layer):
"""Base layer class.
It is considered legacy, and we recommend the use of `tf.keras.layers.Layer`
instead.
Args:
trainable: Boolean, whether the layer's variables should be trainable.
name: String name of the layer.
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
Read-only properties:
name: The name of the layer (string).
dtype: Default dtype of the layer's weights (default of `None` means use the
type of the first input).
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and
non-trainable.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
trainable_weights: List of variables to be included in backprop.
non_trainable_weights: List of variables that should not be
included in backprop.
weights: The concatenation of the lists trainable_weights and
non_trainable_weights (in this order).
Mutable properties:
trainable: Whether the layer should be trained (boolean).
input_spec: Optional (list of) `InputSpec` object(s) specifying the
constraints on inputs that can be accepted by the layer.
"""
  def __init__(self, trainable=True, name=None, dtype=None,
               **kwargs):
    """Creates a legacy tf.layers Layer.

    Args:
      trainable: Boolean, whether the layer's variables should be trainable.
      name: String name of the layer (or a `tf.VariableScope`).
      dtype: Default dtype of the layer's weights; `None` means infer from
        the first input (an "_infer" dtype policy is used to keep V1
        behavior).
      **kwargs: Additional keyword arguments. `_scope` and `_reuse` are
        consumed here; the rest is forwarded to the Keras base `Layer`.

    Raises:
      ValueError: if `_scope` or `_reuse` is given while keras-style
        variable management is enabled.
    """
    # For backwards compatibility, legacy layers do not use `ResourceVariable`
    # by default.
    self._use_resource_variables = False
    scope = kwargs.pop('_scope', None)
    self._reuse = kwargs.pop('_reuse', None)
    # Avoid an incorrect lint error
    self._trainable_weights = []
    self.built = False
    if dtype is None:
      # Indicates to infer dtype from inputs. When the V2 dtype behavior is
      # enabled, Keras layers default their dtype to floatx instead, so we pass
      # an "_infer" policy to keep the old V1 behavior.
      dtype = policy.Policy('_infer')
    if 'autocast' not in kwargs:
      kwargs['autocast'] = False
    # Mark that legacy layers should not be instrumented as Keras usage
    self._disable_keras_instrumentation = True
    super(Layer, self).__init__(trainable=trainable, name=name, dtype=dtype,
                                **kwargs)
    if _is_in_keras_style_scope():
      # Keras-style management forbids explicit scope/reuse arguments.
      if scope is not None:
        raise ValueError(
            'scope argument not allowed when keras style layers are enabled, '
            'but saw: {}'.format(scope))
      if self._reuse is not None:
        raise ValueError(
            'reuse argument not allowed when keras style layers are enabled, '
            'but saw: {}'.format(self._reuse))
      self._keras_style = True
    else:
      self._keras_style = False
    self._call_has_scope_arg = 'scope' in self._call_fn_args
    if scope:
      # Capture the given scope now so variables land in it later.
      with vs.variable_scope(scope) as captured_scope:
        self._scope = captured_scope
    else:
      self._scope = None
    self._current_scope = None
# We no longer track graph in tf.layers layers. This property is only kept to
# maintain API backward compatibility.
@property
def graph(self):
warnings.warn('`Layer.graph` is deprecated and '
'will be removed in a future version. '
'Please stop using this property because tf.layers layers no '
'longer track their graph.')
if context.executing_eagerly():
raise RuntimeError('Layer.graph not supported when executing eagerly.')
return None
  def _init_set_name(self, name):
    # Determine layer name (non-unique).
    if isinstance(name, vs.VariableScope):
      # A VariableScope was passed: its name string becomes the base name and
      # a unique layer name is generated from the class name.
      base_name = name.name
      self._name, _ = self._make_unique_name()
    else:
      base_name = name
      self._name = name
      if not name:
        # No name supplied: auto-generate both the unique and base names.
        self._name, base_name = self._make_unique_name()
    self._base_name = base_name
def _make_unique_name(self, name_uid_map=None, avoid_names=None,
namespace='', zero_based=False):
base_name = base_layer.to_snake_case(self.__class__.__name__)
name = backend.unique_object_name(
base_name,
name_uid_map=name_uid_map,
avoid_names=avoid_names,
namespace=namespace,
zero_based=zero_based)
return (name, base_name)
@property
def scope_name(self):
if not self._scope:
raise ValueError('No name available for layer scope because the layer "' +
self._name + '" has not been used yet. The scope name ' +
' is determined the first time the layer instance is ' +
'called. You must therefore call the layer before ' +
'querying `scope_name`.')
return self._scope.name
  def add_loss(self, losses, inputs=None):
    """Adds losses, mirroring new ones into REGULARIZATION_LOSSES (graph mode).

    Delegates to the Keras base `add_loss`, then - outside eager execution -
    evaluates any newly registered callable losses and appends every new
    loss tensor to the graph's REGULARIZATION_LOSSES collection.
    """
    previous_losses_length = len(self._losses)
    previous_callable_losses_length = len(self._callable_losses)
    super(Layer, self).add_loss(losses, inputs=inputs)
    if not context.executing_eagerly():
      # TODO(fchollet): deprecate collection below.
      new_losses = self._losses[previous_losses_length:]
      new_callable_losses = self._callable_losses[
          previous_callable_losses_length:]
      for regularizer in new_callable_losses:
        loss_tensor = regularizer()
        if loss_tensor is not None:
          new_losses.append(loss_tensor)
      _add_elements_to_collection(
          new_losses,
          ops.GraphKeys.REGULARIZATION_LOSSES)
def _name_scope(self): # pylint: disable=method-hidden
"""Determines op naming for the Layer."""
if self._keras_style:
return super(Layer, self)._name_scope()
return self._current_scope.original_name_scope
  def _set_scope(self, scope=None):
    """Lazily creates `self._scope` on first use.

    When reusing, the given (or base-name) scope is entered directly, so the
    exact name must already exist; otherwise a fresh scope is opened with
    `default_name`, which uniquifies the name if it is already taken.
    """
    if self._scope is None:
      # If constructed with _scope=None, lazy setting of scope.
      if self._reuse:
        with vs.variable_scope(
            scope if scope is not None else self._base_name) as captured_scope:
          self._scope = captured_scope
      else:
        with vs.variable_scope(
            scope, default_name=self._base_name) as captured_scope:
          self._scope = captured_scope
  def add_weight(self,
                 name,
                 shape,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 constraint=None,
                 use_resource=None,
                 synchronization=vs.VariableSynchronization.AUTO,
                 aggregation=vs.VariableAggregation.NONE,
                 partitioner=None,
                 **kwargs):
    """Adds a new variable to the layer, or gets an existing one; returns it.
    Args:
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      initializer: initializer instance (callable).
      regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
        Note, if the current variable scope is marked as non-trainable
        then this parameter is ignored and any added variables are also
        marked as non-trainable. `trainable` defaults to `True` unless
        `synchronization` is set to `ON_READ`.
      constraint: constraint instance (callable).
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      partitioner: (optional) partitioner instance (callable). If
        provided, when the requested variable is created it will be split
        into multiple partitions according to `partitioner`. In this case,
        an instance of `PartitionedVariable` is returned. Available
        partitioners include `tf.compat.v1.fixed_size_partitioner` and
        `tf.compat.v1.variable_axis_size_partitioner`. For more details, see
        the documentation of `tf.compat.v1.get_variable` and the "Variable
        Partitioners and Sharding" section of the API guide.
      **kwargs: Additional keyword arguments.
    Returns:
      The created variable. Usually either a `Variable` or `ResourceVariable`
      instance. If `partitioner` is not `None`, a `PartitionedVariable`
      instance is returned.
    Raises:
      RuntimeError: If called with partitioned variable regularization and
        eager execution is enabled.
      ValueError: When trainable has been set to True with synchronization
        set as `ON_READ`.
    """
    for kwarg in kwargs:
      if kwarg != 'experimental_autocast':
        raise TypeError('Unknown keyword argument:', kwarg)
    if self._keras_style:
      # NOTE(review): in the keras-style path the caller-supplied
      # `synchronization`/`aggregation` arguments are replaced with
      # AUTO/NONE below - confirm this is intentional.
      return super(Layer, self).add_weight(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          trainable=trainable and self.trainable,
          constraint=constraint,
          use_resource=use_resource,
          synchronization=vs.VariableSynchronization.AUTO,
          aggregation=vs.VariableAggregation.NONE,
          partitioner=partitioner,
          **kwargs)
    if synchronization == vs.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            'Synchronization value can be set to '
            'VariableSynchronization.ON_READ only for non-trainable variables. '
            'You have specified trainable=True and '
            'synchronization=VariableSynchronization.ON_READ.')
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True
    def _should_add_regularizer(variable, existing_variable_set):
      # Partitioned variables: regularize only if no partition is already
      # tracked; plain variables: regularize if the variable itself is new.
      if base_layer_utils.is_split_variable(variable):
        for var in variable:
          if var in existing_variable_set:
            return False
        return True
      else:
        return variable not in existing_variable_set
    init_graph = None
    if not context.executing_eagerly():
      default_graph = ops.get_default_graph()
      if default_graph.building_function:
        with ops.init_scope():
          # Retrieve the variables from the graph into which variables
          # will be lifted; if initialization ops will be lifted into
          # the eager context, then there is nothing to retrieve, since variable
          # collections are not supported when eager execution is enabled.
          if not context.executing_eagerly():
            init_graph = ops.get_default_graph()
            existing_variables = set(tf_variables.global_variables())
      else:
        # Initialization ops will not be lifted out of the default graph.
        init_graph = default_graph
        existing_variables = set(tf_variables.global_variables())
    if dtype is None:
      dtype = self.dtype or dtypes.float32
    self._set_scope(None)
    reuse = self.built or self._reuse
    prev_len_trainable = len(self._trainable_weights)
    with vs.variable_scope(
        self._scope, reuse=reuse, auxiliary_name_scope=False) as scope:
      self._current_scope = scope
      with backend.name_scope(self._name_scope()):  # pylint: disable=not-callable
        use_resource = (use_resource or
                        self._use_resource_variables or
                        scope.use_resource)
        if initializer is None:
          initializer = scope.initializer
        variable = super(Layer, self).add_weight(
            name,
            shape,
            dtype=dtypes.as_dtype(dtype),
            initializer=initializer,
            trainable=trainable and self.trainable,
            constraint=constraint,
            partitioner=partitioner,
            use_resource=use_resource,
            synchronization=synchronization,
            aggregation=aggregation,
            getter=vs.get_variable,
            **kwargs)
        if regularizer:
          # Apply the regularizer only once per variable (reused variables
          # already had theirs registered), except in TF2-style execution.
          if (ops.executing_eagerly_outside_functions()
              or _should_add_regularizer(variable, existing_variables)):
            self._handle_weight_regularization(name, variable, regularizer)
          var_store = vs._get_default_variable_store()  # pylint: disable=protected-access
          # When the shim to get variable scope working in TF2 is used,
          # We need to explicitly make the shim track the regularization
          # losses as the collections will not be accessible.
          if hasattr(var_store, 'add_regularizer'):
            var_store.add_regularizer(variable, regularizer)
        if init_graph is not None:
          # Handle edge case where a custom getter has overridden `trainable`.
          # There is one known occurrence of this, in unit test
          # testBasicRNNCellNotTrainable in
          # contrib.rnn.python.kernel_tests.core_rnn_cell_test
          with init_graph.as_default():
            trainable_variables = tf_variables.trainable_variables()
          if (trainable and self.trainable and
              variable not in trainable_variables):
            # A custom getter / variable scope overrode the trainable flag.
            extra_trainable_vars = self._trainable_weights[prev_len_trainable:]
            self._trainable_weights = self._trainable_weights[
                :prev_len_trainable]
            self._non_trainable_weights += extra_trainable_vars
    return variable
  def __call__(self, inputs, *args, **kwargs):
    """Wraps `call`, applying pre- and post-processing steps.
    Args:
      inputs: input tensor(s).
      *args: additional positional arguments to be passed to `self.call`.
      **kwargs: additional keyword arguments to be passed to `self.call`.
        **Note**: kwarg `scope` is reserved for use by the layer.
    Returns:
      Output tensor(s).
    Note:
      - If the layer's `call` method takes a `scope` keyword argument,
        this argument will be automatically set to the current variable scope.
      - If the layer's `call` method takes a `mask` argument (as some Keras
        layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `input` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support.
    Raises:
      ValueError: if the layer's `call` method returns None (an invalid value).
    """
    scope = kwargs.pop('scope', None)
    if self._keras_style:
      if scope is not None:
        raise ValueError(
            'scope argument not allowed when keras style layers are enabled, '
            'but saw: {}'.format(scope))
      return super(Layer, self).__call__(inputs, *args, **kwargs)
    self._set_scope(scope)
    if self.built:
      try:
        # Some classes which inherit from Layer do not use its constructor, so
        # rather than initializing to None we check for an AttributeError.
        scope_context_manager = self._always_reuse_variable_scope  # pylint: disable=access-member-before-definition
      except AttributeError:
        scope_context_manager = None
      if scope_context_manager is None:
        # From this point we will always set reuse=True, so create a "final"
        # variable scope with this setting. We avoid re-creating variable scopes
        # after this point as an optimization.
        scope_context_manager = vs.variable_scope(
            self._scope, reuse=True, auxiliary_name_scope=False)
        # Do not cache variable scopes if Eager mode is enabled. If Eager mode
        # is enabled then we don't want to reuse scopes because the cached scope
        # might be from a FuncGraph or Eager scope we are no longer in.
        if not ops.executing_eagerly_outside_functions():
          self._always_reuse_variable_scope = scope_context_manager
    else:
      # First call: honor the constructor's reuse flag.
      scope_context_manager = vs.variable_scope(
          self._scope, reuse=self._reuse, auxiliary_name_scope=False)
    with scope_context_manager as scope:
      self._current_scope = scope
      try:
        call_has_scope_arg = self._call_has_scope_arg
      except AttributeError:
        # Recompute lazily for classes that bypass Layer's constructor.
        self._call_fn_args = variable_scope_shim.fn_args(self.call)
        self._call_has_scope_arg = 'scope' in self._call_fn_args
        call_has_scope_arg = self._call_has_scope_arg
      if call_has_scope_arg:
        kwargs['scope'] = scope
      # Actually call layer
      outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
    if not context.executing_eagerly():
      # Update global default collections.
      _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
    return outputs
def __deepcopy__(self, memo):
no_copy = set(['_graph', '_thread_local', '_metrics_lock'])
shallow_copy = set(['_scope', '_always_reuse_variable_scope'])
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k in no_copy:
setattr(result, k, v)
elif k in shallow_copy:
setattr(result, k, copy.copy(v))
elif base_layer.is_tensor_or_tensor_list(v):
setattr(result, k, v)
else:
setattr(result, k, copy.deepcopy(v, memo))
return result
  def __setattr__(self, value, name):
    # By-pass the automatic dependency tracking performed by the parent Layer.
    # NOTE: the parameter names are swapped relative to the usual
    # (name, value) convention - Python passes them positionally, so `value`
    # actually receives the attribute name - but the positional forwarding
    # below keeps the order consistent, so behavior is correct.
    super(trackable.Trackable, self).__setattr__(value, name)  # pylint: disable=bad-super-call
  @property
  def _is_legacy_layer(self):
    """Used by keras to check compatibility. This should not be overridden."""
    # Always True for v1 tf.layers layers.
    return True
def _add_elements_to_collection(elements, collection_list):
  """Append each element to every named graph collection, skipping entries
  already present (by identity). Not supported in eager mode."""
  if context.executing_eagerly():
    raise RuntimeError('Using collections from Layers not supported in Eager '
                       'mode. Tried to add %s to %s' % (elements,
                                                        collection_list))
  flat_elements = nest.flatten(elements)
  for collection_name in nest.flatten(collection_list):
    collection = ops.get_collection_ref(collection_name)
    existing_ids = {id(entry) for entry in collection}
    for element in flat_elements:
      if id(element) not in existing_ids:
        collection.append(element)
| apache-2.0 |
ageron/tensorflow | tensorflow/python/keras/utils/io_utils.py | 9 | 4877 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-import-not-at-top
"""Utilities related to disk I/O."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import numpy as np
import six
from tensorflow.python.util.tf_export import keras_export
try:
import h5py
except ImportError:
h5py = None
@keras_export('keras.utils.HDF5Matrix')
class HDF5Matrix(object):
  """Representation of HDF5 dataset to be used instead of a Numpy array.
  Example:
  ```python
      x_data = HDF5Matrix('input/file.hdf5', 'data')
      model.predict(x_data)
  ```
  Providing `start` and `end` allows use of a slice of the dataset.
  Optionally, a normalizer function (or lambda) can be given. This will
  be called on every slice of data retrieved.
  Arguments:
      datapath: string, path to a HDF5 file
      dataset: string, name of the HDF5 dataset in the file specified
          in datapath
      start: int, start of desired slice of the specified dataset
      end: int, end of desired slice of the specified dataset
      normalizer: function to be called on data when retrieved
  Returns:
      An array-like HDF5 dataset.
  """
  # Class-level cache of open h5py.File handles keyed by file path, so several
  # HDF5Matrix instances share a single handle per file.
  # NOTE(review): declared as defaultdict(int), but only File objects are ever
  # assigned; the int default factory is never used.
  refs = defaultdict(int)
  def __init__(self, datapath, dataset, start=0, end=None, normalizer=None):
    if h5py is None:
      raise ImportError('The use of HDF5Matrix requires '
                        'HDF5 and h5py installed.')
    if datapath not in list(self.refs.keys()):
      # NOTE(review): no mode is passed, so h5py's default applies ('a' in
      # h5py 2.x, 'r' in h5py 3) - confirm which behavior is expected.
      f = h5py.File(datapath)
      self.refs[datapath] = f
    else:
      f = self.refs[datapath]
    self.data = f[dataset]
    # Offset of this view's first row within the underlying HDF5 dataset.
    self.start = start
    if end is None:
      self.end = self.data.shape[0]
    else:
      self.end = end
    self.normalizer = normalizer
  def __len__(self):
    return self.end - self.start
  def __getitem__(self, key):
    # Accepts slices, ints, numpy integers/arrays and generic iterables of
    # indices; all indices are relative to `self.start` and bounds-checked
    # against `self.end`.
    if isinstance(key, slice):
      start, stop = key.start, key.stop
      if start is None:
        start = 0
      if stop is None:
        stop = self.shape[0]
      if stop + self.start <= self.end:
        idx = slice(start + self.start, stop + self.start)
      else:
        raise IndexError
    elif isinstance(key, (int, np.integer)):
      if key + self.start < self.end:
        idx = key + self.start
      else:
        raise IndexError
    elif isinstance(key, np.ndarray):
      if np.max(key) + self.start < self.end:
        idx = (self.start + key).tolist()
      else:
        raise IndexError
    else:
      # Assume list/iterable
      if max(key) + self.start < self.end:
        idx = [x + self.start for x in key]
      else:
        raise IndexError
    if self.normalizer is not None:
      return self.normalizer(self.data[idx])
    else:
      return self.data[idx]
  @property
  def shape(self):
    """Gets a numpy-style shape tuple giving the dataset dimensions.
    Returns:
        A numpy-style shape tuple.
    """
    return (self.end - self.start,) + self.data.shape[1:]
  @property
  def dtype(self):
    """Gets the datatype of the dataset.
    Returns:
        A numpy dtype string.
    """
    return self.data.dtype
  @property
  def ndim(self):
    """Gets the number of dimensions (rank) of the dataset.
    Returns:
        An integer denoting the number of dimensions (rank) of the dataset.
    """
    return self.data.ndim
  @property
  def size(self):
    """Gets the total dataset size (number of elements).
    Returns:
        An integer denoting the number of elements in the dataset.
    """
    return np.prod(self.shape)
def ask_to_proceed_with_overwrite(filepath):
  """Produces a prompt asking about overwriting a file.
  Arguments:
      filepath: the path to the file to be overwritten.
  Returns:
      True if we can proceed with overwrite, False otherwise.
  """
  answer = six.moves.input('[WARNING] %s already exists - overwrite? '
                           '[y/n]' % (filepath)).strip().lower()
  # Re-prompt until an unambiguous answer is given.
  while answer not in ('y', 'n'):
    answer = six.moves.input('Enter "y" (overwrite) or "n" '
                             '(cancel).').strip().lower()
  if answer == 'y':
    print('[TIP] Next time specify overwrite=True!')
    return True
  return False
| apache-2.0 |
borjam/exabgp | src/exabgp/rib/change.py | 3 | 2158 | # encoding: utf-8
"""
change.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2017 Exa Networks. All rights reserved.
License: 3-clause BSD. (See the COPYRIGHT file)
"""
class Source(object):
    """Enumeration of where a route change originated."""
    UNSET = 0
    CONFIGURATION = 1
    API = 2
    NETWORK = 3


class Change(object):
    """A route change: an NLRI plus the attributes used to announce it."""

    # origin of the change; overridden by the subclasses below
    SOURCE = Source.UNSET

    @staticmethod
    def family_prefix(family):
        # family is an (afi, safi) pair encoded as four hex digits
        return b'%02x%02x' % family

    def __init__(self, nlri, attributes):
        self.nlri = nlri
        self.attributes = attributes
        # prevent multiple allocation of the index when calling .index()
        # storing the value at __init__ time causes api-attributes.sequence to fail
        # XXX: the NLRI content is half missing !!
        # (initialised as bytes for type-consistency with index() below)
        self.__index = b''

    def index(self):
        """Return (and lazily cache) the bytes key identifying this NLRI."""
        if not self.__index:
            self.__index = b'%02x%02x' % self.nlri.family() + self.nlri.index()
        return self.__index

    def __eq__(self, other):
        return self.nlri == other.nlri and self.attributes == other.attributes

    def __ne__(self, other):
        return self.nlri != other.nlri or self.attributes != other.attributes

    def __lt__(self, other):
        raise RuntimeError('comparing Change for ordering does not make sense')

    def __le__(self, other):
        raise RuntimeError('comparing Change for ordering does not make sense')

    def __gt__(self, other):
        raise RuntimeError('comparing Change for ordering does not make sense')

    def __ge__(self, other):
        raise RuntimeError('comparing Change for ordering does not make sense')

    def extensive(self):
        # If you change this you must change as well extensive in Update
        return "%s%s" % (str(self.nlri), str(self.attributes))

    def __repr__(self):
        return self.extensive()

    def feedback(self):
        """Delegate validity checking to the NLRI, if one is present."""
        if self.nlri is not None:
            return self.nlri.feedback(self.nlri.action)
        # Bug fix: the previous code formatted self.nlri.family() here, which
        # always raised AttributeError because self.nlri is None on this branch.
        return 'no check implemented for changes without nlri'
class ConfigurationChange(Change):
    """Change parsed from the configuration file."""
    SOURCE = Source.CONFIGURATION


class APIChange(Change):
    """Change injected through the API."""
    SOURCE = Source.API


class NetworkChange(Change):
    """Change learned from a peer on the network."""
    SOURCE = Source.NETWORK
| bsd-3-clause |
simontakite/sysadmin | pythonscripts/programmingpython/System/Processes/pipe2.py | 2 | 1166 | # same as pipe1.py, but wrap pipe input in stdio file object
# to read by line, and close unused pipe fds in both processes
import os, time
def child(pipeout):
    """Writer loop run in the forked child: emit one message per cycle.

    Never returns; the parent reads the messages from the pipe fd *pipeout*.
    """
    zzz = 0
    while True:
        time.sleep(zzz)                          # make parent wait
        msg = ('Spam %03d\n' % zzz).encode()     # pipes are binary in 3.X
        os.write(pipeout, msg)                   # send to parent
        zzz = (zzz+1) % 5                        # roll to 0 at 5
def parent():
    """Fork a writer child and echo each line it sends through the pipe.

    Each process closes the pipe end it does not use so that EOF can be
    detected properly; the parent wraps its read end in a text-mode file
    object to read line by line.
    """
    pipein, pipeout = os.pipe()                  # make 2-ended pipe
    if os.fork() == 0:                           # in child, write to pipe
        os.close(pipein)                         # close input side here
        child(pipeout)
    else:                                        # in parent, listen to pipe
        os.close(pipeout)                        # close output side here
        pipein = os.fdopen(pipein)               # make text mode input file object
        while True:
            line = pipein.readline()[:-1]        # blocks until data sent
            print('Parent %d got [%s] at %s' % (os.getpid(), line, time.time()))
parent()
| gpl-2.0 |
jay-tyler/ansible | lib/ansible/playbook/playbook_include.py | 26 | 5924 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError
from ansible.parsing.splitter import split_args, parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable
from ansible.template import Templar
class PlaybookInclude(Base, Conditional, Taggable):
    """Represents a playbook-level ``- include: other_playbook.yml`` entry.

    Loading one of these actually returns a fully-loaded Playbook object
    (or None when the include's conditional evaluates false).
    """

    _name = FieldAttribute(isa='string')
    _include = FieldAttribute(isa='string')
    _vars = FieldAttribute(isa='dict', default=dict())

    @staticmethod
    def load(data, basedir, variable_manager=None, loader=None):
        """Factory helper: build a PlaybookInclude and load *data* into it."""
        return PlaybookInclude().load_data(ds=data, basedir=basedir, variable_manager=variable_manager, loader=loader)

    def load_data(self, ds, basedir, variable_manager=None, loader=None):
        '''
        Overrides the base load_data(), as we're actually going to return a new
        Playbook() object rather than a PlaybookInclude object
        '''

        # import here to avoid a dependency loop
        from ansible.playbook import Playbook

        # first, we use the original parent method to correctly load the object
        # via the load_data/preprocess_data system we normally use for other
        # playbook objects
        new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)

        all_vars = dict()
        if variable_manager:
            all_vars = variable_manager.get_vars(loader=loader)

        templar = Templar(loader=loader, variables=all_vars)
        # skip the include entirely when its conditional evaluates false
        if not new_obj.evaluate_conditional(templar=templar, all_vars=all_vars):
            return None

        # then we use the object to load a Playbook
        pb = Playbook(loader=loader)

        file_name = new_obj.include
        if not os.path.isabs(file_name):
            # relative include paths are resolved against the including playbook
            file_name = os.path.join(basedir, file_name)

        pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)

        # finally, update each loaded playbook entry with any variables specified
        # on the included playbook and/or any tags which may have been set
        for entry in pb._entries:
            # include-level vars win over the entry's own vars on collision
            temp_vars = entry.vars.copy()
            temp_vars.update(new_obj.vars)
            entry.vars = temp_vars
            entry.tags = list(set(entry.tags).union(new_obj.tags))

        return pb

    def preprocess_data(self, ds):
        '''
        Regorganizes the data for a PlaybookInclude datastructure to line
        up with what we expect the proper attributes to be
        '''

        # NOTE(review): assert is stripped under python -O; raising
        # AnsibleParserError here would be more robust -- confirm intent.
        assert isinstance(ds, dict)

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            # preserve the YAML source position for error reporting
            new_ds.ansible_pos = ds.ansible_pos

        for (k,v) in ds.iteritems():
            if k == 'include':
                self._preprocess_include(ds, new_ds, k, v)
            else:
                # some basic error checking, to make sure vars are properly
                # formatted and do not conflict with k=v parameters
                # FIXME: we could merge these instead, but controlling the order
                #        in which they're encountered could be difficult
                if k == 'vars':
                    if 'vars' in new_ds:
                        raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
                    elif not isinstance(v, dict):
                        raise AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds)
                new_ds[k] = v

        return super(PlaybookInclude, self).preprocess_data(new_ds)

    def _preprocess_include(self, ds, new_ds, k, v):
        '''
        Splits the include line up into filename and parameters
        '''

        # The include line must include at least one item, which is the filename
        # to include. Anything after that should be regarded as a parameter to the include
        items = split_args(v)
        if len(items) == 0:
            raise AnsibleParserError("include statements must specify the file name to include", obj=ds)
        else:
            # FIXME/TODO: validate that items[0] is a file, which also
            # exists and is readable
            new_ds['include'] = items[0]
            if len(items) > 1:
                # rejoin the parameter portion of the arguments and
                # then use parse_kv() to get a dict of params back
                params = parse_kv(" ".join(items[1:]))
                if 'tags' in params:
                    # tags specified as k=v parameters override nothing; they
                    # are lifted out of the params and stored separately
                    new_ds['tags'] = params.pop('tags')
                if 'vars' in new_ds:
                    # FIXME: see fixme above regarding merging vars
                    raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
                new_ds['vars'] = params
| gpl-3.0 |
sekikn/ambari | ambari-server/src/main/python/setupAgent.py | 2 | 15576 | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import socket
import time
import sys
import logging
import os
from ambari_commons import subprocess32
from ambari_commons import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl
from ambari_commons.os_utils import get_ambari_repo_file_full_name
if OSCheck.is_windows_family():
import urllib2
from ambari_commons.exceptions import FatalException
from ambari_commons.os_utils import run_os_command
AMBARI_PASSPHRASE_VAR = "AMBARI_PASSPHRASE"
PROJECT_VERSION_DEFAULT = "DEFAULT"
INSTALL_DRIVE = os.path.splitdrive(__file__.replace('/', os.sep))[0]
AMBARI_INSTALL_ROOT = os.path.join(INSTALL_DRIVE, os.sep, "ambari")
AMBARI_AGENT_INSTALL_SYMLINK = os.path.join(AMBARI_INSTALL_ROOT, "ambari-agent")
def _ret_init(ret):
    """Return *ret* unchanged, or a fresh empty result dict when falsy.

    A result dict has the shape {'exitstatus': int, 'log': (stdout, stderr)}.
    """
    if ret:
        return ret
    return {'exitstatus': 0, 'log': ('', '')}
def _ret_append_stdout(ret, stdout):
    """Append *stdout* to the captured stdout stream of *ret*, in place.

    A newline separator is inserted only when there is existing output.
    """
    out, err = ret['log']
    if stdout:
        out = out + os.linesep + stdout if out else out + stdout
    ret['log'] = (out, err)
def _ret_append_stderr(ret, stderr):
    """Append *stderr* to the captured stderr stream of *ret*, in place.

    A newline separator is inserted only when there is existing output.
    """
    out, err = ret['log']
    if stderr:
        err = err + os.linesep + stderr if err else err + stderr
    ret['log'] = (out, err)
def _ret_merge(ret, retcode, stdout, stderr):
    """Merge one command outcome into *ret* in place and return it.

    The exit status is overwritten with *retcode*; *stdout*/*stderr* are
    appended (newline-separated) to the corresponding captured streams.
    """
    ret['exitstatus'] = retcode
    temp_stdout = ret['log'][0]
    temp_stderr = ret['log'][1]
    if stdout:
        if temp_stdout:
            temp_stdout += os.linesep
        temp_stdout += stdout
    if stderr:
        if temp_stderr:
            temp_stderr += os.linesep
        temp_stderr += stderr
    ret['log'] = (temp_stdout, temp_stderr)
    return ret


def _ret_merge2(ret, ret2):
    """Fold the result dict *ret2* into *ret* (exit status and both streams).

    Bug fix: this previously passed ret's OWN log back into _ret_merge,
    duplicating ret's output and silently discarding ret2's log entirely.
    """
    return _ret_merge(ret, ret2['exitstatus'], ret2['log'][0], ret2['log'][1])
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def execOsCommand(osCommand, tries=1, try_sleep=0, ret=None, cwd=None):
    """Windows: run *osCommand* up to *tries* times, sleeping *try_sleep*
    seconds between attempts, and merge EVERY attempt's output into the
    accumulated result dict (see _ret_init for its shape)."""
    ret = _ret_init(ret)
    for i in range(0, tries):
        if i > 0:
            time.sleep(try_sleep)
            _ret_append_stderr(ret, "Retrying " + str(osCommand))
        retcode, stdout, stderr = run_os_command(osCommand, cwd=cwd)
        _ret_merge(ret, retcode, stdout, stderr)
        if retcode == 0:
            break
    return ret


@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def execOsCommand(osCommand, tries=1, try_sleep=0, ret=None, cwd=None):
    """POSIX: run *osCommand* up to *tries* times; unlike the Windows variant
    only the LAST attempt's output is kept (the incoming *ret* is replaced)."""
    ret = _ret_init(ret)
    for i in range(0, tries):
        if i>0:
            time.sleep(try_sleep)
        # NOTE(review): stderr is not captured here, and communicate(0) relies
        # on 0 being falsy so nothing is written to stdin -- confirm intent.
        osStat = subprocess32.Popen(osCommand, stdout=subprocess32.PIPE, cwd=cwd)
        log = osStat.communicate(0)
        ret = {"exitstatus": osStat.returncode, "log": log}
        if ret['exitstatus'] == 0:
            break
    return ret
def installAgent(projectVersion, ret=None):
    """ Run install and make sure the agent install alright """
    # The command doesn't work with file mask ambari-agent*.rpm, so rename it on agent host
    if OSCheck.is_suse_family():
        Command = ["zypper", "--no-gpg-checks", "install", "-y", "ambari-agent-" + projectVersion]
    elif OSCheck.is_ubuntu_family():
        # add * to end of version in case of some test releases
        Command = ["apt-get", "install", "-y", "--allow-unauthenticated", "ambari-agent=" + projectVersion + "*"]
    elif OSCheck.is_windows_family():
        packageParams = "/AmbariRoot:" + AMBARI_INSTALL_ROOT
        Command = ["cmd", "/c", "choco", "install", "-y", "ambari-agent", "--version=" + projectVersion, "--params=\"" + packageParams + "\""]
    else:
        # RHEL/CentOS and other yum-based families
        Command = ["yum", "-y", "install", "--nogpgcheck", "ambari-agent-" + projectVersion]
    # retry up to 3 times with a 10s pause; repositories can be flaky
    return execOsCommand(Command, tries=3, try_sleep=10, ret=ret)
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def configureAgent(server_hostname, user_run_as, ret=None):
    #Customize ambari-agent.ini & register the Ambari Agent service
    # NOTE(review): user_run_as is accepted but unused on Windows -- the
    # service account appears to be configured elsewhere; confirm.
    agentSetupCmd = ["cmd", "/c", "ambari-agent.cmd", "setup", "--hostname=" + server_hostname]
    return execOsCommand(agentSetupCmd, tries=3, try_sleep=10, cwd=AMBARI_AGENT_INSTALL_SYMLINK, ret=ret)


@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def configureAgent(server_hostname, user_run_as, ret=None):
    """ Configure the agent so that it has all the configs knobs properly installed """
    # point the agent at the server by rewriting 'hostname=' in ambari-agent.ini
    # (sed -i.bak keeps a backup of the previous configuration)
    osCommand = ["sed", "-i.bak", "s/hostname=localhost/hostname=" + server_hostname +
                 "/g", "/etc/ambari-agent/conf/ambari-agent.ini"]
    ret = execOsCommand(osCommand, ret=ret)
    if ret['exitstatus'] != 0:
        return ret
    # set the user the agent daemon will run as
    osCommand = ["sed", "-i.bak", "s/run_as_user=.*$/run_as_user=" + user_run_as +
                 "/g", "/etc/ambari-agent/conf/ambari-agent.ini"]
    ret = execOsCommand(osCommand, ret=ret)
    return ret
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def runAgent(passPhrase, expected_hostname, user_run_as, verbose, ret=None):
    # NOTE(review): passPhrase/expected_hostname/user_run_as/verbose are unused
    # on Windows; the service was already configured by configureAgent().
    ret = _ret_init(ret)
    #Invoke ambari-agent restart as a child process
    agentRestartCmd = ["cmd", "/c", "ambari-agent.cmd", "restart"]
    return execOsCommand(agentRestartCmd, tries=3, try_sleep=10, cwd=AMBARI_AGENT_INSTALL_SYMLINK, ret=ret)


@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def runAgent(passPhrase, expected_hostname, user_run_as, verbose, ret=None):
    """POSIX: restart the agent as *user_run_as* (the passphrase is handed
    over via the AMBARI_PASSPHRASE environment variable) and echo the tail
    of the agent log for diagnostics.

    Returns a result dict whose exitstatus is the restart command's
    return code and whose log is the tail of the agent log (best effort).
    """
    os.environ[AMBARI_PASSPHRASE_VAR] = passPhrase
    vo = ""
    if verbose:
        vo = " -v"
    cmd = ['su', user_run_as, '-l', '-c', '/usr/sbin/ambari-agent restart --expected-hostname=%1s %2s' % (expected_hostname, vo)]
    log = ""
    p = subprocess32.Popen(cmd, stdout=subprocess32.PIPE)
    p.communicate()
    agent_retcode = p.returncode
    # best effort: retry a few times to fetch the agent log tail for the caller
    for i in range(3):
        time.sleep(1)
        ret = execOsCommand(["tail", "-20", "/var/log/ambari-agent/ambari-agent.log"], ret=ret)
        if (0 == ret['exitstatus']):
            try:
                log = ret['log']
            except Exception:
                log = "Log not found"
            print log
            break
    return {"exitstatus": agent_retcode, "log": log}
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def checkVerbose():
    # NOTE(review): on Windows this reports True whenever the installed agent
    # service is 'running' at all; it does not actually detect a --verbose
    # flag like the POSIX variant does -- confirm the approximation is intended.
    verbose = False
    if os.path.exists(AMBARI_AGENT_INSTALL_SYMLINK):
        agentStatusCmd = ["cmd", "/c", "ambari-agent.cmd", "status"]
        ret = execOsCommand(agentStatusCmd, tries=3, try_sleep=10, cwd=AMBARI_AGENT_INSTALL_SYMLINK)
        if ret["exitstatus"] == 0 and ret["log"][0].find("running") != -1:
            verbose = True
    return verbose


@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def checkVerbose():
    # True when an already-running AmbariAgent.py was started with -v/--verbose
    verbose = False
    cmds = ["bash", "-c", "ps aux | grep 'AmbariAgent.py' | grep ' \-v'"]
    cmdl = ["bash", "-c", "ps aux | grep 'AmbariAgent.py' | grep ' \--verbose'"]
    if execOsCommand(cmds)["exitstatus"] == 0 or execOsCommand(cmdl)["exitstatus"] == 0:
        verbose = True
    return verbose
@OsFamilyFuncImpl(OSConst.WINSRV_FAMILY)
def getOptimalVersion(initialProjectVersion):
    """Windows: pick the repository agent version matching the requested one.

    A prefix match is accepted (chocolatey pre-release suffixes). On success
    exitstatus is 0 and log holds the version string; on failure exitstatus
    is 1 and log holds the available-versions result (a (stdout, stderr)
    pair from execOsCommand -- note the differing log shape on this path).
    """
    optimalVersion = initialProjectVersion
    ret = findNearestAgentPackageVersion(optimalVersion)

    if ret["exitstatus"] == 0 and ret["log"][0].strip() != "" and initialProjectVersion \
       and ret["log"][0].strip().startswith(initialProjectVersion):
        optimalVersion = ret["log"][0].strip()
        retcode = 0
    else:
        ret = getAvailableAgentPackageVersions()
        retcode = 1
        optimalVersion = ret["log"]

    return {"exitstatus": retcode, "log": optimalVersion}


@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT)
def getOptimalVersion(initialProjectVersion):
    """POSIX: same contract as the Windows variant, but the repository
    version must match the requested version EXACTLY."""
    optimalVersion = initialProjectVersion
    ret = findNearestAgentPackageVersion(optimalVersion)

    if ret["exitstatus"] == 0 and ret["log"][0].strip() != "" \
       and ret["log"][0].strip() == initialProjectVersion:
        optimalVersion = ret["log"][0].strip()
        retcode = 0
    else:
        ret = getAvailableAgentPackageVersions()
        retcode = 1
        optimalVersion = ret["log"]

    return {"exitstatus": retcode, "log": optimalVersion}
def findNearestAgentPackageVersion(projectVersion):
    """Query the package manager for the ambari-agent version matching
    *projectVersion*; an empty string is widened to match any version.

    Returns an execOsCommand result dict whose log[0] holds the version.
    """
    if projectVersion == "":
        # a single space matches every row in the greps below
        projectVersion = " "
    if OSCheck.is_suse_family():
        Command = ["bash", "-c", "zypper --no-gpg-checks --non-interactive -q search -s --match-exact ambari-agent | grep '" + projectVersion +
                   "' | cut -d '|' -f 4 | head -n1 | sed -e 's/-\w[^:]*//1' "]
    elif OSCheck.is_windows_family():
        listPackagesCommand = ["cmd", "/c", "choco list ambari-agent --pre --all | findstr " + projectVersion + " > agentPackages.list"]
        execOsCommand(listPackagesCommand)
        Command = ["cmd", "/c", "powershell", "get-content agentPackages.list | select-object -last 1 | foreach-object {$_ -replace 'ambari-agent ', ''}"]
    elif OSCheck.is_ubuntu_family():
        if projectVersion == " ":
            Command = ["bash", "-c", "apt-cache -q show ambari-agent |grep 'Version\:'|cut -d ' ' -f 2|tr -d '\\n'|sed -s 's/[-|~][A-Za-z0-9]*//'"]
        else:
            Command = ["bash", "-c", "apt-cache -q show ambari-agent |grep 'Version\:'|cut -d ' ' -f 2|grep '" +
                       projectVersion + "'|tr -d '\\n'|sed -s 's/[-|~][A-Za-z0-9]*//'"]
    else:
        # yum-based families
        Command = ["bash", "-c", "yum -q list all ambari-agent | grep '" + projectVersion +
                   "' | sed -re 's/\s+/ /g' | cut -d ' ' -f 2 | head -n1 | sed -e 's/-\w[^:]*//1' "]
    return execOsCommand(Command)
def isAgentPackageAlreadyInstalled(projectVersion):
    """Return True when an ambari-agent package matching *projectVersion*
    is already installed on this host."""
    if OSCheck.is_ubuntu_family():
        query = ["bash", "-c", "dpkg-query -W -f='${Status} ${Version}\n' ambari-agent | grep -v deinstall | grep " + projectVersion]
    elif OSCheck.is_windows_family():
        query = ["cmd", "/c", "choco list ambari-agent --local-only | findstr ambari-agent | findstr " + projectVersion]
    else:
        query = ["bash", "-c", "rpm -qa | grep ambari-agent-"+projectVersion]
    result = execOsCommand(query)
    # installed only when the query succeeded AND printed something
    return result["exitstatus"] == 0 and result["log"][0].strip() != ""
def getAvailableAgentPackageVersions():
    """List every ambari-agent version offered by the configured package
    repositories, as a comma-separated string in the result's log[0]."""
    if OSCheck.is_suse_family():
        Command = ["bash", "-c",
                   "zypper --no-gpg-checks --non-interactive -q search -s --match-exact ambari-agent | grep ambari-agent | sed -re 's/\s+/ /g' | cut -d '|' -f 4 | tr '\\n' ', ' | sed -s 's/[-|~][A-Za-z0-9]*//g'"]
    elif OSCheck.is_windows_family():
        Command = ["cmd", "/c", "choco list ambari-agent --pre --all | findstr ambari-agent"]
    elif OSCheck.is_ubuntu_family():
        Command = ["bash", "-c",
                   "apt-cache -q show ambari-agent|grep 'Version\:'|cut -d ' ' -f 2| tr '\\n' ', '|sed -s 's/[-|~][A-Za-z0-9]*//g'"]
    else:
        # yum-based families
        Command = ["bash", "-c",
                   "yum -q list all ambari-agent | grep -E '^ambari-agent' | sed -re 's/\s+/ /g' | cut -d ' ' -f 2 | tr '\\n' ', ' | sed -s 's/[-|~][A-Za-z0-9]*//g'"]
    return execOsCommand(Command)
def checkServerReachability(host, port):
    """Check that the Ambari Server answers TCP connections at host:port.

    Returns a result dict: exitstatus 0 on success, 1 (with an explanatory
    message in 'log') when the connection cannot be established.
    """
    s = socket.socket()
    try:
        s.connect((host, port))
        return {"exitstatus": 0, "log": ""}
    except Exception:
        ret = {}
        ret["exitstatus"] = 1
        ret["log"] = "Host registration aborted. Ambari Agent host cannot reach Ambari Server '" +\
                     host+":"+str(port) + "'. " +\
                     "Please check the network connectivity between the Ambari Agent host and the Ambari Server"
        return ret
    finally:
        # Bug fix: the socket was previously leaked on both paths.
        s.close()
# Command line syntax help
# IsOptional  Index  Description
#             0      Expected host name
#             1      Password
#             2      Host name
#             3      User to run agent as
#    X        4      Project Version (Ambari)
#    X        5      Server port
def parseArguments(argv=None):
    """Validate and unpack the command-line arguments described above.

    Returns a result dict: on success exitstatus is 0 and 'parsed_args'
    holds (expected_hostname, passPhrase, hostname, user_run_as,
    projectVersion, server_port); otherwise exitstatus is non-zero and
    'log' explains the failure.
    """
    if argv is None:  # make sure that arguments was passed
        return {"exitstatus": 2, "log": "No arguments were passed"}
    args = argv[1:]  # shift path to script
    # Bug fix: four arguments (indices 0-3) are mandatory, but the old check
    # only required three, so reading args[3] below could raise IndexError.
    if len(args) < 4:
        return {"exitstatus": 1, "log": "Not all required arguments were passed"}

    expected_hostname = args[0]
    passPhrase = args[1]
    hostname = args[2]
    user_run_as = args[3]
    projectVersion = ""
    server_port = 8080

    if len(args) > 4:
        projectVersion = args[4]
    if len(args) > 5:
        try:
            server_port = int(args[5])
        except ValueError:
            # non-numeric port: fall back to the default
            server_port = 8080

    parsed_args = (expected_hostname, passPhrase, hostname, user_run_as, projectVersion, server_port)
    return {"exitstatus": 0, "log": "", "parsed_args": parsed_args}
def run_setup(argv=None):
    """Full agent bootstrap: parse args, check server reachability, resolve
    and install the agent package, configure it and (re)start it.

    Returns a result dict from whichever step terminated the flow.
    """
    # Parse passed arguments
    retcode = parseArguments(argv)
    if (retcode["exitstatus"] != 0):
        return retcode

    (expected_hostname, passPhrase, hostname, user_run_as, projectVersion, server_port) = retcode["parsed_args"]

    # fail fast when the server cannot be reached at all
    retcode = checkServerReachability(hostname, server_port)
    if (retcode["exitstatus"] != 0):
        return retcode

    # placeholder values mean "no specific version requested"
    if projectVersion == "null" or projectVersion == "{ambariVersion}" or projectVersion == "":
        retcode = getOptimalVersion("")
    else:
        retcode = getOptimalVersion(projectVersion)

    # NOTE(review): on the success path getOptimalVersion's log is a plain
    # string, so log[0] is its first character; on the failure path it is a
    # (stdout, stderr) pair -- the branches below depend on that asymmetry.
    if retcode["exitstatus"] == 0 and retcode["log"] != None and retcode["log"] != "" and retcode["log"][0].strip() != "":
        availableProjectVersion = retcode["log"].strip()
        if not isAgentPackageAlreadyInstalled(availableProjectVersion):
            # Verify that the ambari repo file is available before trying to install ambari-agent
            ambari_repo_file = get_ambari_repo_file_full_name()
            if os.path.exists(ambari_repo_file):
                retcode = installAgent(availableProjectVersion)
                if (not retcode["exitstatus"] == 0):
                    return retcode
            else:
                return {"exitstatus": 2, "log": "Ambari repo file not found: {0}".format(ambari_repo_file)}
        pass
    elif retcode["exitstatus"] == 1:
        if retcode["log"] != None and retcode["log"] != "" and retcode["log"][0].strip() != "":
            return {"exitstatus": 1, "log": "Desired version ("+projectVersion+") of ambari-agent package"
                                            " is not available."
                                            " Repository has following "
                                            "versions of ambari-agent:"+retcode["log"][0].strip()}
        else:
            # We are here because ambari-agent is not installed and version cannot be obtained from the repo file
            logmessage = "Desired version ("+projectVersion+") of ambari-agent package is not available."
            ambari_repo_file = get_ambari_repo_file_full_name()
            if not os.path.exists(ambari_repo_file):
                logmessage = logmessage + " " + "Ambari repo file not found: {0}".format(ambari_repo_file)
            return {"exitstatus": retcode["exitstatus"], "log": logmessage}
        pass
    else:
        return retcode

    retcode = configureAgent(hostname, user_run_as)
    if retcode['exitstatus'] != 0:
        return retcode
    # 'verbose' is the module-level global set by main()
    return runAgent(passPhrase, expected_hostname, user_run_as, verbose)
def main(argv=None):
    """Entry point: run the setup flow; errors are caught and converted to a
    failure result dict unless the agent is already running verbose."""
    #Check --verbose option if agent already running
    global verbose
    verbose = checkVerbose()
    if verbose:
        # verbose mode: let exceptions propagate with their full traceback
        exitcode = run_setup(argv)
    else:
        try:
            exitcode = run_setup(argv)
        except Exception, e:
            exitcode = {"exitstatus": -1, "log": str(e)}
    return exitcode
if __name__ == '__main__':
    # run the bootstrap and report its log/exit status back to the caller
    logging.basicConfig(level=logging.DEBUG)
    ret = main(sys.argv)
    retcode = ret["exitstatus"]
    print ret["log"]
    sys.exit(retcode)
| apache-2.0 |
milinbhakta/flaskjinja | flask1/Lib/site-packages/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported multi-byte charset in turn."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # Consultation order matters: UTF-8 first, then the CJK encodings.
        prober_classes = (
            UTF8Prober,
            SJISProber,
            EUCJPProber,
            GB2312Prober,
            EUCKRProber,
            CP949Prober,
            Big5Prober,
            EUCTWProber,
        )
        self._mProbers = [cls() for cls in prober_classes]
        self.reset()
| gpl-2.0 |
chand3040/cloud_that | common/lib/xmodule/xmodule/video_module/bumper_utils.py | 86 | 4238 | """
Utils for video bumper
"""
import copy
import json
import pytz
import logging
from collections import OrderedDict
from datetime import datetime, timedelta
from django.conf import settings
from .video_utils import set_query_parameter
try:
import edxval.api as edxval_api
except ImportError:
edxval_api = None
log = logging.getLogger(__name__)
def get_bumper_settings(video):
    """Return a deep copy of the video's bumper settings dict.

    Transcript urls are normalised by stripping the leading '/static/'
    prefix; the video instance itself is never mutated.
    """
    settings_copy = copy.deepcopy(getattr(video, 'video_bumper', {}))
    transcripts = settings_copy.get('transcripts', {})
    for lang in transcripts:
        # clean up /static/ prefix from bumper transcripts
        transcripts[lang] = transcripts[lang].replace("/static/", "")
    return settings_copy
def is_bumper_enabled(video):
    """
    Check if bumper enabled.
    - Feature flag ENABLE_VIDEO_BUMPER should be set to True
    - Do not show again button should not be clicked by user.
    - Current time minus periodicity must be greater that last time viewed
    - edxval_api should be presented
    Returns:
        bool.
    """
    bumper_last_view_date = getattr(video, 'bumper_last_view_date', None)
    utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)
    # SHOW_BUMPER_PERIODICITY is in seconds; with the default 0 a single view
    # suppresses the bumper until the flag/date state changes
    periodicity = settings.FEATURES.get('SHOW_BUMPER_PERIODICITY', 0)
    has_viewed = any([
        getattr(video, 'bumper_do_not_show_again'),
        (bumper_last_view_date and bumper_last_view_date + timedelta(seconds=periodicity) > utc_now)
    ])
    # never show the bumper while authoring in Studio
    is_studio = getattr(video.system, "is_author_mode", False)

    return bool(
        not is_studio and
        settings.FEATURES.get('ENABLE_VIDEO_BUMPER') and
        get_bumper_settings(video) and
        edxval_api and
        not has_viewed
    )
def bumperize(video):
    """
    Populate video with bumper settings, if they are presented.

    Always attaches a ``video.bumper`` dict; it stays disabled unless the
    feature checks pass and valid sources are found.
    """
    video.bumper = {
        'enabled': False,
        'edx_video_id': "",
        'transcripts': {},
        'metadata': None,
    }

    if not is_bumper_enabled(video):
        return

    bumper_settings = get_bumper_settings(video)
    try:
        video.bumper['edx_video_id'] = bumper_settings['video_id']
        video.bumper['transcripts'] = bumper_settings['transcripts']
    except (TypeError, KeyError):
        # settings malformed or missing keys: leave the bumper disabled
        log.warning(
            "Could not retrieve video bumper information from course settings"
        )
        return

    sources = get_bumper_sources(video)
    if not sources:
        return

    video.bumper.update({
        'metadata': bumper_metadata(video, sources),
        'enabled': True,  # Video poster needs this.
    })
def get_bumper_sources(video):
    """
    Get bumper sources from edxval.
    Returns list of sources.
    """
    try:
        val_profiles = ["desktop_webm", "desktop_mp4"]
        val_video_urls = edxval_api.get_urls_for_profiles(video.bumper['edx_video_id'], val_profiles)
        # keep only profiles that actually have a url
        # NOTE(review): filter() returns a list on Python 2 only; this module
        # appears to be Python 2 era -- confirm before running under Python 3.
        bumper_sources = filter(None, [val_video_urls[p] for p in val_profiles])
    except edxval_api.ValInternalError:
        # if no bumper sources, nothing will be showed
        log.warning(
            "Could not retrieve information from VAL for Bumper edx Video ID: %s.", video.bumper['edx_video_id']
        )
        return []
    return bumper_sources
def bumper_metadata(video, sources):
    """
    Generate bumper metadata.

    Arguments:
        video: the video module instance (supplies transcripts and handler urls).
        sources: list of bumper video source urls (see get_bumper_sources).
    Returns:
        An OrderedDict of frontend metadata for the bumper player.
    """
    transcripts = video.get_transcripts_info(is_bumper=True)
    unused_track_url, bumper_transcript_language, bumper_languages = video.get_transcripts_for_student(transcripts)

    # Bug fix: build the OrderedDict from a sequence of pairs. Passing a plain
    # dict literal (as before) discarded the intended key order before the
    # OrderedDict was even created, defeating its purpose.
    metadata = OrderedDict([
        ('saveStateUrl', video.system.ajax_url + '/save_user_state'),
        ('showCaptions', json.dumps(video.show_captions)),
        ('sources', sources),
        ('streams', ''),
        ('transcriptLanguage', bumper_transcript_language),
        ('transcriptLanguages', bumper_languages),
        ('transcriptTranslationUrl', set_query_parameter(
            video.runtime.handler_url(video, 'transcript', 'translation/__lang__').rstrip('/?'), 'is_bumper', 1
        )),
        ('transcriptAvailableTranslationsUrl', set_query_parameter(
            video.runtime.handler_url(video, 'transcript', 'available_translations').rstrip('/?'), 'is_bumper', 1
        )),
    ])
    return metadata
| agpl-3.0 |
ClovisIRex/Snake-django | env/lib/python3.6/site-packages/django/contrib/admindocs/urls.py | 97 | 1192 | from django.conf.urls import url
from django.contrib.admindocs import views
# URLconf for the admindocs browser: the index page, bookmarklets, and one
# index/detail view pair per documented kind (tags, filters, views, models,
# templates).
urlpatterns = [
    url(r'^$',
        views.BaseAdminDocsView.as_view(template_name='admin_doc/index.html'),
        name='django-admindocs-docroot'),
    url(r'^bookmarklets/$',
        views.BookmarkletsView.as_view(),
        name='django-admindocs-bookmarklets'),
    url(r'^tags/$',
        views.TemplateTagIndexView.as_view(),
        name='django-admindocs-tags'),
    url(r'^filters/$',
        views.TemplateFilterIndexView.as_view(),
        name='django-admindocs-filters'),
    url(r'^views/$',
        views.ViewIndexView.as_view(),
        name='django-admindocs-views-index'),
    url(r'^views/(?P<view>[^/]+)/$',
        views.ViewDetailView.as_view(),
        name='django-admindocs-views-detail'),
    url(r'^models/$',
        views.ModelIndexView.as_view(),
        name='django-admindocs-models-index'),
    url(r'^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
        views.ModelDetailView.as_view(),
        name='django-admindocs-models-detail'),
    url(r'^templates/(?P<template>.*)/$',
        views.TemplateDetailView.as_view(),
        name='django-admindocs-templates'),
]
| mit |
xadahiya/django | tests/model_validation/models.py | 166 | 1314 | from django.db import models
class ThingItem(object):
    """A (value, display) choice pair that iterates and measures like a 2-tuple."""

    def __init__(self, value, display):
        self.value = value
        self.display = display

    def __iter__(self):
        return iter([self.value, self.display])

    def __len__(self):
        return 2


class Things(object):
    """Iterable yielding a fixed pair of ThingItem choices on every pass."""

    def __iter__(self):
        return iter([ThingItem(1, 2), ThingItem(3, 4)])
class ThingWithIterableChoices(models.Model):
    """Model exercising choices= given as a lazy iterable of iterables."""
    # Testing choices= Iterable of Iterables
    # See: https://code.djangoproject.com/ticket/20430
    thing = models.CharField(max_length=100, blank=True, choices=Things())

    class Meta:
        # Models created as unmanaged as these aren't ever queried
        managed = False
class ManyToManyRel(models.Model):
    # Two M2M fields to the same target; related_name='+' disables the
    # reverse accessors so the duplicate relations do not clash.
    thing1 = models.ManyToManyField(ThingWithIterableChoices, related_name='+')
    thing2 = models.ManyToManyField(ThingWithIterableChoices, related_name='+')

    class Meta:
        # Models created as unmanaged as these aren't ever queried
        managed = False


class FKRel(models.Model):
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0;
    # confirm the targeted Django version before reusing this model.
    thing1 = models.ForeignKey(ThingWithIterableChoices, related_name='+')
    thing2 = models.ForeignKey(ThingWithIterableChoices, related_name='+')

    class Meta:
        # Models created as unmanaged as these aren't ever queried
        managed = False
| bsd-3-clause |
vseledkin/keras | examples/mnist_irnn.py | 70 | 3041 | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.initializations import normal, identity
from keras.layers.recurrent import SimpleRNN, LSTM
from keras.optimizers import RMSprop
from keras.utils import np_utils
'''
This is a reproduction of the IRNN experiment
with pixel-by-pixel sequential MNIST in
"A Simple Way to Initialize Recurrent Networks of Rectified Linear Units "
by Quoc V. Le, Navdeep Jaitly, Geoffrey E. Hinton
arXiv:1504.00941v2 [cs.NE] 7 Apr 201
http://arxiv.org/pdf/1504.00941v2.pdf
Optimizer is replaced with RMSprop which yields more stable and steady
improvement.
Reaches 0.93 train/test accuracy after 900 epochs (which roughly corresponds
to 1687500 steps in the original paper.)
'''
# Training hyperparameters (see the IRNN paper referenced above).
batch_size = 32
nb_classes = 10
nb_epochs = 200
hidden_units = 100

learning_rate = 1e-6
clip_norm = 1.0
BPTT_truncate = 28*28

# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# flatten each 28x28 image into a sequence of 784 single-pixel timesteps
X_train = X_train.reshape(X_train.shape[0], -1, 1)
X_test = X_test.reshape(X_test.shape[0], -1, 1)
X_train = X_train.astype("float32")
X_test = X_test.astype("float32")
# scale pixel values into [0, 1]
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

print('Evaluate IRNN...')
# IRNN: a plain ReLU RNN whose recurrent weights start as the identity matrix
model = Sequential()
model.add(SimpleRNN(input_dim=1, output_dim=hidden_units,
                    init=lambda shape: normal(shape, scale=0.001),
                    inner_init=lambda shape: identity(shape, scale=1.0),
                    activation='relu', truncate_gradient=BPTT_truncate))
model.add(Dense(hidden_units, nb_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=rmsprop)

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
          show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))

scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('IRNN test score:', scores[0])
print('IRNN test accuracy:', scores[1])

print('Compare to LSTM...')
# baseline: an LSTM with the same hidden size over the same pixel sequence
model = Sequential()
model.add(LSTM(1, hidden_units))
model.add(Dense(hidden_units, nb_classes))
model.add(Activation('softmax'))
rmsprop = RMSprop(lr=learning_rate)
model.compile(loss='categorical_crossentropy', optimizer=rmsprop)

model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epochs,
          show_accuracy=True, verbose=1, validation_data=(X_test, Y_test))
scores = model.evaluate(X_test, Y_test, show_accuracy=True, verbose=0)
print('LSTM test score:', scores[0])
print('LSTM test accuracy:', scores[1])
| mit |
dstaple/z3test | scripts/exereport.py | 3 | 1170 | # Copyright (c) 2015 Microsoft Corporation
# Simple script for executing a command and reporting the result by email
import sendmail
import sys
import subprocess
import tempfile
import config
import socket
import os
# Resolve a human-readable host name for the report.  Reverse DNS can fail
# (offline machines, misconfigured resolvers), so fall back gracefully.
# Catch Exception rather than using a bare except so that SystemExit and
# KeyboardInterrupt still propagate.
try:
    hostname = socket.gethostbyaddr(socket.gethostname())[0]
except Exception:
    try:
        # NOTE(review): os.uname()[0] is the OS name (e.g. 'Linux'), not the
        # host name (that would be os.uname()[1]); kept for compatibility
        # with the original behavior -- confirm which was intended.
        hostname = os.uname()[0]
    except Exception:
        hostname = "unknown"

# The command to execute is everything after this script's own name.
cmd = ' '.join(sys.argv[1:])

# Capture stdout/stderr into files so they can be attached to the email.
# The 'with' block guarantees both files are flushed and closed before the
# mail is sent, even if an unexpected error escapes.
with open('out.txt', 'w') as OUT, open('err.txt', 'w') as ERR:
    try:
        result = subprocess.call(sys.argv[1:], stdout=OUT, stderr=ERR)
    except Exception as ex:
        ERR.write('Python exception when trying to execute:\n%s\n' % cmd)
        ERR.write(str(ex))
        ERR.write('\n')
        result = 1

# Report the outcome by email and exit with a matching status code.
# sys.exit() is used instead of the site-module exit() builtin, which is
# not guaranteed to exist in all execution environments.
if result != 0:
    sendmail.send(config.DEVS,
                  "Failed to execute '%s' at '%s'" % (cmd, hostname),
                  "See attached files for standard output and standard error",
                  ["out.txt", "err.txt"])
    sys.exit(1)
else:
    sendmail.send(config.DEVS,
                  "Executed '%s' at '%s'" % (cmd, hostname),
                  "Command was successfully executed",
                  ["out.txt", "err.txt"])
    sys.exit(0)
| mit |
Piasy/proxy-searcher | site-packages/django/contrib/gis/maps/google/zoom.py | 92 | 6624 | from django.contrib.gis.geos import GEOSGeometry, LinearRing, Polygon, Point
from django.contrib.gis.maps.google.gmap import GoogleMapException
from math import pi, sin, log, exp, atan
# Constants used for degree to radian conversion, and vice-versa.
DTOR = pi / 180.
RTOD = 180. / pi


class GoogleZoom(object):
    """
    GoogleZoom is a utility for performing operations related to the zoom
    levels on Google Maps.

    This class is inspired by the OpenStreetMap Mapnik tile generation routine
    `generate_tiles.py`, and the article "How Big Is the World" (Hack #16) in
    "Google Maps Hacks" by Rich Gibson and Schuyler Erle.

    `generate_tiles.py` may be found at:
      http://trac.openstreetmap.org/browser/applications/rendering/mapnik/generate_tiles.py

    "Google Maps Hacks" may be found at http://safari.oreilly.com/0596101619
    """

    def __init__(self, num_zoom=19, tilesize=256):
        "Initializes the Google Zoom object."
        # Google's tilesize is 256x256, square tiles are assumed.
        self._tilesize = tilesize
        # The number of zoom levels
        self._nzoom = num_zoom
        # Initializing arrays to hold the parameters for each one of the
        # zoom levels.
        self._degpp = []  # Degrees per pixel
        self._radpp = []  # Radians per pixel
        self._npix = []   # 1/2 the number of pixels for a tile at the given zoom level
        # Incrementing through the zoom levels and populating the parameter arrays.
        z = tilesize  # The number of pixels per zoom level.
        for i in xrange(num_zoom):
            # Getting the degrees and radians per pixel, and the 1/2 the number of
            # for every zoom level.
            self._degpp.append(z / 360.)      # degrees per pixel
            self._radpp.append(z / (2 * pi))  # radians per pixel
            # NOTE(review): z / 2 is integer division under Python 2 when
            # tilesize is an int; intentional, since pixel offsets are whole.
            self._npix.append(z / 2)          # number of pixels to center of tile
            # Multiplying `z` by 2 for the next iteration.
            z *= 2

    def __len__(self):
        "Returns the number of zoom levels."
        return self._nzoom

    def get_lon_lat(self, lonlat):
        "Unpacks longitude, latitude from GEOS Points and 2-tuples."
        if isinstance(lonlat, Point):
            lon, lat = lonlat.coords
        else:
            lon, lat = lonlat
        return lon, lat

    def lonlat_to_pixel(self, lonlat, zoom):
        "Converts a longitude, latitude coordinate pair for the given zoom level."
        # Setting up, unpacking the longitude, latitude values and getting the
        # number of pixels for the given zoom level.
        lon, lat = self.get_lon_lat(lonlat)
        npix = self._npix[zoom]
        # Calculating the pixel x coordinate by multiplying the longitude value
        # with with the number of degrees/pixel at the given zoom level.
        px_x = round(npix + (lon * self._degpp[zoom]))
        # Creating the factor, and ensuring that 1 or -1 is not passed in as the
        # base to the logarithm.  Here's why:
        #  if fac = -1, we'll get log(0) which is undefined;
        #  if fac = 1, our logarithm base will be divided by 0, also undefined.
        fac = min(max(sin(DTOR * lat), -0.9999), 0.9999)
        # Calculating the pixel y coordinate (Mercator projection of latitude).
        px_y = round(npix + (0.5 * log((1 + fac)/(1 - fac)) * (-1.0 * self._radpp[zoom])))
        # Returning the pixel x, y to the caller of the function.
        return (px_x, px_y)

    def pixel_to_lonlat(self, px, zoom):
        "Converts a pixel to a longitude, latitude pair at the given zoom level."
        if len(px) != 2:
            raise TypeError('Pixel should be a sequence of two elements.')
        # Getting the number of pixels for the given zoom level.
        npix = self._npix[zoom]
        # Calculating the longitude value, using the degrees per pixel.
        lon = (px[0] - npix) / self._degpp[zoom]
        # Calculating the latitude value (inverse Mercator projection).
        lat = RTOD * ( 2 * atan(exp((px[1] - npix)/ (-1.0 * self._radpp[zoom]))) - 0.5 * pi)
        # Returning the longitude, latitude coordinate pair.
        return (lon, lat)

    def tile(self, lonlat, zoom):
        """
        Returns a Polygon corresponding to the region represented by a fictional
        Google Tile for the given longitude/latitude pair and zoom level.  This
        tile is used to determine the size of a tile at the given point.
        """
        # The given lonlat is the center of the tile.
        # NOTE(review): integer division under Python 2 (tilesize is an int).
        delta = self._tilesize / 2
        # Getting the pixel coordinates corresponding to the
        # the longitude/latitude.
        px = self.lonlat_to_pixel(lonlat, zoom)
        # Getting the lower-left and upper-right lat/lon coordinates
        # for the bounding box of the tile.
        ll = self.pixel_to_lonlat((px[0]-delta, px[1]-delta), zoom)
        ur = self.pixel_to_lonlat((px[0]+delta, px[1]+delta), zoom)
        # Constructing the Polygon, representing the tile and returning.
        return Polygon(LinearRing(ll, (ll[0], ur[1]), ur, (ur[0], ll[1]), ll), srid=4326)

    def get_zoom(self, geom):
        "Returns the optimal Zoom level for the given geometry."
        # Checking the input type.
        if not isinstance(geom, GEOSGeometry) or geom.srid != 4326:
            raise TypeError('get_zoom() expects a GEOS Geometry with an SRID of 4326.')
        # Getting the envelope for the geometry, and its associated width, height
        # and centroid.
        env = geom.envelope
        env_w, env_h = self.get_width_height(env.extent)
        center = env.centroid
        for z in xrange(self._nzoom):
            # Getting the tile at the zoom level.
            tile_w, tile_h = self.get_width_height(self.tile(center, z).extent)
            # When we span more than one tile, this is an approximately good
            # zoom level.
            if (env_w > tile_w) or (env_h > tile_h):
                if z == 0:
                    raise GoogleMapException('Geometry width and height should not exceed that of the Earth.')
                return z-1
        # Otherwise, we've zoomed in to the max.
        return self._nzoom-1

    def get_width_height(self, extent):
        """
        Returns the width and height for the given extent.
        """
        # Getting the lower-left, upper-left, and upper-right
        # coordinates from the extent.
        ll = Point(extent[:2])
        ul = Point(extent[0], extent[3])
        ur = Point(extent[2:])
        # Calculating the width and height.
        height = ll.distance(ul)
        width = ul.distance(ur)
        return width, height
| mit |
code-sauce/tensorflow | tensorflow/python/summary/impl/event_file_loader.py | 68 | 2971 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functionality for loading events from a record file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.util import event_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.framework import errors
from tensorflow.python.platform import app
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
class EventFileLoader(object):
  """An EventLoader is an iterator that yields Event protos."""

  def __init__(self, file_path):
    """Opens a record reader for the given events file.

    Args:
      file_path: Path of the record file containing serialized Event protos.

    Raises:
      ValueError: If `file_path` is None.
      IOError: If a record reader could not be created for the path.
    """
    if file_path is None:
      raise ValueError('A file path is required')
    file_path = resource_loader.readahead_file_path(file_path)
    logging.debug('Opening a record reader pointing at %s', file_path)
    with errors.raise_exception_on_not_ok_status() as status:
      self._reader = pywrap_tensorflow.PyRecordReader_New(
          compat.as_bytes(file_path), 0, compat.as_bytes(''), status)
    # Store it for logging purposes.
    self._file_path = file_path
    if not self._reader:
      raise IOError('Failed to open a record reader pointing to %s' % file_path)

  def Load(self):
    """Loads all new values from disk.

    Calling Load multiple times in a row will not 'drop' events as long as the
    return value is not iterated over.

    Yields:
      All values that were written to disk that have not been yielded yet.
    """
    while True:
      try:
        with errors.raise_exception_on_not_ok_status() as status:
          self._reader.GetNext(status)
      except (errors.DataLossError, errors.OutOfRangeError):
        # We ignore partial read exceptions, because a record may be truncated.
        # PyRecordReader holds the offset prior to the failed read, so retrying
        # will succeed.
        break
      event = event_pb2.Event()
      event.ParseFromString(self._reader.record())
      yield event
    logging.debug('No more events in %s', self._file_path)
def main(argv):
  """Prints every Event stored in the record file named on the command line."""
  if len(argv) != 2:
    print('Usage: event_file_loader <path-to-the-recordio-file>')
    return 1
  for event in EventFileLoader(argv[1]).Load():
    print(event)
if __name__ == '__main__':
  # app.run() parses flags and then invokes main(argv).
  app.run()
| apache-2.0 |
fujunwei/chromium-crosswalk | tools/chrome_proxy/integration_tests/chrome_proxy_metrics.py | 3 | 22546 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import time
from integration_tests import network_metrics
from telemetry.page import page_test
from telemetry.value import scalar
class ChromeProxyMetricException(page_test.MeasurementFailure):
  """Raised when a response violates a Chrome proxy test expectation."""
  pass


# Proxy name expected after the version prefix in the HTTP Via header.
CHROME_PROXY_VIA_HEADER = 'Chrome-Compression-Proxy'
class ChromeProxyResponse(network_metrics.HTTPResponse):
  """A single HTTP response extracted from a timeline event."""

  def __init__(self, event):
    super(ChromeProxyResponse, self).__init__(event)

  def ShouldHaveChromeProxyViaHeader(self):
    """Returns True if this response is expected to carry the proxy Via header."""
    resp = self.response
    # https and data: URLs never go through the compression proxy.
    if resp.url.startswith('https') or resp.url.startswith('data:'):
      return False
    # 304 Not Modified and cache hits are served locally, not by the proxy.
    if resp.status == 304 or resp.served_from_cache:
      return False
    # A response with no headers at all is malformed; warn and treat it as
    # exempt from the Via-header requirement.
    if not resp.headers:
      logging.warning('response for %s does not any have header '
                      '(refer=%s, status=%s)',
                      resp.url, resp.GetHeader('Referer'), resp.status)
      return False
    return True

  def HasChromeProxyViaHeader(self):
    """Returns True if the Via header names the Chrome compression proxy."""
    via_header = self.response.GetHeader('Via')
    if not via_header:
      return False
    # A Via entry is valid when a 4-character version prefix (e.g. "1.1 ")
    # is followed by the proxy name.
    return any(entry.strip(' ')[4:] == CHROME_PROXY_VIA_HEADER
               for entry in via_header.split(','))

  def IsValidByViaHeader(self):
    """Returns True if the Via header is consistent with expectations."""
    if self.ShouldHaveChromeProxyViaHeader():
      return self.HasChromeProxyViaHeader()
    return True

  def GetChromeProxyClientType(self):
    """Get the client type directive from the Chrome-Proxy request header.

    Returns:
      The client type directive from the Chrome-Proxy request header for the
      request that lead to this response.  For example, if the request header
      "Chrome-Proxy: c=android" is present, then this method would return
      "android".  Returns None if no client type directive is present.
    """
    request_headers = self.response.request_headers
    if 'Chrome-Proxy' not in request_headers:
      return None
    for directive in request_headers['Chrome-Proxy'].split(','):
      key, sep, value = directive.strip().partition('=')
      if sep and key.strip() == 'c':
        return value.strip()
    return None

  def HasChromeProxyLoFi(self):
    """Returns True if the request carried the LoFi ("q=low") directive."""
    request_headers = self.response.request_headers
    if 'Chrome-Proxy' not in request_headers:
      return False
    directives = (d.strip() for d in request_headers['Chrome-Proxy'].split(','))
    return any(d == 'q=low' for d in directives)
class ChromeProxyMetric(network_metrics.NetworkMetric):
  """A Chrome proxy timeline metric."""

  def __init__(self):
    super(ChromeProxyMetric, self).__init__()
    self.compute_data_saving = True

  def SetEvents(self, events):
    """Used for unittest."""
    self._events = events

  def ResponseFromEvent(self, event):
    """Wraps a timeline event in a ChromeProxyResponse."""
    return ChromeProxyResponse(event)

  def AddResults(self, tab, results):
    # Each test picks a specific AddResultsFor* method below instead.
    raise NotImplementedError

  def AddResultsForDataSaving(self, tab, results):
    """Counts responses served via the proxy, from cache, and directly."""
    resources_via_proxy = 0
    resources_from_cache = 0
    resources_direct = 0
    super(ChromeProxyMetric, self).AddResults(tab, results)
    for resp in self.IterResponses(tab):
      if resp.response.served_from_cache:
        resources_from_cache += 1
      if resp.HasChromeProxyViaHeader():
        resources_via_proxy += 1
      else:
        resources_direct += 1
    if resources_from_cache + resources_via_proxy + resources_direct == 0:
      raise ChromeProxyMetricException, (
          'Expected at least one response, but zero responses were received.')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'resources_via_proxy', 'count',
        resources_via_proxy))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'resources_from_cache', 'count',
        resources_from_cache))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'resources_direct', 'count', resources_direct))

  def AddResultsForHeaderValidation(self, tab, results):
    """Fails if any response has an invalid or missing proxy Via header."""
    via_count = 0
    for resp in self.IterResponses(tab):
      if resp.IsValidByViaHeader():
        via_count += 1
      else:
        r = resp.response
        raise ChromeProxyMetricException, (
            '%s: Via header (%s) is not valid (refer=%s, status=%d)' % (
                r.url, r.GetHeader('Via'), r.GetHeader('Referer'), r.status))
    if via_count == 0:
      raise ChromeProxyMetricException, (
          'Expected at least one response through the proxy, but zero such '
          'responses were received.')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'checked_via_header', 'count', via_count))

  def AddResultsForLatency(self, tab, results):
    """Records navigation-timing latency values from window.performance."""
    # TODO(bustamante): This is a hack to workaround crbug.com/467174,
    #   once fixed just pull down window.performance.timing object and
    #   reference that everywhere.
    load_event_start = tab.EvaluateJavaScript(
        'window.performance.timing.loadEventStart')
    navigation_start = tab.EvaluateJavaScript(
        'window.performance.timing.navigationStart')
    dom_content_loaded_event_start = tab.EvaluateJavaScript(
        'window.performance.timing.domContentLoadedEventStart')
    fetch_start = tab.EvaluateJavaScript(
        'window.performance.timing.fetchStart')
    request_start = tab.EvaluateJavaScript(
        'window.performance.timing.requestStart')
    domain_lookup_end = tab.EvaluateJavaScript(
        'window.performance.timing.domainLookupEnd')
    domain_lookup_start = tab.EvaluateJavaScript(
        'window.performance.timing.domainLookupStart')
    connect_end = tab.EvaluateJavaScript(
        'window.performance.timing.connectEnd')
    connect_start = tab.EvaluateJavaScript(
        'window.performance.timing.connectStart')
    response_end = tab.EvaluateJavaScript(
        'window.performance.timing.responseEnd')
    response_start = tab.EvaluateJavaScript(
        'window.performance.timing.responseStart')
    # NavigationStart relative markers in milliseconds.
    load_start = (float(load_event_start) - navigation_start)
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'load_start', 'ms', load_start))
    dom_content_loaded_start = (
        float(dom_content_loaded_event_start) - navigation_start)
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'dom_content_loaded_start', 'ms',
        dom_content_loaded_start))
    fetch_start = (float(fetch_start) - navigation_start)
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'fetch_start', 'ms', fetch_start,
        important=False))
    request_start = (float(request_start) - navigation_start)
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'request_start', 'ms', request_start,
        important=False))
    # Phase measurements in milliseconds.
    domain_lookup_duration = (float(domain_lookup_end) - domain_lookup_start)
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'domain_lookup_duration', 'ms',
        domain_lookup_duration, important=False))
    connect_duration = (float(connect_end) - connect_start)
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'connect_duration', 'ms', connect_duration,
        important=False))
    request_duration = (float(response_start) - request_start)
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'request_duration', 'ms', request_duration,
        important=False))
    response_duration = (float(response_end) - response_start)
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'response_duration', 'ms', response_duration,
        important=False))

  def AddResultsForClientVersion(self, tab, results):
    """Fails unless every response is a 200 with a valid Via header."""
    via_count = 0
    for resp in self.IterResponses(tab):
      r = resp.response
      if resp.response.status != 200:
        raise ChromeProxyMetricException, ('%s: Response is not 200: %d' %
                                           (r.url, r.status))
      if not resp.IsValidByViaHeader():
        raise ChromeProxyMetricException, ('%s: Response missing via header' %
                                           (r.url))
      via_count += 1
    if via_count == 0:
      raise ChromeProxyMetricException, (
          'Expected at least one response through the proxy, but zero such '
          'responses were received.')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'responses_via_proxy', 'count', via_count))

  def GetClientTypeFromRequests(self, tab):
    """Get the Chrome-Proxy client type value from requests made in this tab.

    Returns:
      The client type value from the first request made in this tab that
      specifies a client type in the Chrome-Proxy request header. See
      ChromeProxyResponse.GetChromeProxyClientType for more details about the
      Chrome-Proxy client type. Returns None if none of the requests made in
      this tab specify a client type.
    """
    for resp in self.IterResponses(tab):
      client_type = resp.GetChromeProxyClientType()
      if client_type:
        return client_type
    return None

  def AddResultsForClientType(self, tab, results, client_type,
                              bypass_for_client_type):
    """Checks that only the given client type is bypassed by the proxy."""
    via_count = 0
    bypass_count = 0
    for resp in self.IterResponses(tab):
      if resp.HasChromeProxyViaHeader():
        via_count += 1
        if client_type.lower() == bypass_for_client_type.lower():
          raise ChromeProxyMetricException, (
              '%s: Response for client of type "%s" has via header, but should '
              'be bypassed.' % (
                  resp.response.url, bypass_for_client_type, client_type))
      elif resp.ShouldHaveChromeProxyViaHeader():
        bypass_count += 1
        if client_type.lower() != bypass_for_client_type.lower():
          raise ChromeProxyMetricException, (
              '%s: Response missing via header. Only "%s" clients should '
              'bypass for this page, but this client is "%s".' % (
                  resp.response.url, bypass_for_client_type, client_type))
    if via_count + bypass_count == 0:
      raise ChromeProxyMetricException, (
          'Expected at least one response that was eligible to be proxied, but '
          'zero such responses were received.')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'via', 'count', via_count))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'bypass', 'count', bypass_count))

  def AddResultsForLoFi(self, tab, results):
    """Checks that all responses were low-fidelity (LoFi) placeholders."""
    lo_fi_count = 0
    for resp in self.IterResponses(tab):
      if resp.HasChromeProxyLoFi():
        lo_fi_count += 1
      else:
        raise ChromeProxyMetricException, (
            '%s: LoFi not in request header.' % (resp.response.url))
      # LoFi placeholder images are expected to be tiny.
      if resp.content_length > 100:
        raise ChromeProxyMetricException, (
            'Image %s is %d bytes. Expecting less than 100 bytes.' %
            (resp.response.url, resp.content_length))
    if lo_fi_count == 0:
      raise ChromeProxyMetricException, (
          'Expected at least one LoFi response, but zero such responses were '
          'received.')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'lo_fi', 'count', lo_fi_count))
    super(ChromeProxyMetric, self).AddResults(tab, results)

  def AddResultsForBypass(self, tab, results):
    """Checks that no response came through the proxy."""
    bypass_count = 0
    for resp in self.IterResponses(tab):
      if resp.HasChromeProxyViaHeader():
        r = resp.response
        raise ChromeProxyMetricException, (
            '%s: Should not have Via header (%s) (refer=%s, status=%d)' % (
                r.url, r.GetHeader('Via'), r.GetHeader('Referer'), r.status))
      bypass_count += 1
    if bypass_count == 0:
      raise ChromeProxyMetricException, (
          'Expected at least one response to be bypassed, but zero such '
          'responses were received.')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'bypass', 'count', bypass_count))

  def AddResultsForCorsBypass(self, tab, results):
    """Checks that CORS requests that get a 502 are retried and bypassed."""
    eligible_response_count = 0
    bypass_count = 0
    bypasses = {}
    for resp in self.IterResponses(tab):
      logging.warn('got a resource %s' % (resp.response.url))
    # First pass: count eligible/bypassed responses and note 502 URLs.
    for resp in self.IterResponses(tab):
      if resp.ShouldHaveChromeProxyViaHeader():
        eligible_response_count += 1
        if not resp.HasChromeProxyViaHeader():
          bypass_count += 1
        elif resp.response.status == 502:
          bypasses[resp.response.url] = 0
    # Second pass: count successful (200) retries for each 502 URL.
    for resp in self.IterResponses(tab):
      if resp.ShouldHaveChromeProxyViaHeader():
        if not resp.HasChromeProxyViaHeader():
          if resp.response.status == 200:
            if (bypasses.has_key(resp.response.url)):
              bypasses[resp.response.url] = bypasses[resp.response.url] + 1
    # Every 502 must be followed by exactly one successful retry.
    for url in bypasses:
      if bypasses[url] == 0:
        raise ChromeProxyMetricException, (
            '%s: Got a 502 without a subsequent 200' % (url))
      elif bypasses[url] > 1:
        raise ChromeProxyMetricException, (
            '%s: Got a 502 and multiple 200s: %d' % (url, bypasses[url]))
    if bypass_count == 0:
      raise ChromeProxyMetricException, (
          'At least one response should be bypassed. '
          '(eligible_response_count=%d, bypass_count=%d)\n' % (
              eligible_response_count, bypass_count))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'cors_bypass', 'count', bypass_count))

  def AddResultsForBlockOnce(self, tab, results):
    """Checks that exactly one eligible response bypassed the proxy."""
    eligible_response_count = 0
    bypass_count = 0
    for resp in self.IterResponses(tab):
      if resp.ShouldHaveChromeProxyViaHeader():
        eligible_response_count += 1
        if not resp.HasChromeProxyViaHeader():
          bypass_count += 1
    if eligible_response_count <= 1:
      raise ChromeProxyMetricException, (
          'There should be more than one DRP eligible response '
          '(eligible_response_count=%d, bypass_count=%d)\n' % (
              eligible_response_count, bypass_count))
    elif bypass_count != 1:
      raise ChromeProxyMetricException, (
          'Exactly one response should be bypassed. '
          '(eligible_response_count=%d, bypass_count=%d)\n' % (
              eligible_response_count, bypass_count))
    else:
      results.AddValue(scalar.ScalarValue(
          results.current_page, 'eligible_responses', 'count',
          eligible_response_count))
      results.AddValue(scalar.ScalarValue(
          results.current_page, 'bypass', 'count', bypass_count))

  def AddResultsForSafebrowsingOn(self, tab, results):
    """Records that the safebrowsing page timed out, as expected."""
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'safebrowsing', 'timeout responses', 1))

  def AddResultsForSafebrowsingOff(self, tab, results):
    """Checks that malware pages are served via the proxy when SB is off."""
    response_count = 0
    for resp in self.IterResponses(tab):
      # Data reduction proxy should return the real response for sites with
      # malware.
      response_count += 1
      if not resp.HasChromeProxyViaHeader():
        r = resp.response
        raise ChromeProxyMetricException, (
            '%s: Safebrowsing feature should be off for desktop and webview.\n'
            'Reponse: status=(%d, %s)\nHeaders:\n %s' % (
                r.url, r.status, r.status_text, r.headers))
    if response_count == 0:
      raise ChromeProxyMetricException, (
          'Safebrowsing test failed: No valid responses received')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'safebrowsing', 'responses', response_count))

  def AddResultsForHTTPFallback(self, tab, results):
    """Checks that eligible responses came through the HTTP fallback proxy."""
    via_fallback_count = 0
    for resp in self.IterResponses(tab):
      if resp.ShouldHaveChromeProxyViaHeader():
        # All responses should have come through the HTTP fallback proxy, which
        # means that they should have the via header, and if a remote port is
        # defined, it should be port 80.
        if (not resp.HasChromeProxyViaHeader() or
            (resp.remote_port and resp.remote_port != 80)):
          r = resp.response
          raise ChromeProxyMetricException, (
              '%s: Should have come through the fallback proxy.\n'
              'Reponse: remote_port=%s status=(%d, %s)\nHeaders:\n %s' % (
                  r.url, str(resp.remote_port), r.status, r.status_text,
                  r.headers))
        via_fallback_count += 1
    if via_fallback_count == 0:
      raise ChromeProxyMetricException, (
          'Expected at least one response through the fallback proxy, but zero '
          'such responses were received.')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'via_fallback', 'count', via_fallback_count))

  def AddResultsForHTTPToDirectFallback(self, tab, results,
                                        fallback_response_host):
    """Checks the fallback-host responses used the fallback proxy and the
    rest were bypassed."""
    via_fallback_count = 0
    bypass_count = 0
    responses = self.IterResponses(tab)
    # The first response(s) coming from fallback_response_host should be
    # through the HTTP fallback proxy.
    resp = next(responses, None)
    while resp and fallback_response_host in resp.response.url:
      if fallback_response_host in resp.response.url:
        if (not resp.HasChromeProxyViaHeader() or resp.remote_port != 80):
          r = resp.response
          raise ChromeProxyMetricException, (
              'Response for %s should have come through the fallback proxy.\n'
              'Response: remote_port=%s status=(%d, %s)\nHeaders:\n %s' % (
                  r.url, str(resp.remote_port), r.status, r.status_text,
                  r.headers))
        else:
          via_fallback_count += 1
      resp = next(responses, None)
    # All other responses should be bypassed.
    while resp:
      if resp.HasChromeProxyViaHeader():
        r = resp.response
        raise ChromeProxyMetricException, (
            'Response for %s should not have via header.\n'
            'Response: status=(%d, %s)\nHeaders:\n %s' % (
                r.url, r.status, r.status_text, r.headers))
      else:
        bypass_count += 1
      resp = next(responses, None)
    # At least one response should go through the http proxy and be bypassed.
    if via_fallback_count == 0 or bypass_count == 0:
      raise ChromeProxyMetricException(
          'There should be at least one response through the fallback proxy '
          '(actual %s) and at least one bypassed response (actual %s)' %
          (via_fallback_count, bypass_count))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'via_fallback', 'count', via_fallback_count))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'bypass', 'count', bypass_count))

  def AddResultsForReenableAfterBypass(
      self, tab, results, bypass_seconds_min, bypass_seconds_max):
    """Verify results for a re-enable after bypass test.

    Args:
      tab: the tab for the test.
      results: the results object to add the results values to.
      bypass_seconds_min: the minimum duration of the bypass.
      bypass_seconds_max: the maximum duration of the bypass.
    """
    bypass_count = 0
    via_count = 0
    for resp in self.IterResponses(tab):
      if resp.HasChromeProxyViaHeader():
        r = resp.response
        raise ChromeProxyMetricException, (
            'Response for %s should not have via header.\n'
            'Reponse: status=(%d, %s)\nHeaders:\n %s' % (
                r.url, r.status, r.status_text, r.headers))
      else:
        bypass_count += 1
    # Wait until 30 seconds before the bypass should expire, and fetch a page.
    # It should not have the via header because the proxy should still be
    # bypassed.
    time.sleep(bypass_seconds_min - 30)
    tab.ClearCache(force=True)
    before_metrics = ChromeProxyMetric()
    before_metrics.Start(results.current_page, tab)
    tab.Navigate('http://chromeproxy-test.appspot.com/default')
    tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 10)
    before_metrics.Stop(results.current_page, tab)
    for resp in before_metrics.IterResponses(tab):
      if resp.HasChromeProxyViaHeader():
        r = resp.response
        raise ChromeProxyMetricException, (
            'Response for %s should not have via header; proxy should still '
            'be bypassed.\nReponse: status=(%d, %s)\nHeaders:\n %s' % (
                r.url, r.status, r.status_text, r.headers))
      else:
        bypass_count += 1
    if bypass_count == 0:
      raise ChromeProxyMetricException, (
          'Expected at least one response to be bypassed before the bypass '
          'expired, but zero such responses were received.')
    # Wait until 30 seconds after the bypass should expire, and fetch a page. It
    # should have the via header since the proxy should no longer be bypassed.
    time.sleep((bypass_seconds_max + 30) - (bypass_seconds_min - 30))
    tab.ClearCache(force=True)
    after_metrics = ChromeProxyMetric()
    after_metrics.Start(results.current_page, tab)
    tab.Navigate('http://chromeproxy-test.appspot.com/default')
    tab.WaitForJavaScriptExpression('performance.timing.loadEventStart', 10)
    after_metrics.Stop(results.current_page, tab)
    for resp in after_metrics.IterResponses(tab):
      if not resp.HasChromeProxyViaHeader():
        r = resp.response
        raise ChromeProxyMetricException, (
            'Response for %s should have via header; proxy should no longer '
            'be bypassed.\nReponse: status=(%d, %s)\nHeaders:\n %s' % (
                r.url, r.status, r.status_text, r.headers))
      else:
        via_count += 1
    if via_count == 0:
      raise ChromeProxyMetricException, (
          'Expected at least one response through the proxy after the bypass '
          'expired, but zero such responses were received.')
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'bypass', 'count', bypass_count))
    results.AddValue(scalar.ScalarValue(
        results.current_page, 'via', 'count', via_count))
| bsd-3-clause |
ralph-mikera/RouteFlow-1 | pox/pox/web/webcore.py | 21 | 15821 | # Copyright 2011,2012 James McCauley
#
# This file is part of POX.
#
# POX is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# POX is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with POX. If not, see <http://www.gnu.org/licenses/>.
"""
Webcore is a basic web server framework based on the SocketServer-based
BaseHTTPServer that comes with Python. The big difference is that this
one can carve up URL-space by prefix, such that "/foo/*" gets handled by
a different request handler than "/bar/*". I refer to this as "splitting".
You should also be able to make a request handler written without splitting
run under Webcore. This may not work for all request handlers, but it
definitely works for some. :) The easiest way to do this is with the
wrapRequestHandler() function, like so:
from CGIHTTPServer import CGIHTTPRequestHandler as CHRH
core.WebServer.set_handler("/foo", wrapRequestHandler(CHRH))
.. now URLs under the /foo/ directory will let you browse through the
filesystem next to pox.py. If you create a cgi-bin directory next to
pox.py, you'll be able to run executables in it.
For this specific purpose, there's actually a SplitCGIRequestHandler
which demonstrates wrapping a normal request handler while also
customizing it a bit -- SplitCGIRequestHandler shoehorns in functionality
to use arbitrary base paths.
BaseHTTPServer is not very fast and needs to run on its own thread.
It'd actually be great to have a version of this written against, say,
CherryPy, but I did want to include a simple, dependency-free web solution.
"""
from SocketServer import ThreadingMixIn
from BaseHTTPServer import *
from time import sleep
import select
import threading
import random
import hashlib
import base64
from pox.core import core
import os
import posixpath
import urllib
import cgi
import errno
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
log = core.getLogger()
# Logger.getChild() is missing on some older Pythons; fall back to creating
# the child logger by its fully qualified name.
try:
  weblog = log.getChild("server")
except:
  # I'm tired of people running Python 2.6 having problems with this.
  #TODO: Remove this someday.
  weblog = core.getLogger("webcore.server")
def _setAttribs (parent, child):
attrs = ['command', 'request_version', 'close_connection',
'raw_requestline', 'requestline', 'path', 'headers', 'wfile',
'rfile', 'server', 'client_address']
for a in attrs:
setattr(child, a, getattr(parent, a))
setattr(child, 'parent', parent)
import SimpleHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
class SplitRequestHandler (BaseHTTPRequestHandler):
  """
  To write HTTP handlers for POX, inherit from this class instead of
  BaseHTTPRequestHandler.  The interface should be the same -- the same
  variables should be set, and the same do_GET(), etc. methods should
  be called.

  In addition, there will be a self.args which can be specified
  when you set_handler() on the server.
  """
  # Also a StreamRequestHandler

  def __init__ (self, parent, prefix, args):
    # Copy the parent handler's request state (headers, rfile/wfile, etc.)
    # onto this handler so the normal BaseHTTPRequestHandler API works here.
    _setAttribs(parent, self)
    self.parent = parent
    self.args = args
    self.prefix = prefix
    self._init()

  def _init (self):
    """
    This is called by __init__ during initialization.  You can
    override it to, for example, parse .args.
    """
    pass

  def handle_one_request (self):
    # The parent (top-level) handler owns request parsing and dispatch.
    raise RuntimeError("Not supported")

  def handle(self):
    # The parent (top-level) handler owns the connection loop.
    raise RuntimeError("Not supported")

  def _split_dispatch (self, command, handler = None):
    """Dispatches to handler.do_<METHOD>() (e.g. do_GET), or sends a 501."""
    if handler is None: handler = self
    # NOTE(review): dispatches on self.command; the `command` parameter is
    # unused here -- confirm that is intended.
    mname = 'do_' + self.command
    if not hasattr(handler, mname):
      self.send_error(501, "Unsupported method (%r)" % self.command)
      return
    method = getattr(handler, mname)
    return method()

  def log_request (self, code = '-', size = '-'):
    # Route request logging through the POX web logger, tagged with the
    # URL prefix this handler is mounted under.
    weblog.debug(self.prefix + (':"%s" %s %s' %
                 (self.requestline, str(code), str(size))))

  def log_error (self, fmt, *args):
    weblog.error(self.prefix + ':' + (fmt % args))

  def log_message (self, fmt, *args):
    weblog.info(self.prefix + ':' + (fmt % args))
# A tiny GIF (the hex begins with "GIF89a") served as the default favicon
# by CoreHandler.send_favicon(), stored as a hex string...
_favicon = ("47494638396110001000c206006a5797927bc18f83ada9a1bfb49ceabda"
+ "4f4ffffffffffff21f904010a0007002c000000001000100000034578badcfe30b20"
+ "1c038d4e27a0f2004e081e2172a4051942abba260309ea6b805ab501581ae3129d90"
+ "1275c6404b80a72f5abcd4a2454cb334dbd9e58e74693b97425e07002003b")
# ...and decoded here, two hex characters at a time, into a raw byte string.
_favicon = ''.join([chr(int(_favicon[n:n+2],16))
for n in xrange(0,len(_favicon),2)])
class CoreHandler (SplitRequestHandler):
"""
A default page to say hi from POX.
"""
def do_GET (self):
"""Serve a GET request."""
self.do_content(True)
def do_HEAD (self):
"""Serve a HEAD request."""
self.do_content(False)
def do_content (self, is_get):
# Dispatch on path: "/" -> info page, "/favicon.*" -> icon, else 404.
# is_get selects whether a response body is written (False for HEAD).
if self.path == "/":
self.send_info(is_get)
elif self.path.startswith("/favicon."):
self.send_favicon(is_get)
else:
self.send_error(404, "File not found on CoreHandler")
def send_favicon (self, is_get = False):
# Send the built-in favicon's headers; body only for GET.
self.send_response(200)
self.send_header("Content-type", "image/gif")
self.send_header("Content-Length", str(len(_favicon)))
self.end_headers()
if is_get:
self.wfile.write(_favicon)
def send_info (self, is_get = False):
# Build a small HTML status page listing the registered core
# components and the configured web prefixes.  self.args here is the
# SplitThreadedServer itself (launch() registers CoreHandler with
# httpd as its args), so self.args.matches is the dispatch table.
r = "<html><head><title>POX</title></head>\n"
r += "<body>\n<h1>POX Webserver</h1>\n<h2>Components</h2>\n"
r += "<ul>"
for k in sorted(core.components):
v = core.components[k]
r += "<li>%s - %s</li>\n" % (cgi.escape(str(k)), cgi.escape(str(v)))
r += "</ul>\n\n<h2>Web Prefixes</h2>"
r += "<ul>"
# Escape prefix, handler, and args for display.
m = [map(cgi.escape, map(str, [x[0],x[1],x[3]]))
for x in self.args.matches]
m.sort()
for v in m:
r += "<li><a href='{0}'>{0}</a> - {1} {2}</li>\n".format(*v)
r += "</ul></body></html>\n"
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", str(len(r)))
self.end_headers()
if is_get:
self.wfile.write(r)
class StaticContentHandler (SplitRequestHandler, SimpleHTTPRequestHandler):
# This is Python's SimpleHTTPRequestHandler, modified to serve from
# given directories and to inherit from SplitRequestHandler.
"""
A SplitRequestHandler for serving static content
This is largely the same as the Python SimpleHTTPRequestHandler, but
we modify it to serve from arbitrary directories at arbitrary
positions in the URL space.
"""
server_version = "StaticContentHandler/1.0"
def send_head (self):
# We override this and handle the directory redirection case because
# we want to include the per-split prefix.
path = self.translate_path(self.path)
if os.path.isdir(path):
if not self.path.endswith('/'):
self.send_response(301)
self.send_header("Location", self.prefix + self.path + "/")
self.end_headers()
return None
# Everything else (files, index pages) is handled by the base class.
return SimpleHTTPRequestHandler.send_head(self)
def list_directory (self, dirpath):
"""
Send headers for an HTML directory listing and return its body as a
file-like object, or None (after sending an error) on failure.
"""
# dirpath is an OS path
try:
d = os.listdir(dirpath)
except OSError as e:
# Map common listing failures to appropriate HTTP errors.
if e.errno == errno.EACCES:
self.send_error(403, "This directory is not listable")
elif e.errno == errno.ENOENT:
self.send_error(404, "This directory does not exist")
else:
self.send_error(400, "Unknown error")
return None
d.sort(key=str.lower)
r = StringIO()
r.write("<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 3.2 Final//EN\">\n")
path = posixpath.join(self.prefix, cgi.escape(self.path).lstrip("/"))
r.write("<html><head><title>" + path + "</title></head>\n")
r.write("<body><pre>")
# Emit a clickable breadcrumb link for each component of the path.
parts = path.rstrip("/").split("/")
r.write('<a href="/">/</a>')
for i,part in enumerate(parts):
link = urllib.quote("/".join(parts[:i+1]))
if i > 0: part += "/"
r.write('<a href="%s">%s</a>' % (link, cgi.escape(part)))
r.write("\n" + "-" * (0+len(path)) + "\n")
# Split entries into directories and files (dot-entries are hidden);
# directories are listed first, with a trailing slash.
dirs = []
files = []
for f in d:
if f.startswith("."): continue
if os.path.isdir(os.path.join(dirpath, f)):
dirs.append(f)
else:
files.append(f)
def entry (n, rest=''):
# Write one listing line for entry n; rest is a display suffix ("/").
link = urllib.quote(n)
name = cgi.escape(n)
r.write('<a href="%s">%s</a>\n' % (link,name+rest))
for f in dirs:
entry(f, "/")
for f in files:
entry(f)
r.write("</pre></body></html>")
r.seek(0)
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", str(len(r.getvalue())))
self.end_headers()
return r
def translate_path (self, path, include_prefix = True):
"""
Translate a web-path to a local filesystem path
Odd path elements (e.g., ones that contain local filesystem path
separators) are stripped.
"""
def fixpath (p):
# Decompose p into sanitized components: empty parts, ".", "..",
# anything containing a native path separator, and anything with a
# drive prefix are dropped.
o = []
skip = 0
while True:
p,tail = posixpath.split(p)
if p in ('/','') and tail == '': break
if tail in ('','.', os.path.curdir, os.path.pardir): continue
if os.path.sep in tail: continue
if os.path.altsep and os.path.altsep in tail: continue
if os.path.splitdrive(tail)[0] != '': continue
if tail == '..':
# NOTE(review): this branch looks unreachable -- ".." is already
# filtered above via os.path.pardir -- so the skip counting
# below never triggers.  Confirm before relying on it.
skip += 1
continue
if skip:
skip -= 1
continue
o.append(tail)
o.reverse()
return o
# Remove query string / fragment
if "?" in path: path = path[:path.index("?")]
if "#" in path: path = path[:path.index("#")]
path = fixpath(path)
if path:
path = os.path.join(*path)
else:
path = ''
if include_prefix:
# Anchor the sanitized relative path under the configured root.
path = os.path.join(os.path.abspath(self.args['root']), path)
return path
def wrapRequestHandler (handlerClass):
  """
  Build a set_handler()-compatible subclass of handlerClass.

  The generated type mixes SplitRequestHandler in ahead of handlerClass,
  so an existing BaseHTTPRequestHandler subclass can be mounted under a
  URL prefix without modification.
  """
  name = "Split" + handlerClass.__name__
  bases = (SplitRequestHandler, handlerClass, object)
  return type(name, bases, {})
from CGIHTTPServer import CGIHTTPRequestHandler
class SplitCGIRequestHandler (SplitRequestHandler,
CGIHTTPRequestHandler, object):
"""
Runs CGIRequestHandler serving from an arbitrary path.
This really should be a feature of CGIRequestHandler and the way of
implementing it here is scary and awful, but it at least sort of works.
"""
# Serializes dispatches: the working directory is process-global state,
# so only one request may run with the directory switched at a time.
__lock = threading.Lock()
def _split_dispatch (self, command):
with self.__lock:
# Temporarily chdir into the configured directory (self.args) so the
# request is handled with it as the current working directory.
olddir = os.getcwd()
try:
os.chdir(self.args)
return SplitRequestHandler._split_dispatch(self, command)
finally:
# Always restore the original working directory.
os.chdir(olddir)
class SplitterRequestHandler (BaseHTTPRequestHandler):
"""
The top-level request handler: reads each request and hands it off to
the registered SplitRequestHandler whose prefix matches the path.
"""
def __init__ (self, *args, **kw):
#self.rec = Recording(args[0])
#self.args = args
#self.matches = self.matches.sort(key=lambda e:len(e[0]),reverse=True)
#BaseHTTPRequestHandler.__init__(self, self.rec, *args[1:], **kw)
BaseHTTPRequestHandler.__init__(self, *args, **kw)
def log_request (self, code = '-', size = '-'):
# All splitter-level logging goes through POX's web logger.
weblog.debug('splitter:"%s" %s %s',
self.requestline, str(code), str(size))
def log_error (self, fmt, *args):
weblog.error('splitter:' + fmt % args)
def log_message (self, fmt, *args):
weblog.info('splitter:' + fmt % args)
def handle_one_request(self):
# Read and parse the request here, then construct the matching split
# handler and delegate the actual do_GET/do_POST/etc. to it.
self.raw_requestline = self.rfile.readline()
if not self.raw_requestline:
self.close_connection = 1
return
if not self.parse_request(): # An error code has been sent, just exit
return
handler = None
while True:
# server.matches is kept longest-prefix-first by set_handler(), so
# the first prefix match is the most specific one.  Each entry is
# (prefix, handler class, trim_prefix, args).
for m in self.server.matches:
if self.path.startswith(m[0]):
#print m,self.path
handler = m[1](self, m[0], m[3])
#pb = self.rec.getPlayback()
#handler = m[1](pb, *self.args[1:])
_setAttribs(self, handler)
if m[2]:
# Trim. Behavior is not "perfect"
handler.path = self.path[len(m[0]):]
if m[0].endswith('/'):
handler.path = '/' + handler.path
break
if handler is None:
# No prefix matched; try redirecting to the directory-style URL.
handler = self
if not self.path.endswith('/'):
# Handle splits like directories
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
break
break
# NOTE(review): when no handler matched, handler is self, and this
# class defines no _split_dispatch -- confirm this path can't raise
# an AttributeError after the redirect above.
return handler._split_dispatch(self.command)
class SplitThreadedServer(ThreadingMixIn, HTTPServer):
"""
Threaded HTTP server that dispatches to per-prefix split handlers.
"""
# Dispatch table consulted by SplitterRequestHandler; kept sorted
# longest-prefix-first by set_handler() below.
matches = [] # Tuples of (prefix, handler, trim_prefix, args)
# def __init__ (self, *args, **kw):
# BaseHTTPRequestHandler.__init__(self, *args, **kw)
# self.matches = self.matches.sort(key=lambda e:len(e[0]),reverse=True)
def set_handler (self, prefix, handler, args = None, trim_prefix = True):
"""
Register handler (a SplitRequestHandler subclass) for URL paths
beginning with prefix, replacing any existing entry for that prefix.
Passing handler=None just removes the entry.  args is handed to the
handler on construction; trim_prefix makes the dispatcher strip the
prefix from the path the handler sees.
"""
# Not very efficient
assert (handler is None) or (issubclass(handler, SplitRequestHandler))
self.matches = [m for m in self.matches if m[0] != prefix]
if handler is None: return
self.matches.append((prefix, handler, trim_prefix, args))
# Longest prefixes first so the most specific handler wins.
self.matches.sort(key=lambda e:len(e[0]),reverse=True)
def add_static_dir (self, www_path, local_path=None, relative=False):
"""
Serves a directory of static content.
www_path is the prefix of the URL that maps to this directory.
local_path is the directory to serve content from. If it's not
specified, it is assumed to be a directory with the same name as
www_path.
relative, if True, means that the local path is to be a sibling
of the calling module.
For an example, see the launch() function in this module.
"""
if not www_path.startswith('/'): www_path = '/' + www_path
if local_path is None:
local_path = www_path[1:]
if relative:
local_path = os.path.basename(local_path)
if relative:
# Resolve local_path against the directory of the module that
# called add_static_dir (one frame up the stack).
import inspect
path = inspect.stack()[1][1]
path = os.path.dirname(path)
local_path = os.path.join(path, local_path)
local_path = os.path.abspath(local_path)
log.debug("Serving %s at %s", local_path, www_path)
self.set_handler(www_path, StaticContentHandler,
{'root':local_path}, True);
def launch (address='', port=8000, static=False):
"""
Start the POX web server on address:port (on a daemon thread).
static may be True (serve the packaged www_root as /static), False
(no static content), or a comma-separated list of [prefix:]directory
entries to serve.
"""
httpd = SplitThreadedServer((address, int(port)), SplitterRequestHandler)
core.register("WebServer", httpd)
# The root page; note the server itself is passed as the handler args
# so CoreHandler.send_info() can enumerate httpd.matches.
httpd.set_handler("/", CoreHandler, httpd, True)
#httpd.set_handler("/foo", StaticContentHandler, {'root':'.'}, True)
#httpd.set_handler("/f", StaticContentHandler, {'root':'pox'}, True)
#httpd.set_handler("/cgis", SplitCGIRequestHandler, "pox/web/www_root")
if static is True:
httpd.add_static_dir('static', 'www_root', relative=True)
elif static is False:
pass
else:
# static is a comma-separated list of [prefix:]directory entries.
static = static.split(",")
for entry in static:
if entry.lower() == "":
# An empty entry selects the default packaged static directory.
httpd.add_static_dir('static', 'www_root', relative=True)
continue
if ':' not in entry:
# No explicit prefix: derive one from the directory's last
# non-empty path component.
directory = entry
prefix = os.path.split(directory)
if prefix[1] == '':
prefix = os.path.split(prefix[0])
prefix = prefix[1]
assert prefix != ''
else:
prefix,directory = entry.split(":")
directory = os.path.expanduser(directory)
httpd.add_static_dir(prefix, directory, relative=False)
def run ():
# Serve until the server dies; any error just ends the thread.
try:
log.debug("Listening on %s:%i" % httpd.socket.getsockname())
httpd.serve_forever()
except:
pass
log.info("Server quit")
# Daemon thread so the web server won't keep POX from shutting down.
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
| apache-2.0 |
tjhei/burnman-original | burnman/slb.py | 2 | 8946 | # BurnMan - a lower mantle toolkit
# Copyright (C) 2012, 2013, Heister, T., Unterborn, C., Rose, I. and Cottaar, S.
# Released under GPL v2 or later.
import scipy.optimize as opt
import birch_murnaghan as bm
import debye
import numpy as np
from equation_of_state import equation_of_state
import warnings
import matplotlib.pyplot as plt
class slb_base(equation_of_state):
"""
Base class for the finite strain-Mie-Grueneiesen-Debye equation of state detailed
in Stixrude and Lithgow-Bertelloni (2005). For the most part, the equations are
all third order in strain, but see further the slb2 and slb3 classes
"""
def __debye_temperature(self,x,params):
"""
Finite strain approximation for Debye Temperature [K]
x = ref_vol/vol
"""
# f is the finite (Eulerian) strain corresponding to compression x.
f = 1./2. * (pow(x, 2./3.) - 1.)
a1_ii = 6. * params['grueneisen_0'] # EQ 47
a2_iikk = -12.*params['grueneisen_0']+36.*pow(params['grueneisen_0'],2.) - 18.*params['q_0']*params['grueneisen_0'] # EQ 47
return params['Debye_0'] * np.sqrt(1. + a1_ii * f + 1./2. * a2_iikk*f*f)
def volume_dependent_q(self, x, params):
"""
Finite strain approximation for q, the isotropic volume strain
derivative of the grueneisen parameter
x = ref_vol/vol
"""
f = 1./2. * (pow(x, 2./3.) - 1.)
a1_ii = 6. * params['grueneisen_0'] # EQ 47
a2_iikk = -12.*params['grueneisen_0']+36.*pow(params['grueneisen_0'],2.) - 18.*params['q_0']*params['grueneisen_0'] # EQ 47
nu_o_nu0_sq = 1.+ a1_ii*f + (1./2.)*a2_iikk * f*f # EQ 41
# Grueneisen parameter at this strain, then q from its derivative.
gr = 1./6./nu_o_nu0_sq * (2.*f+1.) * ( a1_ii + a2_iikk*f )
q = 1./9.*(18.*gr - 6. - 1./2. / nu_o_nu0_sq * (2.*f+1.)*(2.*f+1.)*a2_iikk/gr)
return q
def __isotropic_eta_s(self, x, params):
"""
Finite strain approximation for eta_s_0, the isotropic shear
strain derivative of the grueneisen parameter
x = ref_vol/vol
"""
f = 1./2. * (pow(x, 2./3.) - 1.)
a2_s = -2.*params['grueneisen_0'] - 2.*params['eta_s_0'] # EQ 47
a1_ii = 6. * params['grueneisen_0'] # EQ 47
a2_iikk = -12.*params['grueneisen_0']+36.*pow(params['grueneisen_0'],2.) - 18.*params['q_0']*params['grueneisen_0'] # EQ 47
nu_o_nu0_sq = 1.+ a1_ii*f + (1./2.)*a2_iikk * pow(f,2.) # EQ 41
gr = 1./6./nu_o_nu0_sq * (2.*f+1.) * ( a1_ii + a2_iikk*f )
eta_s = - gr - (1./2. * pow(nu_o_nu0_sq,-1.) * pow((2.*f)+1.,2.)*a2_s) # EQ 46 NOTE the typo from Stixrude 2005
return eta_s
def volume(self, pressure, temperature, params):
"""
Returns molar volume at the pressure and temperature [m^3]

Solves P(V, T) = pressure for V: a bracketed root find (brentq) when
possible, otherwise a minimization of func**2 as a fallback.
"""
debye_T = lambda x : self.__debye_temperature(params['V_0']/x, params)
gr = lambda x : self.grueneisen_parameter(pressure, temperature, x, params)
E_th = lambda x : debye.thermal_energy(temperature, debye_T(x), params['n']) #thermal energy at temperature T
E_th_ref = lambda x : debye.thermal_energy(300., debye_T(x), params['n']) #thermal energy at reference temperature (300 K)
b_iikk= 9.*params['K_0'] # EQ 28
b_iikkmm= 27.*params['K_0']*(params['Kprime_0']-4.) # EQ 29
f = lambda x: 0.5*(pow(params['V_0']/x,2./3.)-1.) # EQ 24
# func(x) is P(x, T) - pressure; its zero is the volume we want.
func = lambda x: (1./3.)*(pow(1.+2.*f(x),5./2.))*((b_iikk*f(x)) \
+(0.5*b_iikkmm*pow(f(x),2.))) + gr(x)*(E_th(x) - E_th_ref(x))/x - pressure #EQ 21
# we need to have a sign change in [a,b] to find a zero. Let us start with a
# conservative guess:
a = 0.6*params['V_0']
b = 1.2*params['V_0']
# if we have a sign change, we are done:
if func(a)*func(b)<0:
return opt.brentq(func, a, b)
else:
# No bracket: minimize func**2 from V_0 and accept only a near-zero
# minimum; even then, warn that we may be outside the EOS's range.
tol = 0.0001
sol = opt.fmin(lambda x : func(x)*func(x), 1.0*params['V_0'], ftol=tol, full_output=1, disp=0)
if sol[1] > tol*2:
raise ValueError('Cannot find volume, likely outside of the range of validity for EOS')
else:
warnings.warn("May be outside the range of validity for EOS")
return sol[0]
def grueneisen_parameter(self, pressure, temperature, volume, params):
"""
Returns grueneisen parameter at the pressure, temperature, and volume
"""
# Compression ratio and corresponding finite strain.
x = params['V_0'] / volume
f = 1./2. * (pow(x, 2./3.) - 1.)
gruen_0 = params['grueneisen_0']
a1_ii = 6. * gruen_0 # EQ 47
a2_iikk = -12.*gruen_0 + 36.*gruen_0*gruen_0 - 18.*params['q_0']*gruen_0 # EQ 47
nu_o_nu0_sq = 1.+ a1_ii*f + (1./2.)*a2_iikk * f*f # EQ 41
return 1./6./nu_o_nu0_sq * (2.*f+1.) * ( a1_ii + a2_iikk*f )
def isothermal_bulk_modulus(self, pressure,temperature, volume, params):
"""
Returns isothermal bulk modulus at the pressure, temperature, and volume [Pa]
"""
debye_T = self.__debye_temperature(params['V_0']/volume, params)
gr = self.grueneisen_parameter(pressure, temperature, volume, params)
E_th = debye.thermal_energy(temperature, debye_T, params['n']) #thermal energy at temperature T
E_th_ref = debye.thermal_energy(300.,debye_T, params['n']) #thermal energy at reference temperature (300 K)
C_v = debye.heat_capacity_v(temperature, debye_T, params['n']) #heat capacity at temperature T
C_v_ref = debye.heat_capacity_v(300.,debye_T, params['n']) #heat capacity at reference temperature
q = self.volume_dependent_q(params['V_0']/volume, params)
# Cold (Birch-Murnaghan) part plus thermal-pressure corrections.
K = bm.bulk_modulus(volume, params) \
+ (gr + 1.-q)* ( gr / volume ) * (E_th - E_th_ref) \
- ( pow(gr , 2.) / volume )*(C_v*temperature - C_v_ref*300.)
return K
def adiabatic_bulk_modulus(self, pressure, temperature, volume, params):
"""
Returns adiabatic bulk modulus at the pressure, temperature, and volume [Pa]
"""
K_T=self.isothermal_bulk_modulus(pressure, temperature, volume, params)
alpha = self.thermal_expansivity(pressure, temperature, volume, params)
gr = self.grueneisen_parameter(pressure, temperature, volume, params)
# Standard thermodynamic relation K_S = K_T (1 + gamma alpha T).
K_S = K_T*(1. + gr * alpha * temperature)
return K_S
def shear_modulus(self, pressure, temperature, volume, params):
"""
Returns shear modulus at the pressure, temperature, and volume [Pa]
"""
debye_T = self.__debye_temperature(params['V_0']/volume, params)
eta_s = self.__isotropic_eta_s(params['V_0']/volume, params)
E_th = debye.thermal_energy(temperature ,debye_T, params['n'])
E_th_ref = debye.thermal_energy(300.,debye_T, params['n'])
# The expansion order (2 or 3) is set by the slb2/slb3 subclasses.
if self.order==2:
return bm.shear_modulus_second_order(volume, params) - eta_s * (E_th-E_th_ref) / volume
elif self.order==3:
return bm.shear_modulus_third_order(volume, params) - eta_s * (E_th-E_th_ref) / volume
else:
raise NotImplementedError("")
def heat_capacity_v(self, pressure, temperature, volume, params):
"""
Returns heat capacity at constant volume at the pressure, temperature, and volume [J/K/mol]
"""
debye_T = self.__debye_temperature(params['V_0']/volume, params)
return debye.heat_capacity_v(temperature, debye_T,params['n'])
def heat_capacity_p(self, pressure, temperature, volume, params):
"""
Returns heat capacity at constant pressure at the pressure, temperature, and volume [J/K/mol]
"""
alpha = self.thermal_expansivity(pressure, temperature, volume, params)
gr = self.grueneisen_parameter(pressure, temperature, volume, params)
C_v = self.heat_capacity_v(pressure, temperature, volume, params)
# C_p = C_v (1 + gamma alpha T).
C_p = C_v*(1. + gr * alpha * temperature)
return C_p
def thermal_expansivity(self, pressure, temperature, volume, params):
"""
Returns thermal expansivity at the pressure, temperature, and volume [1/K]
"""
C_v = self.heat_capacity_v(pressure, temperature, volume, params)
gr = self.grueneisen_parameter(pressure, temperature, volume, params)
K = self.isothermal_bulk_modulus(pressure, temperature, volume, params)
# alpha = gamma C_v / (K_T V).
alpha = gr * C_v / K / volume
return alpha
class slb3(slb_base):
    """
    SLB equation of state using a third-order finite strain expansion for
    the shear modulus.  This variant should be preferred, as it is more
    thermodynamically consistent.
    """
    def __init__(self):
        # Selects the third-order branch in slb_base.shear_modulus().
        self.order = 3
class slb2(slb_base):
    """
    SLB equation of state using a second-order finite strain expansion for
    the shear modulus.  In general this should not be used, but sometimes
    shear modulus data is fit to a second-order equation of state; in that
    case, use this class.  The moral is, be careful!
    """
    def __init__(self):
        # Selects the second-order branch in slb_base.shear_modulus().
        self.order = 2
asedunov/intellij-community | python/lib/Lib/pstats.py | 94 | 25940 | """Class for printing reports on profiled python code."""
# Class for printing reports on profiled python code. rev 1.0 4/1/94
#
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
#
# see profile.doc and profile.py for more info.
# Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind
#
# Permission to use, copy, modify, and distribute this Python software
# and its associated documentation for any purpose (subject to the
# restriction in the following sentence) without fee is hereby granted,
# provided that the above copyright notice appears in all copies, and
# that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of InfoSeek not be used in
# advertising or publicity pertaining to distribution of the software
# without specific, written prior permission. This permission is
# explicitly restricted to the copying and modification of the software
# to remain in Python, compiled Python, or other languages (such as C)
# wherein the modified or derived code is exclusively imported into a
# Python module.
#
# INFOSEEK CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL INFOSEEK CORPORATION BE LIABLE FOR ANY
# SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import sys
import os
import time
import marshal
import re
__all__ = ["Stats"]
class Stats:
"""This class is used for creating reports from data generated by the
Profile class. It is a "friend" of that class, and imports data either
by direct access to members of Profile class, or by reading in a dictionary
that was emitted (via marshal) from the Profile class.
The big change from the previous Profiler (in terms of raw functionality)
is that an "add()" method has been provided to combine Stats from
several distinct profile runs. Both the constructor and the add()
method now take arbitrarily many file names as arguments.
All the print methods now take an argument that indicates how many lines
to print. If the arg is a floating point number between 0 and 1.0, then
it is taken as a decimal percentage of the available lines to be printed
(e.g., .1 means print 10% of all available lines). If it is an integer,
it is taken to mean the number of lines of data that you wish to have
printed.
The sort_stats() method now processes some additional options (i.e., in
addition to the old -1, 0, 1, or 2). It takes an arbitrary number of
quoted strings to select the sort order. For example sort_stats('time',
'name') sorts on the major key of 'internal function time', and on the
minor key of 'the name of the function'. Look at the two tables in
sort_stats() and get_sort_arg_defs(self) for more examples.
All methods return self, so you can string together commands like:
Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
print_stats(5).print_callers(5)
"""
def __init__(self, *args, **kwds):
# I can't figure out how to explicitly specify a stream keyword arg
# with *args:
# def __init__(self, *args, stream=sys.stdout): ...
# so I use **kwds and squawk if something unexpected is passed in.
self.stream = sys.stdout
if "stream" in kwds:
self.stream = kwds["stream"]
del kwds["stream"]
if kwds:
keys = kwds.keys()
keys.sort()
extras = ", ".join(["%s=%s" % (k, kwds[k]) for k in keys])
raise ValueError, "unrecognized keyword args: %s" % extras
if not len(args):
arg = None
else:
arg = args[0]
args = args[1:]
# Load the first source (if any), then merge in the remainder.
self.init(arg)
self.add(*args)
def init(self, arg):
# Reset all accumulated state, then load statistics from arg (a file
# name, a profiler-like object, or None for an empty Stats).
self.all_callees = None # calc only if needed
self.files = []
self.fcn_list = None
self.total_tt = 0
self.total_calls = 0
self.prim_calls = 0
self.max_name_len = 0
self.top_level = {}
self.stats = {}
self.sort_arg_dict = {}
self.load_stats(arg)
trouble = 1
try:
self.get_top_level_stats()
trouble = 0
finally:
if trouble:
print >> self.stream, "Invalid timing data",
if self.files: print >> self.stream, self.files[-1],
print >> self.stream
def load_stats(self, arg):
# Accept None (empty stats), a marshalled stats file name, or an
# object exposing create_stats() (e.g. a Profile instance).
if not arg: self.stats = {}
elif isinstance(arg, basestring):
f = open(arg, 'rb')
self.stats = marshal.load(f)
f.close()
try:
# Prepend the file's modification time to the recorded name.
file_stats = os.stat(arg)
arg = time.ctime(file_stats.st_mtime) + " " + arg
except: # in case this is not unix
pass
self.files = [ arg ]
elif hasattr(arg, 'create_stats'):
arg.create_stats()
self.stats = arg.stats
arg.stats = {}
if not self.stats:
raise TypeError, "Cannot create or construct a %r object from '%r''" % (
self.__class__, arg)
return
def get_top_level_stats(self):
# Accumulate whole-run totals and record the profiler entry points
# (functions called directly by the profiler) in self.top_level.
for func, (cc, nc, tt, ct, callers) in self.stats.items():
self.total_calls += nc
self.prim_calls += cc
self.total_tt += tt
if callers.has_key(("jprofile", 0, "profiler")):
self.top_level[func] = None
if len(func_std_string(func)) > self.max_name_len:
self.max_name_len = len(func_std_string(func))
def add(self, *arg_list):
"""Merge the statistics from additional profile sources into self."""
if not arg_list: return self
# Recursively fold in all but the first argument, then merge it too.
if len(arg_list) > 1: self.add(*arg_list[1:])
other = arg_list[0]
if type(self) != type(other) or self.__class__ != other.__class__:
other = Stats(other)
self.files += other.files
self.total_calls += other.total_calls
self.prim_calls += other.prim_calls
self.total_tt += other.total_tt
for func in other.top_level:
self.top_level[func] = None
if self.max_name_len < other.max_name_len:
self.max_name_len = other.max_name_len
# Any previously computed sort order is invalidated by the merge.
self.fcn_list = None
for func, stat in other.stats.iteritems():
if func in self.stats:
old_func_stat = self.stats[func]
else:
old_func_stat = (0, 0, 0, 0, {},)
self.stats[func] = add_func_stats(old_func_stat, stat)
return self
def dump_stats(self, filename):
"""Write the profile data to a file we know how to load back."""
f = file(filename, 'wb')
try:
marshal.dump(self.stats, f)
finally:
f.close()
# list the tuple indices and directions for sorting,
# along with some printable description
sort_arg_dict_default = {
"calls" : (((1,-1), ), "call count"),
"cumulative": (((3,-1), ), "cumulative time"),
"file" : (((4, 1), ), "file name"),
"line" : (((5, 1), ), "line number"),
"module" : (((4, 1), ), "file name"),
"name" : (((6, 1), ), "function name"),
"nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
"pcalls" : (((0,-1), ), "call count"),
"stdname" : (((7, 1), ), "standard name"),
"time" : (((2,-1), ), "internal time"),
}
def get_sort_arg_defs(self):
"""Expand all abbreviations that are unique."""
# Builds (and caches) a dict mapping every unambiguous prefix of a
# sort key to its definition; ambiguous prefixes are collected in
# bad_list and removed afterwards.  (Note: the local name "dict"
# shadows the builtin here.)
if not self.sort_arg_dict:
self.sort_arg_dict = dict = {}
bad_list = {}
for word, tup in self.sort_arg_dict_default.iteritems():
fragment = word
while fragment:
# NOTE(review): this check is redundant -- the while condition
# already guarantees fragment is non-empty.
if not fragment:
break
if fragment in dict:
bad_list[fragment] = 0
break
dict[fragment] = tup
fragment = fragment[:-1]
for word in bad_list:
del dict[word]
return self.sort_arg_dict
def sort_stats(self, *field):
# Sort the cached function list by one or more of the keys in
# sort_arg_dict_default (or an old-style integer code).
if not field:
self.fcn_list = 0
return self
if len(field) == 1 and type(field[0]) == type(1):
# Be compatible with old profiler
field = [ {-1: "stdname",
0:"calls",
1:"time",
2: "cumulative" } [ field[0] ] ]
sort_arg_defs = self.get_sort_arg_defs()
# Accumulate the (index, direction) pairs and a readable description.
sort_tuple = ()
self.sort_type = ""
connector = ""
for word in field:
sort_tuple = sort_tuple + sort_arg_defs[word][0]
self.sort_type += connector + sort_arg_defs[word][1]
connector = ", "
stats_list = []
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
stats_list.append((cc, nc, tt, ct) + func +
(func_std_string(func), func))
stats_list.sort(TupleComp(sort_tuple).compare)
self.fcn_list = fcn_list = []
for tuple in stats_list:
fcn_list.append(tuple[-1])
return self
def reverse_order(self):
# Reverse the current sort order (no-op if nothing is sorted yet).
if self.fcn_list:
self.fcn_list.reverse()
return self
def strip_dirs(self):
# Rebuild self.stats with directory components stripped from all file
# names (merging entries that collide after stripping).
oldstats = self.stats
self.stats = newstats = {}
max_name_len = 0
for func, (cc, nc, tt, ct, callers) in oldstats.iteritems():
newfunc = func_strip_path(func)
if len(func_std_string(newfunc)) > max_name_len:
max_name_len = len(func_std_string(newfunc))
newcallers = {}
for func2, caller in callers.iteritems():
newcallers[func_strip_path(func2)] = caller
if newfunc in newstats:
# Two distinct paths collapsed to the same stripped name.
newstats[newfunc] = add_func_stats(
newstats[newfunc],
(cc, nc, tt, ct, newcallers))
else:
newstats[newfunc] = (cc, nc, tt, ct, newcallers)
old_top = self.top_level
self.top_level = new_top = {}
for func in old_top:
new_top[func_strip_path(func)] = None
self.max_name_len = max_name_len
# Cached orderings/callee maps refer to the old keys; drop them.
self.fcn_list = None
self.all_callees = None
return self
def calc_callees(self):
# Invert the callers mapping into self.all_callees (lazily, once).
if self.all_callees: return
self.all_callees = all_callees = {}
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
if not func in all_callees:
all_callees[func] = {}
for func2, caller in callers.iteritems():
if not func2 in all_callees:
all_callees[func2] = {}
all_callees[func2][func] = caller
return
#******************************************************************
# The following functions support actual printing of reports
#******************************************************************
# Optional "amount" is either a line count, or a percentage of lines.
def eval_print_amount(self, sel, list, msg):
# Restrict list by sel: a regex (string), a fraction in [0, 1), or an
# absolute line count; append a note to msg if the list shrank.
new_list = list
if type(sel) == type(""):
new_list = []
for func in list:
if re.search(sel, func_std_string(func)):
new_list.append(func)
else:
count = len(list)
if type(sel) == type(1.0) and 0.0 <= sel < 1.0:
count = int(count * sel + .5)
new_list = list[:count]
elif type(sel) == type(1) and 0 <= sel < count:
count = sel
new_list = list[:count]
if len(list) != len(new_list):
msg = msg + " List reduced from %r to %r due to restriction <%r>\n" % (
len(list), len(new_list), sel)
return new_list, msg
def get_print_list(self, sel_list):
# Apply each restriction in sel_list to the (sorted or unsorted)
# function list; print the ordering message and return the name-column
# width together with the functions to print.
width = self.max_name_len
if self.fcn_list:
list = self.fcn_list[:]
msg = " Ordered by: " + self.sort_type + '\n'
else:
list = self.stats.keys()
msg = " Random listing order was used\n"
for selection in sel_list:
list, msg = self.eval_print_amount(selection, list, msg)
count = len(list)
if not list:
return 0, list
print >> self.stream, msg
if count < len(self.stats):
# Narrow the column to fit only the functions actually printed.
width = 0
for func in list:
if len(func_std_string(func)) > width:
width = len(func_std_string(func))
return width+2, list
def print_stats(self, *amount):
# Print the standard report: sources, entry points, totals, then one
# line per function (restricted by amount; see eval_print_amount).
for filename in self.files:
print >> self.stream, filename
if self.files: print >> self.stream
indent = ' ' * 8
for func in self.top_level:
print >> self.stream, indent, func_get_function_name(func)
print >> self.stream, indent, self.total_calls, "function calls",
if self.total_calls != self.prim_calls:
print >> self.stream, "(%d primitive calls)" % self.prim_calls,
print >> self.stream, "in %.3f CPU seconds" % self.total_tt
print >> self.stream
width, list = self.get_print_list(amount)
if list:
self.print_title()
for func in list:
self.print_line(func)
print >> self.stream
print >> self.stream
return self
def print_callees(self, *amount):
# For each selected function, print the functions it called.
width, list = self.get_print_list(amount)
if list:
self.calc_callees()
self.print_call_heading(width, "called...")
for func in list:
if func in self.all_callees:
self.print_call_line(width, func, self.all_callees[func])
else:
self.print_call_line(width, func, {})
print >> self.stream
print >> self.stream
return self
def print_callers(self, *amount):
# For each selected function, print the functions that called it.
width, list = self.get_print_list(amount)
if list:
self.print_call_heading(width, "was called by...")
for func in list:
cc, nc, tt, ct, callers = self.stats[func]
self.print_call_line(width, func, callers, "<-")
print >> self.stream
print >> self.stream
return self
def print_call_heading(self, name_size, column_title):
print >> self.stream, "Function ".ljust(name_size) + column_title
# print sub-header only if we have new-style callers
subheader = False
for cc, nc, tt, ct, callers in self.stats.itervalues():
if callers:
value = callers.itervalues().next()
subheader = isinstance(value, tuple)
break
if subheader:
print >> self.stream, " "*name_size + " ncalls tottime cumtime"
def print_call_line(self, name_size, source, call_dict, arrow="->"):
# Print source followed by one line per related function in call_dict.
# Values are either (nc, cc, tt, ct) tuples (new-style profile data)
# or plain call counts (old-style).
print >> self.stream, func_std_string(source).ljust(name_size) + arrow,
if not call_dict:
print >> self.stream
return
clist = call_dict.keys()
clist.sort()
indent = ""
for func in clist:
name = func_std_string(func)
value = call_dict[func]
if isinstance(value, tuple):
nc, cc, tt, ct = value
if nc != cc:
substats = '%d/%d' % (nc, cc)
else:
substats = '%d' % (nc,)
substats = '%s %s %s %s' % (substats.rjust(7+2*len(indent)),
f8(tt), f8(ct), name)
left_width = name_size + 1
else:
substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3]))
left_width = name_size + 3
print >> self.stream, indent*left_width + substats
# Only the first line starts at the arrow; the rest are indented.
indent = " "
def print_title(self):
print >> self.stream, ' ncalls tottime percall cumtime percall',
print >> self.stream, 'filename:lineno(function)'
def print_line(self, func): # hack : should print percentages
# One report row: call counts, total/per-call internal time,
# cumulative/per-primitive-call time, then the function's name.
cc, nc, tt, ct, callers = self.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
print >> self.stream, c.rjust(9),
print >> self.stream, f8(tt),
if nc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(tt/nc),
print >> self.stream, f8(ct),
if cc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(ct/cc),
print >> self.stream, func_std_string(func)
class TupleComp:
    """Generic comparator for pairs of tuples.

    An instance records a list of (tuple-index, direction) pairs, from
    most to least significant, where direction is 1 for ascending and
    -1 for descending.  Its compare() method can then be passed as the
    comparison function to a sort over a list of such tuples.
    """
    def __init__(self, comp_select_list):
        self.comp_select_list = comp_select_list
    def compare(self, left, right):
        # Walk the keys in significance order; the first differing
        # element decides, honoring that key's sort direction.
        for index, direction in self.comp_select_list:
            a = left[index]
            b = right[index]
            if a == b:
                continue
            return -direction if a < b else direction
        return 0
#**************************************************************************
# func_name is a triple (file:string, line:int, name:string)
def func_strip_path(func_name):
    """Return the (filename, line, name) triple with the filename reduced
    to its basename (directory components stripped)."""
    filename, lineno, funcname = func_name
    return os.path.basename(filename), lineno, funcname
def func_get_function_name(func):
    """Return the bare function name from a (file, line, name) triple."""
    _filename, _lineno, name = func
    return name
def func_std_string(func_name):  # match what old profile produced
    """Format a (file, line, name) triple the way the old profile module did."""
    if func_name[:2] == ('~', 0):
        # Special pseudo-entry for built-in functions: render "<name>"
        # as "{name}", anything else verbatim.
        name = func_name[2]
        if name.startswith('<') and name.endswith('>'):
            return '{%s}' % name[1:-1]
        return name
    return "%s:%d(%s)" % func_name
#**************************************************************************
# The following functions combine statistics for pairs of functions.
# The bulk of the processing involves correctly handling "call" lists,
# such as callers and callees.
#**************************************************************************
def add_func_stats(target, source):
    """Add together all the stats for two profile entries.

    Each entry is a (cc, nc, tt, ct, callers) tuple; the numeric fields
    are summed and the caller dicts are merged via add_callers().
    """
    s_cc, s_nc, s_tt, s_ct, s_callers = source
    t_cc, t_nc, t_tt, t_ct, t_callers = target
    return (s_cc + t_cc, s_nc + t_nc, s_tt + t_tt, s_ct + t_ct,
            add_callers(t_callers, s_callers))
def add_callers(target, source):
    """Combine two caller lists in a single list."""
    new_callers = {}
    # Start from a copy of target's entries (Python 2 dict iteration).
    for func, caller in target.iteritems():
        new_callers[func] = caller
    for func, caller in source.iteritems():
        if func in new_callers:
            # NOTE(review): `+` assumes the two values support addition.
            # For tuple-valued caller entries this concatenates rather than
            # adds element-wise -- confirm against the stats format in use.
            new_callers[func] = caller + new_callers[func]
        else:
            new_callers[func] = caller
    return new_callers
def count_calls(callers):
    """Sum the caller statistics to get total number of calls received."""
    # Python 2 dict iteration; each value is the call count from one caller.
    nc = 0
    for calls in callers.itervalues():
        nc += calls
    return nc
#**************************************************************************
# The following functions support printing of reports
#**************************************************************************
def f8(x):
    """Format a number right-aligned in 8 columns with 3 decimal places."""
    return format(x, "8.3f")
#**************************************************************************
# Statistics browser added by ESR, April 2001
#**************************************************************************
if __name__ == '__main__':
    import cmd
    try:
        # readline is optional; importing it enables line editing in cmdloop.
        import readline
    except ImportError:
        pass
    class ProfileBrowser(cmd.Cmd):
        # Interactive browser over a Stats object.  Each do_* method
        # implements one command and the matching help_* documents it;
        # do_* returning a truthy value stops the command loop.
        def __init__(self, profile=None):
            cmd.Cmd.__init__(self)
            self.prompt = "% "
            if profile is not None:
                self.stats = Stats(profile)
                self.stream = self.stats.stream
            else:
                self.stats = None
                self.stream = sys.stdout
        def generic(self, fn, line):
            # Parse the argument line into ints, fractions in [0, 1] and
            # plain strings (regexes), then dispatch to self.stats.<fn>().
            args = line.split()
            processed = []
            for term in args:
                try:
                    processed.append(int(term))
                    continue
                except ValueError:
                    pass
                try:
                    frac = float(term)
                    if frac > 1 or frac < 0:
                        print >> self.stream, "Fraction argument must be in [0, 1]"
                        continue
                    processed.append(frac)
                    continue
                except ValueError:
                    pass
                processed.append(term)
            if self.stats:
                getattr(self.stats, fn)(*processed)
            else:
                print >> self.stream, "No statistics object is loaded."
            return 0
        def generic_help(self):
            print >> self.stream, "Arguments may be:"
            print >> self.stream, "* An integer maximum number of entries to print."
            print >> self.stream, "* A decimal fractional number between 0 and 1, controlling"
            print >> self.stream, "  what fraction of selected entries to print."
            print >> self.stream, "* A regular expression; only entries with function names"
            print >> self.stream, "  that match it are printed."
        def do_add(self, line):
            self.stats.add(line)
            return 0
        def help_add(self):
            print >> self.stream, "Add profile info from given file to current statistics object."
        def do_callees(self, line):
            return self.generic('print_callees', line)
        def help_callees(self):
            print >> self.stream, "Print callees statistics from the current stat object."
            self.generic_help()
        def do_callers(self, line):
            return self.generic('print_callers', line)
        def help_callers(self):
            print >> self.stream, "Print callers statistics from the current stat object."
            self.generic_help()
        def do_EOF(self, line):
            print >> self.stream, ""
            return 1
        def help_EOF(self):
            print >> self.stream, "Leave the profile brower."
        def do_quit(self, line):
            return 1
        def help_quit(self):
            print >> self.stream, "Leave the profile brower."
        def do_read(self, line):
            if line:
                try:
                    self.stats = Stats(line)
                except IOError, args:
                    print >> self.stream, args[1]
                    return
                self.prompt = line + "% "
            elif len(self.prompt) > 2:
                # NOTE(review): [-2:] yields the trailing "% " rather than
                # the previously loaded filename, and the assignment has no
                # further effect; suspect [:-2] plus a reload was intended
                # -- confirm before relying on "read" with no argument.
                line = self.prompt[-2:]
            else:
                print >> self.stream, "No statistics object is current -- cannot reload."
            return 0
        def help_read(self):
            print >> self.stream, "Read in profile data from a specified file."
        def do_reverse(self, line):
            self.stats.reverse_order()
            return 0
        def help_reverse(self):
            print >> self.stream, "Reverse the sort order of the profiling report."
        def do_sort(self, line):
            # Only sort when every word is a valid sort-key abbreviation.
            abbrevs = self.stats.get_sort_arg_defs()
            if line and not filter(lambda x,a=abbrevs: x not in a,line.split()):
                self.stats.sort_stats(*line.split())
            else:
                print >> self.stream, "Valid sort keys (unique prefixes are accepted):"
                for (key, value) in Stats.sort_arg_dict_default.iteritems():
                    print >> self.stream, "%s -- %s" % (key, value[1])
            return 0
        def help_sort(self):
            print >> self.stream, "Sort profile data according to specified keys."
            print >> self.stream, "(Typing `sort' without arguments lists valid keys.)"
        def complete_sort(self, text, *args):
            # readline tab-completion for sort keys.
            return [a for a in Stats.sort_arg_dict_default if a.startswith(text)]
        def do_stats(self, line):
            return self.generic('print_stats', line)
        def help_stats(self):
            print >> self.stream, "Print statistics from the current stat object."
            self.generic_help()
        def do_strip(self, line):
            self.stats.strip_dirs()
            return 0
        def help_strip(self):
            print >> self.stream, "Strip leading path information from filenames in the report."
        def postcmd(self, stop, line):
            if stop:
                return stop
            return None
    import sys
    if len(sys.argv) > 1:
        # Optional command-line argument: a profile dump to load at startup.
        initprofile = sys.argv[1]
    else:
        initprofile = None
    try:
        browser = ProfileBrowser(initprofile)
        print >> browser.stream, "Welcome to the profile statistics browser."
        browser.cmdloop()
        print >> browser.stream, "Goodbye."
    except KeyboardInterrupt:
        pass
# That's all, folks.
| apache-2.0 |
brandond/ansible | lib/ansible/plugins/terminal/exos.py | 58 | 2026 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.errors import AnsibleConnectionFailure
from ansible.plugins.terminal import TerminalBase
class TerminalModule(TerminalBase):
    # Terminal plugin for Extreme Networks EXOS devices: prompt and error
    # detection plus initial session setup for the CLI connection.

    # Matches the EXOS CLI prompt, e.g. "Slot-1 Switch.1 #" or "* X440.2 >".
    terminal_stdout_re = [
        re.compile(br"[\r\n](?:! )?(?:\* )?(?:\(.*\) )?(?:Slot-\d+ )?\S+\.\d+ (?:[>#]) ?$")
    ]
    # Output patterns that indicate a command failed on the device.
    terminal_stderr_re = [
        re.compile(br"% ?Error"),
        re.compile(br"% ?Bad secret"),
        re.compile(br"[\r\n%] Bad passwords"),
        re.compile(br"invalid input", re.I),
        re.compile(br"(?:incomplete|ambiguous) command", re.I),
        re.compile(br"connection timed out", re.I),
        re.compile(br"[^\r\n]+ not found"),
        re.compile(br"'[^']' +returned error code: ?\d+"),
        re.compile(br"Bad mask", re.I),
        re.compile(br"% ?(\S+) ?overlaps with ?(\S+)", re.I),
        re.compile(br"[%\S] ?Error: ?[\s]+", re.I),
        re.compile(br"[%\S] ?Informational: ?[\s]+", re.I),
        re.compile(br"%% Invalid .* at '\^' marker.", re.I),
    ]
    def on_open_shell(self):
        # Disable paging and widen the terminal so command output is not
        # interrupted by pager prompts or truncated mid-line.
        try:
            for cmd in (b'disable clipaging', b'configure cli columns 256'):
                self._exec_cli_command(cmd)
        except AnsibleConnectionFailure:
            raise AnsibleConnectionFailure('unable to set terminal parameters')
| gpl-3.0 |
wave72/hubzilla-dev | library/blueimp_upload/server/gae-python/main.py | 245 | 5845 | # -*- coding: utf-8 -*-
#
# jQuery File Upload Plugin GAE Python Example 2.2.0
# https://github.com/blueimp/jQuery-File-Upload
#
# Copyright 2011, Sebastian Tschan
# https://blueimp.net
#
# Licensed under the MIT license:
# http://www.opensource.org/licenses/MIT
#
from __future__ import with_statement
from google.appengine.api import files, images
from google.appengine.ext import blobstore, deferred
from google.appengine.ext.webapp import blobstore_handlers
import json
import re
import urllib
import webapp2
WEBSITE = 'https://blueimp.github.io/jQuery-File-Upload/'
MIN_FILE_SIZE = 1 # bytes
MAX_FILE_SIZE = 5000000 # bytes
IMAGE_TYPES = re.compile('image/(gif|p?jpeg|(x-)?png)')
ACCEPT_FILE_TYPES = IMAGE_TYPES
THUMBNAIL_MODIFICATOR = '=s80' # max width / height
EXPIRATION_TIME = 300 # seconds
def cleanup(blob_keys):
    # Deferred task: delete the uploaded blobs once EXPIRATION_TIME passes.
    blobstore.delete(blob_keys)
class UploadHandler(webapp2.RequestHandler):
    """Accepts uploads for the jQuery File Upload demo, stores them in the
    blobstore and answers with the plugin's JSON protocol; blobs are
    deleted after EXPIRATION_TIME via a deferred task."""
    def initialize(self, request, response):
        # Attach permissive CORS headers to every response.
        super(UploadHandler, self).initialize(request, response)
        self.response.headers['Access-Control-Allow-Origin'] = '*'
        self.response.headers[
            'Access-Control-Allow-Methods'
        ] = 'OPTIONS, HEAD, GET, POST, PUT, DELETE'
        self.response.headers[
            'Access-Control-Allow-Headers'
        ] = 'Content-Type, Content-Range, Content-Disposition'
    def validate(self, file):
        # Sets file['error'] and returns False when a constraint is violated.
        if file['size'] < MIN_FILE_SIZE:
            file['error'] = 'File is too small'
        elif file['size'] > MAX_FILE_SIZE:
            file['error'] = 'File is too big'
        elif not ACCEPT_FILE_TYPES.match(file['type']):
            file['error'] = 'Filetype not allowed'
        else:
            return True
        return False
    def get_file_size(self, file):
        file.seek(0, 2)  # Seek to the end of the file
        size = file.tell()  # Get the position of EOF
        file.seek(0)  # Reset the file position to the beginning
        return size
    def write_blob(self, data, info):
        # Store |data| in the blobstore, preserving name and MIME type.
        blob = files.blobstore.create(
            mime_type=info['type'],
            _blobinfo_uploaded_filename=info['name']
        )
        with files.open(blob, 'a') as f:
            f.write(data)
        files.finalize(blob)
        return files.blobstore.get_blob_key(blob)
    def handle_upload(self):
        # Process every file field in the POST, returning one result dict
        # per file: name/type/size plus URLs, or an 'error' message.
        results = []
        blob_keys = []
        for name, fieldStorage in self.request.POST.items():
            if type(fieldStorage) is unicode:
                # Plain (non-file) form fields arrive as unicode; skip them.
                continue
            result = {}
            # Strip any Windows-style path prefix from the client filename.
            result['name'] = re.sub(
                r'^.*\\',
                '',
                fieldStorage.filename
            )
            result['type'] = fieldStorage.type
            result['size'] = self.get_file_size(fieldStorage.file)
            if self.validate(result):
                blob_key = str(
                    self.write_blob(fieldStorage.value, result)
                )
                blob_keys.append(blob_key)
                result['deleteType'] = 'DELETE'
                result['deleteUrl'] = self.request.host_url +\
                    '/?key=' + urllib.quote(blob_key, '')
                if (IMAGE_TYPES.match(result['type'])):
                    try:
                        result['url'] = images.get_serving_url(
                            blob_key,
                            secure_url=self.request.host_url.startswith(
                                'https'
                            )
                        )
                        result['thumbnailUrl'] = result['url'] +\
                            THUMBNAIL_MODIFICATOR
                    except:  # Could not get an image serving url
                        pass
                if not 'url' in result:
                    # Fall back to the DownloadHandler route.
                    result['url'] = self.request.host_url +\
                        '/' + blob_key + '/' + urllib.quote(
                            result['name'].encode('utf-8'), '')
            results.append(result)
        # Demo behaviour: schedule deletion of the blobs after the expiry.
        deferred.defer(
            cleanup,
            blob_keys,
            _countdown=EXPIRATION_TIME
        )
        return results
    def options(self):
        pass
    def head(self):
        pass
    def get(self):
        # The root URL just redirects to the project website.
        self.redirect(WEBSITE)
    def post(self):
        # Some transports tunnel DELETE through POST with a _method field.
        if (self.request.get('_method') == 'DELETE'):
            return self.delete()
        result = {'files': self.handle_upload()}
        s = json.dumps(result, separators=(',', ':'))
        redirect = self.request.get('redirect')
        if redirect:
            # Legacy iframe transport: interpolate the JSON into the URL.
            return self.redirect(str(
                redirect.replace('%s', urllib.quote(s, ''), 1)
            ))
        if 'application/json' in self.request.headers.get('Accept'):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)
    def delete(self):
        key = self.request.get('key') or ''
        blobstore.delete(key)
        s = json.dumps({key: True}, separators=(',', ':'))
        if 'application/json' in self.request.headers.get('Accept'):
            self.response.headers['Content-Type'] = 'application/json'
        self.response.write(s)
class DownloadHandler(blobstore_handlers.BlobstoreDownloadHandler):
    """Serves an uploaded blob as a forced download."""
    def get(self, key, filename):
        if not blobstore.get(key):
            self.error(404)
        else:
            # Prevent browsers from MIME-sniffing the content-type:
            self.response.headers['X-Content-Type-Options'] = 'nosniff'
            # Cache for the expiration time:
            self.response.headers['Cache-Control'] = 'public,max-age=%d' % EXPIRATION_TIME
            # Send the file forcing a download dialog:
            self.send_blob(key, save_as=filename, content_type='application/octet-stream')
# WSGI route table: uploads at the root, downloads at /<key>/<filename>.
app = webapp2.WSGIApplication(
    [
        ('/', UploadHandler),
        ('/([^/]+)/([^/]+)', DownloadHandler)
    ],
    debug=True
)
| mit |
3quarterstack/simple_blog | django/contrib/gis/db/backends/postgis/creation.py | 106 | 4554 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.utils.functional import cached_property
class PostGISCreation(DatabaseCreation):
    """Database-creation backend that adds PostGIS spatial columns and
    indexes on top of the plain PostgreSQL creation logic."""
    # Index method / operator-class names used for spatial indexes.
    geom_index_type = 'GIST'
    geom_index_ops = 'GIST_GEOMETRY_OPS'
    geom_index_ops_nd = 'GIST_GEOMETRY_OPS_ND'
    @cached_property
    def template_postgis(self):
        # Name of the PostGIS template database, or None when it is absent.
        template_postgis = getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis')
        cursor = self.connection.cursor()
        cursor.execute('SELECT 1 FROM pg_database WHERE datname = %s LIMIT 1;', (template_postgis,))
        if cursor.fetchone():
            return template_postgis
        return None
    def sql_indexes_for_field(self, model, f, style):
        "Return any spatial index creation SQL for the field."
        from django.contrib.gis.db.models.fields import GeometryField
        output = super(PostGISCreation, self).sql_indexes_for_field(model, f, style)
        if isinstance(f, GeometryField):
            gqn = self.connection.ops.geo_quote_name
            qn = self.connection.ops.quote_name
            db_table = model._meta.db_table
            if f.geography or self.connection.ops.geometry:
                # Geography and Geometry (PostGIS 2.0+) columns are
                # created normally.
                pass
            else:
                # Geometry columns are created by `AddGeometryColumn`
                # stored procedure.
                output.append(style.SQL_KEYWORD('SELECT ') +
                              style.SQL_TABLE('AddGeometryColumn') + '(' +
                              style.SQL_TABLE(gqn(db_table)) + ', ' +
                              style.SQL_FIELD(gqn(f.column)) + ', ' +
                              style.SQL_FIELD(str(f.srid)) + ', ' +
                              style.SQL_COLTYPE(gqn(f.geom_type)) + ', ' +
                              style.SQL_KEYWORD(str(f.dim)) + ');')
                if not f.null:
                    # Add a NOT NULL constraint to the field
                    output.append(style.SQL_KEYWORD('ALTER TABLE ') +
                                  style.SQL_TABLE(qn(db_table)) +
                                  style.SQL_KEYWORD(' ALTER ') +
                                  style.SQL_FIELD(qn(f.column)) +
                                  style.SQL_KEYWORD(' SET NOT NULL') + ';')
            if f.spatial_index:
                # Spatial indexes created the same way for both Geometry and
                # Geography columns.
                # PostGIS 2.0 does not support GIST_GEOMETRY_OPS. So, on 1.5
                # we use GIST_GEOMETRY_OPS, on 2.0 we use either "nd" ops
                # which are fast on multidimensional cases, or just plain
                # gist index for the 2d case.
                if f.geography:
                    index_ops = ''
                elif self.connection.ops.geometry:
                    if f.dim > 2:
                        index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops_nd)
                    else:
                        index_ops = ''
                else:
                    index_ops = ' ' + style.SQL_KEYWORD(self.geom_index_ops)
                output.append(style.SQL_KEYWORD('CREATE INDEX ') +
                              style.SQL_TABLE(qn('%s_%s_id' % (db_table, f.column))) +
                              style.SQL_KEYWORD(' ON ') +
                              style.SQL_TABLE(qn(db_table)) +
                              style.SQL_KEYWORD(' USING ') +
                              style.SQL_COLTYPE(self.geom_index_type) + ' ( ' +
                              style.SQL_FIELD(qn(f.column)) + index_ops + ' );')
        return output
    def sql_table_creation_suffix(self):
        # Clone the PostGIS template database when one is available.
        if self.template_postgis is not None:
            return ' TEMPLATE %s' % (
                self.connection.ops.quote_name(self.template_postgis),)
        return ''
    def _create_test_db(self, verbosity, autoclobber):
        test_database_name = super(PostGISCreation, self)._create_test_db(verbosity, autoclobber)
        if self.template_postgis is None:
            # Connect to the test database in order to create the postgis extension
            self.connection.close()
            self.connection.settings_dict["NAME"] = test_database_name
            cursor = self.connection.cursor()
            cursor.execute("CREATE EXTENSION postgis")
            cursor.connection.commit()
        return test_database_name
| mit |
Chilledheart/gyp | pylib/gyp/common.py | 9 | 19179 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
  """Minimal memoizing decorator.

  Caches results keyed on the positional-argument tuple, so arguments
  must be hashable; keyword arguments are not supported.
  """
  def __init__(self, func):
    self.func = func
    self.cache = {}
  def __call__(self, *args):
    if args not in self.cache:
      self.cache[args] = self.func(*args)
    return self.cache[args]
class GypError(Exception):
  """A user-facing error.

  Raised for problems that should be reported to the user rather than
  treated as internal failures; the main entry point catches these and
  displays their message.
  """
def ExceptionAppend(e, msg):
  """Append a message to the given exception's message.

  The first element of e.args is extended with |msg| (space-separated);
  any remaining args are preserved unchanged.
  """
  if not e.args:
    e.args = (msg,)
  else:
    e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
  """
  Given a list of qualified targets, return the qualified targets whose
  target-name component equals |target|.
  """
  matches = []
  for qualified in qualified_list:
    if ParseQualifiedTarget(qualified)[1] == target:
      matches.append(qualified)
  return matches
def ParseQualifiedTarget(target):
  """Split a qualified target into [build_file, target, toolset].

  The qualified form is "/path/to/file.gyp:target_name#toolset"; the
  build-file and toolset parts are optional and come back as None when
  absent.  rsplit is used so a Windows drive letter's colon is not
  mistaken for the build-file separator.
  """
  build_file = None
  pieces = target.rsplit(':', 1)
  if len(pieces) == 2:
    build_file, target = pieces
  toolset = None
  pieces = target.rsplit('#', 1)
  if len(pieces) == 2:
    target, toolset = pieces
  return [build_file, target, toolset]
def ResolveTarget(build_file, target, toolset):
  """Resolve a possibly-qualified |target| into canonical
  [build_file, target, toolset] form; see the comments below."""
  # This function resolves a target into a canonical form:
  # - a fully defined build file, either absolute or relative to the current
  # directory
  # - a target name
  # - a toolset
  #
  # build_file is the file relative to which 'target' is defined.
  # target is the qualified target.
  # toolset is the default toolset for that target.
  [parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
  if parsed_build_file:
    if build_file:
      # If a relative path, parsed_build_file is relative to the directory
      # containing build_file.  If build_file is not in the current directory,
      # parsed_build_file is not a usable path as-is.  Resolve it by
      # interpreting it as relative to build_file.  If parsed_build_file is
      # absolute, it is usable as a path regardless of the current directory,
      # and os.path.join will return it as-is.
      build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
                                                 parsed_build_file))
      # Further (to handle cases like ../cwd), make it relative to cwd)
      if not os.path.isabs(build_file):
        build_file = RelativePath(build_file, '.')
    else:
      build_file = parsed_build_file
  if parsed_toolset:
    # An explicit toolset in the qualified target overrides the default.
    toolset = parsed_toolset
  return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
  """Return the build-file component of a fully qualified target."""
  build_file, _target, _toolset = ParseQualifiedTarget(fully_qualified_target)
  return build_file
def GetEnvironFallback(var_list, default):
  """Return the value of the first environment variable in |var_list|
  that is set, or |default| when none of them are."""
  for var in var_list:
    value = os.environ.get(var)
    if value is not None:
      return value
  return default
def QualifiedTarget(build_file, target, toolset):
  """Build the qualified form "/path/to/file.gyp:target_name#toolset".

  "Qualified" means the defining build file and the target name joined
  by a colon, with "#toolset" appended when a toolset is given.
  """
  qualified = build_file + ':' + target
  if toolset:
    qualified = qualified + '#' + toolset
  return qualified
@memoize
def RelativePath(path, relative_to):
  """Return |path| re-expressed relative to |relative_to| (both are
  interpreted relative to the current directory)."""
  # Assuming both |path| and |relative_to| are relative to the current
  # directory, returns a relative path that identifies path relative to
  # relative_to.
  # Convert to normalized (and therefore absolute paths).
  path = os.path.realpath(path)
  relative_to = os.path.realpath(relative_to)
  # On Windows, we can't create a relative path to a different drive, so just
  # use the absolute path.
  if sys.platform == 'win32':
    if (os.path.splitdrive(path)[0].lower() !=
        os.path.splitdrive(relative_to)[0].lower()):
      return path
  # Split the paths into components.
  path_split = path.split(os.path.sep)
  relative_to_split = relative_to.split(os.path.sep)
  # Determine how much of the prefix the two paths share.
  prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
  # Put enough ".." components to back up out of relative_to to the common
  # prefix, and then append the part of path_split after the common prefix.
  relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
                   path_split[prefix_len:]
  if len(relative_split) == 0:
    # The paths were the same.
    return ''
  # Turn it back into a string and we're done.
  return os.path.join(*relative_split)
@memoize
def InvertRelativePath(path, toplevel_dir=None):
  """Given a path like foo/bar that is relative to toplevel_dir, return
  the inverse relative path back to the toplevel_dir.
  E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
  should always produce the empty string, unless the path contains symlinks.
  """
  if path:
    # Resolve against toplevel_dir (default: the current directory) and
    # compute the path that leads back up to it.
    toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
    return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
  return path
def FixIfRelativePath(path, relative_to):
  """Like RelativePath but returns |path| unchanged if it is absolute."""
  return path if os.path.isabs(path) else RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
  """Given |relative_to| relative to the current directory and |path|
  relative to the dirname of |relative_to|, return a path identifying
  |path| relative to the current directory."""
  return os.path.normpath(
      os.path.join(os.path.dirname(relative_to), path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
# Matches any argument that must be double-quoted (or the empty string);
# see the block comment above for the per-character rationale.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
# Captures each character that must be backslash-escaped (", \ and `);
# see the block comment above for why $ and ! are deliberately excluded.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
  """Encodes |argument| suitably for consumption by POSIX shells.

  The result is double-quoted (when _quote matches) and backslash-escaped
  (per _escape) so the shell treats it as a literal, except that $ is left
  intact to allow parameter (variable) expansion by the shell.
  """
  if not isinstance(argument, str):
    argument = str(argument)
  quote = '"' if _quote.search(argument) else ''
  return quote + _escape.sub(r'\\\1', argument) + quote
def EncodePOSIXShellList(list):
  """Encodes |list| suitably for consumption by POSIX shells.

  Each item is encoded with EncodePOSIXShellArgument and the results are
  joined with single spaces as the argument separator.
  """
  return ' '.join(EncodePOSIXShellArgument(argument) for argument in list)
def DeepDependencyTargets(target_dicts, roots):
  """Returns the recursive list of target dependencies.

  Walks 'dependencies' and 'dependencies_original' of every target
  reachable from |roots|; the roots themselves are excluded from the
  result.
  """
  root_set = set(roots)
  visited = set()
  stack = list(roots)
  while stack:
    node = stack.pop()
    if node in visited:
      continue
    visited.add(node)
    spec = target_dicts[node]
    stack.extend(spec.get('dependencies', []))
    stack.extend(spec.get('dependencies_original', []))
  return list(visited - root_set)
def BuildFileTargets(target_list, build_file):
  """From a target_list, returns the subset defined in |build_file|."""
  matches = []
  for qualified in target_list:
    if BuildFile(qualified) == build_file:
      matches.append(qualified)
  return matches
def AllTargets(target_list, target_dicts, build_file):
  """Returns all targets (direct and dependencies) for the specified
  build_file."""
  direct = BuildFileTargets(target_list, build_file)
  return direct + DeepDependencyTargets(target_dicts, direct)
def WriteOnDiff(filename):
  """Write to a file only if the new contents differ.
  Arguments:
    filename: name of the file to potentially write to.
  Returns:
    A file like object which will write to temporary file and only overwrite
    the target if it differs (on close).
  """
  class Writer(object):
    """Wrapper around file which only covers the target if it differs."""
    def __init__(self):
      # Pick temporary file.
      tmp_fd, self.tmp_path = tempfile.mkstemp(
          suffix='.tmp',
          prefix=os.path.split(filename)[1] + '.gyp.',
          dir=os.path.split(filename)[0])
      try:
        self.tmp_file = os.fdopen(tmp_fd, 'wb')
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
    def __getattr__(self, attrname):
      # Delegate everything else to self.tmp_file
      return getattr(self.tmp_file, attrname)
    def close(self):
      # Atomically replace |filename| with the temp file, but only when the
      # contents actually changed (preserving the old mtime otherwise).
      try:
        # Close tmp file.
        self.tmp_file.close()
        # Determine if different.
        same = False
        try:
          same = filecmp.cmp(self.tmp_path, filename, False)
        except OSError, e:
          if e.errno != errno.ENOENT:
            raise
        if same:
          # The new file is identical to the old one, just get rid of the new
          # one.
          os.unlink(self.tmp_path)
        else:
          # The new file is different from the old one, or there is no old one.
          # Rename the new file to the permanent name.
          #
          # tempfile.mkstemp uses an overly restrictive mode, resulting in a
          # file that can only be read by the owner, regardless of the umask.
          # There's no reason to not respect the umask here, which means that
          # an extra hoop is required to fetch it and reset the new file's mode.
          #
          # No way to get the umask without setting a new one?  Set a safe one
          # and then set it back to the old value.
          umask = os.umask(077)
          os.umask(umask)
          os.chmod(self.tmp_path, 0666 & ~umask)
          if sys.platform == 'win32' and os.path.exists(filename):
            # NOTE: on windows (but not cygwin) rename will not replace an
            # existing file, so it must be preceded with a remove. Sadly there
            # is no way to make the switch atomic.
            os.remove(filename)
          os.rename(self.tmp_path, filename)
      except Exception:
        # Don't leave turds behind.
        os.unlink(self.tmp_path)
        raise
  return Writer()
def EnsureDirExists(path):
  """Make sure the directory for |path| exists."""
  directory = os.path.dirname(path)
  try:
    os.makedirs(directory)
  except OSError:
    # Deliberately permissive, matching the original: the directory may
    # already exist (or |path| may have no directory component).
    pass
def GetFlavor(params):
  """Returns |params['flavor']| if it's set, the system's default flavor
  else (derived from sys.platform)."""
  if 'flavor' in params:
    return params['flavor']
  # Exact platform names first, then prefix matches, then 'linux'.
  exact = {
      'cygwin': 'win',
      'win32': 'win',
      'darwin': 'mac',
  }
  if sys.platform in exact:
    return exact[sys.platform]
  for prefix, flavor in (('sunos', 'solaris'),
                         ('freebsd', 'freebsd'),
                         ('openbsd', 'openbsd'),
                         ('aix', 'aix')):
    if sys.platform.startswith(prefix):
      return flavor
  return 'linux'
def CopyTool(flavor, out_path):
  """Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
  to |out_path|."""
  # aix and solaris just need flock emulation. mac and win use more complicated
  # support scripts.
  prefix = {
      'aix': 'flock',
      'solaris': 'flock',
      'mac': 'mac',
      'win': 'win'
  }.get(flavor, None)
  if not prefix:
    # Flavors not listed above (e.g. linux) need no helper tool.
    return
  # Slurp input file.
  source_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
  with open(source_path) as source_file:
    source = source_file.readlines()
  # Add header and write it out.
  tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
  with open(tool_path, 'w') as tool_file:
    # Keep the shebang (line 0) first, then mark the copy as generated.
    tool_file.write(
        ''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
  # Make file executable.
  os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
  """Return seq's items in original order, dropping later duplicates.

  idfun, when given, maps each item to the key used for duplicate
  detection (default: the item itself).  Keys must be hashable.
  """
  if idfun is None:
    idfun = lambda x: x
  seen = set()
  result = []
  for item in seq:
    marker = idfun(item)
    if marker not in seen:
      seen.add(marker)
      result.append(item)
  return result
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
  # A set that remembers insertion order, backed by a dict plus a circular
  # doubly linked list whose sentinel is self.end.
  # NOTE(review): collections.MutableSet moved to collections.abc in
  # Python 3.3 and the old alias was removed in 3.10 -- confirm the
  # supported interpreter range before reuse.
  def __init__(self, iterable=None):
    self.end = end = []
    end += [None, end, end]         # sentinel node for doubly linked list
    self.map = {}                   # key --> [key, prev, next]
    if iterable is not None:
      self |= iterable
  def __len__(self):
    return len(self.map)
  def __contains__(self, key):
    return key in self.map
  def add(self, key):
    # Append at the tail (just before the sentinel); no-op for duplicates.
    if key not in self.map:
      end = self.end
      curr = end[1]
      curr[2] = end[1] = self.map[key] = [key, curr, end]
  def discard(self, key):
    # Unlink the node from the list; silently ignores missing keys.
    if key in self.map:
      key, prev_item, next_item = self.map.pop(key)
      prev_item[2] = next_item
      next_item[1] = prev_item
  def __iter__(self):
    # Walk the linked list forwards (insertion order).
    end = self.end
    curr = end[2]
    while curr is not end:
      yield curr[0]
      curr = curr[2]
  def __reversed__(self):
    # Walk the linked list backwards.
    end = self.end
    curr = end[1]
    while curr is not end:
      yield curr[0]
      curr = curr[1]
  # The second argument is an addition that causes a pylint warning.
  def pop(self, last=True):  # pylint: disable=W0221
    # Remove and return the newest (last=True) or oldest element.
    if not self:
      raise KeyError('set is empty')
    key = self.end[1][0] if last else self.end[2][0]
    self.discard(key)
    return key
  def __repr__(self):
    if not self:
      return '%s()' % (self.__class__.__name__,)
    return '%s(%r)' % (self.__class__.__name__, list(self))
  def __eq__(self, other):
    # Order-sensitive against another OrderedSet, order-insensitive
    # against anything else.
    if isinstance(other, OrderedSet):
      return len(self) == len(other) and list(self) == list(other)
    return set(self) == set(other)
  # Extensions to the recipe.
  def update(self, iterable):
    for i in iterable:
      if i not in self:
        self.add(i)
class CycleError(Exception):
  """Raised when an unexpected cycle is detected in a graph."""

  def __init__(self, nodes):
    self.nodes = nodes

  def __str__(self):
    return 'CycleError: cycle involving: %s' % (self.nodes,)
def TopologicallySorted(graph, get_edges):
  r"""Topologically sort based on a user provided edge definition.

  Args:
    graph: A list of node names.
    get_edges: A function mapping from node name to a hashable collection
               of node names which this node has outgoing edges to.
  Returns:
    A list containing all of the node in graph in topological order.
    It is assumed that calling get_edges once for each node and caching is
    cheaper than repeatedly calling get_edges.
  Raises:
    CycleError in the event of a cycle.
  Example:
    graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
    def GetEdges(node):
      return re.findall(r'\$\(([^)]*)\)', graph[node])
    print TopologicallySorted(graph.keys(), GetEdges)
    ==>
    ['a', 'c', 'b']
  """
  # Cache edge lookups; each node's edges are needed at most once.
  get_edges = memoize(get_edges)
  visited = set()    # nodes fully processed
  visiting = set()   # nodes on the current DFS stack (cycle detection)
  ordered_nodes = []
  def Visit(node):
    # Re-entering a node still on the stack means we found a cycle.
    if node in visiting:
      raise CycleError(visiting)
    if node in visited:
      return
    visited.add(node)
    visiting.add(node)
    for neighbor in get_edges(node):
      Visit(neighbor)
    visiting.remove(node)
    # Prepend so dependants come before their dependencies' successors.
    ordered_nodes.insert(0, node)
  # sorted() makes the output deterministic for a given input.
  for node in sorted(graph):
    Visit(node)
  return ordered_nodes
| bsd-3-clause |
sangh/LaserShow | pyglet-hg/pyglet/window/xlib/__init__.py | 4 | 49403 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
# Module metadata: docstring markup format and (historical) VCS revision id.
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
import unicodedata
import warnings
import pyglet
from pyglet.window import WindowException, NoSuchDisplayException, \
MouseCursorException, MouseCursor, \
DefaultMouseCursor, ImageMouseCursor, BaseWindow, _PlatformEventHandler, \
_ViewEventHandler
from pyglet.window import key
from pyglet.window import mouse
from pyglet.event import EventDispatcher
from pyglet.canvas.xlib import XlibCanvas
from pyglet.libs.x11 import xlib
from pyglet.libs.x11 import cursorfont
from pyglet.compat import asbytes
# XSync (used for WM resize synchronisation) is optional; fall back
# gracefully when the extension bindings cannot be loaded.  The previous
# bare ``except:`` also swallowed SystemExit/KeyboardInterrupt; catching
# Exception keeps the best-effort import without masking those.
try:
    from pyglet.libs.x11 import xsync
    _have_xsync = True
except Exception:
    _have_xsync = False
class mwmhints_t(Structure):
    """ctypes mirror of the Motif WM hints (_MOTIF_WM_HINTS) property.

    Field order and widths must match the C struct layout exactly, as
    the raw bytes are handed to XChangeProperty.
    """
    _fields_ = [
        ('flags', c_uint32),
        ('functions', c_uint32),
        ('decorations', c_uint32),
        ('input_mode', c_int32),
        ('status', c_uint32)
    ]
XA_CARDINAL = 6 # Xatom.h:14 -- predefined CARDINAL atom id.

# Do we have the November 2000 UTF8 extension?
# (The Xutf8* APIs are needed for UTF-8 text properties and XIM input.)
_have_utf8 = hasattr(xlib._lib, 'Xutf8TextListToTextProperty')
# symbol,ctrl -> motion mapping
_motion_map = {
(key.UP, False): key.MOTION_UP,
(key.RIGHT, False): key.MOTION_RIGHT,
(key.DOWN, False): key.MOTION_DOWN,
(key.LEFT, False): key.MOTION_LEFT,
(key.RIGHT, True): key.MOTION_NEXT_WORD,
(key.LEFT, True): key.MOTION_PREVIOUS_WORD,
(key.HOME, False): key.MOTION_BEGINNING_OF_LINE,
(key.END, False): key.MOTION_END_OF_LINE,
(key.PAGEUP, False): key.MOTION_PREVIOUS_PAGE,
(key.PAGEDOWN, False): key.MOTION_NEXT_PAGE,
(key.HOME, True): key.MOTION_BEGINNING_OF_FILE,
(key.END, True): key.MOTION_END_OF_FILE,
(key.BACKSPACE, False): key.MOTION_BACKSPACE,
(key.DELETE, False): key.MOTION_DELETE,
}
class XlibException(WindowException):
    """An X11-specific error; this almost always indicates a programming
    error inside pyglet rather than a user mistake."""
class XlibMouseCursor(MouseCursor):
    """Mouse cursor backed by a native X11 Cursor handle."""

    # The X server renders native cursors itself; pyglet never draws them.
    drawable = False

    def __init__(self, cursor):
        self.cursor = cursor
# Platform event data is single item, so use platform event handler directly.
# These aliases are the decorators applied to XlibWindow methods below to
# register them for specific X event types (window vs. view target).
XlibEventHandler = _PlatformEventHandler
ViewEventHandler = _ViewEventHandler
class XlibWindow(BaseWindow):
    """An Xlib-backed pyglet window.

    Maintains a top-level X window plus an inner "view" window that the
    GL context draws into; the class attributes below cache X-side state.
    """
    _x_display = None               # X display connection
    _x_screen_id = None             # X screen index
    _x_ic = None                    # X input context (XIM); set only when
                                    # the UTF8 extension is available
    _window = None                  # Xlib window handle
    _minimum_size = None            # (w, h) WM size hint, or None
    _maximum_size = None            # (w, h) WM size hint, or None
    _override_redirect = False      # True when fullscreen used override-redirect
    _x = 0
    _y = 0                          # Last known window position
    _width = 0
    _height = 0                     # Last known window size
    _mouse_exclusive_client = None  # x,y of "real" mouse during exclusive
    _mouse_buttons = [False] * 6    # State of each xlib button
    _keyboard_exclusive = False
    _active = True
    _applied_mouse_exclusive = False    # grab state actually applied to X
    _applied_keyboard_exclusive = False
    _mapped = False                 # window currently mapped on the server
    _lost_context = False
    _lost_context_state = False
    _enable_xsync = False           # _NET_WM_SYNC_REQUEST resize sync active
    _current_sync_value = None
    _current_sync_valid = False
    _needs_resize = False           # True when resize event has been received but not
                                    # dispatched
    # All events except those that are unwanted or handled by other means.
    _default_event_mask = (0x1ffffff
                           & ~xlib.PointerMotionHintMask
                           & ~xlib.ResizeRedirectMask
                           & ~xlib.SubstructureNotifyMask)
def __init__(self, *args, **kwargs):
    """Collect the decorated platform event handlers, then defer the
    rest of construction to BaseWindow."""
    self._event_handlers = {}
    self._view_event_handlers = {}
    for handler_name in self._platform_event_names:
        handler = getattr(self, handler_name, None)
        if handler is None:
            continue
        # Handlers tagged with _view route to the view window; the rest
        # route to the top-level window.
        target = (self._view_event_handlers if hasattr(handler, '_view')
                  else self._event_handlers)
        for message in handler._platform_event_data:
            target[message] = handler
    super(XlibWindow, self).__init__(*args, **kwargs)
def _recreate(self, changes):
    """Destroy and re-create the X window when `changes` require it.

    `changes` is a sequence of attribute names that differ from the
    currently applied state.
    """
    # If flipping to/from fullscreen, need to recreate the window.  (This
    # is the case with both override_redirect method and
    # _NET_WM_STATE_FULLSCREEN.)
    #
    # A possible improvement could be to just hide the top window,
    # destroy the GLX window, and reshow it again when leaving fullscreen.
    # This would prevent the floating window from being moved by the
    # WM.
    if ('fullscreen' in changes or 'resizable' in changes):
        # clear out the GLX context before the X window goes away
        self.context.detach()
        xlib.XDestroyWindow(self._x_display, self._window)
        del self.display._window_map[self._window]
        del self.display._window_map[self._view]
        self._window = None
        self._mapped = False

    # TODO: detect state loss only by examining context share.
    if 'context' in changes:
        self._lost_context = True
        self._lost_context_state = True

    self._create()
def _create(self):
    """Create (or re-configure) the top-level X window, the inner view
    window, the GL canvas, WM properties/protocols and the XIM input
    context, then apply initial visibility and exclusivity."""
    # Unmap existing window if necessary while we fiddle with it.
    if self._window and self._mapped:
        self._unmap()

    self._x_display = self.display._display
    self._x_screen_id = self.display.x_screen

    # Create X window if not already existing.
    if not self._window:
        root = xlib.XRootWindow(self._x_display, self._x_screen_id)

        visual_info = self.config.get_visual_info()

        visual = visual_info.visual
        visual_id = xlib.XVisualIDFromVisual(visual)
        default_visual = xlib.XDefaultVisual(
            self._x_display, self._x_screen_id)
        default_visual_id = xlib.XVisualIDFromVisual(default_visual)
        window_attributes = xlib.XSetWindowAttributes()
        # A colormap matching the GL visual is required whenever it
        # differs from the screen's default visual.
        if visual_id != default_visual_id:
            window_attributes.colormap = xlib.XCreateColormap(
                self._x_display, root, visual, xlib.AllocNone)
        else:
            window_attributes.colormap = xlib.XDefaultColormap(
                self._x_display, self._x_screen_id)
        window_attributes.bit_gravity = xlib.StaticGravity

        # Issue 287: Compiz on Intel/Mesa doesn't draw window decoration
        # unless CWBackPixel is given in mask.  Should have no effect on
        # other systems, so it's set unconditionally.
        mask = xlib.CWColormap | xlib.CWBitGravity | xlib.CWBackPixel

        if self._fullscreen:
            # Centre the fixed-size view inside the screen-sized window.
            width, height = self.screen.width, self.screen.height
            self._view_x = (width - self._width) // 2
            self._view_y = (height - self._height) // 2
        else:
            width, height = self._width, self._height
            self._view_x = self._view_y = 0

        self._window = xlib.XCreateWindow(self._x_display, root,
            0, 0, width, height, 0, visual_info.depth,
            xlib.InputOutput, visual, mask,
            byref(window_attributes))
        self._view = xlib.XCreateWindow(self._x_display,
            self._window, self._view_x, self._view_y,
            self._width, self._height, 0, visual_info.depth,
            xlib.InputOutput, visual, mask,
            byref(window_attributes));
        xlib.XMapWindow(self._x_display, self._view)
        xlib.XSelectInput(
            self._x_display, self._view, self._default_event_mask)

        # Route raw X events for each handle to the right dispatcher.
        self.display._window_map[self._window] = \
            self.dispatch_platform_event
        self.display._window_map[self._view] = \
            self.dispatch_platform_event_view

        self.canvas = XlibCanvas(self.display, self._view)

        self.context.attach(self.canvas)
        self.context.set_vsync(self._vsync) # XXX ?

        # Setting null background pixmap disables drawing the background,
        # preventing flicker while resizing (in theory).
        #
        # Issue 287: Compiz on Intel/Mesa doesn't draw window decoration if
        # this is called.  As it doesn't seem to have any effect anyway,
        # it's just commented out.
        #xlib.XSetWindowBackgroundPixmap(self._x_display, self._window, 0)

        self._enable_xsync = (pyglet.options['xsync'] and
                              self.display._enable_xsync and
                              self.config.double_buffer)

        # Set supported protocols
        protocols = []
        protocols.append(xlib.XInternAtom(self._x_display,
                                          asbytes('WM_DELETE_WINDOW'),
                                          False))
        if self._enable_xsync:
            protocols.append(xlib.XInternAtom(self._x_display,
                                              asbytes('_NET_WM_SYNC_REQUEST'),
                                              False))
        protocols = (c_ulong * len(protocols))(*protocols)
        xlib.XSetWMProtocols(self._x_display, self._window,
                             protocols, len(protocols))

        # Create window resize sync counter (_NET_WM_SYNC_REQUEST).
        if self._enable_xsync:
            value = xsync.XSyncValue()
            self._sync_counter = xlib.XID(
                xsync.XSyncCreateCounter(self._x_display, value))
            atom = xlib.XInternAtom(self._x_display,
                asbytes('_NET_WM_SYNC_REQUEST_COUNTER'), False)
            ptr = pointer(self._sync_counter)

            xlib.XChangeProperty(self._x_display, self._window,
                                 atom, XA_CARDINAL, 32,
                                 xlib.PropModeReplace,
                                 cast(ptr, POINTER(c_ubyte)), 1)

    # Set window attributes
    attributes = xlib.XSetWindowAttributes()
    attributes_mask = 0

    self._override_redirect = False
    if self._fullscreen:
        if pyglet.options['xlib_fullscreen_override_redirect']:
            # Try not to use this any more, it causes problems; disabled
            # by default in favour of _NET_WM_STATE_FULLSCREEN.
            attributes.override_redirect = self._fullscreen
            attributes_mask |= xlib.CWOverrideRedirect
            self._override_redirect = True
        else:
            self._set_wm_state('_NET_WM_STATE_FULLSCREEN')

    if self._fullscreen:
        xlib.XMoveResizeWindow(self._x_display, self._window,
            self.screen.x, self.screen.y,
            self.screen.width, self.screen.height)
    else:
        xlib.XResizeWindow(self._x_display, self._window,
                           self._width, self._height)

    xlib.XChangeWindowAttributes(self._x_display, self._window,
                                 attributes_mask, byref(attributes))

    # Set style (EWMH window type, or Motif hints for borderless).
    styles = {
        self.WINDOW_STYLE_DEFAULT: '_NET_WM_WINDOW_TYPE_NORMAL',
        self.WINDOW_STYLE_DIALOG: '_NET_WM_WINDOW_TYPE_DIALOG',
        self.WINDOW_STYLE_TOOL: '_NET_WM_WINDOW_TYPE_UTILITY',
    }
    if self._style in styles:
        self._set_atoms_property('_NET_WM_WINDOW_TYPE',
                                 (styles[self._style],))
    elif self._style == self.WINDOW_STYLE_BORDERLESS:
        # Ask the WM (via Motif hints) to draw no decorations at all.
        MWM_HINTS_DECORATIONS = 1 << 1
        PROP_MWM_HINTS_ELEMENTS = 5
        mwmhints = mwmhints_t()
        mwmhints.flags = MWM_HINTS_DECORATIONS
        mwmhints.decorations = 0
        name = xlib.XInternAtom(self._x_display, '_MOTIF_WM_HINTS', False)

        xlib.XChangeProperty(self._x_display, self._window,
            name, name, 32, xlib.PropModeReplace,
            cast(pointer(mwmhints), POINTER(c_ubyte)),
            PROP_MWM_HINTS_ELEMENTS)

    # Set resizeable: pin min == max so the WM refuses user resizes.
    if not self._resizable and not self._fullscreen:
        self.set_minimum_size(self._width, self._height)
        self.set_maximum_size(self._width, self._height)

    # Set caption
    self.set_caption(self._caption)

    # Create input context.  A good but very outdated reference for this
    # is http://www.sbin.org/doc/Xlib/chapt_11.html
    if _have_utf8 and not self._x_ic:
        if not self.display._x_im:
            xlib.XSetLocaleModifiers(asbytes('@im=none'))
            self.display._x_im = \
                xlib.XOpenIM(self._x_display, None, None, None)

        xlib.XFlush(self._x_display);

        # Need to set argtypes on this function because it's vararg,
        # and ctypes guesses wrong.
        xlib.XCreateIC.argtypes = [xlib.XIM,
                                   c_char_p, c_int,
                                   c_char_p, xlib.Window,
                                   c_char_p, xlib.Window,
                                   c_void_p]
        self._x_ic = xlib.XCreateIC(self.display._x_im,
            asbytes('inputStyle'), xlib.XIMPreeditNothing|xlib.XIMStatusNothing,
            asbytes('clientWindow'), self._window,
            asbytes('focusWindow'), self._window,
            None)

        filter_events = c_ulong()
        xlib.XGetICValues(self._x_ic,
                          'filterEvents', byref(filter_events),
                          None)
        # The IC may need extra event types delivered for filtering.
        self._default_event_mask |= filter_events.value
        xlib.XSetICFocus(self._x_ic)

    self.switch_to()
    if self._visible:
        self.set_visible(True)

    self.set_mouse_platform_visible()
    self._applied_mouse_exclusive = None
    self._update_exclusivity()
def _map(self):
    """Map (show) the window and block until the server confirms it."""
    if self._mapped:
        return

    # Map the window, wait for map event before continuing.  Only
    # structure events are selected meanwhile so no other events are
    # consumed by this loop.
    xlib.XSelectInput(
        self._x_display, self._window, xlib.StructureNotifyMask)
    xlib.XMapRaised(self._x_display, self._window)
    e = xlib.XEvent()
    while True:
        xlib.XNextEvent(self._x_display, e)
        if e.type == xlib.MapNotify:
            break
    xlib.XSelectInput(
        self._x_display, self._window, self._default_event_mask)
    self._mapped = True

    if self._override_redirect:
        # Possibly an override_redirect issue: the WM won't focus us,
        # so take input focus explicitly.
        self.activate()

    self.dispatch_event('on_resize', self._width, self._height)
    self.dispatch_event('on_show')
    self.dispatch_event('on_expose')
def _unmap(self):
    """Unmap (hide) the window and block until the server confirms it."""
    if not self._mapped:
        return

    # Mirror of _map(): select only structure events while waiting for
    # the UnmapNotify so no other events are consumed.
    xlib.XSelectInput(
        self._x_display, self._window, xlib.StructureNotifyMask)
    xlib.XUnmapWindow(self._x_display, self._window)
    e = xlib.XEvent()
    while True:
        xlib.XNextEvent(self._x_display, e)
        if e.type == xlib.UnmapNotify:
            break

    xlib.XSelectInput(
        self._x_display, self._window, self._default_event_mask)
    self._mapped = False
def _get_root(self):
    """Return the root window of the screen this window lives on."""
    attrs = xlib.XWindowAttributes()
    xlib.XGetWindowAttributes(self._x_display, self._window, byref(attrs))
    return attrs.root
def close(self):
    """Tear down the GL context, destroy the X window and XIM context."""
    if not self._window:
        return  # already closed

    self.context.destroy()
    self._unmap()
    if self._window:
        xlib.XDestroyWindow(self._x_display, self._window)

    # NOTE(review): only self._window is removed from _window_map here,
    # not self._view -- confirm whether that is intentional.
    del self.display._window_map[self._window]
    self._window = None

    if _have_utf8:
        xlib.XDestroyIC(self._x_ic)
        self._x_ic = None

    super(XlibWindow, self).close()
def switch_to(self):
    """Make this window's GL context current, if one is attached."""
    if not self.context:
        return
    self.context.set_current()
def flip(self):
    """Draw the soft cursor, present the back buffer and acknowledge
    any pending WM resize synchronisation."""
    self.draw_mouse_cursor()

    # TODO canvas.flip?
    if self.context:
        self.context.flip()

    self._sync_resize()
def set_vsync(self, vsync):
    """Enable/disable vsync; the global pyglet 'vsync' option, when set,
    overrides the requested value."""
    override = pyglet.options['vsync']
    if override is not None:
        vsync = override
    self._vsync = vsync
    self.context.set_vsync(vsync)
def set_caption(self, caption):
    """Set the window title, mirrored to both legacy ICCCM and EWMH
    properties (None is treated as the empty string)."""
    self._caption = caption if caption is not None else ''
    # Legacy properties must avoid UTF-8; EWMH ones may use it.
    for prop in ('WM_NAME', 'WM_ICON_NAME'):
        self._set_text_property(prop, self._caption, allow_utf8=False)
    for prop in ('_NET_WM_NAME', '_NET_WM_ICON_NAME'):
        self._set_text_property(prop, self._caption)
def get_caption(self):
    """Return the window title last set via set_caption()."""
    return self._caption
def set_size(self, width, height):
    """Resize the window to (width, height) pixels.

    Raises WindowException when the window is fullscreen.
    """
    if self._fullscreen:
        raise WindowException('Cannot set size of fullscreen window.')
    self._width = width
    self._height = height
    if not self._resizable:
        # Pin min == max so the WM prevents user resizing.
        self.set_minimum_size(width, height)
        self.set_maximum_size(width, height)
    xlib.XResizeWindow(self._x_display, self._window, width, height)
    self._update_view_size()
    self.dispatch_event('on_resize', width, height)
def _update_view_size(self):
    # Keep the inner view window the same size as the top-level window.
    xlib.XResizeWindow(self._x_display, self._view,
                       self._width, self._height)
def get_size(self):
    """Return the last known (width, height) of the window."""
    # XGetGeometry and XWindowAttributes seem to always return the
    # original size of the window, which is wrong after the user
    # has resized it.
    # XXX this is probably fixed now, with fix of resize.
    return self._width, self._height
def set_location(self, x, y):
    """Move the window so its frame origin is at (x, y)."""
    # Assume the window manager has reparented our top-level window
    # only once, in which case attributes.x/y give the offset from
    # the frame to the content window.  Better solution would be
    # to use _NET_FRAME_EXTENTS, where supported.
    attributes = xlib.XWindowAttributes()
    xlib.XGetWindowAttributes(self._x_display, self._window,
                              byref(attributes))
    # XXX at least under KDE's WM these attrs are both 0
    x -= attributes.x
    y -= attributes.y
    xlib.XMoveWindow(self._x_display, self._window, x, y)
def get_location(self):
    """Return the (x, y) of the window's origin in root-window
    coordinates."""
    child = xlib.Window()
    root_x = c_int()
    root_y = c_int()
    xlib.XTranslateCoordinates(self._x_display,
                               self._window,
                               self._get_root(),
                               0, 0,
                               byref(root_x),
                               byref(root_y),
                               byref(child))
    return root_x.value, root_y.value
def activate(self):
    """Give this window keyboard focus."""
    xlib.XSetInputFocus(self._x_display, self._window,
                        xlib.RevertToParent, xlib.CurrentTime)
def set_visible(self, visible=True):
    """Map or unmap the window and record the requested visibility."""
    (self._map if visible else self._unmap)()
    self._visible = visible
def set_minimum_size(self, width, height):
    """Record the minimum size and push it to the WM as a size hint."""
    self._minimum_size = width, height
    self._set_wm_normal_hints()
def set_maximum_size(self, width, height):
    """Record the maximum size and push it to the WM as a size hint."""
    self._maximum_size = width, height
    self._set_wm_normal_hints()
def minimize(self):
    """Iconify (minimize) the window."""
    xlib.XIconifyWindow(self._x_display, self._window, self._x_screen_id)
def maximize(self):
    """Maximize the window in both dimensions via EWMH state atoms."""
    self._set_wm_state('_NET_WM_STATE_MAXIMIZED_HORZ',
                       '_NET_WM_STATE_MAXIMIZED_VERT')
def set_mouse_platform_visible(self, platform_visible=None):
    """Show or hide the OS pointer over this window.

    platform_visible: tri-state override; None means "derive from
    self._mouse_visible and whether pyglet draws the cursor itself".
    """
    if platform_visible is None:
        platform_visible = self._mouse_visible and \
            not self._mouse_cursor.drawable

    if not platform_visible:
        # Hide the pointer by installing an all-blank 8x8 cursor: a
        # zeroed pixmap is used for both shape and mask.  (The previous
        # code called XBlackPixel() and immediately overwrote the result
        # with an XColor(); that dead call has been removed.)
        black = xlib.XColor()  # zero-initialised; colour is irrelevant
        bmp = xlib.XCreateBitmapFromData(self._x_display, self._window,
                                         c_buffer(8), 8, 8)
        cursor = xlib.XCreatePixmapCursor(self._x_display, bmp, bmp,
                                          black, black, 0, 0)
        xlib.XDefineCursor(self._x_display, self._window, cursor)
        # The server keeps its own reference; free our handles now.
        xlib.XFreeCursor(self._x_display, cursor)
        xlib.XFreePixmap(self._x_display, bmp)
    else:
        # Restore either the custom native cursor or the WM default.
        if isinstance(self._mouse_cursor, XlibMouseCursor):
            xlib.XDefineCursor(self._x_display, self._window,
                               self._mouse_cursor.cursor)
        else:
            xlib.XUndefineCursor(self._x_display, self._window)
def set_mouse_position(self, x, y):
    """Warp the pointer to (x, y) in pyglet's bottom-left-origin
    coordinates."""
    xlib.XWarpPointer(self._x_display,
                      0,              # src window
                      self._window,   # dst window
                      0, 0,           # src x, y
                      0, 0,           # src w, h
                      x, self._height - y,  # flip y: X is top-left origin
                      )
def _update_exclusivity(self):
    """Apply or release pointer/keyboard grabs so the X-side state
    matches the requested exclusivity; grabs only apply while active."""
    mouse_exclusive = self._active and self._mouse_exclusive
    keyboard_exclusive = self._active and self._keyboard_exclusive

    if mouse_exclusive != self._applied_mouse_exclusive:
        if mouse_exclusive:
            self.set_mouse_platform_visible(False)

            # Restrict to client area
            xlib.XGrabPointer(self._x_display, self._window,
                True,
                0,
                xlib.GrabModeAsync,
                xlib.GrabModeAsync,
                self._window,
                0,
                xlib.CurrentTime)

            # Move pointer to center of window.
            # NOTE(review): integer division under Python 2; would
            # become float under Python 3.
            x = self._width / 2
            y = self._height / 2
            self._mouse_exclusive_client = x, y
            self.set_mouse_position(x, y)
        elif self._fullscreen and not self.screen._xinerama:
            # Restrict to fullscreen area (prevent viewport scrolling)
            self.set_mouse_position(0, 0)
            r = xlib.XGrabPointer(self._x_display, self._view,
                True, 0,
                xlib.GrabModeAsync,
                xlib.GrabModeAsync,
                self._view,
                0,
                xlib.CurrentTime)
            if r:
                # Failed to grab; leave state unset so the next platform
                # event dispatch retries (see dispatch_platform_event).
                self._applied_mouse_exclusive = None
                return
            self.set_mouse_platform_visible()
        else:
            # Unclip
            xlib.XUngrabPointer(self._x_display, xlib.CurrentTime)
            self.set_mouse_platform_visible()

        self._applied_mouse_exclusive = mouse_exclusive

    if keyboard_exclusive != self._applied_keyboard_exclusive:
        if keyboard_exclusive:
            xlib.XGrabKeyboard(self._x_display,
                self._window,
                False,
                xlib.GrabModeAsync,
                xlib.GrabModeAsync,
                xlib.CurrentTime)
        else:
            xlib.XUngrabKeyboard(self._x_display, xlib.CurrentTime)
        self._applied_keyboard_exclusive = keyboard_exclusive
def set_exclusive_mouse(self, exclusive=True):
    """Request (or release) an exclusive pointer grab; no-op when the
    requested state is already set."""
    if exclusive != self._mouse_exclusive:
        self._mouse_exclusive = exclusive
        self._update_exclusivity()
def set_exclusive_keyboard(self, exclusive=True):
    """Request (or release) an exclusive keyboard grab; no-op when the
    requested state is already set."""
    if exclusive != self._keyboard_exclusive:
        self._keyboard_exclusive = exclusive
        self._update_exclusivity()
def get_system_mouse_cursor(self, name):
    """Return a platform cursor object for one of the CURSOR_* names.

    Raises MouseCursorException for an unknown name.
    """
    if name == self.CURSOR_DEFAULT:
        return DefaultMouseCursor()

    # NQR means default shape is not pretty... surely there is another
    # cursor font?
    cursor_shapes = {
        self.CURSOR_CROSSHAIR: cursorfont.XC_crosshair,
        self.CURSOR_HAND: cursorfont.XC_hand2,
        self.CURSOR_HELP: cursorfont.XC_question_arrow,          # NQR
        self.CURSOR_NO: cursorfont.XC_pirate,                    # NQR
        self.CURSOR_SIZE: cursorfont.XC_fleur,
        self.CURSOR_SIZE_UP: cursorfont.XC_top_side,
        self.CURSOR_SIZE_UP_RIGHT: cursorfont.XC_top_right_corner,
        self.CURSOR_SIZE_RIGHT: cursorfont.XC_right_side,
        self.CURSOR_SIZE_DOWN_RIGHT: cursorfont.XC_bottom_right_corner,
        self.CURSOR_SIZE_DOWN: cursorfont.XC_bottom_side,
        self.CURSOR_SIZE_DOWN_LEFT: cursorfont.XC_bottom_left_corner,
        self.CURSOR_SIZE_LEFT: cursorfont.XC_left_side,
        self.CURSOR_SIZE_UP_LEFT: cursorfont.XC_top_left_corner,
        self.CURSOR_SIZE_UP_DOWN: cursorfont.XC_sb_v_double_arrow,
        self.CURSOR_SIZE_LEFT_RIGHT: cursorfont.XC_sb_h_double_arrow,
        self.CURSOR_TEXT: cursorfont.XC_xterm,
        self.CURSOR_WAIT: cursorfont.XC_watch,
        self.CURSOR_WAIT_ARROW: cursorfont.XC_watch,             # NQR
    }
    if name not in cursor_shapes:
        raise MouseCursorException('Unknown cursor name "%s"' % name)
    cursor = xlib.XCreateFontCursor(self._x_display, cursor_shapes[name])
    return XlibMouseCursor(cursor)
def set_icon(self, *images):
    """Set the window icon(s) via the _NET_WM_ICON property.

    Careful!  XChangeProperty with format=32 takes an array of C longs
    (which can be 64-bit!), so the high bytes of each 32-bit BGRA/ARGB
    pixel are padded to sizeof(long) as needed.
    NOTE: byte-string concatenation assumes Python 2 str semantics.
    """
    import sys
    format = {
        ('little', 4): 'BGRA',
        ('little', 8): 'BGRAAAAA',
        ('big', 4): 'ARGB',
        ('big', 8): 'AAAAARGB'
    }[(sys.byteorder, sizeof(c_ulong))]

    data = ''
    for image in images:
        image = image.get_image_data()
        # Negative pitch: rows are supplied top-down, as X expects.
        pitch = -(image.width * len(format))
        # Each icon entry is prefixed with its width and height as longs.
        s = c_buffer(sizeof(c_ulong) * 2)
        memmove(s, cast((c_ulong * 2)(image.width, image.height),
                        POINTER(c_ubyte)), len(s))
        data += s.raw + image.get_data(format, pitch)
    buffer = (c_ubyte * len(data))()
    memmove(buffer, data, len(data))
    atom = xlib.XInternAtom(self._x_display, '_NET_WM_ICON', False)
    # Use floor division: '/' would yield a float element count under
    # Python 3 (and warns under `python -3`); X wants an integer.
    xlib.XChangeProperty(self._x_display, self._window, atom, XA_CARDINAL,
                         32, xlib.PropModeReplace, buffer,
                         len(data) // sizeof(c_ulong))
# Private utility
def _set_wm_normal_hints(self):
    """Send WM_NORMAL_HINTS (min/max size constraints) to the WM."""
    hints = xlib.XAllocSizeHints().contents
    if self._minimum_size:
        hints.flags |= xlib.PMinSize
        hints.min_width, hints.min_height = self._minimum_size
    if self._maximum_size:
        hints.flags |= xlib.PMaxSize
        hints.max_width, hints.max_height = self._maximum_size
    xlib.XSetWMNormalHints(self._x_display, self._window, byref(hints))
def _set_text_property(self, name, value, allow_utf8=True):
    """Set a string-valued window property.

    name: property name, e.g. 'WM_NAME'.
    value: str or unicode (Python 2 semantics assumed).
    allow_utf8: when False, force the legacy string encoding even if the
        Xutf8 extension is available (required for ICCCM properties).

    Raises XlibException when the atom is undefined or text-property
    conversion fails.
    """
    atom = xlib.XInternAtom(self._x_display, asbytes(name), False)
    if not atom:
        raise XlibException('Undefined atom "%s"' % name)
    assert type(value) in (str, unicode)  # NOTE: `unicode` is Python 2 only
    property = xlib.XTextProperty()
    if _have_utf8 and allow_utf8:
        buf = create_string_buffer(value.encode('utf8'))
        result = xlib.Xutf8TextListToTextProperty(self._x_display,
            cast(pointer(buf), c_char_p), 1, xlib.XUTF8StringStyle,
            byref(property))
        if result < 0:
            raise XlibException('Could not create UTF8 text property')
    else:
        # Fall back to ASCII, silently dropping unencodable characters.
        buf = create_string_buffer(value.encode('ascii', 'ignore'))
        result = xlib.XStringListToTextProperty(
            cast(pointer(buf), c_char_p), 1, byref(property))
        if result < 0:
            raise XlibException('Could not create text property')
    xlib.XSetTextProperty(self._x_display,
                          self._window, byref(property), atom)
    # XXX <rj> Xlib doesn't like us freeing this
    #xlib.XFree(property.value)
def _set_atoms_property(self, name, values, mode=xlib.PropModeReplace):
    """Set (or delete) a window property whose value is a list of atoms.

    name: property name, e.g. '_NET_WM_WINDOW_TYPE'.
    values: iterable of atom name strings; empty deletes the property.
    mode: one of the xlib.PropMode* constants.
    """
    name_atom = xlib.XInternAtom(self._x_display, asbytes(name), False)
    atoms = []
    for value in values:
        atoms.append(xlib.XInternAtom(self._x_display, asbytes(value), False))
    atom_type = xlib.XInternAtom(self._x_display, asbytes('ATOM'), False)
    if len(atoms):
        atoms_ar = (xlib.Atom * len(atoms))(*atoms)
        xlib.XChangeProperty(self._x_display, self._window,
            name_atom, atom_type, 32, mode,
            cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
    else:
        # BUG FIX: this branch previously referenced the undefined name
        # `net_wm_state` (copy-paste from _set_wm_state) and raised
        # NameError; the property being cleared is `name_atom`.
        xlib.XDeleteProperty(self._x_display, self._window, name_atom)
def _set_wm_state(self, *states):
    """Add _NET_WM_STATE atoms (e.g. fullscreen, maximized) and nudge
    the window manager with the matching client message."""
    # Set property.
    # NOTE(review): atom names here are passed as plain str rather than
    # asbytes() as elsewhere; fine on Python 2 where str is bytes.
    net_wm_state = xlib.XInternAtom(self._x_display, '_NET_WM_STATE', False)
    atoms = []
    for state in states:
        atoms.append(xlib.XInternAtom(self._x_display, state, False))
    atom_type = xlib.XInternAtom(self._x_display, 'ATOM', False)
    if len(atoms):
        atoms_ar = (xlib.Atom * len(atoms))(*atoms)
        xlib.XChangeProperty(self._x_display, self._window,
            net_wm_state, atom_type, 32, xlib.PropModePrepend,
            cast(pointer(atoms_ar), POINTER(c_ubyte)), len(atoms))
    else:
        xlib.XDeleteProperty(self._x_display, self._window, net_wm_state)

    # Nudge the WM: a ClientMessage to the root window is required for
    # state changes to take effect on a mapped window.
    e = xlib.XEvent()
    e.xclient.type = xlib.ClientMessage
    e.xclient.message_type = net_wm_state
    e.xclient.display = cast(self._x_display, POINTER(xlib.Display))
    e.xclient.window = self._window
    e.xclient.format = 32
    e.xclient.data.l[0] = xlib.PropModePrepend
    for i, atom in enumerate(atoms):
        e.xclient.data.l[i + 1] = atom
    xlib.XSendEvent(self._x_display, self._get_root(),
                    False, xlib.SubstructureRedirectMask, byref(e))
# Event handling
def dispatch_events(self):
    """Pump the X event queue and dispatch window events, view events
    and client messages (e.g. window close) to their handlers."""
    self.dispatch_pending_events()

    self._allow_dispatch_event = True

    e = xlib.XEvent()

    # Cache these in case window is closed from an event handler
    _x_display = self._x_display
    _window = self._window
    _view = self._view

    # Check for the events specific to this window
    while xlib.XCheckWindowEvent(_x_display, _window,
                                 0x1ffffff, byref(e)):
        # Key events are filtered by the xlib window event
        # handler so they get a shot at the prefiltered event.
        if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
            if xlib.XFilterEvent(e, 0):
                continue
        self.dispatch_platform_event(e)

    # Check for the events specific to this view
    while xlib.XCheckWindowEvent(_x_display, _view,
                                 0x1ffffff, byref(e)):
        # Key events are filtered by the xlib window event
        # handler so they get a shot at the prefiltered event.
        if e.xany.type not in (xlib.KeyPress, xlib.KeyRelease):
            if xlib.XFilterEvent(e, 0):
                continue
        self.dispatch_platform_event_view(e)

    # Generic events for this window (the window close event).
    while xlib.XCheckTypedWindowEvent(_x_display, _window,
                                      xlib.ClientMessage, byref(e)):
        self.dispatch_platform_event(e)

    if self._needs_resize:
        # A resize was deferred by the event handler; deliver it now.
        self.dispatch_event('on_resize', self._width, self._height)
        self.dispatch_event('on_expose')
        self._needs_resize = False

    self._allow_dispatch_event = False
def dispatch_pending_events(self):
    """Flush queued pyglet events, then any deferred context-loss
    notifications."""
    queue = self._event_queue
    while queue:
        EventDispatcher.dispatch_event(self, *queue.pop(0))

    # Dispatch any context-related events.
    if self._lost_context:
        self._lost_context = False
        EventDispatcher.dispatch_event(self, 'on_context_lost')
    if self._lost_context_state:
        self._lost_context_state = False
        EventDispatcher.dispatch_event(self, 'on_context_state_lost')
def dispatch_platform_event(self, e):
    """Route a raw X event for the top-level window to its handler."""
    if self._applied_mouse_exclusive is None:
        # A previous pointer grab failed; retry it now.
        self._update_exclusivity()
    handler = self._event_handlers.get(e.type)
    if handler is not None:
        handler(e)
def dispatch_platform_event_view(self, e):
    """Route a raw X event for the inner view window to its handler."""
    handler = self._view_event_handlers.get(e.type)
    if handler is not None:
        handler(e)
@staticmethod
def _translate_modifiers(state):
    """Convert an X modifier-state bitmask into pyglet MOD_* flags."""
    mask_to_flag = (
        (xlib.ShiftMask, key.MOD_SHIFT),
        (xlib.ControlMask, key.MOD_CTRL),
        (xlib.LockMask, key.MOD_CAPSLOCK),
        (xlib.Mod1Mask, key.MOD_ALT),
        (xlib.Mod2Mask, key.MOD_NUMLOCK),
        (xlib.Mod4Mask, key.MOD_WINDOWS),
        (xlib.Mod5Mask, key.MOD_SCROLLLOCK),
    )
    modifiers = 0
    for mask, flag in mask_to_flag:
        if state & mask:
            modifiers |= flag
    return modifiers
# Event handlers
'''
def _event_symbol(self, event):
# pyglet.self.key keysymbols are identical to X11 keysymbols, no
# need to map the keysymbol.
symbol = xlib.XKeycodeToKeysym(self._x_display, event.xkey.keycode, 0)
if symbol == 0:
# XIM event
return None
elif symbol not in key._key_names.keys():
symbol = key.user_key(event.xkey.keycode)
return symbol
'''
def _event_text_symbol(self, ev):
    """Decode a key event into (text, symbol).

    Returns the entered unicode text (or None) and the pyglet key
    symbol (or None for XIM-internal events).
    """
    text = None
    symbol = xlib.KeySym()
    buffer = create_string_buffer(128)

    # Look up raw keysym before XIM filters it (default for keypress and
    # keyrelease)
    count = xlib.XLookupString(ev.xkey,
                               buffer, len(buffer) - 1,
                               byref(symbol), None)

    # Give XIM a shot
    filtered = xlib.XFilterEvent(ev, ev.xany.window)

    if ev.type == xlib.KeyPress and not filtered:
        status = c_int()
        if _have_utf8:
            encoding = 'utf8'
            count = xlib.Xutf8LookupString(self._x_ic,
                                           ev.xkey,
                                           buffer, len(buffer) - 1,
                                           byref(symbol), byref(status))
            if status.value == xlib.XBufferOverflow:
                raise NotImplementedError('TODO: XIM buffer resize')
        else:
            encoding = 'ascii'
            count = xlib.XLookupString(ev.xkey,
                                       buffer, len(buffer) - 1,
                                       byref(symbol), None)
            if count:
                status.value = xlib.XLookupBoth

        if status.value & (xlib.XLookupChars | xlib.XLookupBoth):
            text = buffer.value[:count].decode(encoding)

        # Don't treat Unicode command codepoints as text, except Return.
        if text and unicodedata.category(text) == 'Cc' and text != '\r':
            text = None

    symbol = symbol.value

    # If the event is a XIM filtered event, the keysym will be virtual
    # (e.g., aacute instead of A after a dead key).  Drop it, we don't
    # want these kind of key events.
    if ev.xkey.keycode == 0 and not filtered:
        symbol = None

    # pyglet.self.key keysymbols are identical to X11 keysymbols, no
    # need to map the keysymbol.  For keysyms outside the pyglet set,
    # map raw key code to a user key.
    if symbol and symbol not in key._key_names and ev.xkey.keycode:
        # Issue 353: Symbol is uppercase when shift key held down.
        # NOTE: `unichr` is Python 2 only.
        symbol = ord(unichr(symbol).lower())

        # If still not recognised, use the keycode
        if symbol not in key._key_names:
            symbol = key.user_key(ev.xkey.keycode)

    if filtered:
        # The event was filtered, text must be ignored, but the symbol is
        # still good.
        return None, symbol

    return text, symbol
def _event_text_motion(self, symbol, modifiers):
    """Map (symbol, ctrl-held) to a MOTION_* constant, or None.

    ALT combinations never produce text motion.
    """
    if modifiers & key.MOD_ALT:
        return None
    return _motion_map.get((symbol, bool(modifiers & key.MOD_CTRL)))
@ViewEventHandler
@XlibEventHandler(xlib.KeyPress)
@XlibEventHandler(xlib.KeyRelease)
def _event_key_view(self, ev):
    """Dispatch on_key_press/on_key_release plus text events, folding
    X11 auto-repeat (a KeyRelease immediately followed by a KeyPress of
    the same keycode) into repeated text instead of key events."""
    if ev.type == xlib.KeyRelease:
        # Look in the queue for a matching KeyPress with same timestamp,
        # indicating an auto-repeat rather than actual key event.
        saved = []
        while True:
            auto_event = xlib.XEvent()
            result = xlib.XCheckWindowEvent(self._x_display,
                self._window, xlib.KeyPress|xlib.KeyRelease,
                byref(auto_event))
            if not result:
                break
            saved.append(auto_event)
            if auto_event.type == xlib.KeyRelease:
                # just save this off for restoration back to the queue
                continue
            if ev.xkey.keycode == auto_event.xkey.keycode:
                # Found a key repeat: dispatch EVENT_TEXT* event
                text, symbol = self._event_text_symbol(auto_event)
                modifiers = self._translate_modifiers(ev.xkey.state)
                modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
                motion = self._event_text_motion(symbol, modifiers)
                if motion:
                    if modifiers & key.MOD_SHIFT:
                        self.dispatch_event(
                            'on_text_motion_select', motion)
                    else:
                        self.dispatch_event('on_text_motion', motion)
                elif text and not modifiers_ctrl:
                    self.dispatch_event('on_text', text)

                # Drop the matched KeyPress (it was consumed above) and
                # put the other saved events back on the queue.
                ditched = saved.pop()
                for auto_event in reversed(saved):
                    xlib.XPutBackEvent(self._x_display, byref(auto_event))
                return
            else:
                # Key code of press did not match, therefore no repeating
                # is going on, stop searching.
                break

        # Whoops, put the events back, it's for real.
        for auto_event in reversed(saved):
            xlib.XPutBackEvent(self._x_display, byref(auto_event))

    text, symbol = self._event_text_symbol(ev)
    modifiers = self._translate_modifiers(ev.xkey.state)
    modifiers_ctrl = modifiers & (key.MOD_CTRL | key.MOD_ALT)
    motion = self._event_text_motion(symbol, modifiers)

    if ev.type == xlib.KeyPress:
        if symbol:
            self.dispatch_event('on_key_press', symbol, modifiers)
        if motion:
            if modifiers & key.MOD_SHIFT:
                self.dispatch_event('on_text_motion_select', motion)
            else:
                self.dispatch_event('on_text_motion', motion)
        elif text and not modifiers_ctrl:
            self.dispatch_event('on_text', text)
    elif ev.type == xlib.KeyRelease:
        if symbol:
            self.dispatch_event('on_key_release', symbol, modifiers)
@XlibEventHandler(xlib.KeyPress)
@XlibEventHandler(xlib.KeyRelease)
def _event_key(self, ev):
    # Key events on the frame window are handled identically to those on
    # the view; delegate to the shared implementation.
    return self._event_key_view(ev)
@ViewEventHandler
@XlibEventHandler(xlib.MotionNotify)
def _event_motionnotify_view(self, ev):
    """Dispatch on_mouse_motion / on_mouse_drag for pointer movement in the view."""
    # Flip Y: X11 has the origin top-left, pyglet bottom-left.
    x = ev.xmotion.x
    y = self.height - ev.xmotion.y

    # Deltas are only meaningful once a previous position is tracked.
    dx = x - self._mouse_x if self._mouse_in_window else 0
    dy = y - self._mouse_y if self._mouse_in_window else 0

    if self._applied_mouse_exclusive:
        if (ev.xmotion.x, ev.xmotion.y) == self._mouse_exclusive_client:
            # Ignore events caused by XWarpPointer
            self._mouse_x = x
            self._mouse_y = y
            return
        # Reset pointer position
        ex, ey = self._mouse_exclusive_client
        xlib.XWarpPointer(self._x_display,
                          0,
                          self._window,
                          0, 0,
                          0, 0,
                          ex, ey)

    self._mouse_x = x
    self._mouse_y = y
    self._mouse_in_window = True

    # Translate the X11 button-motion state into pyglet button flags.
    buttons = 0
    for x_mask, pyglet_button in ((xlib.Button1MotionMask, mouse.LEFT),
                                  (xlib.Button2MotionMask, mouse.MIDDLE),
                                  (xlib.Button3MotionMask, mouse.RIGHT)):
        if ev.xmotion.state & x_mask:
            buttons |= pyglet_button

    if buttons:
        # At least one button held: this motion is a drag.
        modifiers = self._translate_modifiers(ev.xmotion.state)
        self.dispatch_event('on_mouse_drag',
                            x, y, dx, dy, buttons, modifiers)
    else:
        self.dispatch_event('on_mouse_motion', x, y, dx, dy)
@XlibEventHandler(xlib.MotionNotify)
def _event_motionnotify(self, ev):
    """Handle pointer motion on the frame window, outside the view.

    Only drags matter here: a drag that started inside the view may
    continue across the window decorations/borders.
    """
    state = ev.xmotion.state
    buttons = 0
    if state & xlib.Button1MotionMask:
        buttons |= mouse.LEFT
    if state & xlib.Button2MotionMask:
        buttons |= mouse.MIDDLE
    if state & xlib.Button3MotionMask:
        buttons |= mouse.RIGHT
    if not buttons:
        # Plain motion outside the view is of no interest.
        return

    # Convert window coordinates to view coordinates (Y flipped to
    # pyglet's bottom-left origin).
    x = ev.xmotion.x - self._view_x
    y = self._height - (ev.xmotion.y - self._view_y)
    if self._mouse_in_window:
        dx = x - self._mouse_x
        dy = y - self._mouse_y
    else:
        dx = dy = 0
    self._mouse_x = x
    self._mouse_y = y
    modifiers = self._translate_modifiers(state)
    self.dispatch_event('on_mouse_drag',
                        x, y, dx, dy, buttons, modifiers)
@XlibEventHandler(xlib.ClientMessage)
def _event_clientmessage(self, ev):
    """Handle WM protocol client messages: close requests and sync requests."""
    display = ev.xclient.display
    atom = ev.xclient.data.l[0]
    if atom == xlib.XInternAtom(display,
                                asbytes('WM_DELETE_WINDOW'), False):
        # Window-manager close request (e.g. title-bar close button).
        self.dispatch_event('on_close')
        return
    if self._enable_xsync and \
            atom == xlib.XInternAtom(display,
                                     asbytes('_NET_WM_SYNC_REQUEST'), False):
        # Record the 64-bit sync counter value (lo/hi halves); it is
        # acknowledged later by _sync_resize.
        lo = ev.xclient.data.l[2]
        hi = ev.xclient.data.l[3]
        self._current_sync_value = xsync.XSyncValue(hi, lo)
def _sync_resize(self):
    """Acknowledge a pending _NET_WM_SYNC_REQUEST once a resize is handled."""
    if not (self._enable_xsync and self._current_sync_valid):
        return
    if xsync.XSyncValueIsZero(self._current_sync_value):
        # Nothing to acknowledge; just clear the flag.
        self._current_sync_valid = False
        return
    # Updating the counter tells the window manager we are done drawing.
    xsync.XSyncSetCounter(self._x_display,
                          self._sync_counter,
                          self._current_sync_value)
    self._current_sync_value = None
    self._current_sync_valid = False
@ViewEventHandler
@XlibEventHandler(xlib.ButtonPress)
@XlibEventHandler(xlib.ButtonRelease)
def _event_button(self, ev):
    """Dispatch mouse press/release/scroll events from X11 button events."""
    x = ev.xbutton.x
    # Flip Y: X11 origin is top-left, pyglet's is bottom-left.
    y = self.height - ev.xbutton.y
    button = 1 << (ev.xbutton.button - 1)  # 1, 2, 3 -> 1, 2, 4
    modifiers = self._translate_modifiers(ev.xbutton.state)
    if ev.type == xlib.ButtonPress:
        # override_redirect issue: manually activate this window if
        # fullscreen.
        if self._override_redirect and not self._active:
            self.activate()

        # X11 maps the scroll wheel onto pseudo-buttons 4 (up) and 5 (down).
        if ev.xbutton.button == 4:
            self.dispatch_event('on_mouse_scroll', x, y, 0, 1)
        elif ev.xbutton.button == 5:
            self.dispatch_event('on_mouse_scroll', x, y, 0, -1)
        elif ev.xbutton.button < len(self._mouse_buttons):
            self._mouse_buttons[ev.xbutton.button] = True
            self.dispatch_event('on_mouse_press',
                x, y, button, modifiers)
    else:
        # Releases of the scroll-wheel pseudo-buttons (>= 4) are ignored.
        if ev.xbutton.button < 4:
            self._mouse_buttons[ev.xbutton.button] = False
            self.dispatch_event('on_mouse_release',
                x, y, button, modifiers)
@ViewEventHandler
@XlibEventHandler(xlib.Expose)
def _event_expose(self, ev):
    """Redraw on exposure.

    Only the final event of an expose series (count == 0) triggers a
    redraw: partial exposure rectangles are irrelevant since the whole
    OpenGL scene is redrawn anyway.
    """
    if ev.xexpose.count == 0:
        self.dispatch_event('on_expose')
@ViewEventHandler
@XlibEventHandler(xlib.EnterNotify)
def _event_enternotify(self, ev):
    """Rebuild button state on pointer entry and dispatch on_mouse_enter."""
    # figure active mouse buttons
    # XXX ignore modifier state?
    state = ev.xcrossing.state
    masks = (xlib.Button1Mask, xlib.Button2Mask, xlib.Button3Mask,
             xlib.Button4Mask, xlib.Button5Mask)
    for index, mask in enumerate(masks, start=1):
        self._mouse_buttons[index] = state & mask

    # mouse position (Y flipped to pyglet's bottom-left origin)
    x = self._mouse_x = ev.xcrossing.x
    y = self._mouse_y = self.height - ev.xcrossing.y
    self._mouse_in_window = True

    # XXX there may be more we could do here
    self.dispatch_event('on_mouse_enter', x, y)
@ViewEventHandler
@XlibEventHandler(xlib.LeaveNotify)
def _event_leavenotify(self, ev):
    """Record the exit position and dispatch on_mouse_leave."""
    # Y is flipped to pyglet's bottom-left origin.
    self._mouse_x = x = ev.xcrossing.x
    self._mouse_y = y = self.height - ev.xcrossing.y
    self._mouse_in_window = False
    self.dispatch_event('on_mouse_leave', x, y)
@XlibEventHandler(xlib.ConfigureNotify)
def _event_configurenotify(self, ev):
    """React to window size/position changes reported by the X server."""
    if self._enable_xsync and self._current_sync_value:
        # A _NET_WM_SYNC_REQUEST value is pending; mark it usable so
        # _sync_resize can acknowledge it after redraw.
        self._current_sync_valid = True

    # In fullscreen the geometry is managed by us, not the WM.
    if self._fullscreen:
        return

    self.switch_to()

    new_width, new_height = ev.xconfigure.width, ev.xconfigure.height
    new_x, new_y = ev.xconfigure.x, ev.xconfigure.y

    if (new_width, new_height) != (self._width, self._height):
        self._update_view_size()
        self._width = new_width
        self._height = new_height
        self._needs_resize = True

    if (new_x, new_y) != (self._x, self._y):
        self.dispatch_event('on_move', new_x, new_y)
        self._x = new_x
        self._y = new_y
@XlibEventHandler(xlib.FocusIn)
def _event_focusin(self, ev):
    # Keyboard focus gained: re-apply any mouse/keyboard exclusivity and
    # direct X input-context (IME) events to this window.
    self._active = True
    self._update_exclusivity()
    self.dispatch_event('on_activate')
    xlib.XSetICFocus(self._x_ic)
@XlibEventHandler(xlib.FocusOut)
def _event_focusout(self, ev):
    # Keyboard focus lost: release exclusivity and detach the X input
    # context so IME events stop flowing to this window.
    self._active = False
    self._update_exclusivity()
    self.dispatch_event('on_deactivate')
    xlib.XUnsetICFocus(self._x_ic)
@XlibEventHandler(xlib.MapNotify)
def _event_mapnotify(self, ev):
    # Window became visible (mapped); exclusivity may need re-applying.
    self._mapped = True
    self.dispatch_event('on_show')
    self._update_exclusivity()
@XlibEventHandler(xlib.UnmapNotify)
def _event_unmapnotify(self, ev):
    # Window was hidden (unmapped, e.g. minimised).
    self._mapped = False
    self.dispatch_event('on_hide')
| bsd-3-clause |
todaychi/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/auth/management/__init__.py | 91 | 6767 | """
Creates permissions for all installed apps that need permissions.
"""
from __future__ import unicode_literals
import getpass
import unicodedata
from django.contrib.auth import (models as auth_app, get_permission_codename,
get_user_model)
from django.core import exceptions
from django.core.management.base import CommandError
from django.db import DEFAULT_DB_ALIAS, router
from django.db.models import get_model, get_models, signals, UnavailableApp
from django.utils.encoding import DEFAULT_LOCALE_ENCODING
from django.utils import six
from django.utils.six.moves import input
def _get_all_permissions(opts, ctype):
    """
    Returns (codename, name) for all permissions in the given opts.

    Combines the autogenerated add/change/delete permissions with any
    custom ``Meta.permissions``, after verifying the two sets don't clash.
    """
    auto_perms = _get_builtin_permissions(opts)
    extra_perms = list(opts.permissions)
    _check_permission_clashing(extra_perms, auto_perms, ctype)
    return auto_perms + extra_perms
def _get_builtin_permissions(opts):
    """
    Returns (codename, name) for all autogenerated permissions.

    One permission per standard admin action (add/change/delete).
    """
    return [
        (get_permission_codename(action, opts),
         'Can %s %s' % (action, opts.verbose_name_raw))
        for action in ('add', 'change', 'delete')
    ]
def _check_permission_clashing(custom, builtin, ctype):
"""
Check that permissions for a model do not clash. Raises CommandError if
there are duplicate permissions.
"""
pool = set()
builtin_codenames = set(p[0] for p in builtin)
for codename, _name in custom:
if codename in pool:
raise CommandError(
"The permission codename '%s' is duplicated for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
elif codename in builtin_codenames:
raise CommandError(
"The permission codename '%s' clashes with a builtin permission "
"for model '%s.%s'." %
(codename, ctype.app_label, ctype.model_class().__name__))
pool.add(codename)
def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kwargs):
    """Create any missing Permission rows for all models of *app* on *db*.

    Connected to the ``post_syncdb`` signal (see bottom of module).
    Existing permissions are left untouched; only the missing ones are
    bulk-created.
    """
    try:
        get_model('auth', 'Permission')
    except UnavailableApp:
        # django.contrib.auth is not installed; nothing to create.
        return

    # Respect database routing: skip databases that don't sync auth.
    if not router.allow_syncdb(db, auth_app.Permission):
        return

    from django.contrib.contenttypes.models import ContentType

    app_models = get_models(app)

    # This will hold the permissions we're looking for as
    # (content_type, (codename, name))
    searched_perms = list()
    # The codenames and ctypes that should exist.
    ctypes = set()
    for klass in app_models:
        # Force looking up the content types in the current database
        # before creating foreign keys to them.
        ctype = ContentType.objects.db_manager(db).get_for_model(klass)
        ctypes.add(ctype)
        for perm in _get_all_permissions(klass._meta, ctype):
            searched_perms.append((ctype, perm))

    # Find all the Permissions that have a content_type for a model we're
    # looking for. We don't need to check for codenames since we already have
    # a list of the ones we're going to create.
    all_perms = set(auth_app.Permission.objects.using(db).filter(
        content_type__in=ctypes,
    ).values_list(
        "content_type", "codename"
    ))

    # Only instantiate permissions that don't already exist in the db.
    perms = [
        auth_app.Permission(codename=codename, name=name, content_type=ctype)
        for ctype, (codename, name) in searched_perms
        if (ctype.pk, codename) not in all_perms
    ]
    auth_app.Permission.objects.using(db).bulk_create(perms)
    if verbosity >= 2:
        for perm in perms:
            print("Adding permission '%s'" % perm)
def create_superuser(app, created_models, verbosity, db, **kwargs):
    """Interactively offer to create a superuser after the auth app syncs.

    Connected to the ``post_syncdb`` signal with ``sender=auth_app`` (see
    bottom of module).  Only prompts when the user model was just created
    and the sync is interactive.
    """
    try:
        get_model('auth', 'Permission')
        UserModel = get_user_model()
    except UnavailableApp:
        # django.contrib.auth is not installed; nothing to do.
        return

    from django.core.management import call_command

    if UserModel in created_models and kwargs.get('interactive', True):
        msg = ("\nYou just installed Django's auth system, which means you "
            "don't have any superusers defined.\nWould you like to create one "
            "now? (yes/no): ")
        confirm = input(msg)
        while 1:
            if confirm not in ('yes', 'no'):
                # Re-prompt until a definite answer is given.
                confirm = input('Please enter either "yes" or "no": ')
                continue
            if confirm == 'yes':
                call_command("createsuperuser", interactive=True, database=db)
            # Both 'yes' (after creation) and 'no' end the prompt loop.
            break
def get_system_username():
    """
    Try to determine the current system user's username.

    :returns: The username as a unicode string, or an empty string if the
        username could not be determined.
    """
    try:
        username = getpass.getuser()
    except (ImportError, KeyError):
        # KeyError will be raised by os.getpwuid() (called by getuser())
        # if there is no corresponding entry in the /etc/passwd file
        # (a very restricted chroot environment, for example).
        return ''
    if not six.PY2:
        # Python 3 getuser() already returns str.
        return username
    # On Python 2 getuser() returns bytes; decode with the locale encoding.
    try:
        return username.decode(DEFAULT_LOCALE_ENCODING)
    except UnicodeDecodeError:
        # UnicodeDecodeError - preventive treatment for non-latin Windows.
        return ''
def get_default_username(check_db=True):
    """
    Try to determine the current system user's username to use as a default.

    :param check_db: If ``True``, requires that the username does not match an
        existing ``auth.User`` (otherwise returns an empty string).
    :returns: The username, or an empty string if no username can be
        determined.
    """
    # If the User model has been swapped out, we can't make any assumptions
    # about the default user name.
    if auth_app.User._meta.swapped:
        return ''
    default_username = get_system_username()
    try:
        # Strip accents and spaces and lowercase: usernames must be ASCII.
        default_username = unicodedata.normalize('NFKD', default_username)\
            .encode('ascii', 'ignore').decode('ascii').replace(' ', '').lower()
    except UnicodeDecodeError:
        return ''

    # Run the username validator
    try:
        auth_app.User._meta.get_field('username').run_validators(default_username)
    except exceptions.ValidationError:
        return ''

    # Don't return the default username if it is already taken.
    if check_db and default_username:
        try:
            auth_app.User._default_manager.get(username=default_username)
        except auth_app.User.DoesNotExist:
            pass
        else:
            # A user with this name already exists; don't suggest it.
            return ''
    return default_username
signals.post_syncdb.connect(create_permissions,
dispatch_uid="django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
sender=auth_app, dispatch_uid="django.contrib.auth.management.create_superuser")
| apache-2.0 |
gamezdaniel/mswl-dt-2013 | myspider/pymyspider/__init__.py | 1 | 2784 | # Copyright (c) 2013, Daniel Gamez
# with the help of Israel Herraiz at URJC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1) Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2) Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import urllib2
from bs4 import BeautifulSoup as Soup
from urlparse import urlparse, urljoin
### URL's Retriever
def retrieve_links (url):
    """Retrieve the href targets of all anchors on the page at *url*.

    The URL must be absolute (e.g. start with ``http://``).  Returns an
    empty list when the URL cannot be fetched.
    """
    opener = urllib2.build_opener ()
    try:
        page = opener.open (url).read ()
    except urllib2.URLError:
        # Unreachable or malformed URL: behave as if the page had no links.
        return []
    soup = Soup(page)
    return [anchor['href'] for anchor in soup.findAll('a')
            if anchor.has_attr('href')]
### Obtain links list by depth per URL
def links_list (url, depth):
# Base case
if depth == 0:
l = retrieve_links (url)
for each in l:
print " - %s" % each
return l
else:
# Get URL base on b
b = validate_url (url)
#if not b:
#return "Invalid URL"
l = retrieve_links (url)
for each in l:
# Get URL base on e
e = validate_url (each)
# Correct list url item
if not e:
l[l.index(each)] = urljoin(b, each)
for each in l:
print " %s %s" % ("*"*depth, each)
l2 = links_list (each, depth-1)
print ""
### URL Validator
def validate_url (url):
    """Return the normalised base URL ('scheme://host/'), or False if *url*
    is not an absolute URL (missing scheme or hostname)."""
    parts = urlparse(url)
    if not (parts.scheme and parts.hostname):
        # Not a valid URL
        return False
    # Get URL base and hostname to form correct URL base
    return parts.scheme + '://' + parts.hostname + '/'
| bsd-2-clause |
sdwilsh/buck | src/com/facebook/buck/apple/project_generator/build_with_buck_test.py | 10 | 5555 | import unittest
import tempfile
import uuid
import os
import platform
import pkg_resources
from build_with_buck import *
# DEBUG_INFORMATION_FORMAT values as Xcode exports them to build phases.
XCODE_DWARF = "dwarf"
XCODE_DSYM = "dwarf-with-dsym"
class TestBuildWithBuck(unittest.TestCase):
    """Unit tests for build_with_buck.get_command().

    Each test seeds the Xcode-provided environment variables that
    get_command() reads (PLATFORM_NAME, ARCHS, VALID_ARCHS,
    DEBUG_INFORMATION_FORMAT) and checks the generated buck command line.
    All tests are no-ops on non-Darwin hosts because the script under
    test is OS X only.
    """

    def run_with_data(self,
                      platform_name,
                      archs,
                      valid_archs,
                      debug_format,
                      repo_root,
                      buck_path,
                      flags,
                      target,
                      dwarf_flavor,
                      dsym_flavor):
        """Seed the Xcode environment variables and invoke get_command()."""
        os.environ['PLATFORM_NAME'] = platform_name
        os.environ['ARCHS'] = archs
        os.environ['VALID_ARCHS'] = valid_archs
        os.environ['DEBUG_INFORMATION_FORMAT'] = debug_format
        return get_command(repo_root, buck_path, flags, target, dwarf_flavor, dsym_flavor)

    def test_generating_single_arch_dsym(self):
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        result = self.run_with_data("some_plat",
                                    "some_arch",
                                    "some_arch other_arch",
                                    XCODE_DSYM,
                                    "/repo/path",
                                    "/buck/path",
                                    "--flags",
                                    "//My:Target",
                                    "DWARF_FLAVOR",
                                    "DSYM_FLAVOR")
        self.assertEqual(result,
                         '/buck/path build --flags //My:Target#DSYM_FLAVOR,some_plat-some_arch')

    def test_generating_single_arch_dwarf(self):
        # NOTE: this test was previously defined twice with identical
        # bodies; the duplicate definition (which silently shadowed the
        # first one in the class namespace) has been removed.
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        result = self.run_with_data("some_plat",
                                    "some_arch",
                                    "some_arch other_arch",
                                    XCODE_DWARF,
                                    "/repo/path",
                                    "/buck/path",
                                    "--flags",
                                    "//My:Target",
                                    "DWARF_FLAVOR",
                                    "DSYM_FLAVOR")
        self.assertEqual(result,
                         '/buck/path build --flags //My:Target#DWARF_FLAVOR,some_plat-some_arch')

    def test_generating_double_arch(self):
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        result = self.run_with_data("plat",
                                    "arch1 arch2",
                                    "arch2 arch1",
                                    XCODE_DWARF,
                                    "/repo/path",
                                    "/buck/path",
                                    "--flags",
                                    "//My:Target",
                                    "DWARF_FLAVOR",
                                    "DSYM_FLAVOR")
        self.assertEqual(result,
                         '/buck/path build --flags //My:Target#DWARF_FLAVOR,plat-arch1,plat-arch2')

    def test_generating_unsupported_arch(self):
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        with self.assertRaises(ValueError):
            self.run_with_data("some_plat",
                               "----UNSUPPORTED_ARCH----",
                               "some_arch other_arch",
                               XCODE_DWARF,
                               "/repo/path",
                               "/buck/path",
                               "--flags",
                               "//My:Target",
                               "DWARF_FLAVOR",
                               "DSYM_FLAVOR")

    def test_generating_unsupported_debug_format(self):
        if platform.system() != 'Darwin':
            # This script is expected to be used on OS X only
            return
        with self.assertRaises(ValueError):
            self.run_with_data("some_plat",
                               "some_arch",
                               "some_arch other_arch",
                               "------UNSUPPORTED-----",
                               "/repo/path",
                               "/buck/path",
                               "--flags",
                               "//My:Target",
                               "DWARF_FLAVOR",
                               "DSYM_FLAVOR")
if __name__ == '__main__':
    # Run the test suite when this file is executed directly.
    unittest.main()
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.